[kernel] r17295 - in dists/squeeze/linux-2.6/debian: . config patches/debian patches/features/all patches/features/all/bna patches/features/all/hpsa patches/features/all/pm8001 patches/features/x86 patches/series

Ben Hutchings benh at alioth.debian.org
Wed May 4 01:10:55 UTC 2011


Author: benh
Date: Wed May  4 01:10:25 2011
New Revision: 17295

Log:
Add support for new hardware

  * scsi: Add hpsa driver for HP Smart Array controllers
    - Disable binding to devices currently handled by cciss
  * scsi: Add pm8001 driver for PMC-Sierra SAS/SATA HBAs
  * bnx2i: Add support for BCM5771E
  * wl1251: Add support for PG11 chips
  * bnx2x: Add support for BCM84823
  * ar9170usb: Add several additional USB device IDs
  * net: Add bna driver for Brocade Ethernet adapters

Added:
   dists/squeeze/linux-2.6/debian/patches/debian/hpsa-Remove-device-IDs-currently-handled-by-cciss.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/SCSI-bnx2i-Add-5771E-device-support-to-bnx2i-driver.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/ar9170-add-support-for-NEC-WL300NU-G-USB-dongle.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/ar9170usb-add-Sphairon-Homelink-1202-USB-ID.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/ar9170usb-add-vendor-and-device-ID-for-Qwest-Actiont.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0001-bna-Brocade-10Gb-Ethernet-device-driver.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0002-bna-Delete-get_flags-and-set_flags-ethtool-methods.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0003-bna-Fixed-build-break-for-allyesconfig.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0004-bna-fix-stats-handling.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0006-NET-bna-fix-lock-imbalance.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0007-bna-Check-for-NULL-before-deref-in-bnad_cb_tx_cleanu.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0008-bna-off-by-one.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0009-drivers-net-return-operator-cleanup.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0010-bna-fix-interrupt-handling.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0011-bna-scope-and-dead-code-cleanup.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0013-bna-TxRx-and-datapath-fix.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0014-bna-Port-enable-disable-sync-and-txq-priority-fix.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0015-bna-Fix-ethtool-register-dump-and-reordered-an-API.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0016-bna-Enable-pure-priority-tagged-packet-reception-and.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0017-bna-Fix-for-TX-queue.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0018-bna-IOC-uninit-check-and-misc-cleanup.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0019-bna-Removed-unused-code.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0020-bna-Restore-VLAN-filter-table.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0021-bna-IOC-failure-auto-recovery-fix.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0022-bna-Update-the-driver-version-to-2.3.2.3.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bna/0023-bna-Remove-unnecessary-memset-0.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bnx2x-Add-support-for-BCM84823.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/bnx2x-Fix-10G-mode-in-BCM8481-BCM84823.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0001-SCSI-hpsa-add-driver-for-HP-Smart-Array-controllers.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0002-SCSI-hpsa-fix-typo-in-comments.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0003-SCSI-hpsa-Use-kernel-integer-types-not-userland-ones.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0004-SCSI-hpsa-avoid-unwanted-promotion-from-unsigned-to-.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0005-SCSI-hpsa-Use-BUG_ON-instead-of-an-if-statement.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0006-SCSI-hpsa-make-adjust_hpsa_scsi_table-return-void.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0007-SCSI-hpsa-remove-superfluous-returns-from-void-funct.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0008-SCSI-hpsa-return-proper-error-codes-not-minus-one.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0009-SCSI-hpsa-use-sizeof-not-an-inline-constant-in-memse.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0010-SCSI-hpsa-use-kzalloc-not-kmalloc-plus-memset.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0011-SCSI-hpsa-remove-unwanted-debug-code.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0012-SCSI-hpsa-eliminate-unnecessary-memcpys.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0013-SCSI-hpsa-make-tag-macros-into-functions.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0014-SCSI-hpsa-fix-some-debug-printks-to-use-dev_dbg-inst.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0015-SCSI-hpsa-interrupt-pending-function-should-return-b.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0016-SCSI-hpsa-Allow-multiple-command-completions-per-int.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0017-SCSI-hpsa-add-pci-ids-for-storageworks-1210m-remove-.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0018-SCSI-hpsa-Fix-p1210m-LUN-assignment.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0019-SCSI-hpsa-Return-DID_RESET-for-commands-which-comple.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0020-SCSI-hpsa-Retry-commands-completing-with-a-sense-key.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0021-SCSI-hpsa-Don-t-return-DID_NO_CONNECT-when-a-device-.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0022-SCSI-hpsa-Add-an-shost_to_hba-helper-function.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0023-SCSI-hpsa-use-scan_start-and-scan_finished-entry-poi.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0024-SCSI-hpsa-when-resetting-devices-print-out-which-dev.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0025-SCSI-hpsa-print-all-the-bytes-of-the-CDB-not-just-th.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0026-SCSI-hpsa-clarify-obscure-comment-in-adjust_hpsa_scs.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0027-SCSI-hpsa-Fix-hpsa_find_scsi_entry-so-that-it-doesn-.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0028-SCSI-hpsa-fix-bug-in-adjust_hpsa_scsi_table.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0029-SCSI-hpsa-eliminate-lock_kernel-in-compat_ioctl.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0030-SCSI-hpsa-Reorder-compat-ioctl-functions-to-eliminat.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0031-SCSI-hpsa-update-driver-version-to-2.0.1-3.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0033-SCSI-hpsa-fix-firmwart-typo.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0034-SCSI-hpsa-fix-scsi-status-mis-shift.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0035-SCSI-hpsa-return-ENOMEM-not-1.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0036-SCSI-hpsa-remove-scan-thread.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0037-SCSI-hpsa-mark-hpsa_pci_init-as-__devinit.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0038-SCSI-hpsa-Clarify-calculation-of-padding-for-command.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0039-SCSI-hpsa-Increase-the-number-of-scatter-gather-elem.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0040-SCSI-hpsa-remove-unused-members-next-prev-and-retry_.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0041-SCSI-hpsa-remove-unneeded-defines.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0042-SCSI-hpsa-save-pdev-pointer-in-per-hba-structure-ear.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0043-SCSI-hpsa-factor-out-hpsa_lookup_board_id.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0044-SCSI-hpsa-factor-out-hpsa_board_disabled.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0045-SCSI-hpsa-remove-redundant-board_id-parameter-from-h.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0046-SCSI-hpsa-factor-out-hpsa_find_memory_BAR.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0047-SCSI-hpsa-factor-out-hpsa_wait_for_board_ready.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0048-SCSI-hpsa-factor-out-hpsa_find_cfgtables.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0049-SCSI-hpsa-fix-leak-of-ioremapped-memory-in-hpsa_pci_.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0050-SCSI-hpsa-hpsa-factor-out-hpsa_find_board_params.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0051-SCSI-hpsa-factor-out-hpsa-CISS-signature-present.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0052-SCSI-hpsa-factor-out-hpsa_enable_scsi_prefetch.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0053-SCSI-hpsa-factor-out-hpsa_p600_dma_prefetch_quirk.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0054-SCSI-hpsa-factor-out-hpsa_enter_simple_mode.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0055-SCSI-hpsa-check-that-simple-mode-is-supported.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0056-SCSI-hpsa-clean-up-debug-ifdefs.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0057-SCSI-hpsa-mark-hpsa_mark_hpsa_put_ctlr_into_performa.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0058-SCSI-hpsa-factor-out-hpsa_wait_for_mode_change_ack.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0059-SCSI-hpsa-remove-unused-variable-trans_offset.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0060-SCSI-hpsa-factor-out-hpsa_enter_performant_mode.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0061-SCSI-hpsa-remove-unused-firm_ver-member-of-the-per-h.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0062-SCSI-hpsa-Add-hpsa.txt-to-Documentation-scsi.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0063-SCSI-hpsa-expose-controller-firmware-revision-via-sy.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0064-SCSI-hpsa-fix-block-fetch-table-problem.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0065-SCSI-hpsa-add-new-controllers.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0066-SCSI-hpsa-Make-hpsa_allow_any-1-boot-param-enable-Co.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0067-SCSI-hpsa-make-hpsa_find_memory_BAR-not-require-the-.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0068-SCSI-hpsa-factor-out-hpsa_find_cfg_addrs.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0069-SCSI-hpsa-factor-out-the-code-to-reset-controllers-o.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0070-SCSI-hpsa-Fix-hard-reset-code.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0071-SCSI-hpsa-forbid-hard-reset-of-640x-boards.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0072-SCSI-hpsa-separate-intx-and-msi-msix-interrupt-handl.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0073-SCSI-hpsa-sanitize-max-commands.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0074-SCSI-hpsa-disable-doorbell-reset-on-reset_devices.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0077-SCSI-hpsa-fix-redefinition-of-PCI_DEVICE_ID_CISSF.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0078-SCSI-hpsa-do-not-consider-firmware-revision-when-loo.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0079-SCSI-hpsa-do-not-consider-RAID-level-to-be-part-of-d.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/kernel.h-add-pr_warn-for-symmetry-to-dev_warn-netdev.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/net-use-helpers-to-access-mc-list-V2.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/netdevice.h-Add-netdev_printk-helpers-like-dev_printk.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/netdevice.h-Add-netif_printk-helpers.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/
   dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0001-SCSI-pm8001-add-SAS-SATA-HBA-driver.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0002-SCSI-pm8001-add-reinitialize-SPC-parameters-before-p.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0003-SCSI-pm8001-enhance-IOMB-process-modules.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0004-SCSI-pm8001-Fixes-for-tag-alloc-error-goto-and-code-.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0005-SCSI-pm8001-Fix-for-sata-io-circular-lock-dependency.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0006-SCSI-pm8001-enhance-error-handle-for-IO-patch.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0007-SCSI-pm8001-fix-endian-issues-with-SAS-address.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0008-SCSI-pm8001-set-SSC-down-spreading-only-to-get-less-.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0009-SCSI-pm8001-fix-potential-NULL-pointer-dereference.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0010-SCSI-pm8001-bit-set-pm8001_ha-flags.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0011-SCSI-pm8001-do-not-reset-local-sata-as-it-will-not-b.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0012-SCSI-pm8001-enable-read-HBA-SAS-address-from-VPD.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0013-SCSI-pm8001-misc-code-cleanup.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0014-SCSI-pm8001-Use-kzalloc-for-allocating-only-one-thin.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0017-SCSI-pm8001-drop-redundant-memset.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0018-SCSI-pm8001-potential-null-dereference-in-pm8001_dev.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0022-SCSI-pm8001-introduce-missing-kfree.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0023-SCSI-pm8001-handle-allocation-failures.patch
   dists/squeeze/linux-2.6/debian/patches/features/all/wl1251-add-support-for-PG11-chips.patch
   dists/squeeze/linux-2.6/debian/patches/features/x86/ata-Intel-IDE-R-support.patch
   dists/squeeze/linux-2.6/debian/patches/series/35
Modified:
   dists/squeeze/linux-2.6/debian/changelog
   dists/squeeze/linux-2.6/debian/config/config

Modified: dists/squeeze/linux-2.6/debian/changelog
==============================================================================
--- dists/squeeze/linux-2.6/debian/changelog	Tue May  3 15:28:33 2011	(r17294)
+++ dists/squeeze/linux-2.6/debian/changelog	Wed May  4 01:10:25 2011	(r17295)
@@ -1,3 +1,17 @@
+linux-2.6 (2.6.32-35) UNRELEASED; urgency=low
+
+  [ Ben Hutchings ]
+  * scsi: Add hpsa driver for HP Smart Array controllers
+    - Disable binding to devices currently handled by cciss
+  * scsi: Add pm8001 driver for PMC-Sierra SAS/SATA HBAs
+  * bnx2i: Add support for BCM5771E
+  * wl1251: Add support for PG11 chips
+  * bnx2x: Add support for BCM84823
+  * ar9170usb: Add several additional USB device IDs
+  * net: Add bna driver for Brocade Ethernet adapters
+
+ -- Ben Hutchings <ben at decadent.org.uk>  Wed, 04 May 2011 01:44:34 +0100
+
 linux-2.6 (2.6.32-34) stable; urgency=high
 
   [ Ian Campbell ]

Modified: dists/squeeze/linux-2.6/debian/config/config
==============================================================================
--- dists/squeeze/linux-2.6/debian/config/config	Tue May  3 15:28:33 2011	(r17294)
+++ dists/squeeze/linux-2.6/debian/config/config	Wed May  4 01:10:25 2011	(r17295)
@@ -1452,6 +1452,7 @@
 CONFIG_BNX2X=m
 CONFIG_QLCNIC=m
 CONFIG_QLGE=m
+CONFIG_BNA=m
 CONFIG_DEFXX=m
 # CONFIG_DEFXX_MMIO is not set
 CONFIG_PPP=m
@@ -1980,6 +1981,7 @@
 CONFIG_SCSI_LOWLEVEL=y
 CONFIG_ISCSI_TCP=m
 CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_HPSA=m
 CONFIG_SCSI_3W_9XXX=m
 CONFIG_SCSI_3W_SAS=m
 CONFIG_SCSI_7000FASST=m
@@ -2006,6 +2008,7 @@
 # CONFIG_SCSI_LPFC_DEBUG_FS is not set
 # CONFIG_SCSI_DEBUG is not set
 CONFIG_SCSI_PMCRAID=m
+CONFIG_SCSI_PM8001=m
 CONFIG_SCSI_SRP=m
 CONFIG_SCSI_BFA_FC=m
 

Added: dists/squeeze/linux-2.6/debian/patches/debian/hpsa-Remove-device-IDs-currently-handled-by-cciss.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/debian/hpsa-Remove-device-IDs-currently-handled-by-cciss.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,32 @@
+From aacda89013c13d20dae4d3b78a2c489cdfcec046 Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Sun, 1 May 2011 20:26:17 +0100
+Subject: [PATCH] hpsa: Remove device IDs currently handled by cciss
+
+We don't want to transfer devices from cciss to hpsa within a stable
+release.  hpsa should only handle the new devices.
+---
+ drivers/scsi/hpsa.c |    7 -------
+ 1 files changed, 0 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index bcbd9aa..3c7c9a7 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -77,13 +77,6 @@ MODULE_PARM_DESC(hpsa_allow_any,
+ 
+ /* define the PCI info for the cards we can control */
+ static const struct pci_device_id hpsa_pci_device_id[] = {
+-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
+-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
+-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
+-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
+-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
+-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324a},
+-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324b},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3250},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3251},
+-- 
+1.7.4.4
+
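
The mechanism this patch leans on is the driver's PCI device ID table: the
PCI core only probes a driver for devices whose vendor/device/subsystem IDs
appear in its table, and module autoloading matches against the same table
through MODULE_DEVICE_TABLE().  Removing the seven overlapping entries is
therefore enough to keep hpsa away from boards that cciss already drives.
A minimal sketch of the pattern (hypothetical driver names, not the hpsa
source):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>

/* Only devices listed here are ever handed to this driver. */
static const struct pci_device_id example_pci_ids[] = {
	/* vendor, device, subvendor, subdevice */
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{}	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);	/* generates modprobe aliases */

static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	return 0;	/* device set-up would go here */
}

static void example_remove(struct pci_dev *pdev)
{
}

static struct pci_driver example_driver = {
	.name		= "example",
	.id_table	= example_pci_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_init(void)
{
	return pci_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

With the shared entries gone, udev keeps loading cciss for the removed
subsystem IDs exactly as before, and hpsa binds only the newer controllers.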

Added: dists/squeeze/linux-2.6/debian/patches/features/all/SCSI-bnx2i-Add-5771E-device-support-to-bnx2i-driver.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/SCSI-bnx2i-Add-5771E-device-support-to-bnx2i-driver.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,34 @@
+From: Anil Veerabhadrappa <anilgv at broadcom.com>
+Date: Mon, 7 Dec 2009 11:39:33 -0800
+Subject: [PATCH] [SCSI] bnx2i: Add 5771E device support to bnx2i driver
+
+commit 5d9e1fa99c2a9a5977f5757f4e0fd02697c995c2 upstream.
+
+Signed-off-by: Anil Veerabhadrappa <anilgv at broadcom.com>
+Reviewed-by: Mike Christie <michaelc at cs.wisc.edu>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/bnx2i/bnx2i_init.c |    6 +++++-
+ 1 files changed, 5 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
+index 0c4210d..1dba86c 100644
+--- a/drivers/scsi/bnx2i/bnx2i_init.c
++++ b/drivers/scsi/bnx2i/bnx2i_init.c
+@@ -83,8 +83,12 @@ void bnx2i_identify_device(struct bnx2i_hba *hba)
+ 		set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
+ 		hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
+ 	} else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
+-		   hba->pci_did == PCI_DEVICE_ID_NX2_57711)
++		   hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
++		   hba->pci_did == PCI_DEVICE_ID_NX2_57711E)
+ 		set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
++	else
++		printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
++				  hba->pci_did);
+ }
+ 
+ 
+-- 
+1.7.4.4
+
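
bnx2i_identify_device() only records the device class in hba->cnic_dev_type;
the rest of the driver branches on that bit, so classifying the 5771E as
57710-class is all the enablement needed.  In outline (a condensed,
hypothetical form, not the literal bnx2i code):

	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
		/* 57710-family setup path, now also taken by the 5771E */
	}

The new printk in the fallback branch also makes genuinely unknown devices
visible instead of leaving them silently unclassified.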

Added: dists/squeeze/linux-2.6/debian/patches/features/all/ar9170-add-support-for-NEC-WL300NU-G-USB-dongle.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/ar9170-add-support-for-NEC-WL300NU-G-USB-dongle.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,30 @@
+From: Ben Konrath <ben at bagu.org>
+Date: Thu, 18 Mar 2010 19:06:57 -0400
+Subject: [PATCH 14/34] ar9170: add support for NEC WL300NU-G USB dongle
+
+commit eb3d72c8b7e6bb6a55e15272c52eb4eadf7fb1f1 upstream.
+
+This patch adds support for the NEC WL300NU-G USB wifi dongle.
+
+Signed-off-by: Ben Konrath <ben at bagu.org>
+Signed-off-by: John W. Linville <linville at tuxdriver.com>
+---
+ drivers/net/wireless/ath/ar9170/usb.c |    2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
+index 4e30197..c185750 100644
+--- a/drivers/net/wireless/ath/ar9170/usb.c
++++ b/drivers/net/wireless/ath/ar9170/usb.c
+@@ -94,6 +94,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
+ 	{ USB_DEVICE(0x04bb, 0x093f) },
+ 	/* AVM FRITZ!WLAN USB Stick N */
+ 	{ USB_DEVICE(0x057C, 0x8401) },
++	/* NEC WL300NU-G */
++	{ USB_DEVICE(0x0409, 0x0249) },
+ 	/* AVM FRITZ!WLAN USB Stick N 2.4 */
+ 	{ USB_DEVICE(0x057C, 0x8402), .driver_info = AR9170_REQ_FW1_ONLY },
+ 
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/ar9170usb-add-Sphairon-Homelink-1202-USB-ID.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/ar9170usb-add-Sphairon-Homelink-1202-USB-ID.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,28 @@
+From: Stefan Seyfried <seife at sphairon.com>
+Date: Tue, 8 Dec 2009 15:21:34 +0100
+Subject: [PATCH 06/34] ar9170usb: add Sphairon Homelink 1202 USB ID
+
+commit 5b6e2f12edd6c46e87a2775321f1912d19be4b35 upstream.
+
+Signed-off-by: Stefan Seyfried <seife at sphairon.com>
+Signed-off-by: John W. Linville <linville at tuxdriver.com>
+---
+ drivers/net/wireless/ath/ar9170/usb.c |    2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
+index e0799d9..0f36118 100644
+--- a/drivers/net/wireless/ath/ar9170/usb.c
++++ b/drivers/net/wireless/ath/ar9170/usb.c
+@@ -84,6 +84,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
+ 	{ USB_DEVICE(0x0cde, 0x0023) },
+ 	/* Z-Com UB82 ABG */
+ 	{ USB_DEVICE(0x0cde, 0x0026) },
++	/* Sphairon Homelink 1202 */
++	{ USB_DEVICE(0x0cde, 0x0027) },
+ 	/* Arcadyan WN7512 */
+ 	{ USB_DEVICE(0x083a, 0xf522) },
+ 	/* Planex GWUS300 */
+-- 
+1.7.4.4
+
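
Each of the ar9170 ID additions in this update follows the same pattern: a
new usb_device_id entry makes the USB core hand the dongle to the driver,
and the optional .driver_info field (as in the AVM entry above) carries a
per-device flag that the driver examines at probe time.  A stripped-down
illustration with hypothetical names (not the ar9170 source):

#include <linux/module.h>
#include <linux/usb.h>

#define EXAMPLE_REQ_FW1_ONLY	1	/* made-up per-device quirk flag */

static struct usb_device_id example_usb_ids[] = {
	/* plain entry: match on vendor:product alone */
	{ USB_DEVICE(0x0cde, 0x0027) },
	/* entry carrying a quirk flag for one specific device */
	{ USB_DEVICE(0x057c, 0x8402), .driver_info = EXAMPLE_REQ_FW1_ONLY },
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(usb, example_usb_ids);

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	if (id->driver_info & EXAMPLE_REQ_FW1_ONLY) {
		/* e.g. request only the first-stage firmware image */
	}
	return 0;
}

Because these changes are purely additive table entries, they carry no
behavioural risk for hardware the driver already supports, which is what
makes them suitable for a stable update.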

Added: dists/squeeze/linux-2.6/debian/patches/features/all/ar9170usb-add-vendor-and-device-ID-for-Qwest-Actiont.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/ar9170usb-add-vendor-and-device-ID-for-Qwest-Actiont.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,37 @@
+From: Steve Tanner <steve.tanner at gmail.com>
+Date: Tue, 11 May 2010 14:34:16 -0700
+Subject: [PATCH 25/34] ar9170usb: add vendor and device ID for
+ Qwest/Actiontec 802AIN Wireless N USB Network Adapter
+
+commit 2cb1ba153787e195c62eafc2e794b25509fdd26d upstream.
+
+* add support for the Qwest/Actiontec 802AIN Wireless N USB Network Adapter.
+
+lsusb identifies the device as: "ID 1668:1200 Actiontec Electronics, Inc. [hex]"
+
+usb_modeswitch package and appropriate rules are required to switch
+the device from "ID 0ace:20ff ZyDas"
+
+Changes-licensed-under: GPL
+Signed-off-by: Steve Tanner <steve.tanner at gmail.com>
+Signed-off-by: John W. Linville <linville at tuxdriver.com>
+---
+ drivers/net/wireless/ath/ar9170/usb.c |    2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
+index c7405b6..e0ca1d7 100644
+--- a/drivers/net/wireless/ath/ar9170/usb.c
++++ b/drivers/net/wireless/ath/ar9170/usb.c
+@@ -108,6 +108,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
+ 	{ USB_DEVICE(0x0409, 0x0249) },
+ 	/* AVM FRITZ!WLAN USB Stick N 2.4 */
+ 	{ USB_DEVICE(0x057C, 0x8402), .driver_info = AR9170_REQ_FW1_ONLY },
++	/* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
++	{ USB_DEVICE(0x1668, 0x1200) },
+ 
+ 	/* terminate */
+ 	{}
+-- 
+1.7.4.4
+
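
Mode switching of the kind described in the log message is normally driven
by a usb_modeswitch configuration entry keyed on the pre-switch ID.  The
shape such an entry takes, sketched from the IDs quoted above (the bulk
control message is device-specific and deliberately left out, so treat this
as illustrative only; the real rule would come from the usb-modeswitch-data
package):

# Qwest/Actiontec 802AIN (illustrative sketch, not a shipped rule)
DefaultVendor=0x0ace
DefaultProduct=0x20ff
TargetVendor=0x1668
TargetProduct=0x1200
# MessageContent="<device-specific message, not reproduced here>"

Once switched, the adapter re-enumerates as 1668:1200 and the new
usb_device_id entry lets ar9170usb claim it.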

Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0001-bna-Brocade-10Gb-Ethernet-device-driver.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0001-bna-Brocade-10Gb-Ethernet-device-driver.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,22165 @@
+From: Rasesh Mody <rmody at brocade.com>
+Date: Mon, 23 Aug 2010 20:24:12 -0700
+Subject: [PATCH 01/23] bna: Brocade 10Gb Ethernet device driver
+
+commit 8b230ed8ec96c933047dd0625cf95f739e4939a6 upstream.
+
+This is patch 1/6 which contains linux driver source for
+Brocade's BR1010/BR1020 10Gb CEE capable ethernet adapter.
+
+Signed-off-by: Debashis Dutt <ddutt at brocade.com>
+Signed-off-by: Rasesh Mody <rmody at brocade.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+[bwh: Backport to 2.6.32: adjust multicast address type]
+---
+ MAINTAINERS                         |    7 +
+ drivers/net/Kconfig                 |   14 +
+ drivers/net/Makefile                |    1 +
+ drivers/net/bna/Makefile            |   11 +
+ drivers/net/bna/bfa_cee.c           |  407 ++++
+ drivers/net/bna/bfa_cee.h           |   72 +
+ drivers/net/bna/bfa_defs.h          |  243 ++
+ drivers/net/bna/bfa_defs_cna.h      |  223 ++
+ drivers/net/bna/bfa_defs_mfg_comm.h |  244 ++
+ drivers/net/bna/bfa_defs_status.h   |  216 ++
+ drivers/net/bna/bfa_ioc.c           | 1839 +++++++++++++++
+ drivers/net/bna/bfa_ioc.h           |  343 +++
+ drivers/net/bna/bfa_ioc_ct.c        |  391 ++++
+ drivers/net/bna/bfa_sm.h            |   88 +
+ drivers/net/bna/bfa_wc.h            |   69 +
+ drivers/net/bna/bfi.h               |  392 ++++
+ drivers/net/bna/bfi_cna.h           |  199 ++
+ drivers/net/bna/bfi_ctreg.h         |  637 ++++++
+ drivers/net/bna/bfi_ll.h            |  438 ++++
+ drivers/net/bna/bna.h               |  654 ++++++
+ drivers/net/bna/bna_ctrl.c          | 3626 ++++++++++++++++++++++++++++++
+ drivers/net/bna/bna_hw.h            | 1491 +++++++++++++
+ drivers/net/bna/bna_txrx.c          | 4209 +++++++++++++++++++++++++++++++++++
+ drivers/net/bna/bna_types.h         | 1128 ++++++++++
+ drivers/net/bna/bnad.c              | 3270 +++++++++++++++++++++++++++
+ drivers/net/bna/bnad.h              |  334 +++
+ drivers/net/bna/bnad_ethtool.c      | 1282 +++++++++++
+ drivers/net/bna/cna.h               |   81 +
+ drivers/net/bna/cna_fwimg.c         |   64 +
+ include/linux/pci_ids.h             |    3 +
+ 30 files changed, 21976 insertions(+), 0 deletions(-)
+ create mode 100644 drivers/net/bna/Makefile
+ create mode 100644 drivers/net/bna/bfa_cee.c
+ create mode 100644 drivers/net/bna/bfa_cee.h
+ create mode 100644 drivers/net/bna/bfa_defs.h
+ create mode 100644 drivers/net/bna/bfa_defs_cna.h
+ create mode 100644 drivers/net/bna/bfa_defs_mfg_comm.h
+ create mode 100644 drivers/net/bna/bfa_defs_status.h
+ create mode 100644 drivers/net/bna/bfa_ioc.c
+ create mode 100644 drivers/net/bna/bfa_ioc.h
+ create mode 100644 drivers/net/bna/bfa_ioc_ct.c
+ create mode 100644 drivers/net/bna/bfa_sm.h
+ create mode 100644 drivers/net/bna/bfa_wc.h
+ create mode 100644 drivers/net/bna/bfi.h
+ create mode 100644 drivers/net/bna/bfi_cna.h
+ create mode 100644 drivers/net/bna/bfi_ctreg.h
+ create mode 100644 drivers/net/bna/bfi_ll.h
+ create mode 100644 drivers/net/bna/bna.h
+ create mode 100644 drivers/net/bna/bna_ctrl.c
+ create mode 100644 drivers/net/bna/bna_hw.h
+ create mode 100644 drivers/net/bna/bna_txrx.c
+ create mode 100644 drivers/net/bna/bna_types.h
+ create mode 100644 drivers/net/bna/bnad.c
+ create mode 100644 drivers/net/bna/bnad.h
+ create mode 100644 drivers/net/bna/bnad_ethtool.c
+ create mode 100644 drivers/net/bna/cna.h
+ create mode 100644 drivers/net/bna/cna_fwimg.c
+
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -1387,6 +1387,13 @@ L:	linux-scsi at vger.kernel.org
+ S:	Supported
+ F:	drivers/scsi/bfa/
+ 
++BROCADE BNA 10 GIGABIT ETHERNET DRIVER
++M:	Rasesh Mody <rmody at brocade.com>
++M:	Debashis Dutt <ddutt at brocade.com>
++L:	netdev at vger.kernel.org
++S:	Supported
++F:	drivers/net/bna/
++
+ BSG (block layer generic sg v4 driver)
+ M:	FUJITA Tomonori <fujita.tomonori at lab.ntt.co.jp>
+ L:	linux-scsi at vger.kernel.org
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -2869,6 +2869,20 @@ config QLGE
+ 	  To compile this driver as a module, choose M here: the module
+ 	  will be called qlge.
+ 
++config BNA
++        tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
++        depends on PCI
++        ---help---
++          This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
++          cards.
++          To compile this driver as a module, choose M here: the module
++          will be called bna.
++
++          For general information and support, go to the Brocade support
++          website at:
++
++          <http://support.brocade.com>
++
+ source "drivers/net/sfc/Kconfig"
+ 
+ source "drivers/net/benet/Kconfig"
+--- a/drivers/net/Makefile
++++ b/drivers/net/Makefile
+@@ -34,6 +34,7 @@ obj-$(CONFIG_ENIC) += enic/
+ obj-$(CONFIG_JME) += jme.o
+ obj-$(CONFIG_BE2NET) += benet/
+ obj-$(CONFIG_VMXNET3) += vmxnet3/
++obj-$(CONFIG_BNA) += bna/
+ 
+ gianfar_driver-objs := gianfar.o \
+ 		gianfar_ethtool.o \
+--- /dev/null
++++ b/drivers/net/bna/Makefile
+@@ -0,0 +1,11 @@
++#
++# Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++# All rights reserved.
++#
++
++obj-$(CONFIG_BNA) += bna.o
++
++bna-objs := bnad.o bnad_ethtool.o bna_ctrl.o bna_txrx.o
++bna-objs += bfa_ioc.o bfa_ioc_ct.o bfa_cee.o cna_fwimg.o
++
++EXTRA_CFLAGS := -Idrivers/net/bna
+--- /dev/null
++++ b/drivers/net/bna/bfa_cee.c
+@@ -0,0 +1,407 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++
++#include "bfa_defs_cna.h"
++#include "cna.h"
++#include "bfa_cee.h"
++#include "bfi_cna.h"
++#include "bfa_ioc.h"
++
++#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
++#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc)
++
++static void bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg);
++static void bfa_cee_format_cee_cfg(void *buffer);
++
++static void
++bfa_cee_format_cee_cfg(void *buffer)
++{
++	struct bfa_cee_attr *cee_cfg = buffer;
++	bfa_cee_format_lldp_cfg(&cee_cfg->lldp_remote);
++}
++
++static void
++bfa_cee_stats_swap(struct bfa_cee_stats *stats)
++{
++	u32 *buffer = (u32 *)stats;
++	int i;
++
++	for (i = 0; i < (sizeof(struct bfa_cee_stats) / sizeof(u32));
++		i++) {
++		buffer[i] = ntohl(buffer[i]);
++	}
++}
++
++static void
++bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg)
++{
++	lldp_cfg->time_to_live =
++			ntohs(lldp_cfg->time_to_live);
++	lldp_cfg->enabled_system_cap =
++			ntohs(lldp_cfg->enabled_system_cap);
++}
++
++/**
++ * bfa_cee_attr_meminfo()
++ *
++ * @brief Returns the size of the DMA memory needed by CEE attributes
++ *
++ * @param[in] void
++ *
++ * @return Size of DMA region
++ */
++static u32
++bfa_cee_attr_meminfo(void)
++{
++	return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ);
++}
++/**
++ * bfa_cee_stats_meminfo()
++ *
++ * @brief Returns the size of the DMA memory needed by CEE stats
++ *
++ * @param[in] void
++ *
++ * @return Size of DMA region
++ */
++static u32
++bfa_cee_stats_meminfo(void)
++{
++	return roundup(sizeof(struct bfa_cee_stats), BFA_DMA_ALIGN_SZ);
++}
++
++/**
++ * bfa_cee_get_attr_isr()
++ *
++ * @brief CEE ISR for get-attributes responses from f/w
++ *
++ * @param[in] cee - Pointer to the CEE module
++ *            status - Return status from the f/w
++ *
++ * @return void
++ */
++static void
++bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
++{
++	cee->get_attr_status = status;
++	if (status == BFA_STATUS_OK) {
++		memcpy(cee->attr, cee->attr_dma.kva,
++		    sizeof(struct bfa_cee_attr));
++		bfa_cee_format_cee_cfg(cee->attr);
++	}
++	cee->get_attr_pending = false;
++	if (cee->cbfn.get_attr_cbfn)
++		cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
++}
++
++/**
++ * bfa_cee_get_stats_isr()
++ *
++ * @brief CEE ISR for get-stats responses from f/w
++ *
++ * @param[in] cee - Pointer to the CEE module
++ *            status - Return status from the f/w
++ *
++ * @return void
++ */
++static void
++bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status)
++{
++	cee->get_stats_status = status;
++	if (status == BFA_STATUS_OK) {
++		memcpy(cee->stats, cee->stats_dma.kva,
++			sizeof(struct bfa_cee_stats));
++		bfa_cee_stats_swap(cee->stats);
++	}
++	cee->get_stats_pending = false;
++	if (cee->cbfn.get_stats_cbfn)
++		cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
++}
++
++/**
++ * bfa_cee_reset_stats_isr()
++ *
++ * @brief CEE ISR for reset-stats responses from f/w
++ *
++ * @param[in] cee - Pointer to the CEE module
++ *            status - Return status from the f/w
++ *
++ * @return void
++ */
++static void
++bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
++{
++	cee->reset_stats_status = status;
++	cee->reset_stats_pending = false;
++	if (cee->cbfn.reset_stats_cbfn)
++		cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
++}
++/**
++ * bfa_cee_meminfo()
++ *
++ * @brief Returns the size of the DMA memory needed by CEE module
++ *
++ * @param[in] void
++ *
++ * @return Size of DMA region
++ */
++u32
++bfa_cee_meminfo(void)
++{
++	return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo();
++}
++
++/**
++ * bfa_cee_mem_claim()
++ *
++ * @brief Initializes CEE DMA memory
++ *
++ * @param[in] cee CEE module pointer
++ *	      dma_kva Kernel Virtual Address of CEE DMA Memory
++ *	      dma_pa  Physical Address of CEE DMA Memory
++ *
++ * @return void
++ */
++void
++bfa_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
++{
++	cee->attr_dma.kva = dma_kva;
++	cee->attr_dma.pa = dma_pa;
++	cee->stats_dma.kva = dma_kva + bfa_cee_attr_meminfo();
++	cee->stats_dma.pa = dma_pa + bfa_cee_attr_meminfo();
++	cee->attr = (struct bfa_cee_attr *) dma_kva;
++	cee->stats = (struct bfa_cee_stats *)
++		(dma_kva + bfa_cee_attr_meminfo());
++}
++
++/**
++ * bfa_cee_get_attr()
++ *
++ * @brief
++ *   Send the request to the f/w to fetch CEE attributes.
++ *
++ * @param[in] Pointer to the CEE module data structure.
++ *
++ * @return Status
++ */
++
++enum bfa_status
++bfa_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
++		     bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
++{
++	struct bfi_cee_get_req *cmd;
++
++	BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
++	if (!bfa_ioc_is_operational(cee->ioc))
++		return BFA_STATUS_IOC_FAILURE;
++	if (cee->get_attr_pending == true)
++		return 	BFA_STATUS_DEVBUSY;
++	cee->get_attr_pending = true;
++	cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg;
++	cee->attr = attr;
++	cee->cbfn.get_attr_cbfn = cbfn;
++	cee->cbfn.get_attr_cbarg = cbarg;
++	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
++	    bfa_ioc_portid(cee->ioc));
++	bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
++	bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb);
++
++	return BFA_STATUS_OK;
++}
++
++/**
++ * bfa_cee_get_stats()
++ *
++ * @brief
++ *   Send the request to the f/w to fetch CEE statistics.
++ *
++ * @param[in] Pointer to the CEE module data structure.
++ *
++ * @return Status
++ */
++
++enum bfa_status
++bfa_cee_get_stats(struct bfa_cee *cee, struct bfa_cee_stats *stats,
++		      bfa_cee_get_stats_cbfn_t cbfn, void *cbarg)
++{
++	struct bfi_cee_get_req *cmd;
++
++	BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
++
++	if (!bfa_ioc_is_operational(cee->ioc))
++		return BFA_STATUS_IOC_FAILURE;
++	if (cee->get_stats_pending == true)
++		return 	BFA_STATUS_DEVBUSY;
++	cee->get_stats_pending = true;
++	cmd = (struct bfi_cee_get_req *) cee->get_stats_mb.msg;
++	cee->stats = stats;
++	cee->cbfn.get_stats_cbfn = cbfn;
++	cee->cbfn.get_stats_cbarg = cbarg;
++	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ,
++	    bfa_ioc_portid(cee->ioc));
++	bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa);
++	bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb);
++
++	return BFA_STATUS_OK;
++}
++
++/**
++ * bfa_cee_reset_stats()
++ *
++ * @brief Clears CEE Stats in the f/w.
++ *
++ * @param[in] Pointer to the CEE module data structure.
++ *
++ * @return Status
++ */
++
++enum bfa_status
++bfa_cee_reset_stats(struct bfa_cee *cee, bfa_cee_reset_stats_cbfn_t cbfn,
++			void *cbarg)
++{
++	struct bfi_cee_reset_stats *cmd;
++
++	BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
++	if (!bfa_ioc_is_operational(cee->ioc))
++		return BFA_STATUS_IOC_FAILURE;
++	if (cee->reset_stats_pending == true)
++		return 	BFA_STATUS_DEVBUSY;
++	cee->reset_stats_pending = true;
++	cmd = (struct bfi_cee_reset_stats *) cee->reset_stats_mb.msg;
++	cee->cbfn.reset_stats_cbfn = cbfn;
++	cee->cbfn.reset_stats_cbarg = cbarg;
++	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS,
++	    bfa_ioc_portid(cee->ioc));
++	bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb);
++	return BFA_STATUS_OK;
++}
++
++/**
++ * bfa_cee_isr()
++ *
++ * @brief Handles Mail-box interrupts for CEE module.
++ *
++ * @param[in] Pointer to the CEE module data structure.
++ *
++ * @return void
++ */
++
++void
++bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
++{
++	union bfi_cee_i2h_msg_u *msg;
++	struct bfi_cee_get_rsp *get_rsp;
++	struct bfa_cee *cee = (struct bfa_cee *) cbarg;
++	msg = (union bfi_cee_i2h_msg_u *) m;
++	get_rsp = (struct bfi_cee_get_rsp *) m;
++	switch (msg->mh.msg_id) {
++	case BFI_CEE_I2H_GET_CFG_RSP:
++		bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
++		break;
++	case BFI_CEE_I2H_GET_STATS_RSP:
++		bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
++		break;
++	case BFI_CEE_I2H_RESET_STATS_RSP:
++		bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
++		break;
++	default:
++		BUG_ON(1);
++	}
++}
++
++/**
++ * bfa_cee_hbfail()
++ *
++ * @brief CEE module heart-beat failure handler.
++ *
++ * @param[in] Pointer to the CEE module data structure.
++ *
++ * @return void
++ */
++
++void
++bfa_cee_hbfail(void *arg)
++{
++	struct bfa_cee *cee;
++	cee = (struct bfa_cee *) arg;
++
++	if (cee->get_attr_pending == true) {
++		cee->get_attr_status = BFA_STATUS_FAILED;
++		cee->get_attr_pending  = false;
++		if (cee->cbfn.get_attr_cbfn) {
++			cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg,
++			    BFA_STATUS_FAILED);
++		}
++	}
++	if (cee->get_stats_pending == true) {
++		cee->get_stats_status = BFA_STATUS_FAILED;
++		cee->get_stats_pending  = false;
++		if (cee->cbfn.get_stats_cbfn) {
++			cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg,
++			    BFA_STATUS_FAILED);
++		}
++	}
++	if (cee->reset_stats_pending == true) {
++		cee->reset_stats_status = BFA_STATUS_FAILED;
++		cee->reset_stats_pending  = false;
++		if (cee->cbfn.reset_stats_cbfn) {
++			cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg,
++			    BFA_STATUS_FAILED);
++		}
++	}
++}
++
++/**
++ * bfa_cee_attach()
++ *
++ * @brief CEE module-attach API
++ *
++ * @param[in] cee - Pointer to the CEE module data structure
++ *            ioc - Pointer to the ioc module data structure
++ *            dev - Pointer to the device driver module data structure
++ *                  The device driver specific mbox ISR functions have
++ *                  this pointer as one of the parameters.
++ *
++ * @return void
++ */
++void
++bfa_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
++		void *dev)
++{
++	BUG_ON(!(cee != NULL));
++	cee->dev = dev;
++	cee->ioc = ioc;
++
++	bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
++	bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee);
++	bfa_ioc_hbfail_register(cee->ioc, &cee->hbfail);
++}
++
++/**
++ * bfa_cee_detach()
++ *
++ * @brief CEE module-detach API
++ *
++ * @param[in] cee - Pointer to the CEE module data structure
++ *
++ * @return void
++ */
++void
++bfa_cee_detach(struct bfa_cee *cee)
++{
++}
+--- /dev/null
++++ b/drivers/net/bna/bfa_cee.h
+@@ -0,0 +1,72 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++
++#ifndef __BFA_CEE_H__
++#define __BFA_CEE_H__
++
++#include "bfa_defs_cna.h"
++#include "bfa_ioc.h"
++
++typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, enum bfa_status status);
++typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, enum bfa_status status);
++typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, enum bfa_status status);
++typedef void (*bfa_cee_hbfail_cbfn_t) (void *dev, enum bfa_status status);
++
++struct bfa_cee_cbfn {
++	bfa_cee_get_attr_cbfn_t    get_attr_cbfn;
++	void *get_attr_cbarg;
++	bfa_cee_get_stats_cbfn_t   get_stats_cbfn;
++	void *get_stats_cbarg;
++	bfa_cee_reset_stats_cbfn_t reset_stats_cbfn;
++	void *reset_stats_cbarg;
++};
++
++struct bfa_cee {
++	void *dev;
++	bool get_attr_pending;
++	bool get_stats_pending;
++	bool reset_stats_pending;
++	enum bfa_status get_attr_status;
++	enum bfa_status get_stats_status;
++	enum bfa_status reset_stats_status;
++	struct bfa_cee_cbfn cbfn;
++	struct bfa_ioc_hbfail_notify hbfail;
++	struct bfa_cee_attr *attr;
++	struct bfa_cee_stats *stats;
++	struct bfa_dma attr_dma;
++	struct bfa_dma stats_dma;
++	struct bfa_ioc *ioc;
++	struct bfa_mbox_cmd get_cfg_mb;
++	struct bfa_mbox_cmd get_stats_mb;
++	struct bfa_mbox_cmd reset_stats_mb;
++};
++
++u32 bfa_cee_meminfo(void);
++void bfa_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
++	u64 dma_pa);
++void bfa_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
++void bfa_cee_detach(struct bfa_cee *cee);
++enum bfa_status bfa_cee_get_attr(struct bfa_cee *cee,
++	struct bfa_cee_attr *attr, bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
++enum bfa_status bfa_cee_get_stats(struct bfa_cee *cee,
++	struct bfa_cee_stats *stats, bfa_cee_get_stats_cbfn_t cbfn,
++	void *cbarg);
++enum bfa_status bfa_cee_reset_stats(struct bfa_cee *cee,
++	bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg);
++
++#endif /* __BFA_CEE_H__ */
+--- /dev/null
++++ b/drivers/net/bna/bfa_defs.h
+@@ -0,0 +1,243 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++
++#ifndef __BFA_DEFS_H__
++#define __BFA_DEFS_H__
++
++#include "cna.h"
++#include "bfa_defs_status.h"
++#include "bfa_defs_mfg_comm.h"
++
++#define BFA_STRING_32	32
++#define BFA_VERSION_LEN 64
++
++/**
++ * ---------------------- adapter definitions ------------
++ */
++
++/**
++ * BFA adapter level attributes.
++ */
++enum {
++	BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
++					/*
++					 *!< adapter serial num length
++					 */
++	BFA_ADAPTER_MODEL_NAME_LEN  = 16,  /*!< model name length */
++	BFA_ADAPTER_MODEL_DESCR_LEN = 128, /*!< model description length */
++	BFA_ADAPTER_MFG_NAME_LEN    = 8,   /*!< manufacturer name length */
++	BFA_ADAPTER_SYM_NAME_LEN    = 64,  /*!< adapter symbolic name length */
++	BFA_ADAPTER_OS_TYPE_LEN	    = 64,  /*!< adapter os type length */
++};
++
++struct bfa_adapter_attr {
++	char		manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
++	char		serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
++	u32	card_type;
++	char		model[BFA_ADAPTER_MODEL_NAME_LEN];
++	char		model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
++	u64		pwwn;
++	char		node_symname[FC_SYMNAME_MAX];
++	char		hw_ver[BFA_VERSION_LEN];
++	char		fw_ver[BFA_VERSION_LEN];
++	char		optrom_ver[BFA_VERSION_LEN];
++	char		os_type[BFA_ADAPTER_OS_TYPE_LEN];
++	struct bfa_mfg_vpd vpd;
++	struct mac mac;
++
++	u8		nports;
++	u8		max_speed;
++	u8		prototype;
++	char	        asic_rev;
++
++	u8		pcie_gen;
++	u8		pcie_lanes_orig;
++	u8		pcie_lanes;
++	u8	        cna_capable;
++
++	u8		is_mezz;
++	u8		trunk_capable;
++};
++
++/**
++ * ---------------------- IOC definitions ------------
++ */
++
++enum {
++	BFA_IOC_DRIVER_LEN	= 16,
++	BFA_IOC_CHIP_REV_LEN 	= 8,
++};
++
++/**
++ * Driver and firmware versions.
++ */
++struct bfa_ioc_driver_attr {
++	char		driver[BFA_IOC_DRIVER_LEN];	/*!< driver name */
++	char		driver_ver[BFA_VERSION_LEN];	/*!< driver version */
++	char		fw_ver[BFA_VERSION_LEN];	/*!< firmware version */
++	char		bios_ver[BFA_VERSION_LEN];	/*!< bios version */
++	char		efi_ver[BFA_VERSION_LEN];	/*!< EFI version */
++	char		ob_ver[BFA_VERSION_LEN];	/*!< openboot version */
++};
++
++/**
++ * IOC PCI device attributes
++ */
++struct bfa_ioc_pci_attr {
++	u16	vendor_id;	/*!< PCI vendor ID */
++	u16	device_id;	/*!< PCI device ID */
++	u16	ssid;		/*!< subsystem ID */
++	u16	ssvid;		/*!< subsystem vendor ID */
++	u32	pcifn;		/*!< PCI device function */
++	u32	rsvd;		/* padding */
++	char		chip_rev[BFA_IOC_CHIP_REV_LEN];	 /*!< chip revision */
++};
++
++/**
++ * IOC states
++ */
++enum bfa_ioc_state {
++	BFA_IOC_RESET		= 1,	/*!< IOC is in reset state */
++	BFA_IOC_SEMWAIT		= 2,	/*!< Waiting for IOC h/w semaphore */
++	BFA_IOC_HWINIT		= 3,	/*!< IOC h/w is being initialized */
++	BFA_IOC_GETATTR		= 4,	/*!< IOC is being configured */
++	BFA_IOC_OPERATIONAL	= 5,	/*!< IOC is operational */
++	BFA_IOC_INITFAIL	= 6,	/*!< IOC hardware failure */
++	BFA_IOC_HBFAIL		= 7,	/*!< IOC heart-beat failure */
++	BFA_IOC_DISABLING	= 8,	/*!< IOC is being disabled */
++	BFA_IOC_DISABLED	= 9,	/*!< IOC is disabled */
++	BFA_IOC_FWMISMATCH	= 10,	/*!< IOC f/w different from drivers */
++};
++
++/**
++ * IOC firmware stats
++ */
++struct bfa_fw_ioc_stats {
++	u32	enable_reqs;
++	u32	disable_reqs;
++	u32	get_attr_reqs;
++	u32	dbg_sync;
++	u32	dbg_dump;
++	u32	unknown_reqs;
++};
++
++/**
++ * IOC driver stats
++ */
++struct bfa_ioc_drv_stats {
++	u32	ioc_isrs;
++	u32	ioc_enables;
++	u32	ioc_disables;
++	u32	ioc_hbfails;
++	u32	ioc_boots;
++	u32	stats_tmos;
++	u32	hb_count;
++	u32	disable_reqs;
++	u32	enable_reqs;
++	u32	disable_replies;
++	u32	enable_replies;
++};
++
++/**
++ * IOC statistics
++ */
++struct bfa_ioc_stats {
++	struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */
++	struct bfa_fw_ioc_stats fw_stats;  /*!< firmware IOC stats */
++};
++
++enum bfa_ioc_type {
++	BFA_IOC_TYPE_FC		= 1,
++	BFA_IOC_TYPE_FCoE	= 2,
++	BFA_IOC_TYPE_LL		= 3,
++};
++
++/**
++ * IOC attributes returned in queries
++ */
++struct bfa_ioc_attr {
++	enum bfa_ioc_type ioc_type;
++	enum bfa_ioc_state 		state;		/*!< IOC state      */
++	struct bfa_adapter_attr adapter_attr;	/*!< HBA attributes */
++	struct bfa_ioc_driver_attr driver_attr;	/*!< driver attr    */
++	struct bfa_ioc_pci_attr pci_attr;
++	u8				port_id;	/*!< port number    */
++	u8				rsvd[7];	/*!< 64bit align    */
++};
++
++/**
++ * ---------------------- mfg definitions ------------
++ */
++
++/**
++ * Checksum size
++ */
++#define BFA_MFG_CHKSUM_SIZE			16
++
++#define BFA_MFG_PARTNUM_SIZE			14
++#define BFA_MFG_SUPPLIER_ID_SIZE		10
++#define BFA_MFG_SUPPLIER_PARTNUM_SIZE		20
++#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE		20
++#define BFA_MFG_SUPPLIER_REVISION_SIZE		4
++
++#pragma pack(1)
++
++/**
++ * @brief BFA adapter manufacturing block definition.
++ *
++ * All numerical fields are in big-endian format.
++ */
++struct bfa_mfg_block {
++	u8		version;	/*!< manufacturing block version */
++	u8		mfg_sig[3];	/*!< characters 'M', 'F', 'G' */
++	u16	mfgsize;	/*!< mfg block size */
++	u16	u16_chksum;	/*!< old u16 checksum */
++	char		brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
++	char		brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
++	u8		mfg_day;	/*!< manufacturing day */
++	u8		mfg_month;	/*!< manufacturing month */
++	u16	mfg_year;	/*!< manufacturing year */
++	u64		mfg_wwn;	/*!< wwn base for this adapter */
++	u8		num_wwn;	/*!< number of wwns assigned */
++	u8		mfg_speeds;	/*!< speeds allowed for this adapter */
++	u8		rsv[2];
++	char		supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
++	char		supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
++	char
++		supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
++	char
++		supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
++	mac_t		mfg_mac;	/*!< mac address */
++	u8		num_mac;	/*!< number of mac addresses */
++	u8		rsv2;
++	u32	mfg_type;	/*!< card type */
++	u8		rsv3[108];
++	u8		md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
++};
++
++#pragma pack()
++
++/**
++ * ---------------------- pci definitions ------------
++ */
++
++#define bfa_asic_id_ct(devid)			\
++	((devid) == PCI_DEVICE_ID_BROCADE_CT ||	\
++	(devid) == PCI_DEVICE_ID_BROCADE_CT_FC)
++
++#endif /* __BFA_DEFS_H__ */
+--- /dev/null
++++ b/drivers/net/bna/bfa_defs_cna.h
+@@ -0,0 +1,223 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++
++#ifndef __BFA_DEFS_CNA_H__
++#define __BFA_DEFS_CNA_H__
++
++#include "bfa_defs.h"
++
++/**
++ * @brief
++ * FC physical port statistics.
++ */
++struct bfa_port_fc_stats {
++	u64	secs_reset;	/*!< Seconds since stats is reset */
++	u64	tx_frames;	/*!< Tx frames			*/
++	u64	tx_words;	/*!< Tx words			*/
++	u64	tx_lip;		/*!< Tx LIP			*/
++	u64	tx_nos;		/*!< Tx NOS			*/
++	u64	tx_ols;		/*!< Tx OLS			*/
++	u64	tx_lr;		/*!< Tx LR			*/
++	u64	tx_lrr;		/*!< Tx LRR			*/
++	u64	rx_frames;	/*!< Rx frames			*/
++	u64	rx_words;	/*!< Rx words			*/
++	u64	lip_count;	/*!< Rx LIP			*/
++	u64	nos_count;	/*!< Rx NOS			*/
++	u64	ols_count;	/*!< Rx OLS			*/
++	u64	lr_count;	/*!< Rx LR			*/
++	u64	lrr_count;	/*!< Rx LRR			*/
++	u64	invalid_crcs;	/*!< Rx CRC err frames		*/
++	u64	invalid_crc_gd_eof; /*!< Rx CRC err good EOF frames */
++	u64	undersized_frm; /*!< Rx undersized frames	*/
++	u64	oversized_frm;	/*!< Rx oversized frames	*/
++	u64	bad_eof_frm;	/*!< Rx frames with bad EOF	*/
++	u64	error_frames;	/*!< Errored frames		*/
++	u64	dropped_frames;	/*!< Dropped frames		*/
++	u64	link_failures;	/*!< Link Failure (LF) count	*/
++	u64	loss_of_syncs;	/*!< Loss of sync count		*/
++	u64	loss_of_signals; /*!< Loss of signal count	*/
++	u64	primseq_errs;	/*!< Primitive sequence protocol err. */
++	u64	bad_os_count;	/*!< Invalid ordered sets	*/
++	u64	err_enc_out;	/*!< Encoding err nonframe_8b10b */
++	u64	err_enc;	/*!< Encoding err frame_8b10b	*/
++};
++
++/**
++ * @brief
++ * Eth Physical Port statistics.
++ */
++struct bfa_port_eth_stats {
++	u64	secs_reset;	/*!< Seconds since stats is reset */
++	u64	frame_64;	/*!< Frames 64 bytes		*/
++	u64	frame_65_127;	/*!< Frames 65-127 bytes	*/
++	u64	frame_128_255;	/*!< Frames 128-255 bytes	*/
++	u64	frame_256_511;	/*!< Frames 256-511 bytes	*/
++	u64	frame_512_1023;	/*!< Frames 512-1023 bytes	*/
++	u64	frame_1024_1518; /*!< Frames 1024-1518 bytes	*/
++	u64	frame_1519_1522; /*!< Frames 1519-1522 bytes	*/
++	u64	tx_bytes;	/*!< Tx bytes			*/
++	u64	tx_packets;	 /*!< Tx packets		*/
++	u64	tx_mcast_packets; /*!< Tx multicast packets	*/
++	u64	tx_bcast_packets; /*!< Tx broadcast packets	*/
++	u64	tx_control_frame; /*!< Tx control frame		*/
++	u64	tx_drop;	/*!< Tx drops			*/
++	u64	tx_jabber;	/*!< Tx jabber			*/
++	u64	tx_fcs_error;	/*!< Tx FCS errors		*/
++	u64	tx_fragments;	/*!< Tx fragments		*/
++	u64	rx_bytes;	/*!< Rx bytes			*/
++	u64	rx_packets;	/*!< Rx packets			*/
++	u64	rx_mcast_packets; /*!< Rx multicast packets	*/
++	u64	rx_bcast_packets; /*!< Rx broadcast packets	*/
++	u64	rx_control_frames; /*!< Rx control frames	*/
++	u64	rx_unknown_opcode; /*!< Rx unknown opcode	*/
++	u64	rx_drop;	/*!< Rx drops			*/
++	u64	rx_jabber;	/*!< Rx jabber			*/
++	u64	rx_fcs_error;	/*!< Rx FCS errors		*/
++	u64	rx_alignment_error; /*!< Rx alignment errors	*/
++	u64	rx_frame_length_error; /*!< Rx frame len errors	*/
++	u64	rx_code_error;	/*!< Rx code errors		*/
++	u64	rx_fragments;	/*!< Rx fragments		*/
++	u64	rx_pause;	/*!< Rx pause			*/
++	u64	rx_zero_pause;	/*!< Rx zero pause		*/
++	u64	tx_pause;	/*!< Tx pause			*/
++	u64	tx_zero_pause;	/*!< Tx zero pause		*/
++	u64	rx_fcoe_pause;	/*!< Rx FCoE pause		*/
++	u64	rx_fcoe_zero_pause; /*!< Rx FCoE zero pause	*/
++	u64	tx_fcoe_pause;	/*!< Tx FCoE pause		*/
++	u64	tx_fcoe_zero_pause; /*!< Tx FCoE zero pause	*/
++};
++
++/**
++ * @brief
++ *		Port statistics.
++ */
++union bfa_port_stats_u {
++	struct bfa_port_fc_stats fc;
++	struct bfa_port_eth_stats eth;
++};
++
++#pragma pack(1)
++
++#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
++#define BFA_CEE_DCBX_MAX_PRIORITY	(8)
++#define BFA_CEE_DCBX_MAX_PGID		(8)
++
++#define BFA_CEE_LLDP_SYS_CAP_OTHER	0x0001
++#define BFA_CEE_LLDP_SYS_CAP_REPEATER	0x0002
++#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE	0x0004
++#define BFA_CEE_LLDP_SYS_CAP_WLAN_AP	0x0008
++#define BFA_CEE_LLDP_SYS_CAP_ROUTER	0x0010
++#define BFA_CEE_LLDP_SYS_CAP_TELEPHONE	0x0020
++#define BFA_CEE_LLDP_SYS_CAP_DOCSIS_CD	0x0040
++#define BFA_CEE_LLDP_SYS_CAP_STATION	0x0080
++#define BFA_CEE_LLDP_SYS_CAP_CVLAN	0x0100
++#define BFA_CEE_LLDP_SYS_CAP_SVLAN	0x0200
++#define BFA_CEE_LLDP_SYS_CAP_TPMR	0x0400
++
++/* LLDP string type */
++struct bfa_cee_lldp_str {
++	u8 sub_type;
++	u8 len;
++	u8 rsvd[2];
++	u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
++};
++
++/* LLDP parameters */
++struct bfa_cee_lldp_cfg {
++	struct bfa_cee_lldp_str chassis_id;
++	struct bfa_cee_lldp_str port_id;
++	struct bfa_cee_lldp_str port_desc;
++	struct bfa_cee_lldp_str sys_name;
++	struct bfa_cee_lldp_str sys_desc;
++	struct bfa_cee_lldp_str mgmt_addr;
++	u16 time_to_live;
++	u16 enabled_system_cap;
++};
++
++enum bfa_cee_dcbx_version {
++	DCBX_PROTOCOL_PRECEE	= 1,
++	DCBX_PROTOCOL_CEE	= 2,
++};
++
++enum bfa_cee_lls {
++	/* LLS is down because the TLV is not sent by the peer */
++	CEE_LLS_DOWN_NO_TLV = 0,
++	/* LLS is down as advertised by the peer */
++	CEE_LLS_DOWN	= 1,
++	CEE_LLS_UP	= 2,
++};
++
++/* CEE/DCBX parameters */
++struct bfa_cee_dcbx_cfg {
++	u8 pgid[BFA_CEE_DCBX_MAX_PRIORITY];
++	u8 pg_percentage[BFA_CEE_DCBX_MAX_PGID];
++	u8 pfc_primap; /* bitmap of priorities with PFC enabled */
++	u8 fcoe_primap; /* bitmap of priorities used for FCoE traffic */
++	u8 iscsi_primap; /* bitmap of priorities used for iSCSI traffic */
++	u8 dcbx_version; /* operating version: CEE or preCEE */
++	u8 lls_fcoe; /* FCoE Logical Link Status */
++	u8 lls_lan; /* LAN Logical Link Status */
++	u8 rsvd[2];
++};
++
++/* CEE status */
++/* Made tri-state for the benefit of the port list command */
++enum bfa_cee_status {
++	CEE_UP = 0,
++	CEE_PHY_UP = 1,
++	CEE_LOOPBACK = 2,
++	CEE_PHY_DOWN = 3,
++};
++
++/* CEE Query */
++struct bfa_cee_attr {
++	u8	cee_status;
++	u8 error_reason;
++	struct bfa_cee_lldp_cfg lldp_remote;
++	struct bfa_cee_dcbx_cfg dcbx_remote;
++	mac_t src_mac;
++	u8 link_speed;
++	u8 nw_priority;
++	u8 filler[2];
++};
++
++/* LLDP/DCBX/CEE Statistics */
++struct bfa_cee_stats {
++	u32	lldp_tx_frames;		/*!< LLDP Tx Frames */
++	u32	lldp_rx_frames;		/*!< LLDP Rx Frames */
++	u32	lldp_rx_frames_invalid;	/*!< LLDP Rx Frames invalid */
++	u32	lldp_rx_frames_new;	/*!< LLDP Rx Frames new */
++	u32	lldp_tlvs_unrecognized;	/*!< LLDP Rx unrecognized TLVs */
++	u32	lldp_rx_shutdown_tlvs;	/*!< LLDP Rx shutdown TLVs */
++	u32	lldp_info_aged_out;	/*!< LLDP remote info aged out */
++	u32	dcbx_phylink_ups;	/*!< DCBX phy link ups */
++	u32	dcbx_phylink_downs;	/*!< DCBX phy link downs */
++	u32	dcbx_rx_tlvs;		/*!< DCBX Rx TLVs */
++	u32	dcbx_rx_tlvs_invalid;	/*!< DCBX Rx TLVs invalid */
++	u32	dcbx_control_tlv_error;	/*!< DCBX control TLV errors */
++	u32	dcbx_feature_tlv_error;	/*!< DCBX feature TLV errors */
++	u32	dcbx_cee_cfg_new;	/*!< DCBX new CEE cfg rcvd */
++	u32	cee_status_down;	/*!< CEE status down */
++	u32	cee_status_up;		/*!< CEE status up */
++	u32	cee_hw_cfg_changed;	/*!< CEE hw cfg changed */
++	u32	cee_rx_invalid_cfg;	/*!< CEE invalid cfg */
++};
++
++#pragma pack()
++
++#endif	/* __BFA_DEFS_CNA_H__ */
+--- /dev/null
++++ b/drivers/net/bna/bfa_defs_mfg_comm.h
+@@ -0,0 +1,244 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++#ifndef __BFA_DEFS_MFG_COMM_H__
++#define __BFA_DEFS_MFG_COMM_H__
++
++#include "cna.h"
++
++/**
++ * Manufacturing block version
++ */
++#define BFA_MFG_VERSION				2
++#define BFA_MFG_VERSION_UNINIT			0xFF
++
++/**
++ * Manufacturing block encrypted version
++ */
++#define BFA_MFG_ENC_VER				2
++
++/**
++ * Manufacturing block version 1 length
++ */
++#define BFA_MFG_VER1_LEN			128
++
++/**
++ * Manufacturing block header length
++ */
++#define BFA_MFG_HDR_LEN				4
++
++#define BFA_MFG_SERIALNUM_SIZE			11
++#define STRSZ(_n)				(((_n) + 4) & ~3)
++
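(Illustrative sketch, not part of the patch.) STRSZ() above returns the smallest multiple of 4 that still fits a string plus its terminating NUL; the manufacturing block uses it to size fixed string fields such as the 11-byte serial number. A minimal userspace check of the arithmetic:

	#include <assert.h>

	#define STRSZ(_n)	(((_n) + 4) & ~3)

	int main(void)
	{
		assert(STRSZ(11) == 12);	/* 11 chars + NUL -> 12 */
		assert(STRSZ(12) == 16);	/* 12 chars + NUL = 13 -> 16 */
		assert(STRSZ(0) == 4);		/* never smaller than 4 */
		return 0;
	}
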
++/**
++ * Manufacturing card type
++ */
++enum {
++	BFA_MFG_TYPE_CB_MAX  = 825,      /*!< Crossbow card type max	*/
++	BFA_MFG_TYPE_FC8P2   = 825,      /*!< 8G 2port FC card		*/
++	BFA_MFG_TYPE_FC8P1   = 815,      /*!< 8G 1port FC card		*/
++	BFA_MFG_TYPE_FC4P2   = 425,      /*!< 4G 2port FC card		*/
++	BFA_MFG_TYPE_FC4P1   = 415,      /*!< 4G 1port FC card		*/
++	BFA_MFG_TYPE_CNA10P2 = 1020,     /*!< 10G 2port CNA card	*/
++	BFA_MFG_TYPE_CNA10P1 = 1010,     /*!< 10G 1port CNA card	*/
++	BFA_MFG_TYPE_JAYHAWK = 804,	 /*!< Jayhawk mezz card		*/
++	BFA_MFG_TYPE_WANCHESE = 1007,	 /*!< Wanchese mezz card	*/
++	BFA_MFG_TYPE_ASTRA    = 807,	 /*!< Astra mezz card		*/
++	BFA_MFG_TYPE_LIGHTNING_P0 = 902, /*!< Lightning mezz card - old	*/
++	BFA_MFG_TYPE_LIGHTNING = 1741,	 /*!< Lightning mezz card	*/
++	BFA_MFG_TYPE_INVALID = 0,	 /*!< Invalid card type		*/
++};
++
++#pragma pack(1)
++
++/**
++ * Check if 1-port card
++ */
++#define bfa_mfg_is_1port(type) (( \
++	(type) == BFA_MFG_TYPE_FC8P1 || \
++	(type) == BFA_MFG_TYPE_FC4P1 || \
++	(type) == BFA_MFG_TYPE_CNA10P1))
++
++/**
++ * Check if Mezz card
++ */
++#define bfa_mfg_is_mezz(type) (( \
++	(type) == BFA_MFG_TYPE_JAYHAWK || \
++	(type) == BFA_MFG_TYPE_WANCHESE || \
++	(type) == BFA_MFG_TYPE_ASTRA || \
++	(type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
++	(type) == BFA_MFG_TYPE_LIGHTNING))
++
++/**
++ * Check if the card type is valid
++ */
++#define bfa_mfg_is_card_type_valid(type) (( \
++	(type) == BFA_MFG_TYPE_FC8P2 || \
++	(type) == BFA_MFG_TYPE_FC8P1 || \
++	(type) == BFA_MFG_TYPE_FC4P2 || \
++	(type) == BFA_MFG_TYPE_FC4P1 || \
++	(type) == BFA_MFG_TYPE_CNA10P2 || \
++	(type) == BFA_MFG_TYPE_CNA10P1 || \
++	bfa_mfg_is_mezz(type)))
++
++/**
++ * Check if the card has old WWN/MAC handling
++ */
++#define bfa_mfg_is_old_wwn_mac_model(type) (( \
++	(type) == BFA_MFG_TYPE_FC8P2 || \
++	(type) == BFA_MFG_TYPE_FC8P1 || \
++	(type) == BFA_MFG_TYPE_FC4P2 || \
++	(type) == BFA_MFG_TYPE_FC4P1 || \
++	(type) == BFA_MFG_TYPE_CNA10P2 || \
++	(type) == BFA_MFG_TYPE_CNA10P1 || \
++	(type) == BFA_MFG_TYPE_JAYHAWK || \
++	(type) == BFA_MFG_TYPE_WANCHESE))
++
++#define bfa_mfg_increment_wwn_mac(m, i)				\
++do {								\
++	u32 t = ((m)[0] << 16) | ((m)[1] << 8) | (m)[2];	\
++	t += (i);						\
++	(m)[0] = (t >> 16) & 0xFF;				\
++	(m)[1] = (t >> 8) & 0xFF;				\
++	(m)[2] = t & 0xFF;					\
++} while (0)
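(Illustrative sketch, not part of the patch.) bfa_mfg_increment_wwn_mac() treats the three bytes at 'm' as a 24-bit big-endian counter, so a carry propagates across byte boundaries; bfa_ioc_get_mfg_mac() later in this patch passes &mac[MAC_ADDRLEN - 3] for exactly that reason. A runnable equivalent with a hypothetical address:

	#include <stdio.h>

	static void inc_mac24(unsigned char *m, unsigned int i)
	{
		unsigned int t = (m[0] << 16) | (m[1] << 8) | m[2];

		t += i;
		m[0] = (t >> 16) & 0xFF;
		m[1] = (t >> 8) & 0xFF;
		m[2] = t & 0xFF;
	}

	int main(void)
	{
		unsigned char mac[6] = { 0x00, 0x05, 0x1e, 0x00, 0x00, 0xff };

		inc_mac24(&mac[3], 1);	/* carry: ...:00:00:ff -> ...:00:01:00 */
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		return 0;
	}
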
++
++#define bfa_mfg_adapter_prop_init_flash(card_type, prop)	\
++do {								\
++	switch ((card_type)) {					\
++	case BFA_MFG_TYPE_FC8P2:				\
++	case BFA_MFG_TYPE_JAYHAWK:				\
++	case BFA_MFG_TYPE_ASTRA:				\
++		(prop) = BFI_ADAPTER_SETP(NPORTS, 2) |		\
++			BFI_ADAPTER_SETP(SPEED, 8);		\
++		break;						\
++	case BFA_MFG_TYPE_FC8P1:				\
++		(prop) = BFI_ADAPTER_SETP(NPORTS, 1) |		\
++			BFI_ADAPTER_SETP(SPEED, 8);		\
++		break;						\
++	case BFA_MFG_TYPE_FC4P2:				\
++		(prop) = BFI_ADAPTER_SETP(NPORTS, 2) |		\
++			BFI_ADAPTER_SETP(SPEED, 4);		\
++		break;						\
++	case BFA_MFG_TYPE_FC4P1:				\
++		(prop) = BFI_ADAPTER_SETP(NPORTS, 1) |		\
++			BFI_ADAPTER_SETP(SPEED, 4);		\
++		break;						\
++	case BFA_MFG_TYPE_CNA10P2:				\
++	case BFA_MFG_TYPE_WANCHESE:				\
++	case BFA_MFG_TYPE_LIGHTNING_P0:				\
++	case BFA_MFG_TYPE_LIGHTNING:				\
++		(prop) = BFI_ADAPTER_SETP(NPORTS, 2);		\
++		(prop) |= BFI_ADAPTER_SETP(SPEED, 10);		\
++		break;						\
++	case BFA_MFG_TYPE_CNA10P1:				\
++		(prop) = BFI_ADAPTER_SETP(NPORTS, 1);		\
++		(prop) |= BFI_ADAPTER_SETP(SPEED, 10);		\
++		break;						\
++	default:						\
++		(prop) = BFI_ADAPTER_UNSUPP;			\
++	}							\
++} while (0)
++
++enum {
++	CB_GPIO_TTV	= (1),		/*!< TTV debug capable cards	*/
++	CB_GPIO_FC8P2   = (2),		/*!< 8G 2port FC card		*/
++	CB_GPIO_FC8P1   = (3),		/*!< 8G 1port FC card		*/
++	CB_GPIO_FC4P2   = (4),		/*!< 4G 2port FC card		*/
++	CB_GPIO_FC4P1   = (5),		/*!< 4G 1port FC card		*/
++	CB_GPIO_DFLY    = (6),		/*!< 8G 2port FC mezzanine card	*/
++	CB_GPIO_PROTO   = (1 << 7)	/*!< 8G 2port FC prototypes	*/
++};
++
++#define bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop)	\
++do {								\
++	if ((gpio) & CB_GPIO_PROTO) {				\
++		(prop) |= BFI_ADAPTER_PROTO;			\
++		(gpio) &= ~CB_GPIO_PROTO;			\
++	}							\
++	switch ((gpio)) {					\
++	case CB_GPIO_TTV:					\
++		(prop) |= BFI_ADAPTER_TTV;			\
++	case CB_GPIO_DFLY:					\
++	case CB_GPIO_FC8P2:					\
++		(prop) |= BFI_ADAPTER_SETP(NPORTS, 2);		\
++		(prop) |= BFI_ADAPTER_SETP(SPEED, 8);		\
++		(card_type) = BFA_MFG_TYPE_FC8P2;		\
++		break;						\
++	case CB_GPIO_FC8P1:					\
++		(prop) |= BFI_ADAPTER_SETP(NPORTS, 1);		\
++		(prop) |= BFI_ADAPTER_SETP(SPEED, 8);		\
++		(card_type) = BFA_MFG_TYPE_FC8P1;		\
++		break;						\
++	case CB_GPIO_FC4P2:					\
++		(prop) |= BFI_ADAPTER_SETP(NPORTS, 2);		\
++		(prop) |= BFI_ADAPTER_SETP(SPEED, 4);		\
++		(card_type) = BFA_MFG_TYPE_FC4P2;		\
++		break;						\
++	case CB_GPIO_FC4P1:					\
++		(prop) |= BFI_ADAPTER_SETP(NPORTS, 1);		\
++		(prop) |= BFI_ADAPTER_SETP(SPEED, 4);		\
++		(card_type) = BFA_MFG_TYPE_FC4P1;		\
++		break;						\
++	default:						\
++		(prop) |= BFI_ADAPTER_UNSUPP;			\
++		(card_type) = BFA_MFG_TYPE_INVALID;		\
++	}							\
++} while (0)
++
++/**
++ * VPD data length
++ */
++#define BFA_MFG_VPD_LEN			512
++#define BFA_MFG_VPD_LEN_INVALID		0
++
++#define BFA_MFG_VPD_PCI_HDR_OFF		137
++#define BFA_MFG_VPD_PCI_VER_MASK	0x07	/*!< version mask 3 bits */
++#define BFA_MFG_VPD_PCI_VDR_MASK	0xf8	/*!< vendor mask 5 bits */
++
++/**
++ * VPD vendor tag
++ */
++enum {
++	BFA_MFG_VPD_UNKNOWN	= 0,     /*!< vendor unknown 		*/
++	BFA_MFG_VPD_IBM 	= 1,     /*!< vendor IBM 		*/
++	BFA_MFG_VPD_HP  	= 2,     /*!< vendor HP  		*/
++	BFA_MFG_VPD_DELL  	= 3,     /*!< vendor DELL  		*/
++	BFA_MFG_VPD_PCI_IBM 	= 0x08,  /*!< PCI VPD IBM     		*/
++	BFA_MFG_VPD_PCI_HP  	= 0x10,  /*!< PCI VPD HP		*/
++	BFA_MFG_VPD_PCI_DELL  	= 0x20,  /*!< PCI VPD DELL		*/
++	BFA_MFG_VPD_PCI_BRCD 	= 0xf8,  /*!< PCI VPD Brocade 		*/
++};
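(Illustrative sketch, not part of the patch.) The byte at BFA_MFG_VPD_PCI_HDR_OFF packs a 3-bit version in the low bits and a 5-bit vendor tag in the high bits, which the two masks above split apart; the PCI vendor values in this enum are already shifted into the high 5 bits. For example, with a hypothetical header byte of 0xfa:

	u8 hdr = 0xfa;					/* hypothetical value */
	u8 ver = hdr & BFA_MFG_VPD_PCI_VER_MASK;	/* 0x02 */
	u8 vdr = hdr & BFA_MFG_VPD_PCI_VDR_MASK;	/* 0xf8 == BFA_MFG_VPD_PCI_BRCD */
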
++
++/**
++ * @brief BFA adapter flash vpd data definition.
++ *
++ * All numerical fields are in big-endian format.
++ */
++struct bfa_mfg_vpd {
++	u8		version;	/*!< vpd data version */
++	u8		vpd_sig[3];	/*!< characters 'V', 'P', 'D' */
++	u8		chksum;		/*!< u8 checksum */
++	u8		vendor;		/*!< vendor */
++	u8		len;		/*!< vpd data length excluding header */
++	u8		rsv;
++	u8		data[BFA_MFG_VPD_LEN];	/*!< vpd data */
++};
++
++#pragma pack()
++
++#endif /* __BFA_DEFS_MFG_COMM_H__ */
+--- /dev/null
++++ b/drivers/net/bna/bfa_defs_status.h
+@@ -0,0 +1,216 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++#ifndef __BFA_DEFS_STATUS_H__
++#define __BFA_DEFS_STATUS_H__
++
++/**
++ * API status return values
++ *
++ * NOTE: The error messages are auto-generated from the comments. Only
++ * single-line comments are supported.
++ */
++enum bfa_status {
++	BFA_STATUS_OK 		= 0,
++	BFA_STATUS_FAILED 	= 1,
++	BFA_STATUS_EINVAL 	= 2,
++	BFA_STATUS_ENOMEM 	= 3,
++	BFA_STATUS_ENOSYS 	= 4,
++	BFA_STATUS_ETIMER 	= 5,
++	BFA_STATUS_EPROTOCOL 	= 6,
++	BFA_STATUS_ENOFCPORTS 	= 7,
++	BFA_STATUS_NOFLASH 	= 8,
++	BFA_STATUS_BADFLASH 	= 9,
++	BFA_STATUS_SFP_UNSUPP 	= 10,
++	BFA_STATUS_UNKNOWN_VFID = 11,
++	BFA_STATUS_DATACORRUPTED = 12,
++	BFA_STATUS_DEVBUSY 	= 13,
++	BFA_STATUS_ABORTED 	= 14,
++	BFA_STATUS_NODEV 	= 15,
++	BFA_STATUS_HDMA_FAILED 	= 16,
++	BFA_STATUS_FLASH_BAD_LEN = 17,
++	BFA_STATUS_UNKNOWN_LWWN = 18,
++	BFA_STATUS_UNKNOWN_RWWN = 19,
++	BFA_STATUS_FCPT_LS_RJT 	= 20,
++	BFA_STATUS_VPORT_EXISTS = 21,
++	BFA_STATUS_VPORT_MAX 	= 22,
++	BFA_STATUS_UNSUPP_SPEED = 23,
++	BFA_STATUS_INVLD_DFSZ 	= 24,
++	BFA_STATUS_CNFG_FAILED 	= 25,
++	BFA_STATUS_CMD_NOTSUPP 	= 26,
++	BFA_STATUS_NO_ADAPTER 	= 27,
++	BFA_STATUS_LINKDOWN 	= 28,
++	BFA_STATUS_FABRIC_RJT 	= 29,
++	BFA_STATUS_UNKNOWN_VWWN = 30,
++	BFA_STATUS_NSLOGIN_FAILED = 31,
++	BFA_STATUS_NO_RPORTS 	= 32,
++	BFA_STATUS_NSQUERY_FAILED = 33,
++	BFA_STATUS_PORT_OFFLINE = 34,
++	BFA_STATUS_RPORT_OFFLINE = 35,
++	BFA_STATUS_TGTOPEN_FAILED = 36,
++	BFA_STATUS_BAD_LUNS 	= 37,
++	BFA_STATUS_IO_FAILURE 	= 38,
++	BFA_STATUS_NO_FABRIC 	= 39,
++	BFA_STATUS_EBADF 	= 40,
++	BFA_STATUS_EINTR 	= 41,
++	BFA_STATUS_EIO 		= 42,
++	BFA_STATUS_ENOTTY 	= 43,
++	BFA_STATUS_ENXIO 	= 44,
++	BFA_STATUS_EFOPEN 	= 45,
++	BFA_STATUS_VPORT_WWN_BP = 46,
++	BFA_STATUS_PORT_NOT_DISABLED = 47,
++	BFA_STATUS_BADFRMHDR 	= 48,
++	BFA_STATUS_BADFRMSZ 	= 49,
++	BFA_STATUS_MISSINGFRM 	= 50,
++	BFA_STATUS_LINKTIMEOUT 	= 51,
++	BFA_STATUS_NO_FCPIM_NEXUS = 52,
++	BFA_STATUS_CHECKSUM_FAIL = 53,
++	BFA_STATUS_GZME_FAILED 	= 54,
++	BFA_STATUS_SCSISTART_REQD = 55,
++	BFA_STATUS_IOC_FAILURE 	= 56,
++	BFA_STATUS_INVALID_WWN 	= 57,
++	BFA_STATUS_MISMATCH 	= 58,
++	BFA_STATUS_IOC_ENABLED 	= 59,
++	BFA_STATUS_ADAPTER_ENABLED = 60,
++	BFA_STATUS_IOC_NON_OP 	= 61,
++	BFA_STATUS_ADDR_MAP_FAILURE = 62,
++	BFA_STATUS_SAME_NAME 	= 63,
++	BFA_STATUS_PENDING      = 64,
++	BFA_STATUS_8G_SPD	= 65,
++	BFA_STATUS_4G_SPD	= 66,
++	BFA_STATUS_AD_IS_ENABLE = 67,
++	BFA_STATUS_EINVAL_TOV 	= 68,
++	BFA_STATUS_EINVAL_QDEPTH = 69,
++	BFA_STATUS_VERSION_FAIL = 70,
++	BFA_STATUS_DIAG_BUSY    = 71,
++	BFA_STATUS_BEACON_ON	= 72,
++	BFA_STATUS_BEACON_OFF	= 73,
++	BFA_STATUS_LBEACON_ON   = 74,
++	BFA_STATUS_LBEACON_OFF	= 75,
++	BFA_STATUS_PORT_NOT_INITED = 76,
++	BFA_STATUS_RPSC_ENABLED = 77,
++	BFA_STATUS_ENOFSAVE 	= 78,
++	BFA_STATUS_BAD_FILE		= 79,
++	BFA_STATUS_RLIM_EN		= 80,
++	BFA_STATUS_RLIM_DIS		= 81,
++	BFA_STATUS_IOC_DISABLED  = 82,
++	BFA_STATUS_ADAPTER_DISABLED  = 83,
++	BFA_STATUS_BIOS_DISABLED  = 84,
++	BFA_STATUS_AUTH_ENABLED  = 85,
++	BFA_STATUS_AUTH_DISABLED  = 86,
++	BFA_STATUS_ERROR_TRL_ENABLED  = 87,
++	BFA_STATUS_ERROR_QOS_ENABLED  = 88,
++	BFA_STATUS_NO_SFP_DEV = 89,
++	BFA_STATUS_MEMTEST_FAILED = 90,
++	BFA_STATUS_INVALID_DEVID = 91,
++	BFA_STATUS_QOS_ENABLED = 92,
++	BFA_STATUS_QOS_DISABLED = 93,
++	BFA_STATUS_INCORRECT_DRV_CONFIG = 94,
++	BFA_STATUS_REG_FAIL = 95,
++	BFA_STATUS_IM_INV_CODE = 96,
++	BFA_STATUS_IM_INV_VLAN = 97,
++	BFA_STATUS_IM_INV_ADAPT_NAME = 98,
++	BFA_STATUS_IM_LOW_RESOURCES = 99,
++	BFA_STATUS_IM_VLANID_IS_PVID = 100,
++	BFA_STATUS_IM_VLANID_EXISTS = 101,
++	BFA_STATUS_IM_FW_UPDATE_FAIL = 102,
++	BFA_STATUS_PORTLOG_ENABLED = 103,
++	BFA_STATUS_PORTLOG_DISABLED = 104,
++	BFA_STATUS_FILE_NOT_FOUND = 105,
++	BFA_STATUS_QOS_FC_ONLY = 106,
++	BFA_STATUS_RLIM_FC_ONLY = 107,
++	BFA_STATUS_CT_SPD = 108,
++	BFA_STATUS_LEDTEST_OP = 109,
++	BFA_STATUS_CEE_NOT_DN = 110,
++	BFA_STATUS_10G_SPD = 111,
++	BFA_STATUS_IM_INV_TEAM_NAME = 112,
++	BFA_STATUS_IM_DUP_TEAM_NAME = 113,
++	BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114,
++	BFA_STATUS_IM_ADAPT_HAS_VLANS = 115,
++	BFA_STATUS_IM_PVID_MISMATCH = 116,
++	BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117,
++	BFA_STATUS_IM_MTU_MISMATCH = 118,
++	BFA_STATUS_IM_RSS_MISMATCH = 119,
++	BFA_STATUS_IM_HDS_MISMATCH = 120,
++	BFA_STATUS_IM_OFFLOAD_MISMATCH = 121,
++	BFA_STATUS_IM_PORT_PARAMS = 122,
++	BFA_STATUS_IM_PORT_NOT_IN_TEAM = 123,
++	BFA_STATUS_IM_CANNOT_REM_PRI = 124,
++	BFA_STATUS_IM_MAX_PORTS_REACHED = 125,
++	BFA_STATUS_IM_LAST_PORT_DELETE = 126,
++	BFA_STATUS_IM_NO_DRIVER = 127,
++	BFA_STATUS_IM_MAX_VLANS_REACHED = 128,
++	BFA_STATUS_TOMCAT_SPD_NOT_ALLOWED = 129,
++	BFA_STATUS_NO_MINPORT_DRIVER = 130,
++	BFA_STATUS_CARD_TYPE_MISMATCH = 131,
++	BFA_STATUS_BAD_ASICBLK = 132,
++	BFA_STATUS_NO_DRIVER = 133,
++	BFA_STATUS_INVALID_MAC = 134,
++	BFA_STATUS_IM_NO_VLAN = 135,
++	BFA_STATUS_IM_ETH_LB_FAILED = 136,
++	BFA_STATUS_IM_PVID_REMOVE = 137,
++	BFA_STATUS_IM_PVID_EDIT = 138,
++	BFA_STATUS_CNA_NO_BOOT = 139,
++	BFA_STATUS_IM_PVID_NON_ZERO = 140,
++	BFA_STATUS_IM_INETCFG_LOCK_FAILED = 141,
++	BFA_STATUS_IM_GET_INETCFG_FAILED = 142,
++	BFA_STATUS_IM_NOT_BOUND = 143,
++	BFA_STATUS_INSUFFICIENT_PERMS = 144,
++	BFA_STATUS_IM_INV_VLAN_NAME = 145,
++	BFA_STATUS_CMD_NOTSUPP_CNA = 146,
++	BFA_STATUS_IM_PASSTHRU_EDIT = 147,
++	BFA_STATUS_IM_BIND_FAILED = 148,
++	BFA_STATUS_IM_UNBIND_FAILED = 149,
++	BFA_STATUS_IM_PORT_IN_TEAM = 150,
++	BFA_STATUS_IM_VLAN_NOT_FOUND = 151,
++	BFA_STATUS_IM_TEAM_NOT_FOUND = 152,
++	BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153,
++	BFA_STATUS_PBC = 154,
++	BFA_STATUS_DEVID_MISSING = 155,
++	BFA_STATUS_BAD_FWCFG = 156,
++	BFA_STATUS_CREATE_FILE = 157,
++	BFA_STATUS_INVALID_VENDOR = 158,
++	BFA_STATUS_SFP_NOT_READY = 159,
++	BFA_STATUS_FLASH_UNINIT = 160,
++	BFA_STATUS_FLASH_EMPTY = 161,
++	BFA_STATUS_FLASH_CKFAIL = 162,
++	BFA_STATUS_TRUNK_UNSUPP = 163,
++	BFA_STATUS_TRUNK_ENABLED = 164,
++	BFA_STATUS_TRUNK_DISABLED  = 165,
++	BFA_STATUS_TRUNK_ERROR_TRL_ENABLED = 166,
++	BFA_STATUS_BOOT_CODE_UPDATED = 167,
++	BFA_STATUS_BOOT_VERSION = 168,
++	BFA_STATUS_CARDTYPE_MISSING = 169,
++	BFA_STATUS_INVALID_CARDTYPE = 170,
++	BFA_STATUS_NO_TOPOLOGY_FOR_CNA = 171,
++	BFA_STATUS_IM_VLAN_OVER_TEAM_DELETE_FAILED = 172,
++	BFA_STATUS_ETHBOOT_ENABLED  = 173,
++	BFA_STATUS_ETHBOOT_DISABLED  = 174,
++	BFA_STATUS_IOPROFILE_OFF = 175,
++	BFA_STATUS_NO_PORT_INSTANCE = 176,
++	BFA_STATUS_BOOT_CODE_TIMEDOUT = 177,
++	BFA_STATUS_NO_VPORT_LOCK = 178,
++	BFA_STATUS_VPORT_NO_CNFG = 179,
++	BFA_STATUS_MAX_VAL
++};
++
++enum bfa_eproto_status {
++	BFA_EPROTO_BAD_ACCEPT = 0,
++	BFA_EPROTO_UNKNOWN_RSP = 1
++};
++
++#endif /* __BFA_DEFS_STATUS_H__ */
+--- /dev/null
++++ b/drivers/net/bna/bfa_ioc.c
+@@ -0,0 +1,1839 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++
++#include "bfa_ioc.h"
++#include "cna.h"
++#include "bfi.h"
++#include "bfi_ctreg.h"
++#include "bfa_defs.h"
++
++/**
++ * IOC local definitions
++ */
++
++#define bfa_ioc_timer_start(__ioc)					\
++	mod_timer(&(__ioc)->ioc_timer, jiffies +	\
++			msecs_to_jiffies(BFA_IOC_TOV))
++#define bfa_ioc_timer_stop(__ioc)   del_timer(&(__ioc)->ioc_timer)
++
++#define bfa_ioc_recovery_timer_start(__ioc)				\
++	mod_timer(&(__ioc)->ioc_timer, jiffies +	\
++			msecs_to_jiffies(BFA_IOC_TOV_RECOVER))
++
++#define bfa_sem_timer_start(__ioc)					\
++	mod_timer(&(__ioc)->sem_timer, jiffies +	\
++			msecs_to_jiffies(BFA_IOC_HWSEM_TOV))
++#define bfa_sem_timer_stop(__ioc)	del_timer(&(__ioc)->sem_timer)
++
++#define bfa_hb_timer_start(__ioc)					\
++	mod_timer(&(__ioc)->hb_timer, jiffies +		\
++			msecs_to_jiffies(BFA_IOC_HB_TOV))
++#define bfa_hb_timer_stop(__ioc)	del_timer(&(__ioc)->hb_timer)
++
++/**
++ * ASIC-specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
++ */
++
++#define bfa_ioc_firmware_lock(__ioc)			\
++			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
++#define bfa_ioc_firmware_unlock(__ioc)			\
++			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
++#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
++#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
++#define bfa_ioc_notify_hbfail(__ioc)			\
++			((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
++
++#define bfa_ioc_is_optrom(__ioc)	\
++	(bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
++
++#define bfa_ioc_mbox_cmd_pending(__ioc)		\
++			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
++			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
++
++bool bfa_auto_recover = true;
++
++/*
++ * forward declarations
++ */
++static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
++static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
++static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
++static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
++static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
++static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
++static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
++static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
++static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
++static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
++static void bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc);
++static void bfa_ioc_recover(struct bfa_ioc *ioc);
++static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
++static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
++static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
++
++/**
++ * IOC state machine events
++ */
++enum ioc_event {
++	IOC_E_ENABLE		= 1,	/*!< IOC enable request		*/
++	IOC_E_DISABLE		= 2,	/*!< IOC disable request	*/
++	IOC_E_TIMEOUT		= 3,	/*!< f/w response timeout	*/
++	IOC_E_FWREADY		= 4,	/*!< f/w initialization done	*/
++	IOC_E_FWRSP_GETATTR	= 5,	/*!< IOC get attribute response	*/
++	IOC_E_FWRSP_ENABLE	= 6,	/*!< enable f/w response	*/
++	IOC_E_FWRSP_DISABLE	= 7,	/*!< disable f/w response	*/
++	IOC_E_HBFAIL		= 8,	/*!< heartbeat failure		*/
++	IOC_E_HWERROR		= 9,	/*!< hardware error interrupt	*/
++	IOC_E_SEMLOCKED		= 10,	/*!< h/w semaphore is locked	*/
++	IOC_E_DETACH		= 11,	/*!< driver detach cleanup	*/
++};
++
++bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
++bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event);
++bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event);
++bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event);
++bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event);
++bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
++bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
++bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
++bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event);
++bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event);
++bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
++bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
++
++static struct bfa_sm_table ioc_sm_table[] = {
++	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
++	{BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
++	{BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
++	{BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
++	{BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
++	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
++	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
++	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
++	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
++	{BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
++	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
++	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
++};
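(Illustrative sketch, not part of the patch.) bfa_ioc_get_state(), further down, maps the IOC's current FSM handler back to a BFA_IOC_* state by scanning this table via bfa_sm_to_state(). That helper is defined in the state-machine header rather than in this hunk; its likely shape, under that assumption, is a linear search that falls back to the last entry:

	/* bfa_sm_t is assumed to be the generic FSM handler typedef */
	static int
	sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
	{
		int i = 0;

		while (smt[i].sm && smt[i].sm != sm)
			i++;
		return smt[i].state;	/* last entry acts as the default */
	}
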
++
++/**
++ * Reset entry actions -- initialize state machine
++ */
++static void
++bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
++{
++	ioc->retry_count = 0;
++	ioc->auto_recover = bfa_auto_recover;
++}
++
++/**
++ * Beginning state. IOC is in reset state.
++ */
++static void
++bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++	case IOC_E_ENABLE:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
++		break;
++
++	case IOC_E_DISABLE:
++		bfa_ioc_disable_comp(ioc);
++		break;
++
++	case IOC_E_DETACH:
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++/**
++ * Semaphore should be acquired for version check.
++ */
++static void
++bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc)
++{
++	bfa_ioc_hw_sem_get(ioc);
++}
++
++/**
++ * Awaiting h/w semaphore to continue with version check.
++ */
++static void
++bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++	case IOC_E_SEMLOCKED:
++		if (bfa_ioc_firmware_lock(ioc)) {
++			ioc->retry_count = 0;
++			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
++		} else {
++			bfa_ioc_hw_sem_release(ioc);
++			bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
++		}
++		break;
++
++	case IOC_E_DISABLE:
++		bfa_ioc_disable_comp(ioc);
++		/* fall through */
++
++	case IOC_E_DETACH:
++		bfa_ioc_hw_sem_get_cancel(ioc);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
++		break;
++
++	case IOC_E_FWREADY:
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++/**
++ * Notify enable completion callback and generate mismatch AEN.
++ */
++static void
++bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc)
++{
++	/**
++	 * Provide enable completion callback and AEN notification only once.
++	 */
++	if (ioc->retry_count == 0)
++		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
++	ioc->retry_count++;
++	bfa_ioc_timer_start(ioc);
++}
++
++/**
++ * Awaiting firmware version match.
++ */
++static void
++bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++	case IOC_E_TIMEOUT:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
++		break;
++
++	case IOC_E_DISABLE:
++		bfa_ioc_disable_comp(ioc);
++		/* fall through */
++
++	case IOC_E_DETACH:
++		bfa_ioc_timer_stop(ioc);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
++		break;
++
++	case IOC_E_FWREADY:
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++/**
++ * Request for semaphore.
++ */
++static void
++bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc)
++{
++	bfa_ioc_hw_sem_get(ioc);
++}
++
++/**
++ * Awaiting semaphore for h/w initialization.
++ */
++static void
++bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++	case IOC_E_SEMLOCKED:
++		ioc->retry_count = 0;
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
++		break;
++
++	case IOC_E_DISABLE:
++		bfa_ioc_hw_sem_get_cancel(ioc);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++static void
++bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc)
++{
++	bfa_ioc_timer_start(ioc);
++	bfa_ioc_reset(ioc, false);
++}
++
++/**
++ * @brief
++ * Hardware is being initialized. Interrupts are enabled.
++ * Holding hardware semaphore lock.
++ */
++static void
++bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++	case IOC_E_FWREADY:
++		bfa_ioc_timer_stop(ioc);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
++		break;
++
++	case IOC_E_HWERROR:
++		bfa_ioc_timer_stop(ioc);
++		/* fall through */
++
++	case IOC_E_TIMEOUT:
++		ioc->retry_count++;
++		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
++			bfa_ioc_timer_start(ioc);
++			bfa_ioc_reset(ioc, true);
++			break;
++		}
++
++		bfa_ioc_hw_sem_release(ioc);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
++		break;
++
++	case IOC_E_DISABLE:
++		bfa_ioc_hw_sem_release(ioc);
++		bfa_ioc_timer_stop(ioc);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++static void
++bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
++{
++	bfa_ioc_timer_start(ioc);
++	bfa_ioc_send_enable(ioc);
++}
++
++/**
++ * Host IOC function is being enabled, awaiting response from firmware.
++ * Semaphore is acquired.
++ */
++static void
++bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++	case IOC_E_FWRSP_ENABLE:
++		bfa_ioc_timer_stop(ioc);
++		bfa_ioc_hw_sem_release(ioc);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
++		break;
++
++	case IOC_E_HWERROR:
++		bfa_ioc_timer_stop(ioc);
++		/* fall through */
++
++	case IOC_E_TIMEOUT:
++		ioc->retry_count++;
++		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
++			writel(BFI_IOC_UNINIT,
++				      ioc->ioc_regs.ioc_fwstate);
++			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
++			break;
++		}
++
++		bfa_ioc_hw_sem_release(ioc);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
++		break;
++
++	case IOC_E_DISABLE:
++		bfa_ioc_timer_stop(ioc);
++		bfa_ioc_hw_sem_release(ioc);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
++		break;
++
++	case IOC_E_FWREADY:
++		bfa_ioc_send_enable(ioc);
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++static void
++bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
++{
++	bfa_ioc_timer_start(ioc);
++	bfa_ioc_send_getattr(ioc);
++}
++
++/**
++ * @brief
++ * IOC configuration in progress. Timer is active.
++ */
++static void
++bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++	case IOC_E_FWRSP_GETATTR:
++		bfa_ioc_timer_stop(ioc);
++		bfa_ioc_check_attr_wwns(ioc);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
++		break;
++
++	case IOC_E_HWERROR:
++		bfa_ioc_timer_stop(ioc);
++		/* fall through */
++
++	case IOC_E_TIMEOUT:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
++		break;
++
++	case IOC_E_DISABLE:
++		bfa_ioc_timer_stop(ioc);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++static void
++bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
++{
++	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
++	bfa_ioc_hb_monitor(ioc);
++}
++
++static void
++bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++	case IOC_E_ENABLE:
++		break;
++
++	case IOC_E_DISABLE:
++		bfa_ioc_hb_stop(ioc);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
++		break;
++
++	case IOC_E_HWERROR:
++	case IOC_E_FWREADY:
++		/**
++		 * Hard error or IOC recovery by other function.
++		 * Treat it same as heartbeat failure.
++		 */
++		bfa_ioc_hb_stop(ioc);
++		/* !!! fall through !!! */
++
++	case IOC_E_HBFAIL:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++static void
++bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
++{
++	bfa_ioc_timer_start(ioc);
++	bfa_ioc_send_disable(ioc);
++}
++
++/**
++ * IOC is being disabled
++ */
++static void
++bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++	case IOC_E_FWRSP_DISABLE:
++		bfa_ioc_timer_stop(ioc);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
++		break;
++
++	case IOC_E_HWERROR:
++		bfa_ioc_timer_stop(ioc);
++		/*
++		 * !!! fall through !!!
++		 */
++
++	case IOC_E_TIMEOUT:
++		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++/**
++ * IOC disable completion entry.
++ */
++static void
++bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
++{
++	bfa_ioc_disable_comp(ioc);
++}
++
++static void
++bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++	case IOC_E_ENABLE:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
++		break;
++
++	case IOC_E_DISABLE:
++		ioc->cbfn->disable_cbfn(ioc->bfa);
++		break;
++
++	case IOC_E_FWREADY:
++		break;
++
++	case IOC_E_DETACH:
++		bfa_ioc_firmware_unlock(ioc);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++static void
++bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc)
++{
++	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
++	bfa_ioc_timer_start(ioc);
++}
++
++/**
++ * @brief
++ * Hardware initialization failed.
++ */
++static void
++bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++	case IOC_E_DISABLE:
++		bfa_ioc_timer_stop(ioc);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
++		break;
++
++	case IOC_E_DETACH:
++		bfa_ioc_timer_stop(ioc);
++		bfa_ioc_firmware_unlock(ioc);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
++		break;
++
++	case IOC_E_TIMEOUT:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++static void
++bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc)
++{
++	struct list_head			*qe;
++	struct bfa_ioc_hbfail_notify *notify;
++
++	/**
++	 * Mark IOC as failed in hardware and stop firmware.
++	 */
++	bfa_ioc_lpu_stop(ioc);
++	writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
++
++	/**
++	 * Notify other functions on HB failure.
++	 */
++	bfa_ioc_notify_hbfail(ioc);
++
++	/**
++	 * Notify driver and common modules registered for notification.
++	 */
++	ioc->cbfn->hbfail_cbfn(ioc->bfa);
++	list_for_each(qe, &ioc->hb_notify_q) {
++		notify = (struct bfa_ioc_hbfail_notify *) qe;
++		notify->cbfn(notify->cbarg);
++	}
++
++	/**
++	 * Flush any queued up mailbox requests.
++	 */
++	bfa_ioc_mbox_hbfail(ioc);
++
++	/**
++	 * Trigger auto-recovery after a delay.
++	 */
++	if (ioc->auto_recover)
++		mod_timer(&ioc->ioc_timer, jiffies +
++			msecs_to_jiffies(BFA_IOC_TOV_RECOVER));
++}
++
++/**
++ * @brief
++ * IOC heartbeat failure.
++ */
++static void
++bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++
++	case IOC_E_ENABLE:
++		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
++		break;
++
++	case IOC_E_DISABLE:
++		if (ioc->auto_recover)
++			bfa_ioc_timer_stop(ioc);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
++		break;
++
++	case IOC_E_TIMEOUT:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
++		break;
++
++	case IOC_E_FWREADY:
++		/**
++		 * Recovery is already initiated by other function.
++		 */
++		break;
++
++	case IOC_E_HWERROR:
++		/*
++		 * HB failure notification, ignore.
++		 */
++		break;
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++/**
++ * BFA IOC private functions
++ */
++
++static void
++bfa_ioc_disable_comp(struct bfa_ioc *ioc)
++{
++	struct list_head			*qe;
++	struct bfa_ioc_hbfail_notify *notify;
++
++	ioc->cbfn->disable_cbfn(ioc->bfa);
++
++	/**
++	 * Notify common modules registered for notification.
++	 */
++	list_for_each(qe, &ioc->hb_notify_q) {
++		notify = (struct bfa_ioc_hbfail_notify *) qe;
++		notify->cbfn(notify->cbarg);
++	}
++}
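(Illustrative note, not part of the patch.) Both this loop and the one in bfa_ioc_sm_hbfail_entry() cast the list_head pointer straight to the notify structure, which is valid only while 'qe' stays the first member of struct bfa_ioc_hbfail_notify. A list_entry()-based form would not depend on member order:

	list_for_each(qe, &ioc->hb_notify_q) {
		notify = list_entry(qe, struct bfa_ioc_hbfail_notify, qe);
		notify->cbfn(notify->cbarg);
	}
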
++
++void
++bfa_ioc_sem_timeout(void *ioc_arg)
++{
++	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
++
++	bfa_ioc_hw_sem_get(ioc);
++}
++
++bool
++bfa_ioc_sem_get(void __iomem *sem_reg)
++{
++	u32 r32;
++	int cnt = 0;
++#define BFA_SEM_SPINCNT	3000
++
++	r32 = readl(sem_reg);
++
++	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
++		cnt++;
++		udelay(2);
++		r32 = readl(sem_reg);
++	}
++
++	if (r32 == 0)
++		return true;
++
++	BUG_ON(!(cnt < BFA_SEM_SPINCNT));
++	return false;
++}
++
++void
++bfa_ioc_sem_release(void __iomem *sem_reg)
++{
++	writel(1, sem_reg);
++}
++
++static void
++bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
++{
++	u32	r32;
++
++	/**
++	 * The first read of the semaphore register returns 0 and acquires it;
++	 * subsequent reads return 1. The semaphore is released by writing 1
++	 * to the register.
++	 */
++	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
++	if (r32 == 0) {
++		bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
++		return;
++	}
++
++	mod_timer(&ioc->sem_timer, jiffies +
++		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
++}
++
++void
++bfa_ioc_hw_sem_release(struct bfa_ioc *ioc)
++{
++	writel(1, ioc->ioc_regs.ioc_sem_reg);
++}
++
++static void
++bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
++{
++	del_timer(&ioc->sem_timer);
++}
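(Illustrative sketch, not part of the patch.) The acquire/release pair above follows the register-semaphore convention described in the comment: a read returns 0 exactly once, granting ownership, and every later reader sees 1 until the owner writes 1 back. Reduced to its essentials:

	static bool sem_try_acquire(void __iomem *sem)
	{
		return readl(sem) == 0;	/* 0 on this read => we own it */
	}

	static void sem_release(void __iomem *sem)
	{
		writel(1, sem);		/* hand it to the next acquirer */
	}
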
++
++/**
++ * @brief
++ * Initialize LPU local memory (aka secondary memory / SRAM)
++ */
++static void
++bfa_ioc_lmem_init(struct bfa_ioc *ioc)
++{
++	u32	pss_ctl;
++	int		i;
++#define PSS_LMEM_INIT_TIME  10000
++
++	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
++	pss_ctl &= ~__PSS_LMEM_RESET;
++	pss_ctl |= __PSS_LMEM_INIT_EN;
++
++	/*
++	 * i2c workaround 12.5khz clock
++	 */
++	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
++	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
++
++	/**
++	 * wait for memory initialization to be complete
++	 */
++	i = 0;
++	do {
++		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
++		i++;
++	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
++
++	/**
++	 * If memory initialization is not successful, IOC timeout will catch
++	 * such failures.
++	 */
++	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
++
++	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
++	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
++}
++
++static void
++bfa_ioc_lpu_start(struct bfa_ioc *ioc)
++{
++	u32	pss_ctl;
++
++	/**
++	 * Take processor out of reset.
++	 */
++	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
++	pss_ctl &= ~__PSS_LPU0_RESET;
++
++	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
++}
++
++static void
++bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
++{
++	u32	pss_ctl;
++
++	/**
++	 * Put processors in reset.
++	 */
++	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
++	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
++
++	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
++}
++
++/**
++ * Get driver and firmware versions.
++ */
++void
++bfa_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
++{
++	u32	pgnum, pgoff;
++	u32	loff = 0;
++	int		i;
++	u32	*fwsig = (u32 *) fwhdr;
++
++	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
++	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
++	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
++
++	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
++	     i++) {
++		fwsig[i] =
++			swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
++		loff += sizeof(u32);
++	}
++}
++
++/**
++ * Returns TRUE if the firmware versions (MD5 sums) match.
++ */
++bool
++bfa_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
++{
++	struct bfi_ioc_image_hdr *drv_fwhdr;
++	int i;
++
++	drv_fwhdr = (struct bfi_ioc_image_hdr *)
++		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
++
++	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
++		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
++			return false;
++	}
++
++	return true;
++}
++
++/**
++ * Return true if current running version is valid. Firmware signature and
++ * execution context (driver/bios) must match.
++ */
++static bool
++bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
++{
++	struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
++
++	/**
++	 * If bios/efi boot (flash based) -- return true
++	 */
++	if (bfa_ioc_is_optrom(ioc))
++		return true;
++
++	bfa_ioc_fwver_get(ioc, &fwhdr);
++	drv_fwhdr = (struct bfi_ioc_image_hdr *)
++		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
++
++	if (fwhdr.signature != drv_fwhdr->signature)
++		return false;
++
++	if (fwhdr.exec != drv_fwhdr->exec)
++		return false;
++
++	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
++}
++
++/**
++ * Conditionally flush any pending message from firmware at start.
++ */
++static void
++bfa_ioc_msgflush(struct bfa_ioc *ioc)
++{
++	u32	r32;
++
++	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
++	if (r32)
++		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
++}
++
++/**
++ * @img ioc_init_logic.jpg
++ */
++static void
++bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
++{
++	enum bfi_ioc_state ioc_fwstate;
++	bool fwvalid;
++
++	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
++
++	if (force)
++		ioc_fwstate = BFI_IOC_UNINIT;
++
++	/**
++	 * check if firmware is valid
++	 */
++	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
++		false : bfa_ioc_fwver_valid(ioc);
++
++	if (!fwvalid) {
++		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
++		return;
++	}
++
++	/**
++	 * If hardware initialization is in progress (initialized by other IOC),
++	 * just wait for an initialization completion interrupt.
++	 */
++	if (ioc_fwstate == BFI_IOC_INITING) {
++		ioc->cbfn->reset_cbfn(ioc->bfa);
++		return;
++	}
++
++	/**
++	 * If IOC function is disabled and firmware version is same,
++	 * just re-enable IOC.
++	 *
++	 * If option rom, IOC must not be in operational state. With
++	 * convergence, IOC will be in operational state when 2nd driver
++	 * is loaded.
++	 */
++	if (ioc_fwstate == BFI_IOC_DISABLED ||
++	    (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
++		/**
++		 * When using MSI-X any pending firmware ready event should
++		 * be flushed. Otherwise MSI-X interrupts are not delivered.
++		 */
++		bfa_ioc_msgflush(ioc);
++		ioc->cbfn->reset_cbfn(ioc->bfa);
++		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
++		return;
++	}
++
++	/**
++	 * Initialize the h/w for any other states.
++	 */
++	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
++}
++
++void
++bfa_ioc_timeout(void *ioc_arg)
++{
++	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
++
++	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
++}
++
++void
++bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
++{
++	u32 *msgp = (u32 *) ioc_msg;
++	u32 i;
++
++	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));
++
++	/*
++	 * first write msg to mailbox registers
++	 */
++	for (i = 0; i < len / sizeof(u32); i++)
++		writel(cpu_to_le32(msgp[i]),
++			      ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
++
++	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
++		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
++
++	/*
++	 * write 1 to mailbox CMD to trigger LPU event
++	 */
++	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
++	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
++}
++
++static void
++bfa_ioc_send_enable(struct bfa_ioc *ioc)
++{
++	struct bfi_ioc_ctrl_req enable_req;
++	struct timeval tv;
++
++	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
++		    bfa_ioc_portid(ioc));
++	enable_req.ioc_class = ioc->ioc_mc;
++	do_gettimeofday(&tv);
++	enable_req.tv_sec = ntohl(tv.tv_sec);
++	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
++}
++
++static void
++bfa_ioc_send_disable(struct bfa_ioc *ioc)
++{
++	struct bfi_ioc_ctrl_req disable_req;
++
++	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
++		    bfa_ioc_portid(ioc));
++	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
++}
++
++static void
++bfa_ioc_send_getattr(struct bfa_ioc *ioc)
++{
++	struct bfi_ioc_getattr_req attr_req;
++
++	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
++		    bfa_ioc_portid(ioc));
++	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
++	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
++}
++
++void
++bfa_ioc_hb_check(void *cbarg)
++{
++	struct bfa_ioc *ioc = cbarg;
++	u32	hb_count;
++
++	hb_count = readl(ioc->ioc_regs.heartbeat);
++	if (ioc->hb_count == hb_count) {
++		pr_crit("Firmware heartbeat failure at %d\n", hb_count);
++		bfa_ioc_recover(ioc);
++		return;
++	} else {
++		ioc->hb_count = hb_count;
++	}
++
++	bfa_ioc_mbox_poll(ioc);
++	mod_timer(&ioc->hb_timer, jiffies +
++		msecs_to_jiffies(BFA_IOC_HB_TOV));
++}
++
++static void
++bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
++{
++	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
++	mod_timer(&ioc->hb_timer, jiffies +
++		msecs_to_jiffies(BFA_IOC_HB_TOV));
++}
++
++static void
++bfa_ioc_hb_stop(struct bfa_ioc *ioc)
++{
++	del_timer(&ioc->hb_timer);
++}
++
++/**
++ * @brief
++ *	Initiate a full firmware download.
++ */
++static void
++bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
++		    u32 boot_param)
++{
++	u32 *fwimg;
++	u32 pgnum, pgoff;
++	u32 loff = 0;
++	u32 chunkno = 0;
++	u32 i;
++
++	/**
++	 * Initialize LMEM first before code download
++	 */
++	bfa_ioc_lmem_init(ioc);
++
++	/**
++	 * Flash based firmware boot
++	 */
++	if (bfa_ioc_is_optrom(ioc))
++		boot_type = BFI_BOOT_TYPE_FLASH;
++	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
++
++	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
++	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
++
++	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
++
++	for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
++		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
++			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
++			fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
++					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
++		}
++
++		/**
++		 * write smem
++		 */
++		writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
++			      ((ioc->ioc_regs.smem_page_start) + (loff)));
++
++		loff += sizeof(u32);
++
++		/**
++		 * handle page offset wrap around
++		 */
++		loff = PSS_SMEM_PGOFF(loff);
++		if (loff == 0) {
++			pgnum++;
++			writel(pgnum,
++				      ioc->ioc_regs.host_page_num_fn);
++		}
++	}
++
++	writel(bfa_ioc_smem_pgnum(ioc, 0),
++		      ioc->ioc_regs.host_page_num_fn);
++
++	/*
++	 * Set boot type and boot param at the end.
++	 */
++	writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
++			+ (BFI_BOOT_TYPE_OFF)));
++	writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
++			+ (BFI_BOOT_PARAM_OFF)));
++}
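(Illustrative sketch, not part of the patch.) The download loop advances 'loff' one 32-bit word at a time and relies on PSS_SMEM_PGOFF() wrapping it to zero at a page boundary, at which point 'pgnum' is bumped and rewritten to the page register. Assuming, hypothetically, a 0x8000-byte SMEM page, the wrap looks like:

	#define PG_SZ		0x8000U			/* assumed page size */
	#define PGOFF(_off)	((_off) & (PG_SZ - 1))

	u32 loff = PG_SZ - 4;	/* last word of the current page */
	loff += 4;		/* sizeof(u32) */
	loff = PGOFF(loff);	/* wraps to 0 -> caller increments pgnum */
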
++
++static void
++bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
++{
++	bfa_ioc_hwinit(ioc, force);
++}
++
++/**
++ * @brief
++ * Update BFA configuration from firmware configuration.
++ */
++static void
++bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
++{
++	struct bfi_ioc_attr *attr = ioc->attr;
++
++	attr->adapter_prop  = ntohl(attr->adapter_prop);
++	attr->card_type     = ntohl(attr->card_type);
++	attr->maxfrsize	    = ntohs(attr->maxfrsize);
++
++	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
++}
++
++/**
++ * Attach time initialization of mbox logic.
++ */
++static void
++bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
++{
++	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
++	int	mc;
++
++	INIT_LIST_HEAD(&mod->cmd_q);
++	for (mc = 0; mc < BFI_MC_MAX; mc++) {
++		mod->mbhdlr[mc].cbfn = NULL;
++		mod->mbhdlr[mc].cbarg = ioc->bfa;
++	}
++}
++
++/**
++ * Mbox poll timer -- restarts any pending mailbox requests.
++ */
++static void
++bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
++{
++	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
++	struct bfa_mbox_cmd *cmd;
++	u32			stat;
++
++	/**
++	 * If no command pending, do nothing
++	 */
++	if (list_empty(&mod->cmd_q))
++		return;
++
++	/**
++	 * If previous command is not yet fetched by firmware, do nothing
++	 */
++	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
++	if (stat)
++		return;
++
++	/**
++	 * Enqueue command to firmware.
++	 */
++	bfa_q_deq(&mod->cmd_q, &cmd);
++	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
++}
++
++/**
++ * Cleanup any pending requests.
++ */
++static void
++bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
++{
++	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
++	struct bfa_mbox_cmd *cmd;
++
++	while (!list_empty(&mod->cmd_q))
++		bfa_q_deq(&mod->cmd_q, &cmd);
++}
++
++/**
++ * IOC public
++ */
++enum bfa_status
++bfa_ioc_pll_init(struct bfa_ioc *ioc)
++{
++	/*
++	 *  Hold semaphore so that nobody can access the chip during init.
++	 */
++	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
++
++	bfa_ioc_pll_init_asic(ioc);
++
++	ioc->pllinit = true;
++	/*
++	 *  release semaphore.
++	 */
++	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
++
++	return BFA_STATUS_OK;
++}
++
++/**
++ * Interface used by diag module to do firmware boot with memory test
++ * as the entry vector.
++ */
++void
++bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
++{
++	void __iomem *rb;
++
++	bfa_ioc_stats(ioc, ioc_boots);
++
++	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
++		return;
++
++	/**
++	 * Initialize IOC state of all functions on a chip reset.
++	 */
++	rb = ioc->pcidev.pci_bar_kva;
++	if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
++		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
++		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
++	} else {
++		writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
++		writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
++	}
++
++	bfa_ioc_msgflush(ioc);
++	bfa_ioc_download_fw(ioc, boot_type, boot_param);
++
++	/**
++	 * Enable interrupts just before starting LPU
++	 */
++	ioc->cbfn->reset_cbfn(ioc->bfa);
++	bfa_ioc_lpu_start(ioc);
++}
++
++/**
++ * Enable/disable IOC failure auto recovery.
++ */
++void
++bfa_ioc_auto_recover(bool auto_recover)
++{
++	bfa_auto_recover = auto_recover;
++}
++
++bool
++bfa_ioc_is_operational(struct bfa_ioc *ioc)
++{
++	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
++}
++
++bool
++bfa_ioc_is_initialized(struct bfa_ioc *ioc)
++{
++	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
++
++	return ((r32 != BFI_IOC_UNINIT) &&
++		(r32 != BFI_IOC_INITING) &&
++		(r32 != BFI_IOC_MEMTEST));
++}
++
++void
++bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
++{
++	u32	*msgp = mbmsg;
++	u32	r32;
++	int		i;
++
++	/**
++	 * read the MBOX msg
++	 */
++	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
++	     i++) {
++		r32 = readl(ioc->ioc_regs.lpu_mbox +
++				   i * sizeof(u32));
++		msgp[i] = htonl(r32);
++	}
++
++	/**
++	 * turn off mailbox interrupt by clearing mailbox status
++	 */
++	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
++	readl(ioc->ioc_regs.lpu_mbox_cmd);
++}
++
++void
++bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
++{
++	union bfi_ioc_i2h_msg_u	*msg;
++
++	msg = (union bfi_ioc_i2h_msg_u *) m;
++
++	bfa_ioc_stats(ioc, ioc_isrs);
++
++	switch (msg->mh.msg_id) {
++	case BFI_IOC_I2H_HBEAT:
++		break;
++
++	case BFI_IOC_I2H_READY_EVENT:
++		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
++		break;
++
++	case BFI_IOC_I2H_ENABLE_REPLY:
++		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
++		break;
++
++	case BFI_IOC_I2H_DISABLE_REPLY:
++		bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
++		break;
++
++	case BFI_IOC_I2H_GETATTR_REPLY:
++		bfa_ioc_getattr_reply(ioc);
++		break;
++
++	default:
++		BUG_ON(1);
++	}
++}
++
++/**
++ * IOC attach time initialization and setup.
++ *
++ * @param[in]	ioc	memory for IOC
++ * @param[in]	bfa	driver instance structure
++ */
++void
++bfa_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
++{
++	ioc->bfa	= bfa;
++	ioc->cbfn	= cbfn;
++	ioc->fcmode	= false;
++	ioc->pllinit	= false;
++	ioc->dbg_fwsave_once = true;
++
++	bfa_ioc_mbox_attach(ioc);
++	INIT_LIST_HEAD(&ioc->hb_notify_q);
++
++	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
++}
++
++/**
++ * Driver detach time IOC cleanup.
++ */
++void
++bfa_ioc_detach(struct bfa_ioc *ioc)
++{
++	bfa_fsm_send_event(ioc, IOC_E_DETACH);
++}
++
++/**
++ * Setup IOC PCI properties.
++ *
++ * @param[in]	pcidev	PCI device information for this IOC
++ */
++void
++bfa_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
++		 enum bfi_mclass mc)
++{
++	ioc->ioc_mc	= mc;
++	ioc->pcidev	= *pcidev;
++	ioc->ctdev	= bfa_asic_id_ct(ioc->pcidev.device_id);
++	ioc->cna	= ioc->ctdev && !ioc->fcmode;
++
++	bfa_ioc_set_ct_hwif(ioc);
++
++	bfa_ioc_map_port(ioc);
++	bfa_ioc_reg_init(ioc);
++}
++
++/**
++ * Initialize IOC dma memory
++ *
++ * @param[in]	dm_kva	kernel virtual address of IOC dma memory
++ * @param[in]	dm_pa	physical address of IOC dma memory
++ */
++void
++bfa_ioc_mem_claim(struct bfa_ioc *ioc,  u8 *dm_kva, u64 dm_pa)
++{
++	/**
++	 * dma memory for firmware attribute
++	 */
++	ioc->attr_dma.kva = dm_kva;
++	ioc->attr_dma.pa = dm_pa;
++	ioc->attr = (struct bfi_ioc_attr *) dm_kva;
++}
++
++/**
++ * Return size of dma memory required.
++ */
++u32
++bfa_ioc_meminfo(void)
++{
++	return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
++}
++
++void
++bfa_ioc_enable(struct bfa_ioc *ioc)
++{
++	bfa_ioc_stats(ioc, ioc_enables);
++	ioc->dbg_fwsave_once = true;
++
++	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
++}
++
++void
++bfa_ioc_disable(struct bfa_ioc *ioc)
++{
++	bfa_ioc_stats(ioc, ioc_disables);
++	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
++}
++
++u32
++bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
++{
++	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
++}
++
++u32
++bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr)
++{
++	return PSS_SMEM_PGOFF(fmaddr);
++}
++
++/**
++ * Register mailbox message handler functions
++ *
++ * @param[in]	ioc		IOC instance
++ * @param[in]	mcfuncs		message class handler functions
++ */
++void
++bfa_ioc_mbox_register(struct bfa_ioc *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
++{
++	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
++	int				mc;
++
++	for (mc = 0; mc < BFI_MC_MAX; mc++)
++		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
++}
++
++/**
++ * Register mailbox message handler function, to be called by common modules
++ */
++void
++bfa_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
++		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
++{
++	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
++
++	mod->mbhdlr[mc].cbfn	= cbfn;
++	mod->mbhdlr[mc].cbarg = cbarg;
++}
++
++/**
++ * Queue a mailbox command request to firmware. Waits if mailbox is busy.
++ * It is the caller's responsibility to serialize.
++ *
++ * @param[in]	ioc	IOC instance
++ * @param[in]	cmd	Mailbox command
++ */
++void
++bfa_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
++{
++	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
++	u32			stat;
++
++	/**
++	 * If a previous command is pending, queue new command
++	 */
++	if (!list_empty(&mod->cmd_q)) {
++		list_add_tail(&cmd->qe, &mod->cmd_q);
++		return;
++	}
++
++	/**
++	 * If mailbox is busy, queue command for poll timer
++	 */
++	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
++	if (stat) {
++		list_add_tail(&cmd->qe, &mod->cmd_q);
++		return;
++	}
++
++	/**
++	 * mailbox is free -- queue command to firmware
++	 */
++	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
++}
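(Illustrative sketch, not part of the patch.) A typical caller of bfa_ioc_mbox_queue() builds a bfa_mbox_cmd that must stay alive until the firmware fetches it, since the command may sit on cmd_q until the poll timer runs. The request type below is hypothetical:

	static struct bfa_mbox_cmd cmd;	/* caller-owned, not stack-allocated */

	static void send_my_req(struct bfa_ioc *ioc, struct my_req *req)
	{
		BUILD_BUG_ON(sizeof(*req) > sizeof(cmd.msg));
		memcpy(cmd.msg, req, sizeof(*req));
		bfa_ioc_mbox_queue(ioc, &cmd);	/* sent now or queued */
	}
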
++
++/**
++ * Handle mailbox interrupts
++ */
++void
++bfa_ioc_mbox_isr(struct bfa_ioc *ioc)
++{
++	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
++	struct bfi_mbmsg m;
++	int				mc;
++
++	bfa_ioc_msgget(ioc, &m);
++
++	/**
++	 * Treat IOC message class as special.
++	 */
++	mc = m.mh.msg_class;
++	if (mc == BFI_MC_IOC) {
++		bfa_ioc_isr(ioc, &m);
++		return;
++	}
++
++	if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
++		return;
++
++	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
++}
++
++void
++bfa_ioc_error_isr(struct bfa_ioc *ioc)
++{
++	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
++}
++
++void
++bfa_ioc_set_fcmode(struct bfa_ioc *ioc)
++{
++	ioc->fcmode  = true;
++	ioc->port_id = bfa_ioc_pcifn(ioc);
++}
++
++/**
++ * return true if IOC is disabled
++ */
++bool
++bfa_ioc_is_disabled(struct bfa_ioc *ioc)
++{
++	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
++		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
++}
++
++/**
++ * return true if IOC firmware is different.
++ */
++bool
++bfa_ioc_fw_mismatch(struct bfa_ioc *ioc)
++{
++	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
++		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck) ||
++		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch);
++}
++
++#define bfa_ioc_state_disabled(__sm)		\
++	(((__sm) == BFI_IOC_UNINIT) ||		\
++	 ((__sm) == BFI_IOC_INITING) ||		\
++	 ((__sm) == BFI_IOC_HWINIT) ||		\
++	 ((__sm) == BFI_IOC_DISABLED) ||	\
++	 ((__sm) == BFI_IOC_FAIL) ||		\
++	 ((__sm) == BFI_IOC_CFG_DISABLED))
++
++/**
++ * Check if adapter is disabled -- both IOCs should be in a disabled
++ * state.
++ */
++bool
++bfa_ioc_adapter_is_disabled(struct bfa_ioc *ioc)
++{
++	u32	ioc_state;
++	void __iomem *rb = ioc->pcidev.pci_bar_kva;
++
++	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
++		return false;
++
++	ioc_state = readl(rb + BFA_IOC0_STATE_REG);
++	if (!bfa_ioc_state_disabled(ioc_state))
++		return false;
++
++	if (ioc->pcidev.device_id != PCI_DEVICE_ID_BROCADE_FC_8G1P) {
++		ioc_state = readl(rb + BFA_IOC1_STATE_REG);
++		if (!bfa_ioc_state_disabled(ioc_state))
++			return false;
++	}
++
++	return true;
++}
++
++/**
++ * Add to IOC heartbeat failure notification queue. To be used by common
++ * modules such as cee, port, diag.
++ */
++void
++bfa_ioc_hbfail_register(struct bfa_ioc *ioc,
++			struct bfa_ioc_hbfail_notify *notify)
++{
++	list_add_tail(&notify->qe, &ioc->hb_notify_q);
++}
++
++#define BFA_MFG_NAME "Brocade"
++void
++bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
++			 struct bfa_adapter_attr *ad_attr)
++{
++	struct bfi_ioc_attr *ioc_attr;
++
++	ioc_attr = ioc->attr;
++
++	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
++	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
++	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
++	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
++	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
++		      sizeof(struct bfa_mfg_vpd));
++
++	ad_attr->nports = bfa_ioc_get_nports(ioc);
++	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
++
++	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
++	/* For now, model_descr uses the same model string */
++	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
++
++	ad_attr->card_type = ioc_attr->card_type;
++	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
++
++	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
++		ad_attr->prototype = 1;
++	else
++		ad_attr->prototype = 0;
++
++	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
++	ad_attr->mac  = bfa_ioc_get_mac(ioc);
++
++	ad_attr->pcie_gen = ioc_attr->pcie_gen;
++	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
++	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
++	ad_attr->asic_rev = ioc_attr->asic_rev;
++
++	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
++
++	ad_attr->cna_capable = ioc->cna;
++	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
++}
++
++enum bfa_ioc_type
++bfa_ioc_get_type(struct bfa_ioc *ioc)
++{
++	if (!ioc->ctdev || ioc->fcmode)
++		return BFA_IOC_TYPE_FC;
++	else if (ioc->ioc_mc == BFI_MC_IOCFC)
++		return BFA_IOC_TYPE_FCoE;
++	else if (ioc->ioc_mc == BFI_MC_LL)
++		return BFA_IOC_TYPE_LL;
++	else {
++		BUG_ON(!(ioc->ioc_mc == BFI_MC_LL));
++		return BFA_IOC_TYPE_LL;
++	}
++}
++
++void
++bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
++{
++	memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
++	memcpy(serial_num,
++			(void *)ioc->attr->brcd_serialnum,
++			BFA_ADAPTER_SERIAL_NUM_LEN);
++}
++
++void
++bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
++{
++	memset(fw_ver, 0, BFA_VERSION_LEN);
++	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
++}
++
++void
++bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
++{
++	BUG_ON(!chip_rev);
++
++	memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
++
++	chip_rev[0] = 'R';
++	chip_rev[1] = 'e';
++	chip_rev[2] = 'v';
++	chip_rev[3] = '-';
++	chip_rev[4] = ioc->attr->asic_rev;
++	chip_rev[5] = '\0';
++}
++
++void
++bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
++{
++	memset(optrom_ver, 0, BFA_VERSION_LEN);
++	memcpy(optrom_ver, ioc->attr->optrom_version,
++		      BFA_VERSION_LEN);
++}
++
++void
++bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
++{
++	memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
++	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
++}
++
++void
++bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
++{
++	struct bfi_ioc_attr *ioc_attr;
++
++	BUG_ON(!model);
++	memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
++
++	ioc_attr = ioc->attr;
++
++	/* model name */
++	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
++		BFA_MFG_NAME, ioc_attr->card_type);
++}
++
++enum bfa_ioc_state
++bfa_ioc_get_state(struct bfa_ioc *ioc)
++{
++	return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
++}
++
++void
++bfa_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
++{
++	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
++
++	ioc_attr->state = bfa_ioc_get_state(ioc);
++	ioc_attr->port_id = ioc->port_id;
++
++	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
++
++	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
++
++	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
++	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
++	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
++}
++
++/**
++ * WWN public
++ */
++u64
++bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
++{
++	return ioc->attr->pwwn;
++}
++
++u64
++bfa_ioc_get_nwwn(struct bfa_ioc *ioc)
++{
++	return ioc->attr->nwwn;
++}
++
++u64
++bfa_ioc_get_adid(struct bfa_ioc *ioc)
++{
++	return ioc->attr->mfg_pwwn;
++}
++
++mac_t
++bfa_ioc_get_mac(struct bfa_ioc *ioc)
++{
++	/*
++	 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
++	 */
++	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
++		return bfa_ioc_get_mfg_mac(ioc);
++	else
++		return ioc->attr->mac;
++}
++
++u64
++bfa_ioc_get_mfg_pwwn(struct bfa_ioc *ioc)
++{
++	return ioc->attr->mfg_pwwn;
++}
++
++u64
++bfa_ioc_get_mfg_nwwn(struct bfa_ioc *ioc)
++{
++	return ioc->attr->mfg_nwwn;
++}
++
++mac_t
++bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
++{
++	mac_t	m;
++
++	m = ioc->attr->mfg_mac;
++	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
++		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
++	else
++		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
++			bfa_ioc_pcifn(ioc));
++
++	return m;
++}
++
++bool
++bfa_ioc_get_fcmode(struct bfa_ioc *ioc)
++{
++	return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
++}
++
++/**
++ * Firmware failure detected. Start recovery actions.
++ */
++static void
++bfa_ioc_recover(struct bfa_ioc *ioc)
++{
++	bfa_ioc_stats(ioc, ioc_hbfails);
++	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
++}
++
++static void
++bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
++{
++	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
++		return;
++}
+--- /dev/null
++++ b/drivers/net/bna/bfa_ioc.h
+@@ -0,0 +1,343 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++
++#ifndef __BFA_IOC_H__
++#define __BFA_IOC_H__
++
++#include "bfa_sm.h"
++#include "bfi.h"
++#include "cna.h"
++
++#define BFA_IOC_TOV		3000	/* msecs */
++#define BFA_IOC_HWSEM_TOV	500	/* msecs */
++#define BFA_IOC_HB_TOV		500	/* msecs */
++#define BFA_IOC_HWINIT_MAX	2
++#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
++
++/**
++ * Generic Scatter Gather Element used by driver
++ */
++struct bfa_sge {
++	u32	sg_len;
++	void	*sg_addr;
++};
++
++/**
++ * PCI device information required by IOC
++ */
++struct bfa_pcidev {
++	int	pci_slot;
++	u8	pci_func;
++	u16	device_id;
++	void	__iomem *pci_bar_kva;
++};
++
++/**
++ * Structure used to remember the DMA-able memory block's KVA and Physical
++ * Address
++ */
++struct bfa_dma {
++	void	*kva;	/*!< Kernel virtual address	*/
++	u64	pa;	/*!< Physical address		*/
++};
++
++#define BFA_DMA_ALIGN_SZ	256
++
++/**
++ * smem size for Crossbow and Catapult
++ */
++#define BFI_SMEM_CB_SIZE	0x200000U	/*!< 2MB for crossbow	*/
++#define BFI_SMEM_CT_SIZE	0x280000U	/*!< 2.5MB for catapult	*/
++
++/**
++ * @brief BFA dma address assignment macro
++ */
++#define bfa_dma_addr_set(dma_addr, pa)	\
++		__bfa_dma_addr_set(&(dma_addr), (u64)(pa))
++
++static inline void
++__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
++{
++	dma_addr->a32.addr_lo = (u32) pa;
++	dma_addr->a32.addr_hi = (u32) (upper_32_bits(pa));
++}
++
++/**
++ * @brief BFA dma address assignment macro. (big endian format)
++ */
++#define bfa_dma_be_addr_set(dma_addr, pa)	\
++		__bfa_dma_be_addr_set(&(dma_addr), (u64)(pa))
++static inline void
++__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
++{
++	dma_addr->a32.addr_lo = (u32) htonl(pa);
++	dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa));
++}
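
Usage sketch (the address value is illustrative): both setters split a
64-bit physical address into the two 32-bit halves of union bfi_addr_u;
the _be_ variant additionally passes each half through htonl().

	union bfi_addr_u addr;
	u64 pa = 0x0000000123456000ULL;	/* illustrative DMA address */

	bfa_dma_addr_set(addr, pa);	/* addr_lo = 0x23456000, addr_hi = 0x1 */
	bfa_dma_be_addr_set(addr, pa);	/* same halves, each through htonl() */
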
++
++struct bfa_ioc_regs {
++	void __iomem *hfn_mbox_cmd;
++	void __iomem *hfn_mbox;
++	void __iomem *lpu_mbox_cmd;
++	void __iomem *lpu_mbox;
++	void __iomem *pss_ctl_reg;
++	void __iomem *pss_err_status_reg;
++	void __iomem *app_pll_fast_ctl_reg;
++	void __iomem *app_pll_slow_ctl_reg;
++	void __iomem *ioc_sem_reg;
++	void __iomem *ioc_usage_sem_reg;
++	void __iomem *ioc_init_sem_reg;
++	void __iomem *ioc_usage_reg;
++	void __iomem *host_page_num_fn;
++	void __iomem *heartbeat;
++	void __iomem *ioc_fwstate;
++	void __iomem *ll_halt;
++	void __iomem *err_set;
++	void __iomem *shirq_isr_next;
++	void __iomem *shirq_msk_next;
++	void __iomem *smem_page_start;
++	u32	smem_pg0;
++};
++
++/**
++ * IOC Mailbox structures
++ */
++struct bfa_mbox_cmd {
++	struct list_head	qe;
++	u32			msg[BFI_IOC_MSGSZ];
++};
++
++/**
++ * IOC mailbox module
++ */
++typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m);
++struct bfa_ioc_mbox_mod {
++	struct list_head	cmd_q;		/*!< pending mbox queue	*/
++	int			nmclass;	/*!< number of handlers */
++	struct {
++		bfa_ioc_mbox_mcfunc_t	cbfn;	/*!< message handlers	*/
++		void			*cbarg;
++	} mbhdlr[BFI_MC_MAX];
++};
++
++/**
++ * IOC callback function interfaces
++ */
++typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
++typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
++typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
++typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa);
++struct bfa_ioc_cbfn {
++	bfa_ioc_enable_cbfn_t	enable_cbfn;
++	bfa_ioc_disable_cbfn_t	disable_cbfn;
++	bfa_ioc_hbfail_cbfn_t	hbfail_cbfn;
++	bfa_ioc_reset_cbfn_t	reset_cbfn;
++};
++
++/**
++ * Heartbeat failure notification queue element.
++ */
++struct bfa_ioc_hbfail_notify {
++	struct list_head	qe;
++	bfa_ioc_hbfail_cbfn_t	cbfn;
++	void			*cbarg;
++};
++
++/**
++ * Initialize a heartbeat failure notification structure
++ */
++#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do {	\
++	(__notify)->cbfn = (__cbfn);				\
++	(__notify)->cbarg = (__cbarg);				\
++} while (0)
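
Usage sketch of the heartbeat-failure hooks; the callback name and
argument are hypothetical, and the notify element must stay valid while
it sits on ioc->hb_notify_q:

	static void my_hbfail_cb(void *cbarg)
	{
		/* hypothetical handler: react to IOC heartbeat loss */
	}

	static struct bfa_ioc_hbfail_notify hb_notify;

	bfa_ioc_hbfail_init(&hb_notify, my_hbfail_cb, my_cbarg);
	bfa_ioc_hbfail_register(ioc, &hb_notify);
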
++
++struct bfa_ioc {
++	bfa_fsm_t		fsm;
++	struct bfa 		*bfa;
++	struct bfa_pcidev 	pcidev;
++	struct bfa_timer_mod	*timer_mod;
++	struct timer_list 	ioc_timer;
++	struct timer_list 	sem_timer;
++	struct timer_list	hb_timer;
++	u32			hb_count;
++	u32			retry_count;
++	struct list_head	hb_notify_q;
++	void			*dbg_fwsave;
++	int			dbg_fwsave_len;
++	bool			dbg_fwsave_once;
++	enum bfi_mclass		ioc_mc;
++	struct bfa_ioc_regs 	ioc_regs;
++	struct bfa_ioc_drv_stats stats;
++	bool			auto_recover;
++	bool			fcmode;
++	bool			ctdev;
++	bool			cna;
++	bool			pllinit;
++	bool   			stats_busy;	/*!< outstanding stats */
++	u8			port_id;
++
++	struct bfa_dma		attr_dma;
++	struct bfi_ioc_attr	*attr;
++	struct bfa_ioc_cbfn	*cbfn;
++	struct bfa_ioc_mbox_mod	mbox_mod;
++	struct bfa_ioc_hwif	*ioc_hwif;
++};
++
++struct bfa_ioc_hwif {
++	enum bfa_status (*ioc_pll_init) (void __iomem *rb, bool fcmode);
++	bool		(*ioc_firmware_lock)	(struct bfa_ioc *ioc);
++	void		(*ioc_firmware_unlock)	(struct bfa_ioc *ioc);
++	void		(*ioc_reg_init)	(struct bfa_ioc *ioc);
++	void		(*ioc_map_port)	(struct bfa_ioc *ioc);
++	void		(*ioc_isr_mode_set)	(struct bfa_ioc *ioc,
++					bool msix);
++	void		(*ioc_notify_hbfail)	(struct bfa_ioc *ioc);
++	void		(*ioc_ownership_reset)	(struct bfa_ioc *ioc);
++};
++
++#define bfa_ioc_pcifn(__ioc)		((__ioc)->pcidev.pci_func)
++#define bfa_ioc_devid(__ioc)		((__ioc)->pcidev.device_id)
++#define bfa_ioc_bar0(__ioc)		((__ioc)->pcidev.pci_bar_kva)
++#define bfa_ioc_portid(__ioc)		((__ioc)->port_id)
++#define bfa_ioc_fetch_stats(__ioc, __stats) \
++		(((__stats)->drv_stats) = (__ioc)->stats)
++#define bfa_ioc_clr_stats(__ioc)	\
++		memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
++#define bfa_ioc_maxfrsize(__ioc)	((__ioc)->attr->maxfrsize)
++#define bfa_ioc_rx_bbcredit(__ioc)	((__ioc)->attr->rx_bbcredit)
++#define bfa_ioc_speed_sup(__ioc)	\
++	BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
++#define bfa_ioc_get_nports(__ioc)	\
++	BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
++
++#define bfa_ioc_stats(_ioc, _stats)	((_ioc)->stats._stats++)
++#define BFA_IOC_FWIMG_MINSZ	(16 * 1024)
++#define BFA_IOC_FWIMG_TYPE(__ioc)					\
++	(((__ioc)->ctdev) ? 						\
++	 (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) :	\
++	 BFI_IMAGE_CB_FC)
++#define BFA_IOC_FW_SMEM_SIZE(__ioc)					\
++	(((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE)
++#define BFA_IOC_FLASH_CHUNK_NO(off)		((off) / BFI_FLASH_CHUNK_SZ_WORDS)
++#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off)	((off) % BFI_FLASH_CHUNK_SZ_WORDS)
++#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno)	((chunkno) * BFI_FLASH_CHUNK_SZ_WORDS)
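
Worked example of the chunk arithmetic: BFI_FLASH_CHUNK_SZ_WORDS is 64
(256 bytes / sizeof(u32), per bfi.h), so a word offset of 200 falls in
chunk 3, at word 8 within that chunk, and chunk 3 starts at word 192:

	BFA_IOC_FLASH_CHUNK_NO(200);		/* 200 / 64 == 3   */
	BFA_IOC_FLASH_OFFSET_IN_CHUNK(200);	/* 200 % 64 == 8   */
	BFA_IOC_FLASH_CHUNK_ADDR(3);		/* 3 * 64   == 192 */
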
++
++/**
++ * IOC mailbox interface
++ */
++void bfa_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd);
++void bfa_ioc_mbox_register(struct bfa_ioc *ioc,
++		bfa_ioc_mbox_mcfunc_t *mcfuncs);
++void bfa_ioc_mbox_isr(struct bfa_ioc *ioc);
++void bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len);
++void bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg);
++void bfa_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
++		bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
++
++/**
++ * IOC interfaces
++ */
++
++#define bfa_ioc_pll_init_asic(__ioc) \
++	((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
++			   (__ioc)->fcmode))
++
++enum bfa_status bfa_ioc_pll_init(struct bfa_ioc *ioc);
++enum bfa_status bfa_ioc_cb_pll_init(void __iomem *rb, bool fcmode);
++enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
++
++#define	bfa_ioc_isr_mode_set(__ioc, __msix)			\
++			((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
++#define	bfa_ioc_ownership_reset(__ioc)				\
++			((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
++
++void bfa_ioc_set_ct_hwif(struct bfa_ioc *ioc);
++
++void bfa_ioc_attach(struct bfa_ioc *ioc, void *bfa,
++		struct bfa_ioc_cbfn *cbfn);
++void bfa_ioc_auto_recover(bool auto_recover);
++void bfa_ioc_detach(struct bfa_ioc *ioc);
++void bfa_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
++		enum bfi_mclass mc);
++u32 bfa_ioc_meminfo(void);
++void bfa_ioc_mem_claim(struct bfa_ioc *ioc,  u8 *dm_kva, u64 dm_pa);
++void bfa_ioc_enable(struct bfa_ioc *ioc);
++void bfa_ioc_disable(struct bfa_ioc *ioc);
++bool bfa_ioc_intx_claim(struct bfa_ioc *ioc);
++
++void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
++		u32 boot_param);
++void bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *msg);
++void bfa_ioc_error_isr(struct bfa_ioc *ioc);
++bool bfa_ioc_is_operational(struct bfa_ioc *ioc);
++bool bfa_ioc_is_initialized(struct bfa_ioc *ioc);
++bool bfa_ioc_is_disabled(struct bfa_ioc *ioc);
++bool bfa_ioc_fw_mismatch(struct bfa_ioc *ioc);
++bool bfa_ioc_adapter_is_disabled(struct bfa_ioc *ioc);
++void bfa_ioc_cfg_complete(struct bfa_ioc *ioc);
++enum bfa_ioc_type bfa_ioc_get_type(struct bfa_ioc *ioc);
++void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num);
++void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver);
++void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver);
++void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
++void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
++		char *manufacturer);
++void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev);
++enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc *ioc);
++
++void bfa_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
++void bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
++		struct bfa_adapter_attr *ad_attr);
++u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
++u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
++void bfa_ioc_set_fcmode(struct bfa_ioc *ioc);
++bool bfa_ioc_get_fcmode(struct bfa_ioc *ioc);
++void bfa_ioc_hbfail_register(struct bfa_ioc *ioc,
++	struct bfa_ioc_hbfail_notify *notify);
++bool bfa_ioc_sem_get(void __iomem *sem_reg);
++void bfa_ioc_sem_release(void __iomem *sem_reg);
++void bfa_ioc_hw_sem_release(struct bfa_ioc *ioc);
++void bfa_ioc_fwver_get(struct bfa_ioc *ioc,
++			struct bfi_ioc_image_hdr *fwhdr);
++bool bfa_ioc_fwver_cmp(struct bfa_ioc *ioc,
++			struct bfi_ioc_image_hdr *fwhdr);
++
++/*
++ * Timeout APIs
++ */
++void bfa_ioc_timeout(void *ioc);
++void bfa_ioc_hb_check(void *ioc);
++void bfa_ioc_sem_timeout(void *ioc);
++
++/*
++ * bfa mfg wwn API functions
++ */
++u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
++u64 bfa_ioc_get_nwwn(struct bfa_ioc *ioc);
++mac_t bfa_ioc_get_mac(struct bfa_ioc *ioc);
++u64 bfa_ioc_get_mfg_pwwn(struct bfa_ioc *ioc);
++u64 bfa_ioc_get_mfg_nwwn(struct bfa_ioc *ioc);
++mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc);
++u64 bfa_ioc_get_adid(struct bfa_ioc *ioc);
++
++/*
++ * F/W Image Size & Chunk
++ */
++u32 *bfa_cb_image_get_chunk(int type, u32 off);
++u32 bfa_cb_image_get_size(int type);
++
++#endif /* __BFA_IOC_H__ */
+--- /dev/null
++++ b/drivers/net/bna/bfa_ioc_ct.c
+@@ -0,0 +1,391 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++
++#include "bfa_ioc.h"
++#include "cna.h"
++#include "bfi.h"
++#include "bfi_ctreg.h"
++#include "bfa_defs.h"
++
++/*
++ * forward declarations
++ */
++static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
++static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
++static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
++static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
++static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
++static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
++static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
++
++static struct bfa_ioc_hwif hwif_ct;
++
++/**
++ * Called from bfa_ioc_attach() to map asic specific calls.
++ */
++void
++bfa_ioc_set_ct_hwif(struct bfa_ioc *ioc)
++{
++	hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
++	hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
++	hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
++	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
++	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
++	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
++	hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
++	hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
++
++	ioc->ioc_hwif = &hwif_ct;
++}
++
++/**
++ * Return true if the current driver's firmware matches the running firmware.
++ */
++static bool
++bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
++{
++	enum bfi_ioc_state ioc_fwstate;
++	u32 usecnt;
++	struct bfi_ioc_image_hdr fwhdr;
++
++	/* Firmware match check is relevant only for CNA. */
++	if (!ioc->cna)
++		return true;
++
++	/* If BIOS boot (flash based) -- do not increment usage count */
++	if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
++						BFA_IOC_FWIMG_MINSZ)
++		return true;
++
++	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
++	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
++
++	/* If usage count is 0, always return true. */
++	if (usecnt == 0) {
++		writel(1, ioc->ioc_regs.ioc_usage_reg);
++		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
++		return true;
++	}
++
++	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
++
++	/* The use count cannot be non-zero while the chip is uninitialized. */
++	BUG_ON(ioc_fwstate == BFI_IOC_UNINIT);
++
++	/* Check if another driver with a different firmware is active */
++	bfa_ioc_fwver_get(ioc, &fwhdr);
++	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
++		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
++		return false;
++	}
++
++	/* Same firmware version. Increment the reference count. */
++	usecnt++;
++	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
++	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
++	return true;
++}
++
++static void
++bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
++{
++	u32 usecnt;
++
++	/* Firmware lock is relevant only for CNA. */
++	if (!ioc->cna)
++		return;
++
++	/* If BIOS boot (flash based) -- do not decrement usage count */
++	if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
++						BFA_IOC_FWIMG_MINSZ)
++		return;
++
++	/* decrement usage count */
++	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
++	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
++	BUG_ON(usecnt == 0);
++
++	usecnt--;
++	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
++
++	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
++}
++
++/**
++ * Notify other functions on HB failure.
++ */
++static void
++bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc)
++{
++	if (ioc->cna) {
++		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
++		/* Wait for halt to take effect */
++		readl(ioc->ioc_regs.ll_halt);
++	} else {
++		writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
++		readl(ioc->ioc_regs.err_set);
++	}
++}
++
++/**
++ * Host to LPU mailbox message addresses
++ */
++static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
++	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
++	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
++	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
++	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
++};
++
++/**
++ * Host <-> LPU mailbox command/status registers - port 0
++ */
++static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
++	{ HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
++	{ HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
++	{ HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
++	{ HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
++};
++
++/**
++ * Host <-> LPU mailbox command/status registers - port 1
++ */
++static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
++	{ HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
++	{ HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
++	{ HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
++	{ HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
++};
++
++static void
++bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
++{
++	void __iomem *rb;
++	int		pcifn = bfa_ioc_pcifn(ioc);
++
++	rb = bfa_ioc_bar0(ioc);
++
++	ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
++	ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
++	ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
++
++	if (ioc->port_id == 0) {
++		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
++		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
++		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
++		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
++		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
++	} else {
++		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
++		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
++		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
++		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
++		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
++	}
++
++	/*
++	 * PSS control registers
++	 */
++	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
++	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
++	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
++	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
++
++	/*
++	 * IOC semaphore registers and serialization
++	 */
++	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
++	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
++	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
++	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
++
++	/* SRAM memory access */
++	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
++	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
++
++	/*
++	 * err set reg : for notification of hb failure in fcmode
++	 */
++	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
++}
++
++/**
++ * Initialize IOC to port mapping.
++ */
++
++#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
++static void
++bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
++{
++	void __iomem *rb = ioc->pcidev.pci_bar_kva;
++	u32	r32;
++
++	/* For catapult, base port id on personality register and IOC type */
++	r32 = readl(rb + FNC_PERS_REG);
++	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
++	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
++}
++
++/**
++ * Set interrupt mode for a function: INTX or MSIX
++ */
++static void
++bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
++{
++	void __iomem *rb = ioc->pcidev.pci_bar_kva;
++	u32	r32, mode;
++
++	r32 = readl(rb + FNC_PERS_REG);
++
++	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
++		__F0_INTX_STATUS;
++
++	/* If already in desired mode, do not change anything */
++	if (!msix && mode)
++		return;
++
++	if (msix)
++		mode = __F0_INTX_STATUS_MSIX;
++	else
++		mode = __F0_INTX_STATUS_INTA;
++
++	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
++	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
++
++	writel(r32, rb + FNC_PERS_REG);
++}
++
++/**
++ * Cleanup hw semaphore and usecnt registers
++ */
++static void
++bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
++{
++	if (ioc->cna) {
++		bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
++		writel(0, ioc->ioc_regs.ioc_usage_reg);
++		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
++	}
++
++	/*
++	 * Read the hw sem reg to make sure that it is locked
++	 * before we clear it. If it is not locked, writing 1
++	 * will lock it instead of clearing it.
++	 */
++	readl(ioc->ioc_regs.ioc_sem_reg);
++	bfa_ioc_hw_sem_release(ioc);
++}
++
++enum bfa_status
++bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
++{
++	u32	pll_sclk, pll_fclk, r32;
++
++	pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
++		__APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
++		__APP_PLL_312_JITLMT0_1(3U) |
++		__APP_PLL_312_CNTLMT0_1(1U);
++	pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
++		__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
++		__APP_PLL_425_JITLMT0_1(3U) |
++		__APP_PLL_425_CNTLMT0_1(1U);
++	if (fcmode) {
++		writel(0, (rb + OP_MODE));
++		writel(__APP_EMS_CMLCKSEL |
++				__APP_EMS_REFCKBUFEN2 |
++				__APP_EMS_CHANNEL_SEL,
++				(rb + ETH_MAC_SER_REG));
++	} else {
++		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
++		writel(__APP_EMS_REFCKBUFEN1,
++				(rb + ETH_MAC_SER_REG));
++	}
++	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
++	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
++	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
++	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
++	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
++	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
++	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
++	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
++	writel(pll_sclk |
++		__APP_PLL_312_LOGIC_SOFT_RESET,
++		rb + APP_PLL_312_CTL_REG);
++	writel(pll_fclk |
++		__APP_PLL_425_LOGIC_SOFT_RESET,
++		rb + APP_PLL_425_CTL_REG);
++	writel(pll_sclk |
++		__APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
++		rb + APP_PLL_312_CTL_REG);
++	writel(pll_fclk |
++		__APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
++		rb + APP_PLL_425_CTL_REG);
++	readl(rb + HOSTFN0_INT_MSK);
++	udelay(2000);
++	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
++	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
++	writel(pll_sclk |
++		__APP_PLL_312_ENABLE,
++		rb + APP_PLL_312_CTL_REG);
++	writel(pll_fclk |
++		__APP_PLL_425_ENABLE,
++		rb + APP_PLL_425_CTL_REG);
++	if (!fcmode) {
++		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
++		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
++	}
++	r32 = readl((rb + PSS_CTL_REG));
++	r32 &= ~__PSS_LMEM_RESET;
++	writel(r32, (rb + PSS_CTL_REG));
++	udelay(1000);
++	if (!fcmode) {
++		writel(0, (rb + PMM_1T_RESET_REG_P0));
++		writel(0, (rb + PMM_1T_RESET_REG_P1));
++	}
++
++	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
++	udelay(1000);
++	r32 = readl((rb + MBIST_STAT_REG));
++	writel(0, (rb + MBIST_CTL_REG));
++	return BFA_STATUS_OK;
++}
+--- /dev/null
++++ b/drivers/net/bna/bfa_sm.h
+@@ -0,0 +1,88 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++
++/**
++ * @file bfa_sm.h State machine definitions
++ */
++
++#ifndef __BFA_SM_H__
++#define __BFA_SM_H__
++
++#include "cna.h"
++
++typedef void (*bfa_sm_t)(void *sm, int event);
++
++/**
++ * oc - object class, e.g. bfa_ioc
++ * st - state, e.g. reset
++ * otype - object type, e.g. struct bfa_ioc
++ * etype - event type, e.g. enum ioc_event
++ */
++#define bfa_sm_state_decl(oc, st, otype, etype)		\
++	static void oc ## _sm_ ## st(otype * fsm, etype event)
++
++#define bfa_sm_set_state(_sm, _state)	((_sm)->sm = (bfa_sm_t)(_state))
++#define bfa_sm_send_event(_sm, _event)	((_sm)->sm((_sm), (_event)))
++#define bfa_sm_get_state(_sm)		((_sm)->sm)
++#define bfa_sm_cmp_state(_sm, _state)	((_sm)->sm == (bfa_sm_t)(_state))
++
++/**
++ * For converting from state machine function to state encoding.
++ */
++struct bfa_sm_table {
++	bfa_sm_t	sm;	/*!< state machine function	*/
++	int		state;	/*!< state machine encoding	*/
++	char		*name;	/*!< state name for display	*/
++};
++#define BFA_SM(_sm)	((bfa_sm_t)(_sm))
++
++/**
++ * State machine with entry actions.
++ */
++typedef void (*bfa_fsm_t)(void *fsm, int event);
++
++/**
++ * oc - object class, e.g. bfa_ioc
++ * st - state, e.g. reset
++ * otype - object type, e.g. struct bfa_ioc
++ * etype - event type, e.g. enum ioc_event
++ */
++#define bfa_fsm_state_decl(oc, st, otype, etype)		\
++	static void oc ## _sm_ ## st(otype * fsm, etype event);	\
++	static void oc ## _sm_ ## st ## _entry(otype * fsm)
++
++#define bfa_fsm_set_state(_fsm, _state) do {	\
++	(_fsm)->fsm = (bfa_fsm_t)(_state);	\
++	_state ## _entry(_fsm);			\
++} while (0)
++
++#define bfa_fsm_send_event(_fsm, _event)	((_fsm)->fsm((_fsm), (_event)))
++#define bfa_fsm_get_state(_fsm)			((_fsm)->fsm)
++#define bfa_fsm_cmp_state(_fsm, _state)		\
++	((_fsm)->fsm == (bfa_fsm_t)(_state))
++
++static inline int
++bfa_sm_to_state(struct bfa_sm_table *smt, bfa_sm_t sm)
++{
++	int	i = 0;
++
++	while (smt[i].sm && smt[i].sm != sm)
++		i++;
++	return smt[i].state;
++}
++#endif
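
To illustrate the convention these macros encode (all names below are
hypothetical): the object must carry a member named 'fsm', and
bfa_fsm_set_state() runs the new state's _entry handler before any
events are dispatched through bfa_fsm_send_event():

	struct my_obj {
		bfa_fsm_t fsm;	/* member name the macros expect */
	};
	enum my_event { MY_E_START = 1 };

	bfa_fsm_state_decl(my_obj, stopped, struct my_obj, enum my_event);

	static void my_obj_sm_stopped_entry(struct my_obj *obj) { }
	static void my_obj_sm_stopped(struct my_obj *obj, enum my_event event) { }

	/* given struct my_obj *obj: enter 'stopped', then post an event */
	bfa_fsm_set_state(obj, my_obj_sm_stopped);
	bfa_fsm_send_event(obj, MY_E_START);
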
+--- /dev/null
++++ b/drivers/net/bna/bfa_wc.h
+@@ -0,0 +1,69 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++
++/**
++ * @file bfa_wc.h Generic wait counter.
++ */
++
++#ifndef __BFA_WC_H__
++#define __BFA_WC_H__
++
++typedef void (*bfa_wc_resume_t) (void *cbarg);
++
++struct bfa_wc {
++	bfa_wc_resume_t wc_resume;
++	void		*wc_cbarg;
++	int		wc_count;
++};
++
++static inline void
++bfa_wc_up(struct bfa_wc *wc)
++{
++	wc->wc_count++;
++}
++
++static inline void
++bfa_wc_down(struct bfa_wc *wc)
++{
++	wc->wc_count--;
++	if (wc->wc_count == 0)
++		wc->wc_resume(wc->wc_cbarg);
++}
++
++/**
++ * Initialize a waiting counter.
++ */
++static inline void
++bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
++{
++	wc->wc_resume = wc_resume;
++	wc->wc_cbarg = wc_cbarg;
++	wc->wc_count = 0;
++	bfa_wc_up(wc);
++}
++
++/**
++ * Wait for counter to reach zero
++ */
++static inline void
++bfa_wc_wait(struct bfa_wc *wc)
++{
++	bfa_wc_down(wc);
++}
++
++#endif
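
Usage sketch (hypothetical names): bfa_wc_init() starts the count at
one so the initiator holds a reference; each pending operation adds one
with bfa_wc_up() and drops it on completion with bfa_wc_down();
bfa_wc_wait() releases the initial reference, so wc_resume fires once
everything else has completed:

	static void my_resume(void *cbarg)
	{
		/* hypothetical: continue once all pending work is done */
	}

	struct bfa_wc wc;

	bfa_wc_init(&wc, my_resume, my_cbarg);	/* wc_count == 1 */
	bfa_wc_up(&wc);				/* one per pending operation */
	/* ... each completion path calls bfa_wc_down(&wc) ... */
	bfa_wc_wait(&wc);			/* my_resume() runs at zero */
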
+--- /dev/null
++++ b/drivers/net/bna/bfi.h
+@@ -0,0 +1,392 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++
++#ifndef __BFI_H__
++#define __BFI_H__
++
++#include "bfa_defs.h"
++
++#pragma pack(1)
++
++/**
++ * BFI FW image type
++ */
++#define	BFI_FLASH_CHUNK_SZ			256	/*!< Flash chunk size */
++#define	BFI_FLASH_CHUNK_SZ_WORDS	(BFI_FLASH_CHUNK_SZ/sizeof(u32))
++enum {
++	BFI_IMAGE_CB_FC,
++	BFI_IMAGE_CT_FC,
++	BFI_IMAGE_CT_CNA,
++	BFI_IMAGE_MAX,
++};
++
++/**
++ * Msg header common to all msgs
++ */
++struct bfi_mhdr {
++	u8		msg_class;	/*!< @ref enum bfi_mclass	    */
++	u8		msg_id;		/*!< msg opcode with in the class   */
++	union {
++		struct {
++			u8	rsvd;
++			u8	lpu_id;	/*!< msg destination		    */
++		} h2i;
++		u16	i2htok;	/*!< token in msgs to host	    */
++	} mtag;
++};
++
++#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do {		\
++	(_mh).msg_class 		= (_mc);		\
++	(_mh).msg_id			= (_op);		\
++	(_mh).mtag.h2i.lpu_id	= (_lpuid);			\
++} while (0)
++
++#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do {		\
++	(_mh).msg_class 		= (_mc);		\
++	(_mh).msg_id			= (_op);		\
++	(_mh).mtag.i2htok		= (_i2htok);		\
++} while (0)
++
++/*
++ * Message opcodes: 0-127 to firmware, 128-255 to host
++ */
++#define BFI_I2H_OPCODE_BASE	128
++#define BFA_I2HM(_x) 			((_x) + BFI_I2H_OPCODE_BASE)
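
Reply opcodes therefore mirror their requests offset by 128, e.g.
BFA_I2HM(1) == 129. A hypothetical sender stamps a header with
bfi_h2i_set() before queueing the mailbox command:

	struct bfi_ioc_ctrl_req req;

	/* class BFI_MC_IOC, opcode BFI_IOC_H2I_ENABLE_REQ (== 1), LPU 0 */
	bfi_h2i_set(req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, 0);
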
++
++/**
++ ****************************************************************************
++ *
++ * Scatter Gather Element and Page definition
++ *
++ ****************************************************************************
++ */
++
++#define BFI_SGE_INLINE	1
++#define BFI_SGE_INLINE_MAX	(BFI_SGE_INLINE + 1)
++
++/**
++ * SG Flags
++ */
++enum {
++	BFI_SGE_DATA		= 0,	/*!< data address, not last	     */
++	BFI_SGE_DATA_CPL	= 1,	/*!< data addr, last in current page */
++	BFI_SGE_DATA_LAST	= 3,	/*!< data address, last		     */
++	BFI_SGE_LINK		= 2,	/*!< link address		     */
++	BFI_SGE_PGDLEN		= 2,	/*!< cumulative data length for page */
++};
++
++/**
++ * DMA addresses
++ */
++union bfi_addr_u {
++	struct {
++		u32	addr_lo;
++		u32	addr_hi;
++	} a32;
++};
++
++/**
++ * Scatter Gather Element
++ */
++struct bfi_sge {
++#ifdef __BIGENDIAN
++	u32	flags:2,
++			rsvd:2,
++			sg_len:28;
++#else
++	u32	sg_len:28,
++			rsvd:2,
++			flags:2;
++#endif
++	union bfi_addr_u sga;
++};
++
++/**
++ * Scatter Gather Page
++ */
++#define BFI_SGPG_DATA_SGES		7
++#define BFI_SGPG_SGES_MAX		(BFI_SGPG_DATA_SGES + 1)
++#define BFI_SGPG_RSVD_WD_LEN	8
++struct bfi_sgpg {
++	struct bfi_sge sges[BFI_SGPG_SGES_MAX];
++	u32	rsvd[BFI_SGPG_RSVD_WD_LEN];
++};
++
++/*
++ * Large message structure - 128 byte messages
++ */
++#define BFI_LMSG_SZ		128
++#define BFI_LMSG_PL_WSZ	\
++			((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
++
++struct bfi_msg {
++	struct bfi_mhdr mhdr;
++	u32	pl[BFI_LMSG_PL_WSZ];
++};
++
++/**
++ * Mailbox message structure
++ */
++#define BFI_MBMSG_SZ		7
++struct bfi_mbmsg {
++	struct bfi_mhdr mh;
++	u32		pl[BFI_MBMSG_SZ];
++};
++
++/**
++ * Message Classes
++ */
++enum bfi_mclass {
++	BFI_MC_IOC		= 1,	/*!< IO Controller (IOC)	    */
++	BFI_MC_DIAG		= 2,	/*!< Diagnostic Msgs		    */
++	BFI_MC_FLASH		= 3,	/*!< Flash message class	    */
++	BFI_MC_CEE		= 4,	/*!< CEE			    */
++	BFI_MC_FCPORT		= 5,	/*!< FC port			    */
++	BFI_MC_IOCFC		= 6,	/*!< FC - IO Controller (IOC)	    */
++	BFI_MC_LL		= 7,	/*!< Link Layer			    */
++	BFI_MC_UF		= 8,	/*!< Unsolicited frame receive	    */
++	BFI_MC_FCXP		= 9,	/*!< FC Transport		    */
++	BFI_MC_LPS		= 10,	/*!< lport fc login services	    */
++	BFI_MC_RPORT		= 11,	/*!< Remote port		    */
++	BFI_MC_ITNIM		= 12,	/*!< I-T nexus (Initiator mode)	    */
++	BFI_MC_IOIM_READ	= 13,	/*!< read IO (Initiator mode)	    */
++	BFI_MC_IOIM_WRITE	= 14,	/*!< write IO (Initiator mode)	    */
++	BFI_MC_IOIM_IO		= 15,	/*!< IO (Initiator mode)	    */
++	BFI_MC_IOIM		= 16,	/*!< IO (Initiator mode)	    */
++	BFI_MC_IOIM_IOCOM	= 17,	/*!< good IO completion		    */
++	BFI_MC_TSKIM		= 18,	/*!< Initiator Task management	    */
++	BFI_MC_SBOOT		= 19,	/*!< SAN boot services		    */
++	BFI_MC_IPFC		= 20,	/*!< IP over FC Msgs		    */
++	BFI_MC_PORT		= 21,	/*!< Physical port		    */
++	BFI_MC_SFP		= 22,	/*!< SFP module			    */
++	BFI_MC_MSGQ		= 23,	/*!< MSGQ			    */
++	BFI_MC_ENET		= 24,	/*!< ENET commands/responses	    */
++	BFI_MC_MAX		= 32
++};
++
++#define BFI_IOC_MAX_CQS		4
++#define BFI_IOC_MAX_CQS_ASIC	8
++#define BFI_IOC_MSGLEN_MAX	32	/* 32 bytes */
++
++#define BFI_BOOT_TYPE_OFF		8
++#define BFI_BOOT_PARAM_OFF		12
++
++#define BFI_BOOT_TYPE_NORMAL 		0	/* param is device id */
++#define	BFI_BOOT_TYPE_FLASH		1
++#define	BFI_BOOT_TYPE_MEMTEST		2
++
++#define BFI_BOOT_MEMTEST_RES_ADDR   0x900
++#define BFI_BOOT_MEMTEST_RES_SIG    0xA0A1A2A3
++
++/**
++ *----------------------------------------------------------------------
++ *				IOC
++ *----------------------------------------------------------------------
++ */
++
++enum bfi_ioc_h2i_msgs {
++	BFI_IOC_H2I_ENABLE_REQ		= 1,
++	BFI_IOC_H2I_DISABLE_REQ		= 2,
++	BFI_IOC_H2I_GETATTR_REQ		= 3,
++	BFI_IOC_H2I_DBG_SYNC		= 4,
++	BFI_IOC_H2I_DBG_DUMP		= 5,
++};
++
++enum bfi_ioc_i2h_msgs {
++	BFI_IOC_I2H_ENABLE_REPLY	= BFA_I2HM(1),
++	BFI_IOC_I2H_DISABLE_REPLY 	= BFA_I2HM(2),
++	BFI_IOC_I2H_GETATTR_REPLY 	= BFA_I2HM(3),
++	BFI_IOC_I2H_READY_EVENT 	= BFA_I2HM(4),
++	BFI_IOC_I2H_HBEAT		= BFA_I2HM(5),
++};
++
++/**
++ * BFI_IOC_H2I_GETATTR_REQ message
++ */
++struct bfi_ioc_getattr_req {
++	struct bfi_mhdr mh;
++	union bfi_addr_u	attr_addr;
++};
++
++struct bfi_ioc_attr {
++	u64		mfg_pwwn;	/*!< Mfg port wwn	   */
++	u64		mfg_nwwn;	/*!< Mfg node wwn	   */
++	mac_t		mfg_mac;	/*!< Mfg mac		   */
++	u16	rsvd_a;
++	u64		pwwn;
++	u64		nwwn;
++	mac_t		mac;		/*!< PBC or Mfg mac	   */
++	u16	rsvd_b;
++	mac_t		fcoe_mac;
++	u16	rsvd_c;
++	char		brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
++	u8		pcie_gen;
++	u8		pcie_lanes_orig;
++	u8		pcie_lanes;
++	u8		rx_bbcredit;	/*!< receive buffer credits */
++	u32	adapter_prop;	/*!< adapter properties     */
++	u16	maxfrsize;	/*!< max receive frame size */
++	char		asic_rev;
++	u8		rsvd_d;
++	char		fw_version[BFA_VERSION_LEN];
++	char		optrom_version[BFA_VERSION_LEN];
++	struct bfa_mfg_vpd vpd;
++	u32	card_type;	/*!< card type			*/
++};
++
++/**
++ * BFI_IOC_I2H_GETATTR_REPLY message
++ */
++struct bfi_ioc_getattr_reply {
++	struct bfi_mhdr mh;	/*!< Common msg header		*/
++	u8			status;	/*!< cfg reply status		*/
++	u8			rsvd[3];
++};
++
++/**
++ * Firmware memory page offsets
++ */
++#define BFI_IOC_SMEM_PG0_CB	(0x40)
++#define BFI_IOC_SMEM_PG0_CT	(0x180)
++
++/**
++ * Firmware statistic offset
++ */
++#define BFI_IOC_FWSTATS_OFF	(0x6B40)
++#define BFI_IOC_FWSTATS_SZ	(4096)
++
++/**
++ * Firmware trace offset
++ */
++#define BFI_IOC_TRC_OFF		(0x4b00)
++#define BFI_IOC_TRC_ENTS	256
++
++#define BFI_IOC_FW_SIGNATURE	(0xbfadbfad)
++#define BFI_IOC_MD5SUM_SZ	4
++struct bfi_ioc_image_hdr {
++	u32	signature;	/*!< constant signature */
++	u32	rsvd_a;
++	u32	exec;		/*!< exec vector	*/
++	u32	param;		/*!< parameters		*/
++	u32	rsvd_b[4];
++	u32	md5sum[BFI_IOC_MD5SUM_SZ];
++};
++
++/**
++ *  BFI_IOC_I2H_READY_EVENT message
++ */
++struct bfi_ioc_rdy_event {
++	struct bfi_mhdr mh;		/*!< common msg header */
++	u8			init_status;	/*!< init event status */
++	u8			rsvd[3];
++};
++
++struct bfi_ioc_hbeat {
++	struct bfi_mhdr mh;		/*!< common msg header		*/
++	u32	   hb_count;	/*!< current heart beat count	*/
++};
++
++/**
++ * IOC hardware/firmware state
++ */
++enum bfi_ioc_state {
++	BFI_IOC_UNINIT		= 0,	/*!< not initialized		     */
++	BFI_IOC_INITING		= 1,	/*!< h/w is being initialized	     */
++	BFI_IOC_HWINIT		= 2,	/*!< h/w is initialized		     */
++	BFI_IOC_CFG		= 3,	/*!< IOC configuration in progress   */
++	BFI_IOC_OP		= 4,	/*!< IOC is operational		     */
++	BFI_IOC_DISABLING	= 5,	/*!< IOC is being disabled	     */
++	BFI_IOC_DISABLED	= 6,	/*!< IOC is disabled		     */
++	BFI_IOC_CFG_DISABLED	= 7,	/*!< IOC is being disabled; transient */
++	BFI_IOC_FAIL		= 8,	/*!< IOC heart-beat failure	     */
++	BFI_IOC_MEMTEST		= 9,	/*!< IOC is doing memtest	     */
++};
++
++#define BFI_IOC_ENDIAN_SIG  0x12345678
++
++enum {
++	BFI_ADAPTER_TYPE_FC	= 0x01,		/*!< FC adapters	   */
++	BFI_ADAPTER_TYPE_MK	= 0x0f0000,	/*!< adapter type mask     */
++	BFI_ADAPTER_TYPE_SH	= 16,	        /*!< adapter type shift    */
++	BFI_ADAPTER_NPORTS_MK	= 0xff00,	/*!< number of ports mask  */
++	BFI_ADAPTER_NPORTS_SH	= 8,	        /*!< number of ports shift */
++	BFI_ADAPTER_SPEED_MK	= 0xff,		/*!< adapter speed mask    */
++	BFI_ADAPTER_SPEED_SH	= 0,	        /*!< adapter speed shift   */
++	BFI_ADAPTER_PROTO	= 0x100000,	/*!< prototype adapters	   */
++	BFI_ADAPTER_TTV		= 0x200000,	/*!< TTV debug capable     */
++	BFI_ADAPTER_UNSUPP	= 0x400000,	/*!< unknown adapter type  */
++};
++
++#define BFI_ADAPTER_GETP(__prop, __adap_prop)			\
++	(((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >>	\
++		BFI_ADAPTER_ ## __prop ## _SH)
++#define BFI_ADAPTER_SETP(__prop, __val)				\
++	((__val) << BFI_ADAPTER_ ## __prop ## _SH)
++#define BFI_ADAPTER_IS_PROTO(__adap_type)			\
++	((__adap_type) & BFI_ADAPTER_PROTO)
++#define BFI_ADAPTER_IS_TTV(__adap_type)				\
++	((__adap_type) & BFI_ADAPTER_TTV)
++#define BFI_ADAPTER_IS_UNSUPP(__adap_type)			\
++	((__adap_type) & BFI_ADAPTER_UNSUPP)
++#define BFI_ADAPTER_IS_SPECIAL(__adap_type)			\
++	((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO |	\
++			BFI_ADAPTER_UNSUPP))
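
Sketch of packing and unpacking adapter_prop with these helpers (the
field values are illustrative):

	u32 prop = BFI_ADAPTER_SETP(TYPE, BFI_ADAPTER_TYPE_FC) |
		   BFI_ADAPTER_SETP(NPORTS, 2) |
		   BFI_ADAPTER_SETP(SPEED, 10);	/* == 0x0001020a */

	BFI_ADAPTER_GETP(NPORTS, prop);		/* == 2  */
	BFI_ADAPTER_GETP(SPEED, prop);		/* == 10 */
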
++
++/**
++ * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
++ */
++struct bfi_ioc_ctrl_req {
++	struct bfi_mhdr mh;
++	u8			ioc_class;
++	u8			rsvd[3];
++	u32		tv_sec;
++};
++
++/**
++ * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
++ */
++struct bfi_ioc_ctrl_reply {
++	struct bfi_mhdr mh;		/*!< Common msg header     */
++	u8			status;		/*!< enable/disable status */
++	u8			rsvd[3];
++};
++
++#define BFI_IOC_MSGSZ   8
++/**
++ * H2I Messages
++ */
++union bfi_ioc_h2i_msg_u {
++	struct bfi_mhdr mh;
++	struct bfi_ioc_ctrl_req enable_req;
++	struct bfi_ioc_ctrl_req disable_req;
++	struct bfi_ioc_getattr_req getattr_req;
++	u32			mboxmsg[BFI_IOC_MSGSZ];
++};
++
++/**
++ * I2H Messages
++ */
++union bfi_ioc_i2h_msg_u {
++	struct bfi_mhdr mh;
++	struct bfi_ioc_rdy_event rdy_event;
++	u32			mboxmsg[BFI_IOC_MSGSZ];
++};
++
++#pragma pack()
++
++#endif /* __BFI_H__ */
+--- /dev/null
++++ b/drivers/net/bna/bfi_cna.h
+@@ -0,0 +1,199 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++#ifndef __BFI_CNA_H__
++#define __BFI_CNA_H__
++
++#include "bfi.h"
++#include "bfa_defs_cna.h"
++
++#pragma pack(1)
++
++enum bfi_port_h2i {
++	BFI_PORT_H2I_ENABLE_REQ		= (1),
++	BFI_PORT_H2I_DISABLE_REQ	= (2),
++	BFI_PORT_H2I_GET_STATS_REQ	= (3),
++	BFI_PORT_H2I_CLEAR_STATS_REQ	= (4),
++};
++
++enum bfi_port_i2h {
++	BFI_PORT_I2H_ENABLE_RSP		= BFA_I2HM(1),
++	BFI_PORT_I2H_DISABLE_RSP	= BFA_I2HM(2),
++	BFI_PORT_I2H_GET_STATS_RSP	= BFA_I2HM(3),
++	BFI_PORT_I2H_CLEAR_STATS_RSP	= BFA_I2HM(4),
++};
++
++/**
++ * Generic REQ type
++ */
++struct bfi_port_generic_req {
++	struct bfi_mhdr mh;		/*!< msg header			    */
++	u32	msgtag;		/*!< msgtag for reply		    */
++	u32	rsvd;
++};
++
++/**
++ * Generic RSP type
++ */
++struct bfi_port_generic_rsp {
++	struct bfi_mhdr mh;		/*!< common msg header		    */
++	u8		status;		/*!< port enable status		    */
++	u8		rsvd[3];
++	u32	msgtag;		/*!< msgtag for reply		    */
++};
++
++/**
++ * @todo
++ * BFI_PORT_H2I_ENABLE_REQ
++ */
++
++/**
++ * @todo
++ * BFI_PORT_I2H_ENABLE_RSP
++ */
++
++/**
++ * BFI_PORT_H2I_DISABLE_REQ
++ */
++
++/**
++ * BFI_PORT_I2H_DISABLE_RSP
++ */
++
++/**
++ * BFI_PORT_H2I_GET_STATS_REQ
++ */
++struct bfi_port_get_stats_req {
++	struct bfi_mhdr mh;		/*!< common msg header		    */
++	union bfi_addr_u   dma_addr;
++};
++
++/**
++ * BFI_PORT_I2H_GET_STATS_RSP
++ */
++
++/**
++ * BFI_PORT_H2I_CLEAR_STATS_REQ
++ */
++
++/**
++ * BFI_PORT_I2H_CLEAR_STATS_RSP
++ */
++
++union bfi_port_h2i_msg_u {
++	struct bfi_mhdr mh;
++	struct bfi_port_generic_req enable_req;
++	struct bfi_port_generic_req disable_req;
++	struct bfi_port_get_stats_req getstats_req;
++	struct bfi_port_generic_req clearstats_req;
++};
++
++union bfi_port_i2h_msg_u {
++	struct bfi_mhdr mh;
++	struct bfi_port_generic_rsp enable_rsp;
++	struct bfi_port_generic_rsp disable_rsp;
++	struct bfi_port_generic_rsp getstats_rsp;
++	struct bfi_port_generic_rsp clearstats_rsp;
++};
++
++/* @brief Mailbox commands from host to (DCBX/LLDP) firmware */
++enum bfi_cee_h2i_msgs {
++	BFI_CEE_H2I_GET_CFG_REQ = 1,
++	BFI_CEE_H2I_RESET_STATS = 2,
++	BFI_CEE_H2I_GET_STATS_REQ = 3,
++};
++
++/* @brief Mailbox reply and AEN messages from DCBX/LLDP firmware to host */
++enum bfi_cee_i2h_msgs {
++	BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1),
++	BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2),
++	BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3),
++};
++
++/* Data structures */
++
++/*
++ * @brief H2I command structure for resetting the stats.
++ * BFI_CEE_H2I_RESET_STATS
++ */
++struct bfi_lldp_reset_stats {
++	struct bfi_mhdr mh;
++};
++
++/*
++ * @brief H2I command structure for resetting the stats.
++ * BFI_CEE_H2I_RESET_STATS
++ */
++struct bfi_cee_reset_stats {
++	struct bfi_mhdr mh;
++};
++
++/*
++ * @brief get configuration command from host
++ * BFI_CEE_H2I_GET_CFG_REQ
++ */
++struct bfi_cee_get_req {
++	struct bfi_mhdr mh;
++	union bfi_addr_u   dma_addr;
++};
++
++/*
++ * @brief reply message from firmware
++ * BFI_CEE_I2H_GET_CFG_RSP
++ */
++struct bfi_cee_get_rsp {
++	struct bfi_mhdr mh;
++	u8			cmd_status;
++	u8			rsvd[3];
++};
++
++/*
++ * @brief get configuration command from host
++ * BFI_CEE_H2I_GET_STATS_REQ
++ */
++struct bfi_cee_stats_req {
++	struct bfi_mhdr mh;
++	union bfi_addr_u   dma_addr;
++};
++
++/*
++ * @brief reply message from firmware
++ * BFI_CEE_I2H_GET_STATS_RSP
++ */
++struct bfi_cee_stats_rsp {
++	struct bfi_mhdr mh;
++	u8			cmd_status;
++	u8			rsvd[3];
++};
++
++/* @brief mailbox command structures from host to firmware */
++union bfi_cee_h2i_msg_u {
++	struct bfi_mhdr mh;
++	struct bfi_cee_get_req get_req;
++	struct bfi_cee_stats_req stats_req;
++};
++
++/* @brief mailbox message structures from firmware to host	*/
++union bfi_cee_i2h_msg_u {
++	struct bfi_mhdr mh;
++	struct bfi_cee_get_rsp get_rsp;
++	struct bfi_cee_stats_rsp stats_rsp;
++};
++
++#pragma pack()
++
++#endif /* __BFI_CNA_H__ */
+--- /dev/null
++++ b/drivers/net/bna/bfi_ctreg.h
+@@ -0,0 +1,637 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++
++/*
++ * bfi_ctreg.h catapult host block register definitions
++ *
++ * !!! Do not edit. Auto generated. !!!
++ */
++
++#ifndef __BFI_CTREG_H__
++#define __BFI_CTREG_H__
++
++#define HOSTFN0_LPU_MBOX0_0		0x00019200
++#define HOSTFN1_LPU_MBOX0_8		0x00019260
++#define LPU_HOSTFN0_MBOX0_0		0x00019280
++#define LPU_HOSTFN1_MBOX0_8		0x000192e0
++#define HOSTFN2_LPU_MBOX0_0		0x00019400
++#define HOSTFN3_LPU_MBOX0_8		0x00019460
++#define LPU_HOSTFN2_MBOX0_0		0x00019480
++#define LPU_HOSTFN3_MBOX0_8		0x000194e0
++#define HOSTFN0_INT_STATUS		0x00014000
++#define __HOSTFN0_HALT_OCCURRED		0x01000000
++#define __HOSTFN0_INT_STATUS_LVL_MK	0x00f00000
++#define __HOSTFN0_INT_STATUS_LVL_SH	20
++#define __HOSTFN0_INT_STATUS_LVL(_v)	((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
++#define __HOSTFN0_INT_STATUS_P_MK	0x000f0000
++#define __HOSTFN0_INT_STATUS_P_SH	16
++#define __HOSTFN0_INT_STATUS_P(_v)	((_v) << __HOSTFN0_INT_STATUS_P_SH)
++#define __HOSTFN0_INT_STATUS_F		0x0000ffff
++#define HOSTFN0_INT_MSK			0x00014004
++#define HOST_PAGE_NUM_FN0		0x00014008
++#define __HOST_PAGE_NUM_FN		0x000001ff
++#define HOST_MSIX_ERR_INDEX_FN0		0x0001400c
++#define __MSIX_ERR_INDEX_FN		0x000001ff
++#define HOSTFN1_INT_STATUS		0x00014100
++#define __HOSTFN1_HALT_OCCURRED		0x01000000
++#define __HOSTFN1_INT_STATUS_LVL_MK	0x00f00000
++#define __HOSTFN1_INT_STATUS_LVL_SH	20
++#define __HOSTFN1_INT_STATUS_LVL(_v)	((_v) << __HOSTFN1_INT_STATUS_LVL_SH)
++#define __HOSTFN1_INT_STATUS_P_MK	0x000f0000
++#define __HOSTFN1_INT_STATUS_P_SH	16
++#define __HOSTFN1_INT_STATUS_P(_v)	((_v) << __HOSTFN1_INT_STATUS_P_SH)
++#define __HOSTFN1_INT_STATUS_F		0x0000ffff
++#define HOSTFN1_INT_MSK			0x00014104
++#define HOST_PAGE_NUM_FN1		0x00014108
++#define HOST_MSIX_ERR_INDEX_FN1		0x0001410c
++#define APP_PLL_425_CTL_REG		0x00014204
++#define __P_425_PLL_LOCK		0x80000000
++#define __APP_PLL_425_SRAM_USE_100MHZ	0x00100000
++#define __APP_PLL_425_RESET_TIMER_MK	0x000e0000
++#define __APP_PLL_425_RESET_TIMER_SH	17
++#define __APP_PLL_425_RESET_TIMER(_v)	((_v) << __APP_PLL_425_RESET_TIMER_SH)
++#define __APP_PLL_425_LOGIC_SOFT_RESET	0x00010000
++#define __APP_PLL_425_CNTLMT0_1_MK	0x0000c000
++#define __APP_PLL_425_CNTLMT0_1_SH	14
++#define __APP_PLL_425_CNTLMT0_1(_v)	((_v) << __APP_PLL_425_CNTLMT0_1_SH)
++#define __APP_PLL_425_JITLMT0_1_MK	0x00003000
++#define __APP_PLL_425_JITLMT0_1_SH	12
++#define __APP_PLL_425_JITLMT0_1(_v)	((_v) << __APP_PLL_425_JITLMT0_1_SH)
++#define __APP_PLL_425_HREF		0x00000800
++#define __APP_PLL_425_HDIV		0x00000400
++#define __APP_PLL_425_P0_1_MK		0x00000300
++#define __APP_PLL_425_P0_1_SH		8
++#define __APP_PLL_425_P0_1(_v)		((_v) << __APP_PLL_425_P0_1_SH)
++#define __APP_PLL_425_Z0_2_MK		0x000000e0
++#define __APP_PLL_425_Z0_2_SH		5
++#define __APP_PLL_425_Z0_2(_v)		((_v) << __APP_PLL_425_Z0_2_SH)
++#define __APP_PLL_425_RSEL200500	0x00000010
++#define __APP_PLL_425_ENARST		0x00000008
++#define __APP_PLL_425_BYPASS		0x00000004
++#define __APP_PLL_425_LRESETN		0x00000002
++#define __APP_PLL_425_ENABLE		0x00000001
++#define APP_PLL_312_CTL_REG		0x00014208
++#define __P_312_PLL_LOCK		0x80000000
++#define __ENABLE_MAC_AHB_1		0x00800000
++#define __ENABLE_MAC_AHB_0		0x00400000
++#define __ENABLE_MAC_1			0x00200000
++#define __ENABLE_MAC_0			0x00100000
++#define __APP_PLL_312_RESET_TIMER_MK	0x000e0000
++#define __APP_PLL_312_RESET_TIMER_SH	17
++#define __APP_PLL_312_RESET_TIMER(_v)	((_v) << __APP_PLL_312_RESET_TIMER_SH)
++#define __APP_PLL_312_LOGIC_SOFT_RESET	0x00010000
++#define __APP_PLL_312_CNTLMT0_1_MK	0x0000c000
++#define __APP_PLL_312_CNTLMT0_1_SH	14
++#define __APP_PLL_312_CNTLMT0_1(_v)	((_v) << __APP_PLL_312_CNTLMT0_1_SH)
++#define __APP_PLL_312_JITLMT0_1_MK	0x00003000
++#define __APP_PLL_312_JITLMT0_1_SH	12
++#define __APP_PLL_312_JITLMT0_1(_v)	((_v) << __APP_PLL_312_JITLMT0_1_SH)
++#define __APP_PLL_312_HREF		0x00000800
++#define __APP_PLL_312_HDIV		0x00000400
++#define __APP_PLL_312_P0_1_MK		0x00000300
++#define __APP_PLL_312_P0_1_SH		8
++#define __APP_PLL_312_P0_1(_v)		((_v) << __APP_PLL_312_P0_1_SH)
++#define __APP_PLL_312_Z0_2_MK		0x000000e0
++#define __APP_PLL_312_Z0_2_SH		5
++#define __APP_PLL_312_Z0_2(_v)		((_v) << __APP_PLL_312_Z0_2_SH)
++#define __APP_PLL_312_RSEL200500	0x00000010
++#define __APP_PLL_312_ENARST		0x00000008
++#define __APP_PLL_312_BYPASS		0x00000004
++#define __APP_PLL_312_LRESETN		0x00000002
++#define __APP_PLL_312_ENABLE		0x00000001
++#define MBIST_CTL_REG			0x00014220
++#define __EDRAM_BISTR_START		0x00000004
++#define __MBIST_RESET			0x00000002
++#define __MBIST_START			0x00000001
++#define MBIST_STAT_REG			0x00014224
++#define __EDRAM_BISTR_STATUS		0x00000008
++#define __EDRAM_BISTR_DONE		0x00000004
++#define __MEM_BIT_STATUS		0x00000002
++#define __MBIST_DONE			0x00000001
++#define HOST_SEM0_REG			0x00014230
++#define __HOST_SEMAPHORE		0x00000001
++#define HOST_SEM1_REG			0x00014234
++#define HOST_SEM2_REG			0x00014238
++#define HOST_SEM3_REG			0x0001423c
++#define HOST_SEM0_INFO_REG		0x00014240
++#define HOST_SEM1_INFO_REG		0x00014244
++#define HOST_SEM2_INFO_REG		0x00014248
++#define HOST_SEM3_INFO_REG		0x0001424c
++#define ETH_MAC_SER_REG			0x00014288
++#define __APP_EMS_CKBUFAMPIN		0x00000020
++#define __APP_EMS_REFCLKSEL		0x00000010
++#define __APP_EMS_CMLCKSEL		0x00000008
++#define __APP_EMS_REFCKBUFEN2		0x00000004
++#define __APP_EMS_REFCKBUFEN1		0x00000002
++#define __APP_EMS_CHANNEL_SEL		0x00000001
++#define HOSTFN2_INT_STATUS		0x00014300
++#define __HOSTFN2_HALT_OCCURRED		0x01000000
++#define __HOSTFN2_INT_STATUS_LVL_MK	0x00f00000
++#define __HOSTFN2_INT_STATUS_LVL_SH	20
++#define __HOSTFN2_INT_STATUS_LVL(_v)	((_v) << __HOSTFN2_INT_STATUS_LVL_SH)
++#define __HOSTFN2_INT_STATUS_P_MK	0x000f0000
++#define __HOSTFN2_INT_STATUS_P_SH	16
++#define __HOSTFN2_INT_STATUS_P(_v)	((_v) << __HOSTFN2_INT_STATUS_P_SH)
++#define __HOSTFN2_INT_STATUS_F		0x0000ffff
++#define HOSTFN2_INT_MSK			0x00014304
++#define HOST_PAGE_NUM_FN2		0x00014308
++#define HOST_MSIX_ERR_INDEX_FN2		0x0001430c
++#define HOSTFN3_INT_STATUS		0x00014400
++#define __HALT_OCCURRED			0x01000000
++#define __HOSTFN3_INT_STATUS_LVL_MK	0x00f00000
++#define __HOSTFN3_INT_STATUS_LVL_SH	20
++#define __HOSTFN3_INT_STATUS_LVL(_v)	((_v) << __HOSTFN3_INT_STATUS_LVL_SH)
++#define __HOSTFN3_INT_STATUS_P_MK	0x000f0000
++#define __HOSTFN3_INT_STATUS_P_SH	16
++#define __HOSTFN3_INT_STATUS_P(_v)	((_v) << __HOSTFN3_INT_STATUS_P_SH)
++#define __HOSTFN3_INT_STATUS_F		0x0000ffff
++#define HOSTFN3_INT_MSK			0x00014404
++#define HOST_PAGE_NUM_FN3		0x00014408
++#define HOST_MSIX_ERR_INDEX_FN3		0x0001440c
++#define FNC_ID_REG			0x00014600
++#define __FUNCTION_NUMBER		0x00000007
++#define FNC_PERS_REG			0x00014604
++#define __F3_FUNCTION_ACTIVE		0x80000000
++#define __F3_FUNCTION_MODE		0x40000000
++#define __F3_PORT_MAP_MK		0x30000000
++#define __F3_PORT_MAP_SH		28
++#define __F3_PORT_MAP(_v)		((_v) << __F3_PORT_MAP_SH)
++#define __F3_VM_MODE			0x08000000
++#define __F3_INTX_STATUS_MK		0x07000000
++#define __F3_INTX_STATUS_SH		24
++#define __F3_INTX_STATUS(_v)		((_v) << __F3_INTX_STATUS_SH)
++#define __F2_FUNCTION_ACTIVE		0x00800000
++#define __F2_FUNCTION_MODE		0x00400000
++#define __F2_PORT_MAP_MK		0x00300000
++#define __F2_PORT_MAP_SH		20
++#define __F2_PORT_MAP(_v)		((_v) << __F2_PORT_MAP_SH)
++#define __F2_VM_MODE			0x00080000
++#define __F2_INTX_STATUS_MK		0x00070000
++#define __F2_INTX_STATUS_SH		16
++#define __F2_INTX_STATUS(_v)		((_v) << __F2_INTX_STATUS_SH)
++#define __F1_FUNCTION_ACTIVE		0x00008000
++#define __F1_FUNCTION_MODE		0x00004000
++#define __F1_PORT_MAP_MK		0x00003000
++#define __F1_PORT_MAP_SH		12
++#define __F1_PORT_MAP(_v)		((_v) << __F1_PORT_MAP_SH)
++#define __F1_VM_MODE			0x00000800
++#define __F1_INTX_STATUS_MK		0x00000700
++#define __F1_INTX_STATUS_SH		8
++#define __F1_INTX_STATUS(_v)		((_v) << __F1_INTX_STATUS_SH)
++#define __F0_FUNCTION_ACTIVE		0x00000080
++#define __F0_FUNCTION_MODE		0x00000040
++#define __F0_PORT_MAP_MK		0x00000030
++#define __F0_PORT_MAP_SH		4
++#define __F0_PORT_MAP(_v)		((_v) << __F0_PORT_MAP_SH)
++#define __F0_VM_MODE		0x00000008
++#define __F0_INTX_STATUS		0x00000007
++enum {
++	__F0_INTX_STATUS_MSIX		= 0x0,
++	__F0_INTX_STATUS_INTA		= 0x1,
++	__F0_INTX_STATUS_INTB		= 0x2,
++	__F0_INTX_STATUS_INTC		= 0x3,
++	__F0_INTX_STATUS_INTD		= 0x4,
++};
++#define OP_MODE				0x0001460c
++#define __APP_ETH_CLK_LOWSPEED		0x00000004
++#define __GLOBAL_CORECLK_HALFSPEED	0x00000002
++#define __GLOBAL_FCOE_MODE		0x00000001
++#define HOST_SEM4_REG			0x00014610
++#define HOST_SEM5_REG			0x00014614
++#define HOST_SEM6_REG			0x00014618
++#define HOST_SEM7_REG			0x0001461c
++#define HOST_SEM4_INFO_REG		0x00014620
++#define HOST_SEM5_INFO_REG		0x00014624
++#define HOST_SEM6_INFO_REG		0x00014628
++#define HOST_SEM7_INFO_REG		0x0001462c
++#define HOSTFN0_LPU0_MBOX0_CMD_STAT	0x00019000
++#define __HOSTFN0_LPU0_MBOX0_INFO_MK	0xfffffffe
++#define __HOSTFN0_LPU0_MBOX0_INFO_SH	1
++#define __HOSTFN0_LPU0_MBOX0_INFO(_v)	((_v) << __HOSTFN0_LPU0_MBOX0_INFO_SH)
++#define __HOSTFN0_LPU0_MBOX0_CMD_STATUS 0x00000001
++#define HOSTFN0_LPU1_MBOX0_CMD_STAT	0x00019004
++#define __HOSTFN0_LPU1_MBOX0_INFO_MK	0xfffffffe
++#define __HOSTFN0_LPU1_MBOX0_INFO_SH	1
++#define __HOSTFN0_LPU1_MBOX0_INFO(_v)	((_v) << __HOSTFN0_LPU1_MBOX0_INFO_SH)
++#define __HOSTFN0_LPU1_MBOX0_CMD_STATUS 0x00000001
++#define LPU0_HOSTFN0_MBOX0_CMD_STAT	0x00019008
++#define __LPU0_HOSTFN0_MBOX0_INFO_MK	0xfffffffe
++#define __LPU0_HOSTFN0_MBOX0_INFO_SH	1
++#define __LPU0_HOSTFN0_MBOX0_INFO(_v)	((_v) << __LPU0_HOSTFN0_MBOX0_INFO_SH)
++#define __LPU0_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
++#define LPU1_HOSTFN0_MBOX0_CMD_STAT	0x0001900c
++#define __LPU1_HOSTFN0_MBOX0_INFO_MK	0xfffffffe
++#define __LPU1_HOSTFN0_MBOX0_INFO_SH	1
++#define __LPU1_HOSTFN0_MBOX0_INFO(_v)	((_v) << __LPU1_HOSTFN0_MBOX0_INFO_SH)
++#define __LPU1_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
++#define HOSTFN1_LPU0_MBOX0_CMD_STAT	0x00019010
++#define __HOSTFN1_LPU0_MBOX0_INFO_MK	0xfffffffe
++#define __HOSTFN1_LPU0_MBOX0_INFO_SH	1
++#define __HOSTFN1_LPU0_MBOX0_INFO(_v)	((_v) << __HOSTFN1_LPU0_MBOX0_INFO_SH)
++#define __HOSTFN1_LPU0_MBOX0_CMD_STATUS 0x00000001
++#define HOSTFN1_LPU1_MBOX0_CMD_STAT	0x00019014
++#define __HOSTFN1_LPU1_MBOX0_INFO_MK	0xfffffffe
++#define __HOSTFN1_LPU1_MBOX0_INFO_SH	1
++#define __HOSTFN1_LPU1_MBOX0_INFO(_v)	((_v) << __HOSTFN1_LPU1_MBOX0_INFO_SH)
++#define __HOSTFN1_LPU1_MBOX0_CMD_STATUS 0x00000001
++#define LPU0_HOSTFN1_MBOX0_CMD_STAT	0x00019018
++#define __LPU0_HOSTFN1_MBOX0_INFO_MK	0xfffffffe
++#define __LPU0_HOSTFN1_MBOX0_INFO_SH	1
++#define __LPU0_HOSTFN1_MBOX0_INFO(_v)	((_v) << __LPU0_HOSTFN1_MBOX0_INFO_SH)
++#define __LPU0_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
++#define LPU1_HOSTFN1_MBOX0_CMD_STAT	0x0001901c
++#define __LPU1_HOSTFN1_MBOX0_INFO_MK	0xfffffffe
++#define __LPU1_HOSTFN1_MBOX0_INFO_SH	1
++#define __LPU1_HOSTFN1_MBOX0_INFO(_v)	((_v) << __LPU1_HOSTFN1_MBOX0_INFO_SH)
++#define __LPU1_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
++#define HOSTFN2_LPU0_MBOX0_CMD_STAT	0x00019150
++#define __HOSTFN2_LPU0_MBOX0_INFO_MK	0xfffffffe
++#define __HOSTFN2_LPU0_MBOX0_INFO_SH	1
++#define __HOSTFN2_LPU0_MBOX0_INFO(_v)	((_v) << __HOSTFN2_LPU0_MBOX0_INFO_SH)
++#define __HOSTFN2_LPU0_MBOX0_CMD_STATUS 0x00000001
++#define HOSTFN2_LPU1_MBOX0_CMD_STAT	0x00019154
++#define __HOSTFN2_LPU1_MBOX0_INFO_MK	0xfffffffe
++#define __HOSTFN2_LPU1_MBOX0_INFO_SH	1
++#define __HOSTFN2_LPU1_MBOX0_INFO(_v)	((_v) << __HOSTFN2_LPU1_MBOX0_INFO_SH)
++#define __HOSTFN2_LPU1_MBOX0_CMD_STATUS 0x00000001
++#define LPU0_HOSTFN2_MBOX0_CMD_STAT	0x00019158
++#define __LPU0_HOSTFN2_MBOX0_INFO_MK	0xfffffffe
++#define __LPU0_HOSTFN2_MBOX0_INFO_SH	1
++#define __LPU0_HOSTFN2_MBOX0_INFO(_v)	((_v) << __LPU0_HOSTFN2_MBOX0_INFO_SH)
++#define __LPU0_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
++#define LPU1_HOSTFN2_MBOX0_CMD_STAT	0x0001915c
++#define __LPU1_HOSTFN2_MBOX0_INFO_MK	0xfffffffe
++#define __LPU1_HOSTFN2_MBOX0_INFO_SH	1
++#define __LPU1_HOSTFN2_MBOX0_INFO(_v)	((_v) << __LPU1_HOSTFN2_MBOX0_INFO_SH)
++#define __LPU1_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
++#define HOSTFN3_LPU0_MBOX0_CMD_STAT	0x00019160
++#define __HOSTFN3_LPU0_MBOX0_INFO_MK	0xfffffffe
++#define __HOSTFN3_LPU0_MBOX0_INFO_SH	1
++#define __HOSTFN3_LPU0_MBOX0_INFO(_v)	((_v) << __HOSTFN3_LPU0_MBOX0_INFO_SH)
++#define __HOSTFN3_LPU0_MBOX0_CMD_STATUS 0x00000001
++#define HOSTFN3_LPU1_MBOX0_CMD_STAT	0x00019164
++#define __HOSTFN3_LPU1_MBOX0_INFO_MK	0xfffffffe
++#define __HOSTFN3_LPU1_MBOX0_INFO_SH	1
++#define __HOSTFN3_LPU1_MBOX0_INFO(_v)	((_v) << __HOSTFN3_LPU1_MBOX0_INFO_SH)
++#define __HOSTFN3_LPU1_MBOX0_CMD_STATUS 0x00000001
++#define LPU0_HOSTFN3_MBOX0_CMD_STAT	0x00019168
++#define __LPU0_HOSTFN3_MBOX0_INFO_MK	0xfffffffe
++#define __LPU0_HOSTFN3_MBOX0_INFO_SH	1
++#define __LPU0_HOSTFN3_MBOX0_INFO(_v)	((_v) << __LPU0_HOSTFN3_MBOX0_INFO_SH)
++#define __LPU0_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
++#define LPU1_HOSTFN3_MBOX0_CMD_STAT	0x0001916c
++#define __LPU1_HOSTFN3_MBOX0_INFO_MK	0xfffffffe
++#define __LPU1_HOSTFN3_MBOX0_INFO_SH	1
++#define __LPU1_HOSTFN3_MBOX0_INFO(_v)	((_v) << __LPU1_HOSTFN3_MBOX0_INFO_SH)
++#define __LPU1_HOSTFN3_MBOX0_CMD_STATUS	0x00000001
++#define FW_INIT_HALT_P0			0x000191ac
++#define __FW_INIT_HALT_P		0x00000001
++#define FW_INIT_HALT_P1			0x000191bc
++#define CPE_PI_PTR_Q0			0x00038000
++#define __CPE_PI_UNUSED_MK		0xffff0000
++#define __CPE_PI_UNUSED_SH		16
++#define __CPE_PI_UNUSED(_v)		((_v) << __CPE_PI_UNUSED_SH)
++#define __CPE_PI_PTR			0x0000ffff
++#define CPE_PI_PTR_Q1			0x00038040
++#define CPE_CI_PTR_Q0			0x00038004
++#define __CPE_CI_UNUSED_MK		0xffff0000
++#define __CPE_CI_UNUSED_SH		16
++#define __CPE_CI_UNUSED(_v)		((_v) << __CPE_CI_UNUSED_SH)
++#define __CPE_CI_PTR			0x0000ffff
++#define CPE_CI_PTR_Q1			0x00038044
++#define CPE_DEPTH_Q0			0x00038008
++#define __CPE_DEPTH_UNUSED_MK		0xf8000000
++#define __CPE_DEPTH_UNUSED_SH		27
++#define __CPE_DEPTH_UNUSED(_v)		((_v) << __CPE_DEPTH_UNUSED_SH)
++#define __CPE_MSIX_VEC_INDEX_MK		0x07ff0000
++#define __CPE_MSIX_VEC_INDEX_SH		16
++#define __CPE_MSIX_VEC_INDEX(_v)	((_v) << __CPE_MSIX_VEC_INDEX_SH)
++#define __CPE_DEPTH			0x0000ffff
++#define CPE_DEPTH_Q1			0x00038048
++#define CPE_QCTRL_Q0			0x0003800c
++#define __CPE_CTRL_UNUSED30_MK		0xfc000000
++#define __CPE_CTRL_UNUSED30_SH		26
++#define __CPE_CTRL_UNUSED30(_v)		((_v) << __CPE_CTRL_UNUSED30_SH)
++#define __CPE_FUNC_INT_CTRL_MK		0x03000000
++#define __CPE_FUNC_INT_CTRL_SH		24
++#define __CPE_FUNC_INT_CTRL(_v)		((_v) << __CPE_FUNC_INT_CTRL_SH)
++enum {
++	__CPE_FUNC_INT_CTRL_DISABLE		= 0x0,
++	__CPE_FUNC_INT_CTRL_F2NF		= 0x1,
++	__CPE_FUNC_INT_CTRL_3QUART		= 0x2,
++	__CPE_FUNC_INT_CTRL_HALF		= 0x3,
++};
++#define __CPE_CTRL_UNUSED20_MK		0x00f00000
++#define __CPE_CTRL_UNUSED20_SH		20
++#define __CPE_CTRL_UNUSED20(_v)		((_v) << __CPE_CTRL_UNUSED20_SH)
++#define __CPE_SCI_TH_MK			0x000f0000
++#define __CPE_SCI_TH_SH			16
++#define __CPE_SCI_TH(_v)		((_v) << __CPE_SCI_TH_SH)
++#define __CPE_CTRL_UNUSED10_MK		0x0000c000
++#define __CPE_CTRL_UNUSED10_SH		14
++#define __CPE_CTRL_UNUSED10(_v)		((_v) << __CPE_CTRL_UNUSED10_SH)
++#define __CPE_ACK_PENDING		0x00002000
++#define __CPE_CTRL_UNUSED40_MK		0x00001c00
++#define __CPE_CTRL_UNUSED40_SH		10
++#define __CPE_CTRL_UNUSED40(_v)		((_v) << __CPE_CTRL_UNUSED40_SH)
++#define __CPE_PCIEID_MK			0x00000300
++#define __CPE_PCIEID_SH			8
++#define __CPE_PCIEID(_v)		((_v) << __CPE_PCIEID_SH)
++#define __CPE_CTRL_UNUSED00_MK		0x000000fe
++#define __CPE_CTRL_UNUSED00_SH		1
++#define __CPE_CTRL_UNUSED00(_v)		((_v) << __CPE_CTRL_UNUSED00_SH)
++#define __CPE_ESIZE			0x00000001
++#define CPE_QCTRL_Q1			0x0003804c
++#define __CPE_CTRL_UNUSED31_MK		0xfc000000
++#define __CPE_CTRL_UNUSED31_SH		26
++#define __CPE_CTRL_UNUSED31(_v)		((_v) << __CPE_CTRL_UNUSED31_SH)
++#define __CPE_CTRL_UNUSED21_MK		0x00f00000
++#define __CPE_CTRL_UNUSED21_SH		20
++#define __CPE_CTRL_UNUSED21(_v)		((_v) << __CPE_CTRL_UNUSED21_SH)
++#define __CPE_CTRL_UNUSED11_MK		0x0000c000
++#define __CPE_CTRL_UNUSED11_SH		14
++#define __CPE_CTRL_UNUSED11(_v)		((_v) << __CPE_CTRL_UNUSED11_SH)
++#define __CPE_CTRL_UNUSED41_MK		0x00001c00
++#define __CPE_CTRL_UNUSED41_SH		10
++#define __CPE_CTRL_UNUSED41(_v)		((_v) << __CPE_CTRL_UNUSED41_SH)
++#define __CPE_CTRL_UNUSED01_MK		0x000000fe
++#define __CPE_CTRL_UNUSED01_SH		1
++#define __CPE_CTRL_UNUSED01(_v)		((_v) << __CPE_CTRL_UNUSED01_SH)
++#define RME_PI_PTR_Q0			0x00038020
++#define __LATENCY_TIME_STAMP_MK		0xffff0000
++#define __LATENCY_TIME_STAMP_SH		16
++#define __LATENCY_TIME_STAMP(_v)	((_v) << __LATENCY_TIME_STAMP_SH)
++#define __RME_PI_PTR			0x0000ffff
++#define RME_PI_PTR_Q1			0x00038060
++#define RME_CI_PTR_Q0			0x00038024
++#define __DELAY_TIME_STAMP_MK		0xffff0000
++#define __DELAY_TIME_STAMP_SH		16
++#define __DELAY_TIME_STAMP(_v)		((_v) << __DELAY_TIME_STAMP_SH)
++#define __RME_CI_PTR			0x0000ffff
++#define RME_CI_PTR_Q1			0x00038064
++#define RME_DEPTH_Q0			0x00038028
++#define __RME_DEPTH_UNUSED_MK		0xf8000000
++#define __RME_DEPTH_UNUSED_SH		27
++#define __RME_DEPTH_UNUSED(_v)		((_v) << __RME_DEPTH_UNUSED_SH)
++#define __RME_MSIX_VEC_INDEX_MK		0x07ff0000
++#define __RME_MSIX_VEC_INDEX_SH		16
++#define __RME_MSIX_VEC_INDEX(_v)	((_v) << __RME_MSIX_VEC_INDEX_SH)
++#define __RME_DEPTH			0x0000ffff
++#define RME_DEPTH_Q1			0x00038068
++#define RME_QCTRL_Q0			0x0003802c
++#define __RME_INT_LATENCY_TIMER_MK	0xff000000
++#define __RME_INT_LATENCY_TIMER_SH	24
++#define __RME_INT_LATENCY_TIMER(_v)	((_v) << __RME_INT_LATENCY_TIMER_SH)
++#define __RME_INT_DELAY_TIMER_MK	0x00ff0000
++#define __RME_INT_DELAY_TIMER_SH	16
++#define __RME_INT_DELAY_TIMER(_v)	((_v) << __RME_INT_DELAY_TIMER_SH)
++#define __RME_INT_DELAY_DISABLE		0x00008000
++#define __RME_DLY_DELAY_DISABLE		0x00004000
++#define __RME_ACK_PENDING		0x00002000
++#define __RME_FULL_INTERRUPT_DISABLE	0x00001000
++#define __RME_CTRL_UNUSED10_MK		0x00000c00
++#define __RME_CTRL_UNUSED10_SH		10
++#define __RME_CTRL_UNUSED10(_v)		((_v) << __RME_CTRL_UNUSED10_SH)
++#define __RME_PCIEID_MK			0x00000300
++#define __RME_PCIEID_SH			8
++#define __RME_PCIEID(_v)		((_v) << __RME_PCIEID_SH)
++#define __RME_CTRL_UNUSED00_MK		0x000000fe
++#define __RME_CTRL_UNUSED00_SH		1
++#define __RME_CTRL_UNUSED00(_v)		((_v) << __RME_CTRL_UNUSED00_SH)
++#define __RME_ESIZE			0x00000001
++#define RME_QCTRL_Q1			0x0003806c
++#define __RME_CTRL_UNUSED11_MK		0x00000c00
++#define __RME_CTRL_UNUSED11_SH		10
++#define __RME_CTRL_UNUSED11(_v)		((_v) << __RME_CTRL_UNUSED11_SH)
++#define __RME_CTRL_UNUSED01_MK		0x000000fe
++#define __RME_CTRL_UNUSED01_SH		1
++#define __RME_CTRL_UNUSED01(_v)		((_v) << __RME_CTRL_UNUSED01_SH)
++#define PSS_CTL_REG			0x00018800
++#define __PSS_I2C_CLK_DIV_MK		0x007f0000
++#define __PSS_I2C_CLK_DIV_SH		16
++#define __PSS_I2C_CLK_DIV(_v)		((_v) << __PSS_I2C_CLK_DIV_SH)
++#define __PSS_LMEM_INIT_DONE		0x00001000
++#define __PSS_LMEM_RESET		0x00000200
++#define __PSS_LMEM_INIT_EN		0x00000100
++#define __PSS_LPU1_RESET		0x00000002
++#define __PSS_LPU0_RESET		0x00000001
++#define PSS_ERR_STATUS_REG		0x00018810
++#define __PSS_LPU1_TCM_READ_ERR		0x00200000
++#define __PSS_LPU0_TCM_READ_ERR		0x00100000
++#define __PSS_LMEM5_CORR_ERR		0x00080000
++#define __PSS_LMEM4_CORR_ERR		0x00040000
++#define __PSS_LMEM3_CORR_ERR		0x00020000
++#define __PSS_LMEM2_CORR_ERR		0x00010000
++#define __PSS_LMEM1_CORR_ERR		0x00008000
++#define __PSS_LMEM0_CORR_ERR		0x00004000
++#define __PSS_LMEM5_UNCORR_ERR		0x00002000
++#define __PSS_LMEM4_UNCORR_ERR		0x00001000
++#define __PSS_LMEM3_UNCORR_ERR		0x00000800
++#define __PSS_LMEM2_UNCORR_ERR		0x00000400
++#define __PSS_LMEM1_UNCORR_ERR		0x00000200
++#define __PSS_LMEM0_UNCORR_ERR		0x00000100
++#define __PSS_BAL_PERR			0x00000080
++#define __PSS_DIP_IF_ERR		0x00000040
++#define __PSS_IOH_IF_ERR		0x00000020
++#define __PSS_TDS_IF_ERR		0x00000010
++#define __PSS_RDS_IF_ERR		0x00000008
++#define __PSS_SGM_IF_ERR		0x00000004
++#define __PSS_LPU1_RAM_ERR		0x00000002
++#define __PSS_LPU0_RAM_ERR		0x00000001
++#define ERR_SET_REG			0x00018818
++#define __PSS_ERR_STATUS_SET		0x003fffff
++#define PMM_1T_RESET_REG_P0		0x0002381c
++#define __PMM_1T_RESET_P		0x00000001
++#define PMM_1T_RESET_REG_P1		0x00023c1c
++#define HQM_QSET0_RXQ_DRBL_P0		0x00038000
++#define __RXQ0_ADD_VECTORS_P		0x80000000
++#define __RXQ0_STOP_P			0x40000000
++#define __RXQ0_PRD_PTR_P		0x0000ffff
++#define HQM_QSET1_RXQ_DRBL_P0		0x00038080
++#define __RXQ1_ADD_VECTORS_P		0x80000000
++#define __RXQ1_STOP_P			0x40000000
++#define __RXQ1_PRD_PTR_P		0x0000ffff
++#define HQM_QSET0_RXQ_DRBL_P1		0x0003c000
++#define HQM_QSET1_RXQ_DRBL_P1		0x0003c080
++#define HQM_QSET0_TXQ_DRBL_P0		0x00038020
++#define __TXQ0_ADD_VECTORS_P		0x80000000
++#define __TXQ0_STOP_P			0x40000000
++#define __TXQ0_PRD_PTR_P		0x0000ffff
++#define HQM_QSET1_TXQ_DRBL_P0		0x000380a0
++#define __TXQ1_ADD_VECTORS_P		0x80000000
++#define __TXQ1_STOP_P			0x40000000
++#define __TXQ1_PRD_PTR_P		0x0000ffff
++#define HQM_QSET0_TXQ_DRBL_P1		0x0003c020
++#define HQM_QSET1_TXQ_DRBL_P1		0x0003c0a0
++#define HQM_QSET0_IB_DRBL_1_P0		0x00038040
++#define __IB1_0_ACK_P			0x80000000
++#define __IB1_0_DISABLE_P		0x40000000
++#define __IB1_0_COALESCING_CFG_P_MK	0x00ff0000
++#define __IB1_0_COALESCING_CFG_P_SH	16
++#define __IB1_0_COALESCING_CFG_P(_v)	((_v) << __IB1_0_COALESCING_CFG_P_SH)
++#define __IB1_0_NUM_OF_ACKED_EVENTS_P	0x0000ffff
++#define HQM_QSET1_IB_DRBL_1_P0		0x000380c0
++#define __IB1_1_ACK_P			0x80000000
++#define __IB1_1_DISABLE_P		0x40000000
++#define __IB1_1_COALESCING_CFG_P_MK	0x00ff0000
++#define __IB1_1_COALESCING_CFG_P_SH	16
++#define __IB1_1_COALESCING_CFG_P(_v)	((_v) << __IB1_1_COALESCING_CFG_P_SH)
++#define __IB1_1_NUM_OF_ACKED_EVENTS_P	0x0000ffff
++#define HQM_QSET0_IB_DRBL_1_P1		0x0003c040
++#define HQM_QSET1_IB_DRBL_1_P1		0x0003c0c0
++#define HQM_QSET0_IB_DRBL_2_P0		0x00038060
++#define __IB2_0_ACK_P			0x80000000
++#define __IB2_0_DISABLE_P		0x40000000
++#define __IB2_0_COALESCING_CFG_P_MK	0x00ff0000
++#define __IB2_0_COALESCING_CFG_P_SH	16
++#define __IB2_0_COALESCING_CFG_P(_v)	((_v) << __IB2_0_COALESCING_CFG_P_SH)
++#define __IB2_0_NUM_OF_ACKED_EVENTS_P	0x0000ffff
++#define HQM_QSET1_IB_DRBL_2_P0		0x000380e0
++#define __IB2_1_ACK_P			0x80000000
++#define __IB2_1_DISABLE_P		0x40000000
++#define __IB2_1_COALESCING_CFG_P_MK	0x00ff0000
++#define __IB2_1_COALESCING_CFG_P_SH	16
++#define __IB2_1_COALESCING_CFG_P(_v)	((_v) << __IB2_1_COALESCING_CFG_P_SH)
++#define __IB2_1_NUM_OF_ACKED_EVENTS_P	0x0000ffff
++#define HQM_QSET0_IB_DRBL_2_P1		0x0003c060
++#define HQM_QSET1_IB_DRBL_2_P1		0x0003c0e0
++
++/*
++ * These definitions are either in error or missing in the spec. They are
++ * auto-generated from hard-coded values in regparse.pl.
++ */
++#define __EMPHPOST_AT_4G_MK_FIX		0x0000001c
++#define __EMPHPOST_AT_4G_SH_FIX		0x00000002
++#define __EMPHPRE_AT_4G_FIX		0x00000003
++#define __SFP_TXRATE_EN_FIX		0x00000100
++#define __SFP_RXRATE_EN_FIX		0x00000080
++
++/*
++ * These register definitions are auto-generated from hard-coded values
++ * in regparse.pl.
++ */
++
++/*
++ * These register mapping definitions are auto-generated from mapping tables
++ * in regparse.pl.
++ */
++#define BFA_IOC0_HBEAT_REG		HOST_SEM0_INFO_REG
++#define BFA_IOC0_STATE_REG		HOST_SEM1_INFO_REG
++#define BFA_IOC1_HBEAT_REG		HOST_SEM2_INFO_REG
++#define BFA_IOC1_STATE_REG		HOST_SEM3_INFO_REG
++#define BFA_FW_USE_COUNT		HOST_SEM4_INFO_REG
++
++#define CPE_DEPTH_Q(__n) \
++	(CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
++#define CPE_QCTRL_Q(__n) \
++	(CPE_QCTRL_Q0 + (__n) * (CPE_QCTRL_Q1 - CPE_QCTRL_Q0))
++#define CPE_PI_PTR_Q(__n) \
++	(CPE_PI_PTR_Q0 + (__n) * (CPE_PI_PTR_Q1 - CPE_PI_PTR_Q0))
++#define CPE_CI_PTR_Q(__n) \
++	(CPE_CI_PTR_Q0 + (__n) * (CPE_CI_PTR_Q1 - CPE_CI_PTR_Q0))
++#define RME_DEPTH_Q(__n) \
++	(RME_DEPTH_Q0 + (__n) * (RME_DEPTH_Q1 - RME_DEPTH_Q0))
++#define RME_QCTRL_Q(__n) \
++	(RME_QCTRL_Q0 + (__n) * (RME_QCTRL_Q1 - RME_QCTRL_Q0))
++#define RME_PI_PTR_Q(__n) \
++	(RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
++#define RME_CI_PTR_Q(__n) \
++	(RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
++#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \
++	* (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
++#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \
++	* (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
++#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \
++	* (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
++#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \
++	* (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
++#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \
++	* (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
++#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \
++	* (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
++#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \
++	* (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
++#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \
++	* (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
++
++#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
++#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
++#define CPE_Q_MASK(__q) ((__q) & 0x3)
++#define RME_Q_MASK(__q) ((__q) & 0x3)
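++/*
++ * Worked example (illustrative): the per-queue macros step by the
++ * queue-0/queue-1 register stride, so CPE_DEPTH_Q(1) resolves to
++ * CPE_DEPTH_Q0 + 1 * 0x40 == CPE_DEPTH_Q1, and CPE_Q_NUM(2, 1)
++ * computes (2 << 2) + 1 == 9 with CPE_Q_MASK(9) == 1.
++ */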
++
++/*
++ * PCI MSI-X vector defines
++ */
++enum {
++	BFA_MSIX_CPE_Q0 = 0,
++	BFA_MSIX_CPE_Q1 = 1,
++	BFA_MSIX_CPE_Q2 = 2,
++	BFA_MSIX_CPE_Q3 = 3,
++	BFA_MSIX_RME_Q0 = 4,
++	BFA_MSIX_RME_Q1 = 5,
++	BFA_MSIX_RME_Q2 = 6,
++	BFA_MSIX_RME_Q3 = 7,
++	BFA_MSIX_LPU_ERR = 8,
++	BFA_MSIX_CT_MAX = 9,
++};
++
++/*
++ * And corresponding host interrupt status bit field defines
++ */
++#define __HFN_INT_CPE_Q0		0x00000001U
++#define __HFN_INT_CPE_Q1		0x00000002U
++#define __HFN_INT_CPE_Q2		0x00000004U
++#define __HFN_INT_CPE_Q3		0x00000008U
++#define __HFN_INT_CPE_Q4		0x00000010U
++#define __HFN_INT_CPE_Q5		0x00000020U
++#define __HFN_INT_CPE_Q6		0x00000040U
++#define __HFN_INT_CPE_Q7		0x00000080U
++#define __HFN_INT_RME_Q0		0x00000100U
++#define __HFN_INT_RME_Q1		0x00000200U
++#define __HFN_INT_RME_Q2		0x00000400U
++#define __HFN_INT_RME_Q3		0x00000800U
++#define __HFN_INT_RME_Q4		0x00001000U
++#define __HFN_INT_RME_Q5		0x00002000U
++#define __HFN_INT_RME_Q6		0x00004000U
++#define __HFN_INT_RME_Q7		0x00008000U
++#define __HFN_INT_ERR_EMC		0x00010000U
++#define __HFN_INT_ERR_LPU0		0x00020000U
++#define __HFN_INT_ERR_LPU1		0x00040000U
++#define __HFN_INT_ERR_PSS		0x00080000U
++#define __HFN_INT_MBOX_LPU0		0x00100000U
++#define __HFN_INT_MBOX_LPU1		0x00200000U
++#define __HFN_INT_MBOX1_LPU0		0x00400000U
++#define __HFN_INT_MBOX1_LPU1		0x00800000U
++#define __HFN_INT_LL_HALT		0x01000000U
++#define __HFN_INT_CPE_MASK		0x000000ffU
++#define __HFN_INT_RME_MASK		0x0000ff00U
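++/*
++ * Usage sketch (illustrative): an interrupt handler tests these bits
++ * against the status word it read back, e.g.
++ *	if (intr_status & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
++ *		...	handle a mailbox interrupt
++ */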
++
++/*
++ * catapult memory map.
++ */
++#define LL_PGN_HQM0		0x0096
++#define LL_PGN_HQM1		0x0097
++#define PSS_SMEM_PAGE_START	0x8000
++#define PSS_SMEM_PGNUM(_pg0, _ma)	((_pg0) + ((_ma) >> 15))
++#define PSS_SMEM_PGOFF(_ma)	((_ma) & 0x7fff)
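++/*
++ * Worked example (illustrative): shared memory is addressed in 32 KB
++ * pages, so for a byte offset _ma = 0x12345 with _pg0 = PSS_SMEM_PAGE_START:
++ *	PSS_SMEM_PGNUM(0x8000, 0x12345) == 0x8002
++ *	PSS_SMEM_PGOFF(0x12345) == 0x2345
++ */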
++
++/*
++ * End of catapult memory map
++ */
++
++#endif /* __BFI_CTREG_H__ */
+--- /dev/null
++++ b/drivers/net/bna/bfi_ll.h
+@@ -0,0 +1,438 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++#ifndef __BFI_LL_H__
++#define __BFI_LL_H__
++
++#include "bfi.h"
++
++#pragma pack(1)
++
++/**
++ * @brief
++ *	"enums" for all LL mailbox messages other than IOC
++ */
++enum {
++	BFI_LL_H2I_MAC_UCAST_SET_REQ = 1,
++	BFI_LL_H2I_MAC_UCAST_ADD_REQ = 2,
++	BFI_LL_H2I_MAC_UCAST_DEL_REQ = 3,
++
++	BFI_LL_H2I_MAC_MCAST_ADD_REQ = 4,
++	BFI_LL_H2I_MAC_MCAST_DEL_REQ = 5,
++	BFI_LL_H2I_MAC_MCAST_FILTER_REQ = 6,
++	BFI_LL_H2I_MAC_MCAST_DEL_ALL_REQ = 7,
++
++	BFI_LL_H2I_PORT_ADMIN_REQ = 8,
++	BFI_LL_H2I_STATS_GET_REQ = 9,
++	BFI_LL_H2I_STATS_CLEAR_REQ = 10,
++
++	BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ = 11,
++	BFI_LL_H2I_RXF_DEFAULT_SET_REQ = 12,
++
++	BFI_LL_H2I_TXQ_STOP_REQ = 13,
++	BFI_LL_H2I_RXQ_STOP_REQ = 14,
++
++	BFI_LL_H2I_DIAG_LOOPBACK_REQ = 15,
++
++	BFI_LL_H2I_SET_PAUSE_REQ = 16,
++	BFI_LL_H2I_MTU_INFO_REQ = 17,
++
++	BFI_LL_H2I_RX_REQ = 18,
++};
++
++enum {
++	BFI_LL_I2H_MAC_UCAST_SET_RSP = BFA_I2HM(1),
++	BFI_LL_I2H_MAC_UCAST_ADD_RSP = BFA_I2HM(2),
++	BFI_LL_I2H_MAC_UCAST_DEL_RSP = BFA_I2HM(3),
++
++	BFI_LL_I2H_MAC_MCAST_ADD_RSP = BFA_I2HM(4),
++	BFI_LL_I2H_MAC_MCAST_DEL_RSP = BFA_I2HM(5),
++	BFI_LL_I2H_MAC_MCAST_FILTER_RSP = BFA_I2HM(6),
++	BFI_LL_I2H_MAC_MCAST_DEL_ALL_RSP = BFA_I2HM(7),
++
++	BFI_LL_I2H_PORT_ADMIN_RSP = BFA_I2HM(8),
++	BFI_LL_I2H_STATS_GET_RSP = BFA_I2HM(9),
++	BFI_LL_I2H_STATS_CLEAR_RSP = BFA_I2HM(10),
++
++	BFI_LL_I2H_RXF_PROMISCUOUS_SET_RSP = BFA_I2HM(11),
++	BFI_LL_I2H_RXF_DEFAULT_SET_RSP = BFA_I2HM(12),
++
++	BFI_LL_I2H_TXQ_STOP_RSP = BFA_I2HM(13),
++	BFI_LL_I2H_RXQ_STOP_RSP = BFA_I2HM(14),
++
++	BFI_LL_I2H_DIAG_LOOPBACK_RSP = BFA_I2HM(15),
++
++	BFI_LL_I2H_SET_PAUSE_RSP = BFA_I2HM(16),
++
++	BFI_LL_I2H_MTU_INFO_RSP = BFA_I2HM(17),
++	BFI_LL_I2H_RX_RSP = BFA_I2HM(18),
++
++	BFI_LL_I2H_LINK_DOWN_AEN = BFA_I2HM(19),
++	BFI_LL_I2H_LINK_UP_AEN = BFA_I2HM(20),
++
++	BFI_LL_I2H_PORT_ENABLE_AEN = BFA_I2HM(21),
++	BFI_LL_I2H_PORT_DISABLE_AEN = BFA_I2HM(22),
++};
++
++/**
++ * @brief bfi_ll_mac_addr_req is used by:
++ *        BFI_LL_H2I_MAC_UCAST_SET_REQ
++ *        BFI_LL_H2I_MAC_UCAST_ADD_REQ
++ *        BFI_LL_H2I_MAC_UCAST_DEL_REQ
++ *        BFI_LL_H2I_MAC_MCAST_ADD_REQ
++ *        BFI_LL_H2I_MAC_MCAST_DEL_REQ
++ */
++struct bfi_ll_mac_addr_req {
++	struct bfi_mhdr mh;		/*!< common msg header */
++	u8		rxf_id;
++	u8		rsvd1[3];
++	mac_t		mac_addr;
++	u8		rsvd2[2];
++};
++
++/**
++ * @brief bfi_ll_mcast_filter_req is used by:
++ *	  BFI_LL_H2I_MAC_MCAST_FILTER_REQ
++ */
++struct bfi_ll_mcast_filter_req {
++	struct bfi_mhdr mh;		/*!< common msg header */
++	u8		rxf_id;
++	u8		enable;
++	u8		rsvd[2];
++};
++
++/**
++ * @brief bfi_ll_mcast_del_all_req is used by:
++ *	  BFI_LL_H2I_MAC_MCAST_DEL_ALL_REQ
++ */
++struct bfi_ll_mcast_del_all_req {
++	struct bfi_mhdr mh;		/*!< common msg header */
++	u8		rxf_id;
++	u8		rsvd[3];
++};
++
++/**
++ * @brief bfi_ll_q_stop_req is used by:
++ *	BFI_LL_H2I_TXQ_STOP_REQ
++ *	BFI_LL_H2I_RXQ_STOP_REQ
++ */
++struct bfi_ll_q_stop_req {
++	struct bfi_mhdr mh;		/*!< common msg header */
++	u32	q_id_mask[2];	/* !< bit-mask for queue ids */
++};
++
++/**
++ * @brief bfi_ll_stats_req is used by:
++ *    BFI_LL_H2I_STATS_GET_REQ
++ *    BFI_LL_H2I_STATS_CLEAR_REQ
++ */
++struct bfi_ll_stats_req {
++	struct bfi_mhdr mh;	/*!< common msg header */
++	u16 stats_mask;	/* !< bit-mask for non-function statistics */
++	u8	rsvd[2];
++	u32 rxf_id_mask[2];	/* !< bit-mask for RxF Statistics */
++	u32 txf_id_mask[2];	/* !< bit-mask for TxF Statistics */
++	union bfi_addr_u  host_buffer;	/* !< where statistics are returned */
++};
++
++/**
++ * @brief defines for "stats_mask" above.
++ */
++#define BFI_LL_STATS_MAC	(1 << 0)	/* !< MAC Statistics */
++#define BFI_LL_STATS_BPC	(1 << 1)	/* !< Pause Stats from BPC */
++#define BFI_LL_STATS_RAD	(1 << 2)	/* !< Rx Admission Statistics */
++#define BFI_LL_STATS_RX_FC	(1 << 3)	/* !< Rx FC Stats from RxA */
++#define BFI_LL_STATS_TX_FC	(1 << 4)	/* !< Tx FC Stats from TxA */
++
++#define BFI_LL_STATS_ALL	0x1f
++
++/**
++ * @brief bfi_ll_port_admin_req
++ */
++struct bfi_ll_port_admin_req {
++	struct bfi_mhdr mh;		/*!< common msg header */
++	u8		 up;
++	u8		 rsvd[3];
++};
++
++/**
++ * @brief bfi_ll_rxf_req is used by:
++ *      BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ
++ *      BFI_LL_H2I_RXF_DEFAULT_SET_REQ
++ */
++struct bfi_ll_rxf_req {
++	struct bfi_mhdr mh;		/*!< common msg header */
++	u8		rxf_id;
++	u8		enable;
++	u8		rsvd[2];
++};
++
++/**
++ * @brief bfi_ll_rxf_multi_req is used by:
++ *	BFI_LL_H2I_RX_REQ
++ */
++struct bfi_ll_rxf_multi_req {
++	struct bfi_mhdr mh;		/*!< common msg header */
++	u32	rxf_id_mask[2];
++	u8		enable;
++	u8		rsvd[3];
++};
++
++/**
++ * @brief enum for Loopback opmodes
++ */
++enum {
++	BFI_LL_DIAG_LB_OPMODE_EXT = 0,
++	BFI_LL_DIAG_LB_OPMODE_CBL = 1,
++};
++
++/**
++ * @brief bfi_ll_set_pause_req is used by:
++ *	BFI_LL_H2I_SET_PAUSE_REQ
++ */
++struct bfi_ll_set_pause_req {
++	struct bfi_mhdr mh;
++	u8		tx_pause; /* 1 = enable, 0 = disable */
++	u8		rx_pause; /* 1 = enable, 0 = disable */
++	u8		rsvd[2];
++};
++
++/**
++ * @brief bfi_ll_mtu_info_req is used by:
++ *	BFI_LL_H2I_MTU_INFO_REQ
++ */
++struct bfi_ll_mtu_info_req {
++	struct bfi_mhdr mh;
++	u16	mtu;
++	u8		rsvd[2];
++};
++
++/**
++ * @brief
++ *	  Response header format, used by all responses and
++ *	  asynchronous notifications
++ */
++struct bfi_ll_rsp {
++	struct bfi_mhdr mh;		/*!< common msg header */
++	u8		error;
++	u8		rsvd[3];
++};
++
++/**
++ * @brief bfi_ll_aen is used by:
++ *	BFI_LL_I2H_LINK_DOWN_AEN
++ *	BFI_LL_I2H_LINK_UP_AEN
++ */
++struct bfi_ll_aen {
++	struct bfi_mhdr mh;		/*!< common msg header */
++	u32	reason;
++	u8		cee_linkup;
++	u8		prio_map;    /*!< LL priority bit-map */
++	u8		rsvd[2];
++};
++
++/**
++ * @brief
++ *	The following error codes can be returned by the mbox commands
++ */
++enum {
++	BFI_LL_CMD_OK		= 0,
++	BFI_LL_CMD_FAIL		= 1,
++	BFI_LL_CMD_DUP_ENTRY	= 2,	/* !< Duplicate entry in CAM */
++	BFI_LL_CMD_CAM_FULL	= 3,	/* !< CAM is full */
++	BFI_LL_CMD_NOT_OWNER	= 4,	/* !< Not permitted, because not owner */
++	BFI_LL_CMD_NOT_EXEC	= 5,	/* !< Was not sent to f/w at all */
++	BFI_LL_CMD_WAITING	= 6,	/* !< Waiting for completion (VMware) */
++	BFI_LL_CMD_PORT_DISABLED = 7,	/* !< port in disabled state */
++};
++
++/* Statistics */
++#define BFI_LL_TXF_ID_MAX  	64
++#define BFI_LL_RXF_ID_MAX  	64
++
++/* TxF Frame Statistics */
++struct bfi_ll_stats_txf {
++	u64 ucast_octets;
++	u64 ucast;
++	u64 ucast_vlan;
++
++	u64 mcast_octets;
++	u64 mcast;
++	u64 mcast_vlan;
++
++	u64 bcast_octets;
++	u64 bcast;
++	u64 bcast_vlan;
++
++	u64 errors;
++	u64 filter_vlan;      /* frames filtered due to VLAN */
++	u64 filter_mac_sa;    /* frames filtered due to SA check */
++};
++
++/* RxF Frame Statistics */
++struct bfi_ll_stats_rxf {
++	u64 ucast_octets;
++	u64 ucast;
++	u64 ucast_vlan;
++
++	u64 mcast_octets;
++	u64 mcast;
++	u64 mcast_vlan;
++
++	u64 bcast_octets;
++	u64 bcast;
++	u64 bcast_vlan;
++	u64 frame_drops;
++};
++
++/* FC Tx Frame Statistics */
++struct bfi_ll_stats_fc_tx {
++	u64 txf_ucast_octets;
++	u64 txf_ucast;
++	u64 txf_ucast_vlan;
++
++	u64 txf_mcast_octets;
++	u64 txf_mcast;
++	u64 txf_mcast_vlan;
++
++	u64 txf_bcast_octets;
++	u64 txf_bcast;
++	u64 txf_bcast_vlan;
++
++	u64 txf_parity_errors;
++	u64 txf_timeout;
++	u64 txf_fid_parity_errors;
++};
++
++/* FC Rx Frame Statistics */
++struct bfi_ll_stats_fc_rx {
++	u64 rxf_ucast_octets;
++	u64 rxf_ucast;
++	u64 rxf_ucast_vlan;
++
++	u64 rxf_mcast_octets;
++	u64 rxf_mcast;
++	u64 rxf_mcast_vlan;
++
++	u64 rxf_bcast_octets;
++	u64 rxf_bcast;
++	u64 rxf_bcast_vlan;
++};
++
++/* RAD Frame Statistics */
++struct bfi_ll_stats_rad {
++	u64 rx_frames;
++	u64 rx_octets;
++	u64 rx_vlan_frames;
++
++	u64 rx_ucast;
++	u64 rx_ucast_octets;
++	u64 rx_ucast_vlan;
++
++	u64 rx_mcast;
++	u64 rx_mcast_octets;
++	u64 rx_mcast_vlan;
++
++	u64 rx_bcast;
++	u64 rx_bcast_octets;
++	u64 rx_bcast_vlan;
++
++	u64 rx_drops;
++};
++
++/* BPC Tx Registers */
++struct bfi_ll_stats_bpc {
++	/* transmit stats */
++	u64 tx_pause[8];
++	u64 tx_zero_pause[8];	/*!< Pause cancellation */
++	u64 tx_first_pause[8];	/*!< Pause initiation rather than retention */
++
++	/* receive stats */
++	u64 rx_pause[8];
++	u64 rx_zero_pause[8];	/*!< Pause cancellation */
++	u64 rx_first_pause[8];	/*!< Pause initiation rather than retention */
++};
++
++/* MAC Rx Statistics */
++struct bfi_ll_stats_mac {
++	u64 frame_64;		/* both rx and tx counter */
++	u64 frame_65_127;		/* both rx and tx counter */
++	u64 frame_128_255;		/* both rx and tx counter */
++	u64 frame_256_511;		/* both rx and tx counter */
++	u64 frame_512_1023;	/* both rx and tx counter */
++	u64 frame_1024_1518;	/* both rx and tx counter */
++	u64 frame_1519_1522;	/* both rx and tx counter */
++
++	/* receive stats */
++	u64 rx_bytes;
++	u64 rx_packets;
++	u64 rx_fcs_error;
++	u64 rx_multicast;
++	u64 rx_broadcast;
++	u64 rx_control_frames;
++	u64 rx_pause;
++	u64 rx_unknown_opcode;
++	u64 rx_alignment_error;
++	u64 rx_frame_length_error;
++	u64 rx_code_error;
++	u64 rx_carrier_sense_error;
++	u64 rx_undersize;
++	u64 rx_oversize;
++	u64 rx_fragments;
++	u64 rx_jabber;
++	u64 rx_drop;
++
++	/* transmit stats */
++	u64 tx_bytes;
++	u64 tx_packets;
++	u64 tx_multicast;
++	u64 tx_broadcast;
++	u64 tx_pause;
++	u64 tx_deferral;
++	u64 tx_excessive_deferral;
++	u64 tx_single_collision;
++	u64 tx_muliple_collision;
++	u64 tx_late_collision;
++	u64 tx_excessive_collision;
++	u64 tx_total_collision;
++	u64 tx_pause_honored;
++	u64 tx_drop;
++	u64 tx_jabber;
++	u64 tx_fcs_error;
++	u64 tx_control_frame;
++	u64 tx_oversize;
++	u64 tx_undersize;
++	u64 tx_fragments;
++};
++
++/* Complete statistics */
++struct bfi_ll_stats {
++	struct bfi_ll_stats_mac		mac_stats;
++	struct bfi_ll_stats_bpc		bpc_stats;
++	struct bfi_ll_stats_rad		rad_stats;
++	struct bfi_ll_stats_fc_rx	fc_rx_stats;
++	struct bfi_ll_stats_fc_tx	fc_tx_stats;
++	struct bfi_ll_stats_rxf	rxf_stats[BFI_LL_RXF_ID_MAX];
++	struct bfi_ll_stats_txf	txf_stats[BFI_LL_TXF_ID_MAX];
++};
++
++#pragma pack()
++
++#endif  /* __BFI_LL_H__ */
+--- /dev/null
++++ b/drivers/net/bna/bna.h
+@@ -0,0 +1,654 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++#ifndef __BNA_H__
++#define __BNA_H__
++
++#include "bfa_wc.h"
++#include "bfa_ioc.h"
++#include "cna.h"
++#include "bfi_ll.h"
++#include "bna_types.h"
++
++extern u32 bna_dim_vector[][BNA_BIAS_T_MAX];
++extern u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
++
++/**
++ *
++ *  Macros and constants
++ *
++ */
++
++#define BNA_IOC_TIMER_FREQ		200
++
++/* Log string size */
++#define BNA_MESSAGE_SIZE		256
++
++#define bna_device_timer(_dev)		bfa_timer_beat(&((_dev)->timer_mod))
++
++/* MBOX API for PORT, TX, RX */
++#define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg)		\
++do {									\
++	memcpy(&((_qe)->cmd.msg[0]), (_cmd), (_cmd_len));	\
++	(_qe)->cbfn = (_cbfn);						\
++	(_qe)->cbarg = (_cbarg);					\
++} while (0)
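++/*
++ * Usage sketch (illustrative; bfi_h2i_set() is assumed from bfi.h): a
++ * command is staged into a queue element and then handed to the mailbox
++ * module:
++ *	struct bfi_ll_port_admin_req req;
++ *	bfi_h2i_set(req.mh, BFI_MC_LL, BFI_LL_H2I_PORT_ADMIN_REQ, 0);
++ *	bna_mbox_qe_fill(&qe, &req, sizeof(req), cbfn, cbarg);
++ *	bna_mbox_send(bna, &qe);
++ */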
++
++#define bna_is_small_rxq(rcb) ((rcb)->id == 1)
++
++#define BNA_MAC_IS_EQUAL(_mac1, _mac2)					\
++	(!memcmp((_mac1), (_mac2), sizeof(mac_t)))
++
++#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)
++
++#define BNA_TO_POWER_OF_2(x)						\
++do {									\
++	int _shift = 0;							\
++	while ((x) && (x) != 1) {					\
++		(x) >>= 1;						\
++		_shift++;						\
++	}								\
++	(x) <<= _shift;							\
++} while (0)
++
++#define BNA_TO_POWER_OF_2_HIGH(x)					\
++do {									\
++	int n = 1;							\
++	while (n < (x))							\
++		n <<= 1;						\
++	(x) = n;							\
++} while (0)
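++/*
++ * Worked example (illustrative): BNA_TO_POWER_OF_2() rounds down and
++ * BNA_TO_POWER_OF_2_HIGH() rounds up, so for x = 12 the former leaves
++ * x == 8 and the latter x == 16, while BNA_POWER_OF_2(8) is true and
++ * BNA_POWER_OF_2(12) is false.
++ */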
++
++/*
++ * input : _addr-> os dma addr in host endian format,
++ * output : _bna_dma_addr-> pointer to hw dma addr
++ */
++#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr)				\
++do {									\
++	u64 tmp_addr =							\
++		cpu_to_be64((u64)(_addr));				\
++	(_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
++	(_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
++} while (0)
++
++/*
++ * input : _bna_dma_addr-> pointer to hw dma addr
++ * output : _addr-> os dma addr in host endian format
++ */
++#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr)			\
++do {								\
++	(_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32)		\
++		| ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff));	\
++} while (0)
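++/*
++ * Round-trip sketch (illustrative): the two macros are inverses, e.g.
++ *	struct bna_dma_addr hw;
++ *	u64 addr = 0x123456789ULL, out;
++ *	BNA_SET_DMA_ADDR(addr, &hw);
++ *	BNA_GET_DMA_ADDR(&hw, out);	now out == addr
++ */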
++
++#define	containing_rec(addr, type, field)				\
++	((type *)((unsigned char *)(addr) - 				\
++	(unsigned char *)(&((type *)0)->field)))
++
++#define BNA_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)
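++/*
++ * One work item carries up to four vectors, rounded up:
++ * e.g. BNA_TXQ_WI_NEEDED(5) == 2 (illustrative).
++ */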
++
++/* TxQ element is 64 bytes */
++#define BNA_TXQ_PAGE_INDEX_MAX		(PAGE_SIZE >> 6)
++#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT	(PAGE_SHIFT - 6)
++
++#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
++{									\
++	unsigned int page_index;	/* index within a page */	\
++	void *page_addr;						\
++	page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); 		\
++	(_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); 	\
++	page_addr = (_qpt_ptr)[((_qe_idx) >>  BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
++	(_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
++}
++
++/* RxQ element is 8 bytes */
++#define BNA_RXQ_PAGE_INDEX_MAX		(PAGE_SIZE >> 3)
++#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT	(PAGE_SHIFT - 3)
++
++#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
++{									\
++	unsigned int page_index;	/* index within a page */	\
++	void *page_addr;						\
++	page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1);		\
++	(_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index);	\
++	page_addr = (_qpt_ptr)[((_qe_idx) >>				\
++				BNA_RXQ_PAGE_INDEX_MAX_SHIFT)];		\
++	(_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
++}
++
++/* CQ element is 16 bytes */
++#define BNA_CQ_PAGE_INDEX_MAX		(PAGE_SIZE >> 4)
++#define BNA_CQ_PAGE_INDEX_MAX_SHIFT	(PAGE_SHIFT - 4)
++
++#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range)	\
++{									\
++	unsigned int page_index;	  /* index within a page */	\
++	void *page_addr;						\
++									\
++	page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1);		\
++	(_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index);		\
++	page_addr = (_qpt_ptr)[((_qe_idx) >>				\
++				    BNA_CQ_PAGE_INDEX_MAX_SHIFT)];	\
++	(_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
++}
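++/*
++ * Worked example (assuming 4 KB pages, so 64 TxQ entries per page): for
++ * _qe_idx = 70, the *_QPGE_PTR_GET() macros resolve page 70 >> 6 == 1,
++ * page_index 70 & 63 == 6, and a contiguous range of 64 - 6 == 58
++ * entries before the next page boundary.
++ */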
++
++#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base)			\
++	(&((_cast *)(_q_base))[(_qe_idx)])
++
++#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))
++
++#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth)			\
++	((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))
++
++#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth)		\
++	(((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
++
++#define BNA_QE_FREE_CNT(_q_ptr, _q_depth)				\
++	(((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) &	\
++	 ((_q_depth) - 1))
++
++#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth)				\
++	((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) &	\
++	 ((_q_depth) - 1))
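++/*
++ * Worked example (illustrative; _q_depth must be a power of 2): with
++ * _q_depth = 64, producer_index = 5 and consumer_index = 60,
++ *	BNA_QE_IN_USE_CNT() == (5 - 60) & 63 == 9
++ *	BNA_QE_FREE_CNT() == (60 - 5 - 1) & 63 == 54
++ * i.e. one slot is always kept empty to distinguish full from empty.
++ */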
++
++#define BNA_Q_GET_CI(_q_ptr)		((_q_ptr)->q.consumer_index)
++
++#define BNA_Q_GET_PI(_q_ptr)		((_q_ptr)->q.producer_index)
++
++#define BNA_Q_PI_ADD(_q_ptr, _num)					\
++	(_q_ptr)->q.producer_index =					\
++		(((_q_ptr)->q.producer_index + (_num)) &		\
++		((_q_ptr)->q.q_depth - 1))
++
++#define BNA_Q_CI_ADD(_q_ptr, _num) 					\
++	(_q_ptr)->q.consumer_index =					\
++		(((_q_ptr)->q.consumer_index + (_num))  		\
++		& ((_q_ptr)->q.q_depth - 1))
++
++#define BNA_Q_FREE_COUNT(_q_ptr)					\
++	(BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))
++
++#define BNA_Q_IN_USE_COUNT(_q_ptr)  					\
++	(BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
++
++/* These macros build the data portion of the TxQ/RxQ doorbell */
++#define BNA_DOORBELL_Q_PRD_IDX(_pi) 	(0x80000000 | (_pi))
++#define BNA_DOORBELL_Q_STOP		(0x40000000)
++
++/* These macros build the data portion of the IB doorbell */
++#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
++	(0x80000000 | ((_timeout) << 16) | (_events))
++#define BNA_DOORBELL_IB_INT_DISABLE 	(0x40000000)
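++/*
++ * Example (illustrative): ringing producer index 0x12 writes
++ * BNA_DOORBELL_Q_PRD_IDX(0x12) == 0x80000012, and acking 3 events with
++ * a coalescing timeout of 5 writes
++ * BNA_DOORBELL_IB_INT_ACK(5, 3) == 0x80050003.
++ */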
++
++/* Set the coalescing timer for the given ib */
++#define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer)		\
++	((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0))
++
++/* Acks 'events' # of events for a given ib */
++#define bna_ib_ack(_i_dbell, _events)					\
++	(writel(((_i_dbell)->doorbell_ack | (_events)), \
++		(_i_dbell)->doorbell_addr))
++
++#define bna_txq_prod_indx_doorbell(_tcb)				\
++	(writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index), \
++		(_tcb)->q_dbell))
++
++#define bna_rxq_prod_indx_doorbell(_rcb)				\
++	(writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \
++		(_rcb)->q_dbell))
++
++#define BNA_LARGE_PKT_SIZE		1000
++
++#define BNA_UPDATE_PKT_CNT(_pkt, _len)					\
++do {									\
++	if ((_len) > BNA_LARGE_PKT_SIZE) {				\
++		(_pkt)->large_pkt_cnt++;				\
++	} else {							\
++		(_pkt)->small_pkt_cnt++;				\
++	}								\
++} while (0)
++
++#define	call_rxf_stop_cbfn(rxf, status)					\
++do {									\
++	if ((rxf)->stop_cbfn) {						\
++		(*(rxf)->stop_cbfn)((rxf)->stop_cbarg, (status));	\
++		(rxf)->stop_cbfn = NULL;				\
++		(rxf)->stop_cbarg = NULL;				\
++	}								\
++} while (0)
++
++#define	call_rxf_start_cbfn(rxf, status)				\
++do {									\
++	if ((rxf)->start_cbfn) {					\
++		(*(rxf)->start_cbfn)((rxf)->start_cbarg, (status));	\
++		(rxf)->start_cbfn = NULL;				\
++		(rxf)->start_cbarg = NULL;				\
++	}								\
++} while (0)
++
++#define	call_rxf_cam_fltr_cbfn(rxf, status)				\
++do {									\
++	if ((rxf)->cam_fltr_cbfn) {					\
++		(*(rxf)->cam_fltr_cbfn)((rxf)->cam_fltr_cbarg, rxf->rx,	\
++					(status));			\
++		(rxf)->cam_fltr_cbfn = NULL;				\
++		(rxf)->cam_fltr_cbarg = NULL;				\
++	}								\
++} while (0)
++
++#define	call_rxf_pause_cbfn(rxf, status)				\
++do {									\
++	if ((rxf)->oper_state_cbfn) {					\
++		(*(rxf)->oper_state_cbfn)((rxf)->oper_state_cbarg, rxf->rx,\
++					(status));			\
++		(rxf)->rxf_flags &= ~BNA_RXF_FL_OPERSTATE_CHANGED;	\
++		(rxf)->oper_state_cbfn = NULL;				\
++		(rxf)->oper_state_cbarg = NULL;				\
++	}								\
++} while (0)
++
++#define	call_rxf_resume_cbfn(rxf, status) call_rxf_pause_cbfn(rxf, status)
++
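++/*
++ * Rx-mode helpers: xxx_enable() sets a BNA_RXMODE_* flag in both mode
++ * and bitmask, xxx_disable() keeps it in bitmask but clears it from
++ * mode, and xxx_inactive() clears it from both; the is_xxx_*() tests
++ * check bitmask first.
++ */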
++#define is_xxx_enable(mode, bitmask, xxx)				\
++	(((bitmask) & (xxx)) && ((mode) & (xxx)))
++
++#define is_xxx_disable(mode, bitmask, xxx)				\
++	(((bitmask) & (xxx)) && !((mode) & (xxx)))
++
++#define xxx_enable(mode, bitmask, xxx)					\
++do {									\
++	bitmask |= xxx;							\
++	mode |= xxx;							\
++} while (0)
++
++#define xxx_disable(mode, bitmask, xxx)					\
++do {									\
++	bitmask |= xxx;							\
++	mode &= ~xxx;							\
++} while (0)
++
++#define xxx_inactive(mode, bitmask, xxx)				\
++do {									\
++	bitmask &= ~xxx;						\
++	mode &= ~xxx;							\
++} while (0)
++
++#define is_promisc_enable(mode, bitmask)				\
++	is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
++
++#define is_promisc_disable(mode, bitmask)				\
++	is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
++
++#define promisc_enable(mode, bitmask)					\
++	xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
++
++#define promisc_disable(mode, bitmask)					\
++	xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
++
++#define promisc_inactive(mode, bitmask)					\
++	xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)
++
++#define is_default_enable(mode, bitmask)				\
++	is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
++
++#define is_default_disable(mode, bitmask)				\
++	is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
++
++#define default_enable(mode, bitmask)					\
++	xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
++
++#define default_disable(mode, bitmask)					\
++	xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
++
++#define default_inactive(mode, bitmask)					\
++	xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)
++
++#define is_allmulti_enable(mode, bitmask)				\
++	is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
++
++#define is_allmulti_disable(mode, bitmask)				\
++	is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
++
++#define allmulti_enable(mode, bitmask)					\
++	xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
++
++#define allmulti_disable(mode, bitmask)					\
++	xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
++
++#define allmulti_inactive(mode, bitmask)				\
++	xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)
++
++#define	GET_RXQS(rxp, q0, q1)	do {					\
++	switch ((rxp)->type) {						\
++	case BNA_RXP_SINGLE:						\
++		(q0) = rxp->rxq.single.only;				\
++		(q1) = NULL;						\
++		break;							\
++	case BNA_RXP_SLR:						\
++		(q0) = rxp->rxq.slr.large;				\
++		(q1) = rxp->rxq.slr.small;				\
++		break;							\
++	case BNA_RXP_HDS:						\
++		(q0) = rxp->rxq.hds.data;				\
++		(q1) = rxp->rxq.hds.hdr;				\
++		break;							\
++	}								\
++} while (0)
++
++/**
++ *
++ * Function prototypes
++ *
++ */
++
++/**
++ * BNA
++ */
++
++/* Internal APIs */
++void bna_adv_res_req(struct bna_res_info *res_info);
++
++/* APIs for BNAD */
++void bna_res_req(struct bna_res_info *res_info);
++void bna_init(struct bna *bna, struct bnad *bnad,
++			struct bfa_pcidev *pcidev,
++			struct bna_res_info *res_info);
++void bna_uninit(struct bna *bna);
++void bna_stats_get(struct bna *bna);
++void bna_stats_clr(struct bna *bna);
++void bna_get_perm_mac(struct bna *bna, u8 *mac);
++
++/* APIs for Rx */
++int bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size);
++
++/* APIs for RxF */
++struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
++void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
++			  struct bna_mac *mac);
++struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
++void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
++			  struct bna_mac *mac);
++struct bna_rit_segment *
++bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size);
++void bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
++			struct bna_rit_segment *seg);
++
++/**
++ * DEVICE
++ */
++
++/* Internal APIs */
++void bna_adv_device_init(struct bna_device *device, struct bna *bna,
++			struct bna_res_info *res_info);
++
++/* APIs for BNA */
++void bna_device_init(struct bna_device *device, struct bna *bna,
++		     struct bna_res_info *res_info);
++void bna_device_uninit(struct bna_device *device);
++void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
++int bna_device_status_get(struct bna_device *device);
++int bna_device_state_get(struct bna_device *device);
++
++/* APIs for BNAD */
++void bna_device_enable(struct bna_device *device);
++void bna_device_disable(struct bna_device *device,
++			enum bna_cleanup_type type);
++
++/**
++ * MBOX
++ */
++
++/* APIs for DEVICE */
++void bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna);
++void bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod);
++void bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod);
++void bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod);
++
++/* APIs for PORT, TX, RX */
++void bna_mbox_handler(struct bna *bna, u32 intr_status);
++void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
++
++/**
++ * PORT
++ */
++
++/* APIs for BNA */
++void bna_port_init(struct bna_port *port, struct bna *bna);
++void bna_port_uninit(struct bna_port *port);
++int bna_port_state_get(struct bna_port *port);
++int bna_llport_state_get(struct bna_llport *llport);
++
++/* APIs for DEVICE */
++void bna_port_start(struct bna_port *port);
++void bna_port_stop(struct bna_port *port);
++void bna_port_fail(struct bna_port *port);
++
++/* API for RX */
++int bna_port_mtu_get(struct bna_port *port);
++void bna_llport_admin_up(struct bna_llport *llport);
++void bna_llport_admin_down(struct bna_llport *llport);
++
++/* API for BNAD */
++void bna_port_enable(struct bna_port *port);
++void bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
++		      void (*cbfn)(void *, enum bna_cb_status));
++void bna_port_pause_config(struct bna_port *port,
++			   struct bna_pause_config *pause_config,
++			   void (*cbfn)(struct bnad *, enum bna_cb_status));
++void bna_port_mtu_set(struct bna_port *port, int mtu,
++		      void (*cbfn)(struct bnad *, enum bna_cb_status));
++void bna_port_mac_get(struct bna_port *port, mac_t *mac);
++void bna_port_type_set(struct bna_port *port, enum bna_port_type type);
++void bna_port_linkcbfn_set(struct bna_port *port,
++			   void (*linkcbfn)(struct bnad *,
++					    enum bna_link_status));
++void bna_port_admin_up(struct bna_port *port);
++void bna_port_admin_down(struct bna_port *port);
++
++/* Callbacks for TX, RX */
++void bna_port_cb_tx_stopped(struct bna_port *port,
++			    enum bna_cb_status status);
++void bna_port_cb_rx_stopped(struct bna_port *port,
++			    enum bna_cb_status status);
++
++/* Callbacks for MBOX */
++void bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
++			 int status);
++void bna_port_cb_link_down(struct bna_port *port, int status);
++
++/**
++ * IB
++ */
++
++/* APIs for BNA */
++void bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
++		     struct bna_res_info *res_info);
++void bna_ib_mod_uninit(struct bna_ib_mod *ib_mod);
++
++/* APIs for TX, RX */
++struct bna_ib *bna_ib_get(struct bna_ib_mod *ib_mod,
++			    enum bna_intr_type intr_type, int vector);
++void bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib);
++int bna_ib_reserve_idx(struct bna_ib *ib);
++void bna_ib_release_idx(struct bna_ib *ib, int idx);
++int bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config);
++void bna_ib_start(struct bna_ib *ib);
++void bna_ib_stop(struct bna_ib *ib);
++void bna_ib_fail(struct bna_ib *ib);
++void bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo);
++
++/**
++ * TX MODULE AND TX
++ */
++
++/* Internal APIs */
++void bna_tx_prio_changed(struct bna_tx *tx, int prio);
++
++/* APIs for BNA */
++void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
++		     struct bna_res_info *res_info);
++void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);
++int bna_tx_state_get(struct bna_tx *tx);
++
++/* APIs for PORT */
++void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
++void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
++void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);
++void bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio);
++void bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link);
++
++/* APIs for BNAD */
++void bna_tx_res_req(int num_txq, int txq_depth,
++		    struct bna_res_info *res_info);
++struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
++			       struct bna_tx_config *tx_cfg,
++			       struct bna_tx_event_cbfn *tx_cbfn,
++			       struct bna_res_info *res_info, void *priv);
++void bna_tx_destroy(struct bna_tx *tx);
++void bna_tx_enable(struct bna_tx *tx);
++void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
++		    void (*cbfn)(void *, struct bna_tx *,
++				 enum bna_cb_status));
++enum bna_cb_status
++bna_tx_prio_set(struct bna_tx *tx, int prio,
++		void (*cbfn)(struct bnad *, struct bna_tx *,
++			     enum bna_cb_status));
++void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
++
++/**
++ * RX MODULE, RX, RXF
++ */
++
++/* Internal APIs */
++void rxf_cb_cam_fltr_mbox_cmd(void *arg, int status);
++void rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
++		const struct bna_mac *mac_addr);
++void __rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status);
++void bna_rxf_adv_init(struct bna_rxf *rxf,
++		struct bna_rx *rx,
++		struct bna_rx_config *q_config);
++int rxf_process_packet_filter_ucast(struct bna_rxf *rxf);
++int rxf_process_packet_filter_promisc(struct bna_rxf *rxf);
++int rxf_process_packet_filter_default(struct bna_rxf *rxf);
++int rxf_process_packet_filter_allmulti(struct bna_rxf *rxf);
++int rxf_clear_packet_filter_ucast(struct bna_rxf *rxf);
++int rxf_clear_packet_filter_promisc(struct bna_rxf *rxf);
++int rxf_clear_packet_filter_default(struct bna_rxf *rxf);
++int rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf);
++void rxf_reset_packet_filter_ucast(struct bna_rxf *rxf);
++void rxf_reset_packet_filter_promisc(struct bna_rxf *rxf);
++void rxf_reset_packet_filter_default(struct bna_rxf *rxf);
++void rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf);
++
++/* APIs for BNA */
++void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
++		     struct bna_res_info *res_info);
++void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);
++int bna_rx_state_get(struct bna_rx *rx);
++int bna_rxf_state_get(struct bna_rxf *rxf);
++
++/* APIs for PORT */
++void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
++void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
++void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);
++
++/* APIs for BNAD */
++void bna_rx_res_req(struct bna_rx_config *rx_config,
++		    struct bna_res_info *res_info);
++struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
++			       struct bna_rx_config *rx_cfg,
++			       struct bna_rx_event_cbfn *rx_cbfn,
++			       struct bna_res_info *res_info, void *priv);
++void bna_rx_destroy(struct bna_rx *rx);
++void bna_rx_enable(struct bna_rx *rx);
++void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
++		    void (*cbfn)(void *, struct bna_rx *,
++				 enum bna_cb_status));
++void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
++void bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX]);
++void bna_rx_dim_update(struct bna_ccb *ccb);
++enum bna_cb_status
++bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
++		 void (*cbfn)(struct bnad *, struct bna_rx *,
++			      enum bna_cb_status));
++enum bna_cb_status
++bna_rx_ucast_add(struct bna_rx *rx, u8 *ucmac,
++		 void (*cbfn)(struct bnad *, struct bna_rx *,
++			      enum bna_cb_status));
++enum bna_cb_status
++bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
++		 void (*cbfn)(struct bnad *, struct bna_rx *,
++			      enum bna_cb_status));
++enum bna_cb_status
++bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
++		 void (*cbfn)(struct bnad *, struct bna_rx *,
++			      enum bna_cb_status));
++enum bna_cb_status
++bna_rx_mcast_del(struct bna_rx *rx, u8 *mcmac,
++		 void (*cbfn)(struct bnad *, struct bna_rx *,
++			      enum bna_cb_status));
++enum bna_cb_status
++bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
++		     void (*cbfn)(struct bnad *, struct bna_rx *,
++				  enum bna_cb_status));
++void bna_rx_mcast_delall(struct bna_rx *rx,
++			 void (*cbfn)(struct bnad *, struct bna_rx *,
++				      enum bna_cb_status));
++enum bna_cb_status
++bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
++		enum bna_rxmode bitmask,
++		void (*cbfn)(struct bnad *, struct bna_rx *,
++			     enum bna_cb_status));
++void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
++void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
++void bna_rx_vlanfilter_enable(struct bna_rx *rx);
++void bna_rx_vlanfilter_disable(struct bna_rx *rx);
++void bna_rx_rss_enable(struct bna_rx *rx);
++void bna_rx_rss_disable(struct bna_rx *rx);
++void bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config);
++void bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors,
++			int nvectors);
++void bna_rx_hds_enable(struct bna_rx *rx, struct bna_rxf_hds *hds_config,
++		       void (*cbfn)(struct bnad *, struct bna_rx *,
++				    enum bna_cb_status));
++void bna_rx_hds_disable(struct bna_rx *rx,
++			void (*cbfn)(struct bnad *, struct bna_rx *,
++				     enum bna_cb_status));
++void bna_rx_receive_pause(struct bna_rx *rx,
++			  void (*cbfn)(struct bnad *, struct bna_rx *,
++				       enum bna_cb_status));
++void bna_rx_receive_resume(struct bna_rx *rx,
++			   void (*cbfn)(struct bnad *, struct bna_rx *,
++					enum bna_cb_status));
++
++/* RxF APIs for RX */
++void bna_rxf_start(struct bna_rxf *rxf);
++void bna_rxf_stop(struct bna_rxf *rxf);
++void bna_rxf_fail(struct bna_rxf *rxf);
++void bna_rxf_init(struct bna_rxf *rxf, struct bna_rx *rx,
++		  struct bna_rx_config *q_config);
++void bna_rxf_uninit(struct bna_rxf *rxf);
++
++/* Callback from RXF to RX */
++void bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status);
++void bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status);
++
++/**
++ * BNAD
++ */
++
++/* Callbacks for BNA */
++void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
++		       struct bna_stats *stats);
++void bnad_cb_stats_clr(struct bnad *bnad);
++
++/* Callbacks for DEVICE */
++void bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status);
++void bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status);
++void bnad_cb_device_enable_mbox_intr(struct bnad *bnad);
++void bnad_cb_device_disable_mbox_intr(struct bnad *bnad);
++
++/* Callbacks for port */
++void bnad_cb_port_link_status(struct bnad *bnad,
++			      enum bna_link_status status);
++
++#endif  /* __BNA_H__ */
+--- /dev/null
++++ b/drivers/net/bna/bna_ctrl.c
+@@ -0,0 +1,3626 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++#include "bna.h"
++#include "bfa_sm.h"
++#include "bfa_wc.h"
++
++/**
++ * MBOX
++ */
++static int
++bna_is_aen(u8 msg_id)
++{
++	return (msg_id == BFI_LL_I2H_LINK_DOWN_AEN ||
++		msg_id == BFI_LL_I2H_LINK_UP_AEN);
++}
++
++static void
++bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
++{
++	struct bfi_ll_aen *aen = (struct bfi_ll_aen *)(msg);
++
++	switch (aen->mh.msg_id) {
++	case BFI_LL_I2H_LINK_UP_AEN:
++		bna_port_cb_link_up(&bna->port, aen, aen->reason);
++		break;
++	case BFI_LL_I2H_LINK_DOWN_AEN:
++		bna_port_cb_link_down(&bna->port, aen->reason);
++		break;
++	default:
++		break;
++	}
++}
++
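++/*
++ * Dispatch a mailbox message from the IOC: link AENs go straight to the
++ * port callbacks, while command responses are matched by message id and
++ * token against the head of posted_q before the completion callback
++ * runs and the next queued command, if any, is posted.
++ */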
++static void
++bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
++{
++	struct bna *bna = (struct bna *)(llarg);
++	struct bfi_ll_rsp *mb_rsp = (struct bfi_ll_rsp *)(msg);
++	struct bfi_mhdr *cmd_h, *rsp_h;
++	struct bna_mbox_qe *mb_qe = NULL;
++	int to_post = 0;
++	u8 aen = 0;
++	char message[BNA_MESSAGE_SIZE];
++
++	aen = bna_is_aen(mb_rsp->mh.msg_id);
++
++	if (!aen) {
++		mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
++		cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
++		rsp_h = (struct bfi_mhdr *)(&mb_rsp->mh);
++
++		if ((BFA_I2HM(cmd_h->msg_id) == rsp_h->msg_id) &&
++		    (cmd_h->mtag.i2htok == rsp_h->mtag.i2htok)) {
++			/* Remove the request from posted_q, update state  */
++			list_del(&mb_qe->qe);
++			bna->mbox_mod.msg_pending--;
++			if (list_empty(&bna->mbox_mod.posted_q))
++				bna->mbox_mod.state = BNA_MBOX_FREE;
++			else
++				to_post = 1;
++
++			/* Dispatch the cbfn */
++			if (mb_qe->cbfn)
++				mb_qe->cbfn(mb_qe->cbarg, mb_rsp->error);
++
++			/* Post the next entry, if needed */
++			if (to_post) {
++				mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
++				bfa_ioc_mbox_queue(&bna->device.ioc,
++							&mb_qe->cmd);
++			}
++		} else {
++			snprintf(message, BNA_MESSAGE_SIZE,
++				 "No matching rsp for [%d:%d:%d]\n",
++				 mb_rsp->mh.msg_class, mb_rsp->mh.msg_id,
++				 mb_rsp->mh.mtag.i2htok);
++			pr_info("%s", message);
++		}
++	} else {
++		bna_mbox_aen_callback(bna, msg);
++	}
++}
++
++void
++bna_err_handler(struct bna *bna, u32 intr_status)
++{
++	u32 init_halt;
++
++	if (intr_status & __HALT_STATUS_BITS) {
++		init_halt = readl(bna->device.ioc.ioc_regs.ll_halt);
++		init_halt &= ~__FW_INIT_HALT_P;
++		writel(init_halt, bna->device.ioc.ioc_regs.ll_halt);
++	}
++
++	bfa_ioc_error_isr(&bna->device.ioc);
++}
++
++void
++bna_mbox_handler(struct bna *bna, u32 intr_status)
++{
++	if (BNA_IS_ERR_INTR(intr_status)) {
++		bna_err_handler(bna, intr_status);
++		return;
++	}
++	if (BNA_IS_MBOX_INTR(intr_status))
++		bfa_ioc_mbox_isr(&bna->device.ioc);
++}
++
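++/*
++ * Queue a mailbox command: it is posted to the IOC immediately if the
++ * mailbox is free, otherwise it waits on posted_q until bna_ll_isr()
++ * consumes the outstanding response.
++ */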
++void
++bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
++{
++	struct bfi_mhdr *mh;
++
++	mh = (struct bfi_mhdr *)(&mbox_qe->cmd.msg[0]);
++
++	mh->mtag.i2htok = htons(bna->mbox_mod.msg_ctr);
++	bna->mbox_mod.msg_ctr++;
++	bna->mbox_mod.msg_pending++;
++	if (bna->mbox_mod.state == BNA_MBOX_FREE) {
++		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
++		bfa_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
++		bna->mbox_mod.state = BNA_MBOX_POSTED;
++	} else {
++		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
++	}
++}
++
++void
++bna_mbox_flush_q(struct bna *bna, struct list_head *q)
++{
++	struct bna_mbox_qe *mb_qe = NULL;
++	struct list_head *mb_q;
++	void (*cbfn)(void *arg, int status);
++	void *cbarg;
++
++	mb_q = &bna->mbox_mod.posted_q;
++
++	while (!list_empty(mb_q)) {
++		bfa_q_deq(mb_q, &mb_qe);
++		cbfn = mb_qe->cbfn;
++		cbarg = mb_qe->cbarg;
++		bfa_q_qe_init(mb_qe);
++		bna->mbox_mod.msg_pending--;
++
++		if (cbfn)
++			cbfn(cbarg, BNA_CB_NOT_EXEC);
++	}
++
++	bna->mbox_mod.state = BNA_MBOX_FREE;
++}
++
++void
++bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
++{
++}
++
++void
++bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
++{
++	bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
++}
++
++void
++bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
++{
++	bfa_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
++	mbox_mod->state = BNA_MBOX_FREE;
++	mbox_mod->msg_ctr = mbox_mod->msg_pending = 0;
++	INIT_LIST_HEAD(&mbox_mod->posted_q);
++	mbox_mod->bna = bna;
++}
++
++void
++bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
++{
++	mbox_mod->bna = NULL;
++}
++
++/**
++ * LLPORT
++ */
++#define call_llport_stop_cbfn(llport, status)\
++do {\
++	if ((llport)->stop_cbfn)\
++		(llport)->stop_cbfn(&(llport)->bna->port, status);\
++	(llport)->stop_cbfn = NULL;\
++} while (0)
++
++static void bna_fw_llport_up(struct bna_llport *llport);
++static void bna_fw_cb_llport_up(void *arg, int status);
++static void bna_fw_llport_down(struct bna_llport *llport);
++static void bna_fw_cb_llport_down(void *arg, int status);
++static void bna_llport_start(struct bna_llport *llport);
++static void bna_llport_stop(struct bna_llport *llport);
++static void bna_llport_fail(struct bna_llport *llport);
++
++enum bna_llport_event {
++	LLPORT_E_START			= 1,
++	LLPORT_E_STOP			= 2,
++	LLPORT_E_FAIL			= 3,
++	LLPORT_E_UP			= 4,
++	LLPORT_E_DOWN			= 5,
++	LLPORT_E_FWRESP_UP		= 6,
++	LLPORT_E_FWRESP_DOWN		= 7
++};
++
++enum bna_llport_state {
++	BNA_LLPORT_STOPPED		= 1,
++	BNA_LLPORT_DOWN			= 2,
++	BNA_LLPORT_UP_RESP_WAIT		= 3,
++	BNA_LLPORT_DOWN_RESP_WAIT	= 4,
++	BNA_LLPORT_UP			= 5,
++	BNA_LLPORT_LAST_RESP_WAIT 	= 6
++};
++
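++/**
++ * bfa_fsm_state_decl() presumably expands to prototypes for each
++ * state's entry function (*_entry) and event handler defined below;
++ * llport_sm_table maps the handlers back to enum bna_llport_state
++ * values for bna_llport_state_get()
++ */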
++bfa_fsm_state_decl(bna_llport, stopped, struct bna_llport,
++			enum bna_llport_event);
++bfa_fsm_state_decl(bna_llport, down, struct bna_llport,
++			enum bna_llport_event);
++bfa_fsm_state_decl(bna_llport, up_resp_wait, struct bna_llport,
++			enum bna_llport_event);
++bfa_fsm_state_decl(bna_llport, down_resp_wait, struct bna_llport,
++			enum bna_llport_event);
++bfa_fsm_state_decl(bna_llport, up, struct bna_llport,
++			enum bna_llport_event);
++bfa_fsm_state_decl(bna_llport, last_resp_wait, struct bna_llport,
++			enum bna_llport_event);
++
++static struct bfa_sm_table llport_sm_table[] = {
++	{BFA_SM(bna_llport_sm_stopped), BNA_LLPORT_STOPPED},
++	{BFA_SM(bna_llport_sm_down), BNA_LLPORT_DOWN},
++	{BFA_SM(bna_llport_sm_up_resp_wait), BNA_LLPORT_UP_RESP_WAIT},
++	{BFA_SM(bna_llport_sm_down_resp_wait), BNA_LLPORT_DOWN_RESP_WAIT},
++	{BFA_SM(bna_llport_sm_up), BNA_LLPORT_UP},
++	{BFA_SM(bna_llport_sm_last_resp_wait), BNA_LLPORT_LAST_RESP_WAIT}
++};
++
++static void
++bna_llport_sm_stopped_entry(struct bna_llport *llport)
++{
++	llport->bna->port.link_cbfn(llport->bna->bnad, BNA_LINK_DOWN);
++	call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
++}
++
++static void
++bna_llport_sm_stopped(struct bna_llport *llport,
++			enum bna_llport_event event)
++{
++	switch (event) {
++	case LLPORT_E_START:
++		bfa_fsm_set_state(llport, bna_llport_sm_down);
++		break;
++
++	case LLPORT_E_STOP:
++		call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
++		break;
++
++	case LLPORT_E_FAIL:
++		break;
++
++	case LLPORT_E_DOWN:
++		/* This event is received due to Rx objects failing */
++		/* No-op */
++		break;
++
++	case LLPORT_E_FWRESP_UP:
++	case LLPORT_E_FWRESP_DOWN:
++		/**
++		 * These events are received due to flushing of the mbox
++		 * when the device fails
++		 */
++		/* No-op */
++		break;
++
++	default:
++		bfa_sm_fault(llport->bna, event);
++	}
++}
++
++static void
++bna_llport_sm_down_entry(struct bna_llport *llport)
++{
++	bnad_cb_port_link_status(llport->bna->bnad, BNA_LINK_DOWN);
++}
++
++static void
++bna_llport_sm_down(struct bna_llport *llport,
++			enum bna_llport_event event)
++{
++	switch (event) {
++	case LLPORT_E_STOP:
++		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
++		break;
++
++	case LLPORT_E_FAIL:
++		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
++		break;
++
++	case LLPORT_E_UP:
++		bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
++		bna_fw_llport_up(llport);
++		break;
++
++	default:
++		bfa_sm_fault(llport->bna, event);
++	}
++}
++
++static void
++bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
++{
++	/**
++	 * NOTE: Do not call bna_fw_llport_up() here. That would overrun the
++	 * mbox, since the down_resp_wait -> up_resp_wait transition on event
++	 * LLPORT_E_UP occurs while the response to the down request is
++	 * still outstanding
++	 */
++}
++
++static void
++bna_llport_sm_up_resp_wait(struct bna_llport *llport,
++			enum bna_llport_event event)
++{
++	switch (event) {
++	case LLPORT_E_STOP:
++		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
++		break;
++
++	case LLPORT_E_FAIL:
++		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
++		break;
++
++	case LLPORT_E_DOWN:
++		bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
++		break;
++
++	case LLPORT_E_FWRESP_UP:
++		bfa_fsm_set_state(llport, bna_llport_sm_up);
++		break;
++
++	case LLPORT_E_FWRESP_DOWN:
++		/* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
++		bna_fw_llport_up(llport);
++		break;
++
++	default:
++		bfa_sm_fault(llport->bna, event);
++	}
++}
++
++static void
++bna_llport_sm_down_resp_wait_entry(struct bna_llport *llport)
++{
++	/**
++	 * NOTE: Do not call bna_fw_llport_down() here. That would overrun
++	 * the mbox, since the up_resp_wait -> down_resp_wait transition on
++	 * event LLPORT_E_DOWN occurs while the response to the up request
++	 * is still outstanding
++	 */
++}
++
++static void
++bna_llport_sm_down_resp_wait(struct bna_llport *llport,
++			enum bna_llport_event event)
++{
++	switch (event) {
++	case LLPORT_E_STOP:
++		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
++		break;
++
++	case LLPORT_E_FAIL:
++		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
++		break;
++
++	case LLPORT_E_UP:
++		bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
++		break;
++
++	case LLPORT_E_FWRESP_UP:
++		/* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
++		bna_fw_llport_down(llport);
++		break;
++
++	case LLPORT_E_FWRESP_DOWN:
++		bfa_fsm_set_state(llport, bna_llport_sm_down);
++		break;
++
++	default:
++		bfa_sm_fault(llport->bna, event);
++	}
++}
++
++static void
++bna_llport_sm_up_entry(struct bna_llport *llport)
++{
++}
++
++static void
++bna_llport_sm_up(struct bna_llport *llport,
++			enum bna_llport_event event)
++{
++	switch (event) {
++	case LLPORT_E_STOP:
++		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
++		bna_fw_llport_down(llport);
++		break;
++
++	case LLPORT_E_FAIL:
++		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
++		break;
++
++	case LLPORT_E_DOWN:
++		bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
++		bna_fw_llport_down(llport);
++		break;
++
++	default:
++		bfa_sm_fault(llport->bna, event);
++	}
++}
++
++static void
++bna_llport_sm_last_resp_wait_entry(struct bna_llport *llport)
++{
++}
++
++static void
++bna_llport_sm_last_resp_wait(struct bna_llport *llport,
++			enum bna_llport_event event)
++{
++	switch (event) {
++	case LLPORT_E_FAIL:
++		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
++		break;
++
++	case LLPORT_E_DOWN:
++		/**
++		 * This event is received due to Rx objects stopping in
++		 * parallel with the llport
++		 */
++		/* No-op */
++		break;
++
++	case LLPORT_E_FWRESP_UP:
++		/* up_resp_wait->last_resp_wait transition on LLPORT_E_STOP */
++		bna_fw_llport_down(llport);
++		break;
++
++	case LLPORT_E_FWRESP_DOWN:
++		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
++		break;
++
++	default:
++		bfa_sm_fault(llport->bna, event);
++	}
++}
++
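++/**
++ * Firmware plumbing for the LLPORT FSM: build a BFI port-admin
++ * request, post it through the llport's single mailbox queue element,
++ * and feed the response back in as an LLPORT_E_FWRESP_* event
++ */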
++static void
++bna_fw_llport_admin_up(struct bna_llport *llport)
++{
++	struct bfi_ll_port_admin_req ll_req;
++
++	memset(&ll_req, 0, sizeof(ll_req));
++	ll_req.mh.msg_class = BFI_MC_LL;
++	ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
++	ll_req.mh.mtag.h2i.lpu_id = 0;
++
++	ll_req.up = BNA_STATUS_T_ENABLED;
++
++	bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
++			bna_fw_cb_llport_up, llport);
++
++	bna_mbox_send(llport->bna, &llport->mbox_qe);
++}
++
++static void
++bna_fw_llport_up(struct bna_llport *llport)
++{
++	if (llport->type == BNA_PORT_T_REGULAR)
++		bna_fw_llport_admin_up(llport);
++}
++
++static void
++bna_fw_cb_llport_up(void *arg, int status)
++{
++	struct bna_llport *llport = (struct bna_llport *)arg;
++
++	bfa_q_qe_init(&llport->mbox_qe.qe);
++	bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP);
++}
++
++static void
++bna_fw_llport_admin_down(struct bna_llport *llport)
++{
++	struct bfi_ll_port_admin_req ll_req;
++
++	memset(&ll_req, 0, sizeof(ll_req));
++	ll_req.mh.msg_class = BFI_MC_LL;
++	ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
++	ll_req.mh.mtag.h2i.lpu_id = 0;
++
++	ll_req.up = BNA_STATUS_T_DISABLED;
++
++	bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
++			bna_fw_cb_llport_down, llport);
++
++	bna_mbox_send(llport->bna, &llport->mbox_qe);
++}
++
++static void
++bna_fw_llport_down(struct bna_llport *llport)
++{
++	if (llport->type == BNA_PORT_T_REGULAR)
++		bna_fw_llport_admin_down(llport);
++}
++
++static void
++bna_fw_cb_llport_down(void *arg, int status)
++{
++	struct bna_llport *llport = (struct bna_llport *)arg;
++
++	bfa_q_qe_init(&llport->mbox_qe.qe);
++	bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
++}
++
++void
++bna_port_cb_llport_stopped(struct bna_port *port,
++				enum bna_cb_status status)
++{
++	bfa_wc_down(&port->chld_stop_wc);
++}
++
++static void
++bna_llport_init(struct bna_llport *llport, struct bna *bna)
++{
++	llport->flags |= BNA_LLPORT_F_ENABLED;
++	llport->type = BNA_PORT_T_REGULAR;
++	llport->bna = bna;
++
++	llport->link_status = BNA_LINK_DOWN;
++
++	llport->admin_up_count = 0;
++
++	llport->stop_cbfn = NULL;
++
++	bfa_q_qe_init(&llport->mbox_qe.qe);
++
++	bfa_fsm_set_state(llport, bna_llport_sm_stopped);
++}
++
++static void
++bna_llport_uninit(struct bna_llport *llport)
++{
++	llport->flags &= ~BNA_LLPORT_F_ENABLED;
++
++	llport->bna = NULL;
++}
++
++static void
++bna_llport_start(struct bna_llport *llport)
++{
++	bfa_fsm_send_event(llport, LLPORT_E_START);
++}
++
++static void
++bna_llport_stop(struct bna_llport *llport)
++{
++	llport->stop_cbfn = bna_port_cb_llport_stopped;
++
++	bfa_fsm_send_event(llport, LLPORT_E_STOP);
++}
++
++static void
++bna_llport_fail(struct bna_llport *llport)
++{
++	bfa_fsm_send_event(llport, LLPORT_E_FAIL);
++}
++
++int
++bna_llport_state_get(struct bna_llport *llport)
++{
++	return bfa_sm_to_state(llport_sm_table, llport->fsm);
++}
++
++void
++bna_llport_admin_up(struct bna_llport *llport)
++{
++	llport->admin_up_count++;
++
++	if (llport->admin_up_count == 1) {
++		llport->flags |= BNA_LLPORT_F_RX_ENABLED;
++		if (llport->flags & BNA_LLPORT_F_ENABLED)
++			bfa_fsm_send_event(llport, LLPORT_E_UP);
++	}
++}
++
++void
++bna_llport_admin_down(struct bna_llport *llport)
++{
++	llport->admin_up_count--;
++
++	if (llport->admin_up_count == 0) {
++		llport->flags &= ~BNA_LLPORT_F_RX_ENABLED;
++		if (llport->flags & BNA_LLPORT_F_ENABLED)
++			bfa_fsm_send_event(llport, LLPORT_E_DOWN);
++	}
++}
++
++/**
++ * PORT
++ */
++#define bna_port_chld_start(port)\
++do {\
++	enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
++					BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
++	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
++					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
++	bna_llport_start(&(port)->llport);\
++	bna_tx_mod_start(&(port)->bna->tx_mod, tx_type);\
++	bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
++} while (0)
++
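++/**
++ * The wait counter is raised once per child (llport, tx_mod, rx_mod);
++ * each child's stopped callback lowers it, and the port FSM receives
++ * PORT_E_CHLD_STOPPED from bna_port_cb_chld_stopped() once all three
++ * are done
++ */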
++#define bna_port_chld_stop(port)\
++do {\
++	enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
++					BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
++	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
++					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
++	bfa_wc_up(&(port)->chld_stop_wc);\
++	bfa_wc_up(&(port)->chld_stop_wc);\
++	bfa_wc_up(&(port)->chld_stop_wc);\
++	bna_llport_stop(&(port)->llport);\
++	bna_tx_mod_stop(&(port)->bna->tx_mod, tx_type);\
++	bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
++} while (0)
++
++#define bna_port_chld_fail(port)\
++do {\
++	bna_llport_fail(&(port)->llport);\
++	bna_tx_mod_fail(&(port)->bna->tx_mod);\
++	bna_rx_mod_fail(&(port)->bna->rx_mod);\
++} while (0)
++
++#define bna_port_rx_start(port)\
++do {\
++	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
++					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
++	bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
++} while (0)
++
++#define bna_port_rx_stop(port)\
++do {\
++	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
++					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
++	bfa_wc_up(&(port)->chld_stop_wc);\
++	bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
++} while (0)
++
++#define call_port_stop_cbfn(port, status)\
++do {\
++	if ((port)->stop_cbfn)\
++		(port)->stop_cbfn((port)->stop_cbarg, status);\
++	(port)->stop_cbfn = NULL;\
++	(port)->stop_cbarg = NULL;\
++} while (0)
++
++#define call_port_pause_cbfn(port, status)\
++do {\
++	if ((port)->pause_cbfn)\
++		(port)->pause_cbfn((port)->bna->bnad, status);\
++	(port)->pause_cbfn = NULL;\
++} while (0)
++
++#define call_port_mtu_cbfn(port, status)\
++do {\
++	if ((port)->mtu_cbfn)\
++		(port)->mtu_cbfn((port)->bna->bnad, status);\
++	(port)->mtu_cbfn = NULL;\
++} while (0)
++
++static void bna_fw_pause_set(struct bna_port *port);
++static void bna_fw_cb_pause_set(void *arg, int status);
++static void bna_fw_mtu_set(struct bna_port *port);
++static void bna_fw_cb_mtu_set(void *arg, int status);
++
++enum bna_port_event {
++	PORT_E_START			= 1,
++	PORT_E_STOP			= 2,
++	PORT_E_FAIL			= 3,
++	PORT_E_PAUSE_CFG		= 4,
++	PORT_E_MTU_CFG			= 5,
++	PORT_E_CHLD_STOPPED		= 6,
++	PORT_E_FWRESP_PAUSE		= 7,
++	PORT_E_FWRESP_MTU		= 8
++};
++
++enum bna_port_state {
++	BNA_PORT_STOPPED		= 1,
++	BNA_PORT_MTU_INIT_WAIT		= 2,
++	BNA_PORT_PAUSE_INIT_WAIT	= 3,
++	BNA_PORT_LAST_RESP_WAIT		= 4,
++	BNA_PORT_STARTED		= 5,
++	BNA_PORT_PAUSE_CFG_WAIT		= 6,
++	BNA_PORT_RX_STOP_WAIT		= 7,
++	BNA_PORT_MTU_CFG_WAIT 		= 8,
++	BNA_PORT_CHLD_STOP_WAIT		= 9
++};
++
++bfa_fsm_state_decl(bna_port, stopped, struct bna_port,
++			enum bna_port_event);
++bfa_fsm_state_decl(bna_port, mtu_init_wait, struct bna_port,
++			enum bna_port_event);
++bfa_fsm_state_decl(bna_port, pause_init_wait, struct bna_port,
++			enum bna_port_event);
++bfa_fsm_state_decl(bna_port, last_resp_wait, struct bna_port,
++			enum bna_port_event);
++bfa_fsm_state_decl(bna_port, started, struct bna_port,
++			enum bna_port_event);
++bfa_fsm_state_decl(bna_port, pause_cfg_wait, struct bna_port,
++			enum bna_port_event);
++bfa_fsm_state_decl(bna_port, rx_stop_wait, struct bna_port,
++			enum bna_port_event);
++bfa_fsm_state_decl(bna_port, mtu_cfg_wait, struct bna_port,
++			enum bna_port_event);
++bfa_fsm_state_decl(bna_port, chld_stop_wait, struct bna_port,
++			enum bna_port_event);
++
++static struct bfa_sm_table port_sm_table[] = {
++	{BFA_SM(bna_port_sm_stopped), BNA_PORT_STOPPED},
++	{BFA_SM(bna_port_sm_mtu_init_wait), BNA_PORT_MTU_INIT_WAIT},
++	{BFA_SM(bna_port_sm_pause_init_wait), BNA_PORT_PAUSE_INIT_WAIT},
++	{BFA_SM(bna_port_sm_last_resp_wait), BNA_PORT_LAST_RESP_WAIT},
++	{BFA_SM(bna_port_sm_started), BNA_PORT_STARTED},
++	{BFA_SM(bna_port_sm_pause_cfg_wait), BNA_PORT_PAUSE_CFG_WAIT},
++	{BFA_SM(bna_port_sm_rx_stop_wait), BNA_PORT_RX_STOP_WAIT},
++	{BFA_SM(bna_port_sm_mtu_cfg_wait), BNA_PORT_MTU_CFG_WAIT},
++	{BFA_SM(bna_port_sm_chld_stop_wait), BNA_PORT_CHLD_STOP_WAIT}
++};
++
++static void
++bna_port_sm_stopped_entry(struct bna_port *port)
++{
++	call_port_pause_cbfn(port, BNA_CB_SUCCESS);
++	call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
++	call_port_stop_cbfn(port, BNA_CB_SUCCESS);
++}
++
++static void
++bna_port_sm_stopped(struct bna_port *port, enum bna_port_event event)
++{
++	switch (event) {
++	case PORT_E_START:
++		bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
++		break;
++
++	case PORT_E_STOP:
++		call_port_stop_cbfn(port, BNA_CB_SUCCESS);
++		break;
++
++	case PORT_E_FAIL:
++		/* No-op */
++		break;
++
++	case PORT_E_PAUSE_CFG:
++		call_port_pause_cbfn(port, BNA_CB_SUCCESS);
++		break;
++
++	case PORT_E_MTU_CFG:
++		call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
++		break;
++
++	case PORT_E_CHLD_STOPPED:
++		/**
++		 * This event is received due to LLPort, Tx and Rx objects
++		 * failing
++		 */
++		/* No-op */
++		break;
++
++	case PORT_E_FWRESP_PAUSE:
++	case PORT_E_FWRESP_MTU:
++		/**
++		 * These events are received due to flushing of the mbox
++		 * when the device fails
++		 */
++		/* No-op */
++		break;
++
++	default:
++		bfa_sm_fault(port->bna, event);
++	}
++}
++
++static void
++bna_port_sm_mtu_init_wait_entry(struct bna_port *port)
++{
++	bna_fw_mtu_set(port);
++}
++
++static void
++bna_port_sm_mtu_init_wait(struct bna_port *port, enum bna_port_event event)
++{
++	switch (event) {
++	case PORT_E_STOP:
++		bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
++		break;
++
++	case PORT_E_FAIL:
++		bfa_fsm_set_state(port, bna_port_sm_stopped);
++		break;
++
++	case PORT_E_PAUSE_CFG:
++		/* No-op */
++		break;
++
++	case PORT_E_MTU_CFG:
++		port->flags |= BNA_PORT_F_MTU_CHANGED;
++		break;
++
++	case PORT_E_FWRESP_MTU:
++		if (port->flags & BNA_PORT_F_MTU_CHANGED) {
++			port->flags &= ~BNA_PORT_F_MTU_CHANGED;
++			bna_fw_mtu_set(port);
++		} else {
++			bfa_fsm_set_state(port, bna_port_sm_pause_init_wait);
++		}
++		break;
++
++	default:
++		bfa_sm_fault(port->bna, event);
++	}
++}
++
++static void
++bna_port_sm_pause_init_wait_entry(struct bna_port *port)
++{
++	bna_fw_pause_set(port);
++}
++
++static void
++bna_port_sm_pause_init_wait(struct bna_port *port,
++				enum bna_port_event event)
++{
++	switch (event) {
++	case PORT_E_STOP:
++		bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
++		break;
++
++	case PORT_E_FAIL:
++		bfa_fsm_set_state(port, bna_port_sm_stopped);
++		break;
++
++	case PORT_E_PAUSE_CFG:
++		port->flags |= BNA_PORT_F_PAUSE_CHANGED;
++		break;
++
++	case PORT_E_MTU_CFG:
++		port->flags |= BNA_PORT_F_MTU_CHANGED;
++		break;
++
++	case PORT_E_FWRESP_PAUSE:
++		if (port->flags & BNA_PORT_F_PAUSE_CHANGED) {
++			port->flags &= ~BNA_PORT_F_PAUSE_CHANGED;
++			bna_fw_pause_set(port);
++		} else if (port->flags & BNA_PORT_F_MTU_CHANGED) {
++			port->flags &= ~BNA_PORT_F_MTU_CHANGED;
++			bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
++		} else {
++			bfa_fsm_set_state(port, bna_port_sm_started);
++			bna_port_chld_start(port);
++		}
++		break;
++
++	default:
++		bfa_sm_fault(port->bna, event);
++	}
++}
++
++static void
++bna_port_sm_last_resp_wait_entry(struct bna_port *port)
++{
++}
++
++static void
++bna_port_sm_last_resp_wait(struct bna_port *port,
++				enum bna_port_event event)
++{
++	switch (event) {
++	case PORT_E_FAIL:
++	case PORT_E_FWRESP_PAUSE:
++	case PORT_E_FWRESP_MTU:
++		bfa_fsm_set_state(port, bna_port_sm_stopped);
++		break;
++
++	default:
++		bfa_sm_fault(port->bna, event);
++	}
++}
++
++static void
++bna_port_sm_started_entry(struct bna_port *port)
++{
++	/**
++	 * NOTE: Do not call bna_port_chld_start() here, since it would
++	 * also be called inadvertently during the pause_cfg_wait ->
++	 * started transition
++	 */
++	call_port_pause_cbfn(port, BNA_CB_SUCCESS);
++	call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
++}
++
++static void
++bna_port_sm_started(struct bna_port *port,
++			enum bna_port_event event)
++{
++	switch (event) {
++	case PORT_E_STOP:
++		bfa_fsm_set_state(port, bna_port_sm_chld_stop_wait);
++		break;
++
++	case PORT_E_FAIL:
++		bfa_fsm_set_state(port, bna_port_sm_stopped);
++		bna_port_chld_fail(port);
++		break;
++
++	case PORT_E_PAUSE_CFG:
++		bfa_fsm_set_state(port, bna_port_sm_pause_cfg_wait);
++		break;
++
++	case PORT_E_MTU_CFG:
++		bfa_fsm_set_state(port, bna_port_sm_rx_stop_wait);
++		break;
++
++	default:
++		bfa_sm_fault(port->bna, event);
++	}
++}
++
++static void
++bna_port_sm_pause_cfg_wait_entry(struct bna_port *port)
++{
++	bna_fw_pause_set(port);
++}
++
++static void
++bna_port_sm_pause_cfg_wait(struct bna_port *port,
++				enum bna_port_event event)
++{
++	switch (event) {
++	case PORT_E_FAIL:
++		bfa_fsm_set_state(port, bna_port_sm_stopped);
++		bna_port_chld_fail(port);
++		break;
++
++	case PORT_E_FWRESP_PAUSE:
++		bfa_fsm_set_state(port, bna_port_sm_started);
++		break;
++
++	default:
++		bfa_sm_fault(port->bna, event);
++	}
++}
++
++static void
++bna_port_sm_rx_stop_wait_entry(struct bna_port *port)
++{
++	bna_port_rx_stop(port);
++}
++
++static void
++bna_port_sm_rx_stop_wait(struct bna_port *port,
++				enum bna_port_event event)
++{
++	switch (event) {
++	case PORT_E_FAIL:
++		bfa_fsm_set_state(port, bna_port_sm_stopped);
++		bna_port_chld_fail(port);
++		break;
++
++	case PORT_E_CHLD_STOPPED:
++		bfa_fsm_set_state(port, bna_port_sm_mtu_cfg_wait);
++		break;
++
++	default:
++		bfa_sm_fault(port->bna, event);
++	}
++}
++
++static void
++bna_port_sm_mtu_cfg_wait_entry(struct bna_port *port)
++{
++	bna_fw_mtu_set(port);
++}
++
++static void
++bna_port_sm_mtu_cfg_wait(struct bna_port *port, enum bna_port_event event)
++{
++	switch (event) {
++	case PORT_E_FAIL:
++		bfa_fsm_set_state(port, bna_port_sm_stopped);
++		bna_port_chld_fail(port);
++		break;
++
++	case PORT_E_FWRESP_MTU:
++		bfa_fsm_set_state(port, bna_port_sm_started);
++		bna_port_rx_start(port);
++		break;
++
++	default:
++		bfa_sm_fault(port->bna, event);
++	}
++}
++
++static void
++bna_port_sm_chld_stop_wait_entry(struct bna_port *port)
++{
++	bna_port_chld_stop(port);
++}
++
++static void
++bna_port_sm_chld_stop_wait(struct bna_port *port,
++				enum bna_port_event event)
++{
++	switch (event) {
++	case PORT_E_FAIL:
++		bfa_fsm_set_state(port, bna_port_sm_stopped);
++		bna_port_chld_fail(port);
++		break;
++
++	case PORT_E_CHLD_STOPPED:
++		bfa_fsm_set_state(port, bna_port_sm_stopped);
++		break;
++
++	default:
++		bfa_sm_fault(port->bna, event);
++	}
++}
++
++static void
++bna_fw_pause_set(struct bna_port *port)
++{
++	struct bfi_ll_set_pause_req ll_req;
++
++	memset(&ll_req, 0, sizeof(ll_req));
++	ll_req.mh.msg_class = BFI_MC_LL;
++	ll_req.mh.msg_id = BFI_LL_H2I_SET_PAUSE_REQ;
++	ll_req.mh.mtag.h2i.lpu_id = 0;
++
++	ll_req.tx_pause = port->pause_config.tx_pause;
++	ll_req.rx_pause = port->pause_config.rx_pause;
++
++	bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
++			bna_fw_cb_pause_set, port);
++
++	bna_mbox_send(port->bna, &port->mbox_qe);
++}
++
++static void
++bna_fw_cb_pause_set(void *arg, int status)
++{
++	struct bna_port *port = (struct bna_port *)arg;
++
++	bfa_q_qe_init(&port->mbox_qe.qe);
++	bfa_fsm_send_event(port, PORT_E_FWRESP_PAUSE);
++}
++
++static void
++bna_fw_mtu_set(struct bna_port *port)
++{
++	struct bfi_ll_mtu_info_req ll_req;
++
++	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_MTU_INFO_REQ, 0);
++	ll_req.mtu = htons((u16)port->mtu);
++
++	bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
++				bna_fw_cb_mtu_set, port);
++	bna_mbox_send(port->bna, &port->mbox_qe);
++}
++
++static void
++bna_fw_cb_mtu_set(void *arg, int status)
++{
++	struct bna_port *port = (struct bna_port *)arg;
++
++	bfa_q_qe_init(&port->mbox_qe.qe);
++	bfa_fsm_send_event(port, PORT_E_FWRESP_MTU);
++}
++
++static void
++bna_port_cb_chld_stopped(void *arg)
++{
++	struct bna_port *port = (struct bna_port *)arg;
++
++	bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
++}
++
++void
++bna_port_init(struct bna_port *port, struct bna *bna)
++{
++	port->bna = bna;
++	port->flags = 0;
++	port->mtu = 0;
++	port->type = BNA_PORT_T_REGULAR;
++
++	port->link_cbfn = bnad_cb_port_link_status;
++
++	port->chld_stop_wc.wc_resume = bna_port_cb_chld_stopped;
++	port->chld_stop_wc.wc_cbarg = port;
++	port->chld_stop_wc.wc_count = 0;
++
++	port->stop_cbfn = NULL;
++	port->stop_cbarg = NULL;
++
++	port->pause_cbfn = NULL;
++
++	port->mtu_cbfn = NULL;
++
++	bfa_q_qe_init(&port->mbox_qe.qe);
++
++	bfa_fsm_set_state(port, bna_port_sm_stopped);
++
++	bna_llport_init(&port->llport, bna);
++}
++
++void
++bna_port_uninit(struct bna_port *port)
++{
++	bna_llport_uninit(&port->llport);
++
++	port->flags = 0;
++
++	port->bna = NULL;
++}
++
++int
++bna_port_state_get(struct bna_port *port)
++{
++	return bfa_sm_to_state(port_sm_table, port->fsm);
++}
++
++void
++bna_port_start(struct bna_port *port)
++{
++	port->flags |= BNA_PORT_F_DEVICE_READY;
++	if (port->flags & BNA_PORT_F_ENABLED)
++		bfa_fsm_send_event(port, PORT_E_START);
++}
++
++void
++bna_port_stop(struct bna_port *port)
++{
++	port->stop_cbfn = bna_device_cb_port_stopped;
++	port->stop_cbarg = &port->bna->device;
++
++	port->flags &= ~BNA_PORT_F_DEVICE_READY;
++	bfa_fsm_send_event(port, PORT_E_STOP);
++}
++
++void
++bna_port_fail(struct bna_port *port)
++{
++	port->flags &= ~BNA_PORT_F_DEVICE_READY;
++	bfa_fsm_send_event(port, PORT_E_FAIL);
++}
++
++void
++bna_port_cb_tx_stopped(struct bna_port *port, enum bna_cb_status status)
++{
++	bfa_wc_down(&port->chld_stop_wc);
++}
++
++void
++bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
++{
++	bfa_wc_down(&port->chld_stop_wc);
++}
++
++void
++bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
++			int status)
++{
++	int i;
++	u8 prio_map;
++
++	port->llport.link_status = BNA_LINK_UP;
++	if (aen->cee_linkup)
++		port->llport.link_status = BNA_CEE_UP;
++
++	/* Compute the priority */
++	prio_map = aen->prio_map;
++	if (prio_map) {
++		for (i = 0; i < 8; i++) {
++			if ((prio_map >> i) & 0x1)
++				break;
++		}
++		port->priority = i;
++	} else {
++		port->priority = 0;
++	}
++
++	/* Dispatch events */
++	bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
++	bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
++	port->link_cbfn(port->bna->bnad, port->llport.link_status);
++}
++
++void
++bna_port_cb_link_down(struct bna_port *port, int status)
++{
++	port->llport.link_status = BNA_LINK_DOWN;
++
++	/* Dispatch events */
++	bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
++	port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
++}
++
++int
++bna_port_mtu_get(struct bna_port *port)
++{
++	return port->mtu;
++}
++
++void
++bna_port_enable(struct bna_port *port)
++{
++	if (port->fsm != (bfa_sm_t)bna_port_sm_stopped)
++		return;
++
++	port->flags |= BNA_PORT_F_ENABLED;
++
++	if (port->flags & BNA_PORT_F_DEVICE_READY)
++		bfa_fsm_send_event(port, PORT_E_START);
++}
++
++void
++bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
++		 void (*cbfn)(void *, enum bna_cb_status))
++{
++	if (type == BNA_SOFT_CLEANUP) {
++		(*cbfn)(port->bna->bnad, BNA_CB_SUCCESS);
++		return;
++	}
++
++	port->stop_cbfn = cbfn;
++	port->stop_cbarg = port->bna->bnad;
++
++	port->flags &= ~BNA_PORT_F_ENABLED;
++
++	bfa_fsm_send_event(port, PORT_E_STOP);
++}
++
++void
++bna_port_pause_config(struct bna_port *port,
++		      struct bna_pause_config *pause_config,
++		      void (*cbfn)(struct bnad *, enum bna_cb_status))
++{
++	port->pause_config = *pause_config;
++
++	port->pause_cbfn = cbfn;
++
++	bfa_fsm_send_event(port, PORT_E_PAUSE_CFG);
++}
++
++void
++bna_port_mtu_set(struct bna_port *port, int mtu,
++		 void (*cbfn)(struct bnad *, enum bna_cb_status))
++{
++	port->mtu = mtu;
++
++	port->mtu_cbfn = cbfn;
++
++	bfa_fsm_send_event(port, PORT_E_MTU_CFG);
++}
++
++void
++bna_port_mac_get(struct bna_port *port, mac_t *mac)
++{
++	*mac = bfa_ioc_get_mac(&port->bna->device.ioc);
++}
++
++/**
++ * Should be called only when port is disabled
++ */
++void
++bna_port_type_set(struct bna_port *port, enum bna_port_type type)
++{
++	port->type = type;
++	port->llport.type = type;
++}
++
++/**
++ * Should be called only when port is disabled
++ */
++void
++bna_port_linkcbfn_set(struct bna_port *port,
++		      void (*linkcbfn)(struct bnad *, enum bna_link_status))
++{
++	port->link_cbfn = linkcbfn;
++}
++
++void
++bna_port_admin_up(struct bna_port *port)
++{
++	struct bna_llport *llport = &port->llport;
++
++	if (llport->flags & BNA_LLPORT_F_ENABLED)
++		return;
++
++	llport->flags |= BNA_LLPORT_F_ENABLED;
++
++	if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
++		bfa_fsm_send_event(llport, LLPORT_E_UP);
++}
++
++void
++bna_port_admin_down(struct bna_port *port)
++{
++	struct bna_llport *llport = &port->llport;
++
++	if (!(llport->flags & BNA_LLPORT_F_ENABLED))
++		return;
++
++	llport->flags &= ~BNA_LLPORT_F_ENABLED;
++
++	if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
++		bfa_fsm_send_event(llport, LLPORT_E_DOWN);
++}
++
++/**
++ * DEVICE
++ */
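++/*
++ * Mailbox interrupt bring-up/teardown. Note that enable_mbox_intr()
++ * reads the interrupt status into a local it never otherwise uses;
++ * presumably the read itself acknowledges any stale status before
++ * the interrupt is unmasked
++ */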
++#define enable_mbox_intr(_device)\
++do {\
++	u32 intr_status;\
++	bna_intr_status_get((_device)->bna, intr_status);\
++	bnad_cb_device_enable_mbox_intr((_device)->bna->bnad);\
++	bna_mbox_intr_enable((_device)->bna);\
++} while (0)
++
++#define disable_mbox_intr(_device)\
++do {\
++	bna_mbox_intr_disable((_device)->bna);\
++	bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
++} while (0)
++
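++/* Per-PCI-function interrupt register offsets, indexed by function number */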
++const struct bna_chip_regs_offset reg_offset[] = {
++	{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
++		HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
++	{HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
++		HOSTFN1_INT_MASK, HOST_MSIX_ERR_INDEX_FN1},
++	{HOST_PAGE_NUM_FN2, HOSTFN2_INT_STATUS,
++		HOSTFN2_INT_MASK, HOST_MSIX_ERR_INDEX_FN2},
++	{HOST_PAGE_NUM_FN3, HOSTFN3_INT_STATUS,
++		HOSTFN3_INT_MASK, HOST_MSIX_ERR_INDEX_FN3},
++};
++
++enum bna_device_event {
++	DEVICE_E_ENABLE			= 1,
++	DEVICE_E_DISABLE		= 2,
++	DEVICE_E_IOC_READY		= 3,
++	DEVICE_E_IOC_FAILED		= 4,
++	DEVICE_E_IOC_DISABLED		= 5,
++	DEVICE_E_IOC_RESET		= 6,
++	DEVICE_E_PORT_STOPPED		= 7,
++};
++
++enum bna_device_state {
++	BNA_DEVICE_STOPPED		= 1,
++	BNA_DEVICE_IOC_READY_WAIT 	= 2,
++	BNA_DEVICE_READY		= 3,
++	BNA_DEVICE_PORT_STOP_WAIT 	= 4,
++	BNA_DEVICE_IOC_DISABLE_WAIT 	= 5,
++	BNA_DEVICE_FAILED		= 6
++};
++
++bfa_fsm_state_decl(bna_device, stopped, struct bna_device,
++			enum bna_device_event);
++bfa_fsm_state_decl(bna_device, ioc_ready_wait, struct bna_device,
++			enum bna_device_event);
++bfa_fsm_state_decl(bna_device, ready, struct bna_device,
++			enum bna_device_event);
++bfa_fsm_state_decl(bna_device, port_stop_wait, struct bna_device,
++			enum bna_device_event);
++bfa_fsm_state_decl(bna_device, ioc_disable_wait, struct bna_device,
++			enum bna_device_event);
++bfa_fsm_state_decl(bna_device, failed, struct bna_device,
++			enum bna_device_event);
++
++static struct bfa_sm_table device_sm_table[] = {
++	{BFA_SM(bna_device_sm_stopped), BNA_DEVICE_STOPPED},
++	{BFA_SM(bna_device_sm_ioc_ready_wait), BNA_DEVICE_IOC_READY_WAIT},
++	{BFA_SM(bna_device_sm_ready), BNA_DEVICE_READY},
++	{BFA_SM(bna_device_sm_port_stop_wait), BNA_DEVICE_PORT_STOP_WAIT},
++	{BFA_SM(bna_device_sm_ioc_disable_wait), BNA_DEVICE_IOC_DISABLE_WAIT},
++	{BFA_SM(bna_device_sm_failed), BNA_DEVICE_FAILED},
++};
++
++static void
++bna_device_sm_stopped_entry(struct bna_device *device)
++{
++	if (device->stop_cbfn)
++		device->stop_cbfn(device->stop_cbarg, BNA_CB_SUCCESS);
++
++	device->stop_cbfn = NULL;
++	device->stop_cbarg = NULL;
++}
++
++static void
++bna_device_sm_stopped(struct bna_device *device,
++			enum bna_device_event event)
++{
++	switch (event) {
++	case DEVICE_E_ENABLE:
++		if (device->intr_type == BNA_INTR_T_MSIX)
++			bna_mbox_msix_idx_set(device);
++		bfa_ioc_enable(&device->ioc);
++		bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
++		break;
++
++	case DEVICE_E_DISABLE:
++		bfa_fsm_set_state(device, bna_device_sm_stopped);
++		break;
++
++	case DEVICE_E_IOC_RESET:
++		enable_mbox_intr(device);
++		break;
++
++	case DEVICE_E_IOC_FAILED:
++		bfa_fsm_set_state(device, bna_device_sm_failed);
++		break;
++
++	default:
++		bfa_sm_fault(device->bna, event);
++	}
++}
++
++static void
++bna_device_sm_ioc_ready_wait_entry(struct bna_device *device)
++{
++	/**
++	 * Do not call bfa_ioc_enable() here. It must be called in the
++	 * previous state because of the failed -> ioc_ready_wait transition.
++	 */
++}
++
++static void
++bna_device_sm_ioc_ready_wait(struct bna_device *device,
++				enum bna_device_event event)
++{
++	switch (event) {
++	case DEVICE_E_DISABLE:
++		if (device->ready_cbfn)
++			device->ready_cbfn(device->ready_cbarg,
++						BNA_CB_INTERRUPT);
++		device->ready_cbfn = NULL;
++		device->ready_cbarg = NULL;
++		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
++		break;
++
++	case DEVICE_E_IOC_READY:
++		bfa_fsm_set_state(device, bna_device_sm_ready);
++		break;
++
++	case DEVICE_E_IOC_FAILED:
++		bfa_fsm_set_state(device, bna_device_sm_failed);
++		break;
++
++	case DEVICE_E_IOC_RESET:
++		enable_mbox_intr(device);
++		break;
++
++	default:
++		bfa_sm_fault(device->bna, event);
++	}
++}
++
++static void
++bna_device_sm_ready_entry(struct bna_device *device)
++{
++	bna_mbox_mod_start(&device->bna->mbox_mod);
++	bna_port_start(&device->bna->port);
++
++	if (device->ready_cbfn)
++		device->ready_cbfn(device->ready_cbarg,
++					BNA_CB_SUCCESS);
++	device->ready_cbfn = NULL;
++	device->ready_cbarg = NULL;
++}
++
++static void
++bna_device_sm_ready(struct bna_device *device, enum bna_device_event event)
++{
++	switch (event) {
++	case DEVICE_E_DISABLE:
++		bfa_fsm_set_state(device, bna_device_sm_port_stop_wait);
++		break;
++
++	case DEVICE_E_IOC_FAILED:
++		bfa_fsm_set_state(device, bna_device_sm_failed);
++		break;
++
++	default:
++		bfa_sm_fault(device->bna, event);
++	}
++}
++
++static void
++bna_device_sm_port_stop_wait_entry(struct bna_device *device)
++{
++	bna_port_stop(&device->bna->port);
++}
++
++static void
++bna_device_sm_port_stop_wait(struct bna_device *device,
++				enum bna_device_event event)
++{
++	switch (event) {
++	case DEVICE_E_PORT_STOPPED:
++		bna_mbox_mod_stop(&device->bna->mbox_mod);
++		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
++		break;
++
++	case DEVICE_E_IOC_FAILED:
++		disable_mbox_intr(device);
++		bna_port_fail(&device->bna->port);
++		break;
++
++	default:
++		bfa_sm_fault(device->bna, event);
++	}
++}
++
++static void
++bna_device_sm_ioc_disable_wait_entry(struct bna_device *device)
++{
++	bfa_ioc_disable(&device->ioc);
++}
++
++static void
++bna_device_sm_ioc_disable_wait(struct bna_device *device,
++				enum bna_device_event event)
++{
++	switch (event) {
++	case DEVICE_E_IOC_DISABLED:
++		disable_mbox_intr(device);
++		bfa_fsm_set_state(device, bna_device_sm_stopped);
++		break;
++
++	default:
++		bfa_sm_fault(device->bna, event);
++	}
++}
++
++static void
++bna_device_sm_failed_entry(struct bna_device *device)
++{
++	disable_mbox_intr(device);
++	bna_port_fail(&device->bna->port);
++	bna_mbox_mod_stop(&device->bna->mbox_mod);
++
++	if (device->ready_cbfn)
++		device->ready_cbfn(device->ready_cbarg,
++					BNA_CB_FAIL);
++	device->ready_cbfn = NULL;
++	device->ready_cbarg = NULL;
++}
++
++static void
++bna_device_sm_failed(struct bna_device *device,
++			enum bna_device_event event)
++{
++	switch (event) {
++	case DEVICE_E_DISABLE:
++		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
++		break;
++
++	case DEVICE_E_IOC_RESET:
++		enable_mbox_intr(device);
++		bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
++		break;
++
++	default:
++		bfa_sm_fault(device->bna, event);
++	}
++}
++
++/* IOC callback functions */
++
++static void
++bna_device_cb_iocll_ready(void *dev, enum bfa_status error)
++{
++	struct bna_device *device = (struct bna_device *)dev;
++
++	if (error)
++		bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
++	else
++		bfa_fsm_send_event(device, DEVICE_E_IOC_READY);
++}
++
++static void
++bna_device_cb_iocll_disabled(void *dev)
++{
++	struct bna_device *device = (struct bna_device *)dev;
++
++	bfa_fsm_send_event(device, DEVICE_E_IOC_DISABLED);
++}
++
++static void
++bna_device_cb_iocll_failed(void *dev)
++{
++	struct bna_device *device = (struct bna_device *)dev;
++
++	bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
++}
++
++static void
++bna_device_cb_iocll_reset(void *dev)
++{
++	struct bna_device *device = (struct bna_device *)dev;
++
++	bfa_fsm_send_event(device, DEVICE_E_IOC_RESET);
++}
++
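++/*
++ * Positional initializers: the order is assumed to match the member
++ * layout of struct bfa_ioc_cbfn (ready, disabled, failed, reset)
++ */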
++static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
++	bna_device_cb_iocll_ready,
++	bna_device_cb_iocll_disabled,
++	bna_device_cb_iocll_failed,
++	bna_device_cb_iocll_reset
++};
++
++void
++bna_device_init(struct bna_device *device, struct bna *bna,
++		struct bna_res_info *res_info)
++{
++	u64 dma;
++
++	device->bna = bna;
++
++	/**
++	 * Attach IOC and claim:
++	 *	1. DMA memory for IOC attributes
++	 *	2. Kernel memory for FW trace
++	 */
++	bfa_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
++	bfa_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);
++
++	BNA_GET_DMA_ADDR(
++		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
++	bfa_ioc_mem_claim(&device->ioc,
++		res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva,
++			  dma);
++
++	bna_adv_device_init(device, bna, res_info);
++	/*
++	 * Initialize mbox_mod only after IOC, so that mbox handler
++	 * registration goes through
++	 */
++	device->intr_type =
++		res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type;
++	device->vector =
++		res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.idl[0].vector;
++	bna_mbox_mod_init(&bna->mbox_mod, bna);
++
++	device->ready_cbfn = device->stop_cbfn = NULL;
++	device->ready_cbarg = device->stop_cbarg = NULL;
++
++	bfa_fsm_set_state(device, bna_device_sm_stopped);
++}
++
++void
++bna_device_uninit(struct bna_device *device)
++{
++	bna_mbox_mod_uninit(&device->bna->mbox_mod);
++
++	bfa_cee_detach(&device->bna->cee);
++
++	bfa_ioc_detach(&device->ioc);
++
++	device->bna = NULL;
++}
++
++void
++bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
++{
++	struct bna_device *device = (struct bna_device *)arg;
++
++	bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);
++}
++
++int
++bna_device_status_get(struct bna_device *device)
++{
++	return device->fsm == (bfa_fsm_t)bna_device_sm_ready;
++}
++
++void
++bna_device_enable(struct bna_device *device)
++{
++	if (device->fsm != (bfa_fsm_t)bna_device_sm_stopped) {
++		bnad_cb_device_enabled(device->bna->bnad, BNA_CB_BUSY);
++		return;
++	}
++
++	device->ready_cbfn = bnad_cb_device_enabled;
++	device->ready_cbarg = device->bna->bnad;
++
++	bfa_fsm_send_event(device, DEVICE_E_ENABLE);
++}
++
++void
++bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
++{
++	if (type == BNA_SOFT_CLEANUP) {
++		bnad_cb_device_disabled(device->bna->bnad, BNA_CB_SUCCESS);
++		return;
++	}
++
++	device->stop_cbfn = bnad_cb_device_disabled;
++	device->stop_cbarg = device->bna->bnad;
++
++	bfa_fsm_send_event(device, DEVICE_E_DISABLE);
++}
++
++int
++bna_device_state_get(struct bna_device *device)
++{
++	return bfa_sm_to_state(device_sm_table, device->fsm);
++}
++
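++/*
++ * Interrupt-moderation lookup tables: coalescing values indexed by
++ * measured load class and bias; rows are presumably ordered from the
++ * highest load class down to the lowest
++ */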
++u32 bna_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
++	{12, 20},
++	{10, 18},
++	{8, 16},
++	{6, 12},
++	{4, 8},
++	{3, 6},
++	{2, 4},
++	{1, 2},
++};
++
++u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
++	{12, 12},
++	{6, 10},
++	{5, 10},
++	{4, 8},
++	{3, 6},
++	{3, 6},
++	{2, 4},
++	{1, 2},
++};
++
++/* device */
++void
++bna_adv_device_init(struct bna_device *device, struct bna *bna,
++		struct bna_res_info *res_info)
++{
++	u8 *kva;
++	u64 dma;
++
++	device->bna = bna;
++
++	/**
++	 * Attach common modules (currently only CEE) and claim the
++	 * respective DMA memory
++	 */
++	BNA_GET_DMA_ADDR(
++		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
++	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
++
++	bfa_cee_attach(&bna->cee, &device->ioc, bna);
++	bfa_cee_mem_claim(&bna->cee, kva, dma);
++	kva += bfa_cee_meminfo();
++	dma += bfa_cee_meminfo();
++}
++
++/* utils */
++
++void
++bna_adv_res_req(struct bna_res_info *res_info)
++{
++	/* DMA memory for COMMON_MODULE */
++	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
++	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
++	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
++	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
++				bfa_cee_meminfo(), PAGE_SIZE);
++
++	/* Virtual memory for retrieving fw_trc */
++	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
++	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
++	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
++	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;
++
++	/* DMA memory for retrieving stats */
++	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
++	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
++	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
++	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
++				ALIGN(BFI_HW_STATS_SIZE, PAGE_SIZE);
++
++	/* Virtual memory for soft stats */
++	res_info[BNA_RES_MEM_T_SWSTATS].res_type = BNA_RES_T_MEM;
++	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
++	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.num = 1;
++	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.len =
++				sizeof(struct bna_sw_stats);
++}
++
++static void
++bna_sw_stats_get(struct bna *bna, struct bna_sw_stats *sw_stats)
++{
++	struct bna_tx *tx;
++	struct bna_txq *txq;
++	struct bna_rx *rx;
++	struct bna_rxp *rxp;
++	struct list_head *qe;
++	struct list_head *txq_qe;
++	struct list_head *rxp_qe;
++	struct list_head *mac_qe;
++	int i;
++
++	sw_stats->device_state = bna_device_state_get(&bna->device);
++	sw_stats->port_state = bna_port_state_get(&bna->port);
++	sw_stats->port_flags = bna->port.flags;
++	sw_stats->llport_state = bna_llport_state_get(&bna->port.llport);
++	sw_stats->priority = bna->port.priority;
++
++	i = 0;
++	list_for_each(qe, &bna->tx_mod.tx_active_q) {
++		tx = (struct bna_tx *)qe;
++		sw_stats->tx_stats[i].tx_state = bna_tx_state_get(tx);
++		sw_stats->tx_stats[i].tx_flags = tx->flags;
++
++		sw_stats->tx_stats[i].num_txqs = 0;
++		sw_stats->tx_stats[i].txq_bmap[0] = 0;
++		sw_stats->tx_stats[i].txq_bmap[1] = 0;
++		list_for_each(txq_qe, &tx->txq_q) {
++			txq = (struct bna_txq *)txq_qe;
++			if (txq->txq_id < 32)
++				sw_stats->tx_stats[i].txq_bmap[0] |=
++						((u32)1 << txq->txq_id);
++			else
++				sw_stats->tx_stats[i].txq_bmap[1] |=
++					((u32)1 << (txq->txq_id - 32));
++			sw_stats->tx_stats[i].num_txqs++;
++		}
++
++		sw_stats->tx_stats[i].txf_id = tx->txf.txf_id;
++
++		i++;
++	}
++	sw_stats->num_active_tx = i;
++
++	i = 0;
++	list_for_each(qe, &bna->rx_mod.rx_active_q) {
++		rx = (struct bna_rx *)qe;
++		sw_stats->rx_stats[i].rx_state = bna_rx_state_get(rx);
++		sw_stats->rx_stats[i].rx_flags = rx->rx_flags;
++
++		sw_stats->rx_stats[i].num_rxps = 0;
++		sw_stats->rx_stats[i].num_rxqs = 0;
++		sw_stats->rx_stats[i].rxq_bmap[0] = 0;
++		sw_stats->rx_stats[i].rxq_bmap[1] = 0;
++		sw_stats->rx_stats[i].cq_bmap[0] = 0;
++		sw_stats->rx_stats[i].cq_bmap[1] = 0;
++		list_for_each(rxp_qe, &rx->rxp_q) {
++			rxp = (struct bna_rxp *)rxp_qe;
++
++			sw_stats->rx_stats[i].num_rxqs += 1;
++
++			if (rxp->type == BNA_RXP_SINGLE) {
++				if (rxp->rxq.single.only->rxq_id < 32) {
++					sw_stats->rx_stats[i].rxq_bmap[0] |=
++					((u32)1 <<
++					rxp->rxq.single.only->rxq_id);
++				} else {
++					sw_stats->rx_stats[i].rxq_bmap[1] |=
++					((u32)1 <<
++					(rxp->rxq.single.only->rxq_id - 32));
++				}
++			} else {
++				if (rxp->rxq.slr.large->rxq_id < 32) {
++					sw_stats->rx_stats[i].rxq_bmap[0] |=
++					((u32)1 <<
++					rxp->rxq.slr.large->rxq_id);
++				} else {
++					sw_stats->rx_stats[i].rxq_bmap[1] |=
++					((u32)1 <<
++					(rxp->rxq.slr.large->rxq_id - 32));
++				}
++
++				if (rxp->rxq.slr.small->rxq_id < 32) {
++					sw_stats->rx_stats[i].rxq_bmap[0] |=
++					((u32)1 <<
++					rxp->rxq.slr.small->rxq_id);
++				} else {
++					sw_stats->rx_stats[i].rxq_bmap[1] |=
++					((u32)1 <<
++					(rxp->rxq.slr.small->rxq_id - 32));
++				}
++				sw_stats->rx_stats[i].num_rxqs += 1;
++			}
++
++			if (rxp->cq.cq_id < 32)
++				sw_stats->rx_stats[i].cq_bmap[0] |=
++					(1 << rxp->cq.cq_id);
++			else
++				sw_stats->rx_stats[i].cq_bmap[1] |=
++					(1 << (rxp->cq.cq_id - 32));
++
++			sw_stats->rx_stats[i].num_rxps++;
++		}
++
++		sw_stats->rx_stats[i].rxf_id = rx->rxf.rxf_id;
++		sw_stats->rx_stats[i].rxf_state = bna_rxf_state_get(&rx->rxf);
++		sw_stats->rx_stats[i].rxf_oper_state = rx->rxf.rxf_oper_state;
++
++		sw_stats->rx_stats[i].num_active_ucast = 0;
++		if (rx->rxf.ucast_active_mac)
++			sw_stats->rx_stats[i].num_active_ucast++;
++		list_for_each(mac_qe, &rx->rxf.ucast_active_q)
++			sw_stats->rx_stats[i].num_active_ucast++;
++
++		sw_stats->rx_stats[i].num_active_mcast = 0;
++		list_for_each(mac_qe, &rx->rxf.mcast_active_q)
++			sw_stats->rx_stats[i].num_active_mcast++;
++
++		sw_stats->rx_stats[i].rxmode_active = rx->rxf.rxmode_active;
++		sw_stats->rx_stats[i].vlan_filter_status =
++						rx->rxf.vlan_filter_status;
++		memcpy(sw_stats->rx_stats[i].vlan_filter_table,
++				rx->rxf.vlan_filter_table,
++				sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32));
++
++		sw_stats->rx_stats[i].rss_status = rx->rxf.rss_status;
++		sw_stats->rx_stats[i].hds_status = rx->rxf.hds_status;
++
++		i++;
++	}
++	sw_stats->num_active_rx = i;
++}
++
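++/*
++ * Stats-get completion. The firmware DMAs a packed block: fixed
++ * counters first, then per-function stats only for the RxFs/TxFs that
++ * were active. After fixing the endianness of each 64-bit counter,
++ * walk the function IDs from high to low, copying each active
++ * function's block from the packed tail into its fixed slot; going
++ * backwards keeps the in-place expansion from clobbering packed data
++ * before it is copied
++ */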
++static void
++bna_fw_cb_stats_get(void *arg, int status)
++{
++	struct bna *bna = (struct bna *)arg;
++	u64 *p_stats;
++	int i, count;
++	int rxf_count, txf_count;
++	u64 rxf_bmap, txf_bmap;
++
++	bfa_q_qe_init(&bna->mbox_qe.qe);
++
++	if (status == 0) {
++		p_stats = (u64 *)bna->stats.hw_stats;
++		count = sizeof(struct bfi_ll_stats) / sizeof(u64);
++		for (i = 0; i < count; i++)
++			p_stats[i] = cpu_to_be64(p_stats[i]);
++
++		rxf_count = 0;
++		rxf_bmap = (u64)bna->stats.rxf_bmap[0] |
++			((u64)bna->stats.rxf_bmap[1] << 32);
++		for (i = 0; i < BFI_LL_RXF_ID_MAX; i++)
++			if (rxf_bmap & ((u64)1 << i))
++				rxf_count++;
++
++		txf_count = 0;
++		txf_bmap = (u64)bna->stats.txf_bmap[0] |
++			((u64)bna->stats.txf_bmap[1] << 32);
++		for (i = 0; i < BFI_LL_TXF_ID_MAX; i++)
++			if (txf_bmap & ((u64)1 << i))
++				txf_count++;
++
++		p_stats = (u64 *)&bna->stats.hw_stats->rxf_stats[0] +
++				((rxf_count * sizeof(struct bfi_ll_stats_rxf) +
++				txf_count * sizeof(struct bfi_ll_stats_txf))/
++				sizeof(u64));
++
++		/* Populate the TXF stats from the firmware DMAed copy */
++		for (i = (BFI_LL_TXF_ID_MAX - 1); i >= 0; i--)
++			if (txf_bmap & ((u64)1 << i)) {
++				p_stats -= sizeof(struct bfi_ll_stats_txf)/
++						sizeof(u64);
++				memcpy(&bna->stats.hw_stats->txf_stats[i],
++					p_stats,
++					sizeof(struct bfi_ll_stats_txf));
++			}
++
++		/* Populate the RXF stats from the firmware DMAed copy */
++		for (i = (BFI_LL_RXF_ID_MAX - 1); i >= 0; i--)
++			if (rxf_bmap & ((u64)1 << i)) {
++				p_stats -= sizeof(struct bfi_ll_stats_rxf)/
++						sizeof(u64);
++				memcpy(&bna->stats.hw_stats->rxf_stats[i],
++					p_stats,
++					sizeof(struct bfi_ll_stats_rxf));
++			}
++
++		bna_sw_stats_get(bna, bna->stats.sw_stats);
++		bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
++	} else {
++		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
++	}
++}
++
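++/*
++ * Post a stats-get request covering all active RxFs/TxFs, and
++ * snapshot the current RxF/TxF bitmaps into bna->stats so the
++ * completion handler can reconstruct the packed layout
++ */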
++static void
++bna_fw_stats_get(struct bna *bna)
++{
++	struct bfi_ll_stats_req ll_req;
++
++	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_GET_REQ, 0);
++	ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
++
++	ll_req.rxf_id_mask[0] = htonl(bna->rx_mod.rxf_bmap[0]);
++	ll_req.rxf_id_mask[1] =	htonl(bna->rx_mod.rxf_bmap[1]);
++	ll_req.txf_id_mask[0] =	htonl(bna->tx_mod.txf_bmap[0]);
++	ll_req.txf_id_mask[1] =	htonl(bna->tx_mod.txf_bmap[1]);
++
++	ll_req.host_buffer.a32.addr_hi = bna->hw_stats_dma.msb;
++	ll_req.host_buffer.a32.addr_lo = bna->hw_stats_dma.lsb;
++
++	bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
++				bna_fw_cb_stats_get, bna);
++	bna_mbox_send(bna, &bna->mbox_qe);
++
++	bna->stats.rxf_bmap[0] = bna->rx_mod.rxf_bmap[0];
++	bna->stats.rxf_bmap[1] = bna->rx_mod.rxf_bmap[1];
++	bna->stats.txf_bmap[0] = bna->tx_mod.txf_bmap[0];
++	bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];
++}
++
++static void
++bna_fw_cb_stats_clr(void *arg, int status)
++{
++	struct bna *bna = (struct bna *)arg;
++
++	bfa_q_qe_init(&bna->mbox_qe.qe);
++
++	memset(bna->stats.sw_stats, 0, sizeof(struct bna_sw_stats));
++	memset(bna->stats.hw_stats, 0, sizeof(struct bfi_ll_stats));
++
++	bnad_cb_stats_clr(bna->bnad);
++}
++
++static void
++bna_fw_stats_clr(struct bna *bna)
++{
++	struct bfi_ll_stats_req ll_req;
++
++	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
++	ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
++	ll_req.rxf_id_mask[0] = htonl(0xffffffff);
++	ll_req.rxf_id_mask[1] =	htonl(0xffffffff);
++	ll_req.txf_id_mask[0] =	htonl(0xffffffff);
++	ll_req.txf_id_mask[1] =	htonl(0xffffffff);
++
++	bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
++				bna_fw_cb_stats_clr, bna);
++	bna_mbox_send(bna, &bna->mbox_qe);
++}
++
++void
++bna_stats_get(struct bna *bna)
++{
++	if (bna_device_status_get(&bna->device))
++		bna_fw_stats_get(bna);
++	else
++		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
++}
++
++void
++bna_stats_clr(struct bna *bna)
++{
++	if (bna_device_status_get(&bna->device)) {
++		bna_fw_stats_clr(bna);
++	} else {
++		memset(bna->stats.sw_stats, 0,
++				sizeof(struct bna_sw_stats));
++		memset(bna->stats.hw_stats, 0,
++				sizeof(struct bfi_ll_stats));
++		bnad_cb_stats_clr(bna->bnad);
++	}
++}
++
++/* IB */
++void
++bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
++{
++	ib->ib_config.coalescing_timeo = coalescing_timeo;
++
++	if (ib->start_count)
++		ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
++				(u32)ib->ib_config.coalescing_timeo, 0);
++}
++
++/* RxF */
++void
++bna_rxf_adv_init(struct bna_rxf *rxf,
++		struct bna_rx *rx,
++		struct bna_rx_config *q_config)
++{
++	switch (q_config->rxp_type) {
++	case BNA_RXP_SINGLE:
++		/* No-op */
++		break;
++	case BNA_RXP_SLR:
++		rxf->ctrl_flags |= BNA_RXF_CF_SM_LG_RXQ;
++		break;
++	case BNA_RXP_HDS:
++		rxf->hds_cfg.hdr_type = q_config->hds_config.hdr_type;
++		rxf->hds_cfg.header_size =
++				q_config->hds_config.header_size;
++		rxf->forced_offset = 0;
++		break;
++	default:
++		break;
++	}
++
++	if (q_config->rss_status == BNA_STATUS_T_ENABLED) {
++		rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
++		rxf->rss_cfg.hash_type = q_config->rss_config.hash_type;
++		rxf->rss_cfg.hash_mask = q_config->rss_config.hash_mask;
++		memcpy(&rxf->rss_cfg.toeplitz_hash_key[0],
++			&q_config->rss_config.toeplitz_hash_key[0],
++			sizeof(rxf->rss_cfg.toeplitz_hash_key));
++	}
++}
++
++static void
++rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
++{
++	struct bfi_ll_rxf_req req;
++
++	bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
++
++	req.rxf_id = rxf->rxf_id;
++	req.enable = status;
++
++	bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
++			rxf_cb_cam_fltr_mbox_cmd, rxf);
++
++	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
++}
++
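++/*
++ * Walk the Rx function database RAM: when enabling, set
++ * BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE on every RxF except this one
++ * (redirecting their VLAN filtering here); when disabling, clear the
++ * flag everywhere
++ */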
++void
++__rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status)
++{
++	struct bna_rx_fndb_ram *rx_fndb_ram;
++	u32 ctrl_flags;
++	int i;
++
++	rx_fndb_ram = (struct bna_rx_fndb_ram *)
++			BNA_GET_MEM_BASE_ADDR(rxf->rx->bna->pcidev.pci_bar_kva,
++			RX_FNDB_RAM_BASE_OFFSET);
++
++	for (i = 0; i < BFI_MAX_RXF; i++) {
++		if (status == BNA_STATUS_T_ENABLED) {
++			if (i == rxf->rxf_id)
++				continue;
++
++			ctrl_flags =
++				readl(&rx_fndb_ram[i].control_flags);
++			ctrl_flags |= BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
++			writel(ctrl_flags,
++						&rx_fndb_ram[i].control_flags);
++		} else {
++			ctrl_flags =
++				readl(&rx_fndb_ram[i].control_flags);
++			ctrl_flags &= ~BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
++			writel(ctrl_flags,
++						&rx_fndb_ram[i].control_flags);
++		}
++	}
++}
++
++int
++rxf_process_packet_filter_ucast(struct bna_rxf *rxf)
++{
++	struct bna_mac *mac = NULL;
++	struct list_head *qe;
++
++	/* Add additional MAC entries */
++	if (!list_empty(&rxf->ucast_pending_add_q)) {
++		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
++		bfa_q_qe_init(qe);
++		mac = (struct bna_mac *)qe;
++		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_ADD_REQ, mac);
++		list_add_tail(&mac->qe, &rxf->ucast_active_q);
++		return 1;
++	}
++
++	/* Delete MAC addresses previously added */
++	if (!list_empty(&rxf->ucast_pending_del_q)) {
++		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
++		bfa_q_qe_init(qe);
++		mac = (struct bna_mac *)qe;
++		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
++		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
++		return 1;
++	}
++
++	return 0;
++}
++
++int
++rxf_process_packet_filter_promisc(struct bna_rxf *rxf)
++{
++	struct bna *bna = rxf->rx->bna;
++
++	/* Enable/disable promiscuous mode */
++	if (is_promisc_enable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask)) {
++		/* move promisc configuration from pending -> active */
++		promisc_inactive(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		rxf->rxmode_active |= BNA_RXMODE_PROMISC;
++
++		/* Disable VLAN filter to allow all VLANs */
++		__rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
++		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
++				BNA_STATUS_T_ENABLED);
++		return 1;
++	} else if (is_promisc_disable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask)) {
++		/* move promisc configuration from pending -> active */
++		promisc_inactive(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
++		bna->rxf_promisc_id = BFI_MAX_RXF;
++
++		/* Revert VLAN filter */
++		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
++		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
++				BNA_STATUS_T_DISABLED);
++		return 1;
++	}
++
++	return 0;
++}
++
++int
++rxf_process_packet_filter_default(struct bna_rxf *rxf)
++{
++	struct bna *bna = rxf->rx->bna;
++
++	/* Enable/disable default mode */
++	if (is_default_enable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask)) {
++		/* move default configuration from pending -> active */
++		default_inactive(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		rxf->rxmode_active |= BNA_RXMODE_DEFAULT;
++
++		/* Disable VLAN filter to allow all VLANs */
++		__rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
++		/* Redirect all other RxF vlan filtering to this one */
++		__rxf_default_function_config(rxf, BNA_STATUS_T_ENABLED);
++		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
++				BNA_STATUS_T_ENABLED);
++		return 1;
++	} else if (is_default_disable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask)) {
++		/* move default configuration from pending -> active */
++		default_inactive(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
++		bna->rxf_default_id = BFI_MAX_RXF;
++
++		/* Revert VLAN filter */
++		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
++		/* Stop RxF vlan filter table redirection */
++		__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
++		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
++				BNA_STATUS_T_DISABLED);
++		return 1;
++	}
++
++	return 0;
++}
++
++int
++rxf_process_packet_filter_allmulti(struct bna_rxf *rxf)
++{
++	/* Enable/disable allmulti mode */
++	if (is_allmulti_enable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask)) {
++		/* move allmulti configuration from pending -> active */
++		allmulti_inactive(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
++
++		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
++				BNA_STATUS_T_ENABLED);
++		return 1;
++	} else if (is_allmulti_disable(rxf->rxmode_pending,
++					rxf->rxmode_pending_bitmask)) {
++		/* move allmulti configuration from pending -> active */
++		allmulti_inactive(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
++
++		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
++				BNA_STATUS_T_DISABLED);
++		return 1;
++	}
++
++	return 0;
++}
++
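++/*
++ * The rxf_clear_* helpers below undo the active packet-filter
++ * configuration one mailbox command at a time: each returns 1 after
++ * posting a command (more cleanup still pending) and 0 once there is
++ * nothing left to clear
++ */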
++int
++rxf_clear_packet_filter_ucast(struct bna_rxf *rxf)
++{
++	struct bna_mac *mac = NULL;
++	struct list_head *qe;
++
++	/* 1. delete pending ucast entries */
++	if (!list_empty(&rxf->ucast_pending_del_q)) {
++		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
++		bfa_q_qe_init(qe);
++		mac = (struct bna_mac *)qe;
++		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
++		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
++		return 1;
++	}
++
++	/* 2. clear active ucast entries; move them to pending_add_q */
++	if (!list_empty(&rxf->ucast_active_q)) {
++		bfa_q_deq(&rxf->ucast_active_q, &qe);
++		bfa_q_qe_init(qe);
++		mac = (struct bna_mac *)qe;
++		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
++		list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
++		return 1;
++	}
++
++	return 0;
++}
++
++int
++rxf_clear_packet_filter_promisc(struct bna_rxf *rxf)
++{
++	struct bna *bna = rxf->rx->bna;
++
++	/* 6. Execute pending promisc mode disable command */
++	if (is_promisc_disable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask)) {
++		/* move promisc configuration from pending -> active */
++		promisc_inactive(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
++		bna->rxf_promisc_id = BFI_MAX_RXF;
++
++		/* Revert VLAN filter */
++		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
++		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
++				BNA_STATUS_T_DISABLED);
++		return 1;
++	}
++
++	/* 7. Clear active promisc mode; move it to pending enable */
++	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
++		/* move promisc configuration from active -> pending */
++		promisc_enable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
++
++		/* Revert VLAN filter */
++		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
++		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
++				BNA_STATUS_T_DISABLED);
++		return 1;
++	}
++
++	return 0;
++}
++
++int
++rxf_clear_packet_filter_default(struct bna_rxf *rxf)
++{
++	struct bna *bna = rxf->rx->bna;
++
++	/* 8. Execute pending default mode disable command */
++	if (is_default_disable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask)) {
++		/* move default configuration from pending -> active */
++		default_inactive(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
++		bna->rxf_default_id = BFI_MAX_RXF;
++
++		/* Revert VLAN filter */
++		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
++		/* Stop RxF vlan filter table redirection */
++		__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
++		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
++				BNA_STATUS_T_DISABLED);
++		return 1;
++	}
++
++	/* 9. Clear active default mode; move it to pending enable */
++	if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
++		/* move default configuration from active -> pending */
++		default_enable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
++
++		/* Revert VLAN filter */
++		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
++		/* Stop RxF vlan filter table redirection */
++		__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
++		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
++				BNA_STATUS_T_DISABLED);
++		return 1;
++	}
++
++	return 0;
++}
++
++int
++rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf)
++{
++	/* 10. Execute pending allmulti mode disable command */
++	if (is_allmulti_disable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask)) {
++		/* move allmulti configuration from pending -> active */
++		allmulti_inactive(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
++		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
++				BNA_STATUS_T_DISABLED);
++		return 1;
++	}
++
++	/* 11. Clear active allmulti mode; move it to pending enable */
++	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
++		/* move allmulti configuration from active -> pending */
++		allmulti_enable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
++		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
++				BNA_STATUS_T_DISABLED);
++		return 1;
++	}
++
++	return 0;
++}
++
++void
++rxf_reset_packet_filter_ucast(struct bna_rxf *rxf)
++{
++	struct list_head *qe;
++	struct bna_mac *mac;
++
++	/* 1. Move active ucast entries to pending_add_q */
++	while (!list_empty(&rxf->ucast_active_q)) {
++		bfa_q_deq(&rxf->ucast_active_q, &qe);
++		bfa_q_qe_init(qe);
++		list_add_tail(qe, &rxf->ucast_pending_add_q);
++	}
++
++	/* 2. Throw away delete pending ucast entries */
++	while (!list_empty(&rxf->ucast_pending_del_q)) {
++		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
++		bfa_q_qe_init(qe);
++		mac = (struct bna_mac *)qe;
++		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
++	}
++}
++
++void
++rxf_reset_packet_filter_promisc(struct bna_rxf *rxf)
++{
++	struct bna *bna = rxf->rx->bna;
++
++	/* 6. Clear pending promisc mode disable */
++	if (is_promisc_disable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask)) {
++		promisc_inactive(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
++		bna->rxf_promisc_id = BFI_MAX_RXF;
++	}
++
++	/* 7. Move promisc mode config from active -> pending */
++	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
++		promisc_enable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
++	}
++
++}
++
++void
++rxf_reset_packet_filter_default(struct bna_rxf *rxf)
++{
++	struct bna *bna = rxf->rx->bna;
++
++	/* 8. Clear pending default mode disable */
++	if (is_default_disable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask)) {
++		default_inactive(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
++		bna->rxf_default_id = BFI_MAX_RXF;
++	}
++
++	/* 9. Move default mode config from active -> pending */
++	if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
++		default_enable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
++	}
++}
++
++void
++rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
++{
++	/* 10. Clear pending allmulti mode disable */
++	if (is_allmulti_disable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask)) {
++		allmulti_inactive(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
++	}
++
++	/* 11. Move allmulti mode config from active -> pending */
++	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
++		allmulti_enable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
++	}
++}
++
++/**
++ * Should only be called by bna_rx_mode_set.
++ * Helps decide whether h/w configuration is needed.
++ *  Returns:
++ *	0 = no h/w change
++ *	1 = need h/w change
++ */
++int
++rxf_promisc_enable(struct bna_rxf *rxf)
++{
++	struct bna *bna = rxf->rx->bna;
++	int ret = 0;
++
++	/* There cannot be any pending disable command */
++
++	/* Do nothing if pending enable or already enabled */
++	if (is_promisc_enable(rxf->rxmode_pending,
++			rxf->rxmode_pending_bitmask) ||
++			(rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
++		/* Schedule enable */
++	} else {
++		/* Promisc mode should not be active in the system */
++		promisc_enable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		bna->rxf_promisc_id = rxf->rxf_id;
++		ret = 1;
++	}
++
++	return ret;
++}
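++
++/*
++ * Worked example of the pending/active protocol, as the helpers above
++ * appear to use it (a reading of this patch, not an authoritative
++ * spec): rxf_promisc_enable() records PROMISC as pending-enable; a
++ * later filter-processing flush moves it pending -> active and posts
++ * the mailbox command; an rxf_promisc_disable() arriving before the
++ * flush simply cancels the pending bit via promisc_inactive(), with
++ * no h/w traffic at all.
++ */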
++
++/**
++ * Should only be called by bna_rx_mode_set.
++ * Helps decide whether h/w configuration is needed.
++ *  Returns:
++ *	0 = no h/w change
++ *	1 = need h/w change
++ */
++int
++rxf_promisc_disable(struct bna_rxf *rxf)
++{
++	struct bna *bna = rxf->rx->bna;
++	int ret = 0;
++
++	/* There cannot be any pending disable */
++
++	/* Turn off pending enable command, if any */
++	if (is_promisc_enable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask)) {
++		/* Promisc mode should not be active */
++		/* system promisc state should be pending */
++		promisc_inactive(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		/* Remove the promisc state from the system */
++		bna->rxf_promisc_id = BFI_MAX_RXF;
++
++		/* Schedule disable */
++	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
++		/* Promisc mode should be active in the system */
++		promisc_disable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		ret = 1;
++	}
++	/* Do nothing if already disabled */
++
++	return ret;
++}
++
++/**
++ * Should only be called by bna_rx_mode_set.
++ * Helps decide whether h/w configuration is needed.
++ *  Returns:
++ *	0 = no h/w change
++ *	1 = need h/w change
++ */
++int
++rxf_default_enable(struct bna_rxf *rxf)
++{
++	struct bna *bna = rxf->rx->bna;
++	int ret = 0;
++
++	/* There cannot be any pending disable command */
++
++	/* Do nothing if pending enable or already enabled */
++	if (is_default_enable(rxf->rxmode_pending,
++		rxf->rxmode_pending_bitmask) ||
++		(rxf->rxmode_active & BNA_RXMODE_DEFAULT)) {
++		/* Schedule enable */
++	} else {
++		/* Default mode should not be active in the system */
++		default_enable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		bna->rxf_default_id = rxf->rxf_id;
++		ret = 1;
++	}
++
++	return ret;
++}
++
++/**
++ * Should only be called by bna_rx_mode_set.
++ * Helps decide whether h/w configuration is needed.
++ *  Returns:
++ *	0 = no h/w change
++ *	1 = need h/w change
++ */
++int
++rxf_default_disable(struct bna_rxf *rxf)
++{
++	struct bna *bna = rxf->rx->bna;
++	int ret = 0;
++
++	/* There cannot be any pending disable */
++
++	/* Turn off pending enable command, if any */
++	if (is_default_enable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask)) {
++		/* Default mode should not be active */
++		/* system default state should be pending */
++		default_inactive(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		/* Remove the default state from the system */
++		bna->rxf_default_id = BFI_MAX_RXF;
++
++	/* Schedule disable */
++	} else if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
++		/* Default mode should be active in the system */
++		default_disable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		ret = 1;
++	}
++	/* Do nothing if already disabled */
++
++	return ret;
++}
++
++/**
++ * Should only be called by bna_rx_mode_set.
++ * Helps decide whether h/w configuration is needed.
++ *  Returns:
++ *	0 = no h/w change
++ *	1 = need h/w change
++ */
++int
++rxf_allmulti_enable(struct bna_rxf *rxf)
++{
++	int ret = 0;
++
++	/* There cannot be any pending disable command */
++
++	/* Do nothing if pending enable or already enabled */
++	if (is_allmulti_enable(rxf->rxmode_pending,
++			rxf->rxmode_pending_bitmask) ||
++			(rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
++		/* Schedule enable */
++	} else {
++		allmulti_enable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		ret = 1;
++	}
++
++	return ret;
++}
++
++/**
++ * Should only be called by bna_rx_mode_set.
++ * Helps decide whether h/w configuration is needed.
++ *  Returns:
++ *	0 = no h/w change
++ *	1 = need h/w change
++ */
++int
++rxf_allmulti_disable(struct bna_rxf *rxf)
++{
++	int ret = 0;
++
++	/* There cannot be any pending disable */
++
++	/* Turn off pending enable command, if any */
++	if (is_allmulti_enable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask)) {
++		/* Allmulti mode should not be active */
++		allmulti_inactive(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++
++	/* Schedule disable */
++	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
++		allmulti_disable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		ret = 1;
++	}
++
++	return ret;
++}
++
++/* RxF <- bnad */
++void
++bna_rx_mcast_delall(struct bna_rx *rx,
++		    void (*cbfn)(struct bnad *, struct bna_rx *,
++				 enum bna_cb_status))
++{
++	struct bna_rxf *rxf = &rx->rxf;
++	struct list_head *qe;
++	struct bna_mac *mac;
++	int need_hw_config = 0;
++
++	/* Purge all entries from pending_add_q */
++	while (!list_empty(&rxf->mcast_pending_add_q)) {
++		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
++		mac = (struct bna_mac *)qe;
++		bfa_q_qe_init(&mac->qe);
++		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
++	}
++
++	/* Schedule all entries in active_q for deletion */
++	while (!list_empty(&rxf->mcast_active_q)) {
++		bfa_q_deq(&rxf->mcast_active_q, &qe);
++		mac = (struct bna_mac *)qe;
++		bfa_q_qe_init(&mac->qe);
++		list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
++		need_hw_config = 1;
++	}
++
++	if (need_hw_config) {
++		rxf->cam_fltr_cbfn = cbfn;
++		rxf->cam_fltr_cbarg = rx->bna->bnad;
++		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
++		return;
++	}
++
++	if (cbfn)
++		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
++}
++
++/* RxF <- Rx */
++void
++bna_rx_receive_resume(struct bna_rx *rx,
++		      void (*cbfn)(struct bnad *, struct bna_rx *,
++				   enum bna_cb_status))
++{
++	struct bna_rxf *rxf = &rx->rxf;
++
++	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED) {
++		rxf->oper_state_cbfn = cbfn;
++		rxf->oper_state_cbarg = rx->bna->bnad;
++		bfa_fsm_send_event(rxf, RXF_E_RESUME);
++	} else if (cbfn)
++		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
++}
++
++void
++bna_rx_receive_pause(struct bna_rx *rx,
++		     void (*cbfn)(struct bnad *, struct bna_rx *,
++				  enum bna_cb_status))
++{
++	struct bna_rxf *rxf = &rx->rxf;
++
++	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_RUNNING) {
++		rxf->oper_state_cbfn = cbfn;
++		rxf->oper_state_cbarg = rx->bna->bnad;
++		bfa_fsm_send_event(rxf, RXF_E_PAUSE);
++	} else if (cbfn)
++		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
++}
++
++/* RxF <- bnad */
++enum bna_cb_status
++bna_rx_ucast_add(struct bna_rx *rx, u8 *addr,
++		 void (*cbfn)(struct bnad *, struct bna_rx *,
++			      enum bna_cb_status))
++{
++	struct bna_rxf *rxf = &rx->rxf;
++	struct list_head *qe;
++	struct bna_mac *mac;
++
++	/* Check if already added */
++	list_for_each(qe, &rxf->ucast_active_q) {
++		mac = (struct bna_mac *)qe;
++		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
++			if (cbfn)
++				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
++			return BNA_CB_SUCCESS;
++		}
++	}
++
++	/* Check if pending addition */
++	list_for_each(qe, &rxf->ucast_pending_add_q) {
++		mac = (struct bna_mac *)qe;
++		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
++			if (cbfn)
++				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
++			return BNA_CB_SUCCESS;
++		}
++	}
++
++	mac = bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
++	if (mac == NULL)
++		return BNA_CB_UCAST_CAM_FULL;
++	bfa_q_qe_init(&mac->qe);
++	memcpy(mac->addr, addr, ETH_ALEN);
++	list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
++
++	rxf->cam_fltr_cbfn = cbfn;
++	rxf->cam_fltr_cbarg = rx->bna->bnad;
++
++	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
++
++	return BNA_CB_SUCCESS;
++}
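++
++/*
++ * Illustrative caller sketch (compiled out; bnad_cb_ucast_set is an
++ * assumed callback name, not part of this patch): a MAC address change
++ * path would add the new unicast address and map a full CAM onto an
++ * errno.
++ */
++#if 0
++static int
++example_mac_add(struct bna_rx *rx, u8 *newaddr)
++{
++	if (bna_rx_ucast_add(rx, newaddr, bnad_cb_ucast_set) !=
++	    BNA_CB_SUCCESS)
++		return -EBUSY;
++	return 0;
++}
++#endif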
++
++/* RxF <- bnad */
++enum bna_cb_status
++bna_rx_ucast_del(struct bna_rx *rx, u8 *addr,
++		 void (*cbfn)(struct bnad *, struct bna_rx *,
++			      enum bna_cb_status))
++{
++	struct bna_rxf *rxf = &rx->rxf;
++	struct list_head *qe;
++	struct bna_mac *mac;
++
++	list_for_each(qe, &rxf->ucast_pending_add_q) {
++		mac = (struct bna_mac *)qe;
++		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
++			list_del(qe);
++			bfa_q_qe_init(qe);
++			bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
++			if (cbfn)
++				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
++			return BNA_CB_SUCCESS;
++		}
++	}
++
++	list_for_each(qe, &rxf->ucast_active_q) {
++		mac = (struct bna_mac *)qe;
++		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
++			list_del(qe);
++			bfa_q_qe_init(qe);
++			list_add_tail(qe, &rxf->ucast_pending_del_q);
++			rxf->cam_fltr_cbfn = cbfn;
++			rxf->cam_fltr_cbarg = rx->bna->bnad;
++			bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
++			return BNA_CB_SUCCESS;
++		}
++	}
++
++	return BNA_CB_INVALID_MAC;
++}
++
++/* RxF <- bnad */
++enum bna_cb_status
++bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
++		enum bna_rxmode bitmask,
++		void (*cbfn)(struct bnad *, struct bna_rx *,
++			     enum bna_cb_status))
++{
++	struct bna_rxf *rxf = &rx->rxf;
++	int need_hw_config = 0;
++
++	/* Error checks */
++
++	if (is_promisc_enable(new_mode, bitmask)) {
++		/* If promisc mode is already enabled elsewhere in the system */
++		if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) &&
++			(rx->bna->rxf_promisc_id != rxf->rxf_id))
++			goto err_return;
++
++		/* If default mode is already enabled in the system */
++		if (rx->bna->rxf_default_id != BFI_MAX_RXF)
++			goto err_return;
++
++		/* Trying to enable promiscuous and default mode together */
++		if (is_default_enable(new_mode, bitmask))
++			goto err_return;
++	}
++
++	if (is_default_enable(new_mode, bitmask)) {
++		/* If default mode is already enabled elsewhere in the system */
++		if ((rx->bna->rxf_default_id != BFI_MAX_RXF) &&
++			(rx->bna->rxf_default_id != rxf->rxf_id)) {
++				goto err_return;
++		}
++
++		/* If promiscuous mode is already enabled in the system */
++		if (rx->bna->rxf_promisc_id != BFI_MAX_RXF)
++			goto err_return;
++	}
++
++	/* Process the commands */
++
++	if (is_promisc_enable(new_mode, bitmask)) {
++		if (rxf_promisc_enable(rxf))
++			need_hw_config = 1;
++	} else if (is_promisc_disable(new_mode, bitmask)) {
++		if (rxf_promisc_disable(rxf))
++			need_hw_config = 1;
++	}
++
++	if (is_default_enable(new_mode, bitmask)) {
++		if (rxf_default_enable(rxf))
++			need_hw_config = 1;
++	} else if (is_default_disable(new_mode, bitmask)) {
++		if (rxf_default_disable(rxf))
++			need_hw_config = 1;
++	}
++
++	if (is_allmulti_enable(new_mode, bitmask)) {
++		if (rxf_allmulti_enable(rxf))
++			need_hw_config = 1;
++	} else if (is_allmulti_disable(new_mode, bitmask)) {
++		if (rxf_allmulti_disable(rxf))
++			need_hw_config = 1;
++	}
++
++	/* Trigger h/w if needed */
++
++	if (need_hw_config) {
++		rxf->cam_fltr_cbfn = cbfn;
++		rxf->cam_fltr_cbarg = rx->bna->bnad;
++		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
++	} else if (cbfn)
++		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
++
++	return BNA_CB_SUCCESS;
++
++err_return:
++	return BNA_CB_FAIL;
++}
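++
++/*
++ * Illustrative caller sketch (compiled out): how an ndo_set_rx_mode
++ * handler might map netdev IFF_* flags onto bna_rx_mode_set(). The rx
++ * pointer and the bnad_cb_rx_mode_set callback are assumed names used
++ * for illustration only.
++ */
++#if 0
++static void
++example_set_rx_mode(struct bna_rx *rx, struct net_device *netdev)
++{
++	enum bna_rxmode new_mode = 0;
++	enum bna_rxmode bitmask = BNA_RXMODE_PROMISC | BNA_RXMODE_ALLMULTI;
++
++	if (netdev->flags & IFF_PROMISC)
++		new_mode |= BNA_RXMODE_PROMISC;
++	if (netdev->flags & IFF_ALLMULTI)
++		new_mode |= BNA_RXMODE_ALLMULTI;
++
++	/* A bit set in bitmask but clear in new_mode requests a disable */
++	bna_rx_mode_set(rx, new_mode, bitmask, bnad_cb_rx_mode_set);
++}
++#endif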
++
++/* RxF <- bnad */
++void
++bna_rx_rss_enable(struct bna_rx *rx)
++{
++	struct bna_rxf *rxf = &rx->rxf;
++
++	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
++	rxf->rss_status = BNA_STATUS_T_ENABLED;
++	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
++}
++
++/* RxF <- bnad */
++void
++bna_rx_rss_disable(struct bna_rx *rx)
++{
++	struct bna_rxf *rxf = &rx->rxf;
++
++	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
++	rxf->rss_status = BNA_STATUS_T_DISABLED;
++	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
++}
++
++/* RxF <- bnad */
++void
++bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config)
++{
++	struct bna_rxf *rxf = &rx->rxf;
++
++	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
++	rxf->rss_status = BNA_STATUS_T_ENABLED;
++	rxf->rss_cfg = *rss_config;
++	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
++}
++
++/* RxF <- bnad */
++void
++bna_rx_vlanfilter_enable(struct bna_rx *rx)
++{
++	struct bna_rxf *rxf = &rx->rxf;
++
++	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
++		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
++		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
++		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
++	}
++}
++
++/* RxF <- bnad */
++void
++bna_rx_vlanfilter_disable(struct bna_rx *rx)
++{
++	struct bna_rxf *rxf = &rx->rxf;
++
++	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
++		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
++		rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
++		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
++	}
++}
++
++/* Rx */
++
++struct bna_rxp *
++bna_rx_get_rxp(struct bna_rx *rx, int vector)
++{
++	struct bna_rxp *rxp;
++	struct list_head *qe;
++
++	list_for_each(qe, &rx->rxp_q) {
++		rxp = (struct bna_rxp *)qe;
++		if (rxp->vector == vector)
++			return rxp;
++	}
++	return NULL;
++}
++
++/*
++ * bna_rx_rss_rit_set()
++ * Sets the Q ids for the specified msi-x vectors in the RIT.
++ * Maximum rit size supported is 64, which should be the max size of the
++ * vectors array.
++ */
++
++void
++bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors, int nvectors)
++{
++	int i;
++	struct bna_rxp *rxp;
++	struct bna_rxq *q0 = NULL, *q1 = NULL;
++	struct bna *bna;
++	struct bna_rxf *rxf;
++
++	/* Build the RIT contents for this RX */
++	bna = rx->bna;
++
++	rxf = &rx->rxf;
++	for (i = 0; i < nvectors; i++) {
++		rxp = bna_rx_get_rxp(rx, vectors[i]);
++
++		GET_RXQS(rxp, q0, q1);
++		rxf->rit_segment->rit[i].large_rxq_id = q0->rxq_id;
++		rxf->rit_segment->rit[i].small_rxq_id = (q1 ? q1->rxq_id : 0);
++	}
++
++	rxf->rit_segment->rit_size = nvectors;
++
++	/* Subsequent call to enable/reconfig RSS will update the RIT in h/w */
++}
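++
++/*
++ * Usage sketch (compiled out): program one RIT slot per Rx MSI-X
++ * vector, then (re)enable RSS so the new table reaches hardware. The
++ * assumption that vector numbering matches rxp->vector is ours, for
++ * illustration.
++ */
++#if 0
++static void
++example_rss_rit_setup(struct bna_rx *rx, int num_rxps)
++{
++	unsigned int vectors[BFI_RSS_RIT_SIZE];	/* max RIT size is 64 */
++	int i;
++
++	for (i = 0; i < num_rxps && i < BFI_RSS_RIT_SIZE; i++)
++		vectors[i] = i;
++
++	bna_rx_rss_rit_set(rx, vectors, i);
++	bna_rx_rss_enable(rx);	/* pushes the RIT to h/w */
++}
++#endif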
++
++/* Rx <- bnad */
++void
++bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
++{
++	struct bna_rxp *rxp;
++	struct list_head *qe;
++
++	list_for_each(qe, &rx->rxp_q) {
++		rxp = (struct bna_rxp *)qe;
++		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
++		bna_ib_coalescing_timeo_set(rxp->cq.ib, coalescing_timeo);
++	}
++}
++
++/* Rx <- bnad */
++void
++bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX])
++{
++	int i, j;
++
++	for (i = 0; i < BNA_LOAD_T_MAX; i++)
++		for (j = 0; j < BNA_BIAS_T_MAX; j++)
++			bna->rx_mod.dim_vector[i][j] = vector[i][j];
++}
++
++/* Rx <- bnad */
++void
++bna_rx_dim_update(struct bna_ccb *ccb)
++{
++	struct bna *bna = ccb->cq->rx->bna;
++	u32 load, bias;
++	u32 pkt_rt, small_rt, large_rt;
++	u8 coalescing_timeo;
++
++	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
++		(ccb->pkt_rate.large_pkt_cnt == 0))
++		return;
++
++	/* Arrive at preconfigured coalescing timeo value based on pkt rate */
++
++	small_rt = ccb->pkt_rate.small_pkt_cnt;
++	large_rt = ccb->pkt_rate.large_pkt_cnt;
++
++	pkt_rt = small_rt + large_rt;
++
++	if (pkt_rt < BNA_PKT_RATE_10K)
++		load = BNA_LOAD_T_LOW_4;
++	else if (pkt_rt < BNA_PKT_RATE_20K)
++		load = BNA_LOAD_T_LOW_3;
++	else if (pkt_rt < BNA_PKT_RATE_30K)
++		load = BNA_LOAD_T_LOW_2;
++	else if (pkt_rt < BNA_PKT_RATE_40K)
++		load = BNA_LOAD_T_LOW_1;
++	else if (pkt_rt < BNA_PKT_RATE_50K)
++		load = BNA_LOAD_T_HIGH_1;
++	else if (pkt_rt < BNA_PKT_RATE_60K)
++		load = BNA_LOAD_T_HIGH_2;
++	else if (pkt_rt < BNA_PKT_RATE_80K)
++		load = BNA_LOAD_T_HIGH_3;
++	else
++		load = BNA_LOAD_T_HIGH_4;
++
++	if (small_rt > (large_rt << 1))
++		bias = 0;
++	else
++		bias = 1;
++
++	ccb->pkt_rate.small_pkt_cnt = 0;
++	ccb->pkt_rate.large_pkt_cnt = 0;
++
++	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
++	ccb->rx_coalescing_timeo = coalescing_timeo;
++
++	/* Set it to IB */
++	bna_ib_coalescing_timeo_set(ccb->cq->ib, coalescing_timeo);
++}
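++
++/*
++ * Worked example (assuming the BNA_PKT_RATE_* constants are plain
++ * packet counts per sample period, e.g. BNA_PKT_RATE_30K == 30000):
++ * small_pkt_cnt = 15000 and large_pkt_cnt = 10000 give pkt_rt = 25000,
++ * so load = BNA_LOAD_T_LOW_2; 15000 is not more than twice 10000, so
++ * bias = 1 and the new timeout is dim_vector[BNA_LOAD_T_LOW_2][1].
++ */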
++
++/* Tx */
++/* TX <- bnad */
++enum bna_cb_status
++bna_tx_prio_set(struct bna_tx *tx, int prio,
++		void (*cbfn)(struct bnad *, struct bna_tx *,
++			     enum bna_cb_status))
++{
++	if (tx->flags & BNA_TX_F_PRIO_LOCK)
++		return BNA_CB_FAIL;
++	else {
++		tx->prio_change_cbfn = cbfn;
++		bna_tx_prio_changed(tx, prio);
++	}
++
++	return BNA_CB_SUCCESS;
++}
++
++/* TX <- bnad */
++void
++bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
++{
++	struct bna_txq *txq;
++	struct list_head *qe;
++
++	list_for_each(qe, &tx->txq_q) {
++		txq = (struct bna_txq *)qe;
++		bna_ib_coalescing_timeo_set(txq->ib, coalescing_timeo);
++	}
++}
++
++/*
++ * Private data
++ */
++
++struct bna_ritseg_pool_cfg {
++	u32	pool_size;
++	u32	pool_entry_size;
++};
++init_ritseg_pool(ritseg_pool_cfg);
++
++/*
++ * Private functions
++ */
++static void
++bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
++		  struct bna_res_info *res_info)
++{
++	int i;
++
++	ucam_mod->ucmac = (struct bna_mac *)
++		res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
++
++	INIT_LIST_HEAD(&ucam_mod->free_q);
++	for (i = 0; i < BFI_MAX_UCMAC; i++) {
++		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
++		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
++	}
++
++	ucam_mod->bna = bna;
++}
++
++static void
++bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
++{
++	struct list_head *qe;
++	int i = 0;
++
++	list_for_each(qe, &ucam_mod->free_q)
++		i++;
++
++	ucam_mod->bna = NULL;
++}
++
++static void
++bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
++		  struct bna_res_info *res_info)
++{
++	int i;
++
++	mcam_mod->mcmac = (struct bna_mac *)
++		res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
++
++	INIT_LIST_HEAD(&mcam_mod->free_q);
++	for (i = 0; i < BFI_MAX_MCMAC; i++) {
++		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
++		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
++	}
++
++	mcam_mod->bna = bna;
++}
++
++static void
++bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
++{
++	struct list_head *qe;
++	int i = 0;
++
++	list_for_each(qe, &mcam_mod->free_q)
++		i++;
++
++	mcam_mod->bna = NULL;
++}
++
++static void
++bna_rit_mod_init(struct bna_rit_mod *rit_mod,
++		struct bna_res_info *res_info)
++{
++	int i;
++	int j;
++	int count;
++	int offset;
++
++	rit_mod->rit = (struct bna_rit_entry *)
++		res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mdl[0].kva;
++	rit_mod->rit_segment = (struct bna_rit_segment *)
++		res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mdl[0].kva;
++
++	count = 0;
++	offset = 0;
++	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
++		INIT_LIST_HEAD(&rit_mod->rit_seg_pool[i]);
++		for (j = 0; j < ritseg_pool_cfg[i].pool_size; j++) {
++			bfa_q_qe_init(&rit_mod->rit_segment[count].qe);
++			rit_mod->rit_segment[count].max_rit_size =
++					ritseg_pool_cfg[i].pool_entry_size;
++			rit_mod->rit_segment[count].rit_offset = offset;
++			rit_mod->rit_segment[count].rit =
++					&rit_mod->rit[offset];
++			list_add_tail(&rit_mod->rit_segment[count].qe,
++				&rit_mod->rit_seg_pool[i]);
++			count++;
++			offset += ritseg_pool_cfg[i].pool_entry_size;
++		}
++	}
++}
++
++static void
++bna_rit_mod_uninit(struct bna_rit_mod *rit_mod)
++{
++	struct bna_rit_segment *rit_segment;
++	struct list_head *qe;
++	int i;
++	int j;
++
++	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
++		j = 0;
++		list_for_each(qe, &rit_mod->rit_seg_pool[i]) {
++			rit_segment = (struct bna_rit_segment *)qe;
++			j++;
++		}
++	}
++}
++
++/*
++ * Public functions
++ */
++
++/* Called during probe(), before calling bna_init() */
++void
++bna_res_req(struct bna_res_info *res_info)
++{
++	bna_adv_res_req(res_info);
++
++	/* DMA memory for retrieving IOC attributes */
++	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
++	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
++	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
++	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
++				ALIGN(bfa_ioc_meminfo(), PAGE_SIZE);
++
++	/* DMA memory for index segment of an IB */
++	res_info[BNA_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
++	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
++	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.len =
++				BFI_IBIDX_SIZE * BFI_IBIDX_MAX_SEGSIZE;
++	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.num = BFI_MAX_IB;
++
++	/* Virtual memory for IB objects - stored by IB module */
++	res_info[BNA_RES_MEM_T_IB_ARRAY].res_type = BNA_RES_T_MEM;
++	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mem_type =
++								BNA_MEM_T_KVA;
++	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.num = 1;
++	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.len =
++				BFI_MAX_IB * sizeof(struct bna_ib);
++
++	/* Virtual memory for intr objects - stored by IB module */
++	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_type = BNA_RES_T_MEM;
++	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mem_type =
++								BNA_MEM_T_KVA;
++	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.num = 1;
++	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.len =
++				BFI_MAX_IB * sizeof(struct bna_intr);
++
++	/* Virtual memory for idx_seg objects - stored by IB module */
++	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_type = BNA_RES_T_MEM;
++	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mem_type =
++								BNA_MEM_T_KVA;
++	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.num = 1;
++	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.len =
++			BFI_IBIDX_TOTAL_SEGS * sizeof(struct bna_ibidx_seg);
++
++	/* Virtual memory for Tx objects - stored by Tx module */
++	res_info[BNA_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
++	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
++								BNA_MEM_T_KVA;
++	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
++	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
++			BFI_MAX_TXQ * sizeof(struct bna_tx);
++
++	/* Virtual memory for TxQ - stored by Tx module */
++	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
++	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
++								BNA_MEM_T_KVA;
++	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
++	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
++			BFI_MAX_TXQ * sizeof(struct bna_txq);
++
++	/* Virtual memory for Rx objects - stored by Rx module */
++	res_info[BNA_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
++	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
++								BNA_MEM_T_KVA;
++	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
++	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
++			BFI_MAX_RXQ * sizeof(struct bna_rx);
++
++	/* Virtual memory for RxPath - stored by Rx module */
++	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
++	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
++								BNA_MEM_T_KVA;
++	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
++	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
++			BFI_MAX_RXQ * sizeof(struct bna_rxp);
++
++	/* Virtual memory for RxQ - stored by Rx module */
++	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
++	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
++								BNA_MEM_T_KVA;
++	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
++	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
++			BFI_MAX_RXQ * sizeof(struct bna_rxq);
++
++	/* Virtual memory for Unicast MAC address - stored by ucam module */
++	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
++	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
++								BNA_MEM_T_KVA;
++	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
++	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
++			BFI_MAX_UCMAC * sizeof(struct bna_mac);
++
++	/* Virtual memory for Multicast MAC address - stored by mcam module */
++	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
++	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
++								BNA_MEM_T_KVA;
++	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
++	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
++			BFI_MAX_MCMAC * sizeof(struct bna_mac);
++
++	/* Virtual memory for RIT entries */
++	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_type = BNA_RES_T_MEM;
++	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mem_type =
++								BNA_MEM_T_KVA;
++	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.num = 1;
++	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.len =
++			BFI_MAX_RIT_SIZE * sizeof(struct bna_rit_entry);
++
++	/* Virtual memory for RIT segment table */
++	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_type = BNA_RES_T_MEM;
++	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mem_type =
++								BNA_MEM_T_KVA;
++	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.num = 1;
++	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.len =
++			BFI_RIT_TOTAL_SEGS * sizeof(struct bna_rit_segment);
++
++	/* Interrupt resource for mailbox interrupt */
++	res_info[BNA_RES_INTR_T_MBOX].res_type = BNA_RES_T_INTR;
++	res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type =
++							BNA_INTR_T_MSIX;
++	res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.num = 1;
++}
++
++/* Called during probe() */
++void
++bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev,
++		struct bna_res_info *res_info)
++{
++	bna->bnad = bnad;
++	bna->pcidev = *pcidev;
++
++	bna->stats.hw_stats = (struct bfi_ll_stats *)
++		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
++	bna->hw_stats_dma.msb =
++		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
++	bna->hw_stats_dma.lsb =
++		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;
++	bna->stats.sw_stats = (struct bna_sw_stats *)
++		res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mdl[0].kva;
++
++	bna->regs.page_addr = bna->pcidev.pci_bar_kva +
++				reg_offset[bna->pcidev.pci_func].page_addr;
++	bna->regs.fn_int_status = bna->pcidev.pci_bar_kva +
++				reg_offset[bna->pcidev.pci_func].fn_int_status;
++	bna->regs.fn_int_mask = bna->pcidev.pci_bar_kva +
++				reg_offset[bna->pcidev.pci_func].fn_int_mask;
++
++	if (bna->pcidev.pci_func < 3)
++		bna->port_num = 0;
++	else
++		bna->port_num = 1;
++
++	/* Also initializes diag, cee, sfp, phy_port and mbox_mod */
++	bna_device_init(&bna->device, bna, res_info);
++
++	bna_port_init(&bna->port, bna);
++
++	bna_tx_mod_init(&bna->tx_mod, bna, res_info);
++
++	bna_rx_mod_init(&bna->rx_mod, bna, res_info);
++
++	bna_ib_mod_init(&bna->ib_mod, bna, res_info);
++
++	bna_rit_mod_init(&bna->rit_mod, res_info);
++
++	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);
++
++	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
++
++	bna->rxf_default_id = BFI_MAX_RXF;
++	bna->rxf_promisc_id = BFI_MAX_RXF;
++
++	/* Mbox q element for posting stat request to f/w */
++	bfa_q_qe_init(&bna->mbox_qe.qe);
++}
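++
++/*
++ * Probe-time ordering sketch (compiled out; BNA_RES_T_MAX and the
++ * allocation step are assumptions about the caller, not part of this
++ * file): bna_res_req() describes the resources, the caller allocates
++ * them, and only then is bna_init() invoked.
++ */
++#if 0
++static void
++example_probe_order(struct bna *bna, struct bnad *bnad,
++		    struct bfa_pcidev *pcidev)
++{
++	static struct bna_res_info res_info[BNA_RES_T_MAX];
++
++	bna_res_req(res_info);
++	/* ... allocate every res_info[] entry (DMA, KVA, interrupts) ... */
++	bna_init(bna, bnad, pcidev, res_info);
++}
++#endif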
++
++void
++bna_uninit(struct bna *bna)
++{
++	bna_mcam_mod_uninit(&bna->mcam_mod);
++
++	bna_ucam_mod_uninit(&bna->ucam_mod);
++
++	bna_rit_mod_uninit(&bna->rit_mod);
++
++	bna_ib_mod_uninit(&bna->ib_mod);
++
++	bna_rx_mod_uninit(&bna->rx_mod);
++
++	bna_tx_mod_uninit(&bna->tx_mod);
++
++	bna_port_uninit(&bna->port);
++
++	bna_device_uninit(&bna->device);
++
++	bna->bnad = NULL;
++}
++
++struct bna_mac *
++bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
++{
++	struct list_head *qe;
++
++	if (list_empty(&ucam_mod->free_q))
++		return NULL;
++
++	bfa_q_deq(&ucam_mod->free_q, &qe);
++
++	return (struct bna_mac *)qe;
++}
++
++void
++bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
++{
++	list_add_tail(&mac->qe, &ucam_mod->free_q);
++}
++
++struct bna_mac *
++bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
++{
++	struct list_head *qe;
++
++	if (list_empty(&mcam_mod->free_q))
++		return NULL;
++
++	bfa_q_deq(&mcam_mod->free_q, &qe);
++
++	return (struct bna_mac *)qe;
++}
++
++void
++bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
++{
++	list_add_tail(&mac->qe, &mcam_mod->free_q);
++}
++
++/**
++ * Note: This should be called in the same locking context as the call to
++ * bna_rit_mod_seg_get()
++ */
++int
++bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size)
++{
++	int i;
++
++	/* Select the pool for seg_size */
++	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
++		if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
++			break;
++	}
++
++	if (i == BFI_RIT_SEG_TOTAL_POOLS)
++		return 0;
++
++	if (list_empty(&rit_mod->rit_seg_pool[i]))
++		return 0;
++
++	return 1;
++}
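++
++/*
++ * Pairing sketch (compiled out; bnad->bna_lock is an assumed lock name,
++ * per the locking note above): can_satisfy() and seg_get() must run
++ * under one lock so the pool cannot drain between the check and the get.
++ */
++#if 0
++static struct bna_rit_segment *
++example_seg_get_locked(struct bnad *bnad, struct bna *bna, int seg_size)
++{
++	struct bna_rit_segment *seg = NULL;
++	unsigned long flags;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	if (bna_rit_mod_can_satisfy(&bna->rit_mod, seg_size))
++		seg = bna_rit_mod_seg_get(&bna->rit_mod, seg_size);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++	return seg;
++}
++#endif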
++
++struct bna_rit_segment *
++bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size)
++{
++	struct bna_rit_segment *seg;
++	struct list_head *qe;
++	int i;
++
++	/* Select the pool for seg_size */
++	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
++		if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
++			break;
++	}
++
++	if (i == BFI_RIT_SEG_TOTAL_POOLS)
++		return NULL;
++
++	if (list_empty(&rit_mod->rit_seg_pool[i]))
++		return NULL;
++
++	bfa_q_deq(&rit_mod->rit_seg_pool[i], &qe);
++	seg = (struct bna_rit_segment *)qe;
++	bfa_q_qe_init(&seg->qe);
++	seg->rit_size = seg_size;
++
++	return seg;
++}
++
++void
++bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
++			struct bna_rit_segment *seg)
++{
++	int i;
++
++	/* Select the pool for seg->max_rit_size */
++	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
++		if (seg->max_rit_size == ritseg_pool_cfg[i].pool_entry_size)
++			break;
++	}
++
++	seg->rit_size = 0;
++	list_add_tail(&seg->qe, &rit_mod->rit_seg_pool[i]);
++}
+--- /dev/null
++++ b/drivers/net/bna/bna_hw.h
+@@ -0,0 +1,1491 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ *
++ * File for interrupt macros and functions
++ */
++
++#ifndef __BNA_HW_H__
++#define __BNA_HW_H__
++
++#include "bfi_ctreg.h"
++
++/**
++ *
++ * SW imposed limits
++ *
++ */
++
++#ifndef BNA_BIOS_BUILD
++
++#define BFI_MAX_TXQ			64
++#define BFI_MAX_RXQ			64
++#define	BFI_MAX_RXF			64
++#define BFI_MAX_IB			128
++#define	BFI_MAX_RIT_SIZE		256
++#define	BFI_RSS_RIT_SIZE		64
++#define	BFI_NONRSS_RIT_SIZE		1
++#define BFI_MAX_UCMAC			256
++#define BFI_MAX_MCMAC			512
++#define BFI_IBIDX_SIZE			4
++#define BFI_MAX_VLAN			4095
++
++/**
++ * There are 3 free IB index pools:
++ *	pool1: 116 segments of 1 index each
++ *	pool2: 2 segments of 2 indexes each
++ *	pool8: 1 segment of 8 indexes
++ */
++#define BFI_IBIDX_POOL1_SIZE		116
++#define	BFI_IBIDX_POOL1_ENTRY_SIZE	1
++#define BFI_IBIDX_POOL2_SIZE		2
++#define	BFI_IBIDX_POOL2_ENTRY_SIZE	2
++#define	BFI_IBIDX_POOL8_SIZE		1
++#define	BFI_IBIDX_POOL8_ENTRY_SIZE	8
++#define	BFI_IBIDX_TOTAL_POOLS		3
++#define	BFI_IBIDX_TOTAL_SEGS		119 /* (POOL1 + POOL2 + POOL8)_SIZE */
++#define	BFI_IBIDX_MAX_SEGSIZE		8
++#define init_ibidx_pool(name)						\
++static struct bna_ibidx_pool name[BFI_IBIDX_TOTAL_POOLS] =		\
++{									\
++	{ BFI_IBIDX_POOL1_SIZE, BFI_IBIDX_POOL1_ENTRY_SIZE },		\
++	{ BFI_IBIDX_POOL2_SIZE, BFI_IBIDX_POOL2_ENTRY_SIZE },		\
++	{ BFI_IBIDX_POOL8_SIZE, BFI_IBIDX_POOL8_ENTRY_SIZE }		\
++}
++
++/**
++ * There are 2 free RIT segment pools:
++ *	Pool1: 192 segments of 1 RIT entry each
++ *	PoolRSS: 1 segment of 64 RIT entries
++ */
++#define BFI_RIT_SEG_POOL1_SIZE		192
++#define BFI_RIT_SEG_POOL1_ENTRY_SIZE	1
++#define BFI_RIT_SEG_POOLRSS_SIZE	1
++#define BFI_RIT_SEG_POOLRSS_ENTRY_SIZE	64
++#define BFI_RIT_SEG_TOTAL_POOLS		2
++#define BFI_RIT_TOTAL_SEGS		193 /* POOL1_SIZE + POOLRSS_SIZE */
++#define init_ritseg_pool(name)						\
++static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] =	\
++{									\
++	{ BFI_RIT_SEG_POOL1_SIZE, BFI_RIT_SEG_POOL1_ENTRY_SIZE },	\
++	{ BFI_RIT_SEG_POOLRSS_SIZE, BFI_RIT_SEG_POOLRSS_ENTRY_SIZE }	\
++}
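++
++/*
++ * For reference, init_ritseg_pool(ritseg_pool_cfg) expands to:
++ *
++ *	static struct bna_ritseg_pool_cfg ritseg_pool_cfg[2] = {
++ *		{ 192, 1 },	(192 single-entry segments)
++ *		{ 1, 64 }	(one 64-entry RSS segment)
++ *	};
++ */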
++
++#else /* BNA_BIOS_BUILD */
++
++#define BFI_MAX_TXQ			1
++#define BFI_MAX_RXQ			1
++#define	BFI_MAX_RXF			1
++#define BFI_MAX_IB			2
++#define	BFI_MAX_RIT_SIZE		2
++#define	BFI_RSS_RIT_SIZE		64
++#define	BFI_NONRSS_RIT_SIZE		1
++#define BFI_MAX_UCMAC			1
++#define BFI_MAX_MCMAC			8
++#define BFI_IBIDX_SIZE			4
++#define BFI_MAX_VLAN			4095
++/* There is one free pool: 2 segments of 1 index each */
++#define BFI_IBIDX_POOL1_SIZE		2
++#define	BFI_IBIDX_POOL1_ENTRY_SIZE	1
++#define	BFI_IBIDX_TOTAL_POOLS		1
++#define	BFI_IBIDX_TOTAL_SEGS		2 /* POOL1_SIZE */
++#define	BFI_IBIDX_MAX_SEGSIZE		1
++#define init_ibidx_pool(name)						\
++static struct bna_ibidx_pool name[BFI_IBIDX_TOTAL_POOLS] =		\
++{									\
++	{ BFI_IBIDX_POOL1_SIZE, BFI_IBIDX_POOL1_ENTRY_SIZE }		\
++}
++
++#define BFI_RIT_SEG_POOL1_SIZE		1
++#define BFI_RIT_SEG_POOL1_ENTRY_SIZE	1
++#define BFI_RIT_SEG_TOTAL_POOLS		1
++#define BFI_RIT_TOTAL_SEGS		1 /* POOL1_SIZE */
++#define init_ritseg_pool(name)						\
++static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] =	\
++{									\
++	{ BFI_RIT_SEG_POOL1_SIZE, BFI_RIT_SEG_POOL1_ENTRY_SIZE }	\
++}
++
++#endif /* BNA_BIOS_BUILD */
++
++#define BFI_RSS_HASH_KEY_LEN		10
++
++#define BFI_COALESCING_TIMER_UNIT	5	/* 5us */
++#define BFI_MAX_COALESCING_TIMEO	0xFF	/* in 5us units */
++#define BFI_MAX_INTERPKT_COUNT		0xFF
++#define BFI_MAX_INTERPKT_TIMEO		0xF	/* in 0.5us units */
++#define BFI_TX_COALESCING_TIMEO		20	/* 20 * 5 = 100us */
++#define BFI_TX_INTERPKT_COUNT		32
++#define	BFI_RX_COALESCING_TIMEO		12	/* 12 * 5 = 60us */
++#define	BFI_RX_INTERPKT_COUNT		6	/* Pkt Cnt = 6 */
++#define	BFI_RX_INTERPKT_TIMEO		3	/* 3 * 0.5 = 1.5us */
++
++#define BFI_TXQ_WI_SIZE			64	/* bytes */
++#define BFI_RXQ_WI_SIZE			8	/* bytes */
++#define BFI_CQ_WI_SIZE			16	/* bytes */
++#define BFI_TX_MAX_WRR_QUOTA		0xFFF
++
++#define BFI_TX_MAX_VECTORS_PER_WI	4
++#define BFI_TX_MAX_VECTORS_PER_PKT	0xFF
++#define BFI_TX_MAX_DATA_PER_VECTOR	0xFFFF
++#define BFI_TX_MAX_DATA_PER_PKT		0xFFFFFF
++
++/* Small Q buffer size */
++#define BFI_SMALL_RXBUF_SIZE		128
++
++/* Defined separately since BFA_FLASH_DMA_BUF_SZ is in bfa_flash.c */
++#define BFI_FLASH_DMA_BUF_SZ		0x010000 /* 64K DMA */
++#define BFI_HW_STATS_SIZE		0x4000 /* 16K DMA */
++
++/**
++ *
++ * HW register offsets, macros
++ *
++ */
++
++/* DMA Block Register Host Window Start Address */
++#define DMA_BLK_REG_ADDR		0x00013000
++
++/* DMA Block Internal Registers */
++#define DMA_CTRL_REG0			(DMA_BLK_REG_ADDR + 0x000)
++#define DMA_CTRL_REG1			(DMA_BLK_REG_ADDR + 0x004)
++#define DMA_ERR_INT_STATUS		(DMA_BLK_REG_ADDR + 0x008)
++#define DMA_ERR_INT_ENABLE		(DMA_BLK_REG_ADDR + 0x00c)
++#define DMA_ERR_INT_STATUS_SET		(DMA_BLK_REG_ADDR + 0x010)
++
++/* APP Block Register Address Offset from BAR0 */
++#define APP_BLK_REG_ADDR		0x00014000
++
++/* Host Function Interrupt Mask Registers */
++#define HOSTFN0_INT_MASK		(APP_BLK_REG_ADDR + 0x004)
++#define HOSTFN1_INT_MASK		(APP_BLK_REG_ADDR + 0x104)
++#define HOSTFN2_INT_MASK		(APP_BLK_REG_ADDR + 0x304)
++#define HOSTFN3_INT_MASK		(APP_BLK_REG_ADDR + 0x404)
++
++/**
++ * Host Function PCIe Error Registers
++ * Duplicates "Correctable" & "Uncorrectable"
++ * registers in PCIe Config space.
++ */
++#define FN0_PCIE_ERR_REG		(APP_BLK_REG_ADDR + 0x014)
++#define FN1_PCIE_ERR_REG		(APP_BLK_REG_ADDR + 0x114)
++#define FN2_PCIE_ERR_REG		(APP_BLK_REG_ADDR + 0x314)
++#define FN3_PCIE_ERR_REG		(APP_BLK_REG_ADDR + 0x414)
++
++/* Host Function Error Type Status Registers */
++#define FN0_ERR_TYPE_STATUS_REG		(APP_BLK_REG_ADDR + 0x018)
++#define FN1_ERR_TYPE_STATUS_REG		(APP_BLK_REG_ADDR + 0x118)
++#define FN2_ERR_TYPE_STATUS_REG		(APP_BLK_REG_ADDR + 0x318)
++#define FN3_ERR_TYPE_STATUS_REG		(APP_BLK_REG_ADDR + 0x418)
++
++/* Host Function Error Type Mask Registers */
++#define FN0_ERR_TYPE_MSK_STATUS_REG	(APP_BLK_REG_ADDR + 0x01c)
++#define FN1_ERR_TYPE_MSK_STATUS_REG	(APP_BLK_REG_ADDR + 0x11c)
++#define FN2_ERR_TYPE_MSK_STATUS_REG	(APP_BLK_REG_ADDR + 0x31c)
++#define FN3_ERR_TYPE_MSK_STATUS_REG	(APP_BLK_REG_ADDR + 0x41c)
++
++/* Catapult Host Semaphore Status Registers (App block) */
++#define HOST_SEM_STS0_REG		(APP_BLK_REG_ADDR + 0x630)
++#define HOST_SEM_STS1_REG		(APP_BLK_REG_ADDR + 0x634)
++#define HOST_SEM_STS2_REG		(APP_BLK_REG_ADDR + 0x638)
++#define HOST_SEM_STS3_REG		(APP_BLK_REG_ADDR + 0x63c)
++#define HOST_SEM_STS4_REG		(APP_BLK_REG_ADDR + 0x640)
++#define HOST_SEM_STS5_REG		(APP_BLK_REG_ADDR + 0x644)
++#define HOST_SEM_STS6_REG		(APP_BLK_REG_ADDR + 0x648)
++#define HOST_SEM_STS7_REG		(APP_BLK_REG_ADDR + 0x64c)
++
++/* PCIe Misc Register */
++#define PCIE_MISC_REG			(APP_BLK_REG_ADDR + 0x200)
++
++/* Temp Sensor Control Registers */
++#define TEMPSENSE_CNTL_REG		(APP_BLK_REG_ADDR + 0x250)
++#define TEMPSENSE_STAT_REG		(APP_BLK_REG_ADDR + 0x254)
++
++/* APP Block local error registers */
++#define APP_LOCAL_ERR_STAT		(APP_BLK_REG_ADDR + 0x258)
++#define APP_LOCAL_ERR_MSK		(APP_BLK_REG_ADDR + 0x25c)
++
++/* PCIe Link Error registers */
++#define PCIE_LNK_ERR_STAT		(APP_BLK_REG_ADDR + 0x260)
++#define PCIE_LNK_ERR_MSK		(APP_BLK_REG_ADDR + 0x264)
++
++/**
++ * FCoE/FIP Ethertype Register
++ * 31:16 -- Chip wide value for FIP type
++ * 15:0  -- Chip wide value for FCoE type
++ */
++#define FCOE_FIP_ETH_TYPE		(APP_BLK_REG_ADDR + 0x280)
++
++/**
++ * Reserved Ethertype Register
++ * 31:16 -- Reserved
++ * 15:0  -- Other ethertype
++ */
++#define RESV_ETH_TYPE			(APP_BLK_REG_ADDR + 0x284)
++
++/**
++ * Host Command Status Registers
++ * Each set consists of 3 registers:
++ * clear, set and cmd.
++ * There are 16 such register sets in all.
++ * See catapult_spec.pdf for detailed functionality.
++ * TODO: put each type in a single macro accessed by _num?
++ */
++#define HOST_CMDSTS0_CLR_REG		(APP_BLK_REG_ADDR + 0x500)
++#define HOST_CMDSTS0_SET_REG		(APP_BLK_REG_ADDR + 0x504)
++#define HOST_CMDSTS0_REG		(APP_BLK_REG_ADDR + 0x508)
++#define HOST_CMDSTS1_CLR_REG		(APP_BLK_REG_ADDR + 0x510)
++#define HOST_CMDSTS1_SET_REG		(APP_BLK_REG_ADDR + 0x514)
++#define HOST_CMDSTS1_REG		(APP_BLK_REG_ADDR + 0x518)
++#define HOST_CMDSTS2_CLR_REG		(APP_BLK_REG_ADDR + 0x520)
++#define HOST_CMDSTS2_SET_REG		(APP_BLK_REG_ADDR + 0x524)
++#define HOST_CMDSTS2_REG		(APP_BLK_REG_ADDR + 0x528)
++#define HOST_CMDSTS3_CLR_REG		(APP_BLK_REG_ADDR + 0x530)
++#define HOST_CMDSTS3_SET_REG		(APP_BLK_REG_ADDR + 0x534)
++#define HOST_CMDSTS3_REG		(APP_BLK_REG_ADDR + 0x538)
++#define HOST_CMDSTS4_CLR_REG		(APP_BLK_REG_ADDR + 0x540)
++#define HOST_CMDSTS4_SET_REG		(APP_BLK_REG_ADDR + 0x544)
++#define HOST_CMDSTS4_REG		(APP_BLK_REG_ADDR + 0x548)
++#define HOST_CMDSTS5_CLR_REG		(APP_BLK_REG_ADDR + 0x550)
++#define HOST_CMDSTS5_SET_REG		(APP_BLK_REG_ADDR + 0x554)
++#define HOST_CMDSTS5_REG		(APP_BLK_REG_ADDR + 0x558)
++#define HOST_CMDSTS6_CLR_REG		(APP_BLK_REG_ADDR + 0x560)
++#define HOST_CMDSTS6_SET_REG		(APP_BLK_REG_ADDR + 0x564)
++#define HOST_CMDSTS6_REG		(APP_BLK_REG_ADDR + 0x568)
++#define HOST_CMDSTS7_CLR_REG		(APP_BLK_REG_ADDR + 0x570)
++#define HOST_CMDSTS7_SET_REG		(APP_BLK_REG_ADDR + 0x574)
++#define HOST_CMDSTS7_REG		(APP_BLK_REG_ADDR + 0x578)
++#define HOST_CMDSTS8_CLR_REG		(APP_BLK_REG_ADDR + 0x580)
++#define HOST_CMDSTS8_SET_REG		(APP_BLK_REG_ADDR + 0x584)
++#define HOST_CMDSTS8_REG		(APP_BLK_REG_ADDR + 0x588)
++#define HOST_CMDSTS9_CLR_REG		(APP_BLK_REG_ADDR + 0x590)
++#define HOST_CMDSTS9_SET_REG		(APP_BLK_REG_ADDR + 0x594)
++#define HOST_CMDSTS9_REG		(APP_BLK_REG_ADDR + 0x598)
++#define HOST_CMDSTS10_CLR_REG		(APP_BLK_REG_ADDR + 0x5A0)
++#define HOST_CMDSTS10_SET_REG		(APP_BLK_REG_ADDR + 0x5A4)
++#define HOST_CMDSTS10_REG		(APP_BLK_REG_ADDR + 0x5A8)
++#define HOST_CMDSTS11_CLR_REG		(APP_BLK_REG_ADDR + 0x5B0)
++#define HOST_CMDSTS11_SET_REG		(APP_BLK_REG_ADDR + 0x5B4)
++#define HOST_CMDSTS11_REG		(APP_BLK_REG_ADDR + 0x5B8)
++#define HOST_CMDSTS12_CLR_REG		(APP_BLK_REG_ADDR + 0x5C0)
++#define HOST_CMDSTS12_SET_REG		(APP_BLK_REG_ADDR + 0x5C4)
++#define HOST_CMDSTS12_REG		(APP_BLK_REG_ADDR + 0x5C8)
++#define HOST_CMDSTS13_CLR_REG		(APP_BLK_REG_ADDR + 0x5D0)
++#define HOST_CMDSTS13_SET_REG		(APP_BLK_REG_ADDR + 0x5D4)
++#define HOST_CMDSTS13_REG		(APP_BLK_REG_ADDR + 0x5D8)
++#define HOST_CMDSTS14_CLR_REG		(APP_BLK_REG_ADDR + 0x5E0)
++#define HOST_CMDSTS14_SET_REG		(APP_BLK_REG_ADDR + 0x5E4)
++#define HOST_CMDSTS14_REG		(APP_BLK_REG_ADDR + 0x5E8)
++#define HOST_CMDSTS15_CLR_REG		(APP_BLK_REG_ADDR + 0x5F0)
++#define HOST_CMDSTS15_SET_REG		(APP_BLK_REG_ADDR + 0x5F4)
++#define HOST_CMDSTS15_REG		(APP_BLK_REG_ADDR + 0x5F8)
++
++/**
++ * LPU0 Block Register Address Offset from BAR0
++ * Range 0x18000 - 0x18033
++ */
++#define LPU0_BLK_REG_ADDR		0x00018000
++
++/**
++ * LPU0 Registers
++ * Open question: should these be used directly
++ * from the host, except for diagnostics?
++ * CTL_REG : Control register
++ * CMD_REG : Triggers execution of the command
++ *           in mailbox memory
++ */
++#define LPU0_MBOX_CTL_REG		(LPU0_BLK_REG_ADDR + 0x000)
++#define LPU0_MBOX_CMD_REG		(LPU0_BLK_REG_ADDR + 0x004)
++#define LPU0_MBOX_LINK_0REG		(LPU0_BLK_REG_ADDR + 0x008)
++#define LPU1_MBOX_LINK_0REG		(LPU0_BLK_REG_ADDR + 0x00c)
++#define LPU0_MBOX_STATUS_0REG		(LPU0_BLK_REG_ADDR + 0x010)
++#define LPU1_MBOX_STATUS_0REG		(LPU0_BLK_REG_ADDR + 0x014)
++#define LPU0_ERR_STATUS_REG		(LPU0_BLK_REG_ADDR + 0x018)
++#define LPU0_ERR_SET_REG		(LPU0_BLK_REG_ADDR + 0x020)
++
++/**
++ * LPU1 Block Register Address Offset from BAR0
++ * Range 0x18400 - 0x18433
++ */
++#define LPU1_BLK_REG_ADDR		0x00018400
++
++/**
++ * LPU1 Registers
++ * Same as LPU0 registers above
++ */
++#define LPU1_MBOX_CTL_REG		(LPU1_BLK_REG_ADDR + 0x000)
++#define LPU1_MBOX_CMD_REG		(LPU1_BLK_REG_ADDR + 0x004)
++#define LPU0_MBOX_LINK_1REG		(LPU1_BLK_REG_ADDR + 0x008)
++#define LPU1_MBOX_LINK_1REG		(LPU1_BLK_REG_ADDR + 0x00c)
++#define LPU0_MBOX_STATUS_1REG		(LPU1_BLK_REG_ADDR + 0x010)
++#define LPU1_MBOX_STATUS_1REG		(LPU1_BLK_REG_ADDR + 0x014)
++#define LPU1_ERR_STATUS_REG		(LPU1_BLK_REG_ADDR + 0x018)
++#define LPU1_ERR_SET_REG		(LPU1_BLK_REG_ADDR + 0x020)
++
++/**
++ * PSS Block Register Address Offset from BAR0
++ * Range 0x18800 - 0x188DB
++ */
++#define PSS_BLK_REG_ADDR		0x00018800
++
++/**
++ * PSS Registers
++ * For details, see catapult_spec.pdf
++ * ERR_STATUS_REG : Indicates error in PSS module
++ * RAM_ERR_STATUS_REG : Indicates RAM module that detected error
++ */
++#define ERR_STATUS_SET			(PSS_BLK_REG_ADDR + 0x018)
++#define PSS_RAM_ERR_STATUS_REG		(PSS_BLK_REG_ADDR + 0x01C)
++
++/**
++ * PSS Semaphore Lock Registers, total 16
++ * The first read when unlocked returns 0
++ * and atomically sets the register to 1;
++ * subsequent reads return 1.
++ * Write 0 to release the lock.
++ * Range : 0x20 to 0x5c
++ */
++#define PSS_SEM_LOCK_REG(_num) 		\
++	(PSS_BLK_REG_ADDR + 0x020 + ((_num) << 2))
++
++/**
++ * PSS Semaphore Status Registers,
++ * corresponding to the lock registers above
++ */
++#define PSS_SEM_STATUS_REG(_num) 		\
++	(PSS_BLK_REG_ADDR + 0x060 + ((_num) << 2))
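++
++/*
++ * Usage sketch (compiled out; 'bar0' stands in for the mapped BAR0
++ * base, pci_bar_kva in this driver): acquire by reading 0, release by
++ * writing 0, per the lock register semantics above.
++ */
++#if 0
++static bool
++example_pss_sem_trylock(void __iomem *bar0, int num)
++{
++	return readl(bar0 + PSS_SEM_LOCK_REG(num)) == 0;
++}
++
++static void
++example_pss_sem_unlock(void __iomem *bar0, int num)
++{
++	writel(0, bar0 + PSS_SEM_LOCK_REG(num));
++}
++#endif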
++
++/**
++ * Catapult CPQ Registers
++ * Defines for Mailbox Registers
++ * Used to send mailbox commands to firmware from
++ * the host. The data part is written to the MBox
++ * memory, while the registers indicate that
++ * a command is resident in memory.
++ *
++ * Note : LPU0<->LPU1 mailboxes are not listed here
++ */
++#define CPQ_BLK_REG_ADDR		0x00019000
++
++#define HOSTFN0_LPU0_MBOX1_CMD_STAT	(CPQ_BLK_REG_ADDR + 0x130)
++#define HOSTFN0_LPU1_MBOX1_CMD_STAT	(CPQ_BLK_REG_ADDR + 0x134)
++#define LPU0_HOSTFN0_MBOX1_CMD_STAT	(CPQ_BLK_REG_ADDR + 0x138)
++#define LPU1_HOSTFN0_MBOX1_CMD_STAT	(CPQ_BLK_REG_ADDR + 0x13C)
++
++#define HOSTFN1_LPU0_MBOX1_CMD_STAT	(CPQ_BLK_REG_ADDR + 0x140)
++#define HOSTFN1_LPU1_MBOX1_CMD_STAT	(CPQ_BLK_REG_ADDR + 0x144)
++#define LPU0_HOSTFN1_MBOX1_CMD_STAT	(CPQ_BLK_REG_ADDR + 0x148)
++#define LPU1_HOSTFN1_MBOX1_CMD_STAT	(CPQ_BLK_REG_ADDR + 0x14C)
++
++#define HOSTFN2_LPU0_MBOX1_CMD_STAT	(CPQ_BLK_REG_ADDR + 0x170)
++#define HOSTFN2_LPU1_MBOX1_CMD_STAT	(CPQ_BLK_REG_ADDR + 0x174)
++#define LPU0_HOSTFN2_MBOX1_CMD_STAT	(CPQ_BLK_REG_ADDR + 0x178)
++#define LPU1_HOSTFN2_MBOX1_CMD_STAT	(CPQ_BLK_REG_ADDR + 0x17C)
++
++#define HOSTFN3_LPU0_MBOX1_CMD_STAT	(CPQ_BLK_REG_ADDR + 0x180)
++#define HOSTFN3_LPU1_MBOX1_CMD_STAT	(CPQ_BLK_REG_ADDR + 0x184)
++#define LPU0_HOSTFN3_MBOX1_CMD_STAT	(CPQ_BLK_REG_ADDR + 0x188)
++#define LPU1_HOSTFN3_MBOX1_CMD_STAT	(CPQ_BLK_REG_ADDR + 0x18C)
++
++/* Host Function Force Parity Error Registers */
++#define HOSTFN0_LPU_FORCE_PERR		(CPQ_BLK_REG_ADDR + 0x120)
++#define HOSTFN1_LPU_FORCE_PERR		(CPQ_BLK_REG_ADDR + 0x124)
++#define HOSTFN2_LPU_FORCE_PERR		(CPQ_BLK_REG_ADDR + 0x128)
++#define HOSTFN3_LPU_FORCE_PERR		(CPQ_BLK_REG_ADDR + 0x12C)
++
++/* LL Port[0|1] Halt Mask Registers */
++#define LL_HALT_MSK_P0			(CPQ_BLK_REG_ADDR + 0x1A0)
++#define LL_HALT_MSK_P1			(CPQ_BLK_REG_ADDR + 0x1B0)
++
++/* LL Port[0|1] Error Mask Registers */
++#define LL_ERR_MSK_P0			(CPQ_BLK_REG_ADDR + 0x1D0)
++#define LL_ERR_MSK_P1			(CPQ_BLK_REG_ADDR + 0x1D4)
++
++/* EMC FLI (Flash Controller) Block Register Address Offset from BAR0 */
++#define FLI_BLK_REG_ADDR		0x0001D000
++
++/* EMC FLI Registers */
++#define FLI_CMD_REG			(FLI_BLK_REG_ADDR + 0x000)
++#define FLI_ADDR_REG			(FLI_BLK_REG_ADDR + 0x004)
++#define FLI_CTL_REG			(FLI_BLK_REG_ADDR + 0x008)
++#define FLI_WRDATA_REG			(FLI_BLK_REG_ADDR + 0x00C)
++#define FLI_RDDATA_REG			(FLI_BLK_REG_ADDR + 0x010)
++#define FLI_DEV_STATUS_REG		(FLI_BLK_REG_ADDR + 0x014)
++#define FLI_SIG_WD_REG			(FLI_BLK_REG_ADDR + 0x018)
++
++/**
++ * RO register
++ * 31:16 -- Vendor Id
++ * 15:0  -- Device Id
++ */
++#define FLI_DEV_VENDOR_REG		(FLI_BLK_REG_ADDR + 0x01C)
++#define FLI_ERR_STATUS_REG		(FLI_BLK_REG_ADDR + 0x020)
++
++/**
++ * RAD (RxAdm) Block Register Address Offset from BAR0
++ * RAD0 Range : 0x20000 - 0x203FF
++ * RAD1 Range : 0x20400 - 0x207FF
++ */
++#define RAD0_BLK_REG_ADDR		0x00020000
++#define RAD1_BLK_REG_ADDR		0x00020400
++
++/* RAD0 Registers */
++#define RAD0_CTL_REG			(RAD0_BLK_REG_ADDR + 0x000)
++#define RAD0_PE_PARM_REG		(RAD0_BLK_REG_ADDR + 0x004)
++#define RAD0_BCN_REG			(RAD0_BLK_REG_ADDR + 0x008)
++
++/* Default function ID register */
++#define RAD0_DEFAULT_REG		(RAD0_BLK_REG_ADDR + 0x00C)
++
++/* Promiscuous function ID register */
++#define RAD0_PROMISC_REG		(RAD0_BLK_REG_ADDR + 0x010)
++
++#define RAD0_BCNQ_REG			(RAD0_BLK_REG_ADDR + 0x014)
++
++/*
++ * This register selects 1 of 8 PM Q's using
++ * VLAN pri, for non-BCN packets without a VLAN tag
++ */
++#define RAD0_DEFAULTQ_REG		(RAD0_BLK_REG_ADDR + 0x018)
++
++#define RAD0_ERR_STS			(RAD0_BLK_REG_ADDR + 0x01C)
++#define RAD0_SET_ERR_STS		(RAD0_BLK_REG_ADDR + 0x020)
++#define RAD0_ERR_INT_EN			(RAD0_BLK_REG_ADDR + 0x024)
++#define RAD0_FIRST_ERR			(RAD0_BLK_REG_ADDR + 0x028)
++#define RAD0_FORCE_ERR			(RAD0_BLK_REG_ADDR + 0x02C)
++
++#define RAD0_IF_RCVD			(RAD0_BLK_REG_ADDR + 0x030)
++#define RAD0_IF_RCVD_OCTETS_HIGH	(RAD0_BLK_REG_ADDR + 0x034)
++#define RAD0_IF_RCVD_OCTETS_LOW		(RAD0_BLK_REG_ADDR + 0x038)
++#define RAD0_IF_RCVD_VLAN		(RAD0_BLK_REG_ADDR + 0x03C)
++#define RAD0_IF_RCVD_UCAST		(RAD0_BLK_REG_ADDR + 0x040)
++#define RAD0_IF_RCVD_UCAST_OCTETS_HIGH	(RAD0_BLK_REG_ADDR + 0x044)
++#define RAD0_IF_RCVD_UCAST_OCTETS_LOW   (RAD0_BLK_REG_ADDR + 0x048)
++#define RAD0_IF_RCVD_UCAST_VLAN		(RAD0_BLK_REG_ADDR + 0x04C)
++#define RAD0_IF_RCVD_MCAST		(RAD0_BLK_REG_ADDR + 0x050)
++#define RAD0_IF_RCVD_MCAST_OCTETS_HIGH  (RAD0_BLK_REG_ADDR + 0x054)
++#define RAD0_IF_RCVD_MCAST_OCTETS_LOW   (RAD0_BLK_REG_ADDR + 0x058)
++#define RAD0_IF_RCVD_MCAST_VLAN		(RAD0_BLK_REG_ADDR + 0x05C)
++#define RAD0_IF_RCVD_BCAST		(RAD0_BLK_REG_ADDR + 0x060)
++#define RAD0_IF_RCVD_BCAST_OCTETS_HIGH  (RAD0_BLK_REG_ADDR + 0x064)
++#define RAD0_IF_RCVD_BCAST_OCTETS_LOW   (RAD0_BLK_REG_ADDR + 0x068)
++#define RAD0_IF_RCVD_BCAST_VLAN		(RAD0_BLK_REG_ADDR + 0x06C)
++#define RAD0_DROPPED_FRAMES		(RAD0_BLK_REG_ADDR + 0x070)
++
++#define RAD0_MAC_MAN_1H			(RAD0_BLK_REG_ADDR + 0x080)
++#define RAD0_MAC_MAN_1L			(RAD0_BLK_REG_ADDR + 0x084)
++#define RAD0_MAC_MAN_2H			(RAD0_BLK_REG_ADDR + 0x088)
++#define RAD0_MAC_MAN_2L			(RAD0_BLK_REG_ADDR + 0x08C)
++#define RAD0_MAC_MAN_3H			(RAD0_BLK_REG_ADDR + 0x090)
++#define RAD0_MAC_MAN_3L			(RAD0_BLK_REG_ADDR + 0x094)
++#define RAD0_MAC_MAN_4H			(RAD0_BLK_REG_ADDR + 0x098)
++#define RAD0_MAC_MAN_4L			(RAD0_BLK_REG_ADDR + 0x09C)
++
++#define RAD0_LAST4_IP			(RAD0_BLK_REG_ADDR + 0x100)
++
++/* RAD1 Registers */
++#define RAD1_CTL_REG			(RAD1_BLK_REG_ADDR + 0x000)
++#define RAD1_PE_PARM_REG		(RAD1_BLK_REG_ADDR + 0x004)
++#define RAD1_BCN_REG			(RAD1_BLK_REG_ADDR + 0x008)
++
++/* Default function ID register */
++#define RAD1_DEFAULT_REG		(RAD1_BLK_REG_ADDR + 0x00C)
++
++/* Promiscuous function ID register */
++#define RAD1_PROMISC_REG		(RAD1_BLK_REG_ADDR + 0x010)
++
++#define RAD1_BCNQ_REG			(RAD1_BLK_REG_ADDR + 0x014)
++
++/*
++ * This register selects 1 of 8 PM Q's using
++ * VLAN pri, for non-BCN packets without a VLAN tag
++ */
++#define RAD1_DEFAULTQ_REG		(RAD1_BLK_REG_ADDR + 0x018)
++
++#define RAD1_ERR_STS			(RAD1_BLK_REG_ADDR + 0x01C)
++#define RAD1_SET_ERR_STS		(RAD1_BLK_REG_ADDR + 0x020)
++#define RAD1_ERR_INT_EN			(RAD1_BLK_REG_ADDR + 0x024)
++
++/**
++ * TXA Block Register Address Offset from BAR0
++ * TXA0 Range : 0x21000 - 0x213FF
++ * TXA1 Range : 0x21400 - 0x217FF
++ */
++#define TXA0_BLK_REG_ADDR		0x00021000
++#define TXA1_BLK_REG_ADDR		0x00021400
++
++/* TXA Registers */
++#define TXA0_CTRL_REG			(TXA0_BLK_REG_ADDR + 0x000)
++#define TXA1_CTRL_REG			(TXA1_BLK_REG_ADDR + 0x000)
++
++/**
++ * TSO Sequence # Registers (RO)
++ * Total 8 (for 8 queues)
++ * Holds the last seq.# for TSO frames
++ * See catapult_spec.pdf for more details
++ */
++#define TXA0_TSO_TCP_SEQ_REG(_num)		\
++	(TXA0_BLK_REG_ADDR + 0x020 + ((_num) << 2))
++
++#define TXA1_TSO_TCP_SEQ_REG(_num)		\
++	(TXA1_BLK_REG_ADDR + 0x020 + ((_num) << 2))
++
++/**
++ * TSO IP ID # Registers (RO)
++ * Total 8 (for 8 queues)
++ * Holds the last IP ID for TSO frames
++ * See catapult_spec.pdf for more details
++ */
++#define TXA0_TSO_IP_INFO_REG(_num)		\
++	(TXA0_BLK_REG_ADDR + 0x040 + ((_num) << 2))
++
++#define TXA1_TSO_IP_INFO_REG(_num)		\
++	(TXA1_BLK_REG_ADDR + 0x040 + ((_num) << 2))
++
++/**
++ * RXA Block Register Address Offset from BAR0
++ * RXA0 Range : 0x21800 - 0x21BFF
++ * RXA1 Range : 0x21C00 - 0x21FFF
++ */
++#define RXA0_BLK_REG_ADDR		0x00021800
++#define RXA1_BLK_REG_ADDR		0x00021C00
++
++/* RXA Registers */
++#define RXA0_CTL_REG			(RXA0_BLK_REG_ADDR + 0x040)
++#define RXA1_CTL_REG			(RXA1_BLK_REG_ADDR + 0x040)
++
++/**
++ * PPLB Block Register Address Offset from BAR0
++ * PPLB0 Range : 0x22000 - 0x223FF
++ * PPLB1 Range : 0x22400 - 0x227FF
++ */
++#define PLB0_BLK_REG_ADDR		0x00022000
++#define PLB1_BLK_REG_ADDR		0x00022400
++
++/**
++ * PLB Registers
++ * Holds RL timer used time stamps in RLT tagged frames
++ */
++#define PLB0_ECM_TIMER_REG		(PLB0_BLK_REG_ADDR + 0x05C)
++#define PLB1_ECM_TIMER_REG		(PLB1_BLK_REG_ADDR + 0x05C)
++
++/* Controls the rate-limiter on each of the priority class */
++#define PLB0_RL_CTL			(PLB0_BLK_REG_ADDR + 0x060)
++#define PLB1_RL_CTL			(PLB1_BLK_REG_ADDR + 0x060)
++
++/**
++ * Max byte register, total 8, 0-7
++ * see catapult_spec.pdf for details
++ */
++#define PLB0_RL_MAX_BC(_num)			\
++	(PLB0_BLK_REG_ADDR + 0x064 + ((_num) << 2))
++#define PLB1_RL_MAX_BC(_num)			\
++	(PLB1_BLK_REG_ADDR + 0x064 + ((_num) << 2))
++
++/**
++ * RL Time Unit Register for priority 0-7
++ * 4 bits per priority
++ * (2^rl_unit)*1us is the actual time period
++ */
++#define PLB0_RL_TU_PRIO			(PLB0_BLK_REG_ADDR + 0x084)
++#define PLB1_RL_TU_PRIO			(PLB1_BLK_REG_ADDR + 0x084)
++
++/**
++ * RL byte count register,
++ * bytes transmitted in (rl_unit*1)us time period
++ * 1 per priority, 8 in all, 0-7.
++ */
++#define PLB0_RL_BYTE_CNT(_num)			\
++	(PLB0_BLK_REG_ADDR + 0x088 + ((_num) << 2))
++#define PLB1_RL_BYTE_CNT(_num)			\
++	(PLB1_BLK_REG_ADDR + 0x088 + ((_num) << 2))
++
++/**
++ * RL Min factor register
++ * 2 bits per priority,
++ * 4 factors possible: 1, 0.5, 0.25, 0
++ * 2'b00 - 0; 2'b01 - 0.25; 2'b10 - 0.5; 2'b11 - 1
++ */
++#define PLB0_RL_MIN_REG			(PLB0_BLK_REG_ADDR + 0x0A8)
++#define PLB1_RL_MIN_REG			(PLB1_BLK_REG_ADDR + 0x0A8)
++
++/**
++ * RL Max factor register
++ * 2 bits per priority,
++ * 4 factors possible: 1, 0.5, 0.25, 0
++ * 2'b00 - 0; 2'b01 - 0.25; 2'b10 - 0.5; 2'b11 - 1
++ */
++#define PLB0_RL_MAX_REG			(PLB0_BLK_REG_ADDR + 0x0AC)
++#define PLB1_RL_MAX_REG			(PLB1_BLK_REG_ADDR + 0x0AC)
++
++/* MAC SERDES Address Paging register */
++#define PLB0_EMS_ADD_REG		(PLB0_BLK_REG_ADDR + 0xD0)
++#define PLB1_EMS_ADD_REG		(PLB1_BLK_REG_ADDR + 0xD0)
++
++/* LL EMS Registers */
++#define LL_EMS0_BLK_REG_ADDR		0x00026800
++#define LL_EMS1_BLK_REG_ADDR		0x00026C00
++
++/**
++ * BPC Block Register Address Offset from BAR0
++ * BPC0 Range : 0x23000 - 0x233FF
++ * BPC1 Range : 0x23400 - 0x237FF
++ */
++#define BPC0_BLK_REG_ADDR		0x00023000
++#define BPC1_BLK_REG_ADDR		0x00023400
++
++/**
++ * PMM Block Register Address Offset from BAR0
++ * PMM0 Range : 0x23800 - 0x23BFF
++ * PMM1 Range : 0x23C00 - 0x23FFF
++ */
++#define PMM0_BLK_REG_ADDR		0x00023800
++#define PMM1_BLK_REG_ADDR		0x00023C00
++
++/**
++ * HQM Block Register Address Offset from BAR0
++ * HQM0 Range : 0x24000 - 0x243FF
++ * HQM1 Range : 0x24400 - 0x247FF
++ */
++#define HQM0_BLK_REG_ADDR		0x00024000
++#define HQM1_BLK_REG_ADDR		0x00024400
++
++/**
++ * HQM Control Register
++ * Controls some aspects of IB
++ * See catapult_spec.pdf for details
++ */
++#define HQM0_CTL_REG			(HQM0_BLK_REG_ADDR + 0x000)
++#define HQM1_CTL_REG			(HQM1_BLK_REG_ADDR + 0x000)
++
++/**
++ * HQM Stop Q Semaphore Registers.
++ * Only one Queue resource can be stopped at
++ * any given time. This register controls access
++ * to the single stop Q resource.
++ * See catapult_spec.pdf for details
++ */
++#define HQM0_RXQ_STOP_SEM		(HQM0_BLK_REG_ADDR + 0x028)
++#define HQM0_TXQ_STOP_SEM		(HQM0_BLK_REG_ADDR + 0x02C)
++#define HQM1_RXQ_STOP_SEM		(HQM1_BLK_REG_ADDR + 0x028)
++#define HQM1_TXQ_STOP_SEM		(HQM1_BLK_REG_ADDR + 0x02C)
++
++/**
++ * LUT Block Register Address Offset from BAR0
++ * LUT0 Range : 0x25800 - 0x25BFF
++ * LUT1 Range : 0x25C00 - 0x25FFF
++ */
++#define LUT0_BLK_REG_ADDR		0x00025800
++#define LUT1_BLK_REG_ADDR		0x00025C00
++
++/**
++ * LUT Registers
++ * See catapult_spec.pdf for details
++ */
++#define LUT0_ERR_STS			(LUT0_BLK_REG_ADDR + 0x000)
++#define LUT1_ERR_STS			(LUT1_BLK_REG_ADDR + 0x000)
++#define LUT0_SET_ERR_STS		(LUT0_BLK_REG_ADDR + 0x004)
++#define LUT1_SET_ERR_STS		(LUT1_BLK_REG_ADDR + 0x004)
++
++/**
++ * TRC (Debug/Trace) Register Offset from BAR0
++ * Range : 0x26000 - 0x263FF
++ */
++#define TRC_BLK_REG_ADDR		0x00026000
++
++/**
++ * TRC Registers
++ * See catapult_spec.pdf for details of each
++ */
++#define TRC_CTL_REG			(TRC_BLK_REG_ADDR + 0x000)
++#define TRC_MODS_REG			(TRC_BLK_REG_ADDR + 0x004)
++#define TRC_TRGC_REG			(TRC_BLK_REG_ADDR + 0x008)
++#define TRC_CNT1_REG			(TRC_BLK_REG_ADDR + 0x010)
++#define TRC_CNT2_REG			(TRC_BLK_REG_ADDR + 0x014)
++#define TRC_NXTS_REG			(TRC_BLK_REG_ADDR + 0x018)
++#define TRC_DIRR_REG			(TRC_BLK_REG_ADDR + 0x01C)
++
++/**
++ * TRC Trigger match filters, total 10
++ * Determines the trigger condition
++ */
++#define TRC_TRGM_REG(_num)		\
++	(TRC_BLK_REG_ADDR + 0x040 + ((_num) << 2))
++
++/**
++ * TRC Next State filters, total 10
++ * Determines the next state conditions
++ */
++#define TRC_NXTM_REG(_num)		\
++	(TRC_BLK_REG_ADDR + 0x080 + ((_num) << 2))
++
++/**
++ * TRC Store Match filters, total 10
++ * Determines the store conditions
++ */
++#define TRC_STRM_REG(_num)		\
++	(TRC_BLK_REG_ADDR + 0x0C0 + ((_num) << 2))
++
++/* DOORBELLS ACCESS */
++
++/**
++ * Catapult doorbells
++ * Each doorbell-queue set has
++ * 1 RxQ, 1 TxQ, 2 IBs in that order
++ * Size of each entry is 32 bytes, even though only 1 word
++ * is used. For Non-VM case each doorbell-q set is
++ * separated by 128 bytes, for VM case it is separated
++ * by 4K bytes
++ * Non VM case Range : 0x38000 - 0x39FFF
++ * VM case Range     : 0x100000 - 0x11FFFF
++ * The range applies to both HQMs
++ */
++#define HQM_DOORBELL_BLK_BASE_ADDR	0x00038000
++#define HQM_DOORBELL_VM_BLK_BASE_ADDR	0x00100000
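++
++/*
++ * Worked example (editor's illustration): in the non-VM case,
++ * doorbell-q set N starts at 0x38000 + N * 128, with the RxQ
++ * doorbell at +0x00, the TxQ at +0x20, IB0 at +0x40 and IB1 at
++ * +0x60 (see struct bna_doorbell_qset below). So for set 3 the
++ * RxQ doorbell is at 0x38180 and the IB1 doorbell at 0x381E0;
++ * in the VM case the same set starts at 0x100000 + 3 * 4096
++ * = 0x103000.
++ */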
++
++/* MEMORY ACCESS */
++
++/**
++ * Catapult H/W Block Memory Access Address
++ * To the host a memory space of 32K (page) is visible
++ * at a time. The address range is from 0x08000 to 0x0FFFF
++ */
++#define HW_BLK_HOST_MEM_ADDR		0x08000
++
++/**
++ * Catapult LUT Memory Access Page Numbers
++ * Range : LUT0 0xa0-0xa1
++ *         LUT1 0xa2-0xa3
++ */
++#define LUT0_MEM_BLK_BASE_PG_NUM	0x000000A0
++#define LUT1_MEM_BLK_BASE_PG_NUM	0x000000A2
++
++/**
++ * Catapult RxFn Database Memory Block Base Offset
++ *
++ * The Rx function database exists in LUT block.
++ * In PCIe space this is accessible as a 256x32
++ * bit block. Each entry in this database is 4
++ * (4 byte) words. Max. entries is 64.
++ * Address of an entry corresponding to a function
++ * = base_addr + (function_no. * 16)
++ */
++#define RX_FNDB_RAM_BASE_OFFSET		0x0000B400
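++
++/*
++ * Illustration: with 16 bytes per entry, the entry for function 5
++ * starts at 0xB400 + 5 * 16 = 0xB450, i.e. words 0xB450, 0xB454,
++ * 0xB458 and 0xB45C. The TxFn database below follows the same
++ * scheme with a 4 byte stride.
++ */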
++
++/**
++ * Catapult TxFn Database Memory Block Base Offset Address
++ *
++ * The Tx function database exists in LUT block.
++ * In PCIe space this is accessible as a 64x32
++ * bit block. Each entry in this database is 1
++ * (4 byte) word. Max. entries is 64.
++ * Address of an entry corresponding to a function
++ * = base_addr + (function_no. * 4)
++ */
++#define TX_FNDB_RAM_BASE_OFFSET		0x0000B800
++
++/**
++ * Catapult Unicast CAM Base Offset Address
++ *
++ * Exists in LUT memory space.
++ * Shared by both the LL & FCoE driver.
++ * Size is 256x48 bits; mapped to PCIe space as
++ * 512x32 bit blocks. For each address, bits
++ * are written in the order : [47:32] and then
++ * [31:0].
++ */
++#define UCAST_CAM_BASE_OFFSET		0x0000A800
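++
++/*
++ * Editor's sketch: entry N thus occupies two consecutive words at
++ * 0xA800 + N * 8; e.g. entry 10 is written as word 0xA850 (bits
++ * [47:32], presumably in the low half) followed by word 0xA854
++ * (bits [31:0]).
++ */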
++
++/**
++ * Catapult Unicast RAM Base Offset Address
++ *
++ * Exists in LUT memory space.
++ * Shared by both the LL & FCoE driver.
++ * Size is 256x9 bits.
++ */
++#define UCAST_RAM_BASE_OFFSET		0x0000B000
++
++/**
++ * Catapult Multicast CAM Base Offset Address
++ *
++ * Exists in LUT memory space.
++ * Shared by both the LL & FCoE driver.
++ * Size is 256x48 bits; mapped to PCIe space as
++ * 512x32 bit blocks. For each address, bits
++ * are written in the order : [47:32] and then
++ * [31:0].
++ */
++#define MCAST_CAM_BASE_OFFSET		0x0000A000
++
++/**
++ * Catapult VLAN RAM Base Offset Address
++ *
++ * Exists in LUT memory space.
++ * Size is 4096x66 bits; mapped to PCIe space as
++ * 8192x32 bit blocks.
++ * All the 4K entries are within the address range
++ * 0x0000 to 0x8000, so in the first LUT page.
++ */
++#define VLAN_RAM_BASE_OFFSET		0x00000000
++
++/**
++ * Catapult Tx Stats RAM Base Offset Address
++ *
++ * Exists in LUT memory space.
++ * Size is 1024x33 bits;
++ * Each Tx function has 64 bytes of space
++ */
++#define TX_STATS_RAM_BASE_OFFSET	0x00009000
++
++/**
++ * Catapult Rx Stats RAM Base Offset Address
++ *
++ * Exists in LUT memory space.
++ * Size is 1024x33 bits;
++ * Each Rx function has 64 bytes of space
++ */
++#define RX_STATS_RAM_BASE_OFFSET	0x00008000
++
++/* Catapult RXA Memory Access Page Numbers */
++#define RXA0_MEM_BLK_BASE_PG_NUM	0x0000008C
++#define RXA1_MEM_BLK_BASE_PG_NUM	0x0000008D
++
++/**
++ * Catapult Multicast Vector Table Base Offset Address
++ *
++ * Exists in RxA memory space.
++ * Organized as 512x65 bit block.
++ * However, for each entry 16 bytes are allocated (power of 2)
++ * Total size 512*16 bytes.
++ * There are two logical divisions, 256 entries each :
++ * a) Entries 0x00 to 0xff (256) -- Approx. MVT
++ *    Offset 0x000 to 0xFFF
++ * b) Entries 0x100 to 0x1ff (256) -- Exact MVT
++ *    Offsets 0x1000 to 0x1FFF
++ */
++#define MCAST_APPROX_MVT_BASE_OFFSET	0x00000000
++#define MCAST_EXACT_MVT_BASE_OFFSET	0x00001000
++
++/**
++ * Catapult RxQ Translate Table (RIT) Base Offset Address
++ *
++ * Exists in RxA memory space
++ * Total no. of entries 64
++ * Each entry is 1 (4 byte) word.
++ * 31:12 -- Reserved
++ * 11:0  -- Two 6 bit RxQ Ids
++ */
++#define FUNCTION_TO_RXQ_TRANSLATE	0x00002000
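++
++/*
++ * Packing example (per __rxf_rit_set() in bna_txrx.c below): a
++ * large RxQ id of 5 and a small RxQ id of 4 pack as
++ * (5 << 6) | 4 = 0x144 in bits 11:0 of the entry.
++ */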
++
++/* Catapult RxAdm (RAD) Memory Access Page Numbers */
++#define RAD0_MEM_BLK_BASE_PG_NUM	0x00000086
++#define RAD1_MEM_BLK_BASE_PG_NUM	0x00000087
++
++/**
++ * Catapult RSS Table Base Offset Address
++ *
++ * Exists in RAD memory space.
++ * Each entry is 352 bits, but aligned on a
++ * 64 byte (512 bit) boundary. Accessed in
++ * 4 byte words, the whole entry can be
++ * broken into 11 word accesses.
++ */
++#define RSS_TABLE_BASE_OFFSET		0x00000800
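++
++/*
++ * Illustration: the 11 words are type_n_hash plus hash_key[10]
++ * (see struct bna_rss_mem below); with the 64 byte stride, entry N
++ * starts at 0x800 + N * 64. This driver only ever programs entry 0.
++ */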
++
++/**
++ * Catapult CPQ Block Page Number
++ * This value is written to the page number registers
++ * to access the memory associated with the mailboxes.
++ */
++#define CPQ_BLK_PG_NUM			0x00000005
++
++/**
++ * Clarification :
++ * LL functions are 2 & 3; can HostFn0/HostFn1
++ * <-> LPU0/LPU1 memories be used ?
++ */
++/**
++ * Catapult HostFn0/HostFn1 to LPU0/LPU1 Mbox memory
++ * Per catapult_spec.pdf, the offset of the mbox
++ * memory is in the register space at an offset of 0x200
++ */
++#define CPQ_BLK_REG_MBOX_ADDR		(CPQ_BLK_REG_ADDR + 0x200)
++
++#define HOSTFN_LPU_MBOX			(CPQ_BLK_REG_MBOX_ADDR + 0x000)
++
++/* Catapult LPU0/LPU1 to HostFn0/HostFn1 Mbox memory */
++#define LPU_HOSTFN_MBOX			(CPQ_BLK_REG_MBOX_ADDR + 0x080)
++
++/**
++ * Catapult HQM Block Page Number
++ * This is written to the page number register for
++ * the appropriate function to access the memory
++ * associated with HQM
++ */
++#define HQM0_BLK_PG_NUM			0x00000096
++#define HQM1_BLK_PG_NUM			0x00000097
++
++/**
++ * Note that TxQ and RxQ entries are interlaced
++ * in the HQM memory, i.e. RXQ0, TXQ0, RXQ1, TXQ1, etc.
++ */
++
++#define HQM_RXTX_Q_RAM_BASE_OFFSET	0x00004000
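++
++/*
++ * Editor's note: assuming the structures below map 1:1 onto this
++ * RAM (struct bna_rxq_mem and struct bna_txq_mem are 16 words, 64
++ * bytes each), queue pair i starts at 0x4000 + i * 128, with the
++ * RxQ config first and the TxQ config at +64 (see struct
++ * bna_rxtx_q_mem).
++ */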
++
++/**
++ * CQ Memory
++ * Exists in HQM Memory space
++ * Each entry is 16 (4 byte) words of which
++ * only 12 words are used for configuration
++ * Total 64 entries per HQM memory space
++ */
++#define HQM_CQ_RAM_BASE_OFFSET		0x00006000
++
++/**
++ * Interrupt Block (IB) Memory
++ * Exists in HQM Memory space
++ * Each entry is 8 (4 byte) words of which
++ * only 5 words are used for configuration
++ * Total 128 entries per HQM memory space
++ */
++#define HQM_IB_RAM_BASE_OFFSET		0x00001000
++
++/**
++ * Index Table (IT) Memory
++ * Exists in HQM Memory space
++ * Each entry is 1 (4 byte) word which
++ * is used for configuration
++ * Total 128 entries per HQM memory space
++ */
++#define HQM_INDX_TBL_RAM_BASE_OFFSET	0x00002000
++
++/**
++ * PSS Block Memory Page Number
++ * This is written to the appropriate page number
++ * register to access the CPU memory.
++ * Also known as the PSS secondary memory (SMEM).
++ * Range : 0x180 to 0x1CF
++ * See catapult_spec.pdf for details
++ */
++#define PSS_BLK_PG_NUM			0x00000180
++
++/**
++ * Offsets of different instances of PSS SMEM
++ * 2.5M of contiguous 1T memory space : 2 blocks
++ * of 1M each (32 pages each, page=32KB) and 4 smaller
++ * blocks of 128K each (4 pages each, page=32KB)
++ * PSS_LMEM_INST0 is used for firmware download
++ */
++#define PSS_LMEM_INST0			0x00000000
++#define PSS_LMEM_INST1			0x00100000
++#define PSS_LMEM_INST2			0x00200000
++#define PSS_LMEM_INST3			0x00220000
++#define PSS_LMEM_INST4			0x00240000
++#define PSS_LMEM_INST5			0x00260000
++
++#define BNA_PCI_REG_CT_ADDRSZ		(0x40000)
++
++#define BNA_GET_PAGE_NUM(_base_page, _offset)   \
++	((_base_page) + ((_offset) >> 15))
++
++#define BNA_GET_PAGE_OFFSET(_offset)    \
++	((_offset) & 0x7fff)
++
++#define BNA_GET_MEM_BASE_ADDR(_bar0, _base_offset)	\
++	((_bar0) + HW_BLK_HOST_MEM_ADDR		\
++	  + BNA_GET_PAGE_OFFSET((_base_offset)))
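++
++/*
++ * Usage sketch (mirrors __rxf_config_set() in bna_txrx.c): to reach
++ * the RxFn DB at LUT offset 0xB400 on port 0, select the page and
++ * then compute the host window address:
++ *
++ *	writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM, 0xB400),
++ *	       bna->regs.page_addr);
++ *	base = BNA_GET_MEM_BASE_ADDR(bar0, 0xB400);
++ *
++ * Here 0xB400 >> 15 == 0, so the page is 0xA0, the window offset
++ * is 0xB400 & 0x7fff = 0x3400, and base = bar0 + 0x8000 + 0x3400.
++ */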
++
++#define BNA_GET_VLAN_MEM_ENTRY_ADDR(_bar0, _fn_id, _vlan_id)\
++	((_bar0) + (HW_BLK_HOST_MEM_ADDR)  \
++	+ (BNA_GET_PAGE_OFFSET(VLAN_RAM_BASE_OFFSET))	\
++	+ (((_fn_id) & 0x3f) << 9)	  \
++	+ (((_vlan_id) & 0xfe0) >> 3))
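++
++/*
++ * Worked example (illustrative): function 2, VLAN id 100 lands at
++ * bar0 + 0x8000 + (2 << 9) + ((100 & 0xfe0) >> 3)
++ * = bar0 + 0x8000 + 0x400 + 0x0c; VLAN 100 is then bit
++ * 100 & 0x1f = 4 of that 32-bit word.
++ */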
++
++/**
++ *
++ *  Interrupt related bits, flags and macros
++ *
++ */
++
++#define __LPU02HOST_MBOX0_STATUS_BITS 0x00100000
++#define __LPU12HOST_MBOX0_STATUS_BITS 0x00200000
++#define __LPU02HOST_MBOX1_STATUS_BITS 0x00400000
++#define __LPU12HOST_MBOX1_STATUS_BITS 0x00800000
++
++#define __LPU02HOST_MBOX0_MASK_BITS	0x00100000
++#define __LPU12HOST_MBOX0_MASK_BITS	0x00200000
++#define __LPU02HOST_MBOX1_MASK_BITS	0x00400000
++#define __LPU12HOST_MBOX1_MASK_BITS	0x00800000
++
++#define __LPU2HOST_MBOX_MASK_BITS			 \
++	(__LPU02HOST_MBOX0_MASK_BITS | __LPU02HOST_MBOX1_MASK_BITS |	\
++	  __LPU12HOST_MBOX0_MASK_BITS | __LPU12HOST_MBOX1_MASK_BITS)
++
++#define __LPU2HOST_IB_STATUS_BITS	0x0000ffff
++
++#define BNA_IS_LPU0_MBOX_INTR(_intr_status) \
++	((_intr_status) & (__LPU02HOST_MBOX0_STATUS_BITS | \
++			__LPU02HOST_MBOX1_STATUS_BITS))
++
++#define BNA_IS_LPU1_MBOX_INTR(_intr_status) \
++	((_intr_status) & (__LPU12HOST_MBOX0_STATUS_BITS | \
++		__LPU12HOST_MBOX1_STATUS_BITS))
++
++#define BNA_IS_MBOX_INTR(_intr_status)		\
++	((_intr_status) &  			\
++	(__LPU02HOST_MBOX0_STATUS_BITS |	\
++	 __LPU02HOST_MBOX1_STATUS_BITS |	\
++	 __LPU12HOST_MBOX0_STATUS_BITS |	\
++	 __LPU12HOST_MBOX1_STATUS_BITS))
++
++#define __EMC_ERROR_STATUS_BITS		0x00010000
++#define __LPU0_ERROR_STATUS_BITS	0x00020000
++#define __LPU1_ERROR_STATUS_BITS	0x00040000
++#define __PSS_ERROR_STATUS_BITS		0x00080000
++
++#define __HALT_STATUS_BITS		0x01000000
++
++#define __EMC_ERROR_MASK_BITS		0x00010000
++#define __LPU0_ERROR_MASK_BITS		0x00020000
++#define __LPU1_ERROR_MASK_BITS		0x00040000
++#define __PSS_ERROR_MASK_BITS		0x00080000
++
++#define __HALT_MASK_BITS		0x01000000
++
++#define __ERROR_MASK_BITS		\
++	(__EMC_ERROR_MASK_BITS | __LPU0_ERROR_MASK_BITS | \
++	  __LPU1_ERROR_MASK_BITS | __PSS_ERROR_MASK_BITS | \
++	  __HALT_MASK_BITS)
++
++#define BNA_IS_ERR_INTR(_intr_status)	\
++	((_intr_status) &  		\
++	(__EMC_ERROR_STATUS_BITS |  	\
++	 __LPU0_ERROR_STATUS_BITS | 	\
++	 __LPU1_ERROR_STATUS_BITS | 	\
++	 __PSS_ERROR_STATUS_BITS  | 	\
++	 __HALT_STATUS_BITS))
++
++#define BNA_IS_MBOX_ERR_INTR(_intr_status)	\
++	(BNA_IS_MBOX_INTR((_intr_status)) |	\
++	 BNA_IS_ERR_INTR((_intr_status)))
++
++#define BNA_IS_INTX_DATA_INTR(_intr_status)	\
++	((_intr_status) & __LPU2HOST_IB_STATUS_BITS)
++
++#define BNA_INTR_STATUS_MBOX_CLR(_intr_status)			\
++do {								\
++	(_intr_status) &= ~(__LPU02HOST_MBOX0_STATUS_BITS |	\
++			__LPU02HOST_MBOX1_STATUS_BITS | 	\
++			__LPU12HOST_MBOX0_STATUS_BITS | 	\
++			__LPU12HOST_MBOX1_STATUS_BITS); 	\
++} while (0)
++
++#define BNA_INTR_STATUS_ERR_CLR(_intr_status)		\
++do {							\
++	(_intr_status) &= ~(__EMC_ERROR_STATUS_BITS |	\
++		__LPU0_ERROR_STATUS_BITS |		\
++		__LPU1_ERROR_STATUS_BITS |		\
++		__PSS_ERROR_STATUS_BITS  |		\
++		__HALT_STATUS_BITS);			\
++} while (0)
++
++#define bna_intx_disable(_bna, _cur_mask)		\
++{							\
++	(_cur_mask) = readl((_bna)->regs.fn_int_mask);\
++	writel(0xffffffff, (_bna)->regs.fn_int_mask);\
++}
++
++#define bna_intx_enable(bna, new_mask) 			\
++	writel((new_mask), (bna)->regs.fn_int_mask)
++
++#define bna_mbox_intr_disable(bna)		\
++	writel((readl((bna)->regs.fn_int_mask) | \
++	     (__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS)), \
++	     (bna)->regs.fn_int_mask)
++
++#define bna_mbox_intr_enable(bna)		\
++	writel((readl((bna)->regs.fn_int_mask) & \
++	     ~(__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS)), \
++	     (bna)->regs.fn_int_mask)
++
++#define bna_intr_status_get(_bna, _status)				\
++{									\
++	(_status) = readl((_bna)->regs.fn_int_status);		\
++	if ((_status)) {						\
++		writel((_status) & ~(__LPU02HOST_MBOX0_STATUS_BITS |\
++					  __LPU02HOST_MBOX1_STATUS_BITS |\
++					  __LPU12HOST_MBOX0_STATUS_BITS |\
++					  __LPU12HOST_MBOX1_STATUS_BITS), \
++			      (_bna)->regs.fn_int_status);\
++	}								\
++}
++
++#define bna_intr_status_get_no_clr(_bna, _status)		\
++	(_status) = readl((_bna)->regs.fn_int_status)
++
++#define bna_intr_mask_get(bna, mask)		\
++	(*mask) = readl((bna)->regs.fn_int_mask)
++
++#define bna_intr_ack(bna, intr_bmap)		\
++	writel((intr_bmap), (bna)->regs.fn_int_status)
++
++#define bna_ib_intx_disable(bna, ib_id)		\
++	writel(readl((bna)->regs.fn_int_mask) | \
++	    (1 << (ib_id)), \
++	    (bna)->regs.fn_int_mask)
++
++#define bna_ib_intx_enable(bna, ib_id)		\
++	writel(readl((bna)->regs.fn_int_mask) & \
++	    ~(1 << (ib_id)), \
++	    (bna)->regs.fn_int_mask)
++
++#define bna_mbox_msix_idx_set(_device) \
++do {\
++	writel(((_device)->vector & 0x000001FF), \
++		(_device)->bna->pcidev.pci_bar_kva + \
++		reg_offset[(_device)->bna->pcidev.pci_func].msix_idx);\
++} while (0)
++
++/**
++ *
++ * TxQ, RxQ, CQ related bits, offsets, macros
++ *
++ */
++
++#define	BNA_Q_IDLE_STATE	0x00008001
++
++#define BNA_GET_DOORBELL_BASE_ADDR(_bar0)	\
++	((_bar0) + HQM_DOORBELL_BLK_BASE_ADDR)
++
++#define BNA_GET_DOORBELL_ENTRY_OFFSET(_entry)		\
++	((HQM_DOORBELL_BLK_BASE_ADDR)		\
++	+ ((_entry) << 7))
++
++#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
++		(0x80000000 | ((_timeout) << 16) | (_events))
++
++#define BNA_DOORBELL_IB_INT_DISABLE		(0x40000000)
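++
++/*
++ * Example (purely the packing above): acking 3 events with a
++ * coalescing timeout of 5 yields
++ * BNA_DOORBELL_IB_INT_ACK(5, 3) = 0x80000000 | 0x00050000 | 3
++ * = 0x80050003.
++ */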
++
++/* TxQ Entry Opcodes */
++#define BNA_TXQ_WI_SEND 		(0x402)	/* Single Frame Transmission */
++#define BNA_TXQ_WI_SEND_LSO 		(0x403)	/* Multi-Frame Transmission */
++#define BNA_TXQ_WI_EXTENSION		(0x104)	/* Extension WI */
++
++/* TxQ Entry Control Flags */
++#define BNA_TXQ_WI_CF_FCOE_CRC  	(1 << 8)
++#define BNA_TXQ_WI_CF_IPID_MODE 	(1 << 5)
++#define BNA_TXQ_WI_CF_INS_PRIO  	(1 << 4)
++#define BNA_TXQ_WI_CF_INS_VLAN  	(1 << 3)
++#define BNA_TXQ_WI_CF_UDP_CKSUM 	(1 << 2)
++#define BNA_TXQ_WI_CF_TCP_CKSUM 	(1 << 1)
++#define BNA_TXQ_WI_CF_IP_CKSUM  	(1 << 0)
++
++#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
++		(((_hdr_size) << 10) | ((_offset) & 0x3FF))
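++
++/*
++ * Packing example (units as per catapult_spec.pdf):
++ * BNA_TXQ_WI_L4_HDR_N_OFFSET(5, 34) = (5 << 10) | 34 = 0x1422.
++ */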
++
++/*
++ * Completion Q defines
++ */
++/* CQ Entry Flags */
++#define	BNA_CQ_EF_MAC_ERROR 	(1 <<  0)
++#define	BNA_CQ_EF_FCS_ERROR 	(1 <<  1)
++#define	BNA_CQ_EF_TOO_LONG  	(1 <<  2)
++#define	BNA_CQ_EF_FC_CRC_OK 	(1 <<  3)
++
++#define	BNA_CQ_EF_RSVD1 	(1 <<  4)
++#define	BNA_CQ_EF_L4_CKSUM_OK	(1 <<  5)
++#define	BNA_CQ_EF_L3_CKSUM_OK	(1 <<  6)
++#define	BNA_CQ_EF_HDS_HEADER	(1 <<  7)
++
++#define	BNA_CQ_EF_UDP   	(1 <<  8)
++#define	BNA_CQ_EF_TCP   	(1 <<  9)
++#define	BNA_CQ_EF_IP_OPTIONS	(1 << 10)
++#define	BNA_CQ_EF_IPV6  	(1 << 11)
++
++#define	BNA_CQ_EF_IPV4  	(1 << 12)
++#define	BNA_CQ_EF_VLAN  	(1 << 13)
++#define	BNA_CQ_EF_RSS   	(1 << 14)
++#define	BNA_CQ_EF_RSVD2 	(1 << 15)
++
++#define	BNA_CQ_EF_MCAST_MATCH   (1 << 16)
++#define	BNA_CQ_EF_MCAST 	(1 << 17)
++#define BNA_CQ_EF_BCAST 	(1 << 18)
++#define	BNA_CQ_EF_REMOTE 	(1 << 19)
++
++#define	BNA_CQ_EF_LOCAL		(1 << 20)
++
++/**
++ *
++ * Data structures
++ *
++ */
++
++enum txf_flags {
++	BFI_TXF_CF_ENABLE		= 1 << 0,
++	BFI_TXF_CF_VLAN_FILTER		= 1 << 8,
++	BFI_TXF_CF_VLAN_ADMIT		= 1 << 9,
++	BFI_TXF_CF_VLAN_INSERT		= 1 << 10,
++	BFI_TXF_CF_RSVD1		= 1 << 11,
++	BFI_TXF_CF_MAC_SA_CHECK		= 1 << 12,
++	BFI_TXF_CF_VLAN_WI_BASED	= 1 << 13,
++	BFI_TXF_CF_VSWITCH_MCAST	= 1 << 14,
++	BFI_TXF_CF_VSWITCH_UCAST	= 1 << 15,
++	BFI_TXF_CF_RSVD2		= 0x7F << 1
++};
++
++enum ib_flags {
++	BFI_IB_CF_MASTER_ENABLE		= (1 << 0),
++	BFI_IB_CF_MSIX_MODE		= (1 << 1),
++	BFI_IB_CF_COALESCING_MODE	= (1 << 2),
++	BFI_IB_CF_INTER_PKT_ENABLE	= (1 << 3),
++	BFI_IB_CF_INT_ENABLE		= (1 << 4),
++	BFI_IB_CF_INTER_PKT_DMA		= (1 << 5),
++	BFI_IB_CF_ACK_PENDING		= (1 << 6),
++	BFI_IB_CF_RESERVED1		= (1 << 7)
++};
++
++enum rss_hash_type {
++	BFI_RSS_T_V4_TCP    		= (1 << 11),
++	BFI_RSS_T_V4_IP     		= (1 << 10),
++	BFI_RSS_T_V6_TCP    		= (1 <<  9),
++	BFI_RSS_T_V6_IP     		= (1 <<  8)
++};
++enum hds_header_type {
++	BNA_HDS_T_V4_TCP	= (1 << 11),
++	BNA_HDS_T_V4_UDP	= (1 << 10),
++	BNA_HDS_T_V6_TCP	= (1 << 9),
++	BNA_HDS_T_V6_UDP	= (1 << 8),
++	BNA_HDS_FORCED		= (1 << 7),
++};
++enum rxf_flags {
++	BNA_RXF_CF_SM_LG_RXQ			= (1 << 15),
++	BNA_RXF_CF_DEFAULT_VLAN			= (1 << 14),
++	BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE	= (1 << 13),
++	BNA_RXF_CF_VLAN_STRIP			= (1 << 12),
++	BNA_RXF_CF_RSS_ENABLE			= (1 <<  8)
++};
++struct bna_chip_regs_offset {
++	u32 page_addr;
++	u32 fn_int_status;
++	u32 fn_int_mask;
++	u32 msix_idx;
++};
++extern const struct bna_chip_regs_offset reg_offset[];
++
++struct bna_chip_regs {
++	void __iomem *page_addr;
++	void __iomem *fn_int_status;
++	void __iomem *fn_int_mask;
++};
++
++struct bna_txq_mem {
++	u32 pg_tbl_addr_lo;
++	u32 pg_tbl_addr_hi;
++	u32 cur_q_entry_lo;
++	u32 cur_q_entry_hi;
++	u32 reserved1;
++	u32 reserved2;
++	u32 pg_cnt_n_prd_ptr;	/* 31:16->total page count */
++					/* 15:0 ->producer pointer (index?) */
++	u32 entry_n_pg_size; 	/* 31:16->entry size */
++					/* 15:0 ->page size */
++	u32 int_blk_n_cns_ptr;	/* 31:24->Int Blk Id;  */
++					/* 23:16->Int Blk Offset */
++					/* 15:0 ->consumer pointer(index?) */
++	u32 cns_ptr2_n_q_state;	/* 31:16->cons. ptr 2; 15:0-> Q state */
++	u32 nxt_qid_n_fid_n_pri;	/* 17:10->next QId; */
++					/* 9:3->FID; 2:0->Priority */
++	u32 wvc_n_cquota_n_rquota; /* 31:24->WI Vector Count; */
++					/* 23:12->Cfg Quota; */
++					/* 11:0 ->Run Quota */
++	u32 reserved3[4];
++};
++
++struct bna_rxq_mem {
++	u32 pg_tbl_addr_lo;
++	u32 pg_tbl_addr_hi;
++	u32 cur_q_entry_lo;
++	u32 cur_q_entry_hi;
++	u32 reserved1;
++	u32 reserved2;
++	u32 pg_cnt_n_prd_ptr;	/* 31:16->total page count */
++					/* 15:0 ->producer pointer (index?) */
++	u32 entry_n_pg_size;	/* 31:16->entry size */
++					/* 15:0 ->page size */
++	u32 sg_n_cq_n_cns_ptr;	/* 31:28->reserved; 27:24->sg count */
++					/* 23:16->CQ; */
++					/* 15:0->consumer pointer(index?) */
++	u32 buf_sz_n_q_state; 	/* 31:16->buffer size; 15:0-> Q state */
++	u32 next_qid;		/* 17:10->next QId */
++	u32 reserved3;
++	u32 reserved4[4];
++};
++
++struct bna_rxtx_q_mem {
++	struct bna_rxq_mem rxq;
++	struct bna_txq_mem txq;
++};
++
++struct bna_cq_mem {
++	u32 pg_tbl_addr_lo;
++	u32 pg_tbl_addr_hi;
++	u32 cur_q_entry_lo;
++	u32 cur_q_entry_hi;
++
++	u32 reserved1;
++	u32 reserved2;
++	u32 pg_cnt_n_prd_ptr;	/* 31:16->total page count */
++					/* 15:0 ->producer pointer (index?) */
++	u32 entry_n_pg_size;	/* 31:16->entry size */
++					/* 15:0 ->page size */
++	u32 int_blk_n_cns_ptr;	/* 31:24->Int Blk Id; */
++					/* 23:16->Int Blk Offset */
++					/* 15:0 ->consumer pointer(index?) */
++	u32 q_state;		/* 31:16->reserved; 15:0-> Q state */
++	u32 reserved3[2];
++	u32 reserved4[4];
++};
++
++struct bna_ib_blk_mem {
++	u32 host_addr_lo;
++	u32 host_addr_hi;
++	u32 clsc_n_ctrl_n_msix;	/* 31:24->coalescing; */
++					/* 23:16->coalescing cfg; */
++					/* 15:8 ->control; */
++					/* 7:0 ->msix; */
++	u32 ipkt_n_ent_n_idxof;
++	u32 ipkt_cnt_cfg_n_unacked;
++
++	u32 reserved[3];
++};
++
++struct bna_idx_tbl_mem {
++	u32 idx;	  /* !< 31:16->res;15:0->idx; */
++};
++
++struct bna_doorbell_qset {
++	u32 rxq[0x20 >> 2];
++	u32 txq[0x20 >> 2];
++	u32 ib0[0x20 >> 2];
++	u32 ib1[0x20 >> 2];
++};
++
++struct bna_rx_fndb_ram {
++	u32 rss_prop;
++	u32 size_routing_props;
++	u32 rit_hds_mcastq;
++	u32 control_flags;
++};
++
++struct bna_tx_fndb_ram {
++	u32 vlan_n_ctrl_flags;
++};
++
++/**
++ * @brief
++ *  Structure which maps to RxFn Indirection Table (RIT)
++ *  Size : 1 word
++ *  See catapult_spec.pdf, RxA for details
++ */
++struct bna_rit_mem {
++	u32 rxq_ids;	/* !< 31:12->res;11:0->two 6 bit RxQ Ids */
++};
++
++/**
++ * @brief
++ *  Structure which maps to RSS Table entry
++ *  Size : 16 words
++ *  See catapult_spec.pdf, RAD for details
++ */
++struct bna_rss_mem {
++	/*
++	 * 31:12-> res
++	 * 11:8 -> protocol type
++	 *  7:0 -> hash index
++	 */
++	u32 type_n_hash;
++	u32 hash_key[10];  /* !< 40 byte Toeplitz hash key */
++	u32 reserved[5];
++};
++
++/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
++struct bna_dma_addr {
++	u32		msb;
++	u32		lsb;
++};
++
++struct bna_txq_wi_vector {
++	u16 		reserved;
++	u16 		length;		/* Only 14 LSB are valid */
++	struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */
++};
++
++typedef u16 bna_txq_wi_opcode_t;
++
++typedef u16 bna_txq_wi_ctrl_flag_t;
++
++/**
++ *  TxQ Entry Structure
++ *
++ *  BEWARE:  Load values into this structure with correct endianness.
++ */
++struct bna_txq_entry {
++	union {
++		struct {
++			u8 reserved;
++			u8 num_vectors;	/* number of vectors present */
++			bna_txq_wi_opcode_t opcode; /* Either */
++						    /* BNA_TXQ_WI_SEND or */
++						    /* BNA_TXQ_WI_SEND_LSO */
++			bna_txq_wi_ctrl_flag_t flags; /* OR of all the flags */
++			u16 l4_hdr_size_n_offset;
++			u16 vlan_tag;
++			u16 lso_mss;	/* Only 14 LSB are valid */
++			u32 frame_length;	/* Only 24 LSB are valid */
++		} wi;
++
++		struct {
++			u16 reserved;
++			bna_txq_wi_opcode_t opcode; /* Must be */
++						    /* BNA_TXQ_WI_EXTENSION */
++			u32 reserved2[3];	/* Place holder for */
++						/* removed vector (12 bytes) */
++		} wi_ext;
++	} hdr;
++	struct bna_txq_wi_vector vector[4];
++};
++#define wi_hdr  	hdr.wi
++#define wi_ext_hdr  hdr.wi_ext
++
++/* RxQ Entry Structure */
++struct bna_rxq_entry {		/* Rx-Buffer */
++	struct bna_dma_addr host_addr; /* Rx-Buffer DMA address */
++};
++
++typedef u32 bna_cq_e_flag_t;
++
++/* CQ Entry Structure */
++struct bna_cq_entry {
++	bna_cq_e_flag_t flags;
++	u16 vlan_tag;
++	u16 length;
++	u32 rss_hash;
++	u8 valid;
++	u8 reserved1;
++	u8 reserved2;
++	u8 rxq_id;
++};
++
++#endif /* __BNA_HW_H__ */
+--- /dev/null
++++ b/drivers/net/bna/bna_txrx.c
+@@ -0,0 +1,4209 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++#include "bna.h"
++#include "bfa_sm.h"
++#include "bfi.h"
++
++/**
++ * IB
++ */
++#define bna_ib_find_free_ibidx(_mask, _pos)\
++do {\
++	(_pos) = 0;\
++	while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\
++		((1 << (_pos)) & (_mask)))\
++		(_pos)++;\
++} while (0)
++
++#define bna_ib_count_ibidx(_mask, _count)\
++do {\
++	int pos = 0;\
++	(_count) = 0;\
++	while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\
++		if ((1 << pos) & (_mask))\
++			(_count) = pos + 1;\
++		pos++;\
++	} \
++} while (0)
++
++#define bna_ib_select_segpool(_count, _q_idx)\
++do {\
++	int i;\
++	(_q_idx) = -1;\
++	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\
++		if ((_count) <= ibidx_pool[i].pool_entry_size) {\
++			(_q_idx) = i;\
++			break;\
++		} \
++	} \
++} while (0)
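++
++/*
++ * Illustration of the three helpers above (editor's example): with
++ * _mask = 0x07 (indexes 0-2 in use), bna_ib_find_free_ibidx()
++ * yields 3; bna_ib_count_ibidx(0x05, count) yields count = 3, the
++ * highest used index plus one, holes included; and
++ * bna_ib_select_segpool() then picks the smallest pool whose
++ * pool_entry_size can hold that count.
++ */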
++
++struct bna_ibidx_pool {
++	int	pool_size;
++	int	pool_entry_size;
++};
++init_ibidx_pool(ibidx_pool);
++
++static struct bna_intr *
++bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type,
++		int vector)
++{
++	struct bna_intr *intr;
++	struct list_head *qe;
++
++	list_for_each(qe, &ib_mod->intr_active_q) {
++		intr = (struct bna_intr *)qe;
++
++		if ((intr->intr_type == intr_type) &&
++			(intr->vector == vector)) {
++			intr->ref_count++;
++			return intr;
++		}
++	}
++
++	if (list_empty(&ib_mod->intr_free_q))
++		return NULL;
++
++	bfa_q_deq(&ib_mod->intr_free_q, &intr);
++	bfa_q_qe_init(&intr->qe);
++
++	intr->ref_count = 1;
++	intr->intr_type = intr_type;
++	intr->vector = vector;
++
++	list_add_tail(&intr->qe, &ib_mod->intr_active_q);
++
++	return intr;
++}
++
++static void
++bna_intr_put(struct bna_ib_mod *ib_mod,
++		struct bna_intr *intr)
++{
++	intr->ref_count--;
++
++	if (intr->ref_count == 0) {
++		intr->ib = NULL;
++		list_del(&intr->qe);
++		bfa_q_qe_init(&intr->qe);
++		list_add_tail(&intr->qe, &ib_mod->intr_free_q);
++	}
++}
++
++void
++bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
++		struct bna_res_info *res_info)
++{
++	int i;
++	int j;
++	int count;
++	u8 offset;
++	struct bna_doorbell_qset *qset;
++	unsigned long off;
++
++	ib_mod->bna = bna;
++
++	ib_mod->ib = (struct bna_ib *)
++		res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva;
++	ib_mod->intr = (struct bna_intr *)
++		res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva;
++	ib_mod->idx_seg = (struct bna_ibidx_seg *)
++		res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva;
++
++	INIT_LIST_HEAD(&ib_mod->ib_free_q);
++	INIT_LIST_HEAD(&ib_mod->intr_free_q);
++	INIT_LIST_HEAD(&ib_mod->intr_active_q);
++
++	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++)
++		INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]);
++
++	for (i = 0; i < BFI_MAX_IB; i++) {
++		ib_mod->ib[i].ib_id = i;
++
++		ib_mod->ib[i].ib_seg_host_addr_kva =
++		res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
++		ib_mod->ib[i].ib_seg_host_addr.lsb =
++		res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
++		ib_mod->ib[i].ib_seg_host_addr.msb =
++		res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
++
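++		/*
++		 * Offsetof-style arithmetic on a NULL base: IB i maps
++		 * to doorbell-q set i/2, using ib0 for even i and ib1
++		 * for odd i (ib0[8] aliases ib1[0]).
++		 */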
++		qset = (struct bna_doorbell_qset *)0;
++		off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1)
++					* (0x20 >> 2)]);
++		ib_mod->ib[i].door_bell.doorbell_addr = off +
++			BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
++
++		bfa_q_qe_init(&ib_mod->ib[i].qe);
++		list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q);
++
++		bfa_q_qe_init(&ib_mod->intr[i].qe);
++		list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q);
++	}
++
++	count = 0;
++	offset = 0;
++	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
++		for (j = 0; j < ibidx_pool[i].pool_size; j++) {
++			bfa_q_qe_init(&ib_mod->idx_seg[count]);
++			ib_mod->idx_seg[count].ib_seg_size =
++					ibidx_pool[i].pool_entry_size;
++			ib_mod->idx_seg[count].ib_idx_tbl_offset = offset;
++			list_add_tail(&ib_mod->idx_seg[count].qe,
++				&ib_mod->ibidx_seg_pool[i]);
++			count++;
++			offset += ibidx_pool[i].pool_entry_size;
++		}
++	}
++}
++
++void
++bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
++{
++	int i;
++	int j;
++	struct list_head *qe;
++
++	i = 0;
++	list_for_each(qe, &ib_mod->ib_free_q)
++		i++;
++
++	i = 0;
++	list_for_each(qe, &ib_mod->intr_free_q)
++		i++;
++
++	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
++		j = 0;
++		list_for_each(qe, &ib_mod->ibidx_seg_pool[i])
++			j++;
++	}
++
++	ib_mod->bna = NULL;
++}
++
++struct bna_ib *
++bna_ib_get(struct bna_ib_mod *ib_mod,
++		enum bna_intr_type intr_type,
++		int vector)
++{
++	struct bna_ib *ib;
++	struct bna_intr *intr;
++
++	if (intr_type == BNA_INTR_T_INTX)
++		vector = (1 << vector);
++
++	intr = bna_intr_get(ib_mod, intr_type, vector);
++	if (intr == NULL)
++		return NULL;
++
++	if (intr->ib) {
++		if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) {
++			bna_intr_put(ib_mod, intr);
++			return NULL;
++		}
++		intr->ib->ref_count++;
++		return intr->ib;
++	}
++
++	if (list_empty(&ib_mod->ib_free_q)) {
++		bna_intr_put(ib_mod, intr);
++		return NULL;
++	}
++
++	bfa_q_deq(&ib_mod->ib_free_q, &ib);
++	bfa_q_qe_init(&ib->qe);
++
++	ib->ref_count = 1;
++	ib->start_count = 0;
++	ib->idx_mask = 0;
++
++	ib->intr = intr;
++	ib->idx_seg = NULL;
++	intr->ib = ib;
++
++	ib->bna = ib_mod->bna;
++
++	return ib;
++}
++
++void
++bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
++{
++	bna_intr_put(ib_mod, ib->intr);
++
++	ib->ref_count--;
++
++	if (ib->ref_count == 0) {
++		ib->intr = NULL;
++		ib->bna = NULL;
++		list_add_tail(&ib->qe, &ib_mod->ib_free_q);
++	}
++}
++
++/* Returns index offset - starting from 0 */
++int
++bna_ib_reserve_idx(struct bna_ib *ib)
++{
++	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
++	struct bna_ibidx_seg *idx_seg;
++	int idx;
++	int num_idx;
++	int q_idx;
++
++	/* Find the first free index position */
++	bna_ib_find_free_ibidx(ib->idx_mask, idx);
++	if (idx == BFI_IBIDX_MAX_SEGSIZE)
++		return -1;
++
++	/*
++	 * Calculate the total number of indexes held by this IB,
++	 * including the index newly reserved above.
++	 */
++	bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx);
++
++	/* See if there is a free space in the index segment held by this IB */
++	if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) {
++		ib->idx_mask |= (1 << idx);
++		return idx;
++	}
++
++	if (ib->start_count)
++		return -1;
++
++	/* Allocate a new segment */
++	bna_ib_select_segpool(num_idx, q_idx);
++	while (1) {
++		if (q_idx == BFI_IBIDX_TOTAL_POOLS)
++			return -1;
++		if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx]))
++			break;
++		q_idx++;
++	}
++	bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg);
++	bfa_q_qe_init(&idx_seg->qe);
++
++	/* Free the old segment */
++	if (ib->idx_seg) {
++		bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx);
++		list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]);
++	}
++
++	ib->idx_seg = idx_seg;
++
++	ib->idx_mask |= (1 << idx);
++
++	return idx;
++}
++
++void
++bna_ib_release_idx(struct bna_ib *ib, int idx)
++{
++	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
++	struct bna_ibidx_seg *idx_seg;
++	int num_idx;
++	int cur_q_idx;
++	int new_q_idx;
++
++	ib->idx_mask &= ~(1 << idx);
++
++	if (ib->start_count)
++		return;
++
++	bna_ib_count_ibidx(ib->idx_mask, num_idx);
++
++	/*
++	 * Free the segment, if there are no more indexes in the segment
++	 * held by this IB
++	 */
++	if (!num_idx) {
++		bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
++		list_add_tail(&ib->idx_seg->qe,
++			&ib_mod->ibidx_seg_pool[cur_q_idx]);
++		ib->idx_seg = NULL;
++		return;
++	}
++
++	/* See if we can move to a smaller segment */
++	bna_ib_select_segpool(num_idx, new_q_idx);
++	bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
++	while (new_q_idx < cur_q_idx) {
++		if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx]))
++			break;
++		new_q_idx++;
++	}
++	if (new_q_idx < cur_q_idx) {
++		/* Select the new smaller segment */
++		bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg);
++		bfa_q_qe_init(&idx_seg->qe);
++		/* Free the old segment */
++		list_add_tail(&ib->idx_seg->qe,
++			&ib_mod->ibidx_seg_pool[cur_q_idx]);
++		ib->idx_seg = idx_seg;
++	}
++}
++
++int
++bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
++{
++	if (ib->start_count)
++		return -1;
++
++	ib->ib_config.coalescing_timeo = ib_config->coalescing_timeo;
++	ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo;
++	ib->ib_config.interpkt_count = ib_config->interpkt_count;
++	ib->ib_config.ctrl_flags = ib_config->ctrl_flags;
++
++	ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE;
++	if (ib->intr->intr_type == BNA_INTR_T_MSIX)
++		ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE;
++
++	return 0;
++}
++
++void
++bna_ib_start(struct bna_ib *ib)
++{
++	struct bna_ib_blk_mem ib_cfg;
++	struct bna_ib_blk_mem *ib_mem;
++	u32 pg_num;
++	u32 intx_mask;
++	int i;
++	void __iomem *base_addr;
++	unsigned long off;
++
++	ib->start_count++;
++
++	if (ib->start_count > 1)
++		return;
++
++	ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb);
++	ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb);
++
++	ib_cfg.clsc_n_ctrl_n_msix = (((u32)
++				     ib->ib_config.coalescing_timeo << 16) |
++				((u32)ib->ib_config.ctrl_flags << 8) |
++				(ib->intr->vector));
++	ib_cfg.ipkt_n_ent_n_idxof =
++				((u32)
++				 (ib->ib_config.interpkt_timeo & 0xf) << 16) |
++				((u32)ib->idx_seg->ib_seg_size << 8) |
++				(ib->idx_seg->ib_idx_tbl_offset);
++	ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32)
++					 ib->ib_config.interpkt_count << 24);
++
++	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
++				HQM_IB_RAM_BASE_OFFSET);
++	writel(pg_num, ib->bna->regs.page_addr);
++
++	base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
++					HQM_IB_RAM_BASE_OFFSET);
++
++	ib_mem = (struct bna_ib_blk_mem *)0;
++	off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo;
++	writel(htonl(ib_cfg.host_addr_lo), base_addr + off);
++
++	off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi;
++	writel(htonl(ib_cfg.host_addr_hi), base_addr + off);
++
++	off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix;
++	writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off);
++
++	off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof;
++	writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off);
++
++	off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked;
++	writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off);
++
++	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
++				(u32)ib->ib_config.coalescing_timeo, 0);
++
++	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
++				HQM_INDX_TBL_RAM_BASE_OFFSET);
++	writel(pg_num, ib->bna->regs.page_addr);
++
++	base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
++					HQM_INDX_TBL_RAM_BASE_OFFSET);
++	for (i = 0; i < ib->idx_seg->ib_seg_size; i++) {
++		off = (unsigned long)
++		((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE);
++		writel(0, base_addr + off);
++	}
++
++	if (ib->intr->intr_type == BNA_INTR_T_INTX) {
++		bna_intx_disable(ib->bna, intx_mask);
++		intx_mask &= ~(ib->intr->vector);
++		bna_intx_enable(ib->bna, intx_mask);
++	}
++}
++
++void
++bna_ib_stop(struct bna_ib *ib)
++{
++	u32 intx_mask;
++
++	ib->start_count--;
++
++	if (ib->start_count == 0) {
++		writel(BNA_DOORBELL_IB_INT_DISABLE,
++				ib->door_bell.doorbell_addr);
++		if (ib->intr->intr_type == BNA_INTR_T_INTX) {
++			bna_intx_disable(ib->bna, intx_mask);
++			intx_mask |= (ib->intr->vector);
++			bna_intx_enable(ib->bna, intx_mask);
++		}
++	}
++}
++
++void
++bna_ib_fail(struct bna_ib *ib)
++{
++	ib->start_count = 0;
++}
++
++/**
++ * RXF
++ */
++static void rxf_enable(struct bna_rxf *rxf);
++static void rxf_disable(struct bna_rxf *rxf);
++static void __rxf_config_set(struct bna_rxf *rxf);
++static void __rxf_rit_set(struct bna_rxf *rxf);
++static void __bna_rxf_stat_clr(struct bna_rxf *rxf);
++static int rxf_process_packet_filter(struct bna_rxf *rxf);
++static int rxf_clear_packet_filter(struct bna_rxf *rxf);
++static void rxf_reset_packet_filter(struct bna_rxf *rxf);
++static void rxf_cb_enabled(void *arg, int status);
++static void rxf_cb_disabled(void *arg, int status);
++static void bna_rxf_cb_stats_cleared(void *arg, int status);
++static void __rxf_enable(struct bna_rxf *rxf);
++static void __rxf_disable(struct bna_rxf *rxf);
++
++bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
++			enum bna_rxf_event);
++bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf,
++			enum bna_rxf_event);
++bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf,
++			enum bna_rxf_event);
++bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
++			enum bna_rxf_event);
++bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf,
++			enum bna_rxf_event);
++bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf,
++			enum bna_rxf_event);
++bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf,
++			enum bna_rxf_event);
++bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf,
++			enum bna_rxf_event);
++bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf,
++			enum bna_rxf_event);
++
++static struct bfa_sm_table rxf_sm_table[] = {
++	{BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED},
++	{BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT},
++	{BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT},
++	{BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED},
++	{BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT},
++	{BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT},
++	{BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT},
++	{BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT},
++	{BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT}
++};
++
++static void
++bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
++{
++	call_rxf_stop_cbfn(rxf, BNA_CB_SUCCESS);
++}
++
++static void
++bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
++{
++	switch (event) {
++	case RXF_E_START:
++		bfa_fsm_set_state(rxf, bna_rxf_sm_start_wait);
++		break;
++
++	case RXF_E_STOP:
++		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
++		break;
++
++	case RXF_E_FAIL:
++		/* No-op */
++		break;
++
++	case RXF_E_CAM_FLTR_MOD:
++		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
++		break;
++
++	case RXF_E_STARTED:
++	case RXF_E_STOPPED:
++	case RXF_E_CAM_FLTR_RESP:
++		/**
++		 * These events are received due to flushing of mbox
++		 * when device fails
++		 */
++		/* No-op */
++		break;
++
++	case RXF_E_PAUSE:
++		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
++		call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
++		break;
++
++	case RXF_E_RESUME:
++		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
++		call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
++		break;
++
++	default:
++		bfa_sm_fault(rxf->rx->bna, event);
++	}
++}
++
++static void
++bna_rxf_sm_start_wait_entry(struct bna_rxf *rxf)
++{
++	__rxf_config_set(rxf);
++	__rxf_rit_set(rxf);
++	rxf_enable(rxf);
++}
++
++static void
++bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
++{
++	switch (event) {
++	case RXF_E_STOP:
++		/**
++		 * STOP originates from bnad. When this happens,
++		 * it cannot be waiting for a filter update
++		 */
++		call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
++		bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
++		break;
++
++	case RXF_E_FAIL:
++		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
++		call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
++		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
++		break;
++
++	case RXF_E_CAM_FLTR_MOD:
++		/* No-op */
++		break;
++
++	case RXF_E_STARTED:
++		/**
++		 * Force rxf_process_filter() to go through initial
++		 * config
++		 */
++		if ((rxf->ucast_active_mac != NULL) &&
++			(rxf->ucast_pending_set == 0))
++			rxf->ucast_pending_set = 1;
++
++		if (rxf->rss_status == BNA_STATUS_T_ENABLED)
++			rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
++
++		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
++
++		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
++		break;
++
++	case RXF_E_PAUSE:
++	case RXF_E_RESUME:
++		rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
++		break;
++
++	default:
++		bfa_sm_fault(rxf->rx->bna, event);
++	}
++}
++
++static void
++bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf *rxf)
++{
++	if (!rxf_process_packet_filter(rxf)) {
++		/* No more pending CAM entries to update */
++		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
++	}
++}
++
++static void
++bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
++{
++	switch (event) {
++	case RXF_E_STOP:
++		/**
++		 * STOP originates from bnad. When this happens,
++		 * it cannot be waiting for a filter update
++		 */
++		call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
++		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
++		break;
++
++	case RXF_E_FAIL:
++		rxf_reset_packet_filter(rxf);
++		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
++		call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
++		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
++		break;
++
++	case RXF_E_CAM_FLTR_MOD:
++		/* No-op */
++		break;
++
++	case RXF_E_CAM_FLTR_RESP:
++		if (!rxf_process_packet_filter(rxf)) {
++			/* No more pending CAM entries to update */
++			call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
++			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
++		}
++		break;
++
++	case RXF_E_PAUSE:
++	case RXF_E_RESUME:
++		rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
++		break;
++
++	default:
++		bfa_sm_fault(rxf->rx->bna, event);
++	}
++}
++
++static void
++bna_rxf_sm_started_entry(struct bna_rxf *rxf)
++{
++	call_rxf_start_cbfn(rxf, BNA_CB_SUCCESS);
++
++	if (rxf->rxf_flags & BNA_RXF_FL_OPERSTATE_CHANGED) {
++		if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
++			bfa_fsm_send_event(rxf, RXF_E_PAUSE);
++		else
++			bfa_fsm_send_event(rxf, RXF_E_RESUME);
++	}
++
++}
++
++static void
++bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
++{
++	switch (event) {
++	case RXF_E_STOP:
++		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
++		/* Hack to get FSM start clearing CAM entries */
++		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
++		break;
++
++	case RXF_E_FAIL:
++		rxf_reset_packet_filter(rxf);
++		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
++		break;
++
++	case RXF_E_CAM_FLTR_MOD:
++		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
++		break;
++
++	case RXF_E_PAUSE:
++		bfa_fsm_set_state(rxf, bna_rxf_sm_pause_wait);
++		break;
++
++	case RXF_E_RESUME:
++		bfa_fsm_set_state(rxf, bna_rxf_sm_resume_wait);
++		break;
++
++	default:
++		bfa_sm_fault(rxf->rx->bna, event);
++	}
++}
++
++static void
++bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)
++{
++	/**
++	 *  Note: Do not add rxf_clear_packet_filter here.
++	 * It will overstep mbox when this transition happens:
++	 * 	cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
++	 */
++}
++
++static void
++bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
++{
++	switch (event) {
++	case RXF_E_FAIL:
++		/**
++		 * FSM was in the process of stopping, initiated by
++		 * bnad. When this happens, no one can be waiting for
++		 * start or filter update
++		 */
++		rxf_reset_packet_filter(rxf);
++		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
++		break;
++
++	case RXF_E_CAM_FLTR_RESP:
++		if (!rxf_clear_packet_filter(rxf)) {
++			/* No more pending CAM entries to clear */
++			bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
++			rxf_disable(rxf);
++		}
++		break;
++
++	default:
++		bfa_sm_fault(rxf->rx->bna, event);
++	}
++}
++
++static void
++bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)
++{
++	/**
++	 * NOTE: Do not add  rxf_disable here.
++	 * It will overstep mbox when this transition happens:
++	 * 	start_wait -> stop_wait on RXF_E_STOP event
++	 */
++}
++
++static void
++bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
++{
++	switch (event) {
++	case RXF_E_FAIL:
++		/**
++		 * FSM was in the process of stopping, initiated by
++		 * bnad. When this happens, no one can be waiting for
++		 * start or filter update
++		 */
++		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
++		break;
++
++	case RXF_E_STARTED:
++		/**
++		 * This event is received due to abrupt transition from
++		 * bna_rxf_sm_start_wait state on receiving
++		 * RXF_E_STOP event
++		 */
++		rxf_disable(rxf);
++		break;
++
++	case RXF_E_STOPPED:
++		/**
++		 * FSM was in the process of stopping, initiated by
++		 * bnad. When this happens, no one can be waiting for
++		 * start or filter update
++		 */
++		bfa_fsm_set_state(rxf, bna_rxf_sm_stat_clr_wait);
++		break;
++
++	case RXF_E_PAUSE:
++		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
++		break;
++
++	case RXF_E_RESUME:
++		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
++		break;
++
++	default:
++		bfa_sm_fault(rxf->rx->bna, event);
++	}
++}
++
++static void
++bna_rxf_sm_pause_wait_entry(struct bna_rxf *rxf)
++{
++	rxf->rxf_flags &=
++		~(BNA_RXF_FL_OPERSTATE_CHANGED | BNA_RXF_FL_RXF_ENABLED);
++	__rxf_disable(rxf);
++}
++
++static void
++bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
++{
++	switch (event) {
++	case RXF_E_FAIL:
++		/**
++		 * FSM was in the process of disabling rxf, initiated by
++		 * bnad.
++		 */
++		call_rxf_pause_cbfn(rxf, BNA_CB_FAIL);
++		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
++		break;
++
++	case RXF_E_STOPPED:
++		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
++		call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
++		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
++		break;
++
++	/*
++	 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
++	 * any other event during these states
++	 */
++	default:
++		bfa_sm_fault(rxf->rx->bna, event);
++	}
++}
++
++static void
++bna_rxf_sm_resume_wait_entry(struct bna_rxf *rxf)
++{
++	rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED);
++	rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
++	__rxf_enable(rxf);
++}
++
++static void
++bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
++{
++	switch (event) {
++	case RXF_E_FAIL:
++		/**
++		 * FSM was in the process of disabling rxf, initiated by
++		 * bnad.
++		 */
++		call_rxf_resume_cbfn(rxf, BNA_CB_FAIL);
++		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
++		break;
++
++	case RXF_E_STARTED:
++		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
++		call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
++		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
++		break;
++
++	/*
++	 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
++	 * any other event during these states
++	 */
++	default:
++		bfa_sm_fault(rxf->rx->bna, event);
++	}
++}
++
++static void
++bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf *rxf)
++{
++	__bna_rxf_stat_clr(rxf);
++}
++
++static void
++bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
++{
++	switch (event) {
++	case RXF_E_FAIL:
++	case RXF_E_STAT_CLEARED:
++		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
++		break;
++
++	default:
++		bfa_sm_fault(rxf->rx->bna, event);
++	}
++}
++
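++/*
++ * The (up to) 64 RxFs are addressed as two 32-bit words in the
++ * mailbox request; e.g. rxf_id 33 sets bit 1 of the second word,
++ * so rxf_id_mask[1] = htonl(0x00000002).
++ */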
++static void
++__rxf_enable(struct bna_rxf *rxf)
++{
++	struct bfi_ll_rxf_multi_req ll_req;
++	u32 bm[2] = {0, 0};
++
++	if (rxf->rxf_id < 32)
++		bm[0] = 1 << rxf->rxf_id;
++	else
++		bm[1] = 1 << (rxf->rxf_id - 32);
++
++	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
++	ll_req.rxf_id_mask[0] = htonl(bm[0]);
++	ll_req.rxf_id_mask[1] = htonl(bm[1]);
++	ll_req.enable = 1;
++
++	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
++			rxf_cb_enabled, rxf);
++
++	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
++}
++
++static void
++__rxf_disable(struct bna_rxf *rxf)
++{
++	struct bfi_ll_rxf_multi_req ll_req;
++	u32 bm[2] = {0, 0};
++
++	if (rxf->rxf_id < 32)
++		bm[0] = 1 << rxf->rxf_id;
++	else
++		bm[1] = 1 << (rxf->rxf_id - 32);
++
++	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
++	ll_req.rxf_id_mask[0] = htonl(bm[0]);
++	ll_req.rxf_id_mask[1] = htonl(bm[1]);
++	ll_req.enable = 0;
++
++	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
++			rxf_cb_disabled, rxf);
++
++	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
++}
++
++static void
++__rxf_config_set(struct bna_rxf *rxf)
++{
++	u32 i;
++	struct bna_rss_mem *rss_mem;
++	struct bna_rx_fndb_ram *rx_fndb_ram;
++	struct bna *bna = rxf->rx->bna;
++	void __iomem *base_addr;
++	unsigned long off;
++
++	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
++			RSS_TABLE_BASE_OFFSET);
++
++	rss_mem = (struct bna_rss_mem *)0;
++
++	/* Configure RSS if required */
++	if (rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE) {
++		/* configure RSS Table */
++		writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM +
++			bna->port_num, RSS_TABLE_BASE_OFFSET),
++					bna->regs.page_addr);
++
++		/* temporarily disable RSS, while hash value is written */
++		off = (unsigned long)&rss_mem[0].type_n_hash;
++		writel(0, base_addr + off);
++
++		for (i = 0; i < BFI_RSS_HASH_KEY_LEN; i++) {
++			off = (unsigned long)
++			&rss_mem[0].hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i];
++			writel(htonl(rxf->rss_cfg.toeplitz_hash_key[i]),
++			base_addr + off);
++		}
++
++		off = (unsigned long)&rss_mem[0].type_n_hash;
++		writel(rxf->rss_cfg.hash_type | rxf->rss_cfg.hash_mask,
++			base_addr + off);
++	}
++
++	/* Configure RxF */
++	writel(BNA_GET_PAGE_NUM(
++		LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2),
++		RX_FNDB_RAM_BASE_OFFSET),
++		bna->regs.page_addr);
++
++	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
++		RX_FNDB_RAM_BASE_OFFSET);
++
++	rx_fndb_ram = (struct bna_rx_fndb_ram *)0;
++
++	/* We always use RSS table 0 */
++	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rss_prop;
++	writel(rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE,
++		base_addr + off);
++
++	/* small large buffer enable/disable */
++	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].size_routing_props;
++	writel((rxf->ctrl_flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80,
++		base_addr + off);
++
++	/* RIT offset,  HDS forced offset, multicast RxQ Id */
++	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rit_hds_mcastq;
++	writel((rxf->rit_segment->rit_offset << 16) |
++		(rxf->forced_offset << 8) |
++		(rxf->hds_cfg.hdr_type & BNA_HDS_FORCED) | rxf->mcast_rxq_id,
++		base_addr + off);
++
++	/*
++	 * default vlan tag, default function enable, strip vlan bytes,
++	 * HDS type, header size
++	 */
++
++	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].control_flags;
++	writel(((u32)rxf->default_vlan_tag << 16) |
++		(rxf->ctrl_flags &
++			(BNA_RXF_CF_DEFAULT_VLAN |
++			BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE |
++			BNA_RXF_CF_VLAN_STRIP)) |
++		(rxf->hds_cfg.hdr_type & ~BNA_HDS_FORCED) |
++		rxf->hds_cfg.header_size,
++		base_addr + off);
++}
++
++void
++__rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status)
++{
++	struct bna *bna = rxf->rx->bna;
++	int i;
++
++	writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
++			(bna->port_num * 2), VLAN_RAM_BASE_OFFSET),
++			bna->regs.page_addr);
++
++	if (status == BNA_STATUS_T_ENABLED) {
++		/* enable VLAN filtering on this function */
++		for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
++			writel(rxf->vlan_filter_table[i],
++					BNA_GET_VLAN_MEM_ENTRY_ADDR
++					(bna->pcidev.pci_bar_kva, rxf->rxf_id,
++						i * 32));
++		}
++	} else {
++		/* disable VLAN filtering on this function */
++		for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
++			writel(0xffffffff,
++					BNA_GET_VLAN_MEM_ENTRY_ADDR
++					(bna->pcidev.pci_bar_kva, rxf->rxf_id,
++						i * 32));
++		}
++	}
++}
++
++static void
++__rxf_rit_set(struct bna_rxf *rxf)
++{
++	struct bna *bna = rxf->rx->bna;
++	struct bna_rit_mem *rit_mem;
++	int i;
++	void __iomem *base_addr;
++	unsigned long off;
++
++	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
++			FUNCTION_TO_RXQ_TRANSLATE);
++
++	rit_mem = (struct bna_rit_mem *)0;
++
++	writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + bna->port_num,
++		FUNCTION_TO_RXQ_TRANSLATE),
++		bna->regs.page_addr);
++
++	for (i = 0; i < rxf->rit_segment->rit_size; i++) {
++		off = (unsigned long)&rit_mem[i + rxf->rit_segment->rit_offset];
++		writel(rxf->rit_segment->rit[i].large_rxq_id << 6 |
++			rxf->rit_segment->rit[i].small_rxq_id,
++			base_addr + off);
++	}
++}
++
++static void
++__bna_rxf_stat_clr(struct bna_rxf *rxf)
++{
++	struct bfi_ll_stats_req ll_req;
++	u32 bm[2] = {0, 0};
++
++	if (rxf->rxf_id < 32)
++		bm[0] = 1 << rxf->rxf_id;
++	else
++		bm[1] = 1 << (rxf->rxf_id - 32);
++
++	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
++	ll_req.stats_mask = 0;
++	ll_req.txf_id_mask[0] = 0;
++	ll_req.txf_id_mask[1] =	0;
++
++	ll_req.rxf_id_mask[0] = htonl(bm[0]);
++	ll_req.rxf_id_mask[1] = htonl(bm[1]);
++
++	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
++			bna_rxf_cb_stats_cleared, rxf);
++	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
++}
++
++static void
++rxf_enable(struct bna_rxf *rxf)
++{
++	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
++		bfa_fsm_send_event(rxf, RXF_E_STARTED);
++	else {
++		rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
++		__rxf_enable(rxf);
++	}
++}
++
++static void
++rxf_cb_enabled(void *arg, int status)
++{
++	struct bna_rxf *rxf = (struct bna_rxf *)arg;
++
++	bfa_q_qe_init(&rxf->mbox_qe.qe);
++	bfa_fsm_send_event(rxf, RXF_E_STARTED);
++}
++
++static void
++rxf_disable(struct bna_rxf *rxf)
++{
++	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
++		bfa_fsm_send_event(rxf, RXF_E_STOPPED);
++	else {
++		rxf->rxf_flags &= ~BNA_RXF_FL_RXF_ENABLED;
++		__rxf_disable(rxf);
++	}
++}
++
++static void
++rxf_cb_disabled(void *arg, int status)
++{
++	struct bna_rxf *rxf = (struct bna_rxf *)arg;
++
++	bfa_q_qe_init(&rxf->mbox_qe.qe);
++	bfa_fsm_send_event(rxf, RXF_E_STOPPED);
++}
++
++void
++rxf_cb_cam_fltr_mbox_cmd(void *arg, int status)
++{
++	struct bna_rxf *rxf = (struct bna_rxf *)arg;
++
++	bfa_q_qe_init(&rxf->mbox_qe.qe);
++
++	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
++}
++
++static void
++bna_rxf_cb_stats_cleared(void *arg, int status)
++{
++	struct bna_rxf *rxf = (struct bna_rxf *)arg;
++
++	bfa_q_qe_init(&rxf->mbox_qe.qe);
++	bfa_fsm_send_event(rxf, RXF_E_STAT_CLEARED);
++}
++
++void
++rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
++		const struct bna_mac *mac_addr)
++{
++	struct bfi_ll_mac_addr_req req;
++
++	bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
++
++	req.rxf_id = rxf->rxf_id;
++	memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN);
++
++	bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
++				rxf_cb_cam_fltr_mbox_cmd, rxf);
++
++	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
++}
++
++static int
++rxf_process_packet_filter_mcast(struct bna_rxf *rxf)
++{
++	struct bna_mac *mac = NULL;
++	struct list_head *qe;
++
++	/* Add multicast entries */
++	if (!list_empty(&rxf->mcast_pending_add_q)) {
++		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
++		bfa_q_qe_init(qe);
++		mac = (struct bna_mac *)qe;
++		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_ADD_REQ, mac);
++		list_add_tail(&mac->qe, &rxf->mcast_active_q);
++		return 1;
++	}
++
++	/* Delete multicast entries previously added */
++	if (!list_empty(&rxf->mcast_pending_del_q)) {
++		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
++		bfa_q_qe_init(qe);
++		mac = (struct bna_mac *)qe;
++		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
++		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
++		return 1;
++	}
++
++	return 0;
++}
++
++static int
++rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
++{
++	/* Apply the VLAN filter */
++	if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
++		rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
++		if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC) &&
++			!(rxf->rxmode_active & BNA_RXMODE_DEFAULT))
++			__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
++	}
++
++	/* Apply RSS configuration */
++	if (rxf->rxf_flags & BNA_RXF_FL_RSS_CONFIG_PENDING) {
++		rxf->rxf_flags &= ~BNA_RXF_FL_RSS_CONFIG_PENDING;
++		if (rxf->rss_status == BNA_STATUS_T_DISABLED) {
++			/* RSS is being disabled */
++			rxf->ctrl_flags &= ~BNA_RXF_CF_RSS_ENABLE;
++			__rxf_rit_set(rxf);
++			__rxf_config_set(rxf);
++		} else {
++			/* RSS is being enabled or reconfigured */
++			rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
++			__rxf_rit_set(rxf);
++			__rxf_config_set(rxf);
++		}
++	}
++
++	return 0;
++}
++
++/**
++ * Processes pending ucast, mcast entry addition/deletion and issues the
++ * mailbox command. Also processes pending filter configuration - promiscuous
++ * mode, default mode, allmulti mode - and issues the mailbox command or
++ * directly applies it to h/w
++ */
++static int
++rxf_process_packet_filter(struct bna_rxf *rxf)
++{
++	/* Set the default MAC first */
++	if (rxf->ucast_pending_set > 0) {
++		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_SET_REQ,
++				rxf->ucast_active_mac);
++		rxf->ucast_pending_set--;
++		return 1;
++	}
++
++	if (rxf_process_packet_filter_ucast(rxf))
++		return 1;
++
++	if (rxf_process_packet_filter_mcast(rxf))
++		return 1;
++
++	if (rxf_process_packet_filter_promisc(rxf))
++		return 1;
++
++	if (rxf_process_packet_filter_default(rxf))
++		return 1;
++
++	if (rxf_process_packet_filter_allmulti(rxf))
++		return 1;
++
++	if (rxf_process_packet_filter_vlan(rxf))
++		return 1;
++
++	return 0;
++}
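++
++/*
++ * Note on the return convention above: each helper issues at most one
++ * mailbox command per call and returns 1 if it did, 0 otherwise. The
++ * rxf state machine can thus re-run this function on every mailbox
++ * completion until it returns 0, i.e. all pending work is drained.
++ */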
++
++static int
++rxf_clear_packet_filter_mcast(struct bna_rxf *rxf)
++{
++	struct bna_mac *mac = NULL;
++	struct list_head *qe;
++
++	/* 3. delete pending mcast entries */
++	if (!list_empty(&rxf->mcast_pending_del_q)) {
++		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
++		bfa_q_qe_init(qe);
++		mac = (struct bna_mac *)qe;
++		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
++		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
++		return 1;
++	}
++
++	/* 4. clear active mcast entries; move them to pending_add_q */
++	if (!list_empty(&rxf->mcast_active_q)) {
++		bfa_q_deq(&rxf->mcast_active_q, &qe);
++		bfa_q_qe_init(qe);
++		mac = (struct bna_mac *)qe;
++		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
++		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
++		return 1;
++	}
++
++	return 0;
++}
++
++/**
++ * In the rxf stop path, processes pending ucast/mcast delete queue and issues
++ * the mailbox command. Moves the active ucast/mcast entries to pending add q,
++ * so that they are added to CAM again in the rxf start path. Moves the current
++ * filter settings - promiscuous, default, allmulti - to pending filter
++ * configuration
++ */
++static int
++rxf_clear_packet_filter(struct bna_rxf *rxf)
++{
++	if (rxf_clear_packet_filter_ucast(rxf))
++		return 1;
++
++	if (rxf_clear_packet_filter_mcast(rxf))
++		return 1;
++
++	/* 5. clear active default MAC in the CAM */
++	if (rxf->ucast_pending_set > 0)
++		rxf->ucast_pending_set = 0;
++
++	if (rxf_clear_packet_filter_promisc(rxf))
++		return 1;
++
++	if (rxf_clear_packet_filter_default(rxf))
++		return 1;
++
++	if (rxf_clear_packet_filter_allmulti(rxf))
++		return 1;
++
++	return 0;
++}
++
++static void
++rxf_reset_packet_filter_mcast(struct bna_rxf *rxf)
++{
++	struct list_head *qe;
++	struct bna_mac *mac;
++
++	/* 3. Move active mcast entries to pending_add_q */
++	while (!list_empty(&rxf->mcast_active_q)) {
++		bfa_q_deq(&rxf->mcast_active_q, &qe);
++		bfa_q_qe_init(qe);
++		list_add_tail(qe, &rxf->mcast_pending_add_q);
++	}
++
++	/* 4. Throw away delete pending mcast entries */
++	while (!list_empty(&rxf->mcast_pending_del_q)) {
++		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
++		bfa_q_qe_init(qe);
++		mac = (struct bna_mac *)qe;
++		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
++	}
++}
++
++/**
++ * In the rxf fail path, throws away the ucast/mcast entries pending for
++ * deletion, moves all active ucast/mcast entries to pending queue so that
++ * they are added back to CAM in the rxf start path. Also moves the current
++ * filter configuration to pending filter configuration.
++ */
++static void
++rxf_reset_packet_filter(struct bna_rxf *rxf)
++{
++	rxf_reset_packet_filter_ucast(rxf);
++
++	rxf_reset_packet_filter_mcast(rxf);
++
++	/* 5. Turn off ucast set flag */
++	rxf->ucast_pending_set = 0;
++
++	rxf_reset_packet_filter_promisc(rxf);
++
++	rxf_reset_packet_filter_default(rxf);
++
++	rxf_reset_packet_filter_allmulti(rxf);
++}
++
++void
++bna_rxf_init(struct bna_rxf *rxf,
++		struct bna_rx *rx,
++		struct bna_rx_config *q_config)
++{
++	struct list_head *qe;
++	struct bna_rxp *rxp;
++
++	/* rxf_id is initialized during rx_mod init */
++	rxf->rx = rx;
++
++	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
++	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
++	rxf->ucast_pending_set = 0;
++	INIT_LIST_HEAD(&rxf->ucast_active_q);
++	rxf->ucast_active_mac = NULL;
++
++	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
++	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
++	INIT_LIST_HEAD(&rxf->mcast_active_q);
++
++	bfa_q_qe_init(&rxf->mbox_qe.qe);
++
++	if (q_config->vlan_strip_status == BNA_STATUS_T_ENABLED)
++		rxf->ctrl_flags |= BNA_RXF_CF_VLAN_STRIP;
++
++	rxf->rxf_oper_state = (q_config->paused) ?
++		BNA_RXF_OPER_STATE_PAUSED : BNA_RXF_OPER_STATE_RUNNING;
++
++	bna_rxf_adv_init(rxf, rx, q_config);
++
++	rxf->rit_segment = bna_rit_mod_seg_get(&rxf->rx->bna->rit_mod,
++					q_config->num_paths);
++
++	list_for_each(qe, &rx->rxp_q) {
++		rxp = (struct bna_rxp *)qe;
++		if (q_config->rxp_type == BNA_RXP_SINGLE)
++			rxf->mcast_rxq_id = rxp->rxq.single.only->rxq_id;
++		else
++			rxf->mcast_rxq_id = rxp->rxq.slr.large->rxq_id;
++		break;
++	}
++
++	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
++	memset(rxf->vlan_filter_table, 0,
++			(sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));
++
++	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
++}
++
++void
++bna_rxf_uninit(struct bna_rxf *rxf)
++{
++	struct bna_mac *mac;
++
++	bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
++	rxf->rit_segment = NULL;
++
++	rxf->ucast_pending_set = 0;
++
++	while (!list_empty(&rxf->ucast_pending_add_q)) {
++		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
++		bfa_q_qe_init(&mac->qe);
++		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
++	}
++
++	if (rxf->ucast_active_mac) {
++		bfa_q_qe_init(&rxf->ucast_active_mac->qe);
++		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
++			rxf->ucast_active_mac);
++		rxf->ucast_active_mac = NULL;
++	}
++
++	while (!list_empty(&rxf->mcast_pending_add_q)) {
++		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
++		bfa_q_qe_init(&mac->qe);
++		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
++	}
++
++	rxf->rx = NULL;
++}
++
++void
++bna_rxf_start(struct bna_rxf *rxf)
++{
++	rxf->start_cbfn = bna_rx_cb_rxf_started;
++	rxf->start_cbarg = rxf->rx;
++	rxf->rxf_flags &= ~BNA_RXF_FL_FAILED;
++	bfa_fsm_send_event(rxf, RXF_E_START);
++}
++
++void
++bna_rxf_stop(struct bna_rxf *rxf)
++{
++	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
++	rxf->stop_cbarg = rxf->rx;
++	bfa_fsm_send_event(rxf, RXF_E_STOP);
++}
++
++void
++bna_rxf_fail(struct bna_rxf *rxf)
++{
++	rxf->rxf_flags |= BNA_RXF_FL_FAILED;
++	bfa_fsm_send_event(rxf, RXF_E_FAIL);
++}
++
++int
++bna_rxf_state_get(struct bna_rxf *rxf)
++{
++	return bfa_sm_to_state(rxf_sm_table, rxf->fsm);
++}
++
++enum bna_cb_status
++bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
++		 void (*cbfn)(struct bnad *, struct bna_rx *,
++			      enum bna_cb_status))
++{
++	struct bna_rxf *rxf = &rx->rxf;
++
++	if (rxf->ucast_active_mac == NULL) {
++		rxf->ucast_active_mac =
++				bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
++		if (rxf->ucast_active_mac == NULL)
++			return BNA_CB_UCAST_CAM_FULL;
++		bfa_q_qe_init(&rxf->ucast_active_mac->qe);
++	}
++
++	memcpy(rxf->ucast_active_mac->addr, ucmac, ETH_ALEN);
++	rxf->ucast_pending_set++;
++	rxf->cam_fltr_cbfn = cbfn;
++	rxf->cam_fltr_cbarg = rx->bna->bnad;
++
++	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
++
++	return BNA_CB_SUCCESS;
++}
++
++enum bna_cb_status
++bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
++		 void (*cbfn)(struct bnad *, struct bna_rx *,
++			      enum bna_cb_status))
++{
++	struct bna_rxf *rxf = &rx->rxf;
++	struct list_head	*qe;
++	struct bna_mac *mac;
++
++	/* Check if already added */
++	list_for_each(qe, &rxf->mcast_active_q) {
++		mac = (struct bna_mac *)qe;
++		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
++			if (cbfn)
++				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
++			return BNA_CB_SUCCESS;
++		}
++	}
++
++	/* Check if pending addition */
++	list_for_each(qe, &rxf->mcast_pending_add_q) {
++		mac = (struct bna_mac *)qe;
++		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
++			if (cbfn)
++				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
++			return BNA_CB_SUCCESS;
++		}
++	}
++
++	mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
++	if (mac == NULL)
++		return BNA_CB_MCAST_LIST_FULL;
++	bfa_q_qe_init(&mac->qe);
++	memcpy(mac->addr, addr, ETH_ALEN);
++	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
++
++	rxf->cam_fltr_cbfn = cbfn;
++	rxf->cam_fltr_cbarg = rx->bna->bnad;
++
++	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
++
++	return BNA_CB_SUCCESS;
++}
++
++enum bna_cb_status
++bna_rx_mcast_del(struct bna_rx *rx, u8 *addr,
++		 void (*cbfn)(struct bnad *, struct bna_rx *,
++			      enum bna_cb_status))
++{
++	struct bna_rxf *rxf = &rx->rxf;
++	struct list_head *qe;
++	struct bna_mac *mac;
++
++	list_for_each(qe, &rxf->mcast_pending_add_q) {
++		mac = (struct bna_mac *)qe;
++		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
++			list_del(qe);
++			bfa_q_qe_init(qe);
++			bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
++			if (cbfn)
++				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
++			return BNA_CB_SUCCESS;
++		}
++	}
++
++	list_for_each(qe, &rxf->mcast_active_q) {
++		mac = (struct bna_mac *)qe;
++		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
++			list_del(qe);
++			bfa_q_qe_init(qe);
++			list_add_tail(qe, &rxf->mcast_pending_del_q);
++			rxf->cam_fltr_cbfn = cbfn;
++			rxf->cam_fltr_cbarg = rx->bna->bnad;
++			bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
++			return BNA_CB_SUCCESS;
++		}
++	}
++
++	return BNA_CB_INVALID_MAC;
++}
++
++enum bna_cb_status
++bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
++		     void (*cbfn)(struct bnad *, struct bna_rx *,
++				  enum bna_cb_status))
++{
++	struct bna_rxf *rxf = &rx->rxf;
++	struct list_head list_head;
++	struct list_head *qe;
++	u8 *mcaddr;
++	struct bna_mac *mac;
++	struct bna_mac *mac1;
++	int skip;
++	int delete;
++	int need_hw_config = 0;
++	int i;
++
++	/* Allocate nodes */
++	INIT_LIST_HEAD(&list_head);
++	for (i = 0, mcaddr = mclist; i < count; i++) {
++		mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
++		if (mac == NULL)
++			goto err_return;
++		bfa_q_qe_init(&mac->qe);
++		memcpy(mac->addr, mcaddr, ETH_ALEN);
++		list_add_tail(&mac->qe, &list_head);
++
++		mcaddr += ETH_ALEN;
++	}
++
++	/* Schedule for addition */
++	while (!list_empty(&list_head)) {
++		bfa_q_deq(&list_head, &qe);
++		mac = (struct bna_mac *)qe;
++		bfa_q_qe_init(&mac->qe);
++
++		skip = 0;
++
++		/* Skip if already added */
++		list_for_each(qe, &rxf->mcast_active_q) {
++			mac1 = (struct bna_mac *)qe;
++			if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
++				bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
++							mac);
++				skip = 1;
++				break;
++			}
++		}
++
++		if (skip)
++			continue;
++
++		/* Skip if pending addition */
++		list_for_each(qe, &rxf->mcast_pending_add_q) {
++			mac1 = (struct bna_mac *)qe;
++			if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
++				bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
++							mac);
++				skip = 1;
++				break;
++			}
++		}
++
++		if (skip)
++			continue;
++
++		need_hw_config = 1;
++		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
++	}
++
++	/**
++	 * Delete the entries that are in the pending_add_q but not
++	 * in the new list
++	 */
++	while (!list_empty(&rxf->mcast_pending_add_q)) {
++		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
++		mac = (struct bna_mac *)qe;
++		bfa_q_qe_init(&mac->qe);
++		for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
++			if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
++				delete = 0;
++				break;
++			}
++			mcaddr += ETH_ALEN;
++		}
++		if (delete)
++			bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
++		else
++			list_add_tail(&mac->qe, &list_head);
++	}
++	while (!list_empty(&list_head)) {
++		bfa_q_deq(&list_head, &qe);
++		mac = (struct bna_mac *)qe;
++		bfa_q_qe_init(&mac->qe);
++		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
++	}
++
++	/**
++	 * Schedule entries for deletion that are in the active_q but not
++	 * in the new list
++	 */
++	while (!list_empty(&rxf->mcast_active_q)) {
++		bfa_q_deq(&rxf->mcast_active_q, &qe);
++		mac = (struct bna_mac *)qe;
++		bfa_q_qe_init(&mac->qe);
++		for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
++			if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
++				delete = 0;
++				break;
++			}
++			mcaddr += ETH_ALEN;
++		}
++		if (delete) {
++			list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
++			need_hw_config = 1;
++		} else {
++			list_add_tail(&mac->qe, &list_head);
++		}
++	}
++	while (!list_empty(&list_head)) {
++		bfa_q_deq(&list_head, &qe);
++		mac = (struct bna_mac *)qe;
++		bfa_q_qe_init(&mac->qe);
++		list_add_tail(&mac->qe, &rxf->mcast_active_q);
++	}
++
++	if (need_hw_config) {
++		rxf->cam_fltr_cbfn = cbfn;
++		rxf->cam_fltr_cbarg = rx->bna->bnad;
++		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
++	} else if (cbfn)
++		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
++
++	return BNA_CB_SUCCESS;
++
++err_return:
++	while (!list_empty(&list_head)) {
++		bfa_q_deq(&list_head, &qe);
++		mac = (struct bna_mac *)qe;
++		bfa_q_qe_init(&mac->qe);
++		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
++	}
++
++	return BNA_CB_MCAST_LIST_FULL;
++}
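++
++/*
++ * A sketch of the reconciliation above: the new list is first staged on
++ * a local queue; entries already active or already pending addition are
++ * returned to the MCAM pool, the rest go to mcast_pending_add_q. Then
++ * pending-add and active entries that are absent from the new list are
++ * freed or scheduled for deletion, and the state machine is kicked only
++ * if something actually changed (need_hw_config).
++ */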
++
++void
++bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
++{
++	struct bna_rxf *rxf = &rx->rxf;
++	int index = (vlan_id >> 5);
++	int bit = (1 << (vlan_id & 0x1F));
++
++	rxf->vlan_filter_table[index] |= bit;
++	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
++		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
++		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
++	}
++}
++
++void
++bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
++{
++	struct bna_rxf *rxf = &rx->rxf;
++	int index = (vlan_id >> 5);
++	int bit = (1 << (vlan_id & 0x1F));
++
++	rxf->vlan_filter_table[index] &= ~bit;
++	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
++		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
++		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
++	}
++}
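++
++/*
++ * The VLAN filter table above is a plain bitmap of VLAN ids packed into
++ * 32-bit words. For example (illustrative numbers), vlan_id 100 yields
++ * index = 100 >> 5 = 3 and bit = 1 << (100 & 0x1F) = 1 << 4, i.e. bit 4
++ * of word 3.
++ */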
++
++/**
++ * RX
++ */
++#define	RXQ_RCB_INIT(q, rxp, qdepth, bna, _id, unmapq_mem)	do {	\
++	struct bna_doorbell_qset *_qset;				\
++	unsigned long off;						\
++	(q)->rcb->producer_index = (q)->rcb->consumer_index = 0;	\
++	(q)->rcb->q_depth = (qdepth);					\
++	(q)->rcb->unmap_q = unmapq_mem;					\
++	(q)->rcb->rxq = (q);						\
++	(q)->rcb->cq = &(rxp)->cq;					\
++	(q)->rcb->bnad = (bna)->bnad;					\
++	_qset = (struct bna_doorbell_qset *)0;			\
++	off = (unsigned long)&_qset[(q)->rxq_id].rxq[0];		\
++	(q)->rcb->q_dbell = off +					\
++		BNA_GET_DOORBELL_BASE_ADDR((bna)->pcidev.pci_bar_kva);	\
++	(q)->rcb->id = _id;						\
++} while (0)
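++
++/*
++ * The (struct bna_doorbell_qset *)0 arithmetic above is the classic
++ * offsetof idiom: indexing a zero base yields the byte offset of this
++ * queue's doorbell within the qset array, which is then added to the
++ * doorbell base address mapped from the PCI BAR.
++ */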
++
++#define	BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ?	\
++	(qcfg)->num_paths : ((qcfg)->num_paths * 2))
++
++#define	SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) &\
++	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
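++
++/*
++ * SIZE_TO_PAGES rounds a byte count up to whole pages. Assuming a 4 KiB
++ * PAGE_SIZE, a 6000-byte queue takes (6000 >> 12) + 1 = 2 pages, while
++ * an exact 8192 bytes takes 2 pages with no rounding term.
++ */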
++
++#define	call_rx_stop_callback(rx, status)				\
++	if ((rx)->stop_cbfn) {						\
++		(*(rx)->stop_cbfn)((rx)->stop_cbarg, rx, (status));	\
++		(rx)->stop_cbfn = NULL;					\
++		(rx)->stop_cbarg = NULL;				\
++	}
++
++/*
++ * Since rx_enable is a synchronous callback, there is no start_cbfn required.
++ * Instead, we'll call bnad_rx_post(rxp) so that bnad can post the buffers
++ * for each rxpath.
++ */
++
++#define	call_rx_disable_cbfn(rx, status)				\
++		if ((rx)->disable_cbfn)	{				\
++			(*(rx)->disable_cbfn)((rx)->disable_cbarg,	\
++					status);			\
++			(rx)->disable_cbfn = NULL;			\
++			(rx)->disable_cbarg = NULL;			\
++		}
++
++#define	rxqs_reqd(type, num_rxqs)					\
++	(((type) == BNA_RXP_SINGLE) ? (num_rxqs) : ((num_rxqs) * 2))
++
++#define rx_ib_fail(rx)						\
++do {								\
++	struct bna_rxp *rxp;					\
++	struct list_head *qe;						\
++	list_for_each(qe, &(rx)->rxp_q) {				\
++		rxp = (struct bna_rxp *)qe;			\
++		bna_ib_fail(rxp->cq.ib);			\
++	}							\
++} while (0)
++
++static void __bna_multi_rxq_stop(struct bna_rxp *, u32 *);
++static void __bna_rxq_start(struct bna_rxq *rxq);
++static void __bna_cq_start(struct bna_cq *cq);
++static void bna_rit_create(struct bna_rx *rx);
++static void bna_rx_cb_multi_rxq_stopped(void *arg, int status);
++static void bna_rx_cb_rxq_stopped_all(void *arg);
++
++bfa_fsm_state_decl(bna_rx, stopped,
++	struct bna_rx, enum bna_rx_event);
++bfa_fsm_state_decl(bna_rx, rxf_start_wait,
++	struct bna_rx, enum bna_rx_event);
++bfa_fsm_state_decl(bna_rx, started,
++	struct bna_rx, enum bna_rx_event);
++bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
++	struct bna_rx, enum bna_rx_event);
++bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
++	struct bna_rx, enum bna_rx_event);
++
++static struct bfa_sm_table rx_sm_table[] = {
++	{BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
++	{BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
++	{BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
++	{BFA_SM(bna_rx_sm_rxf_stop_wait), BNA_RX_RXF_STOP_WAIT},
++	{BFA_SM(bna_rx_sm_rxq_stop_wait), BNA_RX_RXQ_STOP_WAIT},
++};
++
++static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
++{
++	struct bna_rxp *rxp;
++	struct list_head *qe_rxp;
++
++	list_for_each(qe_rxp, &rx->rxp_q) {
++		rxp = (struct bna_rxp *)qe_rxp;
++		rx->rx_cleanup_cbfn(rx->bna->bnad, rxp->cq.ccb);
++	}
++
++	call_rx_stop_callback(rx, BNA_CB_SUCCESS);
++}
++
++static void bna_rx_sm_stopped(struct bna_rx *rx,
++				enum bna_rx_event event)
++{
++	switch (event) {
++	case RX_E_START:
++		bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
++		break;
++	case RX_E_STOP:
++		call_rx_stop_callback(rx, BNA_CB_SUCCESS);
++		break;
++	case RX_E_FAIL:
++		/* no-op */
++		break;
++	default:
++		bfa_sm_fault(rx->bna, event);
++		break;
++	}
++}
++
++static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
++{
++	struct bna_rxp *rxp;
++	struct list_head *qe_rxp;
++	struct bna_rxq *q0 = NULL, *q1 = NULL;
++
++	/* Setup the RIT */
++	bna_rit_create(rx);
++
++	list_for_each(qe_rxp, &rx->rxp_q) {
++		rxp = (struct bna_rxp *)qe_rxp;
++		bna_ib_start(rxp->cq.ib);
++		GET_RXQS(rxp, q0, q1);
++		q0->buffer_size = bna_port_mtu_get(&rx->bna->port);
++		__bna_rxq_start(q0);
++		rx->rx_post_cbfn(rx->bna->bnad, q0->rcb);
++		if (q1)  {
++			__bna_rxq_start(q1);
++			rx->rx_post_cbfn(rx->bna->bnad, q1->rcb);
++		}
++		__bna_cq_start(&rxp->cq);
++	}
++
++	bna_rxf_start(&rx->rxf);
++}
++
++static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
++				enum bna_rx_event event)
++{
++	switch (event) {
++	case RX_E_STOP:
++		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
++		break;
++	case RX_E_FAIL:
++		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
++		rx_ib_fail(rx);
++		bna_rxf_fail(&rx->rxf);
++		break;
++	case RX_E_RXF_STARTED:
++		bfa_fsm_set_state(rx, bna_rx_sm_started);
++		break;
++	default:
++		bfa_sm_fault(rx->bna, event);
++		break;
++	}
++}
++
++void
++bna_rx_sm_started_entry(struct bna_rx *rx)
++{
++	struct bna_rxp *rxp;
++	struct list_head *qe_rxp;
++
++	/* Start IB */
++	list_for_each(qe_rxp, &rx->rxp_q) {
++		rxp = (struct bna_rxp *)qe_rxp;
++		bna_ib_ack(&rxp->cq.ib->door_bell, 0);
++	}
++
++	bna_llport_admin_up(&rx->bna->port.llport);
++}
++
++void
++bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
++{
++	switch (event) {
++	case RX_E_FAIL:
++		bna_llport_admin_down(&rx->bna->port.llport);
++		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
++		rx_ib_fail(rx);
++		bna_rxf_fail(&rx->rxf);
++		break;
++	case RX_E_STOP:
++		bna_llport_admin_down(&rx->bna->port.llport);
++		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
++		break;
++	default:
++		bfa_sm_fault(rx->bna, event);
++		break;
++	}
++}
++
++void
++bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
++{
++	bna_rxf_stop(&rx->rxf);
++}
++
++void
++bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
++{
++	switch (event) {
++	case RX_E_RXF_STOPPED:
++		bfa_fsm_set_state(rx, bna_rx_sm_rxq_stop_wait);
++		break;
++	case RX_E_RXF_STARTED:
++		/**
++		 * RxF was in the process of starting up when
++		 * RXF_E_STOP was issued. Ignore this event
++		 */
++		break;
++	case RX_E_FAIL:
++		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
++		rx_ib_fail(rx);
++		bna_rxf_fail(&rx->rxf);
++		break;
++	default:
++		bfa_sm_fault(rx->bna, event);
++		break;
++	}
++}
++
++void
++bna_rx_sm_rxq_stop_wait_entry(struct bna_rx *rx)
++{
++	struct bna_rxp *rxp = NULL;
++	struct bna_rxq *q0 = NULL;
++	struct bna_rxq *q1 = NULL;
++	struct list_head	*qe;
++	u32 rxq_mask[2] = {0, 0};
++
++	/* Only one call to multi-rxq-stop for all RXPs in this RX */
++	bfa_wc_up(&rx->rxq_stop_wc);
++	list_for_each(qe, &rx->rxp_q) {
++		rxp = (struct bna_rxp *)qe;
++		GET_RXQS(rxp, q0, q1);
++		if (q0->rxq_id < 32)
++			rxq_mask[0] |= ((u32)1 << q0->rxq_id);
++		else
++			rxq_mask[1] |= ((u32)1 << (q0->rxq_id - 32));
++		if (q1) {
++			if (q1->rxq_id < 32)
++				rxq_mask[0] |= ((u32)1 << q1->rxq_id);
++			else
++				rxq_mask[1] |= ((u32)
++						1 << (q1->rxq_id - 32));
++		}
++	}
++
++	__bna_multi_rxq_stop(rxp, rxq_mask);
++}
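++
++/*
++ * The loop above folds every rxq id of this rx into a pair of 32-bit
++ * masks (ids 0-31 in word 0, ids 32-63 in word 1) so that one RXQ_STOP
++ * mailbox request can stop them all; the extra bfa_wc_up() keeps the
++ * wait counter from resuming until that single reply arrives.
++ */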
++
++void
++bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
++{
++	struct bna_rxp *rxp = NULL;
++	struct list_head	*qe;
++
++	switch (event) {
++	case RX_E_RXQ_STOPPED:
++		list_for_each(qe, &rx->rxp_q) {
++			rxp = (struct bna_rxp *)qe;
++			bna_ib_stop(rxp->cq.ib);
++		}
++		/* Fall through */
++	case RX_E_FAIL:
++		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
++		break;
++	default:
++		bfa_sm_fault(rx->bna, event);
++		break;
++	}
++}
++
++void
++__bna_multi_rxq_stop(struct bna_rxp *rxp, u32 *rxq_id_mask)
++{
++	struct bfi_ll_q_stop_req ll_req;
++
++	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RXQ_STOP_REQ, 0);
++	ll_req.q_id_mask[0] = htonl(rxq_id_mask[0]);
++	ll_req.q_id_mask[1] = htonl(rxq_id_mask[1]);
++	bna_mbox_qe_fill(&rxp->mbox_qe, &ll_req, sizeof(ll_req),
++		bna_rx_cb_multi_rxq_stopped, rxp);
++	bna_mbox_send(rxp->rx->bna, &rxp->mbox_qe);
++}
++
++void
++__bna_rxq_start(struct bna_rxq *rxq)
++{
++	struct bna_rxtx_q_mem *q_mem;
++	struct bna_rxq_mem rxq_cfg, *rxq_mem;
++	struct bna_dma_addr cur_q_addr;
++	/* struct bna_doorbell_qset *qset; */
++	struct bna_qpt *qpt;
++	u32 pg_num;
++	struct bna *bna = rxq->rx->bna;
++	void __iomem *base_addr;
++	unsigned long off;
++
++	qpt = &rxq->qpt;
++	cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
++
++	rxq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
++	rxq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
++	rxq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
++	rxq_cfg.cur_q_entry_hi = cur_q_addr.msb;
++
++	rxq_cfg.pg_cnt_n_prd_ptr = ((u32)qpt->page_count << 16) | 0x0;
++	rxq_cfg.entry_n_pg_size = ((u32)(BFI_RXQ_WI_SIZE >> 2) << 16) |
++		(qpt->page_size >> 2);
++	rxq_cfg.sg_n_cq_n_cns_ptr =
++		((u32)(rxq->rxp->cq.cq_id & 0xff) << 16) | 0x0;
++	rxq_cfg.buf_sz_n_q_state = ((u32)rxq->buffer_size << 16) |
++		BNA_Q_IDLE_STATE;
++	rxq_cfg.next_qid = 0x0 | (0x3 << 8);
++
++	/* Write the page number register */
++	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
++			HQM_RXTX_Q_RAM_BASE_OFFSET);
++	writel(pg_num, bna->regs.page_addr);
++
++	/* Write to h/w */
++	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
++					HQM_RXTX_Q_RAM_BASE_OFFSET);
++
++	q_mem = (struct bna_rxtx_q_mem *)0;
++	rxq_mem = &q_mem[rxq->rxq_id].rxq;
++
++	off = (unsigned long)&rxq_mem->pg_tbl_addr_lo;
++	writel(htonl(rxq_cfg.pg_tbl_addr_lo), base_addr + off);
++
++	off = (unsigned long)&rxq_mem->pg_tbl_addr_hi;
++	writel(htonl(rxq_cfg.pg_tbl_addr_hi), base_addr + off);
++
++	off = (unsigned long)&rxq_mem->cur_q_entry_lo;
++	writel(htonl(rxq_cfg.cur_q_entry_lo), base_addr + off);
++
++	off = (unsigned long)&rxq_mem->cur_q_entry_hi;
++	writel(htonl(rxq_cfg.cur_q_entry_hi), base_addr + off);
++
++	off = (unsigned long)&rxq_mem->pg_cnt_n_prd_ptr;
++	writel(rxq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
++
++	off = (unsigned long)&rxq_mem->entry_n_pg_size;
++	writel(rxq_cfg.entry_n_pg_size, base_addr + off);
++
++	off = (unsigned long)&rxq_mem->sg_n_cq_n_cns_ptr;
++	writel(rxq_cfg.sg_n_cq_n_cns_ptr, base_addr + off);
++
++	off = (unsigned long)&rxq_mem->buf_sz_n_q_state;
++	writel(rxq_cfg.buf_sz_n_q_state, base_addr + off);
++
++	off = (unsigned long)&rxq_mem->next_qid;
++	writel(rxq_cfg.next_qid, base_addr + off);
++
++	rxq->rcb->producer_index = 0;
++	rxq->rcb->consumer_index = 0;
++}
++
++void
++__bna_cq_start(struct bna_cq *cq)
++{
++	struct bna_cq_mem cq_cfg, *cq_mem;
++	const struct bna_qpt *qpt;
++	struct bna_dma_addr cur_q_addr;
++	u32 pg_num;
++	struct bna *bna = cq->rx->bna;
++	void __iomem *base_addr;
++	unsigned long off;
++
++	qpt = &cq->qpt;
++	cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
++
++	/*
++	 * Fill out structure, to be subsequently written
++	 * to hardware
++	 */
++	cq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
++	cq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
++	cq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
++	cq_cfg.cur_q_entry_hi = cur_q_addr.msb;
++
++	cq_cfg.pg_cnt_n_prd_ptr = (qpt->page_count << 16) | 0x0;
++	cq_cfg.entry_n_pg_size =
++		((u32)(BFI_CQ_WI_SIZE >> 2) << 16) | (qpt->page_size >> 2);
++	cq_cfg.int_blk_n_cns_ptr = ((((u32)cq->ib_seg_offset) << 24) |
++			((u32)(cq->ib->ib_id & 0xff)  << 16) | 0x0);
++	cq_cfg.q_state = BNA_Q_IDLE_STATE;
++
++	/* Write the page number register */
++	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
++				  HQM_CQ_RAM_BASE_OFFSET);
++
++	writel(pg_num, bna->regs.page_addr);
++
++	/* H/W write */
++	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
++					HQM_CQ_RAM_BASE_OFFSET);
++
++	cq_mem = (struct bna_cq_mem *)0;
++
++	off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_lo;
++	writel(htonl(cq_cfg.pg_tbl_addr_lo), base_addr + off);
++
++	off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_hi;
++	writel(htonl(cq_cfg.pg_tbl_addr_hi), base_addr + off);
++
++	off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_lo;
++	writel(htonl(cq_cfg.cur_q_entry_lo), base_addr + off);
++
++	off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_hi;
++	writel(htonl(cq_cfg.cur_q_entry_hi), base_addr + off);
++
++	off = (unsigned long)&cq_mem[cq->cq_id].pg_cnt_n_prd_ptr;
++	writel(cq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
++
++	off = (unsigned long)&cq_mem[cq->cq_id].entry_n_pg_size;
++	writel(cq_cfg.entry_n_pg_size, base_addr + off);
++
++	off = (unsigned long)&cq_mem[cq->cq_id].int_blk_n_cns_ptr;
++	writel(cq_cfg.int_blk_n_cns_ptr, base_addr + off);
++
++	off = (unsigned long)&cq_mem[cq->cq_id].q_state;
++	writel(cq_cfg.q_state, base_addr + off);
++
++	cq->ccb->producer_index = 0;
++	*(cq->ccb->hw_producer_index) = 0;
++}
++
++void
++bna_rit_create(struct bna_rx *rx)
++{
++	struct list_head	*qe_rxp;
++	struct bna *bna;
++	struct bna_rxp *rxp;
++	struct bna_rxq *q0 = NULL;
++	struct bna_rxq *q1 = NULL;
++	int offset;
++
++	bna = rx->bna;
++
++	offset = 0;
++	list_for_each(qe_rxp, &rx->rxp_q) {
++		rxp = (struct bna_rxp *)qe_rxp;
++		GET_RXQS(rxp, q0, q1);
++		rx->rxf.rit_segment->rit[offset].large_rxq_id = q0->rxq_id;
++		rx->rxf.rit_segment->rit[offset].small_rxq_id =
++						(q1 ? q1->rxq_id : 0);
++		offset++;
++	}
++}
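++
++/*
++ * The RIT built above holds one entry per rx-path: the large (data)
++ * rxq id paired with the small/header rxq id, the latter left 0 for
++ * single-queue paths. It is apparently written out to hardware by
++ * __rxf_rit_set().
++ */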
++
++int
++_rx_can_satisfy(struct bna_rx_mod *rx_mod,
++		struct bna_rx_config *rx_cfg)
++{
++	if ((rx_mod->rx_free_count == 0) ||
++		(rx_mod->rxp_free_count == 0) ||
++		(rx_mod->rxq_free_count == 0))
++		return 0;
++
++	if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
++		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
++			(rx_mod->rxq_free_count < rx_cfg->num_paths))
++				return 0;
++	} else {
++		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
++			(rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
++			return 0;
++	}
++
++	if (!bna_rit_mod_can_satisfy(&rx_mod->bna->rit_mod, rx_cfg->num_paths))
++		return 0;
++
++	return 1;
++}
++
++struct bna_rxq *
++_get_free_rxq(struct bna_rx_mod *rx_mod)
++{
++	struct bna_rxq *rxq = NULL;
++	struct list_head	*qe = NULL;
++
++	bfa_q_deq(&rx_mod->rxq_free_q, &qe);
++	if (qe) {
++		rx_mod->rxq_free_count--;
++		rxq = (struct bna_rxq *)qe;
++	}
++	return rxq;
++}
++
++void
++_put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
++{
++	bfa_q_qe_init(&rxq->qe);
++	list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
++	rx_mod->rxq_free_count++;
++}
++
++struct bna_rxp *
++_get_free_rxp(struct bna_rx_mod *rx_mod)
++{
++	struct list_head	*qe = NULL;
++	struct bna_rxp *rxp = NULL;
++
++	bfa_q_deq(&rx_mod->rxp_free_q, &qe);
++	if (qe) {
++		rx_mod->rxp_free_count--;
++
++		rxp = (struct bna_rxp *)qe;
++	}
++
++	return rxp;
++}
++
++void
++_put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
++{
++	bfa_q_qe_init(&rxp->qe);
++	list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
++	rx_mod->rxp_free_count++;
++}
++
++struct bna_rx *
++_get_free_rx(struct bna_rx_mod *rx_mod)
++{
++	struct list_head	*qe = NULL;
++	struct bna_rx *rx = NULL;
++
++	bfa_q_deq(&rx_mod->rx_free_q, &qe);
++	if (qe) {
++		rx_mod->rx_free_count--;
++
++		rx = (struct bna_rx *)qe;
++		bfa_q_qe_init(qe);
++		list_add_tail(&rx->qe, &rx_mod->rx_active_q);
++	}
++
++	return rx;
++}
++
++void
++_put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
++{
++	bfa_q_qe_init(&rx->qe);
++	list_add_tail(&rx->qe, &rx_mod->rx_free_q);
++	rx_mod->rx_free_count++;
++}
++
++void
++_rx_init(struct bna_rx *rx, struct bna *bna)
++{
++	rx->bna = bna;
++	rx->rx_flags = 0;
++
++	INIT_LIST_HEAD(&rx->rxp_q);
++
++	rx->rxq_stop_wc.wc_resume = bna_rx_cb_rxq_stopped_all;
++	rx->rxq_stop_wc.wc_cbarg = rx;
++	rx->rxq_stop_wc.wc_count = 0;
++
++	rx->stop_cbfn = NULL;
++	rx->stop_cbarg = NULL;
++}
++
++void
++_rxp_add_rxqs(struct bna_rxp *rxp,
++		struct bna_rxq *q0,
++		struct bna_rxq *q1)
++{
++	switch (rxp->type) {
++	case BNA_RXP_SINGLE:
++		rxp->rxq.single.only = q0;
++		rxp->rxq.single.reserved = NULL;
++		break;
++	case BNA_RXP_SLR:
++		rxp->rxq.slr.large = q0;
++		rxp->rxq.slr.small = q1;
++		break;
++	case BNA_RXP_HDS:
++		rxp->rxq.hds.data = q0;
++		rxp->rxq.hds.hdr = q1;
++		break;
++	default:
++		break;
++	}
++}
++
++void
++_rxq_qpt_init(struct bna_rxq *rxq,
++		struct bna_rxp *rxp,
++		u32 page_count,
++		u32 page_size,
++		struct bna_mem_descr *qpt_mem,
++		struct bna_mem_descr *swqpt_mem,
++		struct bna_mem_descr *page_mem)
++{
++	int	i;
++
++	rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
++	rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
++	rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
++	rxq->qpt.page_count = page_count;
++	rxq->qpt.page_size = page_size;
++
++	rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
++
++	for (i = 0; i < rxq->qpt.page_count; i++) {
++		rxq->rcb->sw_qpt[i] = page_mem[i].kva;
++		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
++			page_mem[i].dma.lsb;
++		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
++			page_mem[i].dma.msb;
++
++	}
++}
++
++void
++_rxp_cqpt_setup(struct bna_rxp *rxp,
++		u32 page_count,
++		u32 page_size,
++		struct bna_mem_descr *qpt_mem,
++		struct bna_mem_descr *swqpt_mem,
++		struct bna_mem_descr *page_mem)
++{
++	int	i;
++
++	rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
++	rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
++	rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
++	rxp->cq.qpt.page_count = page_count;
++	rxp->cq.qpt.page_size = page_size;
++
++	rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
++
++	for (i = 0; i < rxp->cq.qpt.page_count; i++) {
++		rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
++
++		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
++			page_mem[i].dma.lsb;
++		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
++			page_mem[i].dma.msb;
++
++	}
++}
++
++void
++_rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
++{
++	list_add_tail(&rxp->qe, &rx->rxp_q);
++}
++
++void
++_init_rxmod_queues(struct bna_rx_mod *rx_mod)
++{
++	INIT_LIST_HEAD(&rx_mod->rx_free_q);
++	INIT_LIST_HEAD(&rx_mod->rxq_free_q);
++	INIT_LIST_HEAD(&rx_mod->rxp_free_q);
++	INIT_LIST_HEAD(&rx_mod->rx_active_q);
++
++	rx_mod->rx_free_count = 0;
++	rx_mod->rxq_free_count = 0;
++	rx_mod->rxp_free_count = 0;
++}
++
++void
++_rx_ctor(struct bna_rx *rx, int id)
++{
++	bfa_q_qe_init(&rx->qe);
++	INIT_LIST_HEAD(&rx->rxp_q);
++	rx->bna = NULL;
++
++	rx->rxf.rxf_id = id;
++
++	/* FIXME: mbox_qe ctor()?? */
++	bfa_q_qe_init(&rx->mbox_qe.qe);
++
++	rx->stop_cbfn = NULL;
++	rx->stop_cbarg = NULL;
++}
++
++void
++bna_rx_cb_multi_rxq_stopped(void *arg, int status)
++{
++	struct bna_rxp *rxp = (struct bna_rxp *)arg;
++
++	bfa_wc_down(&rxp->rx->rxq_stop_wc);
++}
++
++void
++bna_rx_cb_rxq_stopped_all(void *arg)
++{
++	struct bna_rx *rx = (struct bna_rx *)arg;
++
++	bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
++}
++
++void
++bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
++			 enum bna_cb_status status)
++{
++	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
++
++	bfa_wc_down(&rx_mod->rx_stop_wc);
++}
++
++void
++bna_rx_mod_cb_rx_stopped_all(void *arg)
++{
++	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
++
++	if (rx_mod->stop_cbfn)
++		rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
++	rx_mod->stop_cbfn = NULL;
++}
++
++void
++bna_rx_start(struct bna_rx *rx)
++{
++	rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
++	if (rx->rx_flags & BNA_RX_F_ENABLE)
++		bfa_fsm_send_event(rx, RX_E_START);
++}
++
++void
++bna_rx_stop(struct bna_rx *rx)
++{
++	rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
++	if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
++		bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx, BNA_CB_SUCCESS);
++	else {
++		rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
++		rx->stop_cbarg = &rx->bna->rx_mod;
++		bfa_fsm_send_event(rx, RX_E_STOP);
++	}
++}
++
++void
++bna_rx_fail(struct bna_rx *rx)
++{
++	/* Indicate port is not enabled, and failed */
++	rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
++	rx->rx_flags |= BNA_RX_F_PORT_FAILED;
++	bfa_fsm_send_event(rx, RX_E_FAIL);
++}
++
++void
++bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
++{
++	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
++	if (rx->rxf.rxf_id < 32)
++		rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
++	else
++		rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
++				1 << (rx->rxf.rxf_id - 32));
++}
++
++void
++bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
++{
++	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
++	if (rx->rxf.rxf_id < 32)
++		rx->bna->rx_mod.rxf_bmap[0] &= ~((u32)1 << rx->rxf.rxf_id);
++	else
++		rx->bna->rx_mod.rxf_bmap[1] &=
++				~((u32)1 << (rx->rxf.rxf_id - 32));
++}
++
++void
++bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
++{
++	struct bna_rx *rx;
++	struct list_head *qe;
++
++	rx_mod->flags |= BNA_RX_MOD_F_PORT_STARTED;
++	if (type == BNA_RX_T_LOOPBACK)
++		rx_mod->flags |= BNA_RX_MOD_F_PORT_LOOPBACK;
++
++	list_for_each(qe, &rx_mod->rx_active_q) {
++		rx = (struct bna_rx *)qe;
++		if (rx->type == type)
++			bna_rx_start(rx);
++	}
++}
++
++void
++bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
++{
++	struct bna_rx *rx;
++	struct list_head *qe;
++
++	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
++	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
++
++	rx_mod->stop_cbfn = bna_port_cb_rx_stopped;
++
++	/**
++	 * Before calling bna_rx_stop(), increment rx_stop_wc as many times
++	 * as we are going to call bna_rx_stop
++	 */
++	list_for_each(qe, &rx_mod->rx_active_q) {
++		rx = (struct bna_rx *)qe;
++		if (rx->type == type)
++			bfa_wc_up(&rx_mod->rx_stop_wc);
++	}
++
++	if (rx_mod->rx_stop_wc.wc_count == 0) {
++		rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
++		rx_mod->stop_cbfn = NULL;
++		return;
++	}
++
++	list_for_each(qe, &rx_mod->rx_active_q) {
++		rx = (struct bna_rx *)qe;
++		if (rx->type == type)
++			bna_rx_stop(rx);
++	}
++}
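++
++/*
++ * The two passes above are deliberate: every matching rx bumps the wait
++ * counter first, so a bna_rx_stop() that completes synchronously cannot
++ * drive the counter to zero (and fire the resume callback) while later
++ * rx instances are still being stopped.
++ */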
++
++void
++bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
++{
++	struct bna_rx *rx;
++	struct list_head *qe;
++
++	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
++	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
++
++	list_for_each(qe, &rx_mod->rx_active_q) {
++		rx = (struct bna_rx *)qe;
++		bna_rx_fail(rx);
++	}
++}
++
++void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
++			struct bna_res_info *res_info)
++{
++	int	index;
++	struct bna_rx *rx_ptr;
++	struct bna_rxp *rxp_ptr;
++	struct bna_rxq *rxq_ptr;
++
++	rx_mod->bna = bna;
++	rx_mod->flags = 0;
++
++	rx_mod->rx = (struct bna_rx *)
++		res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
++	rx_mod->rxp = (struct bna_rxp *)
++		res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
++	rx_mod->rxq = (struct bna_rxq *)
++		res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
++
++	/* Initialize the queues */
++	_init_rxmod_queues(rx_mod);
++
++	/* Build RX queues */
++	for (index = 0; index < BFI_MAX_RXQ; index++) {
++		rx_ptr = &rx_mod->rx[index];
++		_rx_ctor(rx_ptr, index);
++		list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
++		rx_mod->rx_free_count++;
++	}
++
++	/* build RX-path queue */
++	for (index = 0; index < BFI_MAX_RXQ; index++) {
++		rxp_ptr = &rx_mod->rxp[index];
++		rxp_ptr->cq.cq_id = index;
++		bfa_q_qe_init(&rxp_ptr->qe);
++		list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
++		rx_mod->rxp_free_count++;
++	}
++
++	/* build RXQ queue */
++	for (index = 0; index < BFI_MAX_RXQ; index++) {
++		rxq_ptr = &rx_mod->rxq[index];
++		rxq_ptr->rxq_id = index;
++
++		bfa_q_qe_init(&rxq_ptr->qe);
++		list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
++		rx_mod->rxq_free_count++;
++	}
++
++	rx_mod->rx_stop_wc.wc_resume = bna_rx_mod_cb_rx_stopped_all;
++	rx_mod->rx_stop_wc.wc_cbarg = rx_mod;
++	rx_mod->rx_stop_wc.wc_count = 0;
++}
++
++void
++bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
++{
++	struct list_head		*qe;
++	int i;
++
++	/*
++	 * The walks below only count what is left on the free queues;
++	 * the counts are computed and then discarded (apparently
++	 * leftover sanity checks).
++	 */
++	i = 0;
++	list_for_each(qe, &rx_mod->rx_free_q)
++		i++;
++
++	i = 0;
++	list_for_each(qe, &rx_mod->rxp_free_q)
++		i++;
++
++	i = 0;
++	list_for_each(qe, &rx_mod->rxq_free_q)
++		i++;
++
++	rx_mod->bna = NULL;
++}
++
++int
++bna_rx_state_get(struct bna_rx *rx)
++{
++	return bfa_sm_to_state(rx_sm_table, rx->fsm);
++}
++
++void
++bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
++{
++	u32 cq_size, hq_size, dq_size;
++	u32 cpage_count, hpage_count, dpage_count;
++	struct bna_mem_info *mem_info;
++	u32 cq_depth;
++	u32 hq_depth;
++	u32 dq_depth;
++
++	dq_depth = q_cfg->q_depth;
++	hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
++	cq_depth = dq_depth + hq_depth;
++
++	BNA_TO_POWER_OF_2_HIGH(cq_depth);
++	cq_size = cq_depth * BFI_CQ_WI_SIZE;
++	cq_size = ALIGN(cq_size, PAGE_SIZE);
++	cpage_count = SIZE_TO_PAGES(cq_size);
++
++	BNA_TO_POWER_OF_2_HIGH(dq_depth);
++	dq_size = dq_depth * BFI_RXQ_WI_SIZE;
++	dq_size = ALIGN(dq_size, PAGE_SIZE);
++	dpage_count = SIZE_TO_PAGES(dq_size);
++
++	if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
++		BNA_TO_POWER_OF_2_HIGH(hq_depth);
++		hq_size = hq_depth * BFI_RXQ_WI_SIZE;
++		hq_size = ALIGN(hq_size, PAGE_SIZE);
++		hpage_count = SIZE_TO_PAGES(hq_size);
++	} else {
++		hpage_count = 0;
++	}
++
++	/* CCB structures */
++	res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
++	mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
++	mem_info->mem_type = BNA_MEM_T_KVA;
++	mem_info->len = sizeof(struct bna_ccb);
++	mem_info->num = q_cfg->num_paths;
++
++	/* RCB structures */
++	res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
++	mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
++	mem_info->mem_type = BNA_MEM_T_KVA;
++	mem_info->len = sizeof(struct bna_rcb);
++	mem_info->num = BNA_GET_RXQS(q_cfg);
++
++	/* Completion QPT */
++	res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
++	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
++	mem_info->mem_type = BNA_MEM_T_DMA;
++	mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
++	mem_info->num = q_cfg->num_paths;
++
++	/* Completion s/w QPT */
++	res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
++	mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
++	mem_info->mem_type = BNA_MEM_T_KVA;
++	mem_info->len = cpage_count * sizeof(void *);
++	mem_info->num = q_cfg->num_paths;
++
++	/* Completion QPT pages */
++	res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
++	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
++	mem_info->mem_type = BNA_MEM_T_DMA;
++	mem_info->len = PAGE_SIZE;
++	mem_info->num = cpage_count * q_cfg->num_paths;
++
++	/* Data QPTs */
++	res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
++	mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
++	mem_info->mem_type = BNA_MEM_T_DMA;
++	mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
++	mem_info->num = q_cfg->num_paths;
++
++	/* Data s/w QPTs */
++	res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
++	mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
++	mem_info->mem_type = BNA_MEM_T_KVA;
++	mem_info->len = dpage_count * sizeof(void *);
++	mem_info->num = q_cfg->num_paths;
++
++	/* Data QPT pages */
++	res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
++	mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
++	mem_info->mem_type = BNA_MEM_T_DMA;
++	mem_info->len = PAGE_SIZE;
++	mem_info->num = dpage_count * q_cfg->num_paths;
++
++	/* Hdr QPTs */
++	res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
++	mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
++	mem_info->mem_type = BNA_MEM_T_DMA;
++	mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
++	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
++
++	/* Hdr s/w QPTs */
++	res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
++	mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
++	mem_info->mem_type = BNA_MEM_T_KVA;
++	mem_info->len = hpage_count * sizeof(void *);
++	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
++
++	/* Hdr QPT pages */
++	res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
++	mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
++	mem_info->mem_type = BNA_MEM_T_DMA;
++	mem_info->len = (hpage_count ? PAGE_SIZE : 0);
++	mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);
++
++	/* RX Interrupts */
++	res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
++	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
++	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
++}
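++
++/*
++ * A worked sizing example (illustrative numbers): an SLR configuration
++ * with q_depth 512 gives dq_depth = hq_depth = 512 and cq_depth = 1024,
++ * already a power of two, so each path's CQ needs
++ * SIZE_TO_PAGES(ALIGN(1024 * BFI_CQ_WI_SIZE, PAGE_SIZE)) pages.
++ */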
++
++struct bna_rx *
++bna_rx_create(struct bna *bna, struct bnad *bnad,
++		struct bna_rx_config *rx_cfg,
++		struct bna_rx_event_cbfn *rx_cbfn,
++		struct bna_res_info *res_info,
++		void *priv)
++{
++	struct bna_rx_mod *rx_mod = &bna->rx_mod;
++	struct bna_rx *rx;
++	struct bna_rxp *rxp;
++	struct bna_rxq *q0;
++	struct bna_rxq *q1;
++	struct bna_intr_info *intr_info;
++	u32 page_count;
++	struct bna_mem_descr *ccb_mem;
++	struct bna_mem_descr *rcb_mem;
++	struct bna_mem_descr *unmapq_mem;
++	struct bna_mem_descr *cqpt_mem;
++	struct bna_mem_descr *cswqpt_mem;
++	struct bna_mem_descr *cpage_mem;
++	struct bna_mem_descr *hqpt_mem;	/* Header/Small Q qpt */
++	struct bna_mem_descr *dqpt_mem;	/* Data/Large Q qpt */
++	struct bna_mem_descr *hsqpt_mem;	/* s/w qpt for hdr */
++	struct bna_mem_descr *dsqpt_mem;	/* s/w qpt for data */
++	struct bna_mem_descr *hpage_mem;	/* hdr page mem */
++	struct bna_mem_descr *dpage_mem;	/* data page mem */
++	int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0, ret;
++	int dpage_count, hpage_count, rcb_idx;
++	struct bna_ib_config ibcfg;
++	/* Fail if we don't have enough RXPs, RXQs */
++	if (!_rx_can_satisfy(rx_mod, rx_cfg))
++		return NULL;
++
++	/* Initialize resource pointers */
++	intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
++	ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
++	rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
++	unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
++	cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
++	cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
++	cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
++	hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
++	dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
++	hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
++	dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
++	hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
++	dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
++
++	/* Compute q depth & page count */
++	page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
++			rx_cfg->num_paths;
++
++	dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
++			rx_cfg->num_paths;
++
++	hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
++			rx_cfg->num_paths;
++	/* Get RX pointer */
++	rx = _get_free_rx(rx_mod);
++	_rx_init(rx, bna);
++	rx->priv = priv;
++	rx->type = rx_cfg->rx_type;
++
++	rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
++	rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
++	rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
++	rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
++	/* Following callbacks are mandatory */
++	rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
++	rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
++
++	if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_STARTED) {
++		switch (rx->type) {
++		case BNA_RX_T_REGULAR:
++			if (!(rx->bna->rx_mod.flags &
++				BNA_RX_MOD_F_PORT_LOOPBACK))
++				rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
++			break;
++		case BNA_RX_T_LOOPBACK:
++			if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_LOOPBACK)
++				rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
++			break;
++		}
++	}
++
++	for (i = 0, rcb_idx = 0; i < rx_cfg->num_paths; i++) {
++		rxp = _get_free_rxp(rx_mod);
++		rxp->type = rx_cfg->rxp_type;
++		rxp->rx = rx;
++		rxp->cq.rx = rx;
++
++		/* Get required RXQs, and queue them to rx-path */
++		q0 = _get_free_rxq(rx_mod);
++		if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
++			q1 = NULL;
++		else
++			q1 = _get_free_rxq(rx_mod);
++
++		/* Initialize IB */
++		if (1 == intr_info->num) {
++			rxp->cq.ib = bna_ib_get(&bna->ib_mod,
++					intr_info->intr_type,
++					intr_info->idl[0].vector);
++			rxp->vector = intr_info->idl[0].vector;
++		} else {
++			rxp->cq.ib = bna_ib_get(&bna->ib_mod,
++					intr_info->intr_type,
++					intr_info->idl[i].vector);
++
++			/* Map the MSI-x vector used for this RXP */
++			rxp->vector = intr_info->idl[i].vector;
++		}
++
++		rxp->cq.ib_seg_offset = bna_ib_reserve_idx(rxp->cq.ib);
++
++		ibcfg.coalescing_timeo = BFI_RX_COALESCING_TIMEO;
++		ibcfg.interpkt_count = BFI_RX_INTERPKT_COUNT;
++		ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
++		ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE;
++
++		ret = bna_ib_config(rxp->cq.ib, &ibcfg);
++
++		/* Link rxqs to rxp */
++		_rxp_add_rxqs(rxp, q0, q1);
++
++		/* Link rxp to rx */
++		_rx_add_rxp(rx, rxp);
++
++		q0->rx = rx;
++		q0->rxp = rxp;
++
++		/* Initialize RCB for the large / data q */
++		q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
++		RXQ_RCB_INIT(q0, rxp, rx_cfg->q_depth, bna, 0,
++			(void *)unmapq_mem[rcb_idx].kva);
++		rcb_idx++;
++		(q0)->rx_packets = (q0)->rx_bytes = 0;
++		(q0)->rx_packets_with_error = (q0)->rxbuf_alloc_failed = 0;
++
++		/* Initialize RXQs */
++		_rxq_qpt_init(q0, rxp, dpage_count, PAGE_SIZE,
++			&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
++		q0->rcb->page_idx = dpage_idx;
++		q0->rcb->page_count = dpage_count;
++		dpage_idx += dpage_count;
++
++		/* Call bnad to complete rcb setup */
++		if (rx->rcb_setup_cbfn)
++			rx->rcb_setup_cbfn(bnad, q0->rcb);
++
++		if (q1) {
++			q1->rx = rx;
++			q1->rxp = rxp;
++
++			q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
++			RXQ_RCB_INIT(q1, rxp, rx_cfg->q_depth, bna, 1,
++				(void *)unmapq_mem[rcb_idx].kva);
++			rcb_idx++;
++			(q1)->buffer_size = (rx_cfg)->small_buff_size;
++			(q1)->rx_packets = (q1)->rx_bytes = 0;
++			(q1)->rx_packets_with_error =
++				(q1)->rxbuf_alloc_failed = 0;
++
++			_rxq_qpt_init(q1, rxp, hpage_count, PAGE_SIZE,
++				&hqpt_mem[i], &hsqpt_mem[i],
++				&hpage_mem[hpage_idx]);
++			q1->rcb->page_idx = hpage_idx;
++			q1->rcb->page_count = hpage_count;
++			hpage_idx += hpage_count;
++
++			/* Call bnad to complete rcb setup */
++			if (rx->rcb_setup_cbfn)
++				rx->rcb_setup_cbfn(bnad, q1->rcb);
++		}
++		/* Setup RXP::CQ */
++		rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
++		_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
++			&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
++		rxp->cq.ccb->page_idx = cpage_idx;
++		rxp->cq.ccb->page_count = page_count;
++		cpage_idx += page_count;
++
++		rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
++		rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
++
++		rxp->cq.ccb->producer_index = 0;
++		rxp->cq.ccb->q_depth =	rx_cfg->q_depth +
++					((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
++					0 : rx_cfg->q_depth);
++		rxp->cq.ccb->i_dbell = &rxp->cq.ib->door_bell;
++		rxp->cq.ccb->rcb[0] = q0->rcb;
++		if (q1)
++			rxp->cq.ccb->rcb[1] = q1->rcb;
++		rxp->cq.ccb->cq = &rxp->cq;
++		rxp->cq.ccb->bnad = bna->bnad;
++		rxp->cq.ccb->hw_producer_index =
++			((volatile u32 *)rxp->cq.ib->ib_seg_host_addr_kva +
++				      (rxp->cq.ib_seg_offset * BFI_IBIDX_SIZE));
++		*(rxp->cq.ccb->hw_producer_index) = 0;
++		rxp->cq.ccb->intr_type = intr_info->intr_type;
++		rxp->cq.ccb->intr_vector = (intr_info->num == 1) ?
++						intr_info->idl[0].vector :
++						intr_info->idl[i].vector;
++		rxp->cq.ccb->rx_coalescing_timeo =
++					rxp->cq.ib->ib_config.coalescing_timeo;
++		rxp->cq.ccb->id = i;
++
++		/* Call bnad to complete CCB setup */
++		if (rx->ccb_setup_cbfn)
++			rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
++
++	} /* for each rx-path */
++
++	bna_rxf_init(&rx->rxf, rx, rx_cfg);
++
++	bfa_fsm_set_state(rx, bna_rx_sm_stopped);
++
++	return rx;
++}
++
++void
++bna_rx_destroy(struct bna_rx *rx)
++{
++	struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
++	struct bna_ib_mod *ib_mod = &rx->bna->ib_mod;
++	struct bna_rxq *q0 = NULL;
++	struct bna_rxq *q1 = NULL;
++	struct bna_rxp *rxp;
++	struct list_head *qe;
++
++	bna_rxf_uninit(&rx->rxf);
++
++	while (!list_empty(&rx->rxp_q)) {
++		bfa_q_deq(&rx->rxp_q, &rxp);
++		GET_RXQS(rxp, q0, q1);
++		/* Callback to bnad for destroying RCB */
++		if (rx->rcb_destroy_cbfn)
++			rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
++		q0->rcb = NULL;
++		q0->rxp = NULL;
++		q0->rx = NULL;
++		_put_free_rxq(rx_mod, q0);
++		if (q1) {
++			/* Callback to bnad for destroying RCB */
++			if (rx->rcb_destroy_cbfn)
++				rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
++			q1->rcb = NULL;
++			q1->rxp = NULL;
++			q1->rx = NULL;
++			_put_free_rxq(rx_mod, q1);
++		}
++		rxp->rxq.slr.large = NULL;
++		rxp->rxq.slr.small = NULL;
++		if (rxp->cq.ib) {
++			if (rxp->cq.ib_seg_offset != 0xff)
++				bna_ib_release_idx(rxp->cq.ib,
++						rxp->cq.ib_seg_offset);
++			bna_ib_put(ib_mod, rxp->cq.ib);
++			rxp->cq.ib = NULL;
++		}
++		/* Callback to bnad for destroying CCB */
++		if (rx->ccb_destroy_cbfn)
++			rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
++		rxp->cq.ccb = NULL;
++		rxp->rx = NULL;
++		_put_free_rxp(rx_mod, rxp);
++	}
++
++	list_for_each(qe, &rx_mod->rx_active_q) {
++		if (qe == &rx->qe) {
++			list_del(&rx->qe);
++			bfa_q_qe_init(&rx->qe);
++			break;
++		}
++	}
++
++	rx->bna = NULL;
++	rx->priv = NULL;
++	_put_free_rx(rx_mod, rx);
++}
++
++void
++bna_rx_enable(struct bna_rx *rx)
++{
++	if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
++		return;
++
++	rx->rx_flags |= BNA_RX_F_ENABLE;
++	if (rx->rx_flags & BNA_RX_F_PORT_ENABLED)
++		bfa_fsm_send_event(rx, RX_E_START);
++}
++
++void
++bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
++		void (*cbfn)(void *, struct bna_rx *,
++				enum bna_cb_status))
++{
++	if (type == BNA_SOFT_CLEANUP) {
++		/* h/w should not be accessed. Treat it as if we are stopped */
++		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
++	} else {
++		rx->stop_cbfn = cbfn;
++		rx->stop_cbarg = rx->bna->bnad;
++
++		rx->rx_flags &= ~BNA_RX_F_ENABLE;
++
++		bfa_fsm_send_event(rx, RX_E_STOP);
++	}
++}
++
++/**
++ * TX
++ */
++#define call_tx_stop_cbfn(tx, status)\
++do {\
++	if ((tx)->stop_cbfn)\
++		(tx)->stop_cbfn((tx)->stop_cbarg, (tx), status);\
++	(tx)->stop_cbfn = NULL;\
++	(tx)->stop_cbarg = NULL;\
++} while (0)
++
++#define call_tx_prio_change_cbfn(tx, status)\
++do {\
++	if ((tx)->prio_change_cbfn)\
++		(tx)->prio_change_cbfn((tx)->bna->bnad, (tx), status);\
++	(tx)->prio_change_cbfn = NULL;\
++} while (0)
++
++static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx,
++					enum bna_cb_status status);
++static void bna_tx_cb_txq_stopped(void *arg, int status);
++static void bna_tx_cb_stats_cleared(void *arg, int status);
++static void __bna_tx_stop(struct bna_tx *tx);
++static void __bna_tx_start(struct bna_tx *tx);
++static void __bna_txf_stat_clr(struct bna_tx *tx);
++
++enum bna_tx_event {
++	TX_E_START			= 1,
++	TX_E_STOP			= 2,
++	TX_E_FAIL			= 3,
++	TX_E_TXQ_STOPPED		= 4,
++	TX_E_PRIO_CHANGE		= 5,
++	TX_E_STAT_CLEARED		= 6,
++};
++
++enum bna_tx_state {
++	BNA_TX_STOPPED			= 1,
++	BNA_TX_STARTED			= 2,
++	BNA_TX_TXQ_STOP_WAIT		= 3,
++	BNA_TX_PRIO_STOP_WAIT		= 4,
++	BNA_TX_STAT_CLR_WAIT		= 5,
++};
++
++bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx,
++			enum bna_tx_event);
++bfa_fsm_state_decl(bna_tx, started, struct bna_tx,
++			enum bna_tx_event);
++bfa_fsm_state_decl(bna_tx, txq_stop_wait, struct bna_tx,
++			enum bna_tx_event);
++bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
++			enum bna_tx_event);
++bfa_fsm_state_decl(bna_tx, stat_clr_wait, struct bna_tx,
++			enum bna_tx_event);
++
++static struct bfa_sm_table tx_sm_table[] = {
++	{BFA_SM(bna_tx_sm_stopped), BNA_TX_STOPPED},
++	{BFA_SM(bna_tx_sm_started), BNA_TX_STARTED},
++	{BFA_SM(bna_tx_sm_txq_stop_wait), BNA_TX_TXQ_STOP_WAIT},
++	{BFA_SM(bna_tx_sm_prio_stop_wait), BNA_TX_PRIO_STOP_WAIT},
++	{BFA_SM(bna_tx_sm_stat_clr_wait), BNA_TX_STAT_CLR_WAIT},
++};
++
++static void
++bna_tx_sm_stopped_entry(struct bna_tx *tx)
++{
++	struct bna_txq *txq;
++	struct list_head		 *qe;
++
++	list_for_each(qe, &tx->txq_q) {
++		txq = (struct bna_txq *)qe;
++		(tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
++	}
++
++	call_tx_stop_cbfn(tx, BNA_CB_SUCCESS);
++}
++
++static void
++bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
++{
++	switch (event) {
++	case TX_E_START:
++		bfa_fsm_set_state(tx, bna_tx_sm_started);
++		break;
++
++	case TX_E_STOP:
++		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
++		break;
++
++	case TX_E_FAIL:
++		/* No-op */
++		break;
++
++	case TX_E_PRIO_CHANGE:
++		call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
++		break;
++
++	case TX_E_TXQ_STOPPED:
++		/**
++		 * This event is received due to flushing of mbox when
++		 * device fails
++		 */
++		/* No-op */
++		break;
++
++	default:
++		bfa_sm_fault(tx->bna, event);
++	}
++}
++
++static void
++bna_tx_sm_started_entry(struct bna_tx *tx)
++{
++	struct bna_txq *txq;
++	struct list_head		 *qe;
++
++	__bna_tx_start(tx);
++
++	/* Start IB */
++	list_for_each(qe, &tx->txq_q) {
++		txq = (struct bna_txq *)qe;
++		bna_ib_ack(&txq->ib->door_bell, 0);
++	}
++}
++
++static void
++bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
++{
++	struct bna_txq *txq;
++	struct list_head		 *qe;
++
++	switch (event) {
++	case TX_E_STOP:
++		bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
++		__bna_tx_stop(tx);
++		break;
++
++	case TX_E_FAIL:
++		list_for_each(qe, &tx->txq_q) {
++			txq = (struct bna_txq *)qe;
++			bna_ib_fail(txq->ib);
++			(tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
++		}
++		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
++		break;
++
++	case TX_E_PRIO_CHANGE:
++		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
++		break;
++
++	default:
++		bfa_sm_fault(tx->bna, event);
++	}
++}
++
++static void
++bna_tx_sm_txq_stop_wait_entry(struct bna_tx *tx)
++{
++}
++
++static void
++bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
++{
++	struct bna_txq *txq;
++	struct list_head		 *qe;
++
++	switch (event) {
++	case TX_E_FAIL:
++		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
++		break;
++
++	case TX_E_TXQ_STOPPED:
++		list_for_each(qe, &tx->txq_q) {
++			txq = (struct bna_txq *)qe;
++			bna_ib_stop(txq->ib);
++		}
++		bfa_fsm_set_state(tx, bna_tx_sm_stat_clr_wait);
++		break;
++
++	case TX_E_PRIO_CHANGE:
++		/* No-op */
++		break;
++
++	default:
++		bfa_sm_fault(tx->bna, event);
++	}
++}
++
++static void
++bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
++{
++	__bna_tx_stop(tx);
++}
++
++static void
++bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
++{
++	struct bna_txq *txq;
++	struct list_head		 *qe;
++
++	switch (event) {
++	case TX_E_STOP:
++		bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
++		break;
++
++	case TX_E_FAIL:
++		call_tx_prio_change_cbfn(tx, BNA_CB_FAIL);
++		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
++		break;
++
++	case TX_E_TXQ_STOPPED:
++		list_for_each(qe, &tx->txq_q) {
++			txq = (struct bna_txq *)qe;
++			bna_ib_stop(txq->ib);
++			(tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
++		}
++		call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
++		bfa_fsm_set_state(tx, bna_tx_sm_started);
++		break;
++
++	case TX_E_PRIO_CHANGE:
++		/* No-op */
++		break;
++
++	default:
++		bfa_sm_fault(tx->bna, event);
++	}
++}
++
++static void
++bna_tx_sm_stat_clr_wait_entry(struct bna_tx *tx)
++{
++	__bna_txf_stat_clr(tx);
++}
++
++static void
++bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
++{
++	switch (event) {
++	case TX_E_FAIL:
++	case TX_E_STAT_CLEARED:
++		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
++		break;
++
++	default:
++		bfa_sm_fault(tx->bna, event);
++	}
++}
++
++static void
++__bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
++{
++	struct bna_rxtx_q_mem *q_mem;
++	struct bna_txq_mem txq_cfg;
++	struct bna_txq_mem *txq_mem;
++	struct bna_dma_addr cur_q_addr;
++	u32 pg_num;
++	void __iomem *base_addr;
++	unsigned long off;
++
++	/* Fill out structure, to be subsequently written to hardware */
++	txq_cfg.pg_tbl_addr_lo = txq->qpt.hw_qpt_ptr.lsb;
++	txq_cfg.pg_tbl_addr_hi = txq->qpt.hw_qpt_ptr.msb;
++	cur_q_addr = *((struct bna_dma_addr *)(txq->qpt.kv_qpt_ptr));
++	txq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
++	txq_cfg.cur_q_entry_hi = cur_q_addr.msb;
++
++	txq_cfg.pg_cnt_n_prd_ptr = (txq->qpt.page_count << 16) | 0x0;
++
++	txq_cfg.entry_n_pg_size = ((u32)(BFI_TXQ_WI_SIZE >> 2) << 16) |
++			(txq->qpt.page_size >> 2);
++	txq_cfg.int_blk_n_cns_ptr = ((((u32)txq->ib_seg_offset) << 24) |
++			((u32)(txq->ib->ib_id & 0xff) << 16) | 0x0);
++
++	txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
++	txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
++			(txq->priority & 0x3));
++	txq_cfg.wvc_n_cquota_n_rquota =
++			((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
++			(BFI_TX_MAX_WRR_QUOTA & 0xfff));
++
++	/* Setup the page and write to H/W */
++
++	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + tx->bna->port_num,
++			HQM_RXTX_Q_RAM_BASE_OFFSET);
++	writel(pg_num, tx->bna->regs.page_addr);
++
++	base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
++					HQM_RXTX_Q_RAM_BASE_OFFSET);
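++	/* q_mem is a NULL-based template: &q_mem[txq_id].txq yields the
++	 * byte offset of this TxQ's register block within the RAM window
++	 */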
++	q_mem = (struct bna_rxtx_q_mem *)0;
++	txq_mem = &q_mem[txq->txq_id].txq;
++
++	/*
++	 * The following four writes are a workaround because the
++	 * hardware needs to read these DMA addresses as little endian
++	 */
++
++	off = (unsigned long)&txq_mem->pg_tbl_addr_lo;
++	writel(htonl(txq_cfg.pg_tbl_addr_lo), base_addr + off);
++
++	off = (unsigned long)&txq_mem->pg_tbl_addr_hi;
++	writel(htonl(txq_cfg.pg_tbl_addr_hi), base_addr + off);
++
++	off = (unsigned long)&txq_mem->cur_q_entry_lo;
++	writel(htonl(txq_cfg.cur_q_entry_lo), base_addr + off);
++
++	off = (unsigned long)&txq_mem->cur_q_entry_hi;
++	writel(htonl(txq_cfg.cur_q_entry_hi), base_addr + off);
++
++	off = (unsigned long)&txq_mem->pg_cnt_n_prd_ptr;
++	writel(txq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
++
++	off = (unsigned long)&txq_mem->entry_n_pg_size;
++	writel(txq_cfg.entry_n_pg_size, base_addr + off);
++
++	off = (unsigned long)&txq_mem->int_blk_n_cns_ptr;
++	writel(txq_cfg.int_blk_n_cns_ptr, base_addr + off);
++
++	off = (unsigned long)&txq_mem->cns_ptr2_n_q_state;
++	writel(txq_cfg.cns_ptr2_n_q_state, base_addr + off);
++
++	off = (unsigned long)&txq_mem->nxt_qid_n_fid_n_pri;
++	writel(txq_cfg.nxt_qid_n_fid_n_pri, base_addr + off);
++
++	off = (unsigned long)&txq_mem->wvc_n_cquota_n_rquota;
++	writel(txq_cfg.wvc_n_cquota_n_rquota, base_addr + off);
++
++	txq->tcb->producer_index = 0;
++	txq->tcb->consumer_index = 0;
++	*(txq->tcb->hw_consumer_index) = 0;
++
++}
++
++static void
++__bna_txq_stop(struct bna_tx *tx, struct bna_txq *txq)
++{
++	struct bfi_ll_q_stop_req ll_req;
++	u32 bit_mask[2] = {0, 0};
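++
++	/* TxQ ids 0-31 select a bit in mask word 0; ids 32-63 in word 1 */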
++	if (txq->txq_id < 32)
++		bit_mask[0] = (u32)1 << txq->txq_id;
++	else
++		bit_mask[1] = (u32)1 << (txq->txq_id - 32);
++
++	memset(&ll_req, 0, sizeof(ll_req));
++	ll_req.mh.msg_class = BFI_MC_LL;
++	ll_req.mh.msg_id = BFI_LL_H2I_TXQ_STOP_REQ;
++	ll_req.mh.mtag.h2i.lpu_id = 0;
++	ll_req.q_id_mask[0] = htonl(bit_mask[0]);
++	ll_req.q_id_mask[1] = htonl(bit_mask[1]);
++
++	bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
++			bna_tx_cb_txq_stopped, tx);
++
++	bna_mbox_send(tx->bna, &tx->mbox_qe);
++}
++
++static void
++__bna_txf_start(struct bna_tx *tx)
++{
++	struct bna_tx_fndb_ram *tx_fndb;
++	struct bna_txf *txf = &tx->txf;
++	void __iomem *base_addr;
++	unsigned long off;
++
++	writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
++			(tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET),
++			tx->bna->regs.page_addr);
++
++	base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
++					TX_FNDB_RAM_BASE_OFFSET);
++
++	tx_fndb = (struct bna_tx_fndb_ram *)0;
++	off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
++
++	writel(((u32)txf->vlan << 16) | txf->ctrl_flags,
++			base_addr + off);
++
++	if (tx->txf.txf_id < 32)
++		tx->bna->tx_mod.txf_bmap[0] |= ((u32)1 << tx->txf.txf_id);
++	else
++		tx->bna->tx_mod.txf_bmap[1] |= ((u32)
++						 1 << (tx->txf.txf_id - 32));
++}
++
++static void
++__bna_txf_stop(struct bna_tx *tx)
++{
++	struct bna_tx_fndb_ram *tx_fndb;
++	u32 page_num;
++	u32 ctl_flags;
++	struct bna_txf *txf = &tx->txf;
++	void __iomem *base_addr;
++	unsigned long off;
++
++	/* retrieve the running txf_flags & turn off enable bit */
++	page_num = BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
++			(tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET);
++	writel(page_num, tx->bna->regs.page_addr);
++
++	base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
++					TX_FNDB_RAM_BASE_OFFSET);
++	tx_fndb = (struct bna_tx_fndb_ram *)0;
++	off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
++
++	ctl_flags = readl(base_addr + off);
++	ctl_flags &= ~BFI_TXF_CF_ENABLE;
++
++	writel(ctl_flags, base_addr + off);
++
++	if (tx->txf.txf_id < 32)
++		tx->bna->tx_mod.txf_bmap[0] &= ~((u32)1 << tx->txf.txf_id);
++	else
++		tx->bna->tx_mod.txf_bmap[1] &= ~((u32)
++						 1 << (tx->txf.txf_id - 32));
++}
++
++static void
++__bna_txf_stat_clr(struct bna_tx *tx)
++{
++	struct bfi_ll_stats_req ll_req;
++	u32 txf_bmap[2] = {0, 0};
++
++	if (tx->txf.txf_id < 32)
++		txf_bmap[0] = ((u32)1 << tx->txf.txf_id);
++	else
++		txf_bmap[1] = ((u32)1 << (tx->txf.txf_id - 32));
++
++	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
++	ll_req.stats_mask = 0;
++	ll_req.rxf_id_mask[0] = 0;
++	ll_req.rxf_id_mask[1] = 0;
++	ll_req.txf_id_mask[0] = htonl(txf_bmap[0]);
++	ll_req.txf_id_mask[1] = htonl(txf_bmap[1]);
++
++	bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
++			bna_tx_cb_stats_cleared, tx);
++	bna_mbox_send(tx->bna, &tx->mbox_qe);
++}
++
++static void
++__bna_tx_start(struct bna_tx *tx)
++{
++	struct bna_txq *txq;
++	struct list_head		 *qe;
++
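++	/* Start IBs and TxQs, enable the Tx function in hardware, then
++	 * let bnad resume posting on each queue
++	 */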
++	list_for_each(qe, &tx->txq_q) {
++		txq = (struct bna_txq *)qe;
++		bna_ib_start(txq->ib);
++		__bna_txq_start(tx, txq);
++	}
++
++	__bna_txf_start(tx);
++
++	list_for_each(qe, &tx->txq_q) {
++		txq = (struct bna_txq *)qe;
++		txq->tcb->priority = txq->priority;
++		(tx->tx_resume_cbfn)(tx->bna->bnad, txq->tcb);
++	}
++}
++
++static void
++__bna_tx_stop(struct bna_tx *tx)
++{
++	struct bna_txq *txq;
++	struct list_head		 *qe;
++
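++	/* Stall bnad first, disable the Tx function, then bump the
++	 * txq_stop wait-counter once per TxQ before issuing the stop
++	 * requests, so completion fires only after every TxQ stops
++	 */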
++	list_for_each(qe, &tx->txq_q) {
++		txq = (struct bna_txq *)qe;
++		(tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
++	}
++
++	__bna_txf_stop(tx);
++
++	list_for_each(qe, &tx->txq_q) {
++		txq = (struct bna_txq *)qe;
++		bfa_wc_up(&tx->txq_stop_wc);
++	}
++
++	list_for_each(qe, &tx->txq_q) {
++		txq = (struct bna_txq *)qe;
++		__bna_txq_stop(tx, txq);
++	}
++}
++
++static void
++bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
++		struct bna_mem_descr *qpt_mem,
++		struct bna_mem_descr *swqpt_mem,
++		struct bna_mem_descr *page_mem)
++{
++	int i;
++
++	txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
++	txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
++	txq->qpt.kv_qpt_ptr = qpt_mem->kva;
++	txq->qpt.page_count = page_count;
++	txq->qpt.page_size = page_size;
++
++	txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
++
++	for (i = 0; i < page_count; i++) {
++		txq->tcb->sw_qpt[i] = page_mem[i].kva;
++
++		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
++			page_mem[i].dma.lsb;
++		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
++			page_mem[i].dma.msb;
++
++	}
++}
++
++static void
++bna_tx_free(struct bna_tx *tx)
++{
++	struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
++	struct bna_txq *txq;
++	struct bna_ib_mod *ib_mod = &tx->bna->ib_mod;
++	struct list_head *qe;
++
++	while (!list_empty(&tx->txq_q)) {
++		bfa_q_deq(&tx->txq_q, &txq);
++		bfa_q_qe_init(&txq->qe);
++		if (txq->ib) {
++			if (txq->ib_seg_offset != -1)
++				bna_ib_release_idx(txq->ib,
++						txq->ib_seg_offset);
++			bna_ib_put(ib_mod, txq->ib);
++			txq->ib = NULL;
++		}
++		txq->tcb = NULL;
++		txq->tx = NULL;
++		list_add_tail(&txq->qe, &tx_mod->txq_free_q);
++	}
++
++	list_for_each(qe, &tx_mod->tx_active_q) {
++		if (qe == &tx->qe) {
++			list_del(&tx->qe);
++			bfa_q_qe_init(&tx->qe);
++			break;
++		}
++	}
++
++	tx->bna = NULL;
++	tx->priv = NULL;
++	list_add_tail(&tx->qe, &tx_mod->tx_free_q);
++}
++
++static void
++bna_tx_cb_txq_stopped(void *arg, int status)
++{
++	struct bna_tx *tx = (struct bna_tx *)arg;
++
++	bfa_q_qe_init(&tx->mbox_qe.qe);
++	bfa_wc_down(&tx->txq_stop_wc);
++}
++
++static void
++bna_tx_cb_txq_stopped_all(void *arg)
++{
++	struct bna_tx *tx = (struct bna_tx *)arg;
++
++	bfa_fsm_send_event(tx, TX_E_TXQ_STOPPED);
++}
++
++static void
++bna_tx_cb_stats_cleared(void *arg, int status)
++{
++	struct bna_tx *tx = (struct bna_tx *)arg;
++
++	bfa_q_qe_init(&tx->mbox_qe.qe);
++
++	bfa_fsm_send_event(tx, TX_E_STAT_CLEARED);
++}
++
++static void
++bna_tx_start(struct bna_tx *tx)
++{
++	tx->flags |= BNA_TX_F_PORT_STARTED;
++	if (tx->flags & BNA_TX_F_ENABLED)
++		bfa_fsm_send_event(tx, TX_E_START);
++}
++
++static void
++bna_tx_stop(struct bna_tx *tx)
++{
++	tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
++	tx->stop_cbarg = &tx->bna->tx_mod;
++
++	tx->flags &= ~BNA_TX_F_PORT_STARTED;
++	bfa_fsm_send_event(tx, TX_E_STOP);
++}
++
++static void
++bna_tx_fail(struct bna_tx *tx)
++{
++	tx->flags &= ~BNA_TX_F_PORT_STARTED;
++	bfa_fsm_send_event(tx, TX_E_FAIL);
++}
++
++void
++bna_tx_prio_changed(struct bna_tx *tx, int prio)
++{
++	struct bna_txq *txq;
++	struct list_head		 *qe;
++
++	list_for_each(qe, &tx->txq_q) {
++		txq = (struct bna_txq *)qe;
++		txq->priority = prio;
++	}
++
++	bfa_fsm_send_event(tx, TX_E_PRIO_CHANGE);
++}
++
++static void
++bna_tx_cee_link_status(struct bna_tx *tx, int cee_link)
++{
++	if (cee_link)
++		tx->flags |= BNA_TX_F_PRIO_LOCK;
++	else
++		tx->flags &= ~BNA_TX_F_PRIO_LOCK;
++}
++
++static void
++bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx,
++			enum bna_cb_status status)
++{
++	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
++
++	bfa_wc_down(&tx_mod->tx_stop_wc);
++}
++
++static void
++bna_tx_mod_cb_tx_stopped_all(void *arg)
++{
++	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
++
++	if (tx_mod->stop_cbfn)
++		tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
++	tx_mod->stop_cbfn = NULL;
++}
++
++void
++bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
++{
++	u32 q_size;
++	u32 page_count;
++	struct bna_mem_info *mem_info;
++
++	res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
++	mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
++	mem_info->mem_type = BNA_MEM_T_KVA;
++	mem_info->len = sizeof(struct bna_tcb);
++	mem_info->num = num_txq;
++
++	q_size = txq_depth * BFI_TXQ_WI_SIZE;
++	q_size = ALIGN(q_size, PAGE_SIZE);
++	page_count = q_size >> PAGE_SHIFT;
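++	/* The WI ring is rounded up to whole pages; the QPT needs one
++	 * DMA address, one kernel pointer and one DMA page per ring page
++	 */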
++
++	res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
++	mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
++	mem_info->mem_type = BNA_MEM_T_DMA;
++	mem_info->len = page_count * sizeof(struct bna_dma_addr);
++	mem_info->num = num_txq;
++
++	res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
++	mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
++	mem_info->mem_type = BNA_MEM_T_KVA;
++	mem_info->len = page_count * sizeof(void *);
++	mem_info->num = num_txq;
++
++	res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
++	mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
++	mem_info->mem_type = BNA_MEM_T_DMA;
++	mem_info->len = PAGE_SIZE;
++	mem_info->num = num_txq * page_count;
++
++	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
++	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
++			BNA_INTR_T_MSIX;
++	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
++}
++
++struct bna_tx *
++bna_tx_create(struct bna *bna, struct bnad *bnad,
++		struct bna_tx_config *tx_cfg,
++		struct bna_tx_event_cbfn *tx_cbfn,
++		struct bna_res_info *res_info, void *priv)
++{
++	struct bna_intr_info *intr_info;
++	struct bna_tx_mod *tx_mod = &bna->tx_mod;
++	struct bna_tx *tx;
++	struct bna_txq *txq;
++	struct list_head *qe;
++	struct bna_ib_mod *ib_mod = &bna->ib_mod;
++	struct bna_doorbell_qset *qset;
++	struct bna_ib_config ib_config;
++	int page_count;
++	int page_size;
++	int page_idx;
++	int i;
++	unsigned long off;
++
++	intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
++	page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
++			tx_cfg->num_txq;
++	page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;
++
++	/**
++	 * Get resources
++	 */
++
++	if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
++		return NULL;
++
++	/* Tx */
++
++	if (list_empty(&tx_mod->tx_free_q))
++		return NULL;
++	bfa_q_deq(&tx_mod->tx_free_q, &tx);
++	bfa_q_qe_init(&tx->qe);
++
++	/* TxQs */
++
++	INIT_LIST_HEAD(&tx->txq_q);
++	for (i = 0; i < tx_cfg->num_txq; i++) {
++		if (list_empty(&tx_mod->txq_free_q))
++			goto err_return;
++
++		bfa_q_deq(&tx_mod->txq_free_q, &txq);
++		bfa_q_qe_init(&txq->qe);
++		list_add_tail(&txq->qe, &tx->txq_q);
++		txq->ib = NULL;
++		txq->ib_seg_offset = -1;
++		txq->tx = tx;
++	}
++
++	/* IBs */
++	i = 0;
++	list_for_each(qe, &tx->txq_q) {
++		txq = (struct bna_txq *)qe;
++
++		if (intr_info->num == 1)
++			txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
++						intr_info->idl[0].vector);
++		else
++			txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
++						intr_info->idl[i].vector);
++
++		if (txq->ib == NULL)
++			goto err_return;
++
++		txq->ib_seg_offset = bna_ib_reserve_idx(txq->ib);
++		if (txq->ib_seg_offset == -1)
++			goto err_return;
++
++		i++;
++	}
++
++	/*
++	 * Initialize
++	 */
++
++	/* Tx */
++
++	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
++	tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
++	/* Following callbacks are mandatory */
++	tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
++	tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
++	tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
++
++	list_add_tail(&tx->qe, &tx_mod->tx_active_q);
++	tx->bna = bna;
++	tx->priv = priv;
++	tx->txq_stop_wc.wc_resume = bna_tx_cb_txq_stopped_all;
++	tx->txq_stop_wc.wc_cbarg = tx;
++	tx->txq_stop_wc.wc_count = 0;
++
++	tx->type = tx_cfg->tx_type;
++
++	tx->flags = 0;
++	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_STARTED) {
++		switch (tx->type) {
++		case BNA_TX_T_REGULAR:
++			if (!(tx->bna->tx_mod.flags &
++				BNA_TX_MOD_F_PORT_LOOPBACK))
++				tx->flags |= BNA_TX_F_PORT_STARTED;
++			break;
++		case BNA_TX_T_LOOPBACK:
++			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_LOOPBACK)
++				tx->flags |= BNA_TX_F_PORT_STARTED;
++			break;
++		}
++	}
++	if (tx->bna->tx_mod.cee_link)
++		tx->flags |= BNA_TX_F_PRIO_LOCK;
++
++	/* TxQ */
++
++	i = 0;
++	page_idx = 0;
++	list_for_each(qe, &tx->txq_q) {
++		txq = (struct bna_txq *)qe;
++		txq->priority = tx_mod->priority;
++		txq->tcb = (struct bna_tcb *)
++		  res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
++		txq->tx_packets = 0;
++		txq->tx_bytes = 0;
++
++		/* IB */
++
++		ib_config.coalescing_timeo = BFI_TX_COALESCING_TIMEO;
++		ib_config.interpkt_timeo = 0; /* Not used */
++		ib_config.interpkt_count = BFI_TX_INTERPKT_COUNT;
++		ib_config.ctrl_flags = (BFI_IB_CF_INTER_PKT_DMA |
++					BFI_IB_CF_INT_ENABLE |
++					BFI_IB_CF_COALESCING_MODE);
++		bna_ib_config(txq->ib, &ib_config);
++
++		/* TCB */
++
++		txq->tcb->producer_index = 0;
++		txq->tcb->consumer_index = 0;
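++		/* hw_consumer_index points into this IB's DMA-visible
++		 * index segment, which the adapter updates on completion
++		 */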
++		txq->tcb->hw_consumer_index = (volatile u32 *)
++			((volatile u8 *)txq->ib->ib_seg_host_addr_kva +
++			 (txq->ib_seg_offset * BFI_IBIDX_SIZE));
++		*(txq->tcb->hw_consumer_index) = 0;
++		txq->tcb->q_depth = tx_cfg->txq_depth;
++		txq->tcb->unmap_q = (void *)
++		res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
++		qset = (struct bna_doorbell_qset *)0;
++		off = (unsigned long)&qset[txq->txq_id].txq[0];
++		txq->tcb->q_dbell = off +
++			BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
++		txq->tcb->i_dbell = &txq->ib->door_bell;
++		txq->tcb->intr_type = intr_info->intr_type;
++		txq->tcb->intr_vector = (intr_info->num == 1) ?
++					intr_info->idl[0].vector :
++					intr_info->idl[i].vector;
++		txq->tcb->txq = txq;
++		txq->tcb->bnad = bnad;
++		txq->tcb->id = i;
++
++		/* QPT, SWQPT, Pages */
++		bna_txq_qpt_setup(txq, page_count, page_size,
++			&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
++			&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
++			&res_info[BNA_TX_RES_MEM_T_PAGE].
++				  res_u.mem_info.mdl[page_idx]);
++		txq->tcb->page_idx = page_idx;
++		txq->tcb->page_count = page_count;
++		page_idx += page_count;
++
++		/* Callback to bnad for setting up TCB */
++		if (tx->tcb_setup_cbfn)
++			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
++
++		i++;
++	}
++
++	/* TxF */
++
++	tx->txf.ctrl_flags = BFI_TXF_CF_ENABLE | BFI_TXF_CF_VLAN_WI_BASED;
++	tx->txf.vlan = 0;
++
++	/* Mbox element */
++	bfa_q_qe_init(&tx->mbox_qe.qe);
++
++	bfa_fsm_set_state(tx, bna_tx_sm_stopped);
++
++	return tx;
++
++err_return:
++	bna_tx_free(tx);
++	return NULL;
++}
++
++void
++bna_tx_destroy(struct bna_tx *tx)
++{
++	/* Callback to bnad for destroying TCB */
++	if (tx->tcb_destroy_cbfn) {
++		struct bna_txq *txq;
++		struct list_head *qe;
++
++		list_for_each(qe, &tx->txq_q) {
++			txq = (struct bna_txq *)qe;
++			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
++		}
++	}
++
++	bna_tx_free(tx);
++}
++
++void
++bna_tx_enable(struct bna_tx *tx)
++{
++	if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
++		return;
++
++	tx->flags |= BNA_TX_F_ENABLED;
++
++	if (tx->flags & BNA_TX_F_PORT_STARTED)
++		bfa_fsm_send_event(tx, TX_E_START);
++}
++
++void
++bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
++		void (*cbfn)(void *, struct bna_tx *, enum bna_cb_status))
++{
++	if (type == BNA_SOFT_CLEANUP) {
++		(*cbfn)(tx->bna->bnad, tx, BNA_CB_SUCCESS);
++		return;
++	}
++
++	tx->stop_cbfn = cbfn;
++	tx->stop_cbarg = tx->bna->bnad;
++
++	tx->flags &= ~BNA_TX_F_ENABLED;
++
++	bfa_fsm_send_event(tx, TX_E_STOP);
++}
++
++int
++bna_tx_state_get(struct bna_tx *tx)
++{
++	return bfa_sm_to_state(tx_sm_table, tx->fsm);
++}
++
++void
++bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
++		struct bna_res_info *res_info)
++{
++	int i;
++
++	tx_mod->bna = bna;
++	tx_mod->flags = 0;
++
++	tx_mod->tx = (struct bna_tx *)
++		res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
++	tx_mod->txq = (struct bna_txq *)
++		res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
++
++	INIT_LIST_HEAD(&tx_mod->tx_free_q);
++	INIT_LIST_HEAD(&tx_mod->tx_active_q);
++
++	INIT_LIST_HEAD(&tx_mod->txq_free_q);
++
++	for (i = 0; i < BFI_MAX_TXQ; i++) {
++		tx_mod->tx[i].txf.txf_id = i;
++		bfa_q_qe_init(&tx_mod->tx[i].qe);
++		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
++
++		tx_mod->txq[i].txq_id = i;
++		bfa_q_qe_init(&tx_mod->txq[i].qe);
++		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
++	}
++
++	tx_mod->tx_stop_wc.wc_resume = bna_tx_mod_cb_tx_stopped_all;
++	tx_mod->tx_stop_wc.wc_cbarg = tx_mod;
++	tx_mod->tx_stop_wc.wc_count = 0;
++}
++
++void
++bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
++{
++	struct list_head		*qe;
++	int i;
++
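++	/* Walk the free lists; the counts are unused, presumably kept
++	 * as a hook for leak checking
++	 */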
++	i = 0;
++	list_for_each(qe, &tx_mod->tx_free_q)
++		i++;
++
++	i = 0;
++	list_for_each(qe, &tx_mod->txq_free_q)
++		i++;
++
++	tx_mod->bna = NULL;
++}
++
++void
++bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
++{
++	struct bna_tx *tx;
++	struct list_head		*qe;
++
++	tx_mod->flags |= BNA_TX_MOD_F_PORT_STARTED;
++	if (type == BNA_TX_T_LOOPBACK)
++		tx_mod->flags |= BNA_TX_MOD_F_PORT_LOOPBACK;
++
++	list_for_each(qe, &tx_mod->tx_active_q) {
++		tx = (struct bna_tx *)qe;
++		if (tx->type == type)
++			bna_tx_start(tx);
++	}
++}
++
++void
++bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
++{
++	struct bna_tx *tx;
++	struct list_head		*qe;
++
++	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
++	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
++
++	tx_mod->stop_cbfn = bna_port_cb_tx_stopped;
++
++	/**
++	 * Before calling bna_tx_stop(), increment tx_stop_wc as many times
++	 * as we are going to call bna_tx_stop
++	 */
++	list_for_each(qe, &tx_mod->tx_active_q) {
++		tx = (struct bna_tx *)qe;
++		if (tx->type == type)
++			bfa_wc_up(&tx_mod->tx_stop_wc);
++	}
++
++	if (tx_mod->tx_stop_wc.wc_count == 0) {
++		tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
++		tx_mod->stop_cbfn = NULL;
++		return;
++	}
++
++	list_for_each(qe, &tx_mod->tx_active_q) {
++		tx = (struct bna_tx *)qe;
++		if (tx->type == type)
++			bna_tx_stop(tx);
++	}
++}
++
++void
++bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
++{
++	struct bna_tx *tx;
++	struct list_head		*qe;
++
++	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
++	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
++
++	list_for_each(qe, &tx_mod->tx_active_q) {
++		tx = (struct bna_tx *)qe;
++		bna_tx_fail(tx);
++	}
++}
++
++void
++bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio)
++{
++	struct bna_tx *tx;
++	struct list_head		*qe;
++
++	if (prio != tx_mod->priority) {
++		tx_mod->priority = prio;
++
++		list_for_each(qe, &tx_mod->tx_active_q) {
++			tx = (struct bna_tx *)qe;
++			bna_tx_prio_changed(tx, prio);
++		}
++	}
++}
++
++void
++bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link)
++{
++	struct bna_tx *tx;
++	struct list_head		*qe;
++
++	tx_mod->cee_link = cee_link;
++
++	list_for_each(qe, &tx_mod->tx_active_q) {
++		tx = (struct bna_tx *)qe;
++		bna_tx_cee_link_status(tx, cee_link);
++	}
++}
+--- /dev/null
++++ b/drivers/net/bna/bna_types.h
+@@ -0,0 +1,1128 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++#ifndef __BNA_TYPES_H__
++#define __BNA_TYPES_H__
++
++#include "cna.h"
++#include "bna_hw.h"
++#include "bfa_cee.h"
++
++/**
++ *
++ * Forward declarations
++ *
++ */
++
++struct bna_txq;
++struct bna_tx;
++struct bna_rxq;
++struct bna_cq;
++struct bna_rx;
++struct bna_rxf;
++struct bna_port;
++struct bna;
++struct bnad;
++
++/**
++ *
++ * Enums, primitive data types
++ *
++ */
++
++enum bna_status {
++	BNA_STATUS_T_DISABLED	= 0,
++	BNA_STATUS_T_ENABLED	= 1
++};
++
++enum bna_cleanup_type {
++	BNA_HARD_CLEANUP 	= 0,
++	BNA_SOFT_CLEANUP 	= 1
++};
++
++enum bna_cb_status {
++	BNA_CB_SUCCESS 		= 0,
++	BNA_CB_FAIL		= 1,
++	BNA_CB_INTERRUPT	= 2,
++	BNA_CB_BUSY		= 3,
++	BNA_CB_INVALID_MAC	= 4,
++	BNA_CB_MCAST_LIST_FULL	= 5,
++	BNA_CB_UCAST_CAM_FULL	= 6,
++	BNA_CB_WAITING		= 7,
++	BNA_CB_NOT_EXEC		= 8
++};
++
++enum bna_res_type {
++	BNA_RES_T_MEM		= 1,
++	BNA_RES_T_INTR		= 2
++};
++
++enum bna_mem_type {
++	BNA_MEM_T_KVA 		= 1,
++	BNA_MEM_T_DMA 		= 2
++};
++
++enum bna_intr_type {
++	BNA_INTR_T_INTX		= 1,
++	BNA_INTR_T_MSIX		= 2
++};
++
++enum bna_res_req_type {
++	BNA_RES_MEM_T_COM 		= 0,
++	BNA_RES_MEM_T_ATTR 		= 1,
++	BNA_RES_MEM_T_FWTRC 		= 2,
++	BNA_RES_MEM_T_STATS 		= 3,
++	BNA_RES_MEM_T_SWSTATS		= 4,
++	BNA_RES_MEM_T_IBIDX		= 5,
++	BNA_RES_MEM_T_IB_ARRAY		= 6,
++	BNA_RES_MEM_T_INTR_ARRAY	= 7,
++	BNA_RES_MEM_T_IDXSEG_ARRAY	= 8,
++	BNA_RES_MEM_T_TX_ARRAY		= 9,
++	BNA_RES_MEM_T_TXQ_ARRAY		= 10,
++	BNA_RES_MEM_T_RX_ARRAY		= 11,
++	BNA_RES_MEM_T_RXP_ARRAY		= 12,
++	BNA_RES_MEM_T_RXQ_ARRAY		= 13,
++	BNA_RES_MEM_T_UCMAC_ARRAY	= 14,
++	BNA_RES_MEM_T_MCMAC_ARRAY	= 15,
++	BNA_RES_MEM_T_RIT_ENTRY		= 16,
++	BNA_RES_MEM_T_RIT_SEGMENT	= 17,
++	BNA_RES_INTR_T_MBOX		= 18,
++	BNA_RES_T_MAX
++};
++
++enum bna_tx_res_req_type {
++	BNA_TX_RES_MEM_T_TCB	= 0,
++	BNA_TX_RES_MEM_T_UNMAPQ	= 1,
++	BNA_TX_RES_MEM_T_QPT 	= 2,
++	BNA_TX_RES_MEM_T_SWQPT	= 3,
++	BNA_TX_RES_MEM_T_PAGE 	= 4,
++	BNA_TX_RES_INTR_T_TXCMPL = 5,
++	BNA_TX_RES_T_MAX,
++};
++
++enum bna_rx_mem_type {
++	BNA_RX_RES_MEM_T_CCB		= 0,	/* CQ context */
++	BNA_RX_RES_MEM_T_RCB		= 1,	/* RxQ context */
++	BNA_RX_RES_MEM_T_UNMAPQ		= 2,	/* UnmapQ for RxQs */
++	BNA_RX_RES_MEM_T_CQPT		= 3,	/* CQ QPT */
++	BNA_RX_RES_MEM_T_CSWQPT		= 4,	/* CQ s/w QPT */
++	BNA_RX_RES_MEM_T_CQPT_PAGE	= 5,	/* CQPT pages */
++	BNA_RX_RES_MEM_T_HQPT		= 6,	/* Header RxQ QPT */
++	BNA_RX_RES_MEM_T_DQPT		= 7,	/* Data RxQ QPT */
++	BNA_RX_RES_MEM_T_HSWQPT		= 8,	/* Header RxQ s/w QPT */
++	BNA_RX_RES_MEM_T_DSWQPT		= 9,	/* Data RxQ s/w QPT */
++	BNA_RX_RES_MEM_T_DPAGE		= 10,	/* Data RxQ pages */
++	BNA_RX_RES_MEM_T_HPAGE		= 11,	/* Header RxQ pages */
++	BNA_RX_RES_T_INTR		= 12,	/* Rx interrupts */
++	BNA_RX_RES_T_MAX		= 13
++};
++
++enum bna_mbox_state {
++	BNA_MBOX_FREE		= 0,
++	BNA_MBOX_POSTED		= 1
++};
++
++enum bna_tx_type {
++	BNA_TX_T_REGULAR	= 0,
++	BNA_TX_T_LOOPBACK	= 1,
++};
++
++enum bna_tx_flags {
++	BNA_TX_F_PORT_STARTED	= 1,
++	BNA_TX_F_ENABLED	= 2,
++	BNA_TX_F_PRIO_LOCK	= 4,
++};
++
++enum bna_tx_mod_flags {
++	BNA_TX_MOD_F_PORT_STARTED	= 1,
++	BNA_TX_MOD_F_PORT_LOOPBACK	= 2,
++};
++
++enum bna_rx_type {
++	BNA_RX_T_REGULAR	= 0,
++	BNA_RX_T_LOOPBACK	= 1,
++};
++
++enum bna_rxp_type {
++	BNA_RXP_SINGLE 		= 1,
++	BNA_RXP_SLR 		= 2,
++	BNA_RXP_HDS 		= 3
++};
++
++enum bna_rxmode {
++	BNA_RXMODE_PROMISC 	= 1,
++	BNA_RXMODE_DEFAULT 	= 2,
++	BNA_RXMODE_ALLMULTI 	= 4
++};
++
++enum bna_rx_event {
++	RX_E_START			= 1,
++	RX_E_STOP			= 2,
++	RX_E_FAIL			= 3,
++	RX_E_RXF_STARTED		= 4,
++	RX_E_RXF_STOPPED		= 5,
++	RX_E_RXQ_STOPPED		= 6,
++};
++
++enum bna_rx_state {
++	BNA_RX_STOPPED			= 1,
++	BNA_RX_RXF_START_WAIT		= 2,
++	BNA_RX_STARTED			= 3,
++	BNA_RX_RXF_STOP_WAIT		= 4,
++	BNA_RX_RXQ_STOP_WAIT		= 5,
++};
++
++enum bna_rx_flags {
++	BNA_RX_F_ENABLE		= 0x01,		/* bnad enabled rxf */
++	BNA_RX_F_PORT_ENABLED	= 0x02,		/* Port object is enabled */
++	BNA_RX_F_PORT_FAILED	= 0x04,		/* Port in failed state */
++};
++
++enum bna_rx_mod_flags {
++	BNA_RX_MOD_F_PORT_STARTED	= 1,
++	BNA_RX_MOD_F_PORT_LOOPBACK	= 2,
++};
++
++enum bna_rxf_oper_state {
++	BNA_RXF_OPER_STATE_RUNNING	= 0x01, /* rxf operational */
++	BNA_RXF_OPER_STATE_PAUSED	= 0x02,	/* rxf in PAUSED state */
++};
++
++enum bna_rxf_flags {
++	BNA_RXF_FL_STOP_PENDING 	= 0x01,
++	BNA_RXF_FL_FAILED		= 0x02,
++	BNA_RXF_FL_RSS_CONFIG_PENDING	= 0x04,
++	BNA_RXF_FL_OPERSTATE_CHANGED	= 0x08,
++	BNA_RXF_FL_RXF_ENABLED		= 0x10,
++	BNA_RXF_FL_VLAN_CONFIG_PENDING	= 0x20,
++};
++
++enum bna_rxf_event {
++	RXF_E_START			= 1,
++	RXF_E_STOP			= 2,
++	RXF_E_FAIL			= 3,
++	RXF_E_CAM_FLTR_MOD		= 4,
++	RXF_E_STARTED			= 5,
++	RXF_E_STOPPED			= 6,
++	RXF_E_CAM_FLTR_RESP		= 7,
++	RXF_E_PAUSE			= 8,
++	RXF_E_RESUME			= 9,
++	RXF_E_STAT_CLEARED		= 10,
++};
++
++enum bna_rxf_state {
++	BNA_RXF_STOPPED			= 1,
++	BNA_RXF_START_WAIT		= 2,
++	BNA_RXF_CAM_FLTR_MOD_WAIT	= 3,
++	BNA_RXF_STARTED			= 4,
++	BNA_RXF_CAM_FLTR_CLR_WAIT	= 5,
++	BNA_RXF_STOP_WAIT		= 6,
++	BNA_RXF_PAUSE_WAIT		= 7,
++	BNA_RXF_RESUME_WAIT		= 8,
++	BNA_RXF_STAT_CLR_WAIT		= 9,
++};
++
++enum bna_port_type {
++	BNA_PORT_T_REGULAR		= 0,
++	BNA_PORT_T_LOOPBACK_INTERNAL	= 1,
++	BNA_PORT_T_LOOPBACK_EXTERNAL	= 2,
++};
++
++enum bna_link_status {
++	BNA_LINK_DOWN		= 0,
++	BNA_LINK_UP		= 1,
++	BNA_CEE_UP 		= 2
++};
++
++enum bna_llport_flags {
++	BNA_LLPORT_F_ENABLED 	= 1,
++	BNA_LLPORT_F_RX_ENABLED	= 2
++};
++
++enum bna_port_flags {
++	BNA_PORT_F_DEVICE_READY	= 1,
++	BNA_PORT_F_ENABLED	= 2,
++	BNA_PORT_F_PAUSE_CHANGED = 4,
++	BNA_PORT_F_MTU_CHANGED	= 8
++};
++
++enum bna_pkt_rates {
++	BNA_PKT_RATE_10K		= 10000,
++	BNA_PKT_RATE_20K		= 20000,
++	BNA_PKT_RATE_30K		= 30000,
++	BNA_PKT_RATE_40K		= 40000,
++	BNA_PKT_RATE_50K		= 50000,
++	BNA_PKT_RATE_60K		= 60000,
++	BNA_PKT_RATE_70K		= 70000,
++	BNA_PKT_RATE_80K		= 80000,
++};
++
++enum bna_dim_load_types {
++	BNA_LOAD_T_HIGH_4		= 0, /* 80K <= r */
++	BNA_LOAD_T_HIGH_3		= 1, /* 60K <= r < 80K */
++	BNA_LOAD_T_HIGH_2		= 2, /* 50K <= r < 60K */
++	BNA_LOAD_T_HIGH_1		= 3, /* 40K <= r < 50K */
++	BNA_LOAD_T_LOW_1		= 4, /* 30K <= r < 40K */
++	BNA_LOAD_T_LOW_2		= 5, /* 20K <= r < 30K */
++	BNA_LOAD_T_LOW_3		= 6, /* 10K <= r < 20K */
++	BNA_LOAD_T_LOW_4		= 7, /* r < 10K */
++	BNA_LOAD_T_MAX			= 8
++};
++
++enum bna_dim_bias_types {
++	BNA_BIAS_T_SMALL		= 0, /* small pkts > (large pkts * 2) */
++	BNA_BIAS_T_LARGE		= 1, /* Not BNA_BIAS_T_SMALL */
++	BNA_BIAS_T_MAX			= 2
++};
++
++struct bna_mac {
++	/* This should be the first one */
++	struct list_head			qe;
++	u8			addr[ETH_ALEN];
++};
++
++struct bna_mem_descr {
++	u32		len;
++	void		*kva;
++	struct bna_dma_addr dma;
++};
++
++struct bna_mem_info {
++	enum bna_mem_type mem_type;
++	u32		len;
++	u32 		num;
++	u32		align_sz; /* 0/1 = no alignment */
++	struct bna_mem_descr *mdl;
++	void			*cookie; /* For bnad to unmap dma later */
++};
++
++struct bna_intr_descr {
++	int			vector;
++};
++
++struct bna_intr_info {
++	enum bna_intr_type intr_type;
++	int			num;
++	struct bna_intr_descr *idl;
++};
++
++union bna_res_u {
++	struct bna_mem_info mem_info;
++	struct bna_intr_info intr_info;
++};
++
++struct bna_res_info {
++	enum bna_res_type res_type;
++	union bna_res_u		res_u;
++};
++
++/* HW QPT */
++struct bna_qpt {
++	struct bna_dma_addr hw_qpt_ptr;
++	void		*kv_qpt_ptr;
++	u32		page_count;
++	u32		page_size;
++};
++
++/**
++ *
++ * Device
++ *
++ */
++
++struct bna_device {
++	bfa_fsm_t		fsm;
++	struct bfa_ioc ioc;
++
++	enum bna_intr_type intr_type;
++	int			vector;
++
++	void (*ready_cbfn)(struct bnad *bnad, enum bna_cb_status status);
++	struct bnad *ready_cbarg;
++
++	void (*stop_cbfn)(struct bnad *bnad, enum bna_cb_status status);
++	struct bnad *stop_cbarg;
++
++	struct bna *bna;
++};
++
++/**
++ *
++ * Mail box
++ *
++ */
++
++struct bna_mbox_qe {
++	/* This should be the first one */
++	struct list_head			qe;
++
++	struct bfa_mbox_cmd cmd;
++	u32 		cmd_len;
++	/* Callback for port, tx, rx, rxf */
++	void (*cbfn)(void *arg, int status);
++	void 			*cbarg;
++};
++
++struct bna_mbox_mod {
++	enum bna_mbox_state state;
++	struct list_head			posted_q;
++	u32		msg_pending;
++	u32		msg_ctr;
++	struct bna *bna;
++};
++
++/**
++ *
++ * Port
++ *
++ */
++
++/* Pause configuration */
++struct bna_pause_config {
++	enum bna_status tx_pause;
++	enum bna_status rx_pause;
++};
++
++struct bna_llport {
++	bfa_fsm_t		fsm;
++	enum bna_llport_flags flags;
++
++	enum bna_port_type type;
++
++	enum bna_link_status link_status;
++
++	int			admin_up_count;
++
++	void (*stop_cbfn)(struct bna_port *, enum bna_cb_status);
++
++	struct bna_mbox_qe mbox_qe;
++
++	struct bna *bna;
++};
++
++struct bna_port {
++	bfa_fsm_t		fsm;
++	enum bna_port_flags flags;
++
++	enum bna_port_type type;
++
++	struct bna_llport llport;
++
++	struct bna_pause_config pause_config;
++	u8			priority;
++	int			mtu;
++
++	/* Callback for bna_port_disable(), port_stop() */
++	void (*stop_cbfn)(void *, enum bna_cb_status);
++	void			*stop_cbarg;
++
++	/* Callback for bna_port_pause_config() */
++	void (*pause_cbfn)(struct bnad *, enum bna_cb_status);
++
++	/* Callback for bna_port_mtu_set() */
++	void (*mtu_cbfn)(struct bnad *, enum bna_cb_status);
++
++	void (*link_cbfn)(struct bnad *, enum bna_link_status);
++
++	struct bfa_wc		chld_stop_wc;
++
++	struct bna_mbox_qe mbox_qe;
++
++	struct bna *bna;
++};
++
++/**
++ *
++ * Interrupt Block
++ *
++ */
++
++/* IB index segment structure */
++struct bna_ibidx_seg {
++	/* This should be the first one */
++	struct list_head			qe;
++
++	u8			ib_seg_size;
++	u8			ib_idx_tbl_offset;
++};
++
++/* Interrupt structure */
++struct bna_intr {
++	/* This should be the first one */
++	struct list_head			qe;
++	int			ref_count;
++
++	enum bna_intr_type intr_type;
++	int			vector;
++
++	struct bna_ib *ib;
++};
++
++/* Doorbell structure */
++struct bna_ib_dbell {
++	void __iomem *doorbell_addr;
++	u32		doorbell_ack;
++};
++
++/* Interrupt timer configuration */
++struct bna_ib_config {
++	u8 		coalescing_timeo;    /* Unit is 5usec. */
++
++	int			interpkt_count;
++	int			interpkt_timeo;
++
++	enum ib_flags ctrl_flags;
++};
++
++/* IB structure */
++struct bna_ib {
++	/* This should be the first one */
++	struct list_head			qe;
++
++	int			ib_id;
++
++	int			ref_count;
++	int			start_count;
++
++	struct bna_dma_addr ib_seg_host_addr;
++	void		*ib_seg_host_addr_kva;
++	u32		idx_mask; /* Size >= BNA_IBIDX_MAX_SEGSIZE */
++
++	struct bna_ibidx_seg *idx_seg;
++
++	struct bna_ib_dbell door_bell;
++
++	struct bna_intr *intr;
++
++	struct bna_ib_config ib_config;
++
++	struct bna *bna;
++};
++
++/* IB module - keeps track of IBs and interrupts */
++struct bna_ib_mod {
++	struct bna_ib *ib;		/* BFI_MAX_IB entries */
++	struct bna_intr *intr;		/* BFI_MAX_IB entries */
++	struct bna_ibidx_seg *idx_seg;	/* BNA_IBIDX_TOTAL_SEGS */
++
++	struct list_head			ib_free_q;
++
++	struct list_head		ibidx_seg_pool[BFI_IBIDX_TOTAL_POOLS];
++
++	struct list_head			intr_free_q;
++	struct list_head			intr_active_q;
++
++	struct bna *bna;
++};
++
++/**
++ *
++ * Tx object
++ *
++ */
++
++/* Tx datapath control structure */
++#define BNA_Q_NAME_SIZE		16
++struct bna_tcb {
++	/* Fast path */
++	void			**sw_qpt;
++	void			*unmap_q;
++	u32		producer_index;
++	u32		consumer_index;
++	volatile u32	*hw_consumer_index;
++	u32		q_depth;
++	void __iomem *q_dbell;
++	struct bna_ib_dbell *i_dbell;
++	int			page_idx;
++	int			page_count;
++	/* Control path */
++	struct bna_txq *txq;
++	struct bnad *bnad;
++	enum bna_intr_type intr_type;
++	int			intr_vector;
++	u8			priority; /* Current priority */
++	unsigned long		flags; /* Used by bnad as required */
++	int			id;
++	char			name[BNA_Q_NAME_SIZE];
++};
++
++/* TxQ QPT and configuration */
++struct bna_txq {
++	/* This should be the first one */
++	struct list_head			qe;
++
++	int			txq_id;
++
++	u8			priority;
++
++	struct bna_qpt qpt;
++	struct bna_tcb *tcb;
++	struct bna_ib *ib;
++	int			ib_seg_offset;
++
++	struct bna_tx *tx;
++
++	u64 		tx_packets;
++	u64 		tx_bytes;
++};
++
++/* TxF structure (hardware Tx Function) */
++struct bna_txf {
++	int			txf_id;
++	enum txf_flags ctrl_flags;
++	u16		vlan;
++};
++
++/* Tx object */
++struct bna_tx {
++	/* This should be the first one */
++	struct list_head			qe;
++
++	bfa_fsm_t		fsm;
++	enum bna_tx_flags flags;
++
++	enum bna_tx_type type;
++
++	struct list_head			txq_q;
++	struct bna_txf txf;
++
++	/* Tx event handlers */
++	void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
++	void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
++	void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
++	void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
++	void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
++
++	/* callback for bna_tx_disable(), bna_tx_stop() */
++	void (*stop_cbfn)(void *arg, struct bna_tx *tx,
++				enum bna_cb_status status);
++	void			*stop_cbarg;
++
++	/* callback for bna_tx_prio_set() */
++	void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx,
++				enum bna_cb_status status);
++
++	struct bfa_wc		txq_stop_wc;
++
++	struct bna_mbox_qe mbox_qe;
++
++	struct bna *bna;
++	void			*priv;	/* bnad's cookie */
++};
++
++struct bna_tx_config {
++	int			num_txq;
++	int			txq_depth;
++	enum bna_tx_type tx_type;
++};
++
++struct bna_tx_event_cbfn {
++	/* Optional */
++	void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
++	void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
++	/* Mandatory */
++	void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
++	void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
++	void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
++};
++
++/* Tx module - keeps track of free, active tx objects */
++struct bna_tx_mod {
++	struct bna_tx *tx;		/* BFI_MAX_TXQ entries */
++	struct bna_txq *txq;		/* BFI_MAX_TXQ entries */
++
++	struct list_head			tx_free_q;
++	struct list_head			tx_active_q;
++
++	struct list_head			txq_free_q;
++
++	/* callback for bna_tx_mod_stop() */
++	void (*stop_cbfn)(struct bna_port *port,
++				enum bna_cb_status status);
++
++	struct bfa_wc		tx_stop_wc;
++
++	enum bna_tx_mod_flags flags;
++
++	int			priority;
++	int			cee_link;
++
++	u32		txf_bmap[2];
++
++	struct bna *bna;
++};
++
++/**
++ *
++ * Receive Indirection Table
++ *
++ */
++
++/* One row of RIT table */
++struct bna_rit_entry {
++	u8 large_rxq_id;	/* used for either large or data buffers */
++	u8 small_rxq_id;	/* used for either small or header buffers */
++};
++
++/* RIT segment */
++struct bna_rit_segment {
++	struct list_head			qe;
++
++	u32		rit_offset;
++	u32		rit_size;
++	/**
++	 * max_rit_size: Varies per RIT segment depending on how RIT is
++	 * partitioned
++	 */
++	u32		max_rit_size;
++
++	struct bna_rit_entry *rit;
++};
++
++struct bna_rit_mod {
++	struct bna_rit_entry *rit;
++	struct bna_rit_segment *rit_segment;
++
++	struct list_head		rit_seg_pool[BFI_RIT_SEG_TOTAL_POOLS];
++};
++
++/**
++ *
++ * Rx object
++ *
++ */
++
++/* Rx datapath control structure */
++struct bna_rcb {
++	/* Fast path */
++	void			**sw_qpt;
++	void			*unmap_q;
++	u32		producer_index;
++	u32		consumer_index;
++	u32		q_depth;
++	void __iomem *q_dbell;
++	int			page_idx;
++	int			page_count;
++	/* Control path */
++	struct bna_rxq *rxq;
++	struct bna_cq *cq;
++	struct bnad *bnad;
++	unsigned long		flags;
++	int			id;
++};
++
++/* RxQ structure - QPT, configuration */
++struct bna_rxq {
++	struct list_head			qe;
++	int			rxq_id;
++
++	int			buffer_size;
++	int			q_depth;
++
++	struct bna_qpt qpt;
++	struct bna_rcb *rcb;
++
++	struct bna_rxp *rxp;
++	struct bna_rx *rx;
++
++	u64 		rx_packets;
++	u64		rx_bytes;
++	u64 		rx_packets_with_error;
++	u64 		rxbuf_alloc_failed;
++};
++
++/* RxQ pair */
++union bna_rxq_u {
++	struct {
++		struct bna_rxq *hdr;
++		struct bna_rxq *data;
++	} hds;
++	struct {
++		struct bna_rxq *small;
++		struct bna_rxq *large;
++	} slr;
++	struct {
++		struct bna_rxq *only;
++		struct bna_rxq *reserved;
++	} single;
++};
++
++/* Packet rate for Dynamic Interrupt Moderation */
++struct bna_pkt_rate {
++	u32		small_pkt_cnt;
++	u32		large_pkt_cnt;
++};
++
++/* Completion control structure */
++struct bna_ccb {
++	/* Fast path */
++	void			**sw_qpt;
++	u32		producer_index;
++	volatile u32	*hw_producer_index;
++	u32		q_depth;
++	struct bna_ib_dbell *i_dbell;
++	struct bna_rcb *rcb[2];
++	void			*ctrl; /* For bnad */
++	struct bna_pkt_rate pkt_rate;
++	int			page_idx;
++	int			page_count;
++
++	/* Control path */
++	struct bna_cq *cq;
++	struct bnad *bnad;
++	enum bna_intr_type intr_type;
++	int			intr_vector;
++	u8			rx_coalescing_timeo; /* For NAPI */
++	int			id;
++	char			name[BNA_Q_NAME_SIZE];
++};
++
++/* CQ QPT, configuration  */
++struct bna_cq {
++	int			cq_id;
++
++	struct bna_qpt qpt;
++	struct bna_ccb *ccb;
++
++	struct bna_ib *ib;
++	u8			ib_seg_offset;
++
++	struct bna_rx *rx;
++};
++
++struct bna_rss_config {
++	enum rss_hash_type hash_type;
++	u8			hash_mask;
++	u32		toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
++};
++
++struct bna_hds_config {
++	enum hds_header_type hdr_type;
++	int			header_size;
++};
++
++/* This structure is used during RX creation */
++struct bna_rx_config {
++	enum bna_rx_type rx_type;
++	int			num_paths;
++	enum bna_rxp_type rxp_type;
++	int			paused;
++	int			q_depth;
++	/*
++	 * Small/Large (or Header/Data) buffer size to be configured
++	 * for SLR and HDS queue type. Large buffer size comes from
++	 * port->mtu.
++	 */
++	int			small_buff_size;
++
++	enum bna_status rss_status;
++	struct bna_rss_config rss_config;
++
++	enum bna_status hds_status;
++	struct bna_hds_config hds_config;
++
++	enum bna_status vlan_strip_status;
++};
++
++/* Rx Path structure - one per MSIX vector/CPU */
++struct bna_rxp {
++	/* This should be the first one */
++	struct list_head			qe;
++
++	enum bna_rxp_type type;
++	union	bna_rxq_u	rxq;
++	struct bna_cq cq;
++
++	struct bna_rx *rx;
++
++	/* MSI-x vector number for configuring RSS */
++	int			vector;
++
++	struct bna_mbox_qe mbox_qe;
++};
++
++/* HDS configuration structure */
++struct bna_rxf_hds {
++	enum hds_header_type hdr_type;
++	int			header_size;
++};
++
++/* RSS configuration structure */
++struct bna_rxf_rss {
++	enum rss_hash_type hash_type;
++	u8			hash_mask;
++	u32		toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
++};
++
++/* RxF structure (hardware Rx Function) */
++struct bna_rxf {
++	bfa_fsm_t		fsm;
++	int			rxf_id;
++	enum rxf_flags ctrl_flags;
++	u16		default_vlan_tag;
++	enum bna_rxf_oper_state rxf_oper_state;
++	enum bna_status hds_status;
++	struct bna_rxf_hds hds_cfg;
++	enum bna_status rss_status;
++	struct bna_rxf_rss rss_cfg;
++	struct bna_rit_segment *rit_segment;
++	struct bna_rx *rx;
++	u32		forced_offset;
++	struct bna_mbox_qe mbox_qe;
++	int			mcast_rxq_id;
++
++	/* callback for bna_rxf_start() */
++	void (*start_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
++	struct bna_rx *start_cbarg;
++
++	/* callback for bna_rxf_stop() */
++	void (*stop_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
++	struct bna_rx *stop_cbarg;
++
++	/* callback for bna_rxf_receive_enable() / bna_rxf_receive_disable() */
++	void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx,
++			enum bna_cb_status status);
++	struct bnad *oper_state_cbarg;
++
++	/**
++	 * callback for:
++	 *	bna_rxf_ucast_set()
++	 *	bna_rxf_{ucast/mcast}_add(),
++	 * 	bna_rxf_{ucast/mcast}_del(),
++	 *	bna_rxf_mode_set()
++	 */
++	void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx,
++				enum bna_cb_status status);
++	struct bnad *cam_fltr_cbarg;
++
++	enum bna_rxf_flags rxf_flags;
++
++	/* List of unicast addresses yet to be applied to h/w */
++	struct list_head			ucast_pending_add_q;
++	struct list_head			ucast_pending_del_q;
++	int			ucast_pending_set;
++	/* ucast addresses applied to the h/w */
++	struct list_head			ucast_active_q;
++	struct bna_mac *ucast_active_mac;
++
++	/* List of multicast addresses yet to be applied to h/w */
++	struct list_head			mcast_pending_add_q;
++	struct list_head			mcast_pending_del_q;
++	/* multicast addresses applied to the h/w */
++	struct list_head			mcast_active_q;
++
++	/* Rx modes yet to be applied to h/w */
++	enum bna_rxmode rxmode_pending;
++	enum bna_rxmode rxmode_pending_bitmask;
++	/* Rx modes applied to h/w */
++	enum bna_rxmode rxmode_active;
++
++	enum bna_status vlan_filter_status;
++	u32		vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
++};
++
++/* Rx object */
++struct bna_rx {
++	/* This should be the first one */
++	struct list_head			qe;
++
++	bfa_fsm_t		fsm;
++
++	enum bna_rx_type type;
++
++	/* list-head for RX path objects */
++	struct list_head			rxp_q;
++
++	struct bna_rxf rxf;
++
++	enum bna_rx_flags rx_flags;
++
++	struct bna_mbox_qe mbox_qe;
++
++	struct bfa_wc		rxq_stop_wc;
++
++	/* Rx event handlers */
++	void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
++	void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
++	void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
++	void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
++	void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
++	void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
++
++	/* callback for bna_rx_disable(), bna_rx_stop() */
++	void (*stop_cbfn)(void *arg, struct bna_rx *rx,
++				enum bna_cb_status status);
++	void			*stop_cbarg;
++
++	struct bna *bna;
++	void			*priv; /* bnad's cookie */
++};
++
++struct bna_rx_event_cbfn {
++	/* Optional */
++	void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
++	void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
++	void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
++	void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
++	/* Mandatory */
++	void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
++	void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
++};
++
++/* Rx module - keeps track of free, active rx objects */
++struct bna_rx_mod {
++	struct bna *bna;		/* back pointer to parent */
++	struct bna_rx *rx;		/* BFI_MAX_RXQ entries */
++	struct bna_rxp *rxp;		/* BFI_MAX_RXQ entries */
++	struct bna_rxq *rxq;		/* BFI_MAX_RXQ entries */
++
++	struct list_head			rx_free_q;
++	struct list_head			rx_active_q;
++	int			rx_free_count;
++
++	struct list_head			rxp_free_q;
++	int			rxp_free_count;
++
++	struct list_head			rxq_free_q;
++	int			rxq_free_count;
++
++	enum bna_rx_mod_flags flags;
++
++	/* callback for bna_rx_mod_stop() */
++	void (*stop_cbfn)(struct bna_port *port,
++				enum bna_cb_status status);
++
++	struct bfa_wc		rx_stop_wc;
++	u32		dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX];
++	u32		rxf_bmap[2];
++};
++
++/**
++ *
++ * CAM
++ *
++ */
++
++struct bna_ucam_mod {
++	struct bna_mac *ucmac;		/* BFI_MAX_UCMAC entries */
++	struct list_head			free_q;
++
++	struct bna *bna;
++};
++
++struct bna_mcam_mod {
++	struct bna_mac *mcmac;		/* BFI_MAX_MCMAC entries */
++	struct list_head			free_q;
++
++	struct bna *bna;
++};
++
++/**
++ *
++ * Statistics
++ *
++ */
++
++struct bna_tx_stats {
++	int			tx_state;
++	int			tx_flags;
++	int			num_txqs;
++	u32		txq_bmap[2];
++	int			txf_id;
++};
++
++struct bna_rx_stats {
++	int			rx_state;
++	int			rx_flags;
++	int			num_rxps;
++	int			num_rxqs;
++	u32		rxq_bmap[2];
++	u32		cq_bmap[2];
++	int			rxf_id;
++	int			rxf_state;
++	int			rxf_oper_state;
++	int			num_active_ucast;
++	int			num_active_mcast;
++	int			rxmode_active;
++	int			vlan_filter_status;
++	u32		vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
++	int			rss_status;
++	int			hds_status;
++};
++
++struct bna_sw_stats {
++	int			device_state;
++	int			port_state;
++	int			port_flags;
++	int			llport_state;
++	int			priority;
++	int			num_active_tx;
++	int			num_active_rx;
++	struct bna_tx_stats tx_stats[BFI_MAX_TXQ];
++	struct bna_rx_stats rx_stats[BFI_MAX_RXQ];
++};
++
++struct bna_stats {
++	u32		txf_bmap[2];
++	u32		rxf_bmap[2];
++	struct bfi_ll_stats	*hw_stats;
++	struct bna_sw_stats *sw_stats;
++};
++
++/**
++ *
++ * BNA
++ *
++ */
++
++struct bna {
++	struct bfa_pcidev pcidev;
++
++	int			port_num;
++
++	struct bna_chip_regs regs;
++
++	struct bna_dma_addr hw_stats_dma;
++	struct bna_stats stats;
++
++	struct bna_device device;
++	struct bfa_cee cee;
++
++	struct bna_mbox_mod mbox_mod;
++
++	struct bna_port port;
++
++	struct bna_tx_mod tx_mod;
++
++	struct bna_rx_mod rx_mod;
++
++	struct bna_ib_mod ib_mod;
++
++	struct bna_ucam_mod ucam_mod;
++	struct bna_mcam_mod mcam_mod;
++
++	struct bna_rit_mod rit_mod;
++
++	int			rxf_default_id;
++	int			rxf_promisc_id;
++
++	struct bna_mbox_qe mbox_qe;
++
++	struct bnad *bnad;
++};
++
++#endif	/* __BNA_TYPES_H__ */
+--- /dev/null
++++ b/drivers/net/bna/bnad.c
+@@ -0,0 +1,3270 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++#include <linux/netdevice.h>
++#include <linux/skbuff.h>
++#include <linux/etherdevice.h>
++#include <linux/in.h>
++#include <linux/ethtool.h>
++#include <linux/if_vlan.h>
++#include <linux/if_ether.h>
++#include <linux/ip.h>
++
++#include "bnad.h"
++#include "bna.h"
++#include "cna.h"
++
++DEFINE_MUTEX(bnad_fwimg_mutex);
++
++/*
++ * Module params
++ */
++static uint bnad_msix_disable;
++module_param(bnad_msix_disable, uint, 0444);
++MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
++
++static uint bnad_ioc_auto_recover = 1;
++module_param(bnad_ioc_auto_recover, uint, 0444);
++MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
++
++/*
++ * Global variables
++ */
++u32 bnad_rxqs_per_cq = 2;
++
++const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
++
++/*
++ * Local MACROS
++ */
++#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
++
++#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
++
++#define BNAD_GET_MBOX_IRQ(_bnad)				\
++	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
++	 ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) : 	\
++	 ((_bnad)->pcidev->irq))
++
++#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
++do {								\
++	(_res_info)->res_type = BNA_RES_T_MEM;			\
++	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
++	(_res_info)->res_u.mem_info.num = (_num);		\
++	(_res_info)->res_u.mem_info.len =			\
++	sizeof(struct bnad_unmap_q) +				\
++	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
++} while (0)
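++
++/*
++ * The "(_depth) - 1" above assumes struct bnad_unmap_q already embeds
++ * the first bnad_skb_unmap element, a common flexible-array idiom
++ */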
++
++/*
++ * Reinitialize completions in CQ, once Rx is taken down
++ */
++static void
++bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
++{
++	struct bna_cq_entry *cmpl, *next_cmpl;
++	unsigned int wi_range, wis = 0, ccb_prod = 0;
++	int i;
++
++	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
++			    wi_range);
++
++	for (i = 0; i < ccb->q_depth; i++) {
++		wis++;
++		if (likely(--wi_range))
++			next_cmpl = cmpl + 1;
++		else {
++			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
++			wis = 0;
++			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
++						next_cmpl, wi_range);
++		}
++		cmpl->valid = 0;
++		cmpl = next_cmpl;
++	}
++}
++
++/*
++ * Frees all pending Tx Bufs
++ * At this point no activity is expected on the Q,
++ * so DMA unmap & freeing is fine.
++ */
++static void
++bnad_free_all_txbufs(struct bnad *bnad,
++		 struct bna_tcb *tcb)
++{
++	u16 		unmap_cons;
++	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
++	struct bnad_skb_unmap *unmap_array;
++	struct sk_buff 		*skb = NULL;
++	int			i;
++
++	unmap_array = unmap_q->unmap_array;
++
++	unmap_cons = 0;
++	while (unmap_cons < unmap_q->q_depth) {
++		skb = unmap_array[unmap_cons].skb;
++		if (!skb) {
++			unmap_cons++;
++			continue;
++		}
++		unmap_array[unmap_cons].skb = NULL;
++
++		pci_unmap_single(bnad->pcidev,
++				 pci_unmap_addr(&unmap_array[unmap_cons],
++						dma_addr), skb_headlen(skb),
++						PCI_DMA_TODEVICE);
++
++		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
++		unmap_cons++;
++		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++			pci_unmap_page(bnad->pcidev,
++				       pci_unmap_addr(&unmap_array[unmap_cons],
++						      dma_addr),
++				       skb_shinfo(skb)->frags[i].size,
++				       PCI_DMA_TODEVICE);
++			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
++					   0);
++			unmap_cons++;
++		}
++		dev_kfree_skb_any(skb);
++	}
++}
++
++/* Data Path Handlers */
++
++/*
++ * bnad_free_txbufs : Frees the Tx bufs on Tx completion
++ * Can be called in a) Interrupt context
++ *		    b) Sending context
++ *		    c) Tasklet context
++ */
++static u32
++bnad_free_txbufs(struct bnad *bnad,
++		 struct bna_tcb *tcb)
++{
++	u32 		sent_packets = 0, sent_bytes = 0;
++	u16 		wis, unmap_cons, updated_hw_cons;
++	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
++	struct bnad_skb_unmap *unmap_array;
++	struct sk_buff 		*skb;
++	int i;
++
++	/*
++	 * Just return if TX is stopped. This check is useful when
++	 * bnad_free_txbufs() runs from a tasklet that was scheduled
++	 * before bnad_cb_tx_cleanup() cleared the BNAD_RF_TX_STARTED
++	 * bit, but actually executes after the cleanup has run.
++	 */
++	if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
++		return 0;
++
++	updated_hw_cons = *(tcb->hw_consumer_index);
++
++	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
++				  updated_hw_cons, tcb->q_depth);
++
++	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
++
++	unmap_array = unmap_q->unmap_array;
++	unmap_cons = unmap_q->consumer_index;
++
++	prefetch(&unmap_array[unmap_cons + 1]);
++	while (wis) {
++		skb = unmap_array[unmap_cons].skb;
++
++		unmap_array[unmap_cons].skb = NULL;
++
++		sent_packets++;
++		sent_bytes += skb->len;
++		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
++
++		pci_unmap_single(bnad->pcidev,
++				 pci_unmap_addr(&unmap_array[unmap_cons],
++						dma_addr), skb_headlen(skb),
++				 PCI_DMA_TODEVICE);
++		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
++		BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
++
++		prefetch(&unmap_array[unmap_cons + 1]);
++		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++			prefetch(&unmap_array[unmap_cons + 1]);
++
++			pci_unmap_page(bnad->pcidev,
++				       pci_unmap_addr(&unmap_array[unmap_cons],
++						      dma_addr),
++				       skb_shinfo(skb)->frags[i].size,
++				       PCI_DMA_TODEVICE);
++			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
++					   0);
++			BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
++		}
++		dev_kfree_skb_any(skb);
++	}
++
++	/* Update consumer pointers. */
++	tcb->consumer_index = updated_hw_cons;
++	unmap_q->consumer_index = unmap_cons;
++
++	tcb->txq->tx_packets += sent_packets;
++	tcb->txq->tx_bytes += sent_bytes;
++
++	return sent_packets;
++}
++
++/*
++ * Tx free tasklet: frees sent buffers for every TCB in every Tx.
++ * Scheduled from the sending context so that the Tx lock is not
++ * held for too long there.
++ */
++static void
++bnad_tx_free_tasklet(unsigned long bnad_ptr)
++{
++	struct bnad *bnad = (struct bnad *)bnad_ptr;
++	struct bna_tcb *tcb;
++	u32 		acked;
++	int			i, j;
++
++	for (i = 0; i < bnad->num_tx; i++) {
++		for (j = 0; j < bnad->num_txq_per_tx; j++) {
++			tcb = bnad->tx_info[i].tcb[j];
++			if (!tcb)
++				continue;
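++			/* BNAD_TXQ_FREE_SENT serializes buffer freeing
++			 * between this tasklet and bnad_tx()
++			 */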
++			if (((u16) (*tcb->hw_consumer_index) !=
++				tcb->consumer_index) &&
++				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
++						  &tcb->flags))) {
++				acked = bnad_free_txbufs(bnad, tcb);
++				bna_ib_ack(tcb->i_dbell, acked);
++				smp_mb__before_clear_bit();
++				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
++			}
++		}
++	}
++}
++
++static u32
++bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
++{
++	struct net_device *netdev = bnad->netdev;
++	u32 sent;
++
++	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
++		return 0;
++
++	sent = bnad_free_txbufs(bnad, tcb);
++	if (sent) {
++		if (netif_queue_stopped(netdev) &&
++		    netif_carrier_ok(netdev) &&
++		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
++				    BNAD_NETIF_WAKE_THRESHOLD) {
++			netif_wake_queue(netdev);
++			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
++		}
++		bna_ib_ack(tcb->i_dbell, sent);
++	} else
++		bna_ib_ack(tcb->i_dbell, 0);
++
++	smp_mb__before_clear_bit();
++	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
++
++	return sent;
++}
++
++/* MSIX Tx Completion Handler */
++static irqreturn_t
++bnad_msix_tx(int irq, void *data)
++{
++	struct bna_tcb *tcb = (struct bna_tcb *)data;
++	struct bnad *bnad = tcb->bnad;
++
++	bnad_tx(bnad, tcb);
++
++	return IRQ_HANDLED;
++}
++
++static void
++bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
++{
++	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
++
++	rcb->producer_index = 0;
++	rcb->consumer_index = 0;
++
++	unmap_q->producer_index = 0;
++	unmap_q->consumer_index = 0;
++}
++
++static void
++bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
++{
++	struct bnad_unmap_q *unmap_q;
++	struct sk_buff *skb;
++
++	unmap_q = rcb->unmap_q;
++	while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
++		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
++		BUG_ON(!(skb));
++		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
++		pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
++					unmap_array[unmap_q->consumer_index],
++					dma_addr), rcb->rxq->buffer_size +
++					NET_IP_ALIGN, PCI_DMA_FROMDEVICE);
++		dev_kfree_skb(skb);
++		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
++		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
++	}
++
++	bnad_reset_rcb(bnad, rcb);
++}
++
++static void
++bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
++{
++	u16 to_alloc, alloced, unmap_prod, wi_range;
++	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
++	struct bnad_skb_unmap *unmap_array;
++	struct bna_rxq_entry *rxent;
++	struct sk_buff *skb;
++	dma_addr_t dma_addr;
++
++	alloced = 0;
++	to_alloc = BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
++
++	unmap_array = unmap_q->unmap_array;
++	unmap_prod = unmap_q->producer_index;
++
++	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
++
++	while (to_alloc--) {
++		if (!wi_range) {
++			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
++					     wi_range);
++		}
++		skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
++				     GFP_ATOMIC);
++		if (unlikely(!skb)) {
++			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
++			goto finishing;
++		}
++		skb->dev = bnad->netdev;
++		skb_reserve(skb, NET_IP_ALIGN);
++		unmap_array[unmap_prod].skb = skb;
++		dma_addr = pci_map_single(bnad->pcidev, skb->data,
++			rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
++		pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
++				   dma_addr);
++		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
++		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
++
++		rxent++;
++		wi_range--;
++		alloced++;
++	}
++
++finishing:
++	if (likely(alloced)) {
++		unmap_q->producer_index = unmap_prod;
++		rcb->producer_index = unmap_prod;
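++		/* Make the new buffers visible to the device before the doorbell */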
++		smp_mb();
++		bna_rxq_prod_indx_doorbell(rcb);
++	}
++}
++
++/*
++ * Locking is required in the enable path because it is called
++ * from NAPI poll context, where bna_lock is not held, unlike
++ * in the IRQ context.
++ */
++static void
++bnad_enable_txrx_irqs(struct bnad *bnad)
++{
++	struct bna_tcb *tcb;
++	struct bna_ccb *ccb;
++	int i, j;
++	unsigned long flags;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	for (i = 0; i < bnad->num_tx; i++) {
++		for (j = 0; j < bnad->num_txq_per_tx; j++) {
++			tcb = bnad->tx_info[i].tcb[j];
++			bna_ib_coalescing_timer_set(tcb->i_dbell,
++				tcb->txq->ib->ib_config.coalescing_timeo);
++			bna_ib_ack(tcb->i_dbell, 0);
++		}
++	}
++
++	for (i = 0; i < bnad->num_rx; i++) {
++		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
++			ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
++			bnad_enable_rx_irq_unsafe(ccb);
++		}
++	}
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++}
++
++static inline void
++bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
++{
++	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
++
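++	/*
++	 * Refill only once the free count reaches the threshold
++	 * (1 << BNAD_RXQ_REFILL_THRESHOLD_SHIFT); the REFILL bit keeps
++	 * concurrent refills of the same RCB from racing.
++	 */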
++	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
++		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
++			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
++			bnad_alloc_n_post_rxbufs(bnad, rcb);
++		smp_mb__before_clear_bit();
++		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
++	}
++}
++
++static u32
++bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
++{
++	struct bna_cq_entry *cmpl, *next_cmpl;
++	struct bna_rcb *rcb = NULL;
++	unsigned int wi_range, packets = 0, wis = 0;
++	struct bnad_unmap_q *unmap_q;
++	struct sk_buff *skb;
++	u32 flags;
++	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
++	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
++
++	prefetch(bnad->netdev);
++	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
++			    wi_range);
++	BUG_ON(!(wi_range <= ccb->q_depth));
++	while (cmpl->valid && packets < budget) {
++		packets++;
++		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
++
++		if (qid0 == cmpl->rxq_id)
++			rcb = ccb->rcb[0];
++		else
++			rcb = ccb->rcb[1];
++
++		unmap_q = rcb->unmap_q;
++
++		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
++		BUG_ON(!(skb));
++		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
++		pci_unmap_single(bnad->pcidev,
++				 pci_unmap_addr(&unmap_q->
++						unmap_array[unmap_q->
++							    consumer_index],
++						dma_addr),
++						rcb->rxq->buffer_size,
++						PCI_DMA_FROMDEVICE);
++		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
++
++		/* TODO: batching this index update may be more efficient */
++		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
++
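++		/*
++		 * Completions are counted in 'wis' and folded into
++		 * ccb->producer_index when a queue page boundary is
++		 * crossed, and once more after the loop.
++		 */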
++		wis++;
++		if (likely(--wi_range))
++			next_cmpl = cmpl + 1;
++		else {
++			BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
++			wis = 0;
++			BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
++						next_cmpl, wi_range);
++			BUG_ON(!(wi_range <= ccb->q_depth));
++		}
++		prefetch(next_cmpl);
++
++		flags = ntohl(cmpl->flags);
++		if (unlikely
++		    (flags &
++		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
++		      BNA_CQ_EF_TOO_LONG))) {
++			dev_kfree_skb_any(skb);
++			rcb->rxq->rx_packets_with_error++;
++			goto next;
++		}
++
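++		/*
++		 * Trust the checksum only if hardware validated the L4
++		 * (TCP/UDP) checksum and, for IPv4, the header checksum too.
++		 */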
++		skb_put(skb, ntohs(cmpl->length));
++		if (likely
++		    (bnad->rx_csum &&
++		     (((flags & BNA_CQ_EF_IPV4) &&
++		      (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
++		      (flags & BNA_CQ_EF_IPV6)) &&
++		      (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
++		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
++			skb->ip_summed = CHECKSUM_UNNECESSARY;
++		else
++			skb->ip_summed = CHECKSUM_NONE;
++
++		rcb->rxq->rx_packets++;
++		rcb->rxq->rx_bytes += skb->len;
++		skb->protocol = eth_type_trans(skb, bnad->netdev);
++
++		if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
++			struct bnad_rx_ctrl *rx_ctrl =
++				(struct bnad_rx_ctrl *)ccb->ctrl;
++			if (skb->ip_summed == CHECKSUM_UNNECESSARY)
++				vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
++						ntohs(cmpl->vlan_tag), skb);
++			else
++				vlan_hwaccel_receive_skb(skb,
++							 bnad->vlan_grp,
++							 ntohs(cmpl->vlan_tag));
++
++		} else { /* Not VLAN tagged/stripped */
++			struct bnad_rx_ctrl *rx_ctrl =
++				(struct bnad_rx_ctrl *)ccb->ctrl;
++			if (skb->ip_summed == CHECKSUM_UNNECESSARY)
++				napi_gro_receive(&rx_ctrl->napi, skb);
++			else
++				netif_receive_skb(skb);
++		}
++
++next:
++		cmpl->valid = 0;
++		cmpl = next_cmpl;
++	}
++
++	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
++
++	if (likely(ccb)) {
++		bna_ib_ack(ccb->i_dbell, packets);
++		bnad_refill_rxq(bnad, ccb->rcb[0]);
++		if (ccb->rcb[1])
++			bnad_refill_rxq(bnad, ccb->rcb[1]);
++	} else
++		bna_ib_ack(ccb->i_dbell, 0);
++
++	return packets;
++}
++
++static void
++bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
++{
++	bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
++	bna_ib_ack(ccb->i_dbell, 0);
++}
++
++static void
++bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
++{
++	spin_lock_irq(&bnad->bna_lock); /* Because of polling context */
++	bnad_enable_rx_irq_unsafe(ccb);
++	spin_unlock_irq(&bnad->bna_lock);
++}
++
++static void
++bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
++{
++	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
++	if (likely(napi_schedule_prep(&rx_ctrl->napi))) {
++		bnad_disable_rx_irq(bnad, ccb);
++		__napi_schedule(&rx_ctrl->napi);
++	}
++	BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
++}
++
++/* MSIX Rx Path Handler */
++static irqreturn_t
++bnad_msix_rx(int irq, void *data)
++{
++	struct bna_ccb *ccb = (struct bna_ccb *)data;
++	struct bnad *bnad = ccb->bnad;
++
++	bnad_netif_rx_schedule_poll(bnad, ccb);
++
++	return IRQ_HANDLED;
++}
++
++/* Interrupt handlers */
++
++/* Mbox Interrupt Handlers */
++static irqreturn_t
++bnad_msix_mbox_handler(int irq, void *data)
++{
++	u32 intr_status;
++	unsigned long  flags;
++	struct net_device *netdev = data;
++	struct bnad *bnad;
++
++	bnad = netdev_priv(netdev);
++
++	/* BNA_ISR_GET(bnad); Inc Ref count */
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++
++	bna_intr_status_get(&bnad->bna, intr_status);
++
++	if (BNA_IS_MBOX_ERR_INTR(intr_status))
++		bna_mbox_handler(&bnad->bna, intr_status);
++
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	/* BNAD_ISR_PUT(bnad); Dec Ref count */
++	return IRQ_HANDLED;
++}
++
++static irqreturn_t
++bnad_isr(int irq, void *data)
++{
++	int i, j;
++	u32 intr_status;
++	unsigned long flags;
++	struct net_device *netdev = data;
++	struct bnad *bnad = netdev_priv(netdev);
++	struct bnad_rx_info *rx_info;
++	struct bnad_rx_ctrl *rx_ctrl;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++
++	bna_intr_status_get(&bnad->bna, intr_status);
++	if (!intr_status) {
++		spin_unlock_irqrestore(&bnad->bna_lock, flags);
++		return IRQ_NONE;
++	}
++
++	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
++		bna_mbox_handler(&bnad->bna, intr_status);
++		if (!BNA_IS_INTX_DATA_INTR(intr_status)) {
++			spin_unlock_irqrestore(&bnad->bna_lock, flags);
++			goto done;
++		}
++	}
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	/* Process data interrupts */
++	for (i = 0; i < bnad->num_rx; i++) {
++		rx_info = &bnad->rx_info[i];
++		if (!rx_info->rx)
++			continue;
++		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
++			rx_ctrl = &rx_info->rx_ctrl[j];
++			if (rx_ctrl->ccb)
++				bnad_netif_rx_schedule_poll(bnad,
++							    rx_ctrl->ccb);
++		}
++	}
++done:
++	return IRQ_HANDLED;
++}
++
++/*
++ * Called in interrupt / callback context
++ * with bna_lock held, so cfg_flags access is OK
++ */
++static void
++bnad_enable_mbox_irq(struct bnad *bnad)
++{
++	int irq = BNAD_GET_MBOX_IRQ(bnad);
++
++	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
++		return;
++
++	if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
++		enable_irq(irq);
++	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
++}
++
++/*
++ * Called with bnad->bna_lock held because of
++ * bnad->cfg_flags access.
++ */
++void
++bnad_disable_mbox_irq(struct bnad *bnad)
++{
++	int irq = BNAD_GET_MBOX_IRQ(bnad);
++
++	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
++		return;
++
++	if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
++		disable_irq_nosync(irq);
++	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
++}
++
++/* Control Path Handlers */
++
++/* Callbacks */
++void
++bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
++{
++	bnad_enable_mbox_irq(bnad);
++}
++
++void
++bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
++{
++	bnad_disable_mbox_irq(bnad);
++}
++
++void
++bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
++{
++	complete(&bnad->bnad_completions.ioc_comp);
++	bnad->bnad_completions.ioc_comp_status = status;
++}
++
++void
++bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
++{
++	complete(&bnad->bnad_completions.ioc_comp);
++	bnad->bnad_completions.ioc_comp_status = status;
++}
++
++static void
++bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
++{
++	struct bnad *bnad = (struct bnad *)arg;
++
++	complete(&bnad->bnad_completions.port_comp);
++
++	netif_carrier_off(bnad->netdev);
++}
++
++void
++bnad_cb_port_link_status(struct bnad *bnad,
++			enum bna_link_status link_status)
++{
++	bool link_up = 0;
++
++	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
++
++	if (link_status == BNA_CEE_UP) {
++		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
++		BNAD_UPDATE_CTR(bnad, cee_up);
++	} else
++		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
++
++	if (link_up) {
++		if (!netif_carrier_ok(bnad->netdev)) {
++			pr_warn("bna: %s link up\n",
++				bnad->netdev->name);
++			netif_carrier_on(bnad->netdev);
++			BNAD_UPDATE_CTR(bnad, link_toggle);
++			if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) {
++				/* Force an immediate Transmit Schedule */
++				pr_info("bna: %s TX_STARTED\n",
++					bnad->netdev->name);
++				netif_wake_queue(bnad->netdev);
++				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
++			} else {
++				netif_stop_queue(bnad->netdev);
++				BNAD_UPDATE_CTR(bnad, netif_queue_stop);
++			}
++		}
++	} else {
++		if (netif_carrier_ok(bnad->netdev)) {
++			pr_warn("bna: %s link down\n",
++				bnad->netdev->name);
++			netif_carrier_off(bnad->netdev);
++			BNAD_UPDATE_CTR(bnad, link_toggle);
++		}
++	}
++}
++
++static void
++bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
++			enum bna_cb_status status)
++{
++	struct bnad *bnad = (struct bnad *)arg;
++
++	complete(&bnad->bnad_completions.tx_comp);
++}
++
++static void
++bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
++{
++	struct bnad_tx_info *tx_info =
++			(struct bnad_tx_info *)tcb->txq->tx->priv;
++	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
++
++	tx_info->tcb[tcb->id] = tcb;
++	unmap_q->producer_index = 0;
++	unmap_q->consumer_index = 0;
++	unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
++}
++
++static void
++bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
++{
++	struct bnad_tx_info *tx_info =
++			(struct bnad_tx_info *)tcb->txq->tx->priv;
++
++	tx_info->tcb[tcb->id] = NULL;
++}
++
++static void
++bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
++{
++	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
++
++	unmap_q->producer_index = 0;
++	unmap_q->consumer_index = 0;
++	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
++}
++
++static void
++bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
++{
++	struct bnad_rx_info *rx_info =
++			(struct bnad_rx_info *)ccb->cq->rx->priv;
++
++	rx_info->rx_ctrl[ccb->id].ccb = ccb;
++	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
++}
++
++static void
++bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
++{
++	struct bnad_rx_info *rx_info =
++			(struct bnad_rx_info *)ccb->cq->rx->priv;
++
++	rx_info->rx_ctrl[ccb->id].ccb = NULL;
++}
++
++static void
++bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
++{
++	struct bnad_tx_info *tx_info =
++			(struct bnad_tx_info *)tcb->txq->tx->priv;
++
++	if (tx_info != &bnad->tx_info[0])
++		return;
++
++	clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags);
++	netif_stop_queue(bnad->netdev);
++	pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
++}
++
++static void
++bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
++{
++	if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
++		return;
++
++	if (netif_carrier_ok(bnad->netdev)) {
++		pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
++		netif_wake_queue(bnad->netdev);
++		BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
++	}
++}
++
++static void
++bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
++{
++	struct bnad_unmap_q *unmap_q;
++
++	if (!tcb || !tcb->unmap_q)
++		return;
++
++	unmap_q = tcb->unmap_q;
++	if (!unmap_q->unmap_array)
++		return;
++
++	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
++		return;
++
++	bnad_free_all_txbufs(bnad, tcb);
++
++	unmap_q->producer_index = 0;
++	unmap_q->consumer_index = 0;
++
++	smp_mb__before_clear_bit();
++	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
++}
++
++static void
++bnad_cb_rx_cleanup(struct bnad *bnad,
++			struct bna_ccb *ccb)
++{
++	bnad_cq_cmpl_init(bnad, ccb);
++
++	bnad_free_rxbufs(bnad, ccb->rcb[0]);
++	clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
++
++	if (ccb->rcb[1]) {
++		bnad_free_rxbufs(bnad, ccb->rcb[1]);
++		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
++	}
++}
++
++static void
++bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
++{
++	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
++
++	set_bit(BNAD_RXQ_STARTED, &rcb->flags);
++
++	/*
++	 * Now allocate & post buffers for this RCB.
++	 * Note: the allocation happens in callback context.
++	 */
++	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
++		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
++			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
++			bnad_alloc_n_post_rxbufs(bnad, rcb);
++		smp_mb__before_clear_bit();
++		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
++	}
++}
++
++static void
++bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
++			enum bna_cb_status status)
++{
++	struct bnad *bnad = (struct bnad *)arg;
++
++	complete(&bnad->bnad_completions.rx_comp);
++}
++
++static void
++bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
++				enum bna_cb_status status)
++{
++	bnad->bnad_completions.mcast_comp_status = status;
++	complete(&bnad->bnad_completions.mcast_comp);
++}
++
++void
++bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
++		       struct bna_stats *stats)
++{
++	if (status == BNA_CB_SUCCESS)
++		BNAD_UPDATE_CTR(bnad, hw_stats_updates);
++
++	if (!netif_running(bnad->netdev) ||
++		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
++		return;
++
++	mod_timer(&bnad->stats_timer,
++		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
++}
++
++void
++bnad_cb_stats_clr(struct bnad *bnad)
++{
++}
++
++/* Resource allocation, free functions */
++
++static void
++bnad_mem_free(struct bnad *bnad,
++	      struct bna_mem_info *mem_info)
++{
++	int i;
++	dma_addr_t dma_pa;
++
++	if (mem_info->mdl == NULL)
++		return;
++
++	for (i = 0; i < mem_info->num; i++) {
++		if (mem_info->mdl[i].kva != NULL) {
++			if (mem_info->mem_type == BNA_MEM_T_DMA) {
++				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
++						dma_pa);
++				pci_free_consistent(bnad->pcidev,
++						mem_info->mdl[i].len,
++						mem_info->mdl[i].kva, dma_pa);
++			} else
++				kfree(mem_info->mdl[i].kva);
++		}
++	}
++	kfree(mem_info->mdl);
++	mem_info->mdl = NULL;
++}
++
++static int
++bnad_mem_alloc(struct bnad *bnad,
++	       struct bna_mem_info *mem_info)
++{
++	int i;
++	dma_addr_t dma_pa;
++
++	if ((mem_info->num == 0) || (mem_info->len == 0)) {
++		mem_info->mdl = NULL;
++		return 0;
++	}
++
++	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
++				GFP_KERNEL);
++	if (mem_info->mdl == NULL)
++		return -ENOMEM;
++
++	if (mem_info->mem_type == BNA_MEM_T_DMA) {
++		for (i = 0; i < mem_info->num; i++) {
++			mem_info->mdl[i].len = mem_info->len;
++			mem_info->mdl[i].kva =
++				pci_alloc_consistent(bnad->pcidev,
++						mem_info->len, &dma_pa);
++
++			if (mem_info->mdl[i].kva == NULL)
++				goto err_return;
++
++			BNA_SET_DMA_ADDR(dma_pa,
++					 &(mem_info->mdl[i].dma));
++		}
++	} else {
++		for (i = 0; i < mem_info->num; i++) {
++			mem_info->mdl[i].len = mem_info->len;
++			mem_info->mdl[i].kva = kzalloc(mem_info->len,
++							GFP_KERNEL);
++			if (mem_info->mdl[i].kva == NULL)
++				goto err_return;
++		}
++	}
++
++	return 0;
++
++err_return:
++	bnad_mem_free(bnad, mem_info);
++	return -ENOMEM;
++}
++
++/* Free IRQ for Mailbox */
++static void
++bnad_mbox_irq_free(struct bnad *bnad,
++		   struct bna_intr_info *intr_info)
++{
++	int irq;
++	unsigned long flags;
++
++	if (intr_info->idl == NULL)
++		return;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++
++	bnad_disable_mbox_irq(bnad);
++
++	irq = BNAD_GET_MBOX_IRQ(bnad);
++	free_irq(irq, bnad->netdev);
++
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	kfree(intr_info->idl);
++}
++
++/*
++ * Allocates the IRQ for the mailbox, but keeps it disabled.
++ * It will be enabled once we get the mbox enable callback
++ * from bna.
++ */
++static int
++bnad_mbox_irq_alloc(struct bnad *bnad,
++		    struct bna_intr_info *intr_info)
++{
++	int		err;
++	unsigned long	irq_flags, flags;
++	u32		irq;
++	irq_handler_t	irq_handler;
++
++	/* Mbox should use only 1 vector */
++
++	intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
++	if (!intr_info->idl)
++		return -ENOMEM;
++
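++	/*
++	 * 'flags' holds the saved interrupt state for bna_lock;
++	 * request_irq() flags go in 'irq_flags' so that the saved
++	 * state is not clobbered before spin_unlock_irqrestore().
++	 */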
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	if (bnad->cfg_flags & BNAD_CF_MSIX) {
++		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
++		irq = bnad->msix_table[bnad->msix_num - 1].vector;
++		irq_flags = 0;
++		intr_info->intr_type = BNA_INTR_T_MSIX;
++		intr_info->idl[0].vector = bnad->msix_num - 1;
++	} else {
++		irq_handler = (irq_handler_t)bnad_isr;
++		irq = bnad->pcidev->irq;
++		irq_flags = IRQF_SHARED;
++		intr_info->intr_type = BNA_INTR_T_INTX;
++		/* intr_info->idl.vector = 0 ? */
++	}
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
++
++	err = request_irq(irq, irq_handler, irq_flags,
++			  bnad->mbox_irq_name, bnad->netdev);
++	if (err) {
++		kfree(intr_info->idl);
++		intr_info->idl = NULL;
++		return err;
++	}
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bnad_disable_mbox_irq(bnad);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++	return 0;
++}
++
++static void
++bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
++{
++	kfree(intr_info->idl);
++	intr_info->idl = NULL;
++}
++
++/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
++static int
++bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
++		    uint txrx_id, struct bna_intr_info *intr_info)
++{
++	int i, vector_start = 0;
++	u32 cfg_flags;
++	unsigned long flags;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	cfg_flags = bnad->cfg_flags;
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	if (cfg_flags & BNAD_CF_MSIX) {
++		intr_info->intr_type = BNA_INTR_T_MSIX;
++		intr_info->idl = kcalloc(intr_info->num,
++					sizeof(struct bna_intr_descr),
++					GFP_KERNEL);
++		if (!intr_info->idl)
++			return -ENOMEM;
++
++		switch (src) {
++		case BNAD_INTR_TX:
++			vector_start = txrx_id;
++			break;
++
++		case BNAD_INTR_RX:
++			vector_start = bnad->num_tx * bnad->num_txq_per_tx +
++					txrx_id;
++			break;
++
++		default:
++			BUG();
++		}
++
++		for (i = 0; i < intr_info->num; i++)
++			intr_info->idl[i].vector = vector_start + i;
++	} else {
++		intr_info->intr_type = BNA_INTR_T_INTX;
++		intr_info->num = 1;
++		intr_info->idl = kcalloc(intr_info->num,
++					sizeof(struct bna_intr_descr),
++					GFP_KERNEL);
++		if (!intr_info->idl)
++			return -ENOMEM;
++
++		switch (src) {
++		case BNAD_INTR_TX:
++			intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
++			break;
++
++		case BNAD_INTR_RX:
++			intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
++			break;
++		}
++	}
++	return 0;
++}
++
++/**
++ * NOTE: Should be called for MSIX only
++ * Unregisters Tx MSIX vector(s) from the kernel
++ */
++static void
++bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
++			int num_txqs)
++{
++	int i;
++	int vector_num;
++
++	for (i = 0; i < num_txqs; i++) {
++		if (tx_info->tcb[i] == NULL)
++			continue;
++
++		vector_num = tx_info->tcb[i]->intr_vector;
++		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
++	}
++}
++
++/**
++ * NOTE: Should be called for MSIX only
++ * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
++ */
++static int
++bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
++			uint tx_id, int num_txqs)
++{
++	int i;
++	int err;
++	int vector_num;
++
++	for (i = 0; i < num_txqs; i++) {
++		vector_num = tx_info->tcb[i]->intr_vector;
++		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
++				tx_id + tx_info->tcb[i]->id);
++		err = request_irq(bnad->msix_table[vector_num].vector,
++				  (irq_handler_t)bnad_msix_tx, 0,
++				  tx_info->tcb[i]->name,
++				  tx_info->tcb[i]);
++		if (err)
++			goto err_return;
++	}
++
++	return 0;
++
++err_return:
++	if (i > 0)
++		bnad_tx_msix_unregister(bnad, tx_info, i);
++	return -1;
++}
++
++/**
++ * NOTE: Should be called for MSIX only
++ * Unregisters Rx MSIX vector(s) from the kernel
++ */
++static void
++bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
++			int num_rxps)
++{
++	int i;
++	int vector_num;
++
++	for (i = 0; i < num_rxps; i++) {
++		if (rx_info->rx_ctrl[i].ccb == NULL)
++			continue;
++
++		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
++		free_irq(bnad->msix_table[vector_num].vector,
++			 rx_info->rx_ctrl[i].ccb);
++	}
++}
++
++/**
++ * NOTE: Should be called for MSIX only
++ * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
++ */
++static int
++bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
++			uint rx_id, int num_rxps)
++{
++	int i;
++	int err;
++	int vector_num;
++
++	for (i = 0; i < num_rxps; i++) {
++		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
++		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
++			bnad->netdev->name,
++			rx_id + rx_info->rx_ctrl[i].ccb->id);
++		err = request_irq(bnad->msix_table[vector_num].vector,
++				  (irq_handler_t)bnad_msix_rx, 0,
++				  rx_info->rx_ctrl[i].ccb->name,
++				  rx_info->rx_ctrl[i].ccb);
++		if (err)
++			goto err_return;
++	}
++
++	return 0;
++
++err_return:
++	if (i > 0)
++		bnad_rx_msix_unregister(bnad, rx_info, i);
++	return -1;
++}
++
++/* Free Tx object Resources */
++static void
++bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
++{
++	int i;
++
++	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
++		if (res_info[i].res_type == BNA_RES_T_MEM)
++			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
++		else if (res_info[i].res_type == BNA_RES_T_INTR)
++			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
++	}
++}
++
++/* Allocates memory and interrupt resources for Tx object */
++static int
++bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
++		  uint tx_id)
++{
++	int i, err = 0;
++
++	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
++		if (res_info[i].res_type == BNA_RES_T_MEM)
++			err = bnad_mem_alloc(bnad,
++					&res_info[i].res_u.mem_info);
++		else if (res_info[i].res_type == BNA_RES_T_INTR)
++			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
++					&res_info[i].res_u.intr_info);
++		if (err)
++			goto err_return;
++	}
++	return 0;
++
++err_return:
++	bnad_tx_res_free(bnad, res_info);
++	return err;
++}
++
++/* Free Rx object Resources */
++static void
++bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
++{
++	int i;
++
++	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
++		if (res_info[i].res_type == BNA_RES_T_MEM)
++			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
++		else if (res_info[i].res_type == BNA_RES_T_INTR)
++			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
++	}
++}
++
++/* Allocates memory and interrupt resources for Rx object */
++static int
++bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
++		  uint rx_id)
++{
++	int i, err = 0;
++
++	/* All memory needs to be allocated before setup_ccbs */
++	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
++		if (res_info[i].res_type == BNA_RES_T_MEM)
++			err = bnad_mem_alloc(bnad,
++					&res_info[i].res_u.mem_info);
++		else if (res_info[i].res_type == BNA_RES_T_INTR)
++			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
++					&res_info[i].res_u.intr_info);
++		if (err)
++			goto err_return;
++	}
++	return 0;
++
++err_return:
++	bnad_rx_res_free(bnad, res_info);
++	return err;
++}
++
++/* Timer callbacks */
++/* a) IOC timer */
++static void
++bnad_ioc_timeout(unsigned long data)
++{
++	struct bnad *bnad = (struct bnad *)data;
++	unsigned long flags;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bfa_ioc_timeout((void *) &bnad->bna.device.ioc);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++}
++
++static void
++bnad_ioc_hb_check(unsigned long data)
++{
++	struct bnad *bnad = (struct bnad *)data;
++	unsigned long flags;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bfa_ioc_hb_check((void *) &bnad->bna.device.ioc);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++}
++
++static void
++bnad_ioc_sem_timeout(unsigned long data)
++{
++	struct bnad *bnad = (struct bnad *)data;
++	unsigned long flags;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bfa_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++}
++
++/*
++ * All timer routines use bnad->bna_lock to protect against
++ * the following race, which may occur in case of no locking:
++ * 	Time	CPU m  		CPU n
++ *	0       1 = test_bit
++ *	1			clear_bit
++ *	2			del_timer_sync
++ *	3	mod_timer
++ */
++
++/* b) Dynamic Interrupt Moderation Timer */
++static void
++bnad_dim_timeout(unsigned long data)
++{
++	struct bnad *bnad = (struct bnad *)data;
++	struct bnad_rx_info *rx_info;
++	struct bnad_rx_ctrl *rx_ctrl;
++	int i, j;
++	unsigned long flags;
++
++	if (!netif_carrier_ok(bnad->netdev))
++		return;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	for (i = 0; i < bnad->num_rx; i++) {
++		rx_info = &bnad->rx_info[i];
++		if (!rx_info->rx)
++			continue;
++		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
++			rx_ctrl = &rx_info->rx_ctrl[j];
++			if (!rx_ctrl->ccb)
++				continue;
++			bna_rx_dim_update(rx_ctrl->ccb);
++		}
++	}
++
++	/* Checking BNAD_RF_DIM_TIMER_RUNNING does not eliminate the race above */
++	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
++		mod_timer(&bnad->dim_timer,
++			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++}
++
++/* c)  Statistics Timer */
++static void
++bnad_stats_timeout(unsigned long data)
++{
++	struct bnad *bnad = (struct bnad *)data;
++	unsigned long flags;
++
++	if (!netif_running(bnad->netdev) ||
++		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
++		return;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bna_stats_get(&bnad->bna);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++}
++
++/*
++ * Set up timer for DIM
++ * Called with bnad->bna_lock held
++ */
++void
++bnad_dim_timer_start(struct bnad *bnad)
++{
++	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
++	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
++		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
++			    (unsigned long)bnad);
++		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
++		mod_timer(&bnad->dim_timer,
++			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
++	}
++}
++
++/*
++ * Set up timer for statistics
++ * Called with mutex_lock(&bnad->conf_mutex) held
++ */
++static void
++bnad_stats_timer_start(struct bnad *bnad)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
++		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
++			    (unsigned long)bnad);
++		mod_timer(&bnad->stats_timer,
++			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
++	}
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++}
++
++/*
++ * Stops the stats timer
++ * Called with mutex_lock(&bnad->conf_mutex) held
++ */
++static void
++bnad_stats_timer_stop(struct bnad *bnad)
++{
++	int to_del = 0;
++	unsigned long flags;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
++		to_del = 1;
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++	if (to_del)
++		del_timer_sync(&bnad->stats_timer);
++}
++
++/* Utilities */
++
++static void
++bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
++{
++	int i = 1; /* Index 0 has broadcast address */
++	struct dev_mc_list *mc_addr;
++
++	netdev_for_each_mc_addr(mc_addr, netdev) {
++		memcpy(&mc_list[i * ETH_ALEN], &mc_addr->dmi_addr[0],
++							ETH_ALEN);
++		i++;
++	}
++}
++
++static int
++bnad_napi_poll_rx(struct napi_struct *napi, int budget)
++{
++	struct bnad_rx_ctrl *rx_ctrl =
++		container_of(napi, struct bnad_rx_ctrl, napi);
++	struct bna_ccb *ccb;
++	struct bnad *bnad;
++	int rcvd = 0;
++
++	ccb = rx_ctrl->ccb;
++
++	bnad = ccb->bnad;
++
++	if (!netif_carrier_ok(bnad->netdev))
++		goto poll_exit;
++
++	rcvd = bnad_poll_cq(bnad, ccb, budget);
++	if (rcvd == budget)
++		return rcvd;
++
++poll_exit:
++	napi_complete(napi);
++
++	BNAD_UPDATE_CTR(bnad, netif_rx_complete);
++
++	bnad_enable_rx_irq(bnad, ccb);
++	return rcvd;
++}
++
++static int
++bnad_napi_poll_txrx(struct napi_struct *napi, int budget)
++{
++	struct bnad_rx_ctrl *rx_ctrl =
++		container_of(napi, struct bnad_rx_ctrl, napi);
++	struct bna_ccb *ccb;
++	struct bnad *bnad;
++	int		rcvd = 0;
++	int		i, j;
++
++	ccb = rx_ctrl->ccb;
++
++	bnad = ccb->bnad;
++
++	if (!netif_carrier_ok(bnad->netdev))
++		goto poll_exit;
++
++	/* Handle Tx Completions, if any */
++	for (i = 0; i < bnad->num_tx; i++) {
++		for (j = 0; j < bnad->num_txq_per_tx; j++)
++			bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
++	}
++
++	/* Handle Rx Completions */
++	rcvd = bnad_poll_cq(bnad, ccb, budget);
++	if (rcvd == budget)
++		return rcvd;
++poll_exit:
++	napi_complete(napi);
++
++	BNAD_UPDATE_CTR(bnad, netif_rx_complete);
++
++	bnad_enable_txrx_irqs(bnad);
++	return rcvd;
++}
++
++static void
++bnad_napi_enable(struct bnad *bnad, u32 rx_id)
++{
++	int (*napi_poll) (struct napi_struct *, int);
++	struct bnad_rx_ctrl *rx_ctrl;
++	int i;
++	unsigned long flags;
++
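++	/*
++	 * With MSIX, Tx completions have dedicated vectors, so NAPI
++	 * polls Rx only; with INTx a single poll routine must service
++	 * both Tx and Rx.
++	 */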
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	if (bnad->cfg_flags & BNAD_CF_MSIX)
++		napi_poll = bnad_napi_poll_rx;
++	else
++		napi_poll = bnad_napi_poll_txrx;
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	/* Initialize & enable NAPI */
++	for (i = 0; i <	bnad->num_rxp_per_rx; i++) {
++		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
++		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
++			       napi_poll, 64);
++		napi_enable(&rx_ctrl->napi);
++	}
++}
++
++static void
++bnad_napi_disable(struct bnad *bnad, u32 rx_id)
++{
++	int i;
++
++	/* First disable and then clean up */
++	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
++		napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
++		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
++	}
++}
++
++/* Called with mutex_lock(&bnad->conf_mutex) held */
++void
++bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
++{
++	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
++	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
++	unsigned long flags;
++
++	if (!tx_info->tx)
++		return;
++
++	init_completion(&bnad->bnad_completions.tx_comp);
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++	wait_for_completion(&bnad->bnad_completions.tx_comp);
++
++	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
++		bnad_tx_msix_unregister(bnad, tx_info,
++			bnad->num_txq_per_tx);
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bna_tx_destroy(tx_info->tx);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	tx_info->tx = NULL;
++
++	if (0 == tx_id)
++		tasklet_kill(&bnad->tx_free_tasklet);
++
++	bnad_tx_res_free(bnad, res_info);
++}
++
++/* Called with mutex_lock(&bnad->conf_mutex) held */
++int
++bnad_setup_tx(struct bnad *bnad, uint tx_id)
++{
++	int err;
++	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
++	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
++	struct bna_intr_info *intr_info =
++			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
++	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
++	struct bna_tx_event_cbfn tx_cbfn;
++	struct bna_tx *tx;
++	unsigned long flags;
++
++	/* Initialize the Tx object configuration */
++	tx_config->num_txq = bnad->num_txq_per_tx;
++	tx_config->txq_depth = bnad->txq_depth;
++	tx_config->tx_type = BNA_TX_T_REGULAR;
++
++	/* Initialize the tx event handlers */
++	tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
++	tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
++	tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
++	tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
++	tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
++
++	/* Get BNA's resource requirement for one tx object */
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bna_tx_res_req(bnad->num_txq_per_tx,
++		bnad->txq_depth, res_info);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	/* Fill Unmap Q memory requirements */
++	BNAD_FILL_UNMAPQ_MEM_REQ(
++			&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
++			bnad->num_txq_per_tx,
++			BNAD_TX_UNMAPQ_DEPTH);
++
++	/* Allocate resources */
++	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
++	if (err)
++		return err;
++
++	/* Ask BNA to create one Tx object, supplying required resources */
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
++			tx_info);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++	if (!tx)
++		goto err_return;
++	tx_info->tx = tx;
++
++	/* Register ISR for the Tx object */
++	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
++		err = bnad_tx_msix_register(bnad, tx_info,
++			tx_id, bnad->num_txq_per_tx);
++		if (err)
++			goto err_return;
++	}
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bna_tx_enable(tx);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	return 0;
++
++err_return:
++	bnad_tx_res_free(bnad, res_info);
++	return err;
++}
++
++/*
++ * Set up the Rx config for bna_rx_create;
++ * bnad decides the configuration.
++ */
++static void
++bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
++{
++	rx_config->rx_type = BNA_RX_T_REGULAR;
++	rx_config->num_paths = bnad->num_rxp_per_rx;
++
++	if (bnad->num_rxp_per_rx > 1) {
++		rx_config->rss_status = BNA_STATUS_T_ENABLED;
++		rx_config->rss_config.hash_type =
++				(BFI_RSS_T_V4_TCP |
++				 BFI_RSS_T_V6_TCP |
++				 BFI_RSS_T_V4_IP  |
++				 BFI_RSS_T_V6_IP);
++		rx_config->rss_config.hash_mask =
++				bnad->num_rxp_per_rx - 1;
++		get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
++			sizeof(rx_config->rss_config.toeplitz_hash_key));
++	} else {
++		rx_config->rss_status = BNA_STATUS_T_DISABLED;
++		memset(&rx_config->rss_config, 0,
++		       sizeof(rx_config->rss_config));
++	}
++	rx_config->rxp_type = BNA_RXP_SLR;
++	rx_config->q_depth = bnad->rxq_depth;
++
++	rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
++
++	rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
++}
++
++/* Called with mutex_lock(&bnad->conf_mutex) held */
++void
++bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
++{
++	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
++	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
++	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
++	unsigned long flags;
++	int dim_timer_del = 0;
++
++	if (!rx_info->rx)
++		return;
++
++	if (0 == rx_id) {
++		spin_lock_irqsave(&bnad->bna_lock, flags);
++		dim_timer_del = bnad_dim_timer_running(bnad);
++		if (dim_timer_del)
++			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
++		spin_unlock_irqrestore(&bnad->bna_lock, flags);
++		if (dim_timer_del)
++			del_timer_sync(&bnad->dim_timer);
++	}
++
++	bnad_napi_disable(bnad, rx_id);
++
++	init_completion(&bnad->bnad_completions.rx_comp);
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++	wait_for_completion(&bnad->bnad_completions.rx_comp);
++
++	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
++		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bna_rx_destroy(rx_info->rx);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	rx_info->rx = NULL;
++
++	bnad_rx_res_free(bnad, res_info);
++}
++
++/* Called with mutex_lock(&bnad->conf_mutex) held */
++int
++bnad_setup_rx(struct bnad *bnad, uint rx_id)
++{
++	int err;
++	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
++	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
++	struct bna_intr_info *intr_info =
++			&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
++	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
++	struct bna_rx_event_cbfn rx_cbfn;
++	struct bna_rx *rx;
++	unsigned long flags;
++
++	/* Initialize the Rx object configuration */
++	bnad_init_rx_config(bnad, rx_config);
++
++	/* Initialize the Rx event handlers */
++	rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
++	rx_cbfn.rcb_destroy_cbfn = NULL;
++	rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
++	rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
++	rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
++	rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
++
++	/* Get BNA's resource requirement for one Rx object */
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bna_rx_res_req(rx_config, res_info);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	/* Fill Unmap Q memory requirements */
++	BNAD_FILL_UNMAPQ_MEM_REQ(
++			&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
++			rx_config->num_paths +
++			((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
++				rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
++
++	/* Allocate resource */
++	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
++	if (err)
++		return err;
++
++	/* Ask BNA to create one Rx object, supplying required resources */
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
++			rx_info);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++	if (!rx)
++		goto err_return;
++	rx_info->rx = rx;
++
++	/* Register ISR for the Rx object */
++	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
++		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
++						rx_config->num_paths);
++		if (err)
++			goto err_return;
++	}
++
++	/* Enable NAPI */
++	bnad_napi_enable(bnad, rx_id);
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	if (0 == rx_id) {
++		/* Set up Dynamic Interrupt Moderation Vector */
++		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
++			bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
++
++		/* Enable VLAN filtering only on the default Rx */
++		bna_rx_vlanfilter_enable(rx);
++
++		/* Start the DIM timer */
++		bnad_dim_timer_start(bnad);
++	}
++
++	bna_rx_enable(rx);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	return 0;
++
++err_return:
++	bnad_cleanup_rx(bnad, rx_id);
++	return err;
++}
++
++/* Called with conf_lock & bnad->bna_lock held */
++void
++bnad_tx_coalescing_timeo_set(struct bnad *bnad)
++{
++	struct bnad_tx_info *tx_info;
++
++	tx_info = &bnad->tx_info[0];
++	if (!tx_info->tx)
++		return;
++
++	bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
++}
++
++/* Called with conf_lock & bnad->bna_lock held */
++void
++bnad_rx_coalescing_timeo_set(struct bnad *bnad)
++{
++	struct bnad_rx_info *rx_info;
++	int	i;
++
++	for (i = 0; i < bnad->num_rx; i++) {
++		rx_info = &bnad->rx_info[i];
++		if (!rx_info->rx)
++			continue;
++		bna_rx_coalescing_timeo_set(rx_info->rx,
++				bnad->rx_coalescing_timeo);
++	}
++}
++
++/*
++ * Called with bnad->bna_lock held
++ */
++static int
++bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
++{
++	int ret;
++
++	if (!is_valid_ether_addr(mac_addr))
++		return -EADDRNOTAVAIL;
++
++	/* If datapath is down, pretend everything went through */
++	if (!bnad->rx_info[0].rx)
++		return 0;
++
++	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
++	if (ret != BNA_CB_SUCCESS)
++		return -EADDRNOTAVAIL;
++
++	return 0;
++}
++
++/* Should be called with conf_lock held */
++static int
++bnad_enable_default_bcast(struct bnad *bnad)
++{
++	struct bnad_rx_info *rx_info = &bnad->rx_info[0];
++	int ret;
++	unsigned long flags;
++
++	init_completion(&bnad->bnad_completions.mcast_comp);
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
++				bnad_cb_rx_mcast_add);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	if (ret == BNA_CB_SUCCESS)
++		wait_for_completion(&bnad->bnad_completions.mcast_comp);
++	else
++		return -ENODEV;
++
++	if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
++		return -ENODEV;
++
++	return 0;
++}
++
++/* Statistics utilities */
++void
++bnad_netdev_qstats_fill(struct bnad *bnad)
++{
++	struct net_device_stats *net_stats = &bnad->net_stats;
++	int i, j;
++
++	for (i = 0; i < bnad->num_rx; i++) {
++		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
++			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
++				net_stats->rx_packets += bnad->rx_info[i].
++				rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
++				net_stats->rx_bytes += bnad->rx_info[i].
++					rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
++				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
++					bnad->rx_info[i].rx_ctrl[j].ccb->
++					rcb[1]->rxq) {
++					net_stats->rx_packets +=
++						bnad->rx_info[i].rx_ctrl[j].
++						ccb->rcb[1]->rxq->rx_packets;
++					net_stats->rx_bytes +=
++						bnad->rx_info[i].rx_ctrl[j].
++						ccb->rcb[1]->rxq->rx_bytes;
++				}
++			}
++		}
++	}
++	for (i = 0; i < bnad->num_tx; i++) {
++		for (j = 0; j < bnad->num_txq_per_tx; j++) {
++			if (bnad->tx_info[i].tcb[j]) {
++				net_stats->tx_packets +=
++				bnad->tx_info[i].tcb[j]->txq->tx_packets;
++				net_stats->tx_bytes +=
++					bnad->tx_info[i].tcb[j]->txq->tx_bytes;
++			}
++		}
++	}
++}
++
++/*
++ * Must be called with the bna_lock held.
++ */
++void
++bnad_netdev_hwstats_fill(struct bnad *bnad)
++{
++	struct bfi_ll_stats_mac *mac_stats;
++	struct net_device_stats *net_stats = &bnad->net_stats;
++	u64 bmap;
++	int i;
++
++	mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
++	net_stats->rx_errors =
++		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
++		mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
++		mac_stats->rx_undersize;
++	net_stats->tx_errors = mac_stats->tx_fcs_error +
++					mac_stats->tx_undersize;
++	net_stats->rx_dropped = mac_stats->rx_drop;
++	net_stats->tx_dropped = mac_stats->tx_drop;
++	net_stats->multicast = mac_stats->rx_multicast;
++	net_stats->collisions = mac_stats->tx_total_collision;
++
++	net_stats->rx_length_errors = mac_stats->rx_frame_length_error;
++
++	/* No hardware counter is available for receive ring buffer overflow */
++
++	net_stats->rx_crc_errors = mac_stats->rx_fcs_error;
++	net_stats->rx_frame_errors = mac_stats->rx_alignment_error;
++	/* Receiver FIFO overrun: report frame drops of the first active RxF */
++	bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
++		((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
++	for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
++		if (bmap & 1) {
++			net_stats->rx_fifo_errors =
++				bnad->stats.bna_stats->
++					hw_stats->rxf_stats[i].frame_drops;
++			break;
++		}
++		bmap >>= 1;
++	}
++}
++
++static void
++bnad_mbox_irq_sync(struct bnad *bnad)
++{
++	u32 irq;
++	unsigned long flags;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	if (bnad->cfg_flags & BNAD_CF_MSIX)
++		irq = bnad->msix_table[bnad->msix_num - 1].vector;
++	else
++		irq = bnad->pcidev->irq;
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	synchronize_irq(irq);
++}
++
++/* Utility used by bnad_start_xmit, for doing TSO */
++static int
++bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
++{
++	int err;
++
++	/* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 are defined since 2.6.18. */
++	BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
++		   skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
++	if (skb_header_cloned(skb)) {
++		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
++		if (err) {
++			BNAD_UPDATE_CTR(bnad, tso_err);
++			return err;
++		}
++	}
++
++	/*
++	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
++	 * excluding the length field.
++	 */
++	if (skb->protocol == htons(ETH_P_IP)) {
++		struct iphdr *iph = ip_hdr(skb);
++
++		/*
++		 * Zero out tot_len and check; the hardware recomputes
++		 * them for each generated segment.
++		 */
++		iph->tot_len = 0;
++		iph->check = 0;
++
++		tcp_hdr(skb)->check =
++			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
++					   IPPROTO_TCP, 0);
++		BNAD_UPDATE_CTR(bnad, tso4);
++	} else {
++		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
++
++		BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
++		ipv6h->payload_len = 0;
++		tcp_hdr(skb)->check =
++			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
++					 IPPROTO_TCP, 0);
++		BNAD_UPDATE_CTR(bnad, tso6);
++	}
++
++	return 0;
++}
++
++/*
++ * Initialize Q numbers depending on Rx Paths
++ * Called with bnad->bna_lock held, because of cfg_flags
++ * access.
++ */
++static void
++bnad_q_num_init(struct bnad *bnad)
++{
++	int rxps;
++
++	rxps = min((uint)num_online_cpus(),
++			(uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
++
++	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
++		rxps = 1;	/* INTx */
++
++	bnad->num_rx = 1;
++	bnad->num_tx = 1;
++	bnad->num_rxp_per_rx = rxps;
++	bnad->num_txq_per_tx = BNAD_TXQ_NUM;
++}
++
++/*
++ * Adjusts the Q numbers, given a number of MSIX vectors.
++ * RSS is preferred over Tx priority queues; in that case just
++ * one Tx Q is used.
++ * Called with bnad->bna_lock held because of cfg_flags access.
++ */
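++/*
++ * Example (assuming a single mailbox vector): with 8 MSIX vectors,
++ * 1 TxQ and 1 mailbox vector leave 6 Rx paths.
++ */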
++static void
++bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
++{
++	bnad->num_txq_per_tx = 1;
++	if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
++	     bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
++	    (bnad->cfg_flags & BNAD_CF_MSIX)) {
++		bnad->num_rxp_per_rx = msix_vectors -
++			(bnad->num_tx * bnad->num_txq_per_tx) -
++			BNAD_MAILBOX_MSIX_VECTORS;
++	} else
++		bnad->num_rxp_per_rx = 1;
++}
++
++static void
++bnad_set_netdev_perm_addr(struct bnad *bnad)
++{
++	struct net_device *netdev = bnad->netdev;
++
++	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
++	if (is_zero_ether_addr(netdev->dev_addr))
++		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
++}
++
++/* Enable / disable device */
++static void
++bnad_device_disable(struct bnad *bnad)
++{
++	unsigned long flags;
++
++	init_completion(&bnad->bnad_completions.ioc_comp);
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	wait_for_completion(&bnad->bnad_completions.ioc_comp);
++}
++
++static int
++bnad_device_enable(struct bnad *bnad)
++{
++	int err = 0;
++	unsigned long flags;
++
++	init_completion(&bnad->bnad_completions.ioc_comp);
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bna_device_enable(&bnad->bna.device);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	wait_for_completion(&bnad->bnad_completions.ioc_comp);
++
++	if (bnad->bnad_completions.ioc_comp_status)
++		err = bnad->bnad_completions.ioc_comp_status;
++
++	return err;
++}
++
++/* Free BNA resources */
++static void
++bnad_res_free(struct bnad *bnad)
++{
++	int i;
++	struct bna_res_info *res_info = &bnad->res_info[0];
++
++	for (i = 0; i < BNA_RES_T_MAX; i++) {
++		if (res_info[i].res_type == BNA_RES_T_MEM)
++			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
++		else
++			bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
++	}
++}
++
++/* Allocates memory and interrupt resources for BNA */
++static int
++bnad_res_alloc(struct bnad *bnad)
++{
++	int i, err;
++	struct bna_res_info *res_info = &bnad->res_info[0];
++
++	for (i = 0; i < BNA_RES_T_MAX; i++) {
++		if (res_info[i].res_type == BNA_RES_T_MEM)
++			err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
++		else
++			err = bnad_mbox_irq_alloc(bnad,
++						  &res_info[i].res_u.intr_info);
++		if (err)
++			goto err_return;
++	}
++	return 0;
++
++err_return:
++	bnad_res_free(bnad);
++	return err;
++}
++
++/* Interrupt enable / disable */
++static void
++bnad_enable_msix(struct bnad *bnad)
++{
++	int i, ret;
++	u32 tot_msix_num;
++	unsigned long flags;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
++		spin_unlock_irqrestore(&bnad->bna_lock, flags);
++		return;
++	}
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	if (bnad->msix_table)
++		return;
++
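++	/*
++	 * Vector layout: TxQ vectors first, then RxP vectors, with the
++	 * mailbox vector last (see bnad_txrx_irq_alloc()).
++	 */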
++	tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
++
++	bnad->msix_table =
++		kcalloc(tot_msix_num, sizeof(struct msix_entry), GFP_KERNEL);
++
++	if (!bnad->msix_table)
++		goto intx_mode;
++
++	for (i = 0; i < tot_msix_num; i++)
++		bnad->msix_table[i].entry = i;
++
++	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, tot_msix_num);
++	if (ret > 0) {
++		/* Not enough MSI-X vectors. */
++
++		spin_lock_irqsave(&bnad->bna_lock, flags);
++		/* ret = #of vectors that we got */
++		bnad_q_num_adjust(bnad, ret);
++		spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++		bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
++			+ (bnad->num_rx
++			* bnad->num_rxp_per_rx) +
++			 BNAD_MAILBOX_MSIX_VECTORS;
++		tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
++
++		/* Try once more with adjusted numbers */
++		/* If this fails, fall back to INTx */
++		ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
++				      tot_msix_num);
++		if (ret)
++			goto intx_mode;
++
++	} else if (ret < 0)
++		goto intx_mode;
++	return;
++
++intx_mode:
++
++	kfree(bnad->msix_table);
++	bnad->msix_table = NULL;
++	bnad->msix_num = 0;
++	bnad->msix_diag_num = 0;
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bnad->cfg_flags &= ~BNAD_CF_MSIX;
++	bnad_q_num_init(bnad);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++}
++
++static void
++bnad_disable_msix(struct bnad *bnad)
++{
++	u32 cfg_flags;
++	unsigned long flags;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	cfg_flags = bnad->cfg_flags;
++	if (bnad->cfg_flags & BNAD_CF_MSIX)
++		bnad->cfg_flags &= ~BNAD_CF_MSIX;
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	if (cfg_flags & BNAD_CF_MSIX) {
++		pci_disable_msix(bnad->pcidev);
++		kfree(bnad->msix_table);
++		bnad->msix_table = NULL;
++	}
++}
++
++/* Netdev entry points */
++static int
++bnad_open(struct net_device *netdev)
++{
++	int err;
++	struct bnad *bnad = netdev_priv(netdev);
++	struct bna_pause_config pause_config;
++	int mtu;
++	unsigned long flags;
++
++	mutex_lock(&bnad->conf_mutex);
++
++	/* Tx */
++	err = bnad_setup_tx(bnad, 0);
++	if (err)
++		goto err_return;
++
++	/* Rx */
++	err = bnad_setup_rx(bnad, 0);
++	if (err)
++		goto cleanup_tx;
++
++	/* Port */
++	pause_config.tx_pause = 0;
++	pause_config.rx_pause = 0;
++
++	mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
++	bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
++	bna_port_enable(&bnad->bna.port);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	/* Enable broadcast */
++	bnad_enable_default_bcast(bnad);
++
++	/* Set the UCAST address */
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	/* Start the stats timer */
++	bnad_stats_timer_start(bnad);
++
++	mutex_unlock(&bnad->conf_mutex);
++
++	return 0;
++
++cleanup_tx:
++	bnad_cleanup_tx(bnad, 0);
++
++err_return:
++	mutex_unlock(&bnad->conf_mutex);
++	return err;
++}
++
++static int
++bnad_stop(struct net_device *netdev)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++	unsigned long flags;
++
++	mutex_lock(&bnad->conf_mutex);
++
++	/* Stop the stats timer */
++	bnad_stats_timer_stop(bnad);
++
++	init_completion(&bnad->bnad_completions.port_comp);
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
++			bnad_cb_port_disabled);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	wait_for_completion(&bnad->bnad_completions.port_comp);
++
++	bnad_cleanup_tx(bnad, 0);
++	bnad_cleanup_rx(bnad, 0);
++
++	/* Synchronize mailbox IRQ */
++	bnad_mbox_irq_sync(bnad);
++
++	mutex_unlock(&bnad->conf_mutex);
++
++	return 0;
++}
++
++/* TX */
++/*
++ * bnad_start_xmit : Netdev entry point for Transmit
++ *		     Called under lock held by net_device
++ */
++static netdev_tx_t
++bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++
++	u16		txq_prod, vlan_tag = 0;
++	u32		unmap_prod, wis, wis_used, wi_range;
++	u32		vectors, vect_id, i, acked;
++	u32		tx_id;
++	int		err;
++
++	struct bnad_tx_info *tx_info;
++	struct bna_tcb *tcb;
++	struct bnad_unmap_q *unmap_q;
++	dma_addr_t	dma_addr;
++	struct bna_txq_entry *txqent;
++	bna_txq_wi_ctrl_flag_t	flags;
++
++	if (unlikely
++	    (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
++		dev_kfree_skb(skb);
++		return NETDEV_TX_OK;
++	}
++
++	/*
++	 * Drops a Tx that was scheduled in the window between the
++	 * BNAD_RF_TX_STARTED flag being cleared and the
++	 * netif_stop_queue() call.
++	 */
++	if (unlikely(!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))) {
++		dev_kfree_skb(skb);
++		return NETDEV_TX_OK;
++	}
++
++	tx_id = 0;
++
++	tx_info = &bnad->tx_info[tx_id];
++	tcb = tx_info->tcb[tx_id];
++	unmap_q = tcb->unmap_q;
++
++	vectors = 1 + skb_shinfo(skb)->nr_frags;
++	if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
++		dev_kfree_skb(skb);
++		return NETDEV_TX_OK;
++	}
++	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
++	acked = 0;
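++	/*
++	 * Not enough room in the TxQ or the unmap queue: try to reclaim
++	 * completed buffers; failing that, stop the queue.
++	 */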
++	if (unlikely
++	    (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
++	     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
++		if ((u16) (*tcb->hw_consumer_index) !=
++		    tcb->consumer_index &&
++		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
++			acked = bnad_free_txbufs(bnad, tcb);
++			bna_ib_ack(tcb->i_dbell, acked);
++			smp_mb__before_clear_bit();
++			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
++		} else {
++			netif_stop_queue(netdev);
++			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
++		}
++
++		smp_mb();
++		/*
++		 * Check again to deal with race condition between
++		 * netif_stop_queue here, and netif_wake_queue in
++		 * interrupt handler which is not inside netif tx lock.
++		 */
++		if (likely
++		    (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
++		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
++			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
++			return NETDEV_TX_BUSY;
++		} else {
++			netif_wake_queue(netdev);
++			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
++		}
++	}
++
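++	/* There is room in the queues; build the work item(s) for this skb */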
++	unmap_prod = unmap_q->producer_index;
++	wis_used = 1;
++	vect_id = 0;
++	flags = 0;
++
++	txq_prod = tcb->producer_index;
++	BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
++	BUG_ON(!(wi_range <= tcb->q_depth));
++	txqent->hdr.wi.reserved = 0;
++	txqent->hdr.wi.num_vectors = vectors;
++	txqent->hdr.wi.opcode =
++		htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
++		       BNA_TXQ_WI_SEND));
++
++	if (bnad->vlan_grp && vlan_tx_tag_present(skb)) {
++		vlan_tag = (u16) vlan_tx_tag_get(skb);
++		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
++	}
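++	/*
++	 * With CEE running, force the 802.1p priority bits to the TxQ
++	 * priority while keeping the CFI and 12-bit VLAN ID intact.
++	 */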
++	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
++		vlan_tag =
++			(tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
++		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
++	}
++
++	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
++
++	if (skb_is_gso(skb)) {
++		err = bnad_tso_prepare(bnad, skb);
++		if (err) {
++			dev_kfree_skb(skb);
++			return NETDEV_TX_OK;
++		}
++		txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
++		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
++		txqent->hdr.wi.l4_hdr_size_n_offset =
++			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
++			      (tcp_hdrlen(skb) >> 2,
++			       skb_transport_offset(skb)));
++	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
++		u8 proto = 0;
++
++		txqent->hdr.wi.lso_mss = 0;
++
++		if (skb->protocol == htons(ETH_P_IP))
++			proto = ip_hdr(skb)->protocol;
++		else if (skb->protocol == htons(ETH_P_IPV6)) {
++			/* nexthdr may not be TCP immediately. */
++			proto = ipv6_hdr(skb)->nexthdr;
++		}
++		if (proto == IPPROTO_TCP) {
++			flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
++			txqent->hdr.wi.l4_hdr_size_n_offset =
++				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
++				      (0, skb_transport_offset(skb)));
++
++			BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
++
++			BUG_ON(!(skb_headlen(skb) >=
++				skb_transport_offset(skb) + tcp_hdrlen(skb)));
++
++		} else if (proto == IPPROTO_UDP) {
++			flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
++			txqent->hdr.wi.l4_hdr_size_n_offset =
++				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
++				      (0, skb_transport_offset(skb)));
++
++			BNAD_UPDATE_CTR(bnad, udpcsum_offload);
++
++			BUG_ON(!(skb_headlen(skb) >=
++				   skb_transport_offset(skb) +
++				   sizeof(struct udphdr)));
++		} else {
++			err = skb_checksum_help(skb);
++			BNAD_UPDATE_CTR(bnad, csum_help);
++			if (err) {
++				dev_kfree_skb(skb);
++				BNAD_UPDATE_CTR(bnad, csum_help_err);
++				return NETDEV_TX_OK;
++			}
++		}
++	} else {
++		txqent->hdr.wi.lso_mss = 0;
++		txqent->hdr.wi.l4_hdr_size_n_offset = 0;
++	}
++
++	txqent->hdr.wi.flags = htons(flags);
++
++	txqent->hdr.wi.frame_length = htonl(skb->len);
++
++	unmap_q->unmap_array[unmap_prod].skb = skb;
++	BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
++	txqent->vector[vect_id].length = htons(skb_headlen(skb));
++	dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
++		PCI_DMA_TODEVICE);
++	pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
++			   dma_addr);
++
++	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
++	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
++
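++	/*
++	 * Map each skb fragment into its own Tx vector, starting a new
++	 * (extension) work item after every BFI_TX_MAX_VECTORS_PER_WI
++	 * vectors.
++	 */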
++	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
++		u32		size = frag->size;
++
++		if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
++			vect_id = 0;
++			if (--wi_range)
++				txqent++;
++			else {
++				BNA_QE_INDX_ADD(txq_prod, wis_used,
++						tcb->q_depth);
++				wis_used = 0;
++				BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
++						     txqent, wi_range);
++				BUG_ON(!(wi_range <= tcb->q_depth));
++			}
++			wis_used++;
++			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
++		}
++
++		BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
++		txqent->vector[vect_id].length = htons(size);
++		dma_addr =
++			pci_map_page(bnad->pcidev, frag->page,
++				     frag->page_offset, size,
++				     PCI_DMA_TODEVICE);
++		pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
++				   dma_addr);
++		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
++		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
++	}
++
++	unmap_q->producer_index = unmap_prod;
++	BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
++	tcb->producer_index = txq_prod;
++
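++	/*
++	 * Make the updated producer index visible before ringing the
++	 * TxQ doorbell.
++	 */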
++	smp_mb();
++	bna_txq_prod_indx_doorbell(tcb);
++
++	if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
++		tasklet_schedule(&bnad->tx_free_tasklet);
++
++	return NETDEV_TX_OK;
++}
++
++/*
++ * Uses a spin_lock to synchronize reading of the stats structures,
++ * which are written by BNA under the same lock.
++ */
++static struct net_device_stats *
++bnad_get_netdev_stats(struct net_device *netdev)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++	unsigned long flags;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++
++	memset(&bnad->net_stats, 0, sizeof(struct net_device_stats));
++
++	bnad_netdev_qstats_fill(bnad);
++	bnad_netdev_hwstats_fill(bnad);
++
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	return &bnad->net_stats;
++}
++
++static void
++bnad_set_rx_mode(struct net_device *netdev)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++	u32	new_mask, valid_mask;
++	unsigned long flags;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++
++	new_mask = valid_mask = 0;
++
++	if (netdev->flags & IFF_PROMISC) {
++		if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
++			new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
++			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
++			bnad->cfg_flags |= BNAD_CF_PROMISC;
++		}
++	} else {
++		if (bnad->cfg_flags & BNAD_CF_PROMISC) {
++			new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
++			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
++			bnad->cfg_flags &= ~BNAD_CF_PROMISC;
++		}
++	}
++
++	if (netdev->flags & IFF_ALLMULTI) {
++		if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
++			new_mask |= BNA_RXMODE_ALLMULTI;
++			valid_mask |= BNA_RXMODE_ALLMULTI;
++			bnad->cfg_flags |= BNAD_CF_ALLMULTI;
++		}
++	} else {
++		if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
++			new_mask &= ~BNA_RXMODE_ALLMULTI;
++			valid_mask |= BNA_RXMODE_ALLMULTI;
++			bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
++		}
++	}
++
++	bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
++
++	if (!netdev_mc_empty(netdev)) {
++		u8 *mcaddr_list;
++		int mc_count = netdev_mc_count(netdev);
++
++		/* Index 0 holds the broadcast address */
++		mcaddr_list =
++			kzalloc((mc_count + 1) * ETH_ALEN,
++				GFP_ATOMIC);
++		if (!mcaddr_list)
++			goto unlock;
++
++		memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
++
++		/* Copy rest of the MC addresses */
++		bnad_netdev_mc_list_get(netdev, mcaddr_list);
++
++		bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
++					mcaddr_list, NULL);
++
++		/* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
++		kfree(mcaddr_list);
++	}
++unlock:
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++}
++
++/*
++ * bna_lock is used to sync writes to netdev->dev_addr;
++ * conf_mutex cannot be used since this call may be made
++ * in a non-blocking context.
++ */
++static int
++bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
++{
++	int err;
++	struct bnad *bnad = netdev_priv(netdev);
++	struct sockaddr *sa = (struct sockaddr *)mac_addr;
++	unsigned long flags;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++
++	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
++
++	if (!err)
++		memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
++
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	return err;
++}
++
++static int
++bnad_change_mtu(struct net_device *netdev, int new_mtu)
++{
++	int mtu, err = 0;
++	unsigned long flags;
++
++	struct bnad *bnad = netdev_priv(netdev);
++
++	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
++		return -EINVAL;
++
++	mutex_lock(&bnad->conf_mutex);
++
++	netdev->mtu = new_mtu;
++
++	mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	mutex_unlock(&bnad->conf_mutex);
++	return err;
++}
++
++static void
++bnad_vlan_rx_register(struct net_device *netdev,
++				  struct vlan_group *vlan_grp)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++
++	mutex_lock(&bnad->conf_mutex);
++	bnad->vlan_grp = vlan_grp;
++	mutex_unlock(&bnad->conf_mutex);
++}
++
++static void
++bnad_vlan_rx_add_vid(struct net_device *netdev,
++				 unsigned short vid)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++	unsigned long flags;
++
++	if (!bnad->rx_info[0].rx)
++		return;
++
++	mutex_lock(&bnad->conf_mutex);
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	mutex_unlock(&bnad->conf_mutex);
++}
++
++static void
++bnad_vlan_rx_kill_vid(struct net_device *netdev,
++				  unsigned short vid)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++	unsigned long flags;
++
++	if (!bnad->rx_info[0].rx)
++		return;
++
++	mutex_lock(&bnad->conf_mutex);
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	mutex_unlock(&bnad->conf_mutex);
++}
++
++#ifdef CONFIG_NET_POLL_CONTROLLER
++static void
++bnad_netpoll(struct net_device *netdev)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++	struct bnad_rx_info *rx_info;
++	struct bnad_rx_ctrl *rx_ctrl;
++	u32 curr_mask;
++	int i, j;
++
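++	/*
++	 * In INTx mode, mask interrupts (bna_intx_disable() is expected
++	 * to save the current mask in curr_mask) and run the ISR
++	 * directly; in MSI-X mode, disable Rx IRQs and schedule NAPI
++	 * polling on each CCB instead.
++	 */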
++	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
++		bna_intx_disable(&bnad->bna, curr_mask);
++		bnad_isr(bnad->pcidev->irq, netdev);
++		bna_intx_enable(&bnad->bna, curr_mask);
++	} else {
++		for (i = 0; i < bnad->num_rx; i++) {
++			rx_info = &bnad->rx_info[i];
++			if (!rx_info->rx)
++				continue;
++			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
++				rx_ctrl = &rx_info->rx_ctrl[j];
++				if (rx_ctrl->ccb) {
++					bnad_disable_rx_irq(bnad,
++							    rx_ctrl->ccb);
++					bnad_netif_rx_schedule_poll(bnad,
++							    rx_ctrl->ccb);
++				}
++			}
++		}
++	}
++}
++#endif
++
++static const struct net_device_ops bnad_netdev_ops = {
++	.ndo_open		= bnad_open,
++	.ndo_stop		= bnad_stop,
++	.ndo_start_xmit		= bnad_start_xmit,
++	.ndo_get_stats		= bnad_get_netdev_stats,
++	.ndo_set_rx_mode	= bnad_set_rx_mode,
++	.ndo_set_multicast_list = bnad_set_rx_mode,
++	.ndo_validate_addr      = eth_validate_addr,
++	.ndo_set_mac_address    = bnad_set_mac_address,
++	.ndo_change_mtu		= bnad_change_mtu,
++	.ndo_vlan_rx_register   = bnad_vlan_rx_register,
++	.ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
++	.ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
++#ifdef CONFIG_NET_POLL_CONTROLLER
++	.ndo_poll_controller    = bnad_netpoll
++#endif
++};
++
++static void
++bnad_netdev_init(struct bnad *bnad, bool using_dac)
++{
++	struct net_device *netdev = bnad->netdev;
++
++	netdev->features |= NETIF_F_IPV6_CSUM;
++	netdev->features |= NETIF_F_TSO;
++	netdev->features |= NETIF_F_TSO6;
++
++	netdev->features |= NETIF_F_GRO;
++	pr_warn("bna: GRO enabled, using kernel stack GRO\n");
++
++	netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
++
++	if (using_dac)
++		netdev->features |= NETIF_F_HIGHDMA;
++
++	netdev->features |=
++		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
++		NETIF_F_HW_VLAN_FILTER;
++
++	netdev->vlan_features = netdev->features;
++	netdev->mem_start = bnad->mmio_start;
++	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
++
++	netdev->netdev_ops = &bnad_netdev_ops;
++	bnad_set_ethtool_ops(netdev);
++}
++
++/*
++ * 1. Initialize the bnad structure
++ * 2. Setup netdev pointer in pci_dev
++ * 3. Initialize Tx free tasklet
++ * 4. Initialize no. of TxQ & CQs & MSIX vectors
++ */
++static int
++bnad_init(struct bnad *bnad,
++	  struct pci_dev *pdev, struct net_device *netdev)
++{
++	unsigned long flags;
++
++	SET_NETDEV_DEV(netdev, &pdev->dev);
++	pci_set_drvdata(pdev, netdev);
++
++	bnad->netdev = netdev;
++	bnad->pcidev = pdev;
++	bnad->mmio_start = pci_resource_start(pdev, 0);
++	bnad->mmio_len = pci_resource_len(pdev, 0);
++	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
++	if (!bnad->bar0) {
++		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
++		pci_set_drvdata(pdev, NULL);
++		return -ENOMEM;
++	}
++	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
++	       (unsigned long long) bnad->mmio_len);
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	if (!bnad_msix_disable)
++		bnad->cfg_flags = BNAD_CF_MSIX;
++
++	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
++
++	bnad_q_num_init(bnad);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
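++	/* One vector per TxQ and per RxP, plus one for the mailbox */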
++	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
++		(bnad->num_rx * bnad->num_rxp_per_rx) +
++			 BNAD_MAILBOX_MSIX_VECTORS;
++	bnad->msix_diag_num = 2;	/* 1 for Tx, 1 for Rx */
++
++	bnad->txq_depth = BNAD_TXQ_DEPTH;
++	bnad->rxq_depth = BNAD_RXQ_DEPTH;
++	bnad->rx_csum = true;
++
++	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
++	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
++
++	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
++		     (unsigned long)bnad);
++
++	return 0;
++}
++
++/*
++ * Must be called after bnad_pci_uninit()
++ * so that iounmap() and pci_set_drvdata(NULL)
++ * happens only after PCI uninitialization.
++ */
++static void
++bnad_uninit(struct bnad *bnad)
++{
++	if (bnad->bar0)
++		iounmap(bnad->bar0);
++	pci_set_drvdata(bnad->pcidev, NULL);
++}
++
++/*
++ * Initialize locks
++ *	a) Per-device mutex used for serializing configuration
++ *	   changes from OS interface
++ *	b) Spinlock used to protect the bna state machine
++ */
++static void
++bnad_lock_init(struct bnad *bnad)
++{
++	spin_lock_init(&bnad->bna_lock);
++	mutex_init(&bnad->conf_mutex);
++}
++
++static void
++bnad_lock_uninit(struct bnad *bnad)
++{
++	mutex_destroy(&bnad->conf_mutex);
++}
++
++/* PCI Initialization */
++static int
++bnad_pci_init(struct bnad *bnad,
++	      struct pci_dev *pdev, bool *using_dac)
++{
++	int err;
++
++	err = pci_enable_device(pdev);
++	if (err)
++		return err;
++	err = pci_request_regions(pdev, BNAD_NAME);
++	if (err)
++		goto disable_device;
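++	/* Prefer 64-bit DMA; fall back to a 32-bit mask if that fails */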
++	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
++	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
++		*using_dac = 1;
++	} else {
++		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
++		if (err) {
++			err = pci_set_consistent_dma_mask(pdev,
++						DMA_BIT_MASK(32));
++			if (err)
++				goto release_regions;
++		}
++		*using_dac = 0;
++	}
++	pci_set_master(pdev);
++	return 0;
++
++release_regions:
++	pci_release_regions(pdev);
++disable_device:
++	pci_disable_device(pdev);
++
++	return err;
++}
++
++static void
++bnad_pci_uninit(struct pci_dev *pdev)
++{
++	pci_release_regions(pdev);
++	pci_disable_device(pdev);
++}
++
++static int __devinit
++bnad_pci_probe(struct pci_dev *pdev,
++		const struct pci_device_id *pcidev_id)
++{
++	bool 	using_dac;
++	int 	err;
++	struct bnad *bnad;
++	struct bna *bna;
++	struct net_device *netdev;
++	struct bfa_pcidev pcidev_info;
++	unsigned long flags;
++
++	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
++	       pdev, pcidev_id, PCI_FUNC(pdev->devfn));
++
++	mutex_lock(&bnad_fwimg_mutex);
++	if (!cna_get_firmware_buf(pdev)) {
++		mutex_unlock(&bnad_fwimg_mutex);
++		pr_warn("Failed to load Firmware Image!\n");
++		return -ENODEV;
++	}
++	mutex_unlock(&bnad_fwimg_mutex);
++
++	/*
++	 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
++	 * bnad = netdev_priv(netdev)
++	 */
++	netdev = alloc_etherdev(sizeof(struct bnad));
++	if (!netdev) {
++		dev_err(&pdev->dev, "alloc_etherdev failed\n");
++		err = -ENOMEM;
++		return err;
++	}
++	bnad = netdev_priv(netdev);
++
++	/*
++	 * PCI initialization
++	 * 	Output : using_dac = 1 for 64 bit DMA
++	 *		           = 0 for 32 bit DMA
++	 */
++	err = bnad_pci_init(bnad, pdev, &using_dac);
++	if (err)
++		goto free_netdev;
++
++	bnad_lock_init(bnad);
++	/*
++	 * Initialize bnad structure
++	 * Setup relation between pci_dev & netdev
++	 * Init Tx free tasklet
++	 */
++	err = bnad_init(bnad, pdev, netdev);
++	if (err)
++		goto pci_uninit;
++	/* Initialize netdev structure, set up ethtool ops */
++	bnad_netdev_init(bnad, using_dac);
++
++	bnad_enable_msix(bnad);
++
++	/* Get resource requirement from bna */
++	bna_res_req(&bnad->res_info[0]);
++
++	/* Allocate resources from bna */
++	err = bnad_res_alloc(bnad);
++	if (err)
++		goto free_netdev;
++
++	bna = &bnad->bna;
++
++	/* Setup pcidev_info for bna_init() */
++	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
++	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
++	pcidev_info.device_id = bnad->pcidev->device;
++	pcidev_info.pci_bar_kva = bnad->bar0;
++
++	mutex_lock(&bnad->conf_mutex);
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
++
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	bnad->stats.bna_stats = &bna->stats;
++
++	/* Set up timers */
++	setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
++				((unsigned long)bnad));
++	setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
++				((unsigned long)bnad));
++	setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout,
++				((unsigned long)bnad));
++
++	/* Now start the timer before calling IOC */
++	mod_timer(&bnad->bna.device.ioc.ioc_timer,
++		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
++
++	/*
++	 * Start the chip
++	 * Even if err != 0 here, the bna state machine will
++	 * deal with it
++	 */
++	err = bnad_device_enable(bnad);
++
++	/* Get the burnt-in mac */
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bna_port_mac_get(&bna->port, &bnad->perm_addr);
++	bnad_set_netdev_perm_addr(bnad);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	mutex_unlock(&bnad->conf_mutex);
++
++	/*
++	 * Make sure the link appears down to the stack
++	 */
++	netif_carrier_off(netdev);
++
++	/* Finally, register with the net_device layer */
++	err = register_netdev(netdev);
++	if (err) {
++		pr_err("BNA : Registering with netdev failed\n");
++		goto disable_device;
++	}
++
++	return 0;
++
++disable_device:
++	mutex_lock(&bnad->conf_mutex);
++	bnad_device_disable(bnad);
++	del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
++	del_timer_sync(&bnad->bna.device.ioc.sem_timer);
++	del_timer_sync(&bnad->bna.device.ioc.hb_timer);
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bna_uninit(bna);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++	mutex_unlock(&bnad->conf_mutex);
++
++	bnad_res_free(bnad);
++	bnad_disable_msix(bnad);
++pci_uninit:
++	bnad_pci_uninit(pdev);
++	bnad_lock_uninit(bnad);
++	bnad_uninit(bnad);
++free_netdev:
++	free_netdev(netdev);
++	return err;
++}
++
++static void __devexit
++bnad_pci_remove(struct pci_dev *pdev)
++{
++	struct net_device *netdev = pci_get_drvdata(pdev);
++	struct bnad *bnad;
++	struct bna *bna;
++	unsigned long flags;
++
++	if (!netdev)
++		return;
++
++	pr_info("%s bnad_pci_remove\n", netdev->name);
++	bnad = netdev_priv(netdev);
++	bna = &bnad->bna;
++
++	unregister_netdev(netdev);
++
++	mutex_lock(&bnad->conf_mutex);
++	bnad_device_disable(bnad);
++	del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
++	del_timer_sync(&bnad->bna.device.ioc.sem_timer);
++	del_timer_sync(&bnad->bna.device.ioc.hb_timer);
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bna_uninit(bna);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++	mutex_unlock(&bnad->conf_mutex);
++
++	bnad_res_free(bnad);
++	bnad_disable_msix(bnad);
++	bnad_pci_uninit(pdev);
++	bnad_lock_uninit(bnad);
++	bnad_uninit(bnad);
++	free_netdev(netdev);
++}
++
++const struct pci_device_id bnad_pci_id_table[] = {
++	{
++		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
++			PCI_DEVICE_ID_BROCADE_CT),
++		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
++		.class_mask =  0xffff00
++	}, {0,  }
++};
++
++MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
++
++static struct pci_driver bnad_pci_driver = {
++	.name = BNAD_NAME,
++	.id_table = bnad_pci_id_table,
++	.probe = bnad_pci_probe,
++	.remove = __devexit_p(bnad_pci_remove),
++};
++
++static int __init
++bnad_module_init(void)
++{
++	int err;
++
++	pr_info("Brocade 10G Ethernet driver\n");
++
++	bfa_ioc_auto_recover(bnad_ioc_auto_recover);
++
++	err = pci_register_driver(&bnad_pci_driver);
++	if (err < 0) {
++		pr_err("bna : PCI registration failed in module init "
++		       "(%d)\n", err);
++		return err;
++	}
++
++	return 0;
++}
++
++static void __exit
++bnad_module_exit(void)
++{
++	pci_unregister_driver(&bnad_pci_driver);
++
++	if (bfi_fw)
++		release_firmware(bfi_fw);
++}
++
++module_init(bnad_module_init);
++module_exit(bnad_module_exit);
++
++MODULE_AUTHOR("Brocade");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
++MODULE_VERSION(BNAD_VERSION);
++MODULE_FIRMWARE(CNA_FW_FILE_CT);
+--- /dev/null
++++ b/drivers/net/bna/bnad.h
+@@ -0,0 +1,334 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++#ifndef __BNAD_H__
++#define __BNAD_H__
++
++#include <linux/rtnetlink.h>
++#include <linux/workqueue.h>
++#include <linux/ipv6.h>
++#include <linux/etherdevice.h>
++#include <linux/mutex.h>
++#include <linux/firmware.h>
++
++/* Fix for IA64 */
++#include <asm/checksum.h>
++#include <net/ip6_checksum.h>
++
++#include <net/ip.h>
++#include <net/tcp.h>
++
++#include "bna.h"
++
++#define BNAD_TXQ_DEPTH		2048
++#define BNAD_RXQ_DEPTH		2048
++
++#define BNAD_MAX_TXS		1
++#define BNAD_MAX_TXQ_PER_TX	8	/* 8 priority queues */
++#define BNAD_TXQ_NUM		1
++
++#define BNAD_MAX_RXS		1
++#define BNAD_MAX_RXPS_PER_RX	16
++
++/*
++ * Control structure pointed to by ccb->ctrl, which
++ * determines the NAPI / LRO behavior of the CCB.
++ * There is a 1:1 correspondence between ccb & ctrl.
++ */
++struct bnad_rx_ctrl {
++	struct bna_ccb *ccb;
++	struct napi_struct	napi;
++};
++
++#define BNAD_RXMODE_PROMISC_DEFAULT	BNA_RXMODE_PROMISC
++
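++/* Only one TxQ is used, so the skb -> TxQ mapping is trivial for now */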
++#define BNAD_GET_TX_ID(_skb)	(0)
++
++/*
++ * GLOBAL #defines (CONSTANTS)
++ */
++#define BNAD_NAME			"bna"
++#define BNAD_NAME_LEN			64
++
++#define BNAD_VERSION			"2.3.2.0"
++
++#define BNAD_MAILBOX_MSIX_VECTORS	1
++
++#define BNAD_STATS_TIMER_FREQ		1000 	/* in msecs */
++#define BNAD_DIM_TIMER_FREQ		1000 	/* in msecs */
++
++#define BNAD_MAX_Q_DEPTH		0x10000
++#define BNAD_MIN_Q_DEPTH		0x200
++
++#define BNAD_JUMBO_MTU			9000
++
++#define BNAD_NETIF_WAKE_THRESHOLD	8
++
++#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT	3
++
++/* Bit positions for tcb->flags */
++#define BNAD_TXQ_FREE_SENT		0
++
++/* Bit positions for rcb->flags */
++#define BNAD_RXQ_REFILL			0
++#define BNAD_RXQ_STARTED		1
++
++/*
++ * DATA STRUCTURES
++ */
++
++/* enums */
++enum bnad_intr_source {
++	BNAD_INTR_TX		= 1,
++	BNAD_INTR_RX		= 2
++};
++
++enum bnad_link_state {
++	BNAD_LS_DOWN		= 0,
++	BNAD_LS_UP 		= 1
++};
++
++struct bnad_completion {
++	struct completion 	ioc_comp;
++	struct completion 	ucast_comp;
++	struct completion	mcast_comp;
++	struct completion	tx_comp;
++	struct completion	rx_comp;
++	struct completion	stats_comp;
++	struct completion	port_comp;
++
++	u8			ioc_comp_status;
++	u8			ucast_comp_status;
++	u8			mcast_comp_status;
++	u8			tx_comp_status;
++	u8			rx_comp_status;
++	u8			stats_comp_status;
++	u8			port_comp_status;
++};
++
++/* Tx Rx Control Stats */
++struct bnad_drv_stats {
++	u64 		netif_queue_stop;
++	u64		netif_queue_wakeup;
++	u64		tso4;
++	u64		tso6;
++	u64		tso_err;
++	u64		tcpcsum_offload;
++	u64		udpcsum_offload;
++	u64		csum_help;
++	u64		csum_help_err;
++
++	u64		hw_stats_updates;
++	u64		netif_rx_schedule;
++	u64		netif_rx_complete;
++	u64		netif_rx_dropped;
++
++	u64		link_toggle;
++	u64		cee_up;
++
++	u64		rxp_info_alloc_failed;
++	u64		mbox_intr_disabled;
++	u64		mbox_intr_enabled;
++	u64		tx_unmap_q_alloc_failed;
++	u64		rx_unmap_q_alloc_failed;
++
++	u64		rxbuf_alloc_failed;
++};
++
++/* Complete driver stats */
++struct bnad_stats {
++	struct bnad_drv_stats drv_stats;
++	struct bna_stats *bna_stats;
++};
++
++/* Tx / Rx Resources */
++struct bnad_tx_res_info {
++	struct bna_res_info res_info[BNA_TX_RES_T_MAX];
++};
++
++struct bnad_rx_res_info {
++	struct bna_res_info res_info[BNA_RX_RES_T_MAX];
++};
++
++struct bnad_tx_info {
++	struct bna_tx *tx; /* 1:1 between tx_info & tx */
++	struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
++} ____cacheline_aligned;
++
++struct bnad_rx_info {
++	struct bna_rx *rx; /* 1:1 between rx_info & rx */
++
++	struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXPS_PER_RX];
++} ____cacheline_aligned;
++
++/* Unmap queues for Tx / Rx cleanup */
++struct bnad_skb_unmap {
++	struct sk_buff		*skb;
++	DECLARE_PCI_UNMAP_ADDR(dma_addr)
++};
++
++struct bnad_unmap_q {
++	u32		producer_index;
++	u32		consumer_index;
++	u32 		q_depth;
++	/* This should be the last one */
++	struct bnad_skb_unmap unmap_array[1];
++};
++
++/* Bit mask values for bnad->cfg_flags */
++#define	BNAD_CF_DIM_ENABLED		0x01	/* DIM */
++#define	BNAD_CF_PROMISC			0x02
++#define BNAD_CF_ALLMULTI		0x04
++#define	BNAD_CF_MSIX			0x08	/* If in MSIx mode */
++
++/* Defines for run_flags bit-mask */
++/* Set, tested & cleared using xxx_bit() functions */
++/* Values indicated bit positions */
++#define	BNAD_RF_CEE_RUNNING		1
++#define BNAD_RF_HW_ERROR 		2
++#define BNAD_RF_MBOX_IRQ_DISABLED	3
++#define BNAD_RF_TX_STARTED		4
++#define BNAD_RF_RX_STARTED		5
++#define BNAD_RF_DIM_TIMER_RUNNING	6
++#define BNAD_RF_STATS_TIMER_RUNNING	7
++
++struct bnad {
++	struct net_device 	*netdev;
++
++	/* Data path */
++	struct bnad_tx_info tx_info[BNAD_MAX_TXS];
++	struct bnad_rx_info rx_info[BNAD_MAX_RXS];
++
++	struct vlan_group	*vlan_grp;
++	/*
++	 * These queue counts are global only because they are
++	 * used to calculate the number of MSI-X vectors; the
++	 * exact count of queues lives in each Tx/Rx object.
++	 */
++	u32		num_tx;
++	u32		num_rx;
++	u32		num_txq_per_tx;
++	u32		num_rxp_per_rx;
++
++	u32		txq_depth;
++	u32		rxq_depth;
++
++	u8			tx_coalescing_timeo;
++	u8			rx_coalescing_timeo;
++
++	struct bna_rx_config rx_config[BNAD_MAX_RXS];
++	struct bna_tx_config tx_config[BNAD_MAX_TXS];
++
++	u32		rx_csum;
++
++	void __iomem		*bar0;	/* BAR0 address */
++
++	struct bna bna;
++
++	u32		cfg_flags;
++	unsigned long		run_flags;
++
++	struct pci_dev 		*pcidev;
++	u64		mmio_start;
++	u64		mmio_len;
++
++	u32		msix_num;
++	u32		msix_diag_num;
++	struct msix_entry	*msix_table;
++
++	struct mutex		conf_mutex;
++	spinlock_t		bna_lock ____cacheline_aligned;
++
++	/* Timers */
++	struct timer_list	ioc_timer;
++	struct timer_list	dim_timer;
++	struct timer_list	stats_timer;
++
++	/* Control path resources, memory & irq */
++	struct bna_res_info res_info[BNA_RES_T_MAX];
++	struct bnad_tx_res_info tx_res_info[BNAD_MAX_TXS];
++	struct bnad_rx_res_info rx_res_info[BNAD_MAX_RXS];
++
++	struct bnad_completion bnad_completions;
++
++	/* Burnt in MAC address */
++	mac_t			perm_addr;
++
++	struct tasklet_struct	tx_free_tasklet;
++
++	/* Statistics */
++	struct bnad_stats stats;
++	struct net_device_stats net_stats;
++
++	struct bnad_diag *diag;
++
++	char			adapter_name[BNAD_NAME_LEN];
++	char 			port_name[BNAD_NAME_LEN];
++	char			mbox_irq_name[BNAD_NAME_LEN];
++};
++
++/*
++ * EXTERN VARIABLES
++ */
++extern struct firmware *bfi_fw;
++extern u32 		bnad_rxqs_per_cq;
++
++/*
++ * EXTERN PROTOTYPES
++ */
++extern u32 *cna_get_firmware_buf(struct pci_dev *pdev);
++/* Netdev entry point prototypes */
++extern void bnad_set_ethtool_ops(struct net_device *netdev);
++
++/* Configuration & setup */
++extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
++extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
++
++extern int bnad_setup_rx(struct bnad *bnad, uint rx_id);
++extern int bnad_setup_tx(struct bnad *bnad, uint tx_id);
++extern void bnad_cleanup_tx(struct bnad *bnad, uint tx_id);
++extern void bnad_cleanup_rx(struct bnad *bnad, uint rx_id);
++
++/* Timer start/stop protos */
++extern void bnad_dim_timer_start(struct bnad *bnad);
++
++/* Statistics */
++extern void bnad_netdev_qstats_fill(struct bnad *bnad);
++extern void bnad_netdev_hwstats_fill(struct bnad *bnad);
++
++/*
++ * MACROS
++ */
++/* To set & get the stats counters */
++#define BNAD_UPDATE_CTR(_bnad, _ctr)				\
++				(((_bnad)->stats.drv_stats._ctr)++)
++
++#define BNAD_GET_CTR(_bnad, _ctr) ((_bnad)->stats.drv_stats._ctr)
++
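++/* "Unsafe" => the caller must already hold bnad->bna_lock */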
++#define bnad_enable_rx_irq_unsafe(_ccb)			\
++{							\
++	bna_ib_coalescing_timer_set((_ccb)->i_dbell,	\
++		(_ccb)->rx_coalescing_timeo);		\
++	bna_ib_ack((_ccb)->i_dbell, 0);			\
++}
++
++#define bnad_dim_timer_running(_bnad)				\
++	(((_bnad)->cfg_flags & BNAD_CF_DIM_ENABLED) && 		\
++	(test_bit(BNAD_RF_DIM_TIMER_RUNNING, &((_bnad)->run_flags))))
++
++#endif /* __BNAD_H__ */
+--- /dev/null
++++ b/drivers/net/bna/bnad_ethtool.c
+@@ -0,0 +1,1282 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++
++#include "cna.h"
++
++#include <linux/netdevice.h>
++#include <linux/skbuff.h>
++#include <linux/ethtool.h>
++#include <linux/rtnetlink.h>
++
++#include "bna.h"
++
++#include "bnad.h"
++
++#define BNAD_NUM_TXF_COUNTERS 12
++#define BNAD_NUM_RXF_COUNTERS 10
++#define BNAD_NUM_CQ_COUNTERS 3
++#define BNAD_NUM_RXQ_COUNTERS 6
++#define BNAD_NUM_TXQ_COUNTERS 5
++
++#define BNAD_ETHTOOL_STATS_NUM						\
++	(sizeof(struct net_device_stats) / sizeof(unsigned long) +	\
++	sizeof(struct bnad_drv_stats) / sizeof(u64) +		\
++	offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64))
++
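++/*
++ * BNAD_ETHTOOL_STATS_NUM counts the net_device_stats fields, the
++ * driver software counters, and the fixed (pre-rxf_stats) part of
++ * the firmware LL stats; per-TxF/RxF and per-queue counters are
++ * appended at runtime by bnad_get_stats_count_locked().
++ */
++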
++static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
++	"rx_packets",
++	"tx_packets",
++	"rx_bytes",
++	"tx_bytes",
++	"rx_errors",
++	"tx_errors",
++	"rx_dropped",
++	"tx_dropped",
++	"multicast",
++	"collisions",
++
++	"rx_length_errors",
++	"rx_over_errors",
++	"rx_crc_errors",
++	"rx_frame_errors",
++	"rx_fifo_errors",
++	"rx_missed_errors",
++
++	"tx_aborted_errors",
++	"tx_carrier_errors",
++	"tx_fifo_errors",
++	"tx_heartbeat_errors",
++	"tx_window_errors",
++
++	"rx_compressed",
++	"tx_compressed",
++
++	"netif_queue_stop",
++	"netif_queue_wakeup",
++	"tso4",
++	"tso6",
++	"tso_err",
++	"tcpcsum_offload",
++	"udpcsum_offload",
++	"csum_help",
++	"csum_help_err",
++	"hw_stats_updates",
++	"netif_rx_schedule",
++	"netif_rx_complete",
++	"netif_rx_dropped",
++
++	"link_toggle",
++	"cee_up",
++
++	"rxp_info_alloc_failed",
++	"mbox_intr_disabled",
++	"mbox_intr_enabled",
++	"tx_unmap_q_alloc_failed",
++	"rx_unmap_q_alloc_failed",
++	"rxbuf_alloc_failed",
++
++	"mac_frame_64",
++	"mac_frame_65_127",
++	"mac_frame_128_255",
++	"mac_frame_256_511",
++	"mac_frame_512_1023",
++	"mac_frame_1024_1518",
++	"mac_frame_1518_1522",
++	"mac_rx_bytes",
++	"mac_rx_packets",
++	"mac_rx_fcs_error",
++	"mac_rx_multicast",
++	"mac_rx_broadcast",
++	"mac_rx_control_frames",
++	"mac_rx_pause",
++	"mac_rx_unknown_opcode",
++	"mac_rx_alignment_error",
++	"mac_rx_frame_length_error",
++	"mac_rx_code_error",
++	"mac_rx_carrier_sense_error",
++	"mac_rx_undersize",
++	"mac_rx_oversize",
++	"mac_rx_fragments",
++	"mac_rx_jabber",
++	"mac_rx_drop",
++
++	"mac_tx_bytes",
++	"mac_tx_packets",
++	"mac_tx_multicast",
++	"mac_tx_broadcast",
++	"mac_tx_pause",
++	"mac_tx_deferral",
++	"mac_tx_excessive_deferral",
++	"mac_tx_single_collision",
++	"mac_tx_multiple_collision",
++	"mac_tx_late_collision",
++	"mac_tx_excessive_collision",
++	"mac_tx_total_collision",
++	"mac_tx_pause_honored",
++	"mac_tx_drop",
++	"mac_tx_jabber",
++	"mac_tx_fcs_error",
++	"mac_tx_control_frame",
++	"mac_tx_oversize",
++	"mac_tx_undersize",
++	"mac_tx_fragments",
++
++	"bpc_tx_pause_0",
++	"bpc_tx_pause_1",
++	"bpc_tx_pause_2",
++	"bpc_tx_pause_3",
++	"bpc_tx_pause_4",
++	"bpc_tx_pause_5",
++	"bpc_tx_pause_6",
++	"bpc_tx_pause_7",
++	"bpc_tx_zero_pause_0",
++	"bpc_tx_zero_pause_1",
++	"bpc_tx_zero_pause_2",
++	"bpc_tx_zero_pause_3",
++	"bpc_tx_zero_pause_4",
++	"bpc_tx_zero_pause_5",
++	"bpc_tx_zero_pause_6",
++	"bpc_tx_zero_pause_7",
++	"bpc_tx_first_pause_0",
++	"bpc_tx_first_pause_1",
++	"bpc_tx_first_pause_2",
++	"bpc_tx_first_pause_3",
++	"bpc_tx_first_pause_4",
++	"bpc_tx_first_pause_5",
++	"bpc_tx_first_pause_6",
++	"bpc_tx_first_pause_7",
++
++	"bpc_rx_pause_0",
++	"bpc_rx_pause_1",
++	"bpc_rx_pause_2",
++	"bpc_rx_pause_3",
++	"bpc_rx_pause_4",
++	"bpc_rx_pause_5",
++	"bpc_rx_pause_6",
++	"bpc_rx_pause_7",
++	"bpc_rx_zero_pause_0",
++	"bpc_rx_zero_pause_1",
++	"bpc_rx_zero_pause_2",
++	"bpc_rx_zero_pause_3",
++	"bpc_rx_zero_pause_4",
++	"bpc_rx_zero_pause_5",
++	"bpc_rx_zero_pause_6",
++	"bpc_rx_zero_pause_7",
++	"bpc_rx_first_pause_0",
++	"bpc_rx_first_pause_1",
++	"bpc_rx_first_pause_2",
++	"bpc_rx_first_pause_3",
++	"bpc_rx_first_pause_4",
++	"bpc_rx_first_pause_5",
++	"bpc_rx_first_pause_6",
++	"bpc_rx_first_pause_7",
++
++	"rad_rx_frames",
++	"rad_rx_octets",
++	"rad_rx_vlan_frames",
++	"rad_rx_ucast",
++	"rad_rx_ucast_octets",
++	"rad_rx_ucast_vlan",
++	"rad_rx_mcast",
++	"rad_rx_mcast_octets",
++	"rad_rx_mcast_vlan",
++	"rad_rx_bcast",
++	"rad_rx_bcast_octets",
++	"rad_rx_bcast_vlan",
++	"rad_rx_drops",
++
++	"fc_rx_ucast_octets",
++	"fc_rx_ucast",
++	"fc_rx_ucast_vlan",
++	"fc_rx_mcast_octets",
++	"fc_rx_mcast",
++	"fc_rx_mcast_vlan",
++	"fc_rx_bcast_octets",
++	"fc_rx_bcast",
++	"fc_rx_bcast_vlan",
++
++	"fc_tx_ucast_octets",
++	"fc_tx_ucast",
++	"fc_tx_ucast_vlan",
++	"fc_tx_mcast_octets",
++	"fc_tx_mcast",
++	"fc_tx_mcast_vlan",
++	"fc_tx_bcast_octets",
++	"fc_tx_bcast",
++	"fc_tx_bcast_vlan",
++	"fc_tx_parity_errors",
++	"fc_tx_timeout",
++	"fc_tx_fid_parity_errors",
++};
++
++static int
++bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
++{
++	cmd->supported = SUPPORTED_10000baseT_Full;
++	cmd->advertising = ADVERTISED_10000baseT_Full;
++	cmd->autoneg = AUTONEG_DISABLE;
++	cmd->supported |= SUPPORTED_FIBRE;
++	cmd->advertising |= ADVERTISED_FIBRE;
++	cmd->port = PORT_FIBRE;
++	cmd->phy_address = 0;
++
++	if (netif_carrier_ok(netdev)) {
++		cmd->speed = SPEED_10000;
++		cmd->duplex = DUPLEX_FULL;
++	} else {
++		cmd->speed = -1;
++		cmd->duplex = -1;
++	}
++	cmd->transceiver = XCVR_EXTERNAL;
++	cmd->maxtxpkt = 0;
++	cmd->maxrxpkt = 0;
++
++	return 0;
++}
++
++static int
++bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
++{
++	/* Only the 10G full-duplex setting is supported */
++	if (cmd->autoneg == AUTONEG_ENABLE)
++		return -EOPNOTSUPP;
++
++	if (cmd->speed == SPEED_10000 && cmd->duplex == DUPLEX_FULL)
++		return 0;
++
++	return -EOPNOTSUPP;
++}
++
++static void
++bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++	struct bfa_ioc_attr *ioc_attr;
++	unsigned long flags;
++
++	strcpy(drvinfo->driver, BNAD_NAME);
++	strcpy(drvinfo->version, BNAD_VERSION);
++
++	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
++	if (ioc_attr) {
++		spin_lock_irqsave(&bnad->bna_lock, flags);
++		bfa_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
++		spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++		strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
++			sizeof(drvinfo->fw_version) - 1);
++		kfree(ioc_attr);
++	}
++
++	strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
++}
++
++static int
++get_regs(struct bnad *bnad, u32 *regs)
++{
++	int num = 0, i;
++	u32 reg_addr;
++	unsigned long flags;
++
++#define BNAD_GET_REG(addr) 					\
++do {								\
++	if (regs)						\
++		regs[num++] = readl(bnad->bar0 + (addr));	\
++	else							\
++		num++;						\
++} while (0)
++
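++	/*
++	 * get_regs() is called twice: once with regs == NULL just to
++	 * count the registers (for bnad_get_regs_len()), then again
++	 * to actually fill the buffer.
++	 */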
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++
++	/* DMA Block Internal Registers */
++	BNAD_GET_REG(DMA_CTRL_REG0);
++	BNAD_GET_REG(DMA_CTRL_REG1);
++	BNAD_GET_REG(DMA_ERR_INT_STATUS);
++	BNAD_GET_REG(DMA_ERR_INT_ENABLE);
++	BNAD_GET_REG(DMA_ERR_INT_STATUS_SET);
++
++	/* APP Block Register Address Offset from BAR0 */
++	BNAD_GET_REG(HOSTFN0_INT_STATUS);
++	BNAD_GET_REG(HOSTFN0_INT_MASK);
++	BNAD_GET_REG(HOST_PAGE_NUM_FN0);
++	BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN0);
++	BNAD_GET_REG(FN0_PCIE_ERR_REG);
++	BNAD_GET_REG(FN0_ERR_TYPE_STATUS_REG);
++	BNAD_GET_REG(FN0_ERR_TYPE_MSK_STATUS_REG);
++
++	BNAD_GET_REG(HOSTFN1_INT_STATUS);
++	BNAD_GET_REG(HOSTFN1_INT_MASK);
++	BNAD_GET_REG(HOST_PAGE_NUM_FN1);
++	BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN1);
++	BNAD_GET_REG(FN1_PCIE_ERR_REG);
++	BNAD_GET_REG(FN1_ERR_TYPE_STATUS_REG);
++	BNAD_GET_REG(FN1_ERR_TYPE_MSK_STATUS_REG);
++
++	BNAD_GET_REG(PCIE_MISC_REG);
++
++	BNAD_GET_REG(HOST_SEM0_REG);
++	BNAD_GET_REG(HOST_SEM1_REG);
++	BNAD_GET_REG(HOST_SEM2_REG);
++	BNAD_GET_REG(HOST_SEM3_REG);
++	BNAD_GET_REG(HOST_SEM0_INFO_REG);
++	BNAD_GET_REG(HOST_SEM1_INFO_REG);
++	BNAD_GET_REG(HOST_SEM2_INFO_REG);
++	BNAD_GET_REG(HOST_SEM3_INFO_REG);
++
++	BNAD_GET_REG(TEMPSENSE_CNTL_REG);
++	BNAD_GET_REG(TEMPSENSE_STAT_REG);
++
++	BNAD_GET_REG(APP_LOCAL_ERR_STAT);
++	BNAD_GET_REG(APP_LOCAL_ERR_MSK);
++
++	BNAD_GET_REG(PCIE_LNK_ERR_STAT);
++	BNAD_GET_REG(PCIE_LNK_ERR_MSK);
++
++	BNAD_GET_REG(FCOE_FIP_ETH_TYPE);
++	BNAD_GET_REG(RESV_ETH_TYPE);
++
++	BNAD_GET_REG(HOSTFN2_INT_STATUS);
++	BNAD_GET_REG(HOSTFN2_INT_MASK);
++	BNAD_GET_REG(HOST_PAGE_NUM_FN2);
++	BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN2);
++	BNAD_GET_REG(FN2_PCIE_ERR_REG);
++	BNAD_GET_REG(FN2_ERR_TYPE_STATUS_REG);
++	BNAD_GET_REG(FN2_ERR_TYPE_MSK_STATUS_REG);
++
++	BNAD_GET_REG(HOSTFN3_INT_STATUS);
++	BNAD_GET_REG(HOSTFN3_INT_MASK);
++	BNAD_GET_REG(HOST_PAGE_NUM_FN3);
++	BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN3);
++	BNAD_GET_REG(FN3_PCIE_ERR_REG);
++	BNAD_GET_REG(FN3_ERR_TYPE_STATUS_REG);
++	BNAD_GET_REG(FN3_ERR_TYPE_MSK_STATUS_REG);
++
++	/* Host Command Status Registers */
++	reg_addr = HOST_CMDSTS0_CLR_REG;
++	for (i = 0; i < 16; i++) {
++		BNAD_GET_REG(reg_addr);
++		BNAD_GET_REG(reg_addr + 4);
++		BNAD_GET_REG(reg_addr + 8);
++		reg_addr += 0x10;
++	}
++
++	/* Function ID register */
++	BNAD_GET_REG(FNC_ID_REG);
++
++	/* Function personality register */
++	BNAD_GET_REG(FNC_PERS_REG);
++
++	/* Operation mode register */
++	BNAD_GET_REG(OP_MODE);
++
++	/* LPU0 Registers */
++	BNAD_GET_REG(LPU0_MBOX_CTL_REG);
++	BNAD_GET_REG(LPU0_MBOX_CMD_REG);
++	BNAD_GET_REG(LPU0_MBOX_LINK_0REG);
++	BNAD_GET_REG(LPU1_MBOX_LINK_0REG);
++	BNAD_GET_REG(LPU0_MBOX_STATUS_0REG);
++	BNAD_GET_REG(LPU1_MBOX_STATUS_0REG);
++	BNAD_GET_REG(LPU0_ERR_STATUS_REG);
++	BNAD_GET_REG(LPU0_ERR_SET_REG);
++
++	/* LPU1 Registers */
++	BNAD_GET_REG(LPU1_MBOX_CTL_REG);
++	BNAD_GET_REG(LPU1_MBOX_CMD_REG);
++	BNAD_GET_REG(LPU0_MBOX_LINK_1REG);
++	BNAD_GET_REG(LPU1_MBOX_LINK_1REG);
++	BNAD_GET_REG(LPU0_MBOX_STATUS_1REG);
++	BNAD_GET_REG(LPU1_MBOX_STATUS_1REG);
++	BNAD_GET_REG(LPU1_ERR_STATUS_REG);
++	BNAD_GET_REG(LPU1_ERR_SET_REG);
++
++	/* PSS Registers */
++	BNAD_GET_REG(PSS_CTL_REG);
++	BNAD_GET_REG(PSS_ERR_STATUS_REG);
++	BNAD_GET_REG(ERR_STATUS_SET);
++	BNAD_GET_REG(PSS_RAM_ERR_STATUS_REG);
++
++	/* Catapult CPQ Registers */
++	BNAD_GET_REG(HOSTFN0_LPU0_MBOX0_CMD_STAT);
++	BNAD_GET_REG(HOSTFN0_LPU1_MBOX0_CMD_STAT);
++	BNAD_GET_REG(LPU0_HOSTFN0_MBOX0_CMD_STAT);
++	BNAD_GET_REG(LPU1_HOSTFN0_MBOX0_CMD_STAT);
++
++	BNAD_GET_REG(HOSTFN0_LPU0_MBOX1_CMD_STAT);
++	BNAD_GET_REG(HOSTFN0_LPU1_MBOX1_CMD_STAT);
++	BNAD_GET_REG(LPU0_HOSTFN0_MBOX1_CMD_STAT);
++	BNAD_GET_REG(LPU1_HOSTFN0_MBOX1_CMD_STAT);
++
++	BNAD_GET_REG(HOSTFN1_LPU0_MBOX0_CMD_STAT);
++	BNAD_GET_REG(HOSTFN1_LPU1_MBOX0_CMD_STAT);
++	BNAD_GET_REG(LPU0_HOSTFN1_MBOX0_CMD_STAT);
++	BNAD_GET_REG(LPU1_HOSTFN1_MBOX0_CMD_STAT);
++
++	BNAD_GET_REG(HOSTFN1_LPU0_MBOX1_CMD_STAT);
++	BNAD_GET_REG(HOSTFN1_LPU1_MBOX1_CMD_STAT);
++	BNAD_GET_REG(LPU0_HOSTFN1_MBOX1_CMD_STAT);
++	BNAD_GET_REG(LPU1_HOSTFN1_MBOX1_CMD_STAT);
++
++	BNAD_GET_REG(HOSTFN2_LPU0_MBOX0_CMD_STAT);
++	BNAD_GET_REG(HOSTFN2_LPU1_MBOX0_CMD_STAT);
++	BNAD_GET_REG(LPU0_HOSTFN2_MBOX0_CMD_STAT);
++	BNAD_GET_REG(LPU1_HOSTFN2_MBOX0_CMD_STAT);
++
++	BNAD_GET_REG(HOSTFN2_LPU0_MBOX1_CMD_STAT);
++	BNAD_GET_REG(HOSTFN2_LPU1_MBOX1_CMD_STAT);
++	BNAD_GET_REG(LPU0_HOSTFN2_MBOX1_CMD_STAT);
++	BNAD_GET_REG(LPU1_HOSTFN2_MBOX1_CMD_STAT);
++
++	BNAD_GET_REG(HOSTFN3_LPU0_MBOX0_CMD_STAT);
++	BNAD_GET_REG(HOSTFN3_LPU1_MBOX0_CMD_STAT);
++	BNAD_GET_REG(LPU0_HOSTFN3_MBOX0_CMD_STAT);
++	BNAD_GET_REG(LPU1_HOSTFN3_MBOX0_CMD_STAT);
++
++	BNAD_GET_REG(HOSTFN3_LPU0_MBOX1_CMD_STAT);
++	BNAD_GET_REG(HOSTFN3_LPU1_MBOX1_CMD_STAT);
++	BNAD_GET_REG(LPU0_HOSTFN3_MBOX1_CMD_STAT);
++	BNAD_GET_REG(LPU1_HOSTFN3_MBOX1_CMD_STAT);
++
++	/* Host Function Force Parity Error Registers */
++	BNAD_GET_REG(HOSTFN0_LPU_FORCE_PERR);
++	BNAD_GET_REG(HOSTFN1_LPU_FORCE_PERR);
++	BNAD_GET_REG(HOSTFN2_LPU_FORCE_PERR);
++	BNAD_GET_REG(HOSTFN3_LPU_FORCE_PERR);
++
++	/* LL Port[0|1] Halt Mask Registers */
++	BNAD_GET_REG(LL_HALT_MSK_P0);
++	BNAD_GET_REG(LL_HALT_MSK_P1);
++
++	/* LL Port[0|1] Error Mask Registers */
++	BNAD_GET_REG(LL_ERR_MSK_P0);
++	BNAD_GET_REG(LL_ERR_MSK_P1);
++
++	/* EMC FLI Registers */
++	BNAD_GET_REG(FLI_CMD_REG);
++	BNAD_GET_REG(FLI_ADDR_REG);
++	BNAD_GET_REG(FLI_CTL_REG);
++	BNAD_GET_REG(FLI_WRDATA_REG);
++	BNAD_GET_REG(FLI_RDDATA_REG);
++	BNAD_GET_REG(FLI_DEV_STATUS_REG);
++	BNAD_GET_REG(FLI_SIG_WD_REG);
++
++	BNAD_GET_REG(FLI_DEV_VENDOR_REG);
++	BNAD_GET_REG(FLI_ERR_STATUS_REG);
++
++	/* RxAdm 0 Registers */
++	BNAD_GET_REG(RAD0_CTL_REG);
++	BNAD_GET_REG(RAD0_PE_PARM_REG);
++	BNAD_GET_REG(RAD0_BCN_REG);
++	BNAD_GET_REG(RAD0_DEFAULT_REG);
++	BNAD_GET_REG(RAD0_PROMISC_REG);
++	BNAD_GET_REG(RAD0_BCNQ_REG);
++	BNAD_GET_REG(RAD0_DEFAULTQ_REG);
++
++	BNAD_GET_REG(RAD0_ERR_STS);
++	BNAD_GET_REG(RAD0_SET_ERR_STS);
++	BNAD_GET_REG(RAD0_ERR_INT_EN);
++	BNAD_GET_REG(RAD0_FIRST_ERR);
++	BNAD_GET_REG(RAD0_FORCE_ERR);
++
++	BNAD_GET_REG(RAD0_MAC_MAN_1H);
++	BNAD_GET_REG(RAD0_MAC_MAN_1L);
++	BNAD_GET_REG(RAD0_MAC_MAN_2H);
++	BNAD_GET_REG(RAD0_MAC_MAN_2L);
++	BNAD_GET_REG(RAD0_MAC_MAN_3H);
++	BNAD_GET_REG(RAD0_MAC_MAN_3L);
++	BNAD_GET_REG(RAD0_MAC_MAN_4H);
++	BNAD_GET_REG(RAD0_MAC_MAN_4L);
++
++	BNAD_GET_REG(RAD0_LAST4_IP);
++
++	/* RxAdm 1 Registers */
++	BNAD_GET_REG(RAD1_CTL_REG);
++	BNAD_GET_REG(RAD1_PE_PARM_REG);
++	BNAD_GET_REG(RAD1_BCN_REG);
++	BNAD_GET_REG(RAD1_DEFAULT_REG);
++	BNAD_GET_REG(RAD1_PROMISC_REG);
++	BNAD_GET_REG(RAD1_BCNQ_REG);
++	BNAD_GET_REG(RAD1_DEFAULTQ_REG);
++
++	BNAD_GET_REG(RAD1_ERR_STS);
++	BNAD_GET_REG(RAD1_SET_ERR_STS);
++	BNAD_GET_REG(RAD1_ERR_INT_EN);
++
++	/* TxA0 Registers */
++	BNAD_GET_REG(TXA0_CTRL_REG);
++	/* TxA0 TSO Sequence # Registers (RO) */
++	for (i = 0; i < 8; i++) {
++		BNAD_GET_REG(TXA0_TSO_TCP_SEQ_REG(i));
++		BNAD_GET_REG(TXA0_TSO_IP_INFO_REG(i));
++	}
++
++	/* TxA1 Registers */
++	BNAD_GET_REG(TXA1_CTRL_REG);
++	/* TxA1 TSO Sequence # Registers (RO) */
++	for (i = 0; i < 8; i++) {
++		BNAD_GET_REG(TXA1_TSO_TCP_SEQ_REG(i));
++		BNAD_GET_REG(TXA1_TSO_IP_INFO_REG(i));
++	}
++
++	/* RxA Registers */
++	BNAD_GET_REG(RXA0_CTL_REG);
++	BNAD_GET_REG(RXA1_CTL_REG);
++
++	/* PLB0 Registers */
++	BNAD_GET_REG(PLB0_ECM_TIMER_REG);
++	BNAD_GET_REG(PLB0_RL_CTL);
++	for (i = 0; i < 8; i++)
++		BNAD_GET_REG(PLB0_RL_MAX_BC(i));
++	BNAD_GET_REG(PLB0_RL_TU_PRIO);
++	for (i = 0; i < 8; i++)
++		BNAD_GET_REG(PLB0_RL_BYTE_CNT(i));
++	BNAD_GET_REG(PLB0_RL_MIN_REG);
++	BNAD_GET_REG(PLB0_RL_MAX_REG);
++	BNAD_GET_REG(PLB0_EMS_ADD_REG);
++
++	/* PLB1 Registers */
++	BNAD_GET_REG(PLB1_ECM_TIMER_REG);
++	BNAD_GET_REG(PLB1_RL_CTL);
++	for (i = 0; i < 8; i++)
++		BNAD_GET_REG(PLB1_RL_MAX_BC(i));
++	BNAD_GET_REG(PLB1_RL_TU_PRIO);
++	for (i = 0; i < 8; i++)
++		BNAD_GET_REG(PLB1_RL_BYTE_CNT(i));
++	BNAD_GET_REG(PLB1_RL_MIN_REG);
++	BNAD_GET_REG(PLB1_RL_MAX_REG);
++	BNAD_GET_REG(PLB1_EMS_ADD_REG);
++
++	/* HQM Control Register */
++	BNAD_GET_REG(HQM0_CTL_REG);
++	BNAD_GET_REG(HQM0_RXQ_STOP_SEM);
++	BNAD_GET_REG(HQM0_TXQ_STOP_SEM);
++	BNAD_GET_REG(HQM1_CTL_REG);
++	BNAD_GET_REG(HQM1_RXQ_STOP_SEM);
++	BNAD_GET_REG(HQM1_TXQ_STOP_SEM);
++
++	/* LUT Registers */
++	BNAD_GET_REG(LUT0_ERR_STS);
++	BNAD_GET_REG(LUT0_SET_ERR_STS);
++	BNAD_GET_REG(LUT1_ERR_STS);
++	BNAD_GET_REG(LUT1_SET_ERR_STS);
++
++	/* TRC Registers */
++	BNAD_GET_REG(TRC_CTL_REG);
++	BNAD_GET_REG(TRC_MODS_REG);
++	BNAD_GET_REG(TRC_TRGC_REG);
++	BNAD_GET_REG(TRC_CNT1_REG);
++	BNAD_GET_REG(TRC_CNT2_REG);
++	BNAD_GET_REG(TRC_NXTS_REG);
++	BNAD_GET_REG(TRC_DIRR_REG);
++	for (i = 0; i < 10; i++)
++		BNAD_GET_REG(TRC_TRGM_REG(i));
++	for (i = 0; i < 10; i++)
++		BNAD_GET_REG(TRC_NXTM_REG(i));
++	for (i = 0; i < 10; i++)
++		BNAD_GET_REG(TRC_STRM_REG(i));
++
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++#undef BNAD_GET_REG
++	return num;
++}
++
++static int
++bnad_get_regs_len(struct net_device *netdev)
++{
++	int ret = get_regs(netdev_priv(netdev), NULL) * sizeof(u32);
++	return ret;
++}
++
++static void
++bnad_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
++{
++	memset(buf, 0, bnad_get_regs_len(netdev));
++	get_regs(netdev_priv(netdev), buf);
++}
++
++static void
++bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
++{
++	wolinfo->supported = 0;
++	wolinfo->wolopts = 0;
++}
++
++static int
++bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++	unsigned long flags;
++
++	/* Lock required to access bnad->cfg_flags */
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	coalesce->use_adaptive_rx_coalesce =
++		(bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
++					BFI_COALESCING_TIMER_UNIT;
++	coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
++					BFI_COALESCING_TIMER_UNIT;
++	coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;
++
++	return 0;
++}
++
++static int
++bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++	unsigned long flags;
++	int dim_timer_del = 0;
++
++	if (coalesce->rx_coalesce_usecs == 0 ||
++	    coalesce->rx_coalesce_usecs >
++	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
++		return -EINVAL;
++
++	if (coalesce->tx_coalesce_usecs == 0 ||
++	    coalesce->tx_coalesce_usecs >
++	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
++		return -EINVAL;
++
++	mutex_lock(&bnad->conf_mutex);
++	/*
++	 * No need to store rx_coalesce_usecs here: whenever DIM is
++	 * disabled, we can get it back from the stack.
++	 */
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	if (coalesce->use_adaptive_rx_coalesce) {
++		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
++			bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
++			bnad_dim_timer_start(bnad);
++		}
++	} else {
++		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
++			bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
++			dim_timer_del = bnad_dim_timer_running(bnad);
++			if (dim_timer_del) {
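++				/*
++				 * Drop bna_lock across del_timer_sync();
++				 * the DIM timer callback can take the
++				 * same lock, so we must not hold it here.
++				 */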
++				clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
++							&bnad->run_flags);
++				spin_unlock_irqrestore(&bnad->bna_lock, flags);
++				del_timer_sync(&bnad->dim_timer);
++				spin_lock_irqsave(&bnad->bna_lock, flags);
++			}
++			bnad_rx_coalescing_timeo_set(bnad);
++		}
++	}
++	if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
++					BFI_COALESCING_TIMER_UNIT) {
++		bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
++						BFI_COALESCING_TIMER_UNIT;
++		bnad_tx_coalescing_timeo_set(bnad);
++	}
++
++	if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
++					BFI_COALESCING_TIMER_UNIT) {
++		bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
++						BFI_COALESCING_TIMER_UNIT;
++
++		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
++			bnad_rx_coalescing_timeo_set(bnad);
++
++	}
++
++	/* Add Tx Inter-pkt DMA count?  */
++
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	mutex_unlock(&bnad->conf_mutex);
++	return 0;
++}
++
++static void
++bnad_get_ringparam(struct net_device *netdev,
++		   struct ethtool_ringparam *ringparam)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++
++	ringparam->rx_max_pending = BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq;
++	ringparam->rx_mini_max_pending = 0;
++	ringparam->rx_jumbo_max_pending = 0;
++	ringparam->tx_max_pending = BNAD_MAX_Q_DEPTH;
++
++	ringparam->rx_pending = bnad->rxq_depth;
++	ringparam->rx_mini_pending = 0;
++	ringparam->rx_jumbo_pending = 0;
++	ringparam->tx_pending = bnad->txq_depth;
++}
++
++static int
++bnad_set_ringparam(struct net_device *netdev,
++		   struct ethtool_ringparam *ringparam)
++{
++	int i, current_err, err = 0;
++	struct bnad *bnad = netdev_priv(netdev);
++
++	mutex_lock(&bnad->conf_mutex);
++	if (ringparam->rx_pending == bnad->rxq_depth &&
++	    ringparam->tx_pending == bnad->txq_depth) {
++		mutex_unlock(&bnad->conf_mutex);
++		return 0;
++	}
++
++	if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
++	    ringparam->rx_pending > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq ||
++	    !BNA_POWER_OF_2(ringparam->rx_pending)) {
++		mutex_unlock(&bnad->conf_mutex);
++		return -EINVAL;
++	}
++	if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
++	    ringparam->tx_pending > BNAD_MAX_Q_DEPTH ||
++	    !BNA_POWER_OF_2(ringparam->tx_pending)) {
++		mutex_unlock(&bnad->conf_mutex);
++		return -EINVAL;
++	}
++
++	if (ringparam->rx_pending != bnad->rxq_depth) {
++		bnad->rxq_depth = ringparam->rx_pending;
++		for (i = 0; i < bnad->num_rx; i++) {
++			if (!bnad->rx_info[i].rx)
++				continue;
++			bnad_cleanup_rx(bnad, i);
++			current_err = bnad_setup_rx(bnad, i);
++			if (current_err && !err)
++				err = current_err;
++		}
++	}
++	if (ringparam->tx_pending != bnad->txq_depth) {
++		bnad->txq_depth = ringparam->tx_pending;
++		for (i = 0; i < bnad->num_tx; i++) {
++			if (!bnad->tx_info[i].tx)
++				continue;
++			bnad_cleanup_tx(bnad, i);
++			current_err = bnad_setup_tx(bnad, i);
++			if (current_err && !err)
++				err = current_err;
++		}
++	}
++
++	mutex_unlock(&bnad->conf_mutex);
++	return err;
++}
++
++static void
++bnad_get_pauseparam(struct net_device *netdev,
++		    struct ethtool_pauseparam *pauseparam)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++
++	pauseparam->autoneg = 0;
++	pauseparam->rx_pause = bnad->bna.port.pause_config.rx_pause;
++	pauseparam->tx_pause = bnad->bna.port.pause_config.tx_pause;
++}
++
++static int
++bnad_set_pauseparam(struct net_device *netdev,
++		    struct ethtool_pauseparam *pauseparam)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++	struct bna_pause_config pause_config;
++	unsigned long flags;
++
++	if (pauseparam->autoneg == AUTONEG_ENABLE)
++		return -EINVAL;
++
++	mutex_lock(&bnad->conf_mutex);
++	if (pauseparam->rx_pause != bnad->bna.port.pause_config.rx_pause ||
++	    pauseparam->tx_pause != bnad->bna.port.pause_config.tx_pause) {
++		pause_config.rx_pause = pauseparam->rx_pause;
++		pause_config.tx_pause = pauseparam->tx_pause;
++		spin_lock_irqsave(&bnad->bna_lock, flags);
++		bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
++		spin_unlock_irqrestore(&bnad->bna_lock, flags);
++	}
++	mutex_unlock(&bnad->conf_mutex);
++	return 0;
++}
++
++static u32
++bnad_get_rx_csum(struct net_device *netdev)
++{
++	u32 rx_csum;
++	struct bnad *bnad = netdev_priv(netdev);
++
++	rx_csum = bnad->rx_csum;
++	return rx_csum;
++}
++
++static int
++bnad_set_rx_csum(struct net_device *netdev, u32 rx_csum)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++
++	mutex_lock(&bnad->conf_mutex);
++	bnad->rx_csum = rx_csum;
++	mutex_unlock(&bnad->conf_mutex);
++	return 0;
++}
++
++static int
++bnad_set_tx_csum(struct net_device *netdev, u32 tx_csum)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++
++	mutex_lock(&bnad->conf_mutex);
++	if (tx_csum) {
++		netdev->features |= NETIF_F_IP_CSUM;
++		netdev->features |= NETIF_F_IPV6_CSUM;
++	} else {
++		netdev->features &= ~NETIF_F_IP_CSUM;
++		netdev->features &= ~NETIF_F_IPV6_CSUM;
++	}
++	mutex_unlock(&bnad->conf_mutex);
++	return 0;
++}
++
++static int
++bnad_set_tso(struct net_device *netdev, u32 tso)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++
++	mutex_lock(&bnad->conf_mutex);
++	if (tso) {
++		netdev->features |= NETIF_F_TSO;
++		netdev->features |= NETIF_F_TSO6;
++	} else {
++		netdev->features &= ~NETIF_F_TSO;
++		netdev->features &= ~NETIF_F_TSO6;
++	}
++	mutex_unlock(&bnad->conf_mutex);
++	return 0;
++}
++
++static void
++bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++	int i, j, q_num;
++	u64 bmap;
++
++	mutex_lock(&bnad->conf_mutex);
++
++	switch (stringset) {
++	case ETH_SS_STATS:
++		for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
++			BUG_ON(!(strlen(bnad_net_stats_strings[i]) <
++				   ETH_GSTRING_LEN));
++			memcpy(string, bnad_net_stats_strings[i],
++			       ETH_GSTRING_LEN);
++			string += ETH_GSTRING_LEN;
++		}
++		bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
++			((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
++		for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
++			if (bmap & 1) {
++				sprintf(string, "txf%d_ucast_octets", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "txf%d_ucast", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "txf%d_ucast_vlan", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "txf%d_mcast_octets", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "txf%d_mcast", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "txf%d_mcast_vlan", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "txf%d_bcast_octets", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "txf%d_bcast", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "txf%d_bcast_vlan", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "txf%d_errors", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "txf%d_filter_vlan", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "txf%d_filter_mac_sa", i);
++				string += ETH_GSTRING_LEN;
++			}
++			bmap >>= 1;
++		}
++
++		bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
++			((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
++		for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
++			if (bmap & 1) {
++				sprintf(string, "rxf%d_ucast_octets", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "rxf%d_ucast", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "rxf%d_ucast_vlan", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "rxf%d_mcast_octets", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "rxf%d_mcast", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "rxf%d_mcast_vlan", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "rxf%d_bcast_octets", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "rxf%d_bcast", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "rxf%d_bcast_vlan", i);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "rxf%d_frame_drops", i);
++				string += ETH_GSTRING_LEN;
++			}
++			bmap >>= 1;
++		}
++
++		q_num = 0;
++		for (i = 0; i < bnad->num_rx; i++) {
++			if (!bnad->rx_info[i].rx)
++				continue;
++			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
++				sprintf(string, "cq%d_producer_index", q_num);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "cq%d_consumer_index", q_num);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "cq%d_hw_producer_index",
++					q_num);
++				string += ETH_GSTRING_LEN;
++				q_num++;
++			}
++		}
++
++		q_num = 0;
++		for (i = 0; i < bnad->num_rx; i++) {
++			if (!bnad->rx_info[i].rx)
++				continue;
++			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
++				sprintf(string, "rxq%d_packets", q_num);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "rxq%d_bytes", q_num);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "rxq%d_packets_with_error",
++								q_num);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "rxq%d_allocbuf_failed", q_num);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "rxq%d_producer_index", q_num);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "rxq%d_consumer_index", q_num);
++				string += ETH_GSTRING_LEN;
++				q_num++;
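++				/* An optional second rxq may be
++				 * attached to this CQ; emit its
++				 * strings (and count it) as well. */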
++				if (bnad->rx_info[i].rx_ctrl[j].ccb &&
++					bnad->rx_info[i].rx_ctrl[j].ccb->
++					rcb[1] &&
++					bnad->rx_info[i].rx_ctrl[j].ccb->
++					rcb[1]->rxq) {
++					sprintf(string, "rxq%d_packets", q_num);
++					string += ETH_GSTRING_LEN;
++					sprintf(string, "rxq%d_bytes", q_num);
++					string += ETH_GSTRING_LEN;
++					sprintf(string,
++					"rxq%d_packets_with_error", q_num);
++					string += ETH_GSTRING_LEN;
++					sprintf(string, "rxq%d_allocbuf_failed",
++								q_num);
++					string += ETH_GSTRING_LEN;
++					sprintf(string, "rxq%d_producer_index",
++								q_num);
++					string += ETH_GSTRING_LEN;
++					sprintf(string, "rxq%d_consumer_index",
++								q_num);
++					string += ETH_GSTRING_LEN;
++					q_num++;
++				}
++			}
++		}
++
++		q_num = 0;
++		for (i = 0; i < bnad->num_tx; i++) {
++			if (!bnad->tx_info[i].tx)
++				continue;
++			for (j = 0; j < bnad->num_txq_per_tx; j++) {
++				sprintf(string, "txq%d_packets", q_num);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "txq%d_bytes", q_num);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "txq%d_producer_index", q_num);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "txq%d_consumer_index", q_num);
++				string += ETH_GSTRING_LEN;
++				sprintf(string, "txq%d_hw_consumer_index",
++									q_num);
++				string += ETH_GSTRING_LEN;
++				q_num++;
++			}
++		}
++
++		break;
++
++	default:
++		break;
++	}
++
++	mutex_unlock(&bnad->conf_mutex);
++}
++
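++/*
++ * The count computed here must match, entry for entry, the strings
++ * emitted by bnad_get_strings() and the values written by
++ * bnad_get_ethtool_stats(); the stats path rechecks this count under
++ * conf_mutex and bails out if it no longer matches.
++ */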
++static int
++bnad_get_stats_count_locked(struct net_device *netdev)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++	int i, j, count, rxf_active_num = 0, txf_active_num = 0;
++	u64 bmap;
++
++	bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
++			((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
++	for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
++		if (bmap & 1)
++			txf_active_num++;
++		bmap >>= 1;
++	}
++	bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
++			((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
++	for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
++		if (bmap & 1)
++			rxf_active_num++;
++		bmap >>= 1;
++	}
++	count = BNAD_ETHTOOL_STATS_NUM +
++		txf_active_num * BNAD_NUM_TXF_COUNTERS +
++		rxf_active_num * BNAD_NUM_RXF_COUNTERS;
++
++	for (i = 0; i < bnad->num_rx; i++) {
++		if (!bnad->rx_info[i].rx)
++			continue;
++		count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
++		count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
++		for (j = 0; j < bnad->num_rxp_per_rx; j++)
++			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
++				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
++				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
++				count +=  BNAD_NUM_RXQ_COUNTERS;
++	}
++
++	for (i = 0; i < bnad->num_tx; i++) {
++		if (!bnad->tx_info[i].tx)
++			continue;
++		count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
++	}
++	return count;
++}
++
++static int
++bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
++{
++	int i, j;
++	struct bna_rcb *rcb = NULL;
++	struct bna_tcb *tcb = NULL;
++
++	for (i = 0; i < bnad->num_rx; i++) {
++		if (!bnad->rx_info[i].rx)
++			continue;
++		for (j = 0; j < bnad->num_rxp_per_rx; j++)
++			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
++				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
++				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
++				buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
++						ccb->producer_index;
++				buf[bi++] = 0; /* ccb->consumer_index */
++				buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
++						ccb->hw_producer_index);
++			}
++	}
++	for (i = 0; i < bnad->num_rx; i++) {
++		if (!bnad->rx_info[i].rx)
++			continue;
++		for (j = 0; j < bnad->num_rxp_per_rx; j++)
++			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
++				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
++					bnad->rx_info[i].rx_ctrl[j].ccb->
++					rcb[0]->rxq) {
++					rcb = bnad->rx_info[i].rx_ctrl[j].
++							ccb->rcb[0];
++					buf[bi++] = rcb->rxq->rx_packets;
++					buf[bi++] = rcb->rxq->rx_bytes;
++					buf[bi++] = rcb->rxq->
++							rx_packets_with_error;
++					buf[bi++] = rcb->rxq->
++							rxbuf_alloc_failed;
++					buf[bi++] = rcb->producer_index;
++					buf[bi++] = rcb->consumer_index;
++				}
++				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
++					bnad->rx_info[i].rx_ctrl[j].ccb->
++					rcb[1]->rxq) {
++					rcb = bnad->rx_info[i].rx_ctrl[j].
++								ccb->rcb[1];
++					buf[bi++] = rcb->rxq->rx_packets;
++					buf[bi++] = rcb->rxq->rx_bytes;
++					buf[bi++] = rcb->rxq->
++							rx_packets_with_error;
++					buf[bi++] = rcb->rxq->
++							rxbuf_alloc_failed;
++					buf[bi++] = rcb->producer_index;
++					buf[bi++] = rcb->consumer_index;
++				}
++			}
++	}
++
++	for (i = 0; i < bnad->num_tx; i++) {
++		if (!bnad->tx_info[i].tx)
++			continue;
++		for (j = 0; j < bnad->num_txq_per_tx; j++)
++			if (bnad->tx_info[i].tcb[j] &&
++				bnad->tx_info[i].tcb[j]->txq) {
++				tcb = bnad->tx_info[i].tcb[j];
++				buf[bi++] = tcb->txq->tx_packets;
++				buf[bi++] = tcb->txq->tx_bytes;
++				buf[bi++] = tcb->producer_index;
++				buf[bi++] = tcb->consumer_index;
++				buf[bi++] = *(tcb->hw_consumer_index);
++			}
++	}
++
++	return bi;
++}
++
++static void
++bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
++		       u64 *buf)
++{
++	struct bnad *bnad = netdev_priv(netdev);
++	int i, j, bi;
++	unsigned long *net_stats, flags;
++	u64 *stats64;
++	u64 bmap;
++
++	mutex_lock(&bnad->conf_mutex);
++	if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
++		mutex_unlock(&bnad->conf_mutex);
++		return;
++	}
++
++	/*
++	 * Use bna_lock to sync reads from bna_stats, which is written
++	 * under the same lock.
++	 */
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bi = 0;
++	memset(buf, 0, stats->n_stats * sizeof(u64));
++	memset(&bnad->net_stats, 0, sizeof(struct net_device_stats));
++
++	bnad_netdev_qstats_fill(bnad);
++	bnad_netdev_hwstats_fill(bnad);
++
++	/* Fill net_stats into ethtool buffers */
++	net_stats = (unsigned long *)&bnad->net_stats;
++	for (i = 0; i < sizeof(struct net_device_stats) / sizeof(unsigned long);
++	     i++)
++		buf[bi++] = net_stats[i];
++
++	/* Fill driver stats into ethtool buffers */
++	stats64 = (u64 *)&bnad->stats.drv_stats;
++	for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
++		buf[bi++] = stats64[i];
++
++	/* Fill hardware stats excluding the rxf/txf into ethtool bufs */
++	stats64 = (u64 *) bnad->stats.bna_stats->hw_stats;
++	for (i = 0;
++	     i < offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64);
++	     i++)
++		buf[bi++] = stats64[i];
++
++	/* Fill txf stats into ethtool buffers */
++	bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
++			((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
++	for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
++		if (bmap & 1) {
++			stats64 = (u64 *)&bnad->stats.bna_stats->
++						hw_stats->txf_stats[i];
++			for (j = 0; j < sizeof(struct bfi_ll_stats_txf) /
++					sizeof(u64); j++)
++				buf[bi++] = stats64[j];
++		}
++		bmap >>= 1;
++	}
++
++	/*  Fill rxf stats into ethtool buffers */
++	bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
++			((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
++	for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
++		if (bmap & 1) {
++			stats64 = (u64 *)&bnad->stats.bna_stats->
++						hw_stats->rxf_stats[i];
++			for (j = 0; j < sizeof(struct bfi_ll_stats_rxf) /
++					sizeof(u64); j++)
++				buf[bi++] = stats64[j];
++		}
++		bmap >>= 1;
++	}
++
++	/* Fill per Q stats into ethtool buffers */
++	bi = bnad_per_q_stats_fill(bnad, buf, bi);
++
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	mutex_unlock(&bnad->conf_mutex);
++}
++
++static int
++bnad_get_sset_count(struct net_device *netdev, int sset)
++{
++	switch (sset) {
++	case ETH_SS_STATS:
++		return bnad_get_stats_count_locked(netdev);
++	default:
++		return -EOPNOTSUPP;
++	}
++}
++
++static struct ethtool_ops bnad_ethtool_ops = {
++	.get_settings = bnad_get_settings,
++	.set_settings = bnad_set_settings,
++	.get_drvinfo = bnad_get_drvinfo,
++	.get_regs_len = bnad_get_regs_len,
++	.get_regs = bnad_get_regs,
++	.get_wol = bnad_get_wol,
++	.get_link = ethtool_op_get_link,
++	.get_coalesce = bnad_get_coalesce,
++	.set_coalesce = bnad_set_coalesce,
++	.get_ringparam = bnad_get_ringparam,
++	.set_ringparam = bnad_set_ringparam,
++	.get_pauseparam = bnad_get_pauseparam,
++	.set_pauseparam = bnad_set_pauseparam,
++	.get_rx_csum = bnad_get_rx_csum,
++	.set_rx_csum = bnad_set_rx_csum,
++	.get_tx_csum = ethtool_op_get_tx_csum,
++	.set_tx_csum = bnad_set_tx_csum,
++	.get_sg = ethtool_op_get_sg,
++	.set_sg = ethtool_op_set_sg,
++	.get_tso = ethtool_op_get_tso,
++	.set_tso = bnad_set_tso,
++	.get_flags = ethtool_op_get_flags,
++	.set_flags = ethtool_op_set_flags,
++	.get_strings = bnad_get_strings,
++	.get_ethtool_stats = bnad_get_ethtool_stats,
++	.get_sset_count = bnad_get_sset_count
++};
++
++void
++bnad_set_ethtool_ops(struct net_device *netdev)
++{
++	SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
++}
+--- /dev/null
++++ b/drivers/net/bna/cna.h
+@@ -0,0 +1,81 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2006-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++
++#ifndef __CNA_H__
++#define __CNA_H__
++
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/pci.h>
++#include <linux/delay.h>
++#include <linux/bitops.h>
++#include <linux/timer.h>
++#include <linux/interrupt.h>
++#include <linux/if_ether.h>
++#include <asm/page.h>
++#include <asm/io.h>
++#include <asm/string.h>
++
++#include <linux/list.h>
++
++#define bfa_sm_fault(__mod, __event)    do {                            \
++	pr_err("SM Assertion failure: %s: %d: event = %d", __FILE__, __LINE__, \
++		__event); \
++} while (0)
++
++extern char bfa_version[];
++
++#define	CNA_FW_FILE_CT	"ctfw_cna.bin"
++#define FC_SYMNAME_MAX	256	/*!< max name server symbolic name size */
++
++#pragma pack(1)
++
++#define MAC_ADDRLEN	(6)
++typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
++
++#pragma pack()
++
++#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
++#define bfa_q_next(_qe)	(((struct list_head *) (_qe))->next)
++#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
++
++/*
++ * bfa_q_qe_init - to initialize a queue element
++ */
++#define bfa_q_qe_init(_qe) {						\
++	bfa_q_next(_qe) = (struct list_head *) NULL;			\
++	bfa_q_prev(_qe) = (struct list_head *) NULL;			\
++}
++
++/*
++ * bfa_q_deq - dequeue an element from head of the queue
++ */
++#define bfa_q_deq(_q, _qe) {						\
++	if (!list_empty(_q)) {						\
++		(*((struct list_head **) (_qe))) = bfa_q_next(_q);	\
++		bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) =	\
++						(struct list_head *) (_q); \
++		bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
++		bfa_q_qe_init(*((struct list_head **) _qe));		\
++	} else {							\
++		*((struct list_head **) (_qe)) = (struct list_head *) NULL; \
++	}								\
++}
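++
++/*
++ * Illustrative use of bfa_q_deq() (example only; "work_q" and
++ * process() are hypothetical):
++ *
++ *	struct list_head *qe;
++ *
++ *	bfa_q_deq(&work_q, &qe);
++ *	while (qe != NULL) {
++ *		process(qe);
++ *		bfa_q_deq(&work_q, &qe);
++ *	}
++ */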
++
++#endif /* __CNA_H__ */
+--- /dev/null
++++ b/drivers/net/bna/cna_fwimg.c
+@@ -0,0 +1,64 @@
++/*
++ * Linux network driver for Brocade Converged Network Adapter.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License (GPL) Version 2 as
++ * published by the Free Software Foundation
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ */
++/*
++ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
++ * All rights reserved
++ * www.brocade.com
++ */
++#include <linux/firmware.h>
++#include "cna.h"
++
++const struct firmware *bfi_fw;
++static u32 *bfi_image_ct_cna;
++static u32 bfi_image_ct_cna_size;
++
++u32 *
++cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
++			u32 *bfi_image_size, char *fw_name)
++{
++	const struct firmware *fw;
++
++	if (request_firmware(&fw, fw_name, &pdev->dev)) {
++		pr_alert("Can't locate firmware %s\n", fw_name);
++		goto error;
++	}
++
++	*bfi_image = (u32 *)fw->data;
++	*bfi_image_size = fw->size/sizeof(u32);
++	bfi_fw = fw;
++
++	return *bfi_image;
++error:
++	return NULL;
++}
++
++u32 *
++cna_get_firmware_buf(struct pci_dev *pdev)
++{
++	if (bfi_image_ct_cna_size == 0)
++		cna_read_firmware(pdev, &bfi_image_ct_cna,
++			&bfi_image_ct_cna_size, CNA_FW_FILE_CT);
++	return bfi_image_ct_cna;
++}
++
++u32 *
++bfa_cb_image_get_chunk(int type, u32 off)
++{
++	return (u32 *)(bfi_image_ct_cna + off);
++}
++
++u32
++bfa_cb_image_get_size(int type)
++{
++	return bfi_image_ct_cna_size;
++}
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2189,6 +2189,9 @@
+ #define PCI_VENDOR_ID_ARIMA		0x161f
+ 
+ #define PCI_VENDOR_ID_BROCADE		0x1657
++#define PCI_DEVICE_ID_BROCADE_CT	0x0014
++#define PCI_DEVICE_ID_BROCADE_FC_8G1P	0x0017
++#define PCI_DEVICE_ID_BROCADE_CT_FC	0x0021
+ 
+ #define PCI_VENDOR_ID_SIBYTE		0x166d
+ #define PCI_DEVICE_ID_BCM1250_PCI	0x0001
+-- 
+1.7.4.4
+
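
The cna_fwimg.c file added above loads the CT firmware image once via
request_firmware() and caches it for later callers.  An illustrative
call site (the probe function below is hypothetical, not taken from
the driver):

	/* Hypothetical caller of cna_get_firmware_buf() during probe. */
	static int example_probe(struct pci_dev *pdev)
	{
		u32 *fw = cna_get_firmware_buf(pdev);

		if (!fw)
			return -ENODEV;	/* request_firmware() failed */

		/* Image words would then be fed to the IOC download
		 * loop, chunk by chunk, via bfa_cb_image_get_chunk(). */
		return 0;
	}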

Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0002-bna-Delete-get_flags-and-set_flags-ethtool-methods.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0002-bna-Delete-get_flags-and-set_flags-ethtool-methods.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,34 @@
+From: David S. Miller <davem at davemloft.net>
+Date: Mon, 23 Aug 2010 20:34:51 -0700
+Subject: [PATCH 02/23] bna: Delete get_flags and set_flags ethtool methods.
+
+commit f04b4dd2b1f533cef0507e0410ffc6732d21a272 upstream.
+
+This driver doesn't support the LRO, NTUPLE, or RXHASH features, so
+it should not set these ethtool operations.
+
+This also fixes the warning:
+
+drivers/net/bna/bnad_ethtool.c:1272: warning: initialization from incompatible pointer type
+
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/bna/bnad_ethtool.c |    2 --
+ 1 files changed, 0 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
+index e982785..7e630f5 100644
+--- a/drivers/net/bna/bnad_ethtool.c
++++ b/drivers/net/bna/bnad_ethtool.c
+@@ -1268,8 +1268,6 @@ static struct ethtool_ops bnad_ethtool_ops = {
+ 	.set_sg = ethtool_op_set_sg,
+ 	.get_tso = ethtool_op_get_tso,
+ 	.set_tso = bnad_set_tso,
+-	.get_flags = ethtool_op_get_flags,
+-	.set_flags = ethtool_op_set_flags,
+ 	.get_strings = bnad_get_strings,
+ 	.get_ethtool_stats = bnad_get_ethtool_stats,
+ 	.get_sset_count = bnad_get_sset_count
+-- 
+1.7.4.4
+
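
With the two entries deleted, the ethtool core sees NULL pointers for
these operations and treats them as unsupported.  A minimal sketch of
that NULL-op guard pattern (illustrative only; the kernel's actual
dispatch code differs across versions):

	static int example_set_flags(struct net_device *dev, u32 data)
	{
		const struct ethtool_ops *ops = dev->ethtool_ops;

		if (!ops->set_flags)
			return -EOPNOTSUPP;	/* driver opted out */
		return ops->set_flags(dev, data);
	}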

Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0003-bna-Fixed-build-break-for-allyesconfig.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0003-bna-Fixed-build-break-for-allyesconfig.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,1356 @@
+From: Rasesh Mody <rmody at brocade.com>
+Date: Wed, 25 Aug 2010 23:00:27 -0700
+Subject: [PATCH 03/23] bna: Fixed build break for allyesconfig
+
+commit 8a891429d1879ae4f37f547ef5c2d68e19277e4a upstream.
+
+This patch fixes the build break caused by multiple definitions of
+symbols shared between Brocade's FC/FCoE driver (BFA) and the 10G
+networking driver (BNA).
+
+The changes are:
+
+1. Locally used functions are made static.
+
+2. Unused functions are removed.
+
+3. Function names that must remain globally visible are given a
+unique namespace prefix.
+
+Signed-off-by: Debashis Dutt <ddutt at brocade.com>
+Signed-off-by: Rasesh Mody <rmody at brocade.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/bna/bfa_cee.c      |  136 ++--------------------
+ drivers/net/bna/bfa_cee.h      |   14 +--
+ drivers/net/bna/bfa_ioc.c      |  249 ++++++++++++----------------------------
+ drivers/net/bna/bfa_ioc.h      |   96 +++++-----------
+ drivers/net/bna/bfa_ioc_ct.c   |   49 ++++----
+ drivers/net/bna/bna_ctrl.c     |   38 +++---
+ drivers/net/bna/bnad.c         |    9 +-
+ drivers/net/bna/bnad_ethtool.c |    2 +-
+ 8 files changed, 162 insertions(+), 431 deletions(-)
+
+diff --git a/drivers/net/bna/bfa_cee.c b/drivers/net/bna/bfa_cee.c
+index 1545fc9..f7b789a 100644
+--- a/drivers/net/bna/bfa_cee.c
++++ b/drivers/net/bna/bfa_cee.c
+@@ -152,7 +152,7 @@ bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
+ 		cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
+ }
+ /**
+- * bfa_cee_meminfo()
++ * bfa_nw_cee_meminfo()
+  *
+  * @brief Returns the size of the DMA memory needed by CEE module
+  *
+@@ -161,13 +161,13 @@ bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
+  * @return Size of DMA region
+  */
+ u32
+-bfa_cee_meminfo(void)
++bfa_nw_cee_meminfo(void)
+ {
+ 	return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo();
+ }
+ 
+ /**
+- * bfa_cee_mem_claim()
++ * bfa_nw_cee_mem_claim()
+  *
+  * @brief Initialized CEE DMA Memory
+  *
+@@ -178,7 +178,7 @@ bfa_cee_meminfo(void)
+  * @return void
+  */
+ void
+-bfa_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
++bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
+ {
+ 	cee->attr_dma.kva = dma_kva;
+ 	cee->attr_dma.pa = dma_pa;
+@@ -190,108 +190,6 @@ bfa_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
+ }
+ 
+ /**
+- * bfa_cee_get_attr()
+- *
+- * @brief
+- *   Send the request to the f/w to fetch CEE attributes.
+- *
+- * @param[in] Pointer to the CEE module data structure.
+- *
+- * @return Status
+- */
+-
+-enum bfa_status
+-bfa_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
+-		     bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
+-{
+-	struct bfi_cee_get_req *cmd;
+-
+-	BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
+-	if (!bfa_ioc_is_operational(cee->ioc))
+-		return BFA_STATUS_IOC_FAILURE;
+-	if (cee->get_attr_pending == true)
+-		return 	BFA_STATUS_DEVBUSY;
+-	cee->get_attr_pending = true;
+-	cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg;
+-	cee->attr = attr;
+-	cee->cbfn.get_attr_cbfn = cbfn;
+-	cee->cbfn.get_attr_cbarg = cbarg;
+-	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
+-	    bfa_ioc_portid(cee->ioc));
+-	bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
+-	bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb);
+-
+-	return BFA_STATUS_OK;
+-}
+-
+-/**
+- * bfa_cee_get_stats()
+- *
+- * @brief
+- *   Send the request to the f/w to fetch CEE statistics.
+- *
+- * @param[in] Pointer to the CEE module data structure.
+- *
+- * @return Status
+- */
+-
+-enum bfa_status
+-bfa_cee_get_stats(struct bfa_cee *cee, struct bfa_cee_stats *stats,
+-		      bfa_cee_get_stats_cbfn_t cbfn, void *cbarg)
+-{
+-	struct bfi_cee_get_req *cmd;
+-
+-	BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
+-
+-	if (!bfa_ioc_is_operational(cee->ioc))
+-		return BFA_STATUS_IOC_FAILURE;
+-	if (cee->get_stats_pending == true)
+-		return 	BFA_STATUS_DEVBUSY;
+-	cee->get_stats_pending = true;
+-	cmd = (struct bfi_cee_get_req *) cee->get_stats_mb.msg;
+-	cee->stats = stats;
+-	cee->cbfn.get_stats_cbfn = cbfn;
+-	cee->cbfn.get_stats_cbarg = cbarg;
+-	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ,
+-	    bfa_ioc_portid(cee->ioc));
+-	bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa);
+-	bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb);
+-
+-	return BFA_STATUS_OK;
+-}
+-
+-/**
+- * bfa_cee_reset_stats()
+- *
+- * @brief Clears CEE Stats in the f/w.
+- *
+- * @param[in] Pointer to the CEE module data structure.
+- *
+- * @return Status
+- */
+-
+-enum bfa_status
+-bfa_cee_reset_stats(struct bfa_cee *cee, bfa_cee_reset_stats_cbfn_t cbfn,
+-			void *cbarg)
+-{
+-	struct bfi_cee_reset_stats *cmd;
+-
+-	BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
+-	if (!bfa_ioc_is_operational(cee->ioc))
+-		return BFA_STATUS_IOC_FAILURE;
+-	if (cee->reset_stats_pending == true)
+-		return 	BFA_STATUS_DEVBUSY;
+-	cee->reset_stats_pending = true;
+-	cmd = (struct bfi_cee_reset_stats *) cee->reset_stats_mb.msg;
+-	cee->cbfn.reset_stats_cbfn = cbfn;
+-	cee->cbfn.reset_stats_cbarg = cbarg;
+-	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS,
+-	    bfa_ioc_portid(cee->ioc));
+-	bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb);
+-	return BFA_STATUS_OK;
+-}
+-
+-/**
+  * bfa_cee_isrs()
+  *
+  * @brief Handles Mail-box interrupts for CEE module.
+@@ -301,7 +199,7 @@ bfa_cee_reset_stats(struct bfa_cee *cee, bfa_cee_reset_stats_cbfn_t cbfn,
+  * @return void
+  */
+ 
+-void
++static void
+ bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
+ {
+ 	union bfi_cee_i2h_msg_u *msg;
+@@ -334,7 +232,7 @@ bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
+  * @return void
+  */
+ 
+-void
++static void
+ bfa_cee_hbfail(void *arg)
+ {
+ 	struct bfa_cee *cee;
+@@ -367,7 +265,7 @@ bfa_cee_hbfail(void *arg)
+ }
+ 
+ /**
+- * bfa_cee_attach()
++ * bfa_nw_cee_attach()
+  *
+  * @brief CEE module-attach API
+  *
+@@ -380,28 +278,14 @@ bfa_cee_hbfail(void *arg)
+  * @return void
+  */
+ void
+-bfa_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
++bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
+ 		void *dev)
+ {
+ 	BUG_ON(!(cee != NULL));
+ 	cee->dev = dev;
+ 	cee->ioc = ioc;
+ 
+-	bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
++	bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
+ 	bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee);
+-	bfa_ioc_hbfail_register(cee->ioc, &cee->hbfail);
+-}
+-
+-/**
+- * bfa_cee_detach()
+- *
+- * @brief CEE module-detach API
+- *
+- * @param[in] cee - Pointer to the CEE module data structure
+- *
+- * @return void
+- */
+-void
+-bfa_cee_detach(struct bfa_cee *cee)
+-{
++	bfa_nw_ioc_hbfail_register(cee->ioc, &cee->hbfail);
+ }
+diff --git a/drivers/net/bna/bfa_cee.h b/drivers/net/bna/bfa_cee.h
+index 1208cad..20543d1 100644
+--- a/drivers/net/bna/bfa_cee.h
++++ b/drivers/net/bna/bfa_cee.h
+@@ -56,17 +56,9 @@ struct bfa_cee {
+ 	struct bfa_mbox_cmd reset_stats_mb;
+ };
+ 
+-u32 bfa_cee_meminfo(void);
+-void bfa_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
++u32 bfa_nw_cee_meminfo(void);
++void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
+ 	u64 dma_pa);
+-void bfa_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
+-void bfa_cee_detach(struct bfa_cee *cee);
+-enum bfa_status bfa_cee_get_attr(struct bfa_cee *cee,
+-	struct bfa_cee_attr *attr, bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
+-enum bfa_status bfa_cee_get_stats(struct bfa_cee *cee,
+-	struct bfa_cee_stats *stats, bfa_cee_get_stats_cbfn_t cbfn,
+-	void *cbarg);
+-enum bfa_status bfa_cee_reset_stats(struct bfa_cee *cee,
+-	bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg);
++void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
+ 
+ #endif /* __BFA_CEE_H__ */
+diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
+index cdc2cb1..caa45c2 100644
+--- a/drivers/net/bna/bfa_ioc.c
++++ b/drivers/net/bna/bfa_ioc.c
+@@ -65,7 +65,7 @@
+ 			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
+ 			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
+ 
+-bool bfa_auto_recover = true;
++bool bfa_nw_auto_recover = true;
+ 
+ /*
+  * forward declarations
+@@ -85,6 +85,23 @@ static void bfa_ioc_recover(struct bfa_ioc *ioc);
+ static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
+ static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
+ static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
++static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
++			 u32 boot_param);
++static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
++static u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
++static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
++						char *serial_num);
++static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
++						char *fw_ver);
++static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
++						char *chip_rev);
++static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
++						char *optrom_ver);
++static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
++						char *manufacturer);
++static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
++static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
++static mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc);
+ 
+ /**
+  * IOC state machine events
+@@ -138,7 +155,7 @@ static void
+ bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
+ {
+ 	ioc->retry_count = 0;
+-	ioc->auto_recover = bfa_auto_recover;
++	ioc->auto_recover = bfa_nw_auto_recover;
+ }
+ 
+ /**
+@@ -185,7 +202,7 @@ bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
+ 			ioc->retry_count = 0;
+ 			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
+ 		} else {
+-			bfa_ioc_hw_sem_release(ioc);
++			bfa_nw_ioc_hw_sem_release(ioc);
+ 			bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
+ 		}
+ 		break;
+@@ -314,12 +331,12 @@ bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
+ 			break;
+ 		}
+ 
+-		bfa_ioc_hw_sem_release(ioc);
++		bfa_nw_ioc_hw_sem_release(ioc);
+ 		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ 		break;
+ 
+ 	case IOC_E_DISABLE:
+-		bfa_ioc_hw_sem_release(ioc);
++		bfa_nw_ioc_hw_sem_release(ioc);
+ 		bfa_ioc_timer_stop(ioc);
+ 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ 		break;
+@@ -346,7 +363,7 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
+ 	switch (event) {
+ 	case IOC_E_FWRSP_ENABLE:
+ 		bfa_ioc_timer_stop(ioc);
+-		bfa_ioc_hw_sem_release(ioc);
++		bfa_nw_ioc_hw_sem_release(ioc);
+ 		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+ 		break;
+ 
+@@ -363,13 +380,13 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
+ 			break;
+ 		}
+ 
+-		bfa_ioc_hw_sem_release(ioc);
++		bfa_nw_ioc_hw_sem_release(ioc);
+ 		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+ 		break;
+ 
+ 	case IOC_E_DISABLE:
+ 		bfa_ioc_timer_stop(ioc);
+-		bfa_ioc_hw_sem_release(ioc);
++		bfa_nw_ioc_hw_sem_release(ioc);
+ 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ 		break;
+ 
+@@ -662,7 +679,7 @@ bfa_ioc_disable_comp(struct bfa_ioc *ioc)
+ }
+ 
+ void
+-bfa_ioc_sem_timeout(void *ioc_arg)
++bfa_nw_ioc_sem_timeout(void *ioc_arg)
+ {
+ 	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
+ 
+@@ -670,7 +687,7 @@ bfa_ioc_sem_timeout(void *ioc_arg)
+ }
+ 
+ bool
+-bfa_ioc_sem_get(void __iomem *sem_reg)
++bfa_nw_ioc_sem_get(void __iomem *sem_reg)
+ {
+ 	u32 r32;
+ 	int cnt = 0;
+@@ -692,7 +709,7 @@ bfa_ioc_sem_get(void __iomem *sem_reg)
+ }
+ 
+ void
+-bfa_ioc_sem_release(void __iomem *sem_reg)
++bfa_nw_ioc_sem_release(void __iomem *sem_reg)
+ {
+ 	writel(1, sem_reg);
+ }
+@@ -717,7 +734,7 @@ bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
+ }
+ 
+ void
+-bfa_ioc_hw_sem_release(struct bfa_ioc *ioc)
++bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
+ {
+ 	writel(1, ioc->ioc_regs.ioc_sem_reg);
+ }
+@@ -800,7 +817,7 @@ bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
+  * Get driver and firmware versions.
+  */
+ void
+-bfa_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
++bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
+ {
+ 	u32	pgnum, pgoff;
+ 	u32	loff = 0;
+@@ -823,7 +840,7 @@ bfa_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
+  * Returns TRUE if same.
+  */
+ bool
+-bfa_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
++bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
+ {
+ 	struct bfi_ioc_image_hdr *drv_fwhdr;
+ 	int i;
+@@ -854,7 +871,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
+ 	if (bfa_ioc_is_optrom(ioc))
+ 		return true;
+ 
+-	bfa_ioc_fwver_get(ioc, &fwhdr);
++	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
+ 	drv_fwhdr = (struct bfi_ioc_image_hdr *)
+ 		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+ 
+@@ -864,7 +881,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
+ 	if (fwhdr.exec != drv_fwhdr->exec)
+ 		return false;
+ 
+-	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
++	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
+ }
+ 
+ /**
+@@ -941,14 +958,14 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
+ }
+ 
+ void
+-bfa_ioc_timeout(void *ioc_arg)
++bfa_nw_ioc_timeout(void *ioc_arg)
+ {
+ 	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
+ 
+ 	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
+ }
+ 
+-void
++static void
+ bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
+ {
+ 	u32 *msgp = (u32 *) ioc_msg;
+@@ -1009,7 +1026,7 @@ bfa_ioc_send_getattr(struct bfa_ioc *ioc)
+ }
+ 
+ void
+-bfa_ioc_hb_check(void *cbarg)
++bfa_nw_ioc_hb_check(void *cbarg)
+ {
+ 	struct bfa_ioc *ioc = cbarg;
+ 	u32	hb_count;
+@@ -1195,13 +1212,13 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
+ /**
+  * IOC public
+  */
+-enum bfa_status
++static enum bfa_status
+ bfa_ioc_pll_init(struct bfa_ioc *ioc)
+ {
+ 	/*
+ 	 *  Hold semaphore so that nobody can access the chip during init.
+ 	 */
+-	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
++	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
+ 
+ 	bfa_ioc_pll_init_asic(ioc);
+ 
+@@ -1209,7 +1226,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
+ 	/*
+ 	 *  release semaphore.
+ 	 */
+-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
++	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+ 
+ 	return BFA_STATUS_OK;
+ }
+@@ -1218,7 +1235,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
+  * Interface used by diag module to do firmware boot with memory test
+  * as the entry vector.
+  */
+-void
++static void
+ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
+ {
+ 	void __iomem *rb;
+@@ -1254,28 +1271,18 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
+  * Enable/disable IOC failure auto recovery.
+  */
+ void
+-bfa_ioc_auto_recover(bool auto_recover)
++bfa_nw_ioc_auto_recover(bool auto_recover)
+ {
+-	bfa_auto_recover = auto_recover;
++	bfa_nw_auto_recover = auto_recover;
+ }
+ 
+ bool
+-bfa_ioc_is_operational(struct bfa_ioc *ioc)
++bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
+ {
+ 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
+ }
+ 
+-bool
+-bfa_ioc_is_initialized(struct bfa_ioc *ioc)
+-{
+-	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
+-
+-	return ((r32 != BFI_IOC_UNINIT) &&
+-		(r32 != BFI_IOC_INITING) &&
+-		(r32 != BFI_IOC_MEMTEST));
+-}
+-
+-void
++static void
+ bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
+ {
+ 	u32	*msgp = mbmsg;
+@@ -1299,7 +1306,7 @@ bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
+ 	readl(ioc->ioc_regs.lpu_mbox_cmd);
+ }
+ 
+-void
++static void
+ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
+ {
+ 	union bfi_ioc_i2h_msg_u	*msg;
+@@ -1340,7 +1347,7 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
+  * @param[in]	bfa	driver instance structure
+  */
+ void
+-bfa_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
++bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
+ {
+ 	ioc->bfa	= bfa;
+ 	ioc->cbfn	= cbfn;
+@@ -1358,7 +1365,7 @@ bfa_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
+  * Driver detach time IOC cleanup.
+  */
+ void
+-bfa_ioc_detach(struct bfa_ioc *ioc)
++bfa_nw_ioc_detach(struct bfa_ioc *ioc)
+ {
+ 	bfa_fsm_send_event(ioc, IOC_E_DETACH);
+ }
+@@ -1369,7 +1376,7 @@ bfa_ioc_detach(struct bfa_ioc *ioc)
+  * @param[in]	pcidev	PCI device information for this IOC
+  */
+ void
+-bfa_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
++bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
+ 		 enum bfi_mclass mc)
+ {
+ 	ioc->ioc_mc	= mc;
+@@ -1377,7 +1384,7 @@ bfa_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
+ 	ioc->ctdev	= bfa_asic_id_ct(ioc->pcidev.device_id);
+ 	ioc->cna	= ioc->ctdev && !ioc->fcmode;
+ 
+-	bfa_ioc_set_ct_hwif(ioc);
++	bfa_nw_ioc_set_ct_hwif(ioc);
+ 
+ 	bfa_ioc_map_port(ioc);
+ 	bfa_ioc_reg_init(ioc);
+@@ -1390,7 +1397,7 @@ bfa_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
+  * @param[in]	dm_pa	physical address of IOC dma memory
+  */
+ void
+-bfa_ioc_mem_claim(struct bfa_ioc *ioc,  u8 *dm_kva, u64 dm_pa)
++bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc,  u8 *dm_kva, u64 dm_pa)
+ {
+ 	/**
+ 	 * dma memory for firmware attribute
+@@ -1404,13 +1411,13 @@ bfa_ioc_mem_claim(struct bfa_ioc *ioc,  u8 *dm_kva, u64 dm_pa)
+  * Return size of dma memory required.
+  */
+ u32
+-bfa_ioc_meminfo(void)
++bfa_nw_ioc_meminfo(void)
+ {
+ 	return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
+ }
+ 
+ void
+-bfa_ioc_enable(struct bfa_ioc *ioc)
++bfa_nw_ioc_enable(struct bfa_ioc *ioc)
+ {
+ 	bfa_ioc_stats(ioc, ioc_enables);
+ 	ioc->dbg_fwsave_once = true;
+@@ -1419,45 +1426,29 @@ bfa_ioc_enable(struct bfa_ioc *ioc)
+ }
+ 
+ void
+-bfa_ioc_disable(struct bfa_ioc *ioc)
++bfa_nw_ioc_disable(struct bfa_ioc *ioc)
+ {
+ 	bfa_ioc_stats(ioc, ioc_disables);
+ 	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
+ }
+ 
+-u32
++static u32
+ bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
+ {
+ 	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
+ }
+ 
+-u32
++static u32
+ bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr)
+ {
+ 	return PSS_SMEM_PGOFF(fmaddr);
+ }
+ 
+ /**
+- * Register mailbox message handler functions
+- *
+- * @param[in]	ioc		IOC instance
+- * @param[in]	mcfuncs		message class handler functions
+- */
+-void
+-bfa_ioc_mbox_register(struct bfa_ioc *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
+-{
+-	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+-	int				mc;
+-
+-	for (mc = 0; mc < BFI_MC_MAX; mc++)
+-		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
+-}
+-
+-/**
+  * Register mailbox message handler function, to be called by common modules
+  */
+ void
+-bfa_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
++bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
+ 		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
+ {
+ 	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+@@ -1474,7 +1465,7 @@ bfa_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
+  * @param[i]	cmd	Mailbox command
+  */
+ void
+-bfa_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
++bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
+ {
+ 	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ 	u32			stat;
+@@ -1506,7 +1497,7 @@ bfa_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
+  * Handle mailbox interrupts
+  */
+ void
+-bfa_ioc_mbox_isr(struct bfa_ioc *ioc)
++bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
+ {
+ 	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
+ 	struct bfi_mbmsg m;
+@@ -1530,86 +1521,24 @@ bfa_ioc_mbox_isr(struct bfa_ioc *ioc)
+ }
+ 
+ void
+-bfa_ioc_error_isr(struct bfa_ioc *ioc)
++bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
+ {
+ 	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
+ }
+ 
+-void
+-bfa_ioc_set_fcmode(struct bfa_ioc *ioc)
+-{
+-	ioc->fcmode  = true;
+-	ioc->port_id = bfa_ioc_pcifn(ioc);
+-}
+-
+-/**
+- * return true if IOC is disabled
+- */
+-bool
+-bfa_ioc_is_disabled(struct bfa_ioc *ioc)
+-{
+-	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
+-		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
+-}
+-
+-/**
+- * return true if IOC firmware is different.
+- */
+-bool
+-bfa_ioc_fw_mismatch(struct bfa_ioc *ioc)
+-{
+-	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
+-		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck) ||
+-		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch);
+-}
+-
+-#define bfa_ioc_state_disabled(__sm)		\
+-	(((__sm) == BFI_IOC_UNINIT) ||		\
+-	 ((__sm) == BFI_IOC_INITING) ||		\
+-	 ((__sm) == BFI_IOC_HWINIT) ||		\
+-	 ((__sm) == BFI_IOC_DISABLED) ||	\
+-	 ((__sm) == BFI_IOC_FAIL) ||		\
+-	 ((__sm) == BFI_IOC_CFG_DISABLED))
+-
+-/**
+- * Check if adapter is disabled -- both IOCs should be in a disabled
+- * state.
+- */
+-bool
+-bfa_ioc_adapter_is_disabled(struct bfa_ioc *ioc)
+-{
+-	u32	ioc_state;
+-	void __iomem *rb = ioc->pcidev.pci_bar_kva;
+-
+-	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
+-		return false;
+-
+-	ioc_state = readl(rb + BFA_IOC0_STATE_REG);
+-	if (!bfa_ioc_state_disabled(ioc_state))
+-		return false;
+-
+-	if (ioc->pcidev.device_id != PCI_DEVICE_ID_BROCADE_FC_8G1P) {
+-		ioc_state = readl(rb + BFA_IOC1_STATE_REG);
+-		if (!bfa_ioc_state_disabled(ioc_state))
+-			return false;
+-	}
+-
+-	return true;
+-}
+-
+ /**
+  * Add to IOC heartbeat failure notification queue. To be used by common
+  * modules such as cee, port, diag.
+  */
+ void
+-bfa_ioc_hbfail_register(struct bfa_ioc *ioc,
++bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
+ 			struct bfa_ioc_hbfail_notify *notify)
+ {
+ 	list_add_tail(&notify->qe, &ioc->hb_notify_q);
+ }
+ 
+ #define BFA_MFG_NAME "Brocade"
+-void
++static void
+ bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
+ 			 struct bfa_adapter_attr *ad_attr)
+ {
+@@ -1640,7 +1569,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
+ 		ad_attr->prototype = 0;
+ 
+ 	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
+-	ad_attr->mac  = bfa_ioc_get_mac(ioc);
++	ad_attr->mac  = bfa_nw_ioc_get_mac(ioc);
+ 
+ 	ad_attr->pcie_gen = ioc_attr->pcie_gen;
+ 	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
+@@ -1653,7 +1582,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
+ 	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
+ }
+ 
+-enum bfa_ioc_type
++static enum bfa_ioc_type
+ bfa_ioc_get_type(struct bfa_ioc *ioc)
+ {
+ 	if (!ioc->ctdev || ioc->fcmode)
+@@ -1668,7 +1597,7 @@ bfa_ioc_get_type(struct bfa_ioc *ioc)
+ 	}
+ }
+ 
+-void
++static void
+ bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
+ {
+ 	memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
+@@ -1677,14 +1606,14 @@ bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
+ 			BFA_ADAPTER_SERIAL_NUM_LEN);
+ }
+ 
+-void
++static void
+ bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
+ {
+ 	memset(fw_ver, 0, BFA_VERSION_LEN);
+ 	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
+ }
+ 
+-void
++static void
+ bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
+ {
+ 	BUG_ON(!(chip_rev));
+@@ -1699,7 +1628,7 @@ bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
+ 	chip_rev[5] = '\0';
+ }
+ 
+-void
++static void
+ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
+ {
+ 	memset(optrom_ver, 0, BFA_VERSION_LEN);
+@@ -1707,14 +1636,14 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
+ 		      BFA_VERSION_LEN);
+ }
+ 
+-void
++static void
+ bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
+ {
+ 	memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
+ 	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ }
+ 
+-void
++static void
+ bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
+ {
+ 	struct bfi_ioc_attr *ioc_attr;
+@@ -1731,14 +1660,14 @@ bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
+ 		BFA_MFG_NAME, ioc_attr->card_type);
+ }
+ 
+-enum bfa_ioc_state
++static enum bfa_ioc_state
+ bfa_ioc_get_state(struct bfa_ioc *ioc)
+ {
+ 	return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+ }
+ 
+ void
+-bfa_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
++bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
+ {
+ 	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
+ 
+@@ -1757,26 +1686,14 @@ bfa_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
+ /**
+  * WWN public
+  */
+-u64
++static u64
+ bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
+ {
+ 	return ioc->attr->pwwn;
+ }
+ 
+-u64
+-bfa_ioc_get_nwwn(struct bfa_ioc *ioc)
+-{
+-	return ioc->attr->nwwn;
+-}
+-
+-u64
+-bfa_ioc_get_adid(struct bfa_ioc *ioc)
+-{
+-	return ioc->attr->mfg_pwwn;
+-}
+-
+ mac_t
+-bfa_ioc_get_mac(struct bfa_ioc *ioc)
++bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
+ {
+ 	/*
+ 	 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
+@@ -1787,19 +1704,7 @@ bfa_ioc_get_mac(struct bfa_ioc *ioc)
+ 		return ioc->attr->mac;
+ }
+ 
+-u64
+-bfa_ioc_get_mfg_pwwn(struct bfa_ioc *ioc)
+-{
+-	return ioc->attr->mfg_pwwn;
+-}
+-
+-u64
+-bfa_ioc_get_mfg_nwwn(struct bfa_ioc *ioc)
+-{
+-	return ioc->attr->mfg_nwwn;
+-}
+-
+-mac_t
++static mac_t
+ bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
+ {
+ 	mac_t	m;
+@@ -1814,12 +1719,6 @@ bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
+ 	return m;
+ }
+ 
+-bool
+-bfa_ioc_get_fcmode(struct bfa_ioc *ioc)
+-{
+-	return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
+-}
+-
+ /**
+  * Firmware failure detected. Start recovery actions.
+  */
+diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
+index 2e5c0ad..7f0719e 100644
+--- a/drivers/net/bna/bfa_ioc.h
++++ b/drivers/net/bna/bfa_ioc.h
+@@ -239,13 +239,9 @@ struct bfa_ioc_hwif {
+ /**
+  * IOC mailbox interface
+  */
+-void bfa_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd);
+-void bfa_ioc_mbox_register(struct bfa_ioc *ioc,
+-		bfa_ioc_mbox_mcfunc_t *mcfuncs);
+-void bfa_ioc_mbox_isr(struct bfa_ioc *ioc);
+-void bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len);
+-void bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg);
+-void bfa_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
++void bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd);
++void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc);
++void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
+ 		bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
+ 
+ /**
+@@ -256,83 +252,45 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
+ 	((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
+ 			   (__ioc)->fcmode))
+ 
+-enum bfa_status bfa_ioc_pll_init(struct bfa_ioc *ioc);
+-enum bfa_status bfa_ioc_cb_pll_init(void __iomem *rb, bool fcmode);
+-enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
+-
+ #define	bfa_ioc_isr_mode_set(__ioc, __msix)			\
+ 			((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
+ #define	bfa_ioc_ownership_reset(__ioc)				\
+ 			((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
+ 
+-void bfa_ioc_set_ct_hwif(struct bfa_ioc *ioc);
++void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc);
+ 
+-void bfa_ioc_attach(struct bfa_ioc *ioc, void *bfa,
++void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa,
+ 		struct bfa_ioc_cbfn *cbfn);
+-void bfa_ioc_auto_recover(bool auto_recover);
+-void bfa_ioc_detach(struct bfa_ioc *ioc);
+-void bfa_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
++void bfa_nw_ioc_auto_recover(bool auto_recover);
++void bfa_nw_ioc_detach(struct bfa_ioc *ioc);
++void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
+ 		enum bfi_mclass mc);
+-u32 bfa_ioc_meminfo(void);
+-void bfa_ioc_mem_claim(struct bfa_ioc *ioc,  u8 *dm_kva, u64 dm_pa);
+-void bfa_ioc_enable(struct bfa_ioc *ioc);
+-void bfa_ioc_disable(struct bfa_ioc *ioc);
+-bool bfa_ioc_intx_claim(struct bfa_ioc *ioc);
+-
+-void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
+-		u32 boot_param);
+-void bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *msg);
+-void bfa_ioc_error_isr(struct bfa_ioc *ioc);
+-bool bfa_ioc_is_operational(struct bfa_ioc *ioc);
+-bool bfa_ioc_is_initialized(struct bfa_ioc *ioc);
+-bool bfa_ioc_is_disabled(struct bfa_ioc *ioc);
+-bool bfa_ioc_fw_mismatch(struct bfa_ioc *ioc);
+-bool bfa_ioc_adapter_is_disabled(struct bfa_ioc *ioc);
+-void bfa_ioc_cfg_complete(struct bfa_ioc *ioc);
+-enum bfa_ioc_type bfa_ioc_get_type(struct bfa_ioc *ioc);
+-void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num);
+-void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver);
+-void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver);
+-void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
+-void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
+-		char *manufacturer);
+-void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev);
+-enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc *ioc);
+-
+-void bfa_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
+-void bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
+-		struct bfa_adapter_attr *ad_attr);
+-u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
+-u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
+-void bfa_ioc_set_fcmode(struct bfa_ioc *ioc);
+-bool bfa_ioc_get_fcmode(struct bfa_ioc *ioc);
+-void bfa_ioc_hbfail_register(struct bfa_ioc *ioc,
++u32 bfa_nw_ioc_meminfo(void);
++void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc,  u8 *dm_kva, u64 dm_pa);
++void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
++void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
++
++void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
++bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
++
++void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
++void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
+ 	struct bfa_ioc_hbfail_notify *notify);
+-bool bfa_ioc_sem_get(void __iomem *sem_reg);
+-void bfa_ioc_sem_release(void __iomem *sem_reg);
+-void bfa_ioc_hw_sem_release(struct bfa_ioc *ioc);
+-void bfa_ioc_fwver_get(struct bfa_ioc *ioc,
++bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
++void bfa_nw_ioc_sem_release(void __iomem *sem_reg);
++void bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc);
++void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
+ 			struct bfi_ioc_image_hdr *fwhdr);
+-bool bfa_ioc_fwver_cmp(struct bfa_ioc *ioc,
++bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
+ 			struct bfi_ioc_image_hdr *fwhdr);
++mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
+ 
+ /*
+  * Timeout APIs
+  */
+-void bfa_ioc_timeout(void *ioc);
+-void bfa_ioc_hb_check(void *ioc);
+-void bfa_ioc_sem_timeout(void *ioc);
+-
+-/*
+- * bfa mfg wwn API functions
+- */
+-u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
+-u64 bfa_ioc_get_nwwn(struct bfa_ioc *ioc);
+-mac_t bfa_ioc_get_mac(struct bfa_ioc *ioc);
+-u64 bfa_ioc_get_mfg_pwwn(struct bfa_ioc *ioc);
+-u64 bfa_ioc_get_mfg_nwwn(struct bfa_ioc *ioc);
+-mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc);
+-u64 bfa_ioc_get_adid(struct bfa_ioc *ioc);
++void bfa_nw_ioc_timeout(void *ioc);
++void bfa_nw_ioc_hb_check(void *ioc);
++void bfa_nw_ioc_sem_timeout(void *ioc);
+ 
+ /*
+  * F/W Image Size & Chunk
+diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
+index 870046e..462857c 100644
+--- a/drivers/net/bna/bfa_ioc_ct.c
++++ b/drivers/net/bna/bfa_ioc_ct.c
+@@ -32,25 +32,26 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
+ static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
+ static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
+ static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
++static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
+ 
+-struct bfa_ioc_hwif hwif_ct;
++struct bfa_ioc_hwif nw_hwif_ct;
+ 
+ /**
+  * Called from bfa_ioc_attach() to map asic specific calls.
+  */
+ void
+-bfa_ioc_set_ct_hwif(struct bfa_ioc *ioc)
++bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
+ {
+-	hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
+-	hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
+-	hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
+-	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
+-	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
+-	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
+-	hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
+-	hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+-
+-	ioc->ioc_hwif = &hwif_ct;
++	nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
++	nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
++	nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
++	nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
++	nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
++	nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
++	nw_hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
++	nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
++
++	ioc->ioc_hwif = &nw_hwif_ct;
+ }
+ 
+ /**
+@@ -76,7 +77,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
+ 						BFA_IOC_FWIMG_MINSZ)
+ 		return true;
+ 
+-	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
++	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ 	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
+ 
+ 	/**
+@@ -84,7 +85,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
+ 	 */
+ 	if (usecnt == 0) {
+ 		writel(1, ioc->ioc_regs.ioc_usage_reg);
+-		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
++		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ 		return true;
+ 	}
+ 
+@@ -98,9 +99,9 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
+ 	/**
+ 	 * Check if another driver with a different firmware is active
+ 	 */
+-	bfa_ioc_fwver_get(ioc, &fwhdr);
+-	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
+-		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
++	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
++	if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
++		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ 		return false;
+ 	}
+ 
+@@ -109,7 +110,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
+ 	 */
+ 	usecnt++;
+ 	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
+-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
++	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ 	return true;
+ }
+ 
+@@ -134,14 +135,14 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
+ 	/**
+ 	 * decrement usage count
+ 	 */
+-	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
++	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ 	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
+ 	BUG_ON(!(usecnt > 0));
+ 
+ 	usecnt--;
+ 	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
+ 
+-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
++	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ }
+ 
+ /**
+@@ -302,9 +303,9 @@ static void
+ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
+ {
+ 	if (ioc->cna) {
+-		bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
++		bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ 		writel(0, ioc->ioc_regs.ioc_usage_reg);
+-		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
++		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ 	}
+ 
+ 	/*
+@@ -313,10 +314,10 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
+ 	 * will lock it instead of clearing it.
+ 	 */
+ 	readl(ioc->ioc_regs.ioc_sem_reg);
+-	bfa_ioc_hw_sem_release(ioc);
++	bfa_nw_ioc_hw_sem_release(ioc);
+ }
+ 
+-enum bfa_status
++static enum bfa_status
+ bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
+ {
+ 	u32	pll_sclk, pll_fclk, r32;
+diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
+index 9d41ebf..f3034d6 100644
+--- a/drivers/net/bna/bna_ctrl.c
++++ b/drivers/net/bna/bna_ctrl.c
+@@ -81,7 +81,7 @@ bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
+ 			/* Post the next entry, if needed */
+ 			if (to_post) {
+ 				mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
+-				bfa_ioc_mbox_queue(&bna->device.ioc,
++				bfa_nw_ioc_mbox_queue(&bna->device.ioc,
+ 							&mb_qe->cmd);
+ 			}
+ 		} else {
+@@ -107,7 +107,7 @@ bna_err_handler(struct bna *bna, u32 intr_status)
+ 		writel(init_halt, bna->device.ioc.ioc_regs.ll_halt);
+ 	}
+ 
+-	bfa_ioc_error_isr(&bna->device.ioc);
++	bfa_nw_ioc_error_isr(&bna->device.ioc);
+ }
+ 
+ void
+@@ -118,7 +118,7 @@ bna_mbox_handler(struct bna *bna, u32 intr_status)
+ 		return;
+ 	}
+ 	if (BNA_IS_MBOX_INTR(intr_status))
+-		bfa_ioc_mbox_isr(&bna->device.ioc);
++		bfa_nw_ioc_mbox_isr(&bna->device.ioc);
+ }
+ 
+ void
+@@ -133,7 +133,7 @@ bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
+ 	bna->mbox_mod.msg_pending++;
+ 	if (bna->mbox_mod.state == BNA_MBOX_FREE) {
+ 		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
+-		bfa_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
++		bfa_nw_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
+ 		bna->mbox_mod.state = BNA_MBOX_POSTED;
+ 	} else {
+ 		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
+@@ -180,7 +180,7 @@ bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
+ void
+ bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
+ {
+-	bfa_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
++	bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
+ 	mbox_mod->state = BNA_MBOX_FREE;
+ 	mbox_mod->msg_ctr = mbox_mod->msg_pending = 0;
+ 	INIT_LIST_HEAD(&mbox_mod->posted_q);
+@@ -1289,7 +1289,7 @@ bna_port_mtu_set(struct bna_port *port, int mtu,
+ void
+ bna_port_mac_get(struct bna_port *port, mac_t *mac)
+ {
+-	*mac = bfa_ioc_get_mac(&port->bna->device.ioc);
++	*mac = bfa_nw_ioc_get_mac(&port->bna->device.ioc);
+ }
+ 
+ /**
+@@ -1427,7 +1427,7 @@ bna_device_sm_stopped(struct bna_device *device,
+ 	case DEVICE_E_ENABLE:
+ 		if (device->intr_type == BNA_INTR_T_MSIX)
+ 			bna_mbox_msix_idx_set(device);
+-		bfa_ioc_enable(&device->ioc);
++		bfa_nw_ioc_enable(&device->ioc);
+ 		bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
+ 		break;
+ 
+@@ -1547,7 +1547,7 @@ bna_device_sm_port_stop_wait(struct bna_device *device,
+ static void
+ bna_device_sm_ioc_disable_wait_entry(struct bna_device *device)
+ {
+-	bfa_ioc_disable(&device->ioc);
++	bfa_nw_ioc_disable(&device->ioc);
+ }
+ 
+ static void
+@@ -1655,12 +1655,12 @@ bna_device_init(struct bna_device *device, struct bna *bna,
+ 	 *	1. DMA memory for IOC attributes
+ 	 *	2. Kernel memory for FW trace
+ 	 */
+-	bfa_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
+-	bfa_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);
++	bfa_nw_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
++	bfa_nw_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);
+ 
+ 	BNA_GET_DMA_ADDR(
+ 		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
+-	bfa_ioc_mem_claim(&device->ioc,
++	bfa_nw_ioc_mem_claim(&device->ioc,
+ 		res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva,
+ 			  dma);
+ 
+@@ -1686,9 +1686,7 @@ bna_device_uninit(struct bna_device *device)
+ {
+ 	bna_mbox_mod_uninit(&device->bna->mbox_mod);
+ 
+-	bfa_cee_detach(&device->bna->cee);
+-
+-	bfa_ioc_detach(&device->ioc);
++	bfa_nw_ioc_detach(&device->ioc);
+ 
+ 	device->bna = NULL;
+ }
+@@ -1783,10 +1781,10 @@ bna_adv_device_init(struct bna_device *device, struct bna *bna,
+ 		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
+ 	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
+ 
+-	bfa_cee_attach(&bna->cee, &device->ioc, bna);
+-	bfa_cee_mem_claim(&bna->cee, kva, dma);
+-	kva += bfa_cee_meminfo();
+-	dma += bfa_cee_meminfo();
++	bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
++	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
++	kva += bfa_nw_cee_meminfo();
++	dma += bfa_nw_cee_meminfo();
+ 
+ }
+ 
+@@ -1800,7 +1798,7 @@ bna_adv_res_req(struct bna_res_info *res_info)
+ 	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ 	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
+ 	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
+-				bfa_cee_meminfo(), PAGE_SIZE);
++				bfa_nw_cee_meminfo(), PAGE_SIZE);
+ 
+ 	/* Virtual memory for retreiving fw_trc */
+ 	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
+@@ -3333,7 +3331,7 @@ bna_res_req(struct bna_res_info *res_info)
+ 	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
+ 	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
+ 	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
+-				ALIGN(bfa_ioc_meminfo(), PAGE_SIZE);
++				ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);
+ 
+ 	/* DMA memory for index segment of an IB */
+ 	res_info[BNA_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
+diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
+index 491d148..cbc1d56 100644
+--- a/drivers/net/bna/bnad.c
++++ b/drivers/net/bna/bnad.c
+@@ -1365,7 +1365,7 @@ bnad_ioc_timeout(unsigned long data)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&bnad->bna_lock, flags);
+-	bfa_ioc_timeout((void *) &bnad->bna.device.ioc);
++	bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
+ 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ }
+ 
+@@ -1376,7 +1376,7 @@ bnad_ioc_hb_check(unsigned long data)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&bnad->bna_lock, flags);
+-	bfa_ioc_hb_check((void *) &bnad->bna.device.ioc);
++	bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
+ 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ }
+ 
+@@ -1387,7 +1387,7 @@ bnad_ioc_sem_timeout(unsigned long data)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&bnad->bna_lock, flags);
+-	bfa_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
++	bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
+ 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ }
+ 
+@@ -3067,7 +3067,6 @@ bnad_pci_probe(struct pci_dev *pdev,
+ 	}
+ 	bnad = netdev_priv(netdev);
+ 
+-
+ 	/*
+ 	 * PCI initialization
+ 	 * 	Output : using_dac = 1 for 64 bit DMA
+@@ -3239,7 +3238,7 @@ bnad_module_init(void)
+ 
+ 	pr_info("Brocade 10G Ethernet driver\n");
+ 
+-	bfa_ioc_auto_recover(bnad_ioc_auto_recover);
++	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
+ 
+ 	err = pci_register_driver(&bnad_pci_driver);
+ 	if (err < 0) {
+diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
+index 7e630f5..b337bd9 100644
+--- a/drivers/net/bna/bnad_ethtool.c
++++ b/drivers/net/bna/bnad_ethtool.c
+@@ -276,7 +276,7 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+ 	if (ioc_attr) {
+ 		memset(ioc_attr, 0, sizeof(*ioc_attr));
+ 		spin_lock_irqsave(&bnad->bna_lock, flags);
+-		bfa_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
++		bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
+ 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ 
+ 		strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0004-bna-fix-stats-handling.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0004-bna-fix-stats-handling.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,47 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Subject: [PATCH 04/23] bna: fix stats handling
+
+Fixes the same bugs as
+commit 250e061e1d3e26600236a3dd9172e7f5f5916c00 upstream, from which the
+following description is taken:
+
+get_stats() method incorrectly clears a global array before folding
+various stats. This can break SNMP applications.
+
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+---
+--- a/drivers/net/bna/bnad.c
++++ b/drivers/net/bna/bnad.c
+@@ -2035,7 +2035,7 @@ bnad_netdev_qstats_fill(struct bnad *bnad)
+ 		((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
+ 	for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
+ 		if (bmap & 1) {
+-			net_stats->rx_fifo_errors =
++			net_stats->rx_fifo_errors +=
+ 				bnad->stats.bna_stats->
+ 					hw_stats->rxf_stats[i].frame_drops;
+ 			break;
+@@ -2643,17 +2643,22 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ {
+ 	struct bnad *bnad = netdev_priv(netdev);
+ 	unsigned long flags;
++	unsigned i;
+ 
+ 	spin_lock_irqsave(&bnad->bna_lock, flags);
+ 
+ 	memset(&bnad->net_stats, 0, sizeof(struct net_device_stats));
+ 
+ 	bnad_netdev_qstats_fill(bnad);
+ 	bnad_netdev_hwstats_fill(bnad);
++
++	for (i = 0; i < sizeof(struct net_device_stats) / sizeof(unsigned long); i++)
++		((unsigned long *)&netdev->stats)[i] =
++			((unsigned long *)&bnad->net_stats)[i];
+ 
+ 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ 
+-	return &bnad->net_stats;
++	return &netdev->stats;
+ }
+ 
+ static void

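Note on the fix above: the two hunks address distinct problems. Folding the
per-RxF frame drops with "=" instead of "+=" overwrote whatever had already
been accumulated into rx_fifo_errors, and returning a pointer to a
driver-global struct that is memset on every call let concurrent readers
(SNMP pollers among them) observe zeroed or half-filled counters. A condensed
sketch of the shape the stats path takes after the patch (the function name
is illustrative, folding details elided):

	static struct net_device_stats *
	bnad_get_stats_sketch(struct net_device *netdev)
	{
		struct bnad *bnad = netdev_priv(netdev);
		unsigned long flags;
		unsigned int i;

		spin_lock_irqsave(&bnad->bna_lock, flags);

		/* Scratch struct may be cleared here: nobody else reads it. */
		memset(&bnad->net_stats, 0, sizeof(struct net_device_stats));
		bnad_netdev_qstats_fill(bnad);	/* accumulates with "+=" */
		bnad_netdev_hwstats_fill(bnad);

		/* Publish a consistent snapshot into stack-owned storage. */
		for (i = 0; i < sizeof(struct net_device_stats) /
				sizeof(unsigned long); i++)
			((unsigned long *)&netdev->stats)[i] =
				((unsigned long *)&bnad->net_stats)[i];

		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		return &netdev->stats;	/* never zeroed under a reader */
	}
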
Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0006-NET-bna-fix-lock-imbalance.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0006-NET-bna-fix-lock-imbalance.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,41 @@
+From: Jiri Slaby <jslaby at suse.cz>
+Date: Sat, 4 Sep 2010 02:08:41 +0000
+Subject: [PATCH 06/23] NET: bna, fix lock imbalance
+
+commit ca1cef3a3a8af961f504abfcf9e13dac088e1ad1 upstream.
+
+bnad_set_rx_mode omits to unlock bna_lock on one fail path. Fix that.
+
+Signed-off-by: Jiri Slaby <jslaby at suse.cz>
+Cc: Debashis Dutt <ddutt at brocade.com>
+Cc: Rasesh Mody <rmody at brocade.com>
+Cc: David S. Miller <davem at davemloft.net>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/bna/bnad.c |    3 ++-
+ 1 files changed, 2 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
+index 44adc7a..8158fb9 100644
+--- a/drivers/net/bna/bnad.c
++++ b/drivers/net/bna/bnad.c
+@@ -2702,7 +2702,7 @@ bnad_set_rx_mode(struct net_device *netdev)
+ 			kzalloc((mc_count + 1) * ETH_ALEN,
+ 				GFP_ATOMIC);
+ 		if (!mcaddr_list)
+-			return;
++			goto unlock;
+ 
+ 		memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+ 
+@@ -2715,6 +2715,7 @@ bnad_set_rx_mode(struct net_device *netdev)
+ 		/* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
+ 		kfree(mcaddr_list);
+ 	}
++unlock:
+ 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ }
+ 
+-- 
+1.7.4.4
+

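Note on the fix above: this is the standard goto-unlock idiom. Once bna_lock
is taken, every early exit must funnel through a label that releases it; the
allocation-failure path previously returned with the lock still held. A
minimal sketch of the resulting control flow (surrounding logic elided):

	spin_lock_irqsave(&bnad->bna_lock, flags);
	/* ... */
	mcaddr_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
	if (!mcaddr_list)
		goto unlock;	/* a bare "return" here leaked the lock */

	/* ... program the multicast list ... */
	kfree(mcaddr_list);
unlock:
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
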
Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0007-bna-Check-for-NULL-before-deref-in-bnad_cb_tx_cleanu.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0007-bna-Check-for-NULL-before-deref-in-bnad_cb_tx_cleanu.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,33 @@
+From: David S. Miller <davem at davemloft.net>
+Date: Sun, 12 Sep 2010 12:06:00 -0700
+Subject: [PATCH 07/23] bna: Check for NULL before deref in bnad_cb_tx_cleanup
+
+commit 0ea05ce7fd0c1286759933933764aacd12f6ac3a upstream.
+
+Reported-by: Jiri Slaby <jirislaby at gmail.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/bna/bnad.c |    3 ++-
+ 1 files changed, 2 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
+index 8158fb9..e380c0e 100644
+--- a/drivers/net/bna/bnad.c
++++ b/drivers/net/bna/bnad.c
+@@ -867,11 +867,12 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
+ static void
+ bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
+ {
+-	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
++	struct bnad_unmap_q *unmap_q;
+ 
+ 	if (!tcb || (!tcb->unmap_q))
+ 		return;
+ 
++	unmap_q = tcb->unmap_q;
+ 	if (!unmap_q->unmap_array)
+ 		return;
+ 
+-- 
+1.7.4.4
+

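Note on the fix above: a declaration's initializer runs before any later NULL
test, so "struct bnad_unmap_q *unmap_q = tcb->unmap_q;" dereferenced a
possibly-NULL tcb before the check could reject it. The safe ordering, as in
the hunk:

	struct bnad_unmap_q *unmap_q;

	if (!tcb || !tcb->unmap_q)	/* check first ... */
		return;

	unmap_q = tcb->unmap_q;		/* ... dereference afterwards */
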
Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0008-bna-off-by-one.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0008-bna-off-by-one.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,30 @@
+From: Dan Carpenter <error27 at gmail.com>
+Date: Sun, 19 Sep 2010 11:25:54 -0700
+Subject: [PATCH 08/23] bna: off by one
+
+commit 0746556beab1a57f1afb5c9d6f393d98528ce682 upstream.
+
+The mod->mbhdlr[] array has BFI_MC_MAX elements.
+
+Signed-off-by: Dan Carpenter <error27 at gmail.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/bna/bfa_ioc.c |    2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
+index caa45c2..73493de 100644
+--- a/drivers/net/bna/bfa_ioc.c
++++ b/drivers/net/bna/bfa_ioc.c
+@@ -1514,7 +1514,7 @@ bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
+ 		return;
+ 	}
+ 
+-	if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
++	if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
+ 		return;
+ 
+ 	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
+-- 
+1.7.4.4
+

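Note on the fix above: an array declared with BFI_MC_MAX elements has valid
indices 0 through BFI_MC_MAX - 1, so the bounds check must reject
mc == BFI_MC_MAX as well; ">" alone allowed a read one element past the end:

	/* For any "mbhdlr[N]", the last valid index is N - 1. */
	if (mc >= BFI_MC_MAX || mod->mbhdlr[mc].cbfn == NULL)
		return;

	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
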
Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0009-drivers-net-return-operator-cleanup.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0009-drivers-net-return-operator-cleanup.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,39 @@
+From: Eric Dumazet <eric.dumazet at gmail.com>
+Date: Thu, 23 Sep 2010 05:40:09 +0000
+Subject: [PATCH 09/23] drivers/net: return operator cleanup
+
+Change "return (EXPR);" to "return EXPR;"
+
+return is not a function; parentheses are not required.
+
+Signed-off-by: Eric Dumazet <eric.dumazet at gmail.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+[bwh: Restricted to drivers/net/bna]
+---
+ drivers/net/bna/bna_ctrl.c |    6 +++---
+ 1 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
+index f3034d6..ddd922f 100644
+--- a/drivers/net/bna/bna_ctrl.c
++++ b/drivers/net/bna/bna_ctrl.c
+@@ -25,8 +25,8 @@
+ static int
+ bna_is_aen(u8 msg_id)
+ {
+-	return (msg_id == BFI_LL_I2H_LINK_DOWN_AEN ||
+-		msg_id == BFI_LL_I2H_LINK_UP_AEN);
++	return msg_id == BFI_LL_I2H_LINK_DOWN_AEN ||
++	       msg_id == BFI_LL_I2H_LINK_UP_AEN;
+ }
+ 
+ static void
+@@ -1702,7 +1702,7 @@ bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
+ int
+ bna_device_status_get(struct bna_device *device)
+ {
+-	return (device->fsm == (bfa_fsm_t)bna_device_sm_ready);
++	return device->fsm == (bfa_fsm_t)bna_device_sm_ready;
+ }
+ 
+ void

Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0010-bna-fix-interrupt-handling.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0010-bna-fix-interrupt-handling.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,162 @@
+From: Rasesh Mody <rmody at brocade.com>
+Date: Tue, 5 Oct 2010 15:46:04 +0000
+Subject: [PATCH 10/23] bna: fix interrupt handling
+
+commit e2fa6f2ef6e48666b78d4b0f00914b06bb19d298 upstream.
+
+This fix handles the case where the IRQ handler is called (for shared IRQs)
+even before the driver is ready to handle interrupts.
+
+Signed-off-by: Debashis Dutt <ddutt at brocade.com>
+Signed-off-by: Rasesh Mody <rmody at brocade.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/bna/bnad.c |   48 +++++++++++++++++++++++++++---------------------
+ 1 files changed, 27 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
+index e380c0e..7210c34 100644
+--- a/drivers/net/bna/bnad.c
++++ b/drivers/net/bna/bnad.c
+@@ -564,9 +564,11 @@ bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
+ static void
+ bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
+ {
+-	spin_lock_irq(&bnad->bna_lock); /* Because of polling context */
++	unsigned long flags;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags); /* Because of polling context */
+ 	bnad_enable_rx_irq_unsafe(ccb);
+-	spin_unlock_irq(&bnad->bna_lock);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ }
+ 
+ static void
+@@ -599,7 +601,7 @@ static irqreturn_t
+ bnad_msix_mbox_handler(int irq, void *data)
+ {
+ 	u32 intr_status;
+-	unsigned long  flags;
++	unsigned long flags;
+ 	struct net_device *netdev = data;
+ 	struct bnad *bnad;
+ 
+@@ -630,13 +632,15 @@ bnad_isr(int irq, void *data)
+ 	struct bnad_rx_info *rx_info;
+ 	struct bnad_rx_ctrl *rx_ctrl;
+ 
+-	spin_lock_irqsave(&bnad->bna_lock, flags);
++	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
++		return IRQ_NONE;
+ 
+ 	bna_intr_status_get(&bnad->bna, intr_status);
+-	if (!intr_status) {
+-		spin_unlock_irqrestore(&bnad->bna_lock, flags);
++
++	if (unlikely(!intr_status))
+ 		return IRQ_NONE;
+-	}
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
+ 
+ 	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+ 		bna_mbox_handler(&bnad->bna, intr_status);
+@@ -672,11 +676,10 @@ bnad_enable_mbox_irq(struct bnad *bnad)
+ {
+ 	int irq = BNAD_GET_MBOX_IRQ(bnad);
+ 
+-	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
+-		return;
+-
+ 	if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
+-		enable_irq(irq);
++		if (bnad->cfg_flags & BNAD_CF_MSIX)
++			enable_irq(irq);
++
+ 	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
+ }
+ 
+@@ -689,11 +692,11 @@ bnad_disable_mbox_irq(struct bnad *bnad)
+ {
+ 	int irq = BNAD_GET_MBOX_IRQ(bnad);
+ 
+-	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
+-		return;
+ 
+ 	if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
+-		disable_irq_nosync(irq);
++		if (bnad->cfg_flags & BNAD_CF_MSIX)
++			disable_irq_nosync(irq);
++
+ 	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+ }
+ 
+@@ -1045,14 +1048,12 @@ bnad_mbox_irq_free(struct bnad *bnad,
+ 		return;
+ 
+ 	spin_lock_irqsave(&bnad->bna_lock, flags);
+-
+ 	bnad_disable_mbox_irq(bnad);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ 
+ 	irq = BNAD_GET_MBOX_IRQ(bnad);
+ 	free_irq(irq, bnad->netdev);
+ 
+-	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+-
+ 	kfree(intr_info->idl);
+ }
+ 
+@@ -1094,8 +1095,15 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
+ 
+ 	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
+ 
++	/*
++	 * Set the Mbox IRQ disable flag, so that the IRQ handler
++	 * called from request_irq() for SHARED IRQs do not execute
++	 */
++	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
++
+ 	err = request_irq(irq, irq_handler, flags,
+ 			  bnad->mbox_irq_name, bnad->netdev);
++
+ 	if (err) {
+ 		kfree(intr_info->idl);
+ 		intr_info->idl = NULL;
+@@ -1103,7 +1111,8 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
+ 	}
+ 
+ 	spin_lock_irqsave(&bnad->bna_lock, flags);
+-	bnad_disable_mbox_irq(bnad);
++	if (bnad->cfg_flags & BNAD_CF_MSIX)
++		disable_irq_nosync(irq);
+ 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ 	return 0;
+ }
+@@ -1485,7 +1494,6 @@ bnad_stats_timer_start(struct bnad *bnad)
+ 			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
+ 	}
+ 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+-
+ }
+ 
+ /*
+@@ -2170,7 +2178,6 @@ bnad_device_disable(struct bnad *bnad)
+ 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ 
+ 	wait_for_completion(&bnad->bnad_completions.ioc_comp);
+-
+ }
+ 
+ static int
+@@ -3108,7 +3115,6 @@ bnad_pci_probe(struct pci_dev *pdev,
+ 
+ 	spin_lock_irqsave(&bnad->bna_lock, flags);
+ 	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
+-
+ 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ 
+ 	bnad->stats.bna_stats = &bna->stats;
+-- 
+1.7.4.4
+

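Note on the fix above: with a shared IRQ line, another device can trigger the
handler as soon as request_irq() registers it, before this driver's data
structures are ready. The patch therefore sets the mailbox-IRQ-disabled flag
before registration and makes the ISR bail out until initialization clears
it. A condensed sketch of the two halves (setup details elided):

	/* Registration side: mark not-ready first, then register. */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
	err = request_irq(irq, irq_handler, flags,
			  bnad->mbox_irq_name, bnad->netdev);

	/* Handler side: refuse work while the flag is set. */
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
		return IRQ_NONE;	/* shared line: let other handlers run */
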
Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0011-bna-scope-and-dead-code-cleanup.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0011-bna-scope-and-dead-code-cleanup.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,1715 @@
+From: Rasesh Mody <rmody at brocade.com>
+Date: Tue, 5 Oct 2010 15:46:05 +0000
+Subject: [PATCH 11/23] bna: scope and dead code cleanup
+
+commit b7ee31c5af7f04b67d8b8e4f3b2bcb8bcfced8a3 upstream.
+
+As suggested by Stephen Hemminger:
+1) Made functions and data structures static wherever possible.
+2) Removed unused code.
+
+Signed-off-by: Debashis Dutt <ddutt at brocade.com>
+Signed-off-by: Rasesh Mody <rmody at brocade.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/bna/bfa_ioc.c    |    8 +-
+ drivers/net/bna/bfa_ioc.h    |    1 -
+ drivers/net/bna/bfa_ioc_ct.c |    2 +-
+ drivers/net/bna/bfa_sm.h     |    2 +-
+ drivers/net/bna/bna.h        |  108 +--------
+ drivers/net/bna/bna_ctrl.c   |  559 ++++++++----------------------------------
+ drivers/net/bna/bna_hw.h     |    1 -
+ drivers/net/bna/bna_txrx.c   |  149 +++++-------
+ drivers/net/bna/bnad.c       |   29 +--
+ drivers/net/bna/bnad.h       |    1 -
+ drivers/net/bna/cna_fwimg.c  |    2 +-
+ 11 files changed, 170 insertions(+), 692 deletions(-)
+
+diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
+index 73493de..e94e5aa 100644
+--- a/drivers/net/bna/bfa_ioc.c
++++ b/drivers/net/bna/bfa_ioc.c
+@@ -65,7 +65,7 @@
+ 			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
+ 			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
+ 
+-bool bfa_nw_auto_recover = true;
++static bool bfa_nw_auto_recover = true;
+ 
+ /*
+  * forward declarations
+@@ -1276,12 +1276,6 @@ bfa_nw_ioc_auto_recover(bool auto_recover)
+ 	bfa_nw_auto_recover = auto_recover;
+ }
+ 
+-bool
+-bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
+-{
+-	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
+-}
+-
+ static void
+ bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
+ {
+diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
+index 7f0719e..a73d84e 100644
+--- a/drivers/net/bna/bfa_ioc.h
++++ b/drivers/net/bna/bfa_ioc.h
+@@ -271,7 +271,6 @@ void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
+ void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
+ 
+ void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
+-bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
+ 
+ void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
+ void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
+diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
+index 462857c..121cfd6 100644
+--- a/drivers/net/bna/bfa_ioc_ct.c
++++ b/drivers/net/bna/bfa_ioc_ct.c
+@@ -34,7 +34,7 @@ static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
+ static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
+ static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
+ 
+-struct bfa_ioc_hwif nw_hwif_ct;
++static struct bfa_ioc_hwif nw_hwif_ct;
+ 
+ /**
+  * Called from bfa_ioc_attach() to map asic specific calls.
+diff --git a/drivers/net/bna/bfa_sm.h b/drivers/net/bna/bfa_sm.h
+index 1d3d975..46462c4 100644
+--- a/drivers/net/bna/bfa_sm.h
++++ b/drivers/net/bna/bfa_sm.h
+@@ -77,7 +77,7 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
+ 	((_fsm)->fsm == (bfa_fsm_t)(_state))
+ 
+ static inline int
+-bfa_sm_to_state(struct bfa_sm_table *smt, bfa_sm_t sm)
++bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
+ {
+ 	int	i = 0;
+ 
+diff --git a/drivers/net/bna/bna.h b/drivers/net/bna/bna.h
+index 6a2b329..df6676b 100644
+--- a/drivers/net/bna/bna.h
++++ b/drivers/net/bna/bna.h
+@@ -19,8 +19,7 @@
+ #include "bfi_ll.h"
+ #include "bna_types.h"
+ 
+-extern u32 bna_dim_vector[][BNA_BIAS_T_MAX];
+-extern u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
++extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
+ 
+ /**
+  *
+@@ -344,9 +343,6 @@ do {									\
+  * BNA
+  */
+ 
+-/* Internal APIs */
+-void bna_adv_res_req(struct bna_res_info *res_info);
+-
+ /* APIs for BNAD */
+ void bna_res_req(struct bna_res_info *res_info);
+ void bna_init(struct bna *bna, struct bnad *bnad,
+@@ -354,7 +350,6 @@ void bna_init(struct bna *bna, struct bnad *bnad,
+ 			struct bna_res_info *res_info);
+ void bna_uninit(struct bna *bna);
+ void bna_stats_get(struct bna *bna);
+-void bna_stats_clr(struct bna *bna);
+ void bna_get_perm_mac(struct bna *bna, u8 *mac);
+ 
+ /* APIs for Rx */
+@@ -376,18 +371,6 @@ void bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
+  * DEVICE
+  */
+ 
+-/* Interanl APIs */
+-void bna_adv_device_init(struct bna_device *device, struct bna *bna,
+-			struct bna_res_info *res_info);
+-
+-/* APIs for BNA */
+-void bna_device_init(struct bna_device *device, struct bna *bna,
+-		     struct bna_res_info *res_info);
+-void bna_device_uninit(struct bna_device *device);
+-void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
+-int bna_device_status_get(struct bna_device *device);
+-int bna_device_state_get(struct bna_device *device);
+-
+ /* APIs for BNAD */
+ void bna_device_enable(struct bna_device *device);
+ void bna_device_disable(struct bna_device *device,
+@@ -397,12 +380,6 @@ void bna_device_disable(struct bna_device *device,
+  * MBOX
+  */
+ 
+-/* APIs for DEVICE */
+-void bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna);
+-void bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod);
+-void bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod);
+-void bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod);
+-
+ /* APIs for PORT, TX, RX */
+ void bna_mbox_handler(struct bna *bna, u32 intr_status);
+ void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
+@@ -411,17 +388,6 @@ void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
+  * PORT
+  */
+ 
+-/* APIs for BNA */
+-void bna_port_init(struct bna_port *port, struct bna *bna);
+-void bna_port_uninit(struct bna_port *port);
+-int bna_port_state_get(struct bna_port *port);
+-int bna_llport_state_get(struct bna_llport *llport);
+-
+-/* APIs for DEVICE */
+-void bna_port_start(struct bna_port *port);
+-void bna_port_stop(struct bna_port *port);
+-void bna_port_fail(struct bna_port *port);
+-
+ /* API for RX */
+ int bna_port_mtu_get(struct bna_port *port);
+ void bna_llport_admin_up(struct bna_llport *llport);
+@@ -437,12 +403,6 @@ void bna_port_pause_config(struct bna_port *port,
+ void bna_port_mtu_set(struct bna_port *port, int mtu,
+ 		      void (*cbfn)(struct bnad *, enum bna_cb_status));
+ void bna_port_mac_get(struct bna_port *port, mac_t *mac);
+-void bna_port_type_set(struct bna_port *port, enum bna_port_type type);
+-void bna_port_linkcbfn_set(struct bna_port *port,
+-			   void (*linkcbfn)(struct bnad *,
+-					    enum bna_link_status));
+-void bna_port_admin_up(struct bna_port *port);
+-void bna_port_admin_down(struct bna_port *port);
+ 
+ /* Callbacks for TX, RX */
+ void bna_port_cb_tx_stopped(struct bna_port *port,
+@@ -450,11 +410,6 @@ void bna_port_cb_tx_stopped(struct bna_port *port,
+ void bna_port_cb_rx_stopped(struct bna_port *port,
+ 			    enum bna_cb_status status);
+ 
+-/* Callbacks for MBOX */
+-void bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
+-			 int status);
+-void bna_port_cb_link_down(struct bna_port *port, int status);
+-
+ /**
+  * IB
+  */
+@@ -464,25 +419,10 @@ void bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
+ 		     struct bna_res_info *res_info);
+ void bna_ib_mod_uninit(struct bna_ib_mod *ib_mod);
+ 
+-/* APIs for TX, RX */
+-struct bna_ib *bna_ib_get(struct bna_ib_mod *ib_mod,
+-			    enum bna_intr_type intr_type, int vector);
+-void bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib);
+-int bna_ib_reserve_idx(struct bna_ib *ib);
+-void bna_ib_release_idx(struct bna_ib *ib, int idx);
+-int bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config);
+-void bna_ib_start(struct bna_ib *ib);
+-void bna_ib_stop(struct bna_ib *ib);
+-void bna_ib_fail(struct bna_ib *ib);
+-void bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo);
+-
+ /**
+  * TX MODULE AND TX
+  */
+ 
+-/* Internal APIs */
+-void bna_tx_prio_changed(struct bna_tx *tx, int prio);
+-
+ /* APIs for BNA */
+ void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
+ 		     struct bna_res_info *res_info);
+@@ -508,10 +448,6 @@ void bna_tx_enable(struct bna_tx *tx);
+ void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
+ 		    void (*cbfn)(void *, struct bna_tx *,
+ 				 enum bna_cb_status));
+-enum bna_cb_status
+-bna_tx_prio_set(struct bna_tx *tx, int prio,
+-		void (*cbfn)(struct bnad *, struct bna_tx *,
+-			     enum bna_cb_status));
+ void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
+ 
+ /**
+@@ -564,35 +500,20 @@ void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
+ 		    void (*cbfn)(void *, struct bna_rx *,
+ 				 enum bna_cb_status));
+ void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
+-void bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX]);
++void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
+ void bna_rx_dim_update(struct bna_ccb *ccb);
+ enum bna_cb_status
+ bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
+ 		 void (*cbfn)(struct bnad *, struct bna_rx *,
+ 			      enum bna_cb_status));
+ enum bna_cb_status
+-bna_rx_ucast_add(struct bna_rx *rx, u8* ucmac,
+-		 void (*cbfn)(struct bnad *, struct bna_rx *,
+-			      enum bna_cb_status));
+-enum bna_cb_status
+-bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
+-		 void (*cbfn)(struct bnad *, struct bna_rx *,
+-			      enum bna_cb_status));
+-enum bna_cb_status
+ bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
+ 		 void (*cbfn)(struct bnad *, struct bna_rx *,
+ 			      enum bna_cb_status));
+ enum bna_cb_status
+-bna_rx_mcast_del(struct bna_rx *rx, u8 *mcmac,
+-		 void (*cbfn)(struct bnad *, struct bna_rx *,
+-			      enum bna_cb_status));
+-enum bna_cb_status
+ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
+ 		     void (*cbfn)(struct bnad *, struct bna_rx *,
+ 				  enum bna_cb_status));
+-void bna_rx_mcast_delall(struct bna_rx *rx,
+-			 void (*cbfn)(struct bnad *, struct bna_rx *,
+-				      enum bna_cb_status));
+ enum bna_cb_status
+ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
+ 		enum bna_rxmode bitmask,
+@@ -601,36 +522,12 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
+ void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
+ void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
+ void bna_rx_vlanfilter_enable(struct bna_rx *rx);
+-void bna_rx_vlanfilter_disable(struct bna_rx *rx);
+-void bna_rx_rss_enable(struct bna_rx *rx);
+-void bna_rx_rss_disable(struct bna_rx *rx);
+-void bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config);
+-void bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors,
+-			int nvectors);
+ void bna_rx_hds_enable(struct bna_rx *rx, struct bna_rxf_hds *hds_config,
+ 		       void (*cbfn)(struct bnad *, struct bna_rx *,
+ 				    enum bna_cb_status));
+ void bna_rx_hds_disable(struct bna_rx *rx,
+ 			void (*cbfn)(struct bnad *, struct bna_rx *,
+ 				     enum bna_cb_status));
+-void bna_rx_receive_pause(struct bna_rx *rx,
+-			  void (*cbfn)(struct bnad *, struct bna_rx *,
+-				       enum bna_cb_status));
+-void bna_rx_receive_resume(struct bna_rx *rx,
+-			   void (*cbfn)(struct bnad *, struct bna_rx *,
+-					enum bna_cb_status));
+-
+-/* RxF APIs for RX */
+-void bna_rxf_start(struct bna_rxf *rxf);
+-void bna_rxf_stop(struct bna_rxf *rxf);
+-void bna_rxf_fail(struct bna_rxf *rxf);
+-void bna_rxf_init(struct bna_rxf *rxf, struct bna_rx *rx,
+-		  struct bna_rx_config *q_config);
+-void bna_rxf_uninit(struct bna_rxf *rxf);
+-
+-/* Callback from RXF to RX */
+-void bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status);
+-void bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status);
+ 
+ /**
+  * BNAD
+@@ -639,7 +536,6 @@ void bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status);
+ /* Callbacks for BNA */
+ void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
+ 		       struct bna_stats *stats);
+-void bnad_cb_stats_clr(struct bnad *bnad);
+ 
+ /* Callbacks for DEVICE */
+ void bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status);
+diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
+index ddd922f..07b2659 100644
+--- a/drivers/net/bna/bna_ctrl.c
++++ b/drivers/net/bna/bna_ctrl.c
+@@ -19,6 +19,46 @@
+ #include "bfa_sm.h"
+ #include "bfa_wc.h"
+ 
++static void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
++
++static void
++bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
++			int status)
++{
++	int i;
++	u8 prio_map;
++
++	port->llport.link_status = BNA_LINK_UP;
++	if (aen->cee_linkup)
++		port->llport.link_status = BNA_CEE_UP;
++
++	/* Compute the priority */
++	prio_map = aen->prio_map;
++	if (prio_map) {
++		for (i = 0; i < 8; i++) {
++			if ((prio_map >> i) & 0x1)
++				break;
++		}
++		port->priority = i;
++	} else
++		port->priority = 0;
++
++	/* Dispatch events */
++	bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
++	bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
++	port->link_cbfn(port->bna->bnad, port->llport.link_status);
++}
++
++static void
++bna_port_cb_link_down(struct bna_port *port, int status)
++{
++	port->llport.link_status = BNA_LINK_DOWN;
++
++	/* Dispatch events */
++	bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
++	port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
++}
++
+ /**
+  * MBOX
+  */
+@@ -96,7 +136,7 @@ bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
+ 		bna_mbox_aen_callback(bna, msg);
+ }
+ 
+-void
++static void
+ bna_err_handler(struct bna *bna, u32 intr_status)
+ {
+ 	u32 init_halt;
+@@ -140,7 +180,7 @@ bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
+ 	}
+ }
+ 
+-void
++static void
+ bna_mbox_flush_q(struct bna *bna, struct list_head *q)
+ {
+ 	struct bna_mbox_qe *mb_qe = NULL;
+@@ -166,18 +206,18 @@ bna_mbox_flush_q(struct bna *bna, struct list_head *q)
+ 	bna->mbox_mod.state = BNA_MBOX_FREE;
+ }
+ 
+-void
++static void
+ bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
+ {
+ }
+ 
+-void
++static void
+ bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
+ {
+ 	bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
+ }
+ 
+-void
++static void
+ bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
+ {
+ 	bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
+@@ -187,7 +227,7 @@ bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
+ 	mbox_mod->bna = bna;
+ }
+ 
+-void
++static void
+ bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
+ {
+ 	mbox_mod->bna = NULL;
+@@ -538,7 +578,7 @@ bna_fw_cb_llport_down(void *arg, int status)
+ 	bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
+ }
+ 
+-void
++static void
+ bna_port_cb_llport_stopped(struct bna_port *port,
+ 				enum bna_cb_status status)
+ {
+@@ -591,7 +631,7 @@ bna_llport_fail(struct bna_llport *llport)
+ 	bfa_fsm_send_event(llport, LLPORT_E_FAIL);
+ }
+ 
+-int
++static int
+ bna_llport_state_get(struct bna_llport *llport)
+ {
+ 	return bfa_sm_to_state(llport_sm_table, llport->fsm);
+@@ -1109,7 +1149,7 @@ bna_port_cb_chld_stopped(void *arg)
+ 	bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
+ }
+ 
+-void
++static void
+ bna_port_init(struct bna_port *port, struct bna *bna)
+ {
+ 	port->bna = bna;
+@@ -1137,7 +1177,7 @@ bna_port_init(struct bna_port *port, struct bna *bna)
+ 	bna_llport_init(&port->llport, bna);
+ }
+ 
+-void
++static void
+ bna_port_uninit(struct bna_port *port)
+ {
+ 	bna_llport_uninit(&port->llport);
+@@ -1147,13 +1187,13 @@ bna_port_uninit(struct bna_port *port)
+ 	port->bna = NULL;
+ }
+ 
+-int
++static int
+ bna_port_state_get(struct bna_port *port)
+ {
+ 	return bfa_sm_to_state(port_sm_table, port->fsm);
+ }
+ 
+-void
++static void
+ bna_port_start(struct bna_port *port)
+ {
+ 	port->flags |= BNA_PORT_F_DEVICE_READY;
+@@ -1161,7 +1201,7 @@ bna_port_start(struct bna_port *port)
+ 		bfa_fsm_send_event(port, PORT_E_START);
+ }
+ 
+-void
++static void
+ bna_port_stop(struct bna_port *port)
+ {
+ 	port->stop_cbfn = bna_device_cb_port_stopped;
+@@ -1171,7 +1211,7 @@ bna_port_stop(struct bna_port *port)
+ 	bfa_fsm_send_event(port, PORT_E_STOP);
+ }
+ 
+-void
++static void
+ bna_port_fail(struct bna_port *port)
+ {
+ 	port->flags &= ~BNA_PORT_F_DEVICE_READY;
+@@ -1190,44 +1230,6 @@ bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
+ 	bfa_wc_down(&port->chld_stop_wc);
+ }
+ 
+-void
+-bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
+-			int status)
+-{
+-	int i;
+-	u8 prio_map;
+-
+-	port->llport.link_status = BNA_LINK_UP;
+-	if (aen->cee_linkup)
+-		port->llport.link_status = BNA_CEE_UP;
+-
+-	/* Compute the priority */
+-	prio_map = aen->prio_map;
+-	if (prio_map) {
+-		for (i = 0; i < 8; i++) {
+-			if ((prio_map >> i) & 0x1)
+-				break;
+-		}
+-		port->priority = i;
+-	} else
+-		port->priority = 0;
+-
+-	/* Dispatch events */
+-	bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
+-	bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
+-	port->link_cbfn(port->bna->bnad, port->llport.link_status);
+-}
+-
+-void
+-bna_port_cb_link_down(struct bna_port *port, int status)
+-{
+-	port->llport.link_status = BNA_LINK_DOWN;
+-
+-	/* Dispatch events */
+-	bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
+-	port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
+-}
+-
+ int
+ bna_port_mtu_get(struct bna_port *port)
+ {
+@@ -1293,54 +1295,6 @@ bna_port_mac_get(struct bna_port *port, mac_t *mac)
+ }
+ 
+ /**
+- * Should be called only when port is disabled
+- */
+-void
+-bna_port_type_set(struct bna_port *port, enum bna_port_type type)
+-{
+-	port->type = type;
+-	port->llport.type = type;
+-}
+-
+-/**
+- * Should be called only when port is disabled
+- */
+-void
+-bna_port_linkcbfn_set(struct bna_port *port,
+-		      void (*linkcbfn)(struct bnad *, enum bna_link_status))
+-{
+-	port->link_cbfn = linkcbfn;
+-}
+-
+-void
+-bna_port_admin_up(struct bna_port *port)
+-{
+-	struct bna_llport *llport = &port->llport;
+-
+-	if (llport->flags & BNA_LLPORT_F_ENABLED)
+-		return;
+-
+-	llport->flags |= BNA_LLPORT_F_ENABLED;
+-
+-	if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
+-		bfa_fsm_send_event(llport, LLPORT_E_UP);
+-}
+-
+-void
+-bna_port_admin_down(struct bna_port *port)
+-{
+-	struct bna_llport *llport = &port->llport;
+-
+-	if (!(llport->flags & BNA_LLPORT_F_ENABLED))
+-		return;
+-
+-	llport->flags &= ~BNA_LLPORT_F_ENABLED;
+-
+-	if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
+-		bfa_fsm_send_event(llport, LLPORT_E_DOWN);
+-}
+-
+-/**
+  * DEVICE
+  */
+ #define enable_mbox_intr(_device)\
+@@ -1357,7 +1311,7 @@ do {\
+ 	bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
+ } while (0)
+ 
+-const struct bna_chip_regs_offset reg_offset[] =
++static const struct bna_chip_regs_offset reg_offset[] =
+ {{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
+ 	HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
+ {HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
+@@ -1642,7 +1596,34 @@ static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
+ 	bna_device_cb_iocll_reset
+ };
+ 
+-void
++/* device */
++static void
++bna_adv_device_init(struct bna_device *device, struct bna *bna,
++		struct bna_res_info *res_info)
++{
++	u8 *kva;
++	u64 dma;
++
++	device->bna = bna;
++
++	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
++
++	/**
++	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
++	 * DMA memory.
++	 */
++	BNA_GET_DMA_ADDR(
++		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
++	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
++
++	bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
++	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
++	kva += bfa_nw_cee_meminfo();
++	dma += bfa_nw_cee_meminfo();
++
++}
++
++static void
+ bna_device_init(struct bna_device *device, struct bna *bna,
+ 		struct bna_res_info *res_info)
+ {
+@@ -1681,7 +1662,7 @@ bna_device_init(struct bna_device *device, struct bna *bna,
+ 	bfa_fsm_set_state(device, bna_device_sm_stopped);
+ }
+ 
+-void
++static void
+ bna_device_uninit(struct bna_device *device)
+ {
+ 	bna_mbox_mod_uninit(&device->bna->mbox_mod);
+@@ -1691,7 +1672,7 @@ bna_device_uninit(struct bna_device *device)
+ 	device->bna = NULL;
+ }
+ 
+-void
++static void
+ bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
+ {
+ 	struct bna_device *device = (struct bna_device *)arg;
+@@ -1699,7 +1680,7 @@ bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
+ 	bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);
+ }
+ 
+-int
++static int
+ bna_device_status_get(struct bna_device *device)
+ {
+ 	return device->fsm == (bfa_fsm_t)bna_device_sm_ready;
+@@ -1733,24 +1714,13 @@ bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
+ 	bfa_fsm_send_event(device, DEVICE_E_DISABLE);
+ }
+ 
+-int
++static int
+ bna_device_state_get(struct bna_device *device)
+ {
+ 	return bfa_sm_to_state(device_sm_table, device->fsm);
+ }
+ 
+-u32 bna_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
+-	{12, 20},
+-	{10, 18},
+-	{8, 16},
+-	{6, 12},
+-	{4, 8},
+-	{3, 6},
+-	{2, 4},
+-	{1, 2},
+-};
+-
+-u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
++const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
+ 	{12, 12},
+ 	{6, 10},
+ 	{5, 10},
+@@ -1761,36 +1731,9 @@ u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
+ 	{1, 2},
+ };
+ 
+-/* device */
+-void
+-bna_adv_device_init(struct bna_device *device, struct bna *bna,
+-		struct bna_res_info *res_info)
+-{
+-	u8 *kva;
+-	u64 dma;
+-
+-	device->bna = bna;
+-
+-	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
+-
+-	/**
+-	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
+-	 * DMA memory.
+-	 */
+-	BNA_GET_DMA_ADDR(
+-		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
+-	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
+-
+-	bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
+-	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
+-	kva += bfa_nw_cee_meminfo();
+-	dma += bfa_nw_cee_meminfo();
+-
+-}
+-
+ /* utils */
+ 
+-void
++static void
+ bna_adv_res_req(struct bna_res_info *res_info)
+ {
+ 	/* DMA memory for COMMON_MODULE */
+@@ -2044,36 +1987,6 @@ bna_fw_stats_get(struct bna *bna)
+ 	bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];
+ }
+ 
+-static void
+-bna_fw_cb_stats_clr(void *arg, int status)
+-{
+-	struct bna *bna = (struct bna *)arg;
+-
+-	bfa_q_qe_init(&bna->mbox_qe.qe);
+-
+-	memset(bna->stats.sw_stats, 0, sizeof(struct bna_sw_stats));
+-	memset(bna->stats.hw_stats, 0, sizeof(struct bfi_ll_stats));
+-
+-	bnad_cb_stats_clr(bna->bnad);
+-}
+-
+-static void
+-bna_fw_stats_clr(struct bna *bna)
+-{
+-	struct bfi_ll_stats_req ll_req;
+-
+-	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
+-	ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
+-	ll_req.rxf_id_mask[0] = htonl(0xffffffff);
+-	ll_req.rxf_id_mask[1] =	htonl(0xffffffff);
+-	ll_req.txf_id_mask[0] =	htonl(0xffffffff);
+-	ll_req.txf_id_mask[1] =	htonl(0xffffffff);
+-
+-	bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
+-				bna_fw_cb_stats_clr, bna);
+-	bna_mbox_send(bna, &bna->mbox_qe);
+-}
+-
+ void
+ bna_stats_get(struct bna *bna)
+ {
+@@ -2083,22 +1996,8 @@ bna_stats_get(struct bna *bna)
+ 		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
+ }
+ 
+-void
+-bna_stats_clr(struct bna *bna)
+-{
+-	if (bna_device_status_get(&bna->device))
+-		bna_fw_stats_clr(bna);
+-	else {
+-		memset(&bna->stats.sw_stats, 0,
+-				sizeof(struct bna_sw_stats));
+-		memset(bna->stats.hw_stats, 0,
+-				sizeof(struct bfi_ll_stats));
+-		bnad_cb_stats_clr(bna->bnad);
+-	}
+-}
+-
+ /* IB */
+-void
++static void
+ bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
+ {
+ 	ib->ib_config.coalescing_timeo = coalescing_timeo;
+@@ -2157,7 +2056,7 @@ rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
+ 	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
+ }
+ 
+-void
++static void
+ __rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status)
+ {
+ 	struct bna_rx_fndb_ram *rx_fndb_ram;
+@@ -2553,7 +2452,7 @@ rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
+  *	0 = no h/w change
+  *	1 = need h/w change
+  */
+-int
++static int
+ rxf_promisc_enable(struct bna_rxf *rxf)
+ {
+ 	struct bna *bna = rxf->rx->bna;
+@@ -2584,7 +2483,7 @@ rxf_promisc_enable(struct bna_rxf *rxf)
+  *	0 = no h/w change
+  *	1 = need h/w change
+  */
+-int
++static int
+ rxf_promisc_disable(struct bna_rxf *rxf)
+ {
+ 	struct bna *bna = rxf->rx->bna;
+@@ -2623,7 +2522,7 @@ rxf_promisc_disable(struct bna_rxf *rxf)
+  *	0 = no h/w change
+  *	1 = need h/w change
+  */
+-int
++static int
+ rxf_default_enable(struct bna_rxf *rxf)
+ {
+ 	struct bna *bna = rxf->rx->bna;
+@@ -2654,7 +2553,7 @@ rxf_default_enable(struct bna_rxf *rxf)
+  *	0 = no h/w change
+  *	1 = need h/w change
+  */
+-int
++static int
+ rxf_default_disable(struct bna_rxf *rxf)
+ {
+ 	struct bna *bna = rxf->rx->bna;
+@@ -2693,7 +2592,7 @@ rxf_default_disable(struct bna_rxf *rxf)
+  *	0 = no h/w change
+  *	1 = need h/w change
+  */
+-int
++static int
+ rxf_allmulti_enable(struct bna_rxf *rxf)
+ {
+ 	int ret = 0;
+@@ -2721,7 +2620,7 @@ rxf_allmulti_enable(struct bna_rxf *rxf)
+  *	0 = no h/w change
+  *	1 = need h/w change
+  */
+-int
++static int
+ rxf_allmulti_disable(struct bna_rxf *rxf)
+ {
+ 	int ret = 0;
+@@ -2746,159 +2645,6 @@ rxf_allmulti_disable(struct bna_rxf *rxf)
+ }
+ 
+ /* RxF <- bnad */
+-void
+-bna_rx_mcast_delall(struct bna_rx *rx,
+-		    void (*cbfn)(struct bnad *, struct bna_rx *,
+-				 enum bna_cb_status))
+-{
+-	struct bna_rxf *rxf = &rx->rxf;
+-	struct list_head *qe;
+-	struct bna_mac *mac;
+-	int need_hw_config = 0;
+-
+-	/* Purge all entries from pending_add_q */
+-	while (!list_empty(&rxf->mcast_pending_add_q)) {
+-		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
+-		mac = (struct bna_mac *)qe;
+-		bfa_q_qe_init(&mac->qe);
+-		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+-	}
+-
+-	/* Schedule all entries in active_q for deletion */
+-	while (!list_empty(&rxf->mcast_active_q)) {
+-		bfa_q_deq(&rxf->mcast_active_q, &qe);
+-		mac = (struct bna_mac *)qe;
+-		bfa_q_qe_init(&mac->qe);
+-		list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
+-		need_hw_config = 1;
+-	}
+-
+-	if (need_hw_config) {
+-		rxf->cam_fltr_cbfn = cbfn;
+-		rxf->cam_fltr_cbarg = rx->bna->bnad;
+-		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+-		return;
+-	}
+-
+-	if (cbfn)
+-		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+-}
+-
+-/* RxF <- Rx */
+-void
+-bna_rx_receive_resume(struct bna_rx *rx,
+-		      void (*cbfn)(struct bnad *, struct bna_rx *,
+-				   enum bna_cb_status))
+-{
+-	struct bna_rxf *rxf = &rx->rxf;
+-
+-	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED) {
+-		rxf->oper_state_cbfn = cbfn;
+-		rxf->oper_state_cbarg = rx->bna->bnad;
+-		bfa_fsm_send_event(rxf, RXF_E_RESUME);
+-	} else if (cbfn)
+-		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+-}
+-
+-void
+-bna_rx_receive_pause(struct bna_rx *rx,
+-		     void (*cbfn)(struct bnad *, struct bna_rx *,
+-				  enum bna_cb_status))
+-{
+-	struct bna_rxf *rxf = &rx->rxf;
+-
+-	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_RUNNING) {
+-		rxf->oper_state_cbfn = cbfn;
+-		rxf->oper_state_cbarg = rx->bna->bnad;
+-		bfa_fsm_send_event(rxf, RXF_E_PAUSE);
+-	} else if (cbfn)
+-		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+-}
+-
+-/* RxF <- bnad */
+-enum bna_cb_status
+-bna_rx_ucast_add(struct bna_rx *rx, u8 *addr,
+-		 void (*cbfn)(struct bnad *, struct bna_rx *,
+-			      enum bna_cb_status))
+-{
+-	struct bna_rxf *rxf = &rx->rxf;
+-	struct list_head *qe;
+-	struct bna_mac *mac;
+-
+-	/* Check if already added */
+-	list_for_each(qe, &rxf->ucast_active_q) {
+-		mac = (struct bna_mac *)qe;
+-		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+-			if (cbfn)
+-				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+-			return BNA_CB_SUCCESS;
+-		}
+-	}
+-
+-	/* Check if pending addition */
+-	list_for_each(qe, &rxf->ucast_pending_add_q) {
+-		mac = (struct bna_mac *)qe;
+-		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+-			if (cbfn)
+-				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+-			return BNA_CB_SUCCESS;
+-		}
+-	}
+-
+-	mac = bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
+-	if (mac == NULL)
+-		return BNA_CB_UCAST_CAM_FULL;
+-	bfa_q_qe_init(&mac->qe);
+-	memcpy(mac->addr, addr, ETH_ALEN);
+-	list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
+-
+-	rxf->cam_fltr_cbfn = cbfn;
+-	rxf->cam_fltr_cbarg = rx->bna->bnad;
+-
+-	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+-
+-	return BNA_CB_SUCCESS;
+-}
+-
+-/* RxF <- bnad */
+-enum bna_cb_status
+-bna_rx_ucast_del(struct bna_rx *rx, u8 *addr,
+-		 void (*cbfn)(struct bnad *, struct bna_rx *,
+-			      enum bna_cb_status))
+-{
+-	struct bna_rxf *rxf = &rx->rxf;
+-	struct list_head *qe;
+-	struct bna_mac *mac;
+-
+-	list_for_each(qe, &rxf->ucast_pending_add_q) {
+-		mac = (struct bna_mac *)qe;
+-		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+-			list_del(qe);
+-			bfa_q_qe_init(qe);
+-			bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
+-			if (cbfn)
+-				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+-			return BNA_CB_SUCCESS;
+-		}
+-	}
+-
+-	list_for_each(qe, &rxf->ucast_active_q) {
+-		mac = (struct bna_mac *)qe;
+-		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+-			list_del(qe);
+-			bfa_q_qe_init(qe);
+-			list_add_tail(qe, &rxf->ucast_pending_del_q);
+-			rxf->cam_fltr_cbfn = cbfn;
+-			rxf->cam_fltr_cbarg = rx->bna->bnad;
+-			bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+-			return BNA_CB_SUCCESS;
+-		}
+-	}
+-
+-	return BNA_CB_INVALID_MAC;
+-}
+-
+-/* RxF <- bnad */
+ enum bna_cb_status
+ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
+ 		enum bna_rxmode bitmask,
+@@ -2978,39 +2724,6 @@ err_return:
+ 	return BNA_CB_FAIL;
+ }
+ 
+-/* RxF <- bnad */
+-void
+-bna_rx_rss_enable(struct bna_rx *rx)
+-{
+-	struct bna_rxf *rxf = &rx->rxf;
+-
+-	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
+-	rxf->rss_status = BNA_STATUS_T_ENABLED;
+-	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+-}
+-
+-/* RxF <- bnad */
+-void
+-bna_rx_rss_disable(struct bna_rx *rx)
+-{
+-	struct bna_rxf *rxf = &rx->rxf;
+-
+-	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
+-	rxf->rss_status = BNA_STATUS_T_DISABLED;
+-	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+-}
+-
+-/* RxF <- bnad */
+-void
+-bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config)
+-{
+-	struct bna_rxf *rxf = &rx->rxf;
+-	rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
+-	rxf->rss_status = BNA_STATUS_T_ENABLED;
+-	rxf->rss_cfg = *rss_config;
+-	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+-}
+-
+ void
+ /* RxF <- bnad */
+ bna_rx_vlanfilter_enable(struct bna_rx *rx)
+@@ -3024,68 +2737,8 @@ bna_rx_vlanfilter_enable(struct bna_rx *rx)
+ 	}
+ }
+ 
+-/* RxF <- bnad */
+-void
+-bna_rx_vlanfilter_disable(struct bna_rx *rx)
+-{
+-	struct bna_rxf *rxf = &rx->rxf;
+-
+-	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
+-		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
+-		rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
+-		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+-	}
+-}
+-
+ /* Rx */
+ 
+-struct bna_rxp *
+-bna_rx_get_rxp(struct bna_rx *rx, int vector)
+-{
+-	struct bna_rxp *rxp;
+-	struct list_head *qe;
+-
+-	list_for_each(qe, &rx->rxp_q) {
+-		rxp = (struct bna_rxp *)qe;
+-		if (rxp->vector == vector)
+-			return rxp;
+-	}
+-	return NULL;
+-}
+-
+-/*
+- * bna_rx_rss_rit_set()
+- * Sets the Q ids for the specified msi-x vectors in the RIT.
+- * Maximum rit size supported is 64, which should be the max size of the
+- * vectors array.
+- */
+-
+-void
+-bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors, int nvectors)
+-{
+-	int i;
+-	struct bna_rxp *rxp;
+-	struct bna_rxq *q0 = NULL, *q1 = NULL;
+-	struct bna *bna;
+-	struct bna_rxf *rxf;
+-
+-	/* Build the RIT contents for this RX */
+-	bna = rx->bna;
+-
+-	rxf = &rx->rxf;
+-	for (i = 0; i < nvectors; i++) {
+-		rxp = bna_rx_get_rxp(rx, vectors[i]);
+-
+-		GET_RXQS(rxp, q0, q1);
+-		rxf->rit_segment->rit[i].large_rxq_id = q0->rxq_id;
+-		rxf->rit_segment->rit[i].small_rxq_id = (q1 ? q1->rxq_id : 0);
+-	}
+-
+-	rxf->rit_segment->rit_size = nvectors;
+-
+-	/* Subsequent call to enable/reconfig RSS will update the RIT in h/w */
+-}
+-
+ /* Rx <- bnad */
+ void
+ bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
+@@ -3102,7 +2755,7 @@ bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
+ 
+ /* Rx <- bnad */
+ void
+-bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX])
++bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
+ {
+ 	int i, j;
+ 
+@@ -3165,22 +2818,6 @@ bna_rx_dim_update(struct bna_ccb *ccb)
+ 
+ /* Tx */
+ /* TX <- bnad */
+-enum bna_cb_status
+-bna_tx_prio_set(struct bna_tx *tx, int prio,
+-		void (*cbfn)(struct bnad *, struct bna_tx *,
+-			     enum bna_cb_status))
+-{
+-	if (tx->flags & BNA_TX_F_PRIO_LOCK)
+-		return BNA_CB_FAIL;
+-	else {
+-		tx->prio_change_cbfn = cbfn;
+-		bna_tx_prio_changed(tx, prio);
+-	}
+-
+-	return BNA_CB_SUCCESS;
+-}
+-
+-/* TX <- bnad */
+ void
+ bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
+ {
+diff --git a/drivers/net/bna/bna_hw.h b/drivers/net/bna/bna_hw.h
+index 67eb376..806b224 100644
+--- a/drivers/net/bna/bna_hw.h
++++ b/drivers/net/bna/bna_hw.h
+@@ -1282,7 +1282,6 @@ struct bna_chip_regs_offset {
+ 	u32 fn_int_mask;
+ 	u32 msix_idx;
+ };
+-extern const struct bna_chip_regs_offset reg_offset[];
+ 
+ struct bna_chip_regs {
+ 	void __iomem *page_addr;
+diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c
+index 890846d..ad93fdb 100644
+--- a/drivers/net/bna/bna_txrx.c
++++ b/drivers/net/bna/bna_txrx.c
+@@ -195,7 +195,7 @@ bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
+ 	ib_mod->bna = NULL;
+ }
+ 
+-struct bna_ib *
++static struct bna_ib *
+ bna_ib_get(struct bna_ib_mod *ib_mod,
+ 		enum bna_intr_type intr_type,
+ 		int vector)
+@@ -240,7 +240,7 @@ bna_ib_get(struct bna_ib_mod *ib_mod,
+ 	return ib;
+ }
+ 
+-void
++static void
+ bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
+ {
+ 	bna_intr_put(ib_mod, ib->intr);
+@@ -255,7 +255,7 @@ bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
+ }
+ 
+ /* Returns index offset - starting from 0 */
+-int
++static int
+ bna_ib_reserve_idx(struct bna_ib *ib)
+ {
+ 	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
+@@ -309,7 +309,7 @@ bna_ib_reserve_idx(struct bna_ib *ib)
+ 	return idx;
+ }
+ 
+-void
++static void
+ bna_ib_release_idx(struct bna_ib *ib, int idx)
+ {
+ 	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
+@@ -356,7 +356,7 @@ bna_ib_release_idx(struct bna_ib *ib, int idx)
+ 	}
+ }
+ 
+-int
++static int
+ bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
+ {
+ 	if (ib->start_count)
+@@ -374,7 +374,7 @@ bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
+ 	return 0;
+ }
+ 
+-void
++static void
+ bna_ib_start(struct bna_ib *ib)
+ {
+ 	struct bna_ib_blk_mem ib_cfg;
+@@ -450,7 +450,7 @@ bna_ib_start(struct bna_ib *ib)
+ 	}
+ }
+ 
+-void
++static void
+ bna_ib_stop(struct bna_ib *ib)
+ {
+ 	u32 intx_mask;
+@@ -468,7 +468,7 @@ bna_ib_stop(struct bna_ib *ib)
+ 	}
+ }
+ 
+-void
++static void
+ bna_ib_fail(struct bna_ib *ib)
+ {
+ 	ib->start_count = 0;
+@@ -1394,7 +1394,7 @@ rxf_reset_packet_filter(struct bna_rxf *rxf)
+ 	rxf_reset_packet_filter_allmulti(rxf);
+ }
+ 
+-void
++static void
+ bna_rxf_init(struct bna_rxf *rxf,
+ 		struct bna_rx *rx,
+ 		struct bna_rx_config *q_config)
+@@ -1444,7 +1444,7 @@ bna_rxf_init(struct bna_rxf *rxf,
+ 	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ }
+ 
+-void
++static void
+ bna_rxf_uninit(struct bna_rxf *rxf)
+ {
+ 	struct bna_mac *mac;
+@@ -1476,7 +1476,18 @@ bna_rxf_uninit(struct bna_rxf *rxf)
+ 	rxf->rx = NULL;
+ }
+ 
+-void
++static void
++bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
++{
++	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
++	if (rx->rxf.rxf_id < 32)
++		rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
++	else
++		rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
++				1 << (rx->rxf.rxf_id - 32));
++}
++
++static void
+ bna_rxf_start(struct bna_rxf *rxf)
+ {
+ 	rxf->start_cbfn = bna_rx_cb_rxf_started;
+@@ -1485,7 +1496,18 @@ bna_rxf_start(struct bna_rxf *rxf)
+ 	bfa_fsm_send_event(rxf, RXF_E_START);
+ }
+ 
+-void
++static void
++bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
++{
++	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
++	if (rx->rxf.rxf_id < 32)
++		rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id;
++	else
++		rx->bna->rx_mod.rxf_bmap[1] &= ~(u32)
++				1 << (rx->rxf.rxf_id - 32);
++}
++
++static void
+ bna_rxf_stop(struct bna_rxf *rxf)
+ {
+ 	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
+@@ -1493,7 +1515,7 @@ bna_rxf_stop(struct bna_rxf *rxf)
+ 	bfa_fsm_send_event(rxf, RXF_E_STOP);
+ }
+ 
+-void
++static void
+ bna_rxf_fail(struct bna_rxf *rxf)
+ {
+ 	rxf->rxf_flags |= BNA_RXF_FL_FAILED;
+@@ -1576,43 +1598,6 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
+ }
+ 
+ enum bna_cb_status
+-bna_rx_mcast_del(struct bna_rx *rx, u8 *addr,
+-		 void (*cbfn)(struct bnad *, struct bna_rx *,
+-			      enum bna_cb_status))
+-{
+-	struct bna_rxf *rxf = &rx->rxf;
+-	struct list_head *qe;
+-	struct bna_mac *mac;
+-
+-	list_for_each(qe, &rxf->mcast_pending_add_q) {
+-		mac = (struct bna_mac *)qe;
+-		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+-			list_del(qe);
+-			bfa_q_qe_init(qe);
+-			bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+-			if (cbfn)
+-				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
+-			return BNA_CB_SUCCESS;
+-		}
+-	}
+-
+-	list_for_each(qe, &rxf->mcast_active_q) {
+-		mac = (struct bna_mac *)qe;
+-		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
+-			list_del(qe);
+-			bfa_q_qe_init(qe);
+-			list_add_tail(qe, &rxf->mcast_pending_del_q);
+-			rxf->cam_fltr_cbfn = cbfn;
+-			rxf->cam_fltr_cbarg = rx->bna->bnad;
+-			bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
+-			return BNA_CB_SUCCESS;
+-		}
+-	}
+-
+-	return BNA_CB_INVALID_MAC;
+-}
+-
+-enum bna_cb_status
+ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
+ 		     void (*cbfn)(struct bnad *, struct bna_rx *,
+ 				  enum bna_cb_status))
+@@ -1862,7 +1847,7 @@ bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
+ bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
+ 	struct bna_rx, enum bna_rx_event);
+ 
+-static struct bfa_sm_table rx_sm_table[] = {
++static const struct bfa_sm_table rx_sm_table[] = {
+ 	{BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
+ 	{BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
+ 	{BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
+@@ -2247,7 +2232,7 @@ bna_rit_create(struct bna_rx *rx)
+ 	}
+ }
+ 
+-int
++static int
+ _rx_can_satisfy(struct bna_rx_mod *rx_mod,
+ 		struct bna_rx_config *rx_cfg)
+ {
+@@ -2272,7 +2257,7 @@ _rx_can_satisfy(struct bna_rx_mod *rx_mod,
+ 	return 1;
+ }
+ 
+-struct bna_rxq *
++static struct bna_rxq *
+ _get_free_rxq(struct bna_rx_mod *rx_mod)
+ {
+ 	struct bna_rxq *rxq = NULL;
+@@ -2286,7 +2271,7 @@ _get_free_rxq(struct bna_rx_mod *rx_mod)
+ 	return rxq;
+ }
+ 
+-void
++static void
+ _put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
+ {
+ 	bfa_q_qe_init(&rxq->qe);
+@@ -2294,7 +2279,7 @@ _put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
+ 	rx_mod->rxq_free_count++;
+ }
+ 
+-struct bna_rxp *
++static struct bna_rxp *
+ _get_free_rxp(struct bna_rx_mod *rx_mod)
+ {
+ 	struct list_head	*qe = NULL;
+@@ -2310,7 +2295,7 @@ _get_free_rxp(struct bna_rx_mod *rx_mod)
+ 	return rxp;
+ }
+ 
+-void
++static void
+ _put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
+ {
+ 	bfa_q_qe_init(&rxp->qe);
+@@ -2318,7 +2303,7 @@ _put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
+ 	rx_mod->rxp_free_count++;
+ }
+ 
+-struct bna_rx *
++static struct bna_rx *
+ _get_free_rx(struct bna_rx_mod *rx_mod)
+ {
+ 	struct list_head	*qe = NULL;
+@@ -2336,7 +2321,7 @@ _get_free_rx(struct bna_rx_mod *rx_mod)
+ 	return rx;
+ }
+ 
+-void
++static void
+ _put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
+ {
+ 	bfa_q_qe_init(&rx->qe);
+@@ -2344,7 +2329,7 @@ _put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
+ 	rx_mod->rx_free_count++;
+ }
+ 
+-void
++static void
+ _rx_init(struct bna_rx *rx, struct bna *bna)
+ {
+ 	rx->bna = bna;
+@@ -2360,7 +2345,7 @@ _rx_init(struct bna_rx *rx, struct bna *bna)
+ 	rx->stop_cbarg = NULL;
+ }
+ 
+-void
++static void
+ _rxp_add_rxqs(struct bna_rxp *rxp,
+ 		struct bna_rxq *q0,
+ 		struct bna_rxq *q1)
+@@ -2383,7 +2368,7 @@ _rxp_add_rxqs(struct bna_rxp *rxp,
+ 	}
+ }
+ 
+-void
++static void
+ _rxq_qpt_init(struct bna_rxq *rxq,
+ 		struct bna_rxp *rxp,
+ 		u32 page_count,
+@@ -2412,7 +2397,7 @@ _rxq_qpt_init(struct bna_rxq *rxq,
+ 	}
+ }
+ 
+-void
++static void
+ _rxp_cqpt_setup(struct bna_rxp *rxp,
+ 		u32 page_count,
+ 		u32 page_size,
+@@ -2441,13 +2426,13 @@ _rxp_cqpt_setup(struct bna_rxp *rxp,
+ 	}
+ }
+ 
+-void
++static void
+ _rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
+ {
+ 	list_add_tail(&rxp->qe, &rx->rxp_q);
+ }
+ 
+-void
++static void
+ _init_rxmod_queues(struct bna_rx_mod *rx_mod)
+ {
+ 	INIT_LIST_HEAD(&rx_mod->rx_free_q);
+@@ -2460,7 +2445,7 @@ _init_rxmod_queues(struct bna_rx_mod *rx_mod)
+ 	rx_mod->rxp_free_count = 0;
+ }
+ 
+-void
++static void
+ _rx_ctor(struct bna_rx *rx, int id)
+ {
+ 	bfa_q_qe_init(&rx->qe);
+@@ -2492,7 +2477,7 @@ bna_rx_cb_rxq_stopped_all(void *arg)
+ 	bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
+ }
+ 
+-void
++static void
+ bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
+ 			 enum bna_cb_status status)
+ {
+@@ -2501,7 +2486,7 @@ bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
+ 	bfa_wc_down(&rx_mod->rx_stop_wc);
+ }
+ 
+-void
++static void
+ bna_rx_mod_cb_rx_stopped_all(void *arg)
+ {
+ 	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
+@@ -2511,7 +2496,7 @@ bna_rx_mod_cb_rx_stopped_all(void *arg)
+ 	rx_mod->stop_cbfn = NULL;
+ }
+ 
+-void
++static void
+ bna_rx_start(struct bna_rx *rx)
+ {
+ 	rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
+@@ -2519,7 +2504,7 @@ bna_rx_start(struct bna_rx *rx)
+ 		bfa_fsm_send_event(rx, RX_E_START);
+ }
+ 
+-void
++static void
+ bna_rx_stop(struct bna_rx *rx)
+ {
+ 	rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
+@@ -2532,7 +2517,7 @@ bna_rx_stop(struct bna_rx *rx)
+ 	}
+ }
+ 
+-void
++static void
+ bna_rx_fail(struct bna_rx *rx)
+ {
+ 	/* Indicate port is not enabled, and failed */
+@@ -2542,28 +2527,6 @@ bna_rx_fail(struct bna_rx *rx)
+ }
+ 
+ void
+-bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
+-{
+-	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
+-	if (rx->rxf.rxf_id < 32)
+-		rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
+-	else
+-		rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
+-				1 << (rx->rxf.rxf_id - 32));
+-}
+-
+-void
+-bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
+-{
+-	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
+-	if (rx->rxf.rxf_id < 32)
+-		rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id;
+-	else
+-		rx->bna->rx_mod.rxf_bmap[1] &= ~(u32)
+-				1 << (rx->rxf.rxf_id - 32);
+-}
+-
+-void
+ bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
+ {
+ 	struct bna_rx *rx;
+@@ -3731,7 +3694,7 @@ bna_tx_fail(struct bna_tx *tx)
+ 	bfa_fsm_send_event(tx, TX_E_FAIL);
+ }
+ 
+-void
++static void
+ bna_tx_prio_changed(struct bna_tx *tx, int prio)
+ {
+ 	struct bna_txq *txq;
+diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
+index 7210c34..74c64d6 100644
+--- a/drivers/net/bna/bnad.c
++++ b/drivers/net/bna/bnad.c
+@@ -28,7 +28,7 @@
+ #include "bna.h"
+ #include "cna.h"
+ 
+-DEFINE_MUTEX(bnad_fwimg_mutex);
++static DEFINE_MUTEX(bnad_fwimg_mutex);
+ 
+ /*
+  * Module params
+@@ -46,7 +46,7 @@ MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
+  */
+ u32 bnad_rxqs_per_cq = 2;
+ 
+-const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
++static const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ 
+ /*
+  * Local MACROS
+@@ -687,7 +687,7 @@ bnad_enable_mbox_irq(struct bnad *bnad)
+  * Called with bnad->bna_lock held b'cos of
+  * bnad->cfg_flags access.
+  */
+-void
++static void
+ bnad_disable_mbox_irq(struct bnad *bnad)
+ {
+ 	int irq = BNAD_GET_MBOX_IRQ(bnad);
+@@ -956,11 +956,6 @@ bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
+ 		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
+ }
+ 
+-void
+-bnad_cb_stats_clr(struct bnad *bnad)
+-{
+-}
+-
+ /* Resource allocation, free functions */
+ 
+ static void
+@@ -1111,8 +1106,10 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
+ 	}
+ 
+ 	spin_lock_irqsave(&bnad->bna_lock, flags);
++
+ 	if (bnad->cfg_flags & BNAD_CF_MSIX)
+ 		disable_irq_nosync(irq);
++
+ 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ 	return 0;
+ }
+@@ -2243,7 +2240,6 @@ static void
+ bnad_enable_msix(struct bnad *bnad)
+ {
+ 	int i, ret;
+-	u32 tot_msix_num;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&bnad->bna_lock, flags);
+@@ -2256,18 +2252,16 @@ bnad_enable_msix(struct bnad *bnad)
+ 	if (bnad->msix_table)
+ 		return;
+ 
+-	tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
+-
+ 	bnad->msix_table =
+-		kcalloc(tot_msix_num, sizeof(struct msix_entry), GFP_KERNEL);
++		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
+ 
+ 	if (!bnad->msix_table)
+ 		goto intx_mode;
+ 
+-	for (i = 0; i < tot_msix_num; i++)
++	for (i = 0; i < bnad->msix_num; i++)
+ 		bnad->msix_table[i].entry = i;
+ 
+-	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, tot_msix_num);
++	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
+ 	if (ret > 0) {
+ 		/* Not enough MSI-X vectors. */
+ 
+@@ -2280,12 +2274,11 @@ bnad_enable_msix(struct bnad *bnad)
+ 			+ (bnad->num_rx
+ 			* bnad->num_rxp_per_rx) +
+ 			 BNAD_MAILBOX_MSIX_VECTORS;
+-		tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
+ 
+ 		/* Try once more with adjusted numbers */
+ 		/* If this fails, fall back to INTx */
+ 		ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
+-				      tot_msix_num);
++				      bnad->msix_num);
+ 		if (ret)
+ 			goto intx_mode;
+ 
+@@ -2298,7 +2291,6 @@ intx_mode:
+ 	kfree(bnad->msix_table);
+ 	bnad->msix_table = NULL;
+ 	bnad->msix_num = 0;
+-	bnad->msix_diag_num = 0;
+ 	spin_lock_irqsave(&bnad->bna_lock, flags);
+ 	bnad->cfg_flags &= ~BNAD_CF_MSIX;
+ 	bnad_q_num_init(bnad);
+@@ -2946,7 +2938,6 @@ bnad_init(struct bnad *bnad,
+ 	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
+ 		(bnad->num_rx * bnad->num_rxp_per_rx) +
+ 			 BNAD_MAILBOX_MSIX_VECTORS;
+-	bnad->msix_diag_num = 2;	/* 1 for Tx, 1 for Rx */
+ 
+ 	bnad->txq_depth = BNAD_TXQ_DEPTH;
+ 	bnad->rxq_depth = BNAD_RXQ_DEPTH;
+@@ -3217,7 +3208,7 @@ bnad_pci_remove(struct pci_dev *pdev)
+ 	free_netdev(netdev);
+ }
+ 
+-const struct pci_device_id bnad_pci_id_table[] = {
++static const struct pci_device_id bnad_pci_id_table[] = {
+ 	{
+ 		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
+ 			PCI_DEVICE_ID_BROCADE_CT),
+diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
+index ee37788..ebc3a90 100644
+--- a/drivers/net/bna/bnad.h
++++ b/drivers/net/bna/bnad.h
+@@ -248,7 +248,6 @@ struct bnad {
+ 	u64		mmio_len;
+ 
+ 	u32		msix_num;
+-	u32		msix_diag_num;
+ 	struct msix_entry	*msix_table;
+ 
+ 	struct mutex		conf_mutex;
+diff --git a/drivers/net/bna/cna_fwimg.c b/drivers/net/bna/cna_fwimg.c
+index 0bd1d37..e8f4ecd 100644
+--- a/drivers/net/bna/cna_fwimg.c
++++ b/drivers/net/bna/cna_fwimg.c
+@@ -22,7 +22,7 @@ const struct firmware *bfi_fw;
+ static u32 *bfi_image_ct_cna;
+ static u32 bfi_image_ct_cna_size;
+ 
+-u32 *
++static u32 *
+ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
+ 			u32 *bfi_image_size, char *fw_name)
+ {
+-- 
+1.7.4.4
+
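
A note on the MSI-X hunks above: with msix_diag_num gone, the request size is simply msix_num, but the allocate/retry/fall-back control flow is unchanged -- ask for the full vector set, retry once with whatever count pci_enable_msix() reports as available, and otherwise revert to INTx. A rough user-space model of that flow, where fake_pci_enable_msix() and enable_msix() are invented stand-ins rather than driver or kernel API:

#include <stdio.h>

/* Returns 0 on full success, or the number of vectors actually
 * available (mirroring the old pci_enable_msix() contract). */
static int fake_pci_enable_msix(int requested, int hw_limit)
{
	return requested <= hw_limit ? 0 : hw_limit;
}

static int enable_msix(int wanted, int hw_limit)
{
	int ret = fake_pci_enable_msix(wanted, hw_limit);

	if (ret == 0)
		return wanted;			/* full allocation */
	if (ret > 0) {
		/* Partial grant: retry exactly once with the offered
		 * count; if even that fails, fall back. */
		wanted = ret;
		if (fake_pci_enable_msix(wanted, hw_limit) == 0)
			return wanted;
	}
	return 0;				/* caller reverts to INTx */
}

int main(void)
{
	printf("granted %d of 16 (hw limit 8)\n", enable_msix(16, 8));
	return 0;
}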

Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0013-bna-TxRx-and-datapath-fix.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0013-bna-TxRx-and-datapath-fix.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,762 @@
+From: Rasesh Mody <rmody at brocade.com>
+Date: Thu, 23 Dec 2010 21:45:01 +0000
+Subject: [PATCH 13/23] bna: TxRx and datapath fix
+
+commit be7fa3263a15d3f278c3bfbf606ec169aaa3a920 upstream.
+
+Change Details:
+	- Check HW ready condition before accessing h/w register in data-path
+	- Postpone clean-up of data buffers to the data-path restart path and
+	wait in the cleanup routines for in-flight DMA to complete
+	- Separate out Tx completion processing from Rx poll routine
+
+Signed-off-by: Debashis Dutt <ddutt at brocade.com>
+Signed-off-by: Rasesh Mody <rmody at brocade.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/bna/bnad.c |  353 ++++++++++++++++++++++-------------------------
+ drivers/net/bna/bnad.h |   22 ++--
+ 2 files changed, 178 insertions(+), 197 deletions(-)
+
+diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
+index 7e839b9..3c40502 100644
+--- a/drivers/net/bna/bnad.c
++++ b/drivers/net/bna/bnad.c
+@@ -70,6 +70,8 @@ do {								\
+ 	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
+ } while (0)
+ 
++#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */
++
+ /*
+  * Reinitialize completions in CQ, once Rx is taken down
+  */
+@@ -130,7 +132,9 @@ bnad_free_all_txbufs(struct bnad *bnad,
+ 						PCI_DMA_TODEVICE);
+ 
+ 		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+-		unmap_cons++;
++		if (++unmap_cons >= unmap_q->q_depth)
++			break;
++
+ 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ 			pci_unmap_page(bnad->pcidev,
+ 				       pci_unmap_addr(&unmap_array[unmap_cons],
+@@ -139,7 +143,8 @@ bnad_free_all_txbufs(struct bnad *bnad,
+ 				       PCI_DMA_TODEVICE);
+ 			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+ 					   0);
+-			unmap_cons++;
++			if (++unmap_cons >= unmap_q->q_depth)
++				break;
+ 		}
+ 		dev_kfree_skb_any(skb);
+ 	}
+@@ -167,11 +172,11 @@ bnad_free_txbufs(struct bnad *bnad,
+ 	/*
+ 	 * Just return if TX is stopped. This check is useful
+ 	 * when bnad_free_txbufs() runs out of a tasklet scheduled
+-	 * before bnad_cb_tx_cleanup() cleared BNAD_RF_TX_STARTED bit
++	 * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
+ 	 * but this routine runs actually after the cleanup has been
+ 	 * executed.
+ 	 */
+-	if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
++	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
+ 		return 0;
+ 
+ 	updated_hw_cons = *(tcb->hw_consumer_index);
+@@ -252,7 +257,9 @@ bnad_tx_free_tasklet(unsigned long bnad_ptr)
+ 				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
+ 						  &tcb->flags))) {
+ 				acked = bnad_free_txbufs(bnad, tcb);
+-				bna_ib_ack(tcb->i_dbell, acked);
++				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
++					&tcb->flags)))
++					bna_ib_ack(tcb->i_dbell, acked);
+ 				smp_mb__before_clear_bit();
+ 				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+ 			}
+@@ -264,7 +271,7 @@ static u32
+ bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
+ {
+ 	struct net_device *netdev = bnad->netdev;
+-	u32 sent;
++	u32 sent = 0;
+ 
+ 	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+ 		return 0;
+@@ -275,12 +282,15 @@ bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
+ 		    netif_carrier_ok(netdev) &&
+ 		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
+ 				    BNAD_NETIF_WAKE_THRESHOLD) {
+-			netif_wake_queue(netdev);
+-			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
++			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
++				netif_wake_queue(netdev);
++				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
++			}
+ 		}
++	}
++
++	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
+ 		bna_ib_ack(tcb->i_dbell, sent);
+-	} else
+-		bna_ib_ack(tcb->i_dbell, 0);
+ 
+ 	smp_mb__before_clear_bit();
+ 	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+@@ -313,25 +323,26 @@ bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
+ }
+ 
+ static void
+-bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
++bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+ {
+ 	struct bnad_unmap_q *unmap_q;
+ 	struct sk_buff *skb;
++	int unmap_cons;
+ 
+ 	unmap_q = rcb->unmap_q;
+-	while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
+-		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+-		BUG_ON(!(skb));
+-		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
++	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
++		skb = unmap_q->unmap_array[unmap_cons].skb;
++		if (!skb)
++			continue;
++		BUG_ON(!(pci_unmap_addr(
++			&unmap_q->unmap_array[unmap_cons], dma_addr)));
++		unmap_q->unmap_array[unmap_cons].skb = NULL;
+ 		pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
+-					unmap_array[unmap_q->consumer_index],
+-					dma_addr), rcb->rxq->buffer_size +
+-					NET_IP_ALIGN, PCI_DMA_FROMDEVICE);
++					unmap_array[unmap_cons],
++					dma_addr), rcb->rxq->buffer_size,
++					PCI_DMA_FROMDEVICE);
+ 		dev_kfree_skb(skb);
+-		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+-		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
+ 	}
+-
+ 	bnad_reset_rcb(bnad, rcb);
+ }
+ 
+@@ -385,41 +396,9 @@ finishing:
+ 		unmap_q->producer_index = unmap_prod;
+ 		rcb->producer_index = unmap_prod;
+ 		smp_mb();
+-		bna_rxq_prod_indx_doorbell(rcb);
+-	}
+-}
+-
+-/*
+- * Locking is required in the enable path
+- * because it is called from a napi poll
+- * context, where the bna_lock is not held
+- * unlike the IRQ context.
+- */
+-static void
+-bnad_enable_txrx_irqs(struct bnad *bnad)
+-{
+-	struct bna_tcb *tcb;
+-	struct bna_ccb *ccb;
+-	int i, j;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&bnad->bna_lock, flags);
+-	for (i = 0; i < bnad->num_tx; i++) {
+-		for (j = 0; j < bnad->num_txq_per_tx; j++) {
+-			tcb = bnad->tx_info[i].tcb[j];
+-			bna_ib_coalescing_timer_set(tcb->i_dbell,
+-				tcb->txq->ib->ib_config.coalescing_timeo);
+-			bna_ib_ack(tcb->i_dbell, 0);
+-		}
+-	}
+-
+-	for (i = 0; i < bnad->num_rx; i++) {
+-		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+-			ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
+-			bnad_enable_rx_irq_unsafe(ccb);
+-		}
++		if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
++			bna_rxq_prod_indx_doorbell(rcb);
+ 	}
+-	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ }
+ 
+ static inline void
+@@ -448,6 +427,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
+ 	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
+ 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
+ 
++	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
++		return 0;
++
+ 	prefetch(bnad->netdev);
+ 	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
+ 			    wi_range);
+@@ -544,12 +526,15 @@ next:
+ 	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
+ 
+ 	if (likely(ccb)) {
+-		bna_ib_ack(ccb->i_dbell, packets);
++		if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
++			bna_ib_ack(ccb->i_dbell, packets);
+ 		bnad_refill_rxq(bnad, ccb->rcb[0]);
+ 		if (ccb->rcb[1])
+ 			bnad_refill_rxq(bnad, ccb->rcb[1]);
+-	} else
+-		bna_ib_ack(ccb->i_dbell, 0);
++	} else {
++		if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
++			bna_ib_ack(ccb->i_dbell, 0);
++	}
+ 
+ 	return packets;
+ }
+@@ -557,6 +542,9 @@ next:
+ static void
+ bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
+ {
++	if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
++		return;
++
+ 	bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
+ 	bna_ib_ack(ccb->i_dbell, 0);
+ }
+@@ -575,9 +563,11 @@ static void
+ bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
+ {
+ 	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
+-	if (likely(napi_schedule_prep((&rx_ctrl->napi)))) {
++	struct napi_struct *napi = &rx_ctrl->napi;
++
++	if (likely(napi_schedule_prep(napi))) {
+ 		bnad_disable_rx_irq(bnad, ccb);
+-		__napi_schedule((&rx_ctrl->napi));
++		__napi_schedule(napi);
+ 	}
+ 	BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
+ }
+@@ -602,12 +592,11 @@ bnad_msix_mbox_handler(int irq, void *data)
+ {
+ 	u32 intr_status;
+ 	unsigned long flags;
+-	struct net_device *netdev = data;
+-	struct bnad *bnad;
++	struct bnad *bnad = (struct bnad *)data;
+ 
+-	bnad = netdev_priv(netdev);
++	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
++		return IRQ_HANDLED;
+ 
+-	/* BNA_ISR_GET(bnad); Inc Ref count */
+ 	spin_lock_irqsave(&bnad->bna_lock, flags);
+ 
+ 	bna_intr_status_get(&bnad->bna, intr_status);
+@@ -617,7 +606,6 @@ bnad_msix_mbox_handler(int irq, void *data)
+ 
+ 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ 
+-	/* BNAD_ISR_PUT(bnad); Dec Ref count */
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -627,8 +615,7 @@ bnad_isr(int irq, void *data)
+ 	int i, j;
+ 	u32 intr_status;
+ 	unsigned long flags;
+-	struct net_device *netdev = data;
+-	struct bnad *bnad = netdev_priv(netdev);
++	struct bnad *bnad = (struct bnad *)data;
+ 	struct bnad_rx_info *rx_info;
+ 	struct bnad_rx_ctrl *rx_ctrl;
+ 
+@@ -642,16 +629,21 @@ bnad_isr(int irq, void *data)
+ 
+ 	spin_lock_irqsave(&bnad->bna_lock, flags);
+ 
+-	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
++	if (BNA_IS_MBOX_ERR_INTR(intr_status))
+ 		bna_mbox_handler(&bnad->bna, intr_status);
+-		if (!BNA_IS_INTX_DATA_INTR(intr_status)) {
+-			spin_unlock_irqrestore(&bnad->bna_lock, flags);
+-			goto done;
+-		}
+-	}
++
+ 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ 
++	if (!BNA_IS_INTX_DATA_INTR(intr_status))
++		return IRQ_HANDLED;
++
+ 	/* Process data interrupts */
++	/* Tx processing */
++	for (i = 0; i < bnad->num_tx; i++) {
++		for (j = 0; j < bnad->num_txq_per_tx; j++)
++			bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
++	}
++	/* Rx processing */
+ 	for (i = 0; i < bnad->num_rx; i++) {
+ 		rx_info = &bnad->rx_info[i];
+ 		if (!rx_info->rx)
+@@ -663,7 +655,6 @@ bnad_isr(int irq, void *data)
+ 							    rx_ctrl->ccb);
+ 		}
+ 	}
+-done:
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -674,11 +665,7 @@ done:
+ static void
+ bnad_enable_mbox_irq(struct bnad *bnad)
+ {
+-	int irq = BNAD_GET_MBOX_IRQ(bnad);
+-
+-	if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
+-		if (bnad->cfg_flags & BNAD_CF_MSIX)
+-			enable_irq(irq);
++	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
+ 
+ 	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
+ }
+@@ -690,14 +677,19 @@ bnad_enable_mbox_irq(struct bnad *bnad)
+ static void
+ bnad_disable_mbox_irq(struct bnad *bnad)
+ {
+-	int irq = BNAD_GET_MBOX_IRQ(bnad);
++	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
+ 
++	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
++}
+ 
+-	if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
+-		if (bnad->cfg_flags & BNAD_CF_MSIX)
+-			disable_irq_nosync(irq);
++static void
++bnad_set_netdev_perm_addr(struct bnad *bnad)
++{
++	struct net_device *netdev = bnad->netdev;
+ 
+-	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
++	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
++	if (is_zero_ether_addr(netdev->dev_addr))
++		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
+ }
+ 
+ /* Control Path Handlers */
+@@ -755,11 +747,14 @@ bnad_cb_port_link_status(struct bnad *bnad,
+ 
+ 	if (link_up) {
+ 		if (!netif_carrier_ok(bnad->netdev)) {
++			struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
++			if (!tcb)
++				return;
+ 			pr_warn("bna: %s link up\n",
+ 				bnad->netdev->name);
+ 			netif_carrier_on(bnad->netdev);
+ 			BNAD_UPDATE_CTR(bnad, link_toggle);
+-			if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) {
++			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
+ 				/* Force an immediate Transmit Schedule */
+ 				pr_info("bna: %s TX_STARTED\n",
+ 					bnad->netdev->name);
+@@ -807,6 +802,18 @@ bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
+ {
+ 	struct bnad_tx_info *tx_info =
+ 			(struct bnad_tx_info *)tcb->txq->tx->priv;
++	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
++
++	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
++		cpu_relax();
++
++	bnad_free_all_txbufs(bnad, tcb);
++
++	unmap_q->producer_index = 0;
++	unmap_q->consumer_index = 0;
++
++	smp_mb__before_clear_bit();
++	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+ 
+ 	tx_info->tcb[tcb->id] = NULL;
+ }
+@@ -822,6 +829,12 @@ bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
+ }
+ 
+ static void
++bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
++{
++	bnad_free_all_rxbufs(bnad, rcb);
++}
++
++static void
+ bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
+ {
+ 	struct bnad_rx_info *rx_info =
+@@ -849,7 +862,7 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
+ 	if (tx_info != &bnad->tx_info[0])
+ 		return;
+ 
+-	clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags);
++	clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
+ 	netif_stop_queue(bnad->netdev);
+ 	pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
+ }
+@@ -857,30 +870,15 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
+ static void
+ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
+ {
+-	if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
+-		return;
+-
+-	if (netif_carrier_ok(bnad->netdev)) {
+-		pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
+-		netif_wake_queue(bnad->netdev);
+-		BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+-	}
+-}
+-
+-static void
+-bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
+-{
+-	struct bnad_unmap_q *unmap_q;
++	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+ 
+-	if (!tcb || (!tcb->unmap_q))
++	if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
+ 		return;
+ 
+-	unmap_q = tcb->unmap_q;
+-	if (!unmap_q->unmap_array)
+-		return;
++	clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);
+ 
+-	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+-		return;
++	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
++		cpu_relax();
+ 
+ 	bnad_free_all_txbufs(bnad, tcb);
+ 
+@@ -889,21 +887,45 @@ bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
+ 
+ 	smp_mb__before_clear_bit();
+ 	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
++
++	/*
++	 * Workaround for first device enable failure & we
++	 * get a 0 MAC address. We try to get the MAC address
++	 * again here.
++	 */
++	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
++		bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
++		bnad_set_netdev_perm_addr(bnad);
++	}
++
++	set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
++
++	if (netif_carrier_ok(bnad->netdev)) {
++		pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
++		netif_wake_queue(bnad->netdev);
++		BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
++	}
++}
++
++static void
++bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
++{
++	/* Delay only once for the whole Tx Path Shutdown */
++	if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
++		mdelay(BNAD_TXRX_SYNC_MDELAY);
+ }
+ 
+ static void
+ bnad_cb_rx_cleanup(struct bnad *bnad,
+ 			struct bna_ccb *ccb)
+ {
+-	bnad_cq_cmpl_init(bnad, ccb);
+-
+-	bnad_free_rxbufs(bnad, ccb->rcb[0]);
+ 	clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
+ 
+-	if (ccb->rcb[1]) {
+-		bnad_free_rxbufs(bnad, ccb->rcb[1]);
++	if (ccb->rcb[1])
+ 		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
+-	}
++
++	if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
++		mdelay(BNAD_TXRX_SYNC_MDELAY);
+ }
+ 
+ static void
+@@ -911,6 +933,13 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
+ {
+ 	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+ 
++	clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);
++
++	if (rcb == rcb->cq->ccb->rcb[0])
++		bnad_cq_cmpl_init(bnad, rcb->cq->ccb);
++
++	bnad_free_all_rxbufs(bnad, rcb);
++
+ 	set_bit(BNAD_RXQ_STARTED, &rcb->flags);
+ 
+ 	/* Now allocate & post buffers for this RCB */
+@@ -1047,7 +1076,7 @@ bnad_mbox_irq_free(struct bnad *bnad,
+ 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ 
+ 	irq = BNAD_GET_MBOX_IRQ(bnad);
+-	free_irq(irq, bnad->netdev);
++	free_irq(irq, bnad);
+ 
+ 	kfree(intr_info->idl);
+ }
+@@ -1061,7 +1090,7 @@ static int
+ bnad_mbox_irq_alloc(struct bnad *bnad,
+ 		    struct bna_intr_info *intr_info)
+ {
+-	int 		err;
++	int 		err = 0;
+ 	unsigned long 	flags;
+ 	u32	irq;
+ 	irq_handler_t 	irq_handler;
+@@ -1096,22 +1125,17 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
+ 	 */
+ 	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
+ 
++	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
++
+ 	err = request_irq(irq, irq_handler, flags,
+-			  bnad->mbox_irq_name, bnad->netdev);
++			  bnad->mbox_irq_name, bnad);
+ 
+ 	if (err) {
+ 		kfree(intr_info->idl);
+ 		intr_info->idl = NULL;
+-		return err;
+ 	}
+ 
+-	spin_lock_irqsave(&bnad->bna_lock, flags);
+-
+-	if (bnad->cfg_flags & BNAD_CF_MSIX)
+-		disable_irq_nosync(irq);
+-
+-	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+-	return 0;
++	return err;
+ }
+ 
+ static void
+@@ -1555,62 +1579,19 @@ poll_exit:
+ 	return rcvd;
+ }
+ 
+-static int
+-bnad_napi_poll_txrx(struct napi_struct *napi, int budget)
+-{
+-	struct bnad_rx_ctrl *rx_ctrl =
+-		container_of(napi, struct bnad_rx_ctrl, napi);
+-	struct bna_ccb *ccb;
+-	struct bnad *bnad;
+-	int 			rcvd = 0;
+-	int			i, j;
+-
+-	ccb = rx_ctrl->ccb;
+-
+-	bnad = ccb->bnad;
+-
+-	if (!netif_carrier_ok(bnad->netdev))
+-		goto poll_exit;
+-
+-	/* Handle Tx Completions, if any */
+-	for (i = 0; i < bnad->num_tx; i++) {
+-		for (j = 0; j < bnad->num_txq_per_tx; j++)
+-			bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
+-	}
+-
+-	/* Handle Rx Completions */
+-	rcvd = bnad_poll_cq(bnad, ccb, budget);
+-	if (rcvd == budget)
+-		return rcvd;
+-poll_exit:
+-	napi_complete((napi));
+-
+-	BNAD_UPDATE_CTR(bnad, netif_rx_complete);
+-
+-	bnad_enable_txrx_irqs(bnad);
+-	return rcvd;
+-}
+-
+ static void
+ bnad_napi_enable(struct bnad *bnad, u32 rx_id)
+ {
+-	int (*napi_poll) (struct napi_struct *, int);
+ 	struct bnad_rx_ctrl *rx_ctrl;
+ 	int i;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&bnad->bna_lock, flags);
+-	if (bnad->cfg_flags & BNAD_CF_MSIX)
+-		napi_poll = bnad_napi_poll_rx;
+-	else
+-		napi_poll = bnad_napi_poll_txrx;
+-	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ 
+ 	/* Initialize & enable NAPI */
+ 	for (i = 0; i <	bnad->num_rxp_per_rx; i++) {
+ 		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
++
+ 		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
+-			       napi_poll, 64);
++			       bnad_napi_poll_rx, 64);
++
+ 		napi_enable(&rx_ctrl->napi);
+ 	}
+ }
+@@ -1825,6 +1806,7 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
+ 
+ 	/* Initialize the Rx event handlers */
+ 	rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
++	rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
+ 	rx_cbfn.rcb_destroy_cbfn = NULL;
+ 	rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
+ 	rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
+@@ -2152,16 +2134,6 @@ bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
+ 		bnad->num_rxp_per_rx = 1;
+ }
+ 
+-static void
+-bnad_set_netdev_perm_addr(struct bnad *bnad)
+-{
+-	struct net_device *netdev = bnad->netdev;
+-
+-	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
+-	if (is_zero_ether_addr(netdev->dev_addr))
+-		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
+-}
+-
+ /* Enable / disable device */
+ static void
+ bnad_device_disable(struct bnad *bnad)
+@@ -2433,21 +2405,21 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
++	tx_id = 0;
++
++	tx_info = &bnad->tx_info[tx_id];
++	tcb = tx_info->tcb[tx_id];
++	unmap_q = tcb->unmap_q;
++
+ 	/*
+ 	 * Takes care of the Tx that is scheduled between clearing the flag
+ 	 * and the netif_stop_queue() call.
+ 	 */
+-	if (unlikely(!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))) {
++	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
+ 		dev_kfree_skb(skb);
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	tx_id = 0;
+-
+-	tx_info = &bnad->tx_info[tx_id];
+-	tcb = tx_info->tcb[tx_id];
+-	unmap_q = tcb->unmap_q;
+-
+ 	vectors = 1 + skb_shinfo(skb)->nr_frags;
+ 	if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
+ 		dev_kfree_skb(skb);
+@@ -2462,7 +2434,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 		    tcb->consumer_index &&
+ 		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
+ 			acked = bnad_free_txbufs(bnad, tcb);
+-			bna_ib_ack(tcb->i_dbell, acked);
++			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
++				bna_ib_ack(tcb->i_dbell, acked);
+ 			smp_mb__before_clear_bit();
+ 			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+ 		} else {
+@@ -2624,6 +2597,10 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	tcb->producer_index = txq_prod;
+ 
+ 	smp_mb();
++
++	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
++		return NETDEV_TX_OK;
++
+ 	bna_txq_prod_indx_doorbell(tcb);
+ 
+ 	if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
+@@ -3066,7 +3043,7 @@ bnad_pci_probe(struct pci_dev *pdev,
+ 	/*
+ 	 * PCI initialization
+ 	 * 	Output : using_dac = 1 for 64 bit DMA
+-	 *		           = 0 for 32 bit DMA
++	 *			   = 0 for 32 bit DMA
+ 	 */
+ 	err = bnad_pci_init(bnad, pdev, &using_dac);
+ 	if (err)
+diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
+index ebc3a90..f59685a 100644
+--- a/drivers/net/bna/bnad.h
++++ b/drivers/net/bna/bnad.h
+@@ -51,6 +51,7 @@
+  */
+ struct bnad_rx_ctrl {
+ 	struct bna_ccb *ccb;
++	unsigned long  flags;
+ 	struct napi_struct	napi;
+ };
+ 
+@@ -82,6 +83,7 @@ struct bnad_rx_ctrl {
+ 
+ /* Bit positions for tcb->flags */
+ #define BNAD_TXQ_FREE_SENT		0
++#define BNAD_TXQ_TX_STARTED		1
+ 
+ /* Bit positions for rcb->flags */
+ #define BNAD_RXQ_REFILL			0
+@@ -199,12 +201,12 @@ struct bnad_unmap_q {
+ /* Set, tested & cleared using xxx_bit() functions */
+ /* Values indicated bit positions */
+ #define	BNAD_RF_CEE_RUNNING		1
+-#define BNAD_RF_HW_ERROR 		2
+-#define BNAD_RF_MBOX_IRQ_DISABLED	3
+-#define BNAD_RF_TX_STARTED		4
+-#define BNAD_RF_RX_STARTED		5
+-#define BNAD_RF_DIM_TIMER_RUNNING	6
+-#define BNAD_RF_STATS_TIMER_RUNNING	7
++#define BNAD_RF_MBOX_IRQ_DISABLED	2
++#define BNAD_RF_RX_STARTED		3
++#define BNAD_RF_DIM_TIMER_RUNNING	4
++#define BNAD_RF_STATS_TIMER_RUNNING	5
++#define BNAD_RF_TX_SHUTDOWN_DELAYED	6
++#define BNAD_RF_RX_SHUTDOWN_DELAYED	7
+ 
+ struct bnad {
+ 	struct net_device 	*netdev;
+@@ -320,9 +322,11 @@ extern void bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64
+ 
+ #define bnad_enable_rx_irq_unsafe(_ccb)			\
+ {							\
+-	bna_ib_coalescing_timer_set((_ccb)->i_dbell,	\
+-		(_ccb)->rx_coalescing_timeo);		\
+-	bna_ib_ack((_ccb)->i_dbell, 0);			\
++	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) {\
++		bna_ib_coalescing_timer_set((_ccb)->i_dbell,	\
++			(_ccb)->rx_coalescing_timeo);		\
++		bna_ib_ack((_ccb)->i_dbell, 0);			\
++	}							\
+ }
+ 
+ #define bnad_dim_timer_running(_bnad)				\
+-- 
+1.7.4.4
+
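
A note on patch 13's central pattern: datapath code may only ring doorbells or ack interrupts while a per-queue STARTED bit is set, and teardown clears that bit first, waits out in-flight work (BNAD_TXRX_SYNC_MDELAY), and only then frees buffers. A compressed user-space sketch of that discipline, with invented names and usleep() standing in for mdelay():

#include <stdatomic.h>
#include <unistd.h>
#include <stdio.h>

#define TXQ_TX_STARTED	(1UL << 1)

static atomic_ulong txq_flags;

static void doorbell_write(unsigned acked)	/* stands in for bna_ib_ack() */
{
	printf("ack %u completions\n", acked);
}

static void tx_complete(unsigned acked)
{
	/* Never touch the (pretend) register once the queue is stopped. */
	if (atomic_load(&txq_flags) & TXQ_TX_STARTED)
		doorbell_write(acked);
}

static void tx_teardown(void)
{
	atomic_fetch_and(&txq_flags, ~TXQ_TX_STARTED);
	usleep(250 * 1000);	/* analogue of BNAD_TXRX_SYNC_MDELAY:
				 * give in-flight DMA time to drain */
	puts("unmap and free buffers");
}

int main(void)
{
	atomic_fetch_or(&txq_flags, TXQ_TX_STARTED);
	tx_complete(4);
	tx_teardown();
	tx_complete(4);		/* silently skipped: queue stopped */
	return 0;
}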

Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0014-bna-Port-enable-disable-sync-and-txq-priority-fix.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0014-bna-Port-enable-disable-sync-and-txq-priority-fix.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,364 @@
+From: Rasesh Mody <rmody at brocade.com>
+Date: Thu, 23 Dec 2010 21:45:02 +0000
+Subject: [PATCH 14/23] bna: Port enable disable sync and txq priority fix
+
+commit 0613ecfc94b13e86c9ff1252fd63e35a94475cd6 upstream.
+
+Change Details:
+	- Fixed port enable/disable sync through a change in LL port state
+	machine
+	- Change txq->priority masking to 0x7 (3 bits) from 0x3 (2 bits)
+
+Signed-off-by: Debashis Dutt <ddutt at brocade.com>
+Signed-off-by: Rasesh Mody <rmody at brocade.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/bna/bna.h       |    4 +-
+ drivers/net/bna/bna_ctrl.c  |  136 +++++++++++++++++++++++++++++++++----------
+ drivers/net/bna/bna_txrx.c  |    8 +-
+ drivers/net/bna/bna_types.h |    7 +-
+ 4 files changed, 116 insertions(+), 39 deletions(-)
+
+diff --git a/drivers/net/bna/bna.h b/drivers/net/bna/bna.h
+index df6676b..fd93f76 100644
+--- a/drivers/net/bna/bna.h
++++ b/drivers/net/bna/bna.h
+@@ -390,8 +390,8 @@ void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
+ 
+ /* API for RX */
+ int bna_port_mtu_get(struct bna_port *port);
+-void bna_llport_admin_up(struct bna_llport *llport);
+-void bna_llport_admin_down(struct bna_llport *llport);
++void bna_llport_rx_started(struct bna_llport *llport);
++void bna_llport_rx_stopped(struct bna_llport *llport);
+ 
+ /* API for BNAD */
+ void bna_port_enable(struct bna_port *port);
+diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
+index 07b2659..68e4c5e 100644
+--- a/drivers/net/bna/bna_ctrl.c
++++ b/drivers/net/bna/bna_ctrl.c
+@@ -59,14 +59,70 @@ bna_port_cb_link_down(struct bna_port *port, int status)
+ 	port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
+ }
+ 
++static inline int
++llport_can_be_up(struct bna_llport *llport)
++{
++	int ready = 0;
++	if (llport->type == BNA_PORT_T_REGULAR)
++		ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
++			 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
++			 (llport->flags & BNA_LLPORT_F_PORT_ENABLED));
++	else
++		ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
++			 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
++			 !(llport->flags & BNA_LLPORT_F_PORT_ENABLED));
++	return ready;
++}
++
++#define llport_is_up llport_can_be_up
++
++enum bna_llport_event {
++	LLPORT_E_START			= 1,
++	LLPORT_E_STOP			= 2,
++	LLPORT_E_FAIL			= 3,
++	LLPORT_E_UP			= 4,
++	LLPORT_E_DOWN			= 5,
++	LLPORT_E_FWRESP_UP_OK		= 6,
++	LLPORT_E_FWRESP_UP_FAIL		= 7,
++	LLPORT_E_FWRESP_DOWN		= 8
++};
++
++static void
++bna_llport_cb_port_enabled(struct bna_llport *llport)
++{
++	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
++
++	if (llport_can_be_up(llport))
++		bfa_fsm_send_event(llport, LLPORT_E_UP);
++}
++
++static void
++bna_llport_cb_port_disabled(struct bna_llport *llport)
++{
++	int llport_up = llport_is_up(llport);
++
++	llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
++
++	if (llport_up)
++		bfa_fsm_send_event(llport, LLPORT_E_DOWN);
++}
++
+ /**
+  * MBOX
+  */
+ static int
+ bna_is_aen(u8 msg_id)
+ {
+-	return msg_id == BFI_LL_I2H_LINK_DOWN_AEN ||
+-	       msg_id == BFI_LL_I2H_LINK_UP_AEN;
++	switch (msg_id) {
++	case BFI_LL_I2H_LINK_DOWN_AEN:
++	case BFI_LL_I2H_LINK_UP_AEN:
++	case BFI_LL_I2H_PORT_ENABLE_AEN:
++	case BFI_LL_I2H_PORT_DISABLE_AEN:
++		return 1;
++
++	default:
++		return 0;
++	}
+ }
+ 
+ static void
+@@ -81,6 +137,12 @@ bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
+ 	case BFI_LL_I2H_LINK_DOWN_AEN:
+ 		bna_port_cb_link_down(&bna->port, aen->reason);
+ 		break;
++	case BFI_LL_I2H_PORT_ENABLE_AEN:
++		bna_llport_cb_port_enabled(&bna->port.llport);
++		break;
++	case BFI_LL_I2H_PORT_DISABLE_AEN:
++		bna_llport_cb_port_disabled(&bna->port.llport);
++		break;
+ 	default:
+ 		break;
+ 	}
+@@ -251,16 +313,6 @@ static void bna_llport_start(struct bna_llport *llport);
+ static void bna_llport_stop(struct bna_llport *llport);
+ static void bna_llport_fail(struct bna_llport *llport);
+ 
+-enum bna_llport_event {
+-	LLPORT_E_START			= 1,
+-	LLPORT_E_STOP			= 2,
+-	LLPORT_E_FAIL			= 3,
+-	LLPORT_E_UP			= 4,
+-	LLPORT_E_DOWN			= 5,
+-	LLPORT_E_FWRESP_UP		= 6,
+-	LLPORT_E_FWRESP_DOWN		= 7
+-};
+-
+ enum bna_llport_state {
+ 	BNA_LLPORT_STOPPED		= 1,
+ 	BNA_LLPORT_DOWN			= 2,
+@@ -320,7 +372,7 @@ bna_llport_sm_stopped(struct bna_llport *llport,
+ 		/* No-op */
+ 		break;
+ 
+-	case LLPORT_E_FWRESP_UP:
++	case LLPORT_E_FWRESP_UP_OK:
+ 	case LLPORT_E_FWRESP_DOWN:
+ 		/**
+ 		 * These events are received due to flushing of mbox when
+@@ -366,6 +418,7 @@ bna_llport_sm_down(struct bna_llport *llport,
+ static void
+ bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
+ {
++	BUG_ON(!llport_can_be_up(llport));
+ 	/**
+ 	 * NOTE: Do not call bna_fw_llport_up() here. That will over step
+ 	 * mbox due to down_resp_wait -> up_resp_wait transition on event
+@@ -390,10 +443,14 @@ bna_llport_sm_up_resp_wait(struct bna_llport *llport,
+ 		bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
+ 		break;
+ 
+-	case LLPORT_E_FWRESP_UP:
++	case LLPORT_E_FWRESP_UP_OK:
+ 		bfa_fsm_set_state(llport, bna_llport_sm_up);
+ 		break;
+ 
++	case LLPORT_E_FWRESP_UP_FAIL:
++		bfa_fsm_set_state(llport, bna_llport_sm_down);
++		break;
++
+ 	case LLPORT_E_FWRESP_DOWN:
+ 		/* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
+ 		bna_fw_llport_up(llport);
+@@ -431,11 +488,12 @@ bna_llport_sm_down_resp_wait(struct bna_llport *llport,
+ 		bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
+ 		break;
+ 
+-	case LLPORT_E_FWRESP_UP:
++	case LLPORT_E_FWRESP_UP_OK:
+ 		/* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
+ 		bna_fw_llport_down(llport);
+ 		break;
+ 
++	case LLPORT_E_FWRESP_UP_FAIL:
+ 	case LLPORT_E_FWRESP_DOWN:
+ 		bfa_fsm_set_state(llport, bna_llport_sm_down);
+ 		break;
+@@ -496,11 +554,12 @@ bna_llport_sm_last_resp_wait(struct bna_llport *llport,
+ 		/* No-op */
+ 		break;
+ 
+-	case LLPORT_E_FWRESP_UP:
++	case LLPORT_E_FWRESP_UP_OK:
+ 		/* up_resp_wait->last_resp_wait transition on LLPORT_T_STOP */
+ 		bna_fw_llport_down(llport);
+ 		break;
+ 
++	case LLPORT_E_FWRESP_UP_FAIL:
+ 	case LLPORT_E_FWRESP_DOWN:
+ 		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
+ 		break;
+@@ -541,7 +600,14 @@ bna_fw_cb_llport_up(void *arg, int status)
+ 	struct bna_llport *llport = (struct bna_llport *)arg;
+ 
+ 	bfa_q_qe_init(&llport->mbox_qe.qe);
+-	bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP);
++	if (status == BFI_LL_CMD_FAIL) {
++		if (llport->type == BNA_PORT_T_REGULAR)
++			llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
++		else
++			llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
++		bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_FAIL);
++	} else
++		bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_OK);
+ }
+ 
+ static void
+@@ -588,13 +654,14 @@ bna_port_cb_llport_stopped(struct bna_port *port,
+ static void
+ bna_llport_init(struct bna_llport *llport, struct bna *bna)
+ {
+-	llport->flags |= BNA_LLPORT_F_ENABLED;
++	llport->flags |= BNA_LLPORT_F_ADMIN_UP;
++	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
+ 	llport->type = BNA_PORT_T_REGULAR;
+ 	llport->bna = bna;
+ 
+ 	llport->link_status = BNA_LINK_DOWN;
+ 
+-	llport->admin_up_count = 0;
++	llport->rx_started_count = 0;
+ 
+ 	llport->stop_cbfn = NULL;
+ 
+@@ -606,7 +673,8 @@ bna_llport_init(struct bna_llport *llport, struct bna *bna)
+ static void
+ bna_llport_uninit(struct bna_llport *llport)
+ {
+-	llport->flags &= ~BNA_LLPORT_F_ENABLED;
++	llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
++	llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
+ 
+ 	llport->bna = NULL;
+ }
+@@ -628,6 +696,8 @@ bna_llport_stop(struct bna_llport *llport)
+ static void
+ bna_llport_fail(struct bna_llport *llport)
+ {
++	/* Reset the physical port status to enabled */
++	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
+ 	bfa_fsm_send_event(llport, LLPORT_E_FAIL);
+ }
+ 
+@@ -638,25 +708,31 @@ bna_llport_state_get(struct bna_llport *llport)
+ }
+ 
+ void
+-bna_llport_admin_up(struct bna_llport *llport)
++bna_llport_rx_started(struct bna_llport *llport)
+ {
+-	llport->admin_up_count++;
++	llport->rx_started_count++;
++
++	if (llport->rx_started_count == 1) {
++
++		llport->flags |= BNA_LLPORT_F_RX_STARTED;
+ 
+-	if (llport->admin_up_count == 1) {
+-		llport->flags |= BNA_LLPORT_F_RX_ENABLED;
+-		if (llport->flags & BNA_LLPORT_F_ENABLED)
++		if (llport_can_be_up(llport))
+ 			bfa_fsm_send_event(llport, LLPORT_E_UP);
+ 	}
+ }
+ 
+ void
+-bna_llport_admin_down(struct bna_llport *llport)
++bna_llport_rx_stopped(struct bna_llport *llport)
+ {
+-	llport->admin_up_count--;
++	int llport_up = llport_is_up(llport);
++
++	llport->rx_started_count--;
++
++	if (llport->rx_started_count == 0) {
++
++		llport->flags &= ~BNA_LLPORT_F_RX_STARTED;
+ 
+-	if (llport->admin_up_count == 0) {
+-		llport->flags &= ~BNA_LLPORT_F_RX_ENABLED;
+-		if (llport->flags & BNA_LLPORT_F_ENABLED)
++		if (llport_up)
+ 			bfa_fsm_send_event(llport, LLPORT_E_DOWN);
+ 	}
+ }
+diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c
+index ad93fdb..fb6cf1f 100644
+--- a/drivers/net/bna/bna_txrx.c
++++ b/drivers/net/bna/bna_txrx.c
+@@ -1947,7 +1947,7 @@ bna_rx_sm_started_entry(struct bna_rx *rx)
+ 		bna_ib_ack(&rxp->cq.ib->door_bell, 0);
+ 	}
+ 
+-	bna_llport_admin_up(&rx->bna->port.llport);
++	bna_llport_rx_started(&rx->bna->port.llport);
+ }
+ 
+ void
+@@ -1955,13 +1955,13 @@ bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
+ {
+ 	switch (event) {
+ 	case RX_E_FAIL:
+-		bna_llport_admin_down(&rx->bna->port.llport);
++		bna_llport_rx_stopped(&rx->bna->port.llport);
+ 		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
+ 		rx_ib_fail(rx);
+ 		bna_rxf_fail(&rx->rxf);
+ 		break;
+ 	case RX_E_STOP:
+-		bna_llport_admin_down(&rx->bna->port.llport);
++		bna_llport_rx_stopped(&rx->bna->port.llport);
+ 		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
+ 		break;
+ 	default:
+@@ -3373,7 +3373,7 @@ __bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
+ 
+ 	txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
+ 	txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
+-			(txq->priority & 0x3));
++			(txq->priority & 0x7));
+ 	txq_cfg.wvc_n_cquota_n_rquota =
+ 			((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
+ 			(BFI_TX_MAX_WRR_QUOTA & 0xfff));
+diff --git a/drivers/net/bna/bna_types.h b/drivers/net/bna/bna_types.h
+index 6877310..d79fd98 100644
+--- a/drivers/net/bna/bna_types.h
++++ b/drivers/net/bna/bna_types.h
+@@ -249,8 +249,9 @@ enum bna_link_status {
+ };
+ 
+ enum bna_llport_flags {
+-	BNA_LLPORT_F_ENABLED 	= 1,
+-	BNA_LLPORT_F_RX_ENABLED	= 2
++	BNA_LLPORT_F_ADMIN_UP	 	= 1,
++	BNA_LLPORT_F_PORT_ENABLED	= 2,
++	BNA_LLPORT_F_RX_STARTED		= 4
+ };
+ 
+ enum bna_port_flags {
+@@ -405,7 +406,7 @@ struct bna_llport {
+ 
+ 	enum bna_link_status link_status;
+ 
+-	int			admin_up_count;
++	int			rx_started_count;
+ 
+ 	void (*stop_cbfn)(struct bna_port *, enum bna_cb_status);
+ 
+-- 
+1.7.4.4
+
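
The new llport_can_be_up() predicate treats port readiness as a conjunction of three flags, and FSM events fire only on the edge where the conjunction changes. The sketch below models the regular-port case only (the patch inverts the PORT_ENABLED test for other port types); all names are illustrative:

#include <stdio.h>

enum {
	F_ADMIN_UP	= 1 << 0,
	F_PORT_ENABLED	= 1 << 1,
	F_RX_STARTED	= 1 << 2,
};

#define ALL_READY	(F_ADMIN_UP | F_PORT_ENABLED | F_RX_STARTED)

static unsigned flags;

static int can_be_up(void)
{
	return (flags & ALL_READY) == ALL_READY;
}

static void set_flag(unsigned f)
{
	int was_up = can_be_up();

	flags |= f;
	if (!was_up && can_be_up())
		puts("send LLPORT_E_UP");	/* edge: just became ready */
}

static void clear_flag(unsigned f)
{
	int was_up = can_be_up();

	flags &= ~f;
	if (was_up && !can_be_up())
		puts("send LLPORT_E_DOWN");	/* edge: lost readiness */
}

int main(void)
{
	set_flag(F_ADMIN_UP);
	set_flag(F_PORT_ENABLED);
	set_flag(F_RX_STARTED);		/* UP fires here, once */
	clear_flag(F_PORT_ENABLED);	/* DOWN fires here */
	return 0;
}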

Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0015-bna-Fix-ethtool-register-dump-and-reordered-an-API.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0015-bna-Fix-ethtool-register-dump-and-reordered-an-API.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,62 @@
+From: Rasesh Mody <rmody at brocade.com>
+Date: Thu, 23 Dec 2010 21:45:03 +0000
+Subject: [PATCH 15/23] bna: Fix ethtool register dump and reordered an API
+
+commit 815f41e74031d6dc6d6dd988f58c03a1d72d02b9 upstream.
+
+Change Details:
+	- Removed semaphore register dump from ethtool
+	- Moved netif_carrier_off() call to before calling bna_init()
+
+Signed-off-by: Debashis Dutt <ddutt at brocade.com>
+Signed-off-by: Rasesh Mody <rmody at brocade.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/bna/bnad.c         |    8 +++-----
+ drivers/net/bna/bnad_ethtool.c |    4 ----
+ 2 files changed, 3 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
+index 3c40502..5e7a030 100644
+--- a/drivers/net/bna/bnad.c
++++ b/drivers/net/bna/bnad.c
+@@ -3061,6 +3061,9 @@ bnad_pci_probe(struct pci_dev *pdev,
+ 	/* Initialize netdev structure, set up ethtool ops */
+ 	bnad_netdev_init(bnad, using_dac);
+ 
++	/* Set link to down state */
++	netif_carrier_off(netdev);
++
+ 	bnad_enable_msix(bnad);
+ 
+ 	/* Get resource requirement form bna */
+@@ -3114,11 +3117,6 @@ bnad_pci_probe(struct pci_dev *pdev,
+ 
+ 	mutex_unlock(&bnad->conf_mutex);
+ 
+-	/*
+-	 * Make sure the link appears down to the stack
+-	 */
+-	netif_carrier_off(netdev);
+-
+ 	/* Finally, reguister with net_device layer */
+ 	err = register_netdev(netdev);
+ 	if (err) {
+diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
+index 11fa2ea..3011110 100644
+--- a/drivers/net/bna/bnad_ethtool.c
++++ b/drivers/net/bna/bnad_ethtool.c
+@@ -330,10 +330,6 @@ do {								\
+ 
+ 	BNAD_GET_REG(PCIE_MISC_REG);
+ 
+-	BNAD_GET_REG(HOST_SEM0_REG);
+-	BNAD_GET_REG(HOST_SEM1_REG);
+-	BNAD_GET_REG(HOST_SEM2_REG);
+-	BNAD_GET_REG(HOST_SEM3_REG);
+ 	BNAD_GET_REG(HOST_SEM0_INFO_REG);
+ 	BNAD_GET_REG(HOST_SEM1_INFO_REG);
+ 	BNAD_GET_REG(HOST_SEM2_INFO_REG);
+-- 
+1.7.4.4
+
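
The netif_carrier_off() move is an ordering fix: a net_device reads as carrier-on until netif_carrier_off() is first called, so deferring the call leaves a window during bring-up in which driver paths that test netif_carrier_ok() can see a link that was never actually up. Moving the call ahead of the hardware initialization closes that window. Schematically -- every function below is a stub, not the real probe code:

#include <stdio.h>

static void netdev_setup(void)		{ puts("init netdev + ethtool ops"); }
static void carrier_off(void)		{ puts("netif_carrier_off (link down)"); }
static void hardware_bringup(void)	{ puts("enable MSI-X, bna_init, IOC up"); }
static void register_with_stack(void)	{ puts("register_netdev"); }

int main(void)
{
	netdev_setup();
	carrier_off();		/* before bring-up: link-state callbacks and
				 * netif_carrier_ok() tests now start from a
				 * consistent link-down state */
	hardware_bringup();
	register_with_stack();
	return 0;
}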

Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0016-bna-Enable-pure-priority-tagged-packet-reception-and.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0016-bna-Enable-pure-priority-tagged-packet-reception-and.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,72 @@
+From: Rasesh Mody <rmody at brocade.com>
+Date: Thu, 23 Dec 2010 21:45:04 +0000
+Subject: [PATCH 16/23] bna: Enable pure priority tagged packet reception and
+ rxf uninit cleanup fix
+
+commit 886f7fedb0cee56acca7620d89186669273d3d56 upstream.
+
+Change Details:
+	- Enable reception of pure priority tagged packets by default by
+	turning on VLAN Id = 0
+	- Clear the promiscuous mode, all multicast mode flags when
+	bna_rxf_uninit is called
+
+Signed-off-by: Debashis Dutt <ddutt at brocade.com>
+Signed-off-by: Rasesh Mody <rmody at brocade.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/bna/bna_txrx.c |   25 +++++++++++++++++++++++++
+ 1 files changed, 25 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c
+index fb6cf1f..5076618 100644
+--- a/drivers/net/bna/bna_txrx.c
++++ b/drivers/net/bna/bna_txrx.c
+@@ -1441,12 +1441,16 @@ bna_rxf_init(struct bna_rxf *rxf,
+ 	memset(rxf->vlan_filter_table, 0,
+ 			(sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));
+ 
++	/* Set up VLAN 0 for pure priority tagged packets */
++	rxf->vlan_filter_table[0] |= 1;
++
+ 	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
+ }
+ 
+ static void
+ bna_rxf_uninit(struct bna_rxf *rxf)
+ {
++	struct bna *bna = rxf->rx->bna;
+ 	struct bna_mac *mac;
+ 
+ 	bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
+@@ -1473,6 +1477,27 @@ bna_rxf_uninit(struct bna_rxf *rxf)
+ 		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
+ 	}
+ 
++	/* Turn off pending promisc mode */
++	if (is_promisc_enable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask)) {
++		/* system promisc state should be pending */
++		BUG_ON(!(bna->rxf_promisc_id == rxf->rxf_id));
++		promisc_inactive(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++		 bna->rxf_promisc_id = BFI_MAX_RXF;
++	}
++	/* Promisc mode should not be active */
++	BUG_ON(rxf->rxmode_active & BNA_RXMODE_PROMISC);
++
++	/* Turn off pending all-multi mode */
++	if (is_allmulti_enable(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask)) {
++		allmulti_inactive(rxf->rxmode_pending,
++				rxf->rxmode_pending_bitmask);
++	}
++	/* Allmulti mode should not be active */
++	BUG_ON(rxf->rxmode_active & BNA_RXMODE_ALLMULTI);
++
+ 	rxf->rx = NULL;
+ }
+ 
+-- 
+1.7.4.4
+
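
The functional change in patch 16 is a single bit: the VLAN filter is a bitmap indexed by the 12-bit VLAN ID, and setting bit 0 admits 802.1p priority-tagged frames, which carry VID 0. A self-contained sketch of the usual bitmap helpers (illustrative, not the driver's macros):

#include <stdint.h>
#include <stdio.h>

#define MAX_VLAN 4095

static uint32_t vlan_filter[(MAX_VLAN + 1) / 32];

static void vlan_enable(unsigned id)
{
	vlan_filter[id >> 5] |= 1u << (id & 31);
}

static int vlan_enabled(unsigned id)
{
	return (vlan_filter[id >> 5] >> (id & 31)) & 1;
}

int main(void)
{
	vlan_enable(0);		/* pure priority-tagged traffic uses VID 0 */
	printf("vid 0: %d, vid 100: %d\n", vlan_enabled(0), vlan_enabled(100));
	return 0;
}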

Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0017-bna-Fix-for-TX-queue.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0017-bna-Fix-for-TX-queue.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,103 @@
+From: Rasesh Mody <rmody at brocade.com>
+Date: Thu, 23 Dec 2010 21:45:05 +0000
+Subject: [PATCH 17/23] bna: Fix for TX queue
+
+commit f7c0fa4cd5dcf58dd95b216d2c33444a3b4a44e0 upstream.
+
+Change Details:
+	- Call netif_wake_queue() if we have freed up sufficient elements
+	at the end of completion processing
+	- Add netif_queue_stopped counter back to bnad_drv_stats {}
+	- Get netif_queue_stopped value from stack
+	- Remove BUG_ON() on value returned by pci_unmap_addr()
+
+Signed-off-by: Debashis Dutt <ddutt at brocade.com>
+Signed-off-by: Rasesh Mody <rmody at brocade.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+[bwh: Adjust context for different version of stats fix]
+---
+ drivers/net/bna/bnad.c         |   20 ++++++++++++++++----
+ drivers/net/bna/bnad.h         |    7 +++++--
+ drivers/net/bna/bnad_ethtool.c |    4 ++++
+ 3 files changed, 25 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/bna/bnad.c
++++ b/drivers/net/bna/bnad.c
+@@ -109,7 +109,7 @@ static void
+ bnad_free_all_txbufs(struct bnad *bnad,
+ 		 struct bna_tcb *tcb)
+ {
+-	u16 		unmap_cons;
++	u32 		unmap_cons;
+ 	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+ 	struct bnad_skb_unmap *unmap_array;
+ 	struct sk_buff 		*skb = NULL;
+@@ -244,7 +244,7 @@ bnad_tx_free_tasklet(unsigned long bnad_ptr)
+ {
+ 	struct bnad *bnad = (struct bnad *)bnad_ptr;
+ 	struct bna_tcb *tcb;
+-	u32 		acked;
++	u32 		acked = 0;
+ 	int			i, j;
+ 
+ 	for (i = 0; i < bnad->num_tx; i++) {
+@@ -263,6 +263,20 @@ bnad_tx_free_tasklet(unsigned long bnad_ptr)
+ 				smp_mb__before_clear_bit();
+ 				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+ 			}
++			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
++						&tcb->flags)))
++				continue;
++			if (netif_queue_stopped(bnad->netdev)) {
++				if (acked && netif_carrier_ok(bnad->netdev) &&
++					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
++						BNAD_NETIF_WAKE_THRESHOLD) {
++					netif_wake_queue(bnad->netdev);
++					/* TODO */
++					/* Counters for individual TxQs? */
++					BNAD_UPDATE_CTR(bnad,
++						netif_queue_wakeup);
++				}
++			}
+ 		}
+ 	}
+ }
+@@ -334,8 +348,6 @@ bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+ 		skb = unmap_q->unmap_array[unmap_cons].skb;
+ 		if (!skb)
+ 			continue;
+-		BUG_ON(!(pci_unmap_addr(
+-			&unmap_q->unmap_array[unmap_cons], dma_addr)));
+ 		unmap_q->unmap_array[unmap_cons].skb = NULL;
+ 		pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
+ 					unmap_array[unmap_cons],
+--- a/drivers/net/bna/bnad.h
++++ b/drivers/net/bna/bnad.h
+@@ -126,6 +126,7 @@ struct bnad_completion {
+ struct bnad_drv_stats {
+ 	u64 		netif_queue_stop;
+ 	u64		netif_queue_wakeup;
++	u64		netif_queue_stopped;
+ 	u64		tso4;
+ 	u64		tso6;
+ 	u64		tso_err;
+--- a/drivers/net/bna/bnad_ethtool.c
++++ b/drivers/net/bna/bnad_ethtool.c
+@@ -68,6 +68,7 @@ static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
+ 
+ 	"netif_queue_stop",
+ 	"netif_queue_wakeup",
++	"netif_queue_stopped",
+ 	"tso4",
+ 	"tso6",
+ 	"tso_err",
+@@ -1183,6 +1184,9 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
+ 	     i++)
+ 		buf[bi++] = net_stats[i];
+ 
++	/* Get netif_queue_stopped from stack */
++	bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);
++
+ 	/* Fill driver stats into ethtool buffers */
+ 	stats64 = (u64 *)&bnad->stats.drv_stats;
+ 	for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)

Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0018-bna-IOC-uninit-check-and-misc-cleanup.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0018-bna-IOC-uninit-check-and-misc-cleanup.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,149 @@
+From: Rasesh Mody <rmody at brocade.com>
+Date: Thu, 23 Dec 2010 21:45:06 +0000
+Subject: [PATCH 18/23] bna: IOC uninit check and misc cleanup
+
+commit 2c7d38210ff8e65f8961699bce92c273c77d113c upstream.
+
+Change Details:
+	- Added a check in ioc firmware lock function to see if IOC is
+	in BFI_IOC_UNINIT state or not. If it is not in UNINIT state
+	and the last IOC boot was not done by OS driver, force IOC state
+	to BFI_IOC_UNINIT
+	- Unused macro and API cleanup
+
+Signed-off-by: Debashis Dutt <ddutt at brocade.com>
+Signed-off-by: Rasesh Mody <rmody at brocade.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/bna/bfa_defs_mfg_comm.h |   22 -----------------
+ drivers/net/bna/bfa_ioc.c           |   45 +---------------------------------
+ 2 files changed, 2 insertions(+), 65 deletions(-)
+
+diff --git a/drivers/net/bna/bfa_defs_mfg_comm.h b/drivers/net/bna/bfa_defs_mfg_comm.h
+index 987978f..fdd6776 100644
+--- a/drivers/net/bna/bfa_defs_mfg_comm.h
++++ b/drivers/net/bna/bfa_defs_mfg_comm.h
+@@ -95,28 +95,6 @@ enum {
+ 	(type) == BFA_MFG_TYPE_CNA10P1 || \
+ 	bfa_mfg_is_mezz(type)))
+ 
+-/**
+- * Check if the card having old wwn/mac handling
+- */
+-#define bfa_mfg_is_old_wwn_mac_model(type) (( \
+-	(type) == BFA_MFG_TYPE_FC8P2 || \
+-	(type) == BFA_MFG_TYPE_FC8P1 || \
+-	(type) == BFA_MFG_TYPE_FC4P2 || \
+-	(type) == BFA_MFG_TYPE_FC4P1 || \
+-	(type) == BFA_MFG_TYPE_CNA10P2 || \
+-	(type) == BFA_MFG_TYPE_CNA10P1 || \
+-	(type) == BFA_MFG_TYPE_JAYHAWK || \
+-	(type) == BFA_MFG_TYPE_WANCHESE))
+-
+-#define bfa_mfg_increment_wwn_mac(m, i)				\
+-do {								\
+-	u32 t = ((m)[0] << 16) | ((m)[1] << 8) | (m)[2];	\
+-	t += (i);						\
+-	(m)[0] = (t >> 16) & 0xFF;				\
+-	(m)[1] = (t >> 8) & 0xFF;				\
+-	(m)[2] = t & 0xFF;					\
+-} while (0)
+-
+ #define bfa_mfg_adapter_prop_init_flash(card_type, prop)	\
+ do {								\
+ 	switch ((card_type)) {					\
+diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
+index e94e5aa..8ed147e 100644
+--- a/drivers/net/bna/bfa_ioc.c
++++ b/drivers/net/bna/bfa_ioc.c
+@@ -58,9 +58,6 @@
+ #define bfa_ioc_notify_hbfail(__ioc)			\
+ 			((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
+ 
+-#define bfa_ioc_is_optrom(__ioc)	\
+-	(bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
+-
+ #define bfa_ioc_mbox_cmd_pending(__ioc)		\
+ 			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
+ 			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
+@@ -101,7 +98,6 @@ static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
+ 						char *manufacturer);
+ static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
+ static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
+-static mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc);
+ 
+ /**
+  * IOC state machine events
+@@ -865,12 +861,6 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
+ {
+ 	struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
+ 
+-	/**
+-	 * If bios/efi boot (flash based) -- return true
+-	 */
+-	if (bfa_ioc_is_optrom(ioc))
+-		return true;
+-
+ 	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
+ 	drv_fwhdr = (struct bfi_ioc_image_hdr *)
+ 		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+@@ -934,13 +924,8 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
+ 	/**
+ 	 * If IOC function is disabled and firmware version is same,
+ 	 * just re-enable IOC.
+-	 *
+-	 * If option rom, IOC must not be in operational state. With
+-	 * convergence, IOC will be in operational state when 2nd driver
+-	 * is loaded.
+ 	 */
+-	if (ioc_fwstate == BFI_IOC_DISABLED ||
+-	    (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
++	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
+ 		/**
+ 		 * When using MSI-X any pending firmware ready event should
+ 		 * be flushed. Otherwise MSI-X interrupts are not delivered.
+@@ -1078,11 +1063,6 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
+ 	 */
+ 	bfa_ioc_lmem_init(ioc);
+ 
+-	/**
+-	 * Flash based firmware boot
+-	 */
+-	if (bfa_ioc_is_optrom(ioc))
+-		boot_type = BFI_BOOT_TYPE_FLASH;
+ 	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
+ 
+ 	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
+@@ -1689,28 +1669,7 @@ bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
+ mac_t
+ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
+ {
+-	/*
+-	 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
+-	 */
+-	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
+-		return bfa_ioc_get_mfg_mac(ioc);
+-	else
+-		return ioc->attr->mac;
+-}
+-
+-static mac_t
+-bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
+-{
+-	mac_t	m;
+-
+-	m = ioc->attr->mfg_mac;
+-	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
+-		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
+-	else
+-		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
+-			bfa_ioc_pcifn(ioc));
+-
+-	return m;
++	return ioc->attr->mac;
+ }
+ 
+ /**
+-- 
+1.7.4.4
+
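
With the option-ROM escape hatch removed, bfa_ioc_fwver_valid() becomes a straight comparison of the flashed image header against the header compiled into the driver. The sketch trims that to an md5-style field compare; fw_image_hdr here is a hypothetical stand-in for the real bfi_ioc_image_hdr, which carries more fields:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct fw_image_hdr {
	uint32_t md5sum[4];	/* hypothetical subset of the real header */
};

static int fwver_valid(const struct fw_image_hdr *flash,
		       const struct fw_image_hdr *driver)
{
	/* An exact match is required; any mismatch forces a re-download
	 * of the firmware image bundled with the driver. */
	return memcmp(flash->md5sum, driver->md5sum,
		      sizeof(flash->md5sum)) == 0;
}

int main(void)
{
	struct fw_image_hdr a = { { 1, 2, 3, 4 } };
	struct fw_image_hdr b = { { 1, 2, 3, 4 } };

	printf("match: %d\n", fwver_valid(&a, &b));
	return 0;
}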

Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0019-bna-Removed-unused-code.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0019-bna-Removed-unused-code.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,393 @@
+From: Rasesh Mody <rmody at brocade.com>
+Date: Thu, 23 Dec 2010 21:45:07 +0000
+Subject: [PATCH 19/23] bna: Removed unused code
+
+commit ce9b9f383775e6de74ca4c93d5c643dc3d76dd3c upstream.
+
+Change Details:
+	- Remove unused APIs and code cleanup
+
+Signed-off-by: Debashis Dutt <ddutt at brocade.com>
+Signed-off-by: Rasesh Mody <rmody at brocade.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/bna/bna_ctrl.c  |  241 +------------------------------------------
+ drivers/net/bna/bna_txrx.c  |   11 +--
+ drivers/net/bna/bna_types.h |    4 +-
+ 3 files changed, 3 insertions(+), 253 deletions(-)
+
+diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
+index 68e4c5e..e152747 100644
+--- a/drivers/net/bna/bna_ctrl.c
++++ b/drivers/net/bna/bna_ctrl.c
+@@ -2132,37 +2132,6 @@ rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
+ 	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
+ }
+ 
+-static void
+-__rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status)
+-{
+-	struct bna_rx_fndb_ram *rx_fndb_ram;
+-	u32 ctrl_flags;
+-	int i;
+-
+-	rx_fndb_ram = (struct bna_rx_fndb_ram *)
+-			BNA_GET_MEM_BASE_ADDR(rxf->rx->bna->pcidev.pci_bar_kva,
+-			RX_FNDB_RAM_BASE_OFFSET);
+-
+-	for (i = 0; i < BFI_MAX_RXF; i++) {
+-		if (status == BNA_STATUS_T_ENABLED) {
+-			if (i == rxf->rxf_id)
+-				continue;
+-
+-			ctrl_flags =
+-				readl(&rx_fndb_ram[i].control_flags);
+-			ctrl_flags |= BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
+-			writel(ctrl_flags,
+-						&rx_fndb_ram[i].control_flags);
+-		} else {
+-			ctrl_flags =
+-				readl(&rx_fndb_ram[i].control_flags);
+-			ctrl_flags &= ~BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
+-			writel(ctrl_flags,
+-						&rx_fndb_ram[i].control_flags);
+-		}
+-	}
+-}
+-
+ int
+ rxf_process_packet_filter_ucast(struct bna_rxf *rxf)
+ {
+@@ -2229,46 +2198,6 @@ rxf_process_packet_filter_promisc(struct bna_rxf *rxf)
+ }
+ 
+ int
+-rxf_process_packet_filter_default(struct bna_rxf *rxf)
+-{
+-	struct bna *bna = rxf->rx->bna;
+-
+-	/* Enable/disable default mode */
+-	if (is_default_enable(rxf->rxmode_pending,
+-				rxf->rxmode_pending_bitmask)) {
+-		/* move default configuration from pending -> active */
+-		default_inactive(rxf->rxmode_pending,
+-				rxf->rxmode_pending_bitmask);
+-		rxf->rxmode_active |= BNA_RXMODE_DEFAULT;
+-
+-		/* Disable VLAN filter to allow all VLANs */
+-		__rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
+-		/* Redirect all other RxF vlan filtering to this one */
+-		__rxf_default_function_config(rxf, BNA_STATUS_T_ENABLED);
+-		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
+-				BNA_STATUS_T_ENABLED);
+-		return 1;
+-	} else if (is_default_disable(rxf->rxmode_pending,
+-				rxf->rxmode_pending_bitmask)) {
+-		/* move default configuration from pending -> active */
+-		default_inactive(rxf->rxmode_pending,
+-				rxf->rxmode_pending_bitmask);
+-		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+-		bna->rxf_default_id = BFI_MAX_RXF;
+-
+-		/* Revert VLAN filter */
+-		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+-		/* Stop RxF vlan filter table redirection */
+-		__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
+-		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
+-				BNA_STATUS_T_DISABLED);
+-		return 1;
+-	}
+-
+-	return 0;
+-}
+-
+-int
+ rxf_process_packet_filter_allmulti(struct bna_rxf *rxf)
+ {
+ 	/* Enable/disable allmulti mode */
+@@ -2365,48 +2294,6 @@ rxf_clear_packet_filter_promisc(struct bna_rxf *rxf)
+ }
+ 
+ int
+-rxf_clear_packet_filter_default(struct bna_rxf *rxf)
+-{
+-	struct bna *bna = rxf->rx->bna;
+-
+-	/* 8. Execute pending default mode disable command */
+-	if (is_default_disable(rxf->rxmode_pending,
+-				rxf->rxmode_pending_bitmask)) {
+-		/* move default configuration from pending -> active */
+-		default_inactive(rxf->rxmode_pending,
+-				rxf->rxmode_pending_bitmask);
+-		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+-		bna->rxf_default_id = BFI_MAX_RXF;
+-
+-		/* Revert VLAN filter */
+-		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+-		/* Stop RxF vlan filter table redirection */
+-		__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
+-		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
+-				BNA_STATUS_T_DISABLED);
+-		return 1;
+-	}
+-
+-	/* 9. Clear active default mode; move it to pending enable */
+-	if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
+-		/* move default configuration from active -> pending */
+-		default_enable(rxf->rxmode_pending,
+-				rxf->rxmode_pending_bitmask);
+-		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+-
+-		/* Revert VLAN filter */
+-		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+-		/* Stop RxF vlan filter table redirection */
+-		__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
+-		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
+-				BNA_STATUS_T_DISABLED);
+-		return 1;
+-	}
+-
+-	return 0;
+-}
+-
+-int
+ rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf)
+ {
+ 	/* 10. Execute pending allmulti mode disable command */
+@@ -2481,28 +2368,6 @@ rxf_reset_packet_filter_promisc(struct bna_rxf *rxf)
+ }
+ 
+ void
+-rxf_reset_packet_filter_default(struct bna_rxf *rxf)
+-{
+-	struct bna *bna = rxf->rx->bna;
+-
+-	/* 8. Clear pending default mode disable */
+-	if (is_default_disable(rxf->rxmode_pending,
+-				rxf->rxmode_pending_bitmask)) {
+-		default_inactive(rxf->rxmode_pending,
+-				rxf->rxmode_pending_bitmask);
+-		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+-		bna->rxf_default_id = BFI_MAX_RXF;
+-	}
+-
+-	/* 9. Move default mode config from active -> pending */
+-	if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
+-		default_enable(rxf->rxmode_pending,
+-				rxf->rxmode_pending_bitmask);
+-		rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
+-	}
+-}
+-
+-void
+ rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
+ {
+ 	/* 10. Clear pending allmulti mode disable */
+@@ -2599,76 +2464,6 @@ rxf_promisc_disable(struct bna_rxf *rxf)
+  *	1 = need h/w change
+  */
+ static int
+-rxf_default_enable(struct bna_rxf *rxf)
+-{
+-	struct bna *bna = rxf->rx->bna;
+-	int ret = 0;
+-
+-	/* There can not be any pending disable command */
+-
+-	/* Do nothing if pending enable or already enabled */
+-	if (is_default_enable(rxf->rxmode_pending,
+-		rxf->rxmode_pending_bitmask) ||
+-		(rxf->rxmode_active & BNA_RXMODE_DEFAULT)) {
+-		/* Schedule enable */
+-	} else {
+-		/* Default mode should not be active in the system */
+-		default_enable(rxf->rxmode_pending,
+-				rxf->rxmode_pending_bitmask);
+-		bna->rxf_default_id = rxf->rxf_id;
+-		ret = 1;
+-	}
+-
+-	return ret;
+-}
+-
+-/**
+- * Should only be called by bna_rxf_mode_set.
+- * Helps deciding if h/w configuration is needed or not.
+- *  Returns:
+- *	0 = no h/w change
+- *	1 = need h/w change
+- */
+-static int
+-rxf_default_disable(struct bna_rxf *rxf)
+-{
+-	struct bna *bna = rxf->rx->bna;
+-	int ret = 0;
+-
+-	/* There can not be any pending disable */
+-
+-	/* Turn off pending enable command , if any */
+-	if (is_default_enable(rxf->rxmode_pending,
+-				rxf->rxmode_pending_bitmask)) {
+-		/* Promisc mode should not be active */
+-		/* system default state should be pending */
+-		default_inactive(rxf->rxmode_pending,
+-				rxf->rxmode_pending_bitmask);
+-		/* Remove the default state from the system */
+-		bna->rxf_default_id = BFI_MAX_RXF;
+-
+-	/* Schedule disable */
+-	} else if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
+-		/* Default mode should be active in the system */
+-		default_disable(rxf->rxmode_pending,
+-				rxf->rxmode_pending_bitmask);
+-		ret = 1;
+-
+-	/* Do nothing if already disabled */
+-	} else {
+-	}
+-
+-	return ret;
+-}
+-
+-/**
+- * Should only be called by bna_rxf_mode_set.
+- * Helps deciding if h/w configuration is needed or not.
+- *  Returns:
+- *	0 = no h/w change
+- *	1 = need h/w change
+- */
+-static int
+ rxf_allmulti_enable(struct bna_rxf *rxf)
+ {
+ 	int ret = 0;
+@@ -2730,38 +2525,13 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
+ 	struct bna_rxf *rxf = &rx->rxf;
+ 	int need_hw_config = 0;
+ 
+-	/* Error checks */
++	/* Process the commands */
+ 
+ 	if (is_promisc_enable(new_mode, bitmask)) {
+ 		/* If promisc mode is already enabled elsewhere in the system */
+ 		if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) &&
+ 			(rx->bna->rxf_promisc_id != rxf->rxf_id))
+ 			goto err_return;
+-
+-		/* If default mode is already enabled in the system */
+-		if (rx->bna->rxf_default_id != BFI_MAX_RXF)
+-			goto err_return;
+-
+-		/* Trying to enable promiscuous and default mode together */
+-		if (is_default_enable(new_mode, bitmask))
+-			goto err_return;
+-	}
+-
+-	if (is_default_enable(new_mode, bitmask)) {
+-		/* If default mode is already enabled elsewhere in the system */
+-		if ((rx->bna->rxf_default_id != BFI_MAX_RXF) &&
+-			(rx->bna->rxf_default_id != rxf->rxf_id)) {
+-				goto err_return;
+-		}
+-
+-		/* If promiscuous mode is already enabled in the system */
+-		if (rx->bna->rxf_promisc_id != BFI_MAX_RXF)
+-			goto err_return;
+-	}
+-
+-	/* Process the commands */
+-
+-	if (is_promisc_enable(new_mode, bitmask)) {
+ 		if (rxf_promisc_enable(rxf))
+ 			need_hw_config = 1;
+ 	} else if (is_promisc_disable(new_mode, bitmask)) {
+@@ -2769,14 +2539,6 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
+ 			need_hw_config = 1;
+ 	}
+ 
+-	if (is_default_enable(new_mode, bitmask)) {
+-		if (rxf_default_enable(rxf))
+-			need_hw_config = 1;
+-	} else if (is_default_disable(new_mode, bitmask)) {
+-		if (rxf_default_disable(rxf))
+-			need_hw_config = 1;
+-	}
+-
+ 	if (is_allmulti_enable(new_mode, bitmask)) {
+ 		if (rxf_allmulti_enable(rxf))
+ 			need_hw_config = 1;
+@@ -3202,7 +2964,6 @@ bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev,
+ 
+ 	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
+ 
+-	bna->rxf_default_id = BFI_MAX_RXF;
+ 	bna->rxf_promisc_id = BFI_MAX_RXF;
+ 
+ 	/* Mbox q element for posting stat request to f/w */
+diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c
+index 5076618..58c7664 100644
+--- a/drivers/net/bna/bna_txrx.c
++++ b/drivers/net/bna/bna_txrx.c
+@@ -1226,8 +1226,7 @@ rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
+ 	/* Apply the VLAN filter */
+ 	if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
+ 		rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
+-		if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC) &&
+-			!(rxf->rxmode_active & BNA_RXMODE_DEFAULT))
++		if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))
+ 			__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
+ 	}
+ 
+@@ -1276,9 +1275,6 @@ rxf_process_packet_filter(struct bna_rxf *rxf)
+ 	if (rxf_process_packet_filter_promisc(rxf))
+ 		return 1;
+ 
+-	if (rxf_process_packet_filter_default(rxf))
+-		return 1;
+-
+ 	if (rxf_process_packet_filter_allmulti(rxf))
+ 		return 1;
+ 
+@@ -1340,9 +1336,6 @@ rxf_clear_packet_filter(struct bna_rxf *rxf)
+ 	if (rxf_clear_packet_filter_promisc(rxf))
+ 		return 1;
+ 
+-	if (rxf_clear_packet_filter_default(rxf))
+-		return 1;
+-
+ 	if (rxf_clear_packet_filter_allmulti(rxf))
+ 		return 1;
+ 
+@@ -1389,8 +1382,6 @@ rxf_reset_packet_filter(struct bna_rxf *rxf)
+ 
+ 	rxf_reset_packet_filter_promisc(rxf);
+ 
+-	rxf_reset_packet_filter_default(rxf);
+-
+ 	rxf_reset_packet_filter_allmulti(rxf);
+ }
+ 
+diff --git a/drivers/net/bna/bna_types.h b/drivers/net/bna/bna_types.h
+index d79fd98..b9c134f 100644
+--- a/drivers/net/bna/bna_types.h
++++ b/drivers/net/bna/bna_types.h
+@@ -165,8 +165,7 @@ enum bna_rxp_type {
+ 
+ enum bna_rxmode {
+ 	BNA_RXMODE_PROMISC 	= 1,
+-	BNA_RXMODE_DEFAULT 	= 2,
+-	BNA_RXMODE_ALLMULTI 	= 4
++	BNA_RXMODE_ALLMULTI 	= 2
+ };
+ 
+ enum bna_rx_event {
+@@ -1118,7 +1117,6 @@ struct bna {
+ 
+ 	struct bna_rit_mod rit_mod;
+ 
+-	int			rxf_default_id;
+ 	int			rxf_promisc_id;
+ 
+ 	struct bna_mbox_qe mbox_qe;
+-- 
+1.7.4.4
+

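With the default receive mode removed, promiscuous mode is the only receive mode left that needs system-wide arbitration, and bna_rx_mode_set() keeps just the rxf_promisc_id ownership check. A minimal sketch of that surviving pattern, assuming (as in the hunks above) that rxf_promisc_id names the owning RxF and BFI_MAX_RXF is the "unowned" sentinel:

	/* Sketch, not the driver's exact code: single-owner
	 * arbitration for promiscuous mode. */
	static int promisc_claim(struct bna *bna, int rxf_id)
	{
		/* Fail if another RxF already owns promiscuous mode. */
		if (bna->rxf_promisc_id != BFI_MAX_RXF &&
		    bna->rxf_promisc_id != rxf_id)
			return -1;

		bna->rxf_promisc_id = rxf_id;	/* claim or re-claim */
		return 0;
	}
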
Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0020-bna-Restore-VLAN-filter-table.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0020-bna-Restore-VLAN-filter-table.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,77 @@
+From: Rasesh Mody <rmody at brocade.com>
+Date: Thu, 23 Dec 2010 21:45:08 +0000
+Subject: [PATCH 20/23] bna: Restore VLAN filter table
+
+commit aad75b66f1d3784514351f06bc589c55d5325bc8 upstream.
+
+Change Details:
+	- Retrieve the VLAN configuration from the networking stack
+	and apply it to the base interface during ifconfig up (a
+	short note follows this patch)
+
+Signed-off-by: Debashis Dutt <ddutt at brocade.com>
+Signed-off-by: Rasesh Mody <rmody at brocade.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+[bwh: Backport to 2.6.32: VLAN_N_VID => VLAN_GROUP_ARRAY_LEN]
+---
+ drivers/net/bna/bnad.c |   29 +++++++++++++++++++++++++++--
+ 1 files changed, 27 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/bna/bnad.c
++++ b/drivers/net/bna/bnad.c
+@@ -566,7 +566,8 @@ bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
+ {
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&bnad->bna_lock, flags); /* Because of polling context */
++	/* Because of polling context */
++	spin_lock_irqsave(&bnad->bna_lock, flags);
+ 	bnad_enable_rx_irq_unsafe(ccb);
+ 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ }
+@@ -1962,6 +1963,27 @@ bnad_enable_default_bcast(struct bnad *bnad)
+ 	return 0;
+ }
+ 
++/* Called with bnad_conf_lock() held */
++static void
++bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
++{
++	u16 vlan_id;
++	unsigned long flags;
++
++	if (!bnad->vlan_grp)
++		return;
++
++	BUG_ON(!(VLAN_GROUP_ARRAY_LEN == (BFI_MAX_VLAN + 1)));
++
++	for (vlan_id = 0; vlan_id < VLAN_GROUP_ARRAY_LEN; vlan_id++) {
++		if (!vlan_group_get_device(bnad->vlan_grp, vlan_id))
++			continue;
++		spin_lock_irqsave(&bnad->bna_lock, flags);
++		bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vlan_id);
++		spin_unlock_irqrestore(&bnad->bna_lock, flags);
++	}
++}
++
+ /* Statistics utilities */
+ void
+ bnad_netdev_qstats_fill(struct bnad *bnad)
+@@ -2337,6 +2359,9 @@ bnad_open(struct net_device *netdev)
+ 	/* Enable broadcast */
+ 	bnad_enable_default_bcast(bnad);
+ 
++	/* Restore VLANs, if any */
++	bnad_restore_vlans(bnad, 0);
++
+ 	/* Set the UCAST address */
+ 	spin_lock_irqsave(&bnad->bna_lock, flags);
+ 	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
+@@ -3021,7 +3046,7 @@ static int __devinit
+ bnad_pci_probe(struct pci_dev *pdev,
+ 		const struct pci_device_id *pcidev_id)
+ {
+-	bool 	using_dac;
++	bool 	using_dac = false;
+ 	int 	err;
+ 	struct bnad *bnad;
+ 	struct bna *bna;

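bnad_restore_vlans() in the patch above walks every possible VLAN id and re-adds those the stack still has registered, bringing the hardware filter table back in line with software state when the interface comes up. Its BUG_ON() only fires at runtime; a hypothetical compile-time variant at the top of the same function would be:

	/* Hypothetical alternative to the runtime check: break the
	 * build if the kernel's VLAN table size ever diverges from
	 * the firmware's BFI_MAX_VLAN + 1 filter entries. */
	BUILD_BUG_ON(VLAN_GROUP_ARRAY_LEN != (BFI_MAX_VLAN + 1));
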
Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0021-bna-IOC-failure-auto-recovery-fix.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0021-bna-IOC-failure-auto-recovery-fix.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,2062 @@
+From: Rasesh Mody <rmody at brocade.com>
+Date: Thu, 23 Dec 2010 21:45:09 +0000
+Subject: [PATCH 21/23] bna: IOC failure auto recovery fix
+
+commit 1d32f7696286eef9e5644eb57e79a36756274357 upstream.
+
+Change Details:
+	- Made IOC auto_recovery synchronized rather than timer based.
+	- Only one PCI function will attempt to recover and reinitialize
+	the ASIC on a failure, and only after all the active PCI
+	functions have acknowledged the IOC failure (see the sketch
+	after the diffstat below).
+
+Signed-off-by: Debashis Dutt <ddutt at brocade.com>
+Signed-off-by: Rasesh Mody <rmody at brocade.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/bna/bfa_defs.h   |   22 +-
+ drivers/net/bna/bfa_ioc.c    | 1178 +++++++++++++++++++++++++++++++----------
+ drivers/net/bna/bfa_ioc.h    |   49 +-
+ drivers/net/bna/bfa_ioc_ct.c |  102 ++++-
+ drivers/net/bna/bfi_ctreg.h  |   41 +-
+ drivers/net/bna/bna.h        |    2 -
+ drivers/net/bna/bnad.c       |   21 +-
+ 7 files changed, 1063 insertions(+), 352 deletions(-)
+
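The recovery scheme the change details describe can be pictured as follows; this is an illustration under assumptions, not driver code. Suppose each active PCI function owns one bit in a shared sync register, manipulated through the ioc_sync_join/leave/ack/complete callbacks added to struct bfa_ioc_hwif later in this patch (the real register accessors are in bfa_ioc_ct.c, not quoted here):

	/* Assumed layout: bit N set => PCI function N has joined
	 * and has not yet acknowledged the current IOC failure. */
	#define SYNC_BIT(fn)	(1u << (fn))

	static int sync_complete(unsigned int sync_reg,
				 unsigned int active_fns, int my_fn)
	{
		/* Recovery may proceed only once every other active
		 * function has acknowledged (cleared its bit). */
		return (sync_reg & active_fns & ~SYNC_BIT(my_fn)) == 0;
	}

Only the function that observes a complete sync reinitializes the ASIC; the others wait in the *_sync states of the IOCPF machine.
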
+diff --git a/drivers/net/bna/bfa_defs.h b/drivers/net/bna/bfa_defs.h
+index 29c1b8d..2ea0dfe 100644
+--- a/drivers/net/bna/bfa_defs.h
++++ b/drivers/net/bna/bfa_defs.h
+@@ -112,16 +112,18 @@ struct bfa_ioc_pci_attr {
+  * IOC states
+  */
+ enum bfa_ioc_state {
+-	BFA_IOC_RESET		= 1,	/*!< IOC is in reset state */
+-	BFA_IOC_SEMWAIT		= 2,	/*!< Waiting for IOC h/w semaphore */
+-	BFA_IOC_HWINIT		= 3,	/*!< IOC h/w is being initialized */
+-	BFA_IOC_GETATTR		= 4,	/*!< IOC is being configured */
+-	BFA_IOC_OPERATIONAL	= 5,	/*!< IOC is operational */
+-	BFA_IOC_INITFAIL	= 6,	/*!< IOC hardware failure */
+-	BFA_IOC_HBFAIL		= 7,	/*!< IOC heart-beat failure */
+-	BFA_IOC_DISABLING	= 8,	/*!< IOC is being disabled */
+-	BFA_IOC_DISABLED	= 9,	/*!< IOC is disabled */
+-	BFA_IOC_FWMISMATCH	= 10,	/*!< IOC f/w different from drivers */
++	BFA_IOC_UNINIT		= 1,	/*!< IOC is in uninit state */
++	BFA_IOC_RESET		= 2,	/*!< IOC is in reset state */
++	BFA_IOC_SEMWAIT		= 3,	/*!< Waiting for IOC h/w semaphore */
++	BFA_IOC_HWINIT		= 4,	/*!< IOC h/w is being initialized */
++	BFA_IOC_GETATTR		= 5,	/*!< IOC is being configured */
++	BFA_IOC_OPERATIONAL	= 6,	/*!< IOC is operational */
++	BFA_IOC_INITFAIL	= 7,	/*!< IOC hardware failure */
++	BFA_IOC_FAIL		= 8,	/*!< IOC heart-beat failure */
++	BFA_IOC_DISABLING	= 9,	/*!< IOC is being disabled */
++	BFA_IOC_DISABLED	= 10,	/*!< IOC is disabled */
++	BFA_IOC_FWMISMATCH	= 11,	/*!< IOC f/w different from drivers */
++	BFA_IOC_ENABLING	= 12,	/*!< IOC is being enabled */
+ };
+ 
+ /**
+diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
+index 8ed147e..34933cb 100644
+--- a/drivers/net/bna/bfa_ioc.c
++++ b/drivers/net/bna/bfa_ioc.c
+@@ -26,25 +26,6 @@
+  * IOC local definitions
+  */
+ 
+-#define bfa_ioc_timer_start(__ioc)					\
+-	mod_timer(&(__ioc)->ioc_timer, jiffies +	\
+-			msecs_to_jiffies(BFA_IOC_TOV))
+-#define bfa_ioc_timer_stop(__ioc)   del_timer(&(__ioc)->ioc_timer)
+-
+-#define bfa_ioc_recovery_timer_start(__ioc)				\
+-	mod_timer(&(__ioc)->ioc_timer, jiffies +	\
+-			msecs_to_jiffies(BFA_IOC_TOV_RECOVER))
+-
+-#define bfa_sem_timer_start(__ioc)					\
+-	mod_timer(&(__ioc)->sem_timer, jiffies +	\
+-			msecs_to_jiffies(BFA_IOC_HWSEM_TOV))
+-#define bfa_sem_timer_stop(__ioc)	del_timer(&(__ioc)->sem_timer)
+-
+-#define bfa_hb_timer_start(__ioc)					\
+-	mod_timer(&(__ioc)->hb_timer, jiffies +		\
+-			msecs_to_jiffies(BFA_IOC_HB_TOV))
+-#define bfa_hb_timer_stop(__ioc)	del_timer(&(__ioc)->hb_timer)
+-
+ /**
+  * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
+  */
+@@ -55,8 +36,16 @@
+ 			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
+ #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
+ #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
+-#define bfa_ioc_notify_hbfail(__ioc)			\
+-			((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
++#define bfa_ioc_notify_fail(__ioc)			\
++			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
++#define bfa_ioc_sync_join(__ioc)			\
++			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
++#define bfa_ioc_sync_leave(__ioc)			\
++			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
++#define bfa_ioc_sync_ack(__ioc)				\
++			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
++#define bfa_ioc_sync_complete(__ioc)			\
++			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
+ 
+ #define bfa_ioc_mbox_cmd_pending(__ioc)		\
+ 			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
+@@ -82,6 +71,12 @@ static void bfa_ioc_recover(struct bfa_ioc *ioc);
+ static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
+ static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
+ static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
++static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
++static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
++static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
++static void bfa_ioc_pf_initfailed(struct bfa_ioc *ioc);
++static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
++static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
+ static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
+ 			 u32 boot_param);
+ static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
+@@ -100,69 +95,171 @@ static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
+ static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
+ 
+ /**
+- * IOC state machine events
++ * IOC state machine definitions/declarations
+  */
+ enum ioc_event {
+-	IOC_E_ENABLE		= 1,	/*!< IOC enable request		*/
+-	IOC_E_DISABLE		= 2,	/*!< IOC disable request	*/
+-	IOC_E_TIMEOUT		= 3,	/*!< f/w response timeout	*/
+-	IOC_E_FWREADY		= 4,	/*!< f/w initialization done	*/
+-	IOC_E_FWRSP_GETATTR	= 5,	/*!< IOC get attribute response	*/
+-	IOC_E_FWRSP_ENABLE	= 6,	/*!< enable f/w response	*/
+-	IOC_E_FWRSP_DISABLE	= 7,	/*!< disable f/w response	*/
+-	IOC_E_HBFAIL		= 8,	/*!< heartbeat failure		*/
+-	IOC_E_HWERROR		= 9,	/*!< hardware error interrupt	*/
+-	IOC_E_SEMLOCKED		= 10,	/*!< h/w semaphore is locked	*/
+-	IOC_E_DETACH		= 11,	/*!< driver detach cleanup	*/
++	IOC_E_RESET		= 1,	/*!< IOC reset request		*/
++	IOC_E_ENABLE		= 2,	/*!< IOC enable request		*/
++	IOC_E_DISABLE		= 3,	/*!< IOC disable request	*/
++	IOC_E_DETACH		= 4,	/*!< driver detach cleanup	*/
++	IOC_E_ENABLED		= 5,	/*!< f/w enabled		*/
++	IOC_E_FWRSP_GETATTR	= 6,	/*!< IOC get attribute response	*/
++	IOC_E_DISABLED		= 7,	/*!< f/w disabled		*/
++	IOC_E_INITFAILED	= 8,	/*!< failure notice by iocpf sm	*/
++	IOC_E_PFAILED		= 9,	/*!< failure notice by iocpf sm	*/
++	IOC_E_HBFAIL		= 10,	/*!< heartbeat failure		*/
++	IOC_E_HWERROR		= 11,	/*!< hardware error interrupt	*/
++	IOC_E_TIMEOUT		= 12,	/*!< timeout			*/
+ };
+ 
++bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
+ bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
+-bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event);
+-bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event);
+-bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event);
+-bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event);
+ bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
+ bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
+ bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
+-bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event);
+-bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event);
++bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
++bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
+ bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
+ bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
+ 
+ static struct bfa_sm_table ioc_sm_table[] = {
++	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
+ 	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
+-	{BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
+-	{BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
+-	{BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
+-	{BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
+-	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
++	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
+ 	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
+ 	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
+-	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
+-	{BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
++	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
++	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
+ 	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
+ 	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
+ };
+ 
+ /**
++ * IOCPF state machine definitions/declarations
++ */
++
++/*
++ * Forward declarations for iocpf state machine
++ */
++static void bfa_iocpf_enable(struct bfa_ioc *ioc);
++static void bfa_iocpf_disable(struct bfa_ioc *ioc);
++static void bfa_iocpf_fail(struct bfa_ioc *ioc);
++static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
++static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
++static void bfa_iocpf_stop(struct bfa_ioc *ioc);
++
++/**
++ * IOCPF state machine events
++ */
++enum iocpf_event {
++	IOCPF_E_ENABLE		= 1,	/*!< IOCPF enable request	*/
++	IOCPF_E_DISABLE		= 2,	/*!< IOCPF disable request	*/
++	IOCPF_E_STOP		= 3,	/*!< stop on driver detach	*/
++	IOCPF_E_FWREADY	 	= 4,	/*!< f/w initialization done	*/
++	IOCPF_E_FWRSP_ENABLE	= 5,	/*!< enable f/w response	*/
++	IOCPF_E_FWRSP_DISABLE	= 6,	/*!< disable f/w response	*/
++	IOCPF_E_FAIL		= 7,	/*!< failure notice by ioc sm	*/
++	IOCPF_E_INITFAIL	= 8,	/*!< init fail notice by ioc sm	*/
++	IOCPF_E_GETATTRFAIL	= 9,	/*!< init fail notice by ioc sm	*/
++	IOCPF_E_SEMLOCKED	= 10,   /*!< h/w semaphore is locked	*/
++	IOCPF_E_TIMEOUT		= 11,   /*!< f/w response timeout	*/
++};
++
++/**
++ * IOCPF states
++ */
++enum bfa_iocpf_state {
++	BFA_IOCPF_RESET		= 1,	/*!< IOC is in reset state */
++	BFA_IOCPF_SEMWAIT	= 2,	/*!< Waiting for IOC h/w semaphore */
++	BFA_IOCPF_HWINIT	= 3,	/*!< IOC h/w is being initialized */
++	BFA_IOCPF_READY		= 4,	/*!< IOCPF is initialized */
++	BFA_IOCPF_INITFAIL	= 5,	/*!< IOCPF failed */
++	BFA_IOCPF_FAIL		= 6,	/*!< IOCPF failed */
++	BFA_IOCPF_DISABLING	= 7,	/*!< IOCPF is being disabled */
++	BFA_IOCPF_DISABLED	= 8,	/*!< IOCPF is disabled */
++	BFA_IOCPF_FWMISMATCH	= 9,	/*!< IOC f/w different from drivers */
++};
++
++bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
++bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
++bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
++bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
++bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
++bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
++bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
++bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
++						enum iocpf_event);
++bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
++bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
++bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
++bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
++bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
++						enum iocpf_event);
++bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
++
++static struct bfa_sm_table iocpf_sm_table[] = {
++	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
++	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
++	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
++	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
++	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
++	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
++	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
++	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
++	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
++	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
++	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
++	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
++	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
++	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
++};
++
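Each bfa_fsm_state_decl() line above declares the entry/handler pair that the state definitions further down provide. A plausible expansion for the reset state, assuming the macro follows the usual two-function BFA FSM convention (the macro lives in a header this patch does not touch):

	/* Assumed expansion of
	 * bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf,
	 *                    enum iocpf_event): */
	static void bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf);
	static void bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf,
				       enum iocpf_event event);

bfa_fsm_set_state() is then expected to record the handler and invoke the matching _entry function, which is why every state below defines both.
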
++/**
++ * IOC State Machine
++ */
++
++/**
++ * Beginning state. IOC uninit state.
++ */
++static void
++bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
++{
++}
++
++/**
++ * IOC is in uninit state.
++ */
++static void
++bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++	case IOC_E_RESET:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++/**
+  * Reset entry actions -- initialize state machine
+  */
+ static void
+ bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
+ {
+-	ioc->retry_count = 0;
+-	ioc->auto_recover = bfa_nw_auto_recover;
++	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
+ }
+ 
+ /**
+- * Beginning state. IOC is in reset state.
++ * IOC is in reset state.
+  */
+ static void
+ bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
+ {
+ 	switch (event) {
+ 	case IOC_E_ENABLE:
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
+ 		break;
+ 
+ 	case IOC_E_DISABLE:
+@@ -170,6 +267,51 @@ bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
+ 		break;
+ 
+ 	case IOC_E_DETACH:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++static void
++bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
++{
++	bfa_iocpf_enable(ioc);
++}
++
++/**
++ * Host IOC function is being enabled, awaiting response from firmware.
++ * Semaphore is acquired.
++ */
++static void
++bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++	case IOC_E_ENABLED:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
++		break;
++
++	case IOC_E_PFAILED:
++		/* !!! fall through !!! */
++	case IOC_E_HWERROR:
++		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
++		if (event != IOC_E_PFAILED)
++			bfa_iocpf_initfail(ioc);
++		break;
++
++	case IOC_E_DISABLE:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
++		break;
++
++	case IOC_E_DETACH:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
++		bfa_iocpf_stop(ioc);
++		break;
++
++	case IOC_E_ENABLE:
+ 		break;
+ 
+ 	default:
+@@ -181,38 +323,310 @@ bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
+  * Semaphore should be acquired for version check.
+  */
+ static void
+-bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc)
++bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
++{
++	mod_timer(&ioc->ioc_timer, jiffies +
++		msecs_to_jiffies(BFA_IOC_TOV));
++	bfa_ioc_send_getattr(ioc);
++}
++
++/**
++ * IOC configuration in progress. Timer is active.
++ */
++static void
++bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++	case IOC_E_FWRSP_GETATTR:
++		del_timer(&ioc->ioc_timer);
++		bfa_ioc_check_attr_wwns(ioc);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
++		break;
++
++	case IOC_E_PFAILED:
++	case IOC_E_HWERROR:
++		del_timer(&ioc->ioc_timer);
++		/* fall through */
++	case IOC_E_TIMEOUT:
++		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
++		if (event != IOC_E_PFAILED)
++			bfa_iocpf_getattrfail(ioc);
++		break;
++
++	case IOC_E_DISABLE:
++		del_timer(&ioc->ioc_timer);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
++		break;
++
++	case IOC_E_ENABLE:
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++static void
++bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
++{
++	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
++	bfa_ioc_hb_monitor(ioc);
++}
++
++static void
++bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++	case IOC_E_ENABLE:
++		break;
++
++	case IOC_E_DISABLE:
++		bfa_ioc_hb_stop(ioc);
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
++		break;
++
++	case IOC_E_PFAILED:
++	case IOC_E_HWERROR:
++		bfa_ioc_hb_stop(ioc);
++		/* !!! fall through !!! */
++	case IOC_E_HBFAIL:
++		bfa_ioc_fail_notify(ioc);
++		if (ioc->iocpf.auto_recover)
++			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
++		else
++			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
++
++		if (event != IOC_E_PFAILED)
++			bfa_iocpf_fail(ioc);
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++static void
++bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
++{
++	bfa_iocpf_disable(ioc);
++}
++
++/**
++ * IOC is being disabled
++ */
++static void
++bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++	case IOC_E_DISABLED:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
++		break;
++
++	case IOC_E_HWERROR:
++		/*
++		 * No state change.  The IOC moves to the disabled
++		 * state once the iocpf sm completes its failure
++		 * processing.
++		 */
++		bfa_iocpf_fail(ioc);
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++/**
++ * IOC disable completion entry.
++ */
++static void
++bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
++{
++	bfa_ioc_disable_comp(ioc);
++}
++
++static void
++bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++	case IOC_E_ENABLE:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
++		break;
++
++	case IOC_E_DISABLE:
++		ioc->cbfn->disable_cbfn(ioc->bfa);
++		break;
++
++	case IOC_E_DETACH:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
++		bfa_iocpf_stop(ioc);
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++static void
++bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
++{
++}
++
++/**
++ * Hardware initialization retry.
++ */
++static void
++bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++	case IOC_E_ENABLED:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
++		break;
++
++	case IOC_E_PFAILED:
++	case IOC_E_HWERROR:
++		/**
++		 * Initialization retry failed.
++		 */
++		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
++		if (event != IOC_E_PFAILED)
++			bfa_iocpf_initfail(ioc);
++		break;
++
++	case IOC_E_INITFAILED:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
++		break;
++
++	case IOC_E_ENABLE:
++		break;
++
++	case IOC_E_DISABLE:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
++		break;
++
++	case IOC_E_DETACH:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
++		bfa_iocpf_stop(ioc);
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++static void
++bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
+ {
+-	bfa_ioc_hw_sem_get(ioc);
++}
++
++/**
++ * IOC failure.
++ */
++static void
++bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
++{
++	switch (event) {
++	case IOC_E_ENABLE:
++		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
++		break;
++
++	case IOC_E_DISABLE:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
++		break;
++
++	case IOC_E_DETACH:
++		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
++		bfa_iocpf_stop(ioc);
++		break;
++
++	case IOC_E_HWERROR:
++		/* HB failure notification, ignore. */
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
++
++/**
++ * IOCPF State Machine
++ */
++
++/**
++ * Reset entry actions -- initialize state machine
++ */
++static void
++bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
++{
++	iocpf->retry_count = 0;
++	iocpf->auto_recover = bfa_nw_auto_recover;
++}
++
++/**
++ * Beginning state. IOC is in reset state.
++ */
++static void
++bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
++{
++	switch (event) {
++	case IOCPF_E_ENABLE:
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
++		break;
++
++	case IOCPF_E_STOP:
++		break;
++
++	default:
++		bfa_sm_fault(iocpf->ioc, event);
++	}
++}
++
++/**
++ * Semaphore should be acquired for version check.
++ */
++static void
++bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
++{
++	bfa_ioc_hw_sem_get(iocpf->ioc);
+ }
+ 
+ /**
+  * Awaiting h/w semaphore to continue with version check.
+  */
+ static void
+-bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
++bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
+ {
++	struct bfa_ioc *ioc = iocpf->ioc;
++
+ 	switch (event) {
+-	case IOC_E_SEMLOCKED:
++	case IOCPF_E_SEMLOCKED:
+ 		if (bfa_ioc_firmware_lock(ioc)) {
+-			ioc->retry_count = 0;
+-			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
++			if (bfa_ioc_sync_complete(ioc)) {
++				iocpf->retry_count = 0;
++				bfa_ioc_sync_join(ioc);
++				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
++			} else {
++				bfa_ioc_firmware_unlock(ioc);
++				bfa_nw_ioc_hw_sem_release(ioc);
++				mod_timer(&ioc->sem_timer, jiffies +
++					msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
++			}
+ 		} else {
+ 			bfa_nw_ioc_hw_sem_release(ioc);
+-			bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
++			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
+ 		}
+ 		break;
+ 
+-	case IOC_E_DISABLE:
+-		bfa_ioc_disable_comp(ioc);
+-		/* fall through */
+-
+-	case IOC_E_DETACH:
++	case IOCPF_E_DISABLE:
+ 		bfa_ioc_hw_sem_get_cancel(ioc);
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
++		bfa_ioc_pf_disabled(ioc);
+ 		break;
+ 
+-	case IOC_E_FWREADY:
++	case IOCPF_E_STOP:
++		bfa_ioc_hw_sem_get_cancel(ioc);
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ 		break;
+ 
+ 	default:
+@@ -221,41 +635,42 @@ bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
+ }
+ 
+ /**
+- * Notify enable completion callback and generate mismatch AEN.
++ * Notify enable completion callback
+  */
+ static void
+-bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc)
++bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
+ {
+-	/**
+-	 * Provide enable completion callback and AEN notification only once.
+-	 */
+-	if (ioc->retry_count == 0)
+-		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+-	ioc->retry_count++;
+-	bfa_ioc_timer_start(ioc);
++	/* Call only the first time sm enters fwmismatch state. */
++	if (iocpf->retry_count == 0)
++		bfa_ioc_pf_fwmismatch(iocpf->ioc);
++
++	iocpf->retry_count++;
++	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
++		msecs_to_jiffies(BFA_IOC_TOV));
+ }
+ 
+ /**
+  * Awaiting firmware version match.
+  */
+ static void
+-bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event)
++bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
+ {
++	struct bfa_ioc *ioc = iocpf->ioc;
++
+ 	switch (event) {
+-	case IOC_E_TIMEOUT:
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
++	case IOCPF_E_TIMEOUT:
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
+ 		break;
+ 
+-	case IOC_E_DISABLE:
+-		bfa_ioc_disable_comp(ioc);
+-		/* fall through */
+-
+-	case IOC_E_DETACH:
+-		bfa_ioc_timer_stop(ioc);
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
++	case IOCPF_E_DISABLE:
++		del_timer(&ioc->iocpf_timer);
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
++		bfa_ioc_pf_disabled(ioc);
+ 		break;
+ 
+-	case IOC_E_FWREADY:
++	case IOCPF_E_STOP:
++		del_timer(&ioc->iocpf_timer);
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ 		break;
+ 
+ 	default:
+@@ -267,26 +682,34 @@ bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event)
+  * Request for semaphore.
+  */
+ static void
+-bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc)
++bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
+ {
+-	bfa_ioc_hw_sem_get(ioc);
++	bfa_ioc_hw_sem_get(iocpf->ioc);
+ }
+ 
+ /**
+  * Awaiting semaphore for h/w initialization.
+  */
+ static void
+-bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event)
++bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
+ {
++	struct bfa_ioc *ioc = iocpf->ioc;
++
+ 	switch (event) {
+-	case IOC_E_SEMLOCKED:
+-		ioc->retry_count = 0;
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
++	case IOCPF_E_SEMLOCKED:
++		if (bfa_ioc_sync_complete(ioc)) {
++			bfa_ioc_sync_join(ioc);
++			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
++		} else {
++			bfa_nw_ioc_hw_sem_release(ioc);
++			mod_timer(&ioc->sem_timer, jiffies +
++				msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
++		}
+ 		break;
+ 
+-	case IOC_E_DISABLE:
++	case IOCPF_E_DISABLE:
+ 		bfa_ioc_hw_sem_get_cancel(ioc);
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+ 		break;
+ 
+ 	default:
+@@ -295,46 +718,46 @@ bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event)
+ }
+ 
+ static void
+-bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc)
++bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
+ {
+-	bfa_ioc_timer_start(ioc);
+-	bfa_ioc_reset(ioc, false);
++	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
++		msecs_to_jiffies(BFA_IOC_TOV));
++	bfa_ioc_reset(iocpf->ioc, 0);
+ }
+ 
+ /**
+- * @brief
+  * Hardware is being initialized. Interrupts are enabled.
+  * Holding hardware semaphore lock.
+  */
+ static void
+-bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
++bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
+ {
++	struct bfa_ioc *ioc = iocpf->ioc;
++
+ 	switch (event) {
+-	case IOC_E_FWREADY:
+-		bfa_ioc_timer_stop(ioc);
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
++	case IOCPF_E_FWREADY:
++		del_timer(&ioc->iocpf_timer);
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
+ 		break;
+ 
+-	case IOC_E_HWERROR:
+-		bfa_ioc_timer_stop(ioc);
+-		/* fall through */
+-
+-	case IOC_E_TIMEOUT:
+-		ioc->retry_count++;
+-		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
+-			bfa_ioc_timer_start(ioc);
+-			bfa_ioc_reset(ioc, true);
+-			break;
+-		}
++	case IOCPF_E_INITFAIL:
++		del_timer(&ioc->iocpf_timer);
++		/*
++		 * !!! fall through !!!
++		 */
+ 
++	case IOCPF_E_TIMEOUT:
+ 		bfa_nw_ioc_hw_sem_release(ioc);
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
++		if (event == IOCPF_E_TIMEOUT)
++			bfa_ioc_pf_failed(ioc);
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
+ 		break;
+ 
+-	case IOC_E_DISABLE:
++	case IOCPF_E_DISABLE:
++		del_timer(&ioc->iocpf_timer);
++		bfa_ioc_sync_leave(ioc);
+ 		bfa_nw_ioc_hw_sem_release(ioc);
+-		bfa_ioc_timer_stop(ioc);
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+ 		break;
+ 
+ 	default:
+@@ -343,10 +766,11 @@ bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
+ }
+ 
+ static void
+-bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
++bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
+ {
+-	bfa_ioc_timer_start(ioc);
+-	bfa_ioc_send_enable(ioc);
++	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
++		msecs_to_jiffies(BFA_IOC_TOV));
++	bfa_ioc_send_enable(iocpf->ioc);
+ }
+ 
+ /**
+@@ -354,39 +778,36 @@ bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
+  * Semaphore is acquired.
+  */
+ static void
+-bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
++bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
+ {
++	struct bfa_ioc *ioc = iocpf->ioc;
++
+ 	switch (event) {
+-	case IOC_E_FWRSP_ENABLE:
+-		bfa_ioc_timer_stop(ioc);
++	case IOCPF_E_FWRSP_ENABLE:
++		del_timer(&ioc->iocpf_timer);
+ 		bfa_nw_ioc_hw_sem_release(ioc);
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
+ 		break;
+ 
+-	case IOC_E_HWERROR:
+-		bfa_ioc_timer_stop(ioc);
+-		/* fall through */
+-
+-	case IOC_E_TIMEOUT:
+-		ioc->retry_count++;
+-		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
+-			writel(BFI_IOC_UNINIT,
+-				      ioc->ioc_regs.ioc_fwstate);
+-			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
+-			break;
+-		}
+-
++	case IOCPF_E_INITFAIL:
++		del_timer(&ioc->iocpf_timer);
++		/*
++		 * !!! fall through !!!
++		 */
++	case IOCPF_E_TIMEOUT:
+ 		bfa_nw_ioc_hw_sem_release(ioc);
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
++		if (event == IOCPF_E_TIMEOUT)
++			bfa_ioc_pf_failed(ioc);
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
+ 		break;
+ 
+-	case IOC_E_DISABLE:
+-		bfa_ioc_timer_stop(ioc);
++	case IOCPF_E_DISABLE:
++		del_timer(&ioc->iocpf_timer);
+ 		bfa_nw_ioc_hw_sem_release(ioc);
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
+ 		break;
+ 
+-	case IOC_E_FWREADY:
++	case IOCPF_E_FWREADY:
+ 		bfa_ioc_send_enable(ioc);
+ 		break;
+ 
+@@ -395,38 +816,42 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
+ 	}
+ }
+ 
++static bool
++bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
++{
++	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
++}
++
+ static void
+-bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
++bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
+ {
+-	bfa_ioc_timer_start(ioc);
+-	bfa_ioc_send_getattr(ioc);
++	bfa_ioc_pf_enabled(iocpf->ioc);
+ }
+ 
+-/**
+- * @brief
+- * IOC configuration in progress. Timer is active.
+- */
+ static void
+-bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
++bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
+ {
++	struct bfa_ioc *ioc = iocpf->ioc;
++
+ 	switch (event) {
+-	case IOC_E_FWRSP_GETATTR:
+-		bfa_ioc_timer_stop(ioc);
+-		bfa_ioc_check_attr_wwns(ioc);
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
++	case IOCPF_E_DISABLE:
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
+ 		break;
+ 
+-	case IOC_E_HWERROR:
+-		bfa_ioc_timer_stop(ioc);
+-		/* fall through */
++	case IOCPF_E_GETATTRFAIL:
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
++		break;
+ 
+-	case IOC_E_TIMEOUT:
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
++	case IOCPF_E_FAIL:
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
+ 		break;
+ 
+-	case IOC_E_DISABLE:
+-		bfa_ioc_timer_stop(ioc);
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
++	case IOCPF_E_FWREADY:
++		bfa_ioc_pf_failed(ioc);
++		if (bfa_nw_ioc_is_operational(ioc))
++			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
++		else
++			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
+ 		break;
+ 
+ 	default:
+@@ -435,35 +860,40 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
+ }
+ 
+ static void
+-bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
++bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
+ {
+-	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
+-	bfa_ioc_hb_monitor(ioc);
++	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
++		msecs_to_jiffies(BFA_IOC_TOV));
++	bfa_ioc_send_disable(iocpf->ioc);
+ }
+ 
++/**
++ * IOC is being disabled
++ */
+ static void
+-bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
++bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
+ {
+-	switch (event) {
+-	case IOC_E_ENABLE:
+-		break;
++	struct bfa_ioc *ioc = iocpf->ioc;
+ 
+-	case IOC_E_DISABLE:
+-		bfa_ioc_hb_stop(ioc);
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
++	switch (event) {
++	case IOCPF_E_FWRSP_DISABLE:
++	case IOCPF_E_FWREADY:
++		del_timer(&ioc->iocpf_timer);
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+ 		break;
+ 
+-	case IOC_E_HWERROR:
+-	case IOC_E_FWREADY:
+-		/**
+-		 * Hard error or IOC recovery by other function.
+-		 * Treat it same as heartbeat failure.
++	case IOCPF_E_FAIL:
++		del_timer(&ioc->iocpf_timer);
++		/*
++		 * !!! fall through !!!
+ 		 */
+-		bfa_ioc_hb_stop(ioc);
+-		/* !!! fall through !!! */
+ 
+-	case IOC_E_HBFAIL:
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
++	case IOCPF_E_TIMEOUT:
++		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
++		break;
++
++	case IOCPF_E_FWRSP_ENABLE:
+ 		break;
+ 
+ 	default:
+@@ -472,33 +902,27 @@ bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
+ }
+ 
+ static void
+-bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
++bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
+ {
+-	bfa_ioc_timer_start(ioc);
+-	bfa_ioc_send_disable(ioc);
++	bfa_ioc_hw_sem_get(iocpf->ioc);
+ }
+ 
+ /**
+- * IOC is being disabled
++ * IOC hb ack request is being removed.
+  */
+ static void
+-bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
++bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
+ {
++	struct bfa_ioc *ioc = iocpf->ioc;
++
+ 	switch (event) {
+-	case IOC_E_FWRSP_DISABLE:
+-		bfa_ioc_timer_stop(ioc);
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
++	case IOCPF_E_SEMLOCKED:
++		bfa_ioc_sync_leave(ioc);
++		bfa_nw_ioc_hw_sem_release(ioc);
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+ 		break;
+ 
+-	case IOC_E_HWERROR:
+-		bfa_ioc_timer_stop(ioc);
+-		/*
+-		 * !!! fall through !!!
+-		 */
+-
+-	case IOC_E_TIMEOUT:
+-		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
++	case IOCPF_E_FAIL:
+ 		break;
+ 
+ 	default:
+@@ -510,29 +934,25 @@ bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
+  * IOC disable completion entry.
+  */
+ static void
+-bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
++bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
+ {
+-	bfa_ioc_disable_comp(ioc);
++	bfa_ioc_pf_disabled(iocpf->ioc);
+ }
+ 
+ static void
+-bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
++bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
+ {
+-	switch (event) {
+-	case IOC_E_ENABLE:
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+-		break;
+-
+-	case IOC_E_DISABLE:
+-		ioc->cbfn->disable_cbfn(ioc->bfa);
+-		break;
++	struct bfa_ioc *ioc = iocpf->ioc;
+ 
+-	case IOC_E_FWREADY:
++	switch (event) {
++	case IOCPF_E_ENABLE:
++		iocpf->retry_count = 0;
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
+ 		break;
+ 
+-	case IOC_E_DETACH:
++	case IOCPF_E_STOP:
+ 		bfa_ioc_firmware_unlock(ioc);
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ 		break;
+ 
+ 	default:
+@@ -541,33 +961,50 @@ bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
+ }
+ 
+ static void
+-bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc)
++bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
+ {
+-	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+-	bfa_ioc_timer_start(ioc);
++	bfa_ioc_hw_sem_get(iocpf->ioc);
+ }
+ 
+ /**
+- * @brief
+  * Hardware initialization failed.
+  */
+ static void
+-bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event)
++bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
+ {
++	struct bfa_ioc *ioc = iocpf->ioc;
++
+ 	switch (event) {
+-	case IOC_E_DISABLE:
+-		bfa_ioc_timer_stop(ioc);
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
++	case IOCPF_E_SEMLOCKED:
++		bfa_ioc_notify_fail(ioc);
++		bfa_ioc_sync_ack(ioc);
++		iocpf->retry_count++;
++		if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
++			bfa_ioc_sync_leave(ioc);
++			bfa_nw_ioc_hw_sem_release(ioc);
++			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
++		} else {
++			if (bfa_ioc_sync_complete(ioc))
++				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
++			else {
++				bfa_nw_ioc_hw_sem_release(ioc);
++				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
++			}
++		}
+ 		break;
+ 
+-	case IOC_E_DETACH:
+-		bfa_ioc_timer_stop(ioc);
++	case IOCPF_E_DISABLE:
++		bfa_ioc_hw_sem_get_cancel(ioc);
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
++		break;
++
++	case IOCPF_E_STOP:
++		bfa_ioc_hw_sem_get_cancel(ioc);
+ 		bfa_ioc_firmware_unlock(ioc);
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+ 		break;
+ 
+-	case IOC_E_TIMEOUT:
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
++	case IOCPF_E_FAIL:
+ 		break;
+ 
+ 	default:
+@@ -576,80 +1013,108 @@ bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event)
+ }
+ 
+ static void
+-bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc)
++bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
+ {
+-	struct list_head			*qe;
+-	struct bfa_ioc_hbfail_notify *notify;
++	bfa_ioc_pf_initfailed(iocpf->ioc);
++}
+ 
+-	/**
+-	 * Mark IOC as failed in hardware and stop firmware.
+-	 */
+-	bfa_ioc_lpu_stop(ioc);
+-	writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
++/**
++ * Hardware initialization failed.
++ */
++static void
++bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
++{
++	struct bfa_ioc *ioc = iocpf->ioc;
+ 
+-	/**
+-	 * Notify other functions on HB failure.
+-	 */
+-	bfa_ioc_notify_hbfail(ioc);
++	switch (event) {
++	case IOCPF_E_DISABLE:
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
++		break;
+ 
+-	/**
+-	 * Notify driver and common modules registered for notification.
+-	 */
+-	ioc->cbfn->hbfail_cbfn(ioc->bfa);
+-	list_for_each(qe, &ioc->hb_notify_q) {
+-		notify = (struct bfa_ioc_hbfail_notify *) qe;
+-		notify->cbfn(notify->cbarg);
++	case IOCPF_E_STOP:
++		bfa_ioc_firmware_unlock(ioc);
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
++		break;
++
++	default:
++		bfa_sm_fault(ioc, event);
+ 	}
++}
+ 
++static void
++bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
++{
+ 	/**
+-	 * Flush any queued up mailbox requests.
++	 * Mark IOC as failed in hardware and stop firmware.
+ 	 */
+-	bfa_ioc_mbox_hbfail(ioc);
++	bfa_ioc_lpu_stop(iocpf->ioc);
+ 
+ 	/**
+-	 * Trigger auto-recovery after a delay.
++	 * Flush any queued up mailbox requests.
+ 	 */
+-	if (ioc->auto_recover)
+-		mod_timer(&ioc->ioc_timer, jiffies +
+-			msecs_to_jiffies(BFA_IOC_TOV_RECOVER));
++	bfa_ioc_mbox_hbfail(iocpf->ioc);
++	bfa_ioc_hw_sem_get(iocpf->ioc);
+ }
+ 
+ /**
+- * @brief
+- * IOC heartbeat failure.
++ * IOC is in failed state.
+  */
+ static void
+-bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event)
++bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
+ {
+-	switch (event) {
++	struct bfa_ioc *ioc = iocpf->ioc;
+ 
+-	case IOC_E_ENABLE:
+-		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
++	switch (event) {
++	case IOCPF_E_SEMLOCKED:
++		iocpf->retry_count = 0;
++		bfa_ioc_sync_ack(ioc);
++		bfa_ioc_notify_fail(ioc);
++		if (!iocpf->auto_recover) {
++			bfa_ioc_sync_leave(ioc);
++			bfa_nw_ioc_hw_sem_release(ioc);
++			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
++		} else {
++			if (bfa_ioc_sync_complete(ioc))
++				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
++			else {
++				bfa_nw_ioc_hw_sem_release(ioc);
++				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
++			}
++		}
+ 		break;
+ 
+-	case IOC_E_DISABLE:
+-		if (ioc->auto_recover)
+-			bfa_ioc_timer_stop(ioc);
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
++	case IOCPF_E_DISABLE:
++		bfa_ioc_hw_sem_get_cancel(ioc);
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+ 		break;
+ 
+-	case IOC_E_TIMEOUT:
+-		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
++	case IOCPF_E_FAIL:
+ 		break;
+ 
+-	case IOC_E_FWREADY:
+-		/**
+-		 * Recovery is already initiated by other function.
+-		 */
+-		break;
++	default:
++		bfa_sm_fault(ioc, event);
++	}
++}
+ 
+-	case IOC_E_HWERROR:
+-		/*
+-		 * HB failure notification, ignore.
+-		 */
++static void
++bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
++{
++}
++
++/**
++ * @brief
++ * IOC is in failed state.
++ */
++static void
++bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
++{
++	switch (event) {
++	case IOCPF_E_DISABLE:
++		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+ 		break;
++
+ 	default:
+-		bfa_sm_fault(ioc, event);
++		bfa_sm_fault(iocpf->ioc, event);
+ 	}
+ }
+ 
+@@ -674,14 +1139,6 @@ bfa_ioc_disable_comp(struct bfa_ioc *ioc)
+ 	}
+ }
+ 
+-void
+-bfa_nw_ioc_sem_timeout(void *ioc_arg)
+-{
+-	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
+-
+-	bfa_ioc_hw_sem_get(ioc);
+-}
+-
+ bool
+ bfa_nw_ioc_sem_get(void __iomem *sem_reg)
+ {
+@@ -721,7 +1178,7 @@ bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
+ 	 */
+ 	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
+ 	if (r32 == 0) {
+-		bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
++		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
+ 		return;
+ 	}
+ 
+@@ -932,7 +1389,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
+ 		 */
+ 		bfa_ioc_msgflush(ioc);
+ 		ioc->cbfn->reset_cbfn(ioc->bfa);
+-		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
++		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
+ 		return;
+ 	}
+ 
+@@ -1018,7 +1475,6 @@ bfa_nw_ioc_hb_check(void *cbarg)
+ 
+ 	hb_count = readl(ioc->ioc_regs.heartbeat);
+ 	if (ioc->hb_count == hb_count) {
+-		pr_crit("Firmware heartbeat failure at %d", hb_count);
+ 		bfa_ioc_recover(ioc);
+ 		return;
+ 	} else {
+@@ -1189,6 +1645,55 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
+ 		bfa_q_deq(&mod->cmd_q, &cmd);
+ }
+ 
++static void
++bfa_ioc_fail_notify(struct bfa_ioc *ioc)
++{
++	struct list_head		*qe;
++	struct bfa_ioc_hbfail_notify	*notify;
++
++	/**
++	 * Notify driver and common modules registered for notification.
++	 */
++	ioc->cbfn->hbfail_cbfn(ioc->bfa);
++	list_for_each(qe, &ioc->hb_notify_q) {
++		notify = (struct bfa_ioc_hbfail_notify *) qe;
++		notify->cbfn(notify->cbarg);
++	}
++}
++
++static void
++bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
++{
++	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
++}
++
++static void
++bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
++{
++	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
++}
++
++static void
++bfa_ioc_pf_initfailed(struct bfa_ioc *ioc)
++{
++	bfa_fsm_send_event(ioc, IOC_E_INITFAILED);
++}
++
++static void
++bfa_ioc_pf_failed(struct bfa_ioc *ioc)
++{
++	bfa_fsm_send_event(ioc, IOC_E_PFAILED);
++}
++
++static void
++bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
++{
++	/**
++	 * Provide enable completion callback and AEN notification.
++	 */
++	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
++}
++
+ /**
+  * IOC public
+  */
+@@ -1284,6 +1789,7 @@ static void
+ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
+ {
+ 	union bfi_ioc_i2h_msg_u	*msg;
++	struct bfa_iocpf *iocpf = &ioc->iocpf;
+ 
+ 	msg = (union bfi_ioc_i2h_msg_u *) m;
+ 
+@@ -1294,15 +1800,15 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
+ 		break;
+ 
+ 	case BFI_IOC_I2H_READY_EVENT:
+-		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
++		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
+ 		break;
+ 
+ 	case BFI_IOC_I2H_ENABLE_REPLY:
+-		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
++		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
+ 		break;
+ 
+ 	case BFI_IOC_I2H_DISABLE_REPLY:
+-		bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
++		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
+ 		break;
+ 
+ 	case BFI_IOC_I2H_GETATTR_REPLY:
+@@ -1328,11 +1834,13 @@ bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
+ 	ioc->fcmode	= false;
+ 	ioc->pllinit	= false;
+ 	ioc->dbg_fwsave_once = true;
++	ioc->iocpf.ioc  = ioc;
+ 
+ 	bfa_ioc_mbox_attach(ioc);
+ 	INIT_LIST_HEAD(&ioc->hb_notify_q);
+ 
+-	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
++	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
++	bfa_fsm_send_event(ioc, IOC_E_RESET);
+ }
+ 
+ /**
+@@ -1637,7 +2145,40 @@ bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
+ static enum bfa_ioc_state
+ bfa_ioc_get_state(struct bfa_ioc *ioc)
+ {
+-	return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
++	enum bfa_iocpf_state iocpf_st;
++	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
++
++	if (ioc_st == BFA_IOC_ENABLING ||
++		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
++
++		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
++
++		switch (iocpf_st) {
++		case BFA_IOCPF_SEMWAIT:
++			ioc_st = BFA_IOC_SEMWAIT;
++			break;
++
++		case BFA_IOCPF_HWINIT:
++			ioc_st = BFA_IOC_HWINIT;
++			break;
++
++		case BFA_IOCPF_FWMISMATCH:
++			ioc_st = BFA_IOC_FWMISMATCH;
++			break;
++
++		case BFA_IOCPF_FAIL:
++			ioc_st = BFA_IOC_FAIL;
++			break;
++
++		case BFA_IOCPF_INITFAIL:
++			ioc_st = BFA_IOC_INITFAIL;
++			break;
++
++		default:
++			break;
++		}
++	}
++	return ioc_st;
+ }
+ 
+ void
+@@ -1678,8 +2219,13 @@ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
+ static void
+ bfa_ioc_recover(struct bfa_ioc *ioc)
+ {
+-	bfa_ioc_stats(ioc, ioc_hbfails);
+-	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
++	u16 bdf;
++
++	bdf = (ioc->pcidev.pci_slot << 8 | ioc->pcidev.pci_func << 3 |
++					ioc->pcidev.device_id);
++
++	pr_crit("Firmware heartbeat failure at %d", bdf);
++	BUG_ON(1);
+ }
+ 
+ static void
+@@ -1687,5 +2233,61 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
+ {
+ 	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
+ 		return;
++}
++
++/**
++ * @dg hal_iocpf_pvt BFA IOC PF private functions
++ * @{
++ */
++
++static void
++bfa_iocpf_enable(struct bfa_ioc *ioc)
++{
++	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
++}
++
++static void
++bfa_iocpf_disable(struct bfa_ioc *ioc)
++{
++	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
++}
++
++static void
++bfa_iocpf_fail(struct bfa_ioc *ioc)
++{
++	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
++}
++
++static void
++bfa_iocpf_initfail(struct bfa_ioc *ioc)
++{
++	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
++}
++
++static void
++bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
++{
++	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
++}
++
++static void
++bfa_iocpf_stop(struct bfa_ioc *ioc)
++{
++	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
++}
++
++void
++bfa_nw_iocpf_timeout(void *ioc_arg)
++{
++	struct bfa_ioc  *ioc = (struct bfa_ioc *) ioc_arg;
++
++	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
++}
+ 
++void
++bfa_nw_iocpf_sem_timeout(void *ioc_arg)
++{
++	struct bfa_ioc  *ioc = (struct bfa_ioc *) ioc_arg;
++
++	bfa_ioc_hw_sem_get(ioc);
+ }
+diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
+index a73d84e..e4974bc 100644
+--- a/drivers/net/bna/bfa_ioc.h
++++ b/drivers/net/bna/bfa_ioc.h
+@@ -26,16 +26,7 @@
+ #define BFA_IOC_TOV		3000	/* msecs */
+ #define BFA_IOC_HWSEM_TOV	500	/* msecs */
+ #define BFA_IOC_HB_TOV		500	/* msecs */
+-#define BFA_IOC_HWINIT_MAX	2
+-#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
+-
+-/**
+- * Generic Scatter Gather Element used by driver
+- */
+-struct bfa_sge {
+-	u32	sg_len;
+-	void	*sg_addr;
+-};
++#define BFA_IOC_HWINIT_MAX	5
+ 
+ /**
+  * PCI device information required by IOC
+@@ -65,19 +56,6 @@ struct bfa_dma {
+ #define BFI_SMEM_CT_SIZE	0x280000U	/* ! 2.5MB for catapult	*/
+ 
+ /**
+- * @brief BFA dma address assignment macro
+- */
+-#define bfa_dma_addr_set(dma_addr, pa)	\
+-		__bfa_dma_addr_set(&dma_addr, (u64)pa)
+-
+-static inline void
+-__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
+-{
+-	dma_addr->a32.addr_lo = (u32) pa;
+-	dma_addr->a32.addr_hi = (u32) (upper_32_bits(pa));
+-}
+-
+-/**
+  * @brief BFA dma address assignment macro. (big endian format)
+  */
+ #define bfa_dma_be_addr_set(dma_addr, pa)	\
+@@ -105,8 +83,11 @@ struct bfa_ioc_regs {
+ 	void __iomem *host_page_num_fn;
+ 	void __iomem *heartbeat;
+ 	void __iomem *ioc_fwstate;
++	void __iomem *alt_ioc_fwstate;
+ 	void __iomem *ll_halt;
++	void __iomem *alt_ll_halt;
+ 	void __iomem *err_set;
++	void __iomem *ioc_fail_sync;
+ 	void __iomem *shirq_isr_next;
+ 	void __iomem *shirq_msk_next;
+ 	void __iomem *smem_page_start;
+@@ -165,16 +146,22 @@ struct bfa_ioc_hbfail_notify {
+ 	(__notify)->cbarg = (__cbarg);				\
+ } while (0)
+ 
++struct bfa_iocpf {
++	bfa_fsm_t		fsm;
++	struct bfa_ioc		*ioc;
++	u32			retry_count;
++	bool			auto_recover;
++};
++
+ struct bfa_ioc {
+ 	bfa_fsm_t		fsm;
+ 	struct bfa 		*bfa;
+ 	struct bfa_pcidev 	pcidev;
+-	struct bfa_timer_mod	*timer_mod;
+ 	struct timer_list 	ioc_timer;
++	struct timer_list 	iocpf_timer;
+ 	struct timer_list 	sem_timer;
+ 	struct timer_list	hb_timer;
+ 	u32			hb_count;
+-	u32			retry_count;
+ 	struct list_head	hb_notify_q;
+ 	void			*dbg_fwsave;
+ 	int			dbg_fwsave_len;
+@@ -182,7 +169,6 @@ struct bfa_ioc {
+ 	enum bfi_mclass		ioc_mc;
+ 	struct bfa_ioc_regs 	ioc_regs;
+ 	struct bfa_ioc_drv_stats stats;
+-	bool			auto_recover;
+ 	bool			fcmode;
+ 	bool			ctdev;
+ 	bool			cna;
+@@ -195,6 +181,7 @@ struct bfa_ioc {
+ 	struct bfa_ioc_cbfn	*cbfn;
+ 	struct bfa_ioc_mbox_mod	mbox_mod;
+ 	struct bfa_ioc_hwif	*ioc_hwif;
++	struct bfa_iocpf	iocpf;
+ };
+ 
+ struct bfa_ioc_hwif {
+@@ -205,8 +192,12 @@ struct bfa_ioc_hwif {
+ 	void		(*ioc_map_port)	(struct bfa_ioc *ioc);
+ 	void		(*ioc_isr_mode_set)	(struct bfa_ioc *ioc,
+ 					bool msix);
+-	void		(*ioc_notify_hbfail)	(struct bfa_ioc *ioc);
++	void		(*ioc_notify_fail)	(struct bfa_ioc *ioc);
+ 	void		(*ioc_ownership_reset)	(struct bfa_ioc *ioc);
++	void		(*ioc_sync_join)	(struct bfa_ioc *ioc);
++	void		(*ioc_sync_leave)	(struct bfa_ioc *ioc);
++	void		(*ioc_sync_ack)		(struct bfa_ioc *ioc);
++	bool		(*ioc_sync_complete)	(struct bfa_ioc *ioc);
+ };
+ 
+ #define bfa_ioc_pcifn(__ioc)		((__ioc)->pcidev.pci_func)
+@@ -271,7 +262,6 @@ void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
+ void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
+ 
+ void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
+-
+ void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
+ void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
+ 	struct bfa_ioc_hbfail_notify *notify);
+@@ -289,7 +279,8 @@ mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
+  */
+ void bfa_nw_ioc_timeout(void *ioc);
+ void bfa_nw_ioc_hb_check(void *ioc);
+-void bfa_nw_ioc_sem_timeout(void *ioc);
++void bfa_nw_iocpf_timeout(void *ioc);
++void bfa_nw_iocpf_sem_timeout(void *ioc);
+ 
+ /*
+  * F/W Image Size & Chunk
+diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
+index 121cfd6..469997c 100644
+--- a/drivers/net/bna/bfa_ioc_ct.c
++++ b/drivers/net/bna/bfa_ioc_ct.c
+@@ -22,6 +22,15 @@
+ #include "bfi_ctreg.h"
+ #include "bfa_defs.h"
+ 
++#define bfa_ioc_ct_sync_pos(__ioc)	\
++		((u32) (1 << bfa_ioc_pcifn(__ioc)))
++#define BFA_IOC_SYNC_REQD_SH		16
++#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
++#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
++#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
++#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
++		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
++
+ /*
+  * forward declarations
+  */
+@@ -30,8 +39,12 @@ static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
+ static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
+ static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
+ static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
+-static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
++static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
+ static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
++static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
++static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
++static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
++static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
+ static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
+ 
+ static struct bfa_ioc_hwif nw_hwif_ct;
+@@ -48,8 +61,12 @@ bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
+ 	nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
+ 	nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
+ 	nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
+-	nw_hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
++	nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
+ 	nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
++	nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
++	nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
++	nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
++	nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
+ 
+ 	ioc->ioc_hwif = &nw_hwif_ct;
+ }
+@@ -86,6 +103,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
+ 	if (usecnt == 0) {
+ 		writel(1, ioc->ioc_regs.ioc_usage_reg);
+ 		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
++		writel(0, ioc->ioc_regs.ioc_fail_sync);
+ 		return true;
+ 	}
+ 
+@@ -149,12 +167,14 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
+  * Notify other functions on HB failure.
+  */
+ static void
+-bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc)
++bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
+ {
+ 	if (ioc->cna) {
+ 		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
++		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
+ 		/* Wait for halt to take effect */
+ 		readl(ioc->ioc_regs.ll_halt);
++		readl(ioc->ioc_regs.alt_ll_halt);
+ 	} else {
+ 		writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
+ 		readl(ioc->ioc_regs.err_set);
+@@ -206,15 +226,19 @@ bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
+ 	if (ioc->port_id == 0) {
+ 		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
+ 		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
++		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
+ 		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
+ 		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
+ 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
++		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
+ 	} else {
+ 		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
+ 		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
++		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+ 		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
+ 		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
+ 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
++		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
+ 	}
+ 
+ 	/*
+@@ -232,6 +256,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
+ 	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
+ 	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
+ 	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
++	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
+ 
+ 	/**
+ 	 * sram memory access
+@@ -317,6 +342,77 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
+ 	bfa_nw_ioc_hw_sem_release(ioc);
+ }
+ 
++/**
++ * Synchronized IOC failure processing routines
++ */
++static void
++bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
++{
++	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
++	u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
++
++	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
++}
++
++static void
++bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
++{
++	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
++	u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
++					bfa_ioc_ct_sync_pos(ioc);
++
++	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
++}
++
++static void
++bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
++{
++	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
++
++	writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
++}
++
++static bool
++bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
++{
++	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
++	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
++	u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
++	u32 tmp_ackd;
++
++	if (sync_ackd == 0)
++		return true;
++
++	/**
++	 * The check below is to see whether any other PCI fn
++	 * has reinitialized the ASIC (reset sync_ackd bits)
++	 * and failed again while this IOC was waiting for hw
++	 * semaphore (in bfa_iocpf_sm_semwait()).
++	 */
++	tmp_ackd = sync_ackd;
++	if ((sync_reqd &  bfa_ioc_ct_sync_pos(ioc)) &&
++			!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
++		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
++
++	if (sync_reqd == sync_ackd) {
++		writel(bfa_ioc_ct_clear_sync_ackd(r32),
++				ioc->ioc_regs.ioc_fail_sync);
++		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
++		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
++		return true;
++	}
++
++	/**
++	 * If another PCI fn reinitialized and failed again while
++	 * this IOC was waiting for hw sem, the sync_ackd bit for
++	 * this IOC needs to be set again to allow reinitialization.
++	 */
++	if (tmp_ackd != sync_ackd)
++		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
++
++	return false;
++}
++
+ static enum bfa_status
+ bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
+ {
+diff --git a/drivers/net/bna/bfi_ctreg.h b/drivers/net/bna/bfi_ctreg.h
+index 404ea35..5130d79 100644
+--- a/drivers/net/bna/bfi_ctreg.h
++++ b/drivers/net/bna/bfi_ctreg.h
+@@ -535,6 +535,7 @@ enum {
+ #define BFA_IOC1_HBEAT_REG		HOST_SEM2_INFO_REG
+ #define BFA_IOC1_STATE_REG		HOST_SEM3_INFO_REG
+ #define BFA_FW_USE_COUNT		 HOST_SEM4_INFO_REG
++#define BFA_IOC_FAIL_SYNC		HOST_SEM5_INFO_REG
+ 
+ #define CPE_DEPTH_Q(__n) \
+ 	(CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
+@@ -552,22 +553,30 @@ enum {
+ 	(RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
+ #define RME_CI_PTR_Q(__n) \
+ 	(RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
+-#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \
+-	* (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
+-#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \
+-	* (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
+-#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \
+-	* (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
+-#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \
+-	* (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
+-#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \
+-	* (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
+-#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \
+-	* (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
+-#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \
+-	* (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
+-#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \
+-	* (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
++#define HQM_QSET_RXQ_DRBL_P0(__n) \
++	(HQM_QSET0_RXQ_DRBL_P0 + (__n) * \
++		(HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
++#define HQM_QSET_TXQ_DRBL_P0(__n) \
++	(HQM_QSET0_TXQ_DRBL_P0 + (__n) * \
++		(HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
++#define HQM_QSET_IB_DRBL_1_P0(__n) \
++	(HQM_QSET0_IB_DRBL_1_P0 + (__n) * \
++		(HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
++#define HQM_QSET_IB_DRBL_2_P0(__n) \
++	(HQM_QSET0_IB_DRBL_2_P0 + (__n) * \
++		(HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
++#define HQM_QSET_RXQ_DRBL_P1(__n) \
++	(HQM_QSET0_RXQ_DRBL_P1 + (__n) * \
++		(HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
++#define HQM_QSET_TXQ_DRBL_P1(__n) \
++	(HQM_QSET0_TXQ_DRBL_P1 + (__n) * \
++		(HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
++#define HQM_QSET_IB_DRBL_1_P1(__n) \
++	(HQM_QSET0_IB_DRBL_1_P1 + (__n) * \
++		(HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
++#define HQM_QSET_IB_DRBL_2_P1(__n) \
++	(HQM_QSET0_IB_DRBL_2_P1 + (__n) * \
++		(HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
+ 
+ #define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
+ #define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
+diff --git a/drivers/net/bna/bna.h b/drivers/net/bna/bna.h
+index fd93f76..a287f89 100644
+--- a/drivers/net/bna/bna.h
++++ b/drivers/net/bna/bna.h
+@@ -32,8 +32,6 @@ extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
+ /* Log string size */
+ #define BNA_MESSAGE_SIZE		256
+ 
+-#define bna_device_timer(_dev)		bfa_timer_beat(&((_dev)->timer_mod))
+-
+ /* MBOX API for PORT, TX, RX */
+ #define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg)		\
+ do {									\
+diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
+index 140ea95..fad9126 100644
+--- a/drivers/net/bna/bnad.c
++++ b/drivers/net/bna/bnad.c
+@@ -1425,13 +1425,24 @@ bnad_ioc_hb_check(unsigned long data)
+ }
+ 
+ static void
+-bnad_ioc_sem_timeout(unsigned long data)
++bnad_iocpf_timeout(unsigned long data)
+ {
+ 	struct bnad *bnad = (struct bnad *)data;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&bnad->bna_lock, flags);
+-	bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
++	bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
++	spin_unlock_irqrestore(&bnad->bna_lock, flags);
++}
++
++static void
++bnad_iocpf_sem_timeout(unsigned long data)
++{
++	struct bnad *bnad = (struct bnad *)data;
++	unsigned long flags;
++
++	spin_lock_irqsave(&bnad->bna_lock, flags);
++	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
+ 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ }
+ 
+@@ -3132,11 +3143,13 @@ bnad_pci_probe(struct pci_dev *pdev,
+ 				((unsigned long)bnad));
+ 	setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
+ 				((unsigned long)bnad));
+-	setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout,
++	setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout,
++				((unsigned long)bnad));
++	setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout,
+ 				((unsigned long)bnad));
+ 
+ 	/* Now start the timer before calling IOC */
+-	mod_timer(&bnad->bna.device.ioc.ioc_timer,
++	mod_timer(&bnad->bna.device.ioc.iocpf_timer,
+ 		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
+ 
+ 	/*
+-- 
+1.7.4.4
+

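The bfa_ioc_ct_sync_* helpers added above coordinate failure recovery
among the PCI functions sharing the ASIC through the ioc_fail_sync
scratch register: the low 16 bits hold a per-function "sync acked"
bitmap and the high 16 bits a per-function "sync required" bitmap.
A minimal sketch of that convention (hypothetical helper names, not
driver code):

	#define SYNC_REQD_SHIFT	16

	static u32 sync_pos(u32 pcifn)		/* this function's ack bit */
	{
		return 1U << pcifn;
	}

	static u32 sync_reqd_pos(u32 pcifn)	/* this function's "needs sync" bit */
	{
		return sync_pos(pcifn) << SYNC_REQD_SHIFT;
	}

	/* Reinitialization may proceed once every function that requested
	 * a sync has also acknowledged it (cf. bfa_ioc_ct_sync_complete).
	 */
	static bool sync_done(u32 r32)
	{
		return (r32 >> SYNC_REQD_SHIFT) == (r32 & 0xffff);
	}
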
Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0022-bna-Update-the-driver-version-to-2.3.2.3.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0022-bna-Update-the-driver-version-to-2.3.2.3.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,29 @@
+From: Rasesh Mody <rmody at brocade.com>
+Date: Thu, 23 Dec 2010 21:45:10 +0000
+Subject: [PATCH 22/23] bna: Update the driver version to 2.3.2.3
+
+commit a1a5da57d0884017b8c3a011a28d4f5e08a2ea4f upstream.
+
+Signed-off-by: Debashis Dutt <ddutt at brocade.com>
+Signed-off-by: Rasesh Mody <rmody at brocade.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/bna/bnad.h |    2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
+index 1954dea..8b1d515 100644
+--- a/drivers/net/bna/bnad.h
++++ b/drivers/net/bna/bnad.h
+@@ -65,7 +65,7 @@ struct bnad_rx_ctrl {
+ #define BNAD_NAME			"bna"
+ #define BNAD_NAME_LEN			64
+ 
+-#define BNAD_VERSION			"2.3.2.0"
++#define BNAD_VERSION			"2.3.2.3"
+ 
+ #define BNAD_MAILBOX_MSIX_VECTORS	1
+ 
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/bna/0023-bna-Remove-unnecessary-memset-0.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bna/0023-bna-Remove-unnecessary-memset-0.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,30 @@
+From: Joe Perches <joe at perches.com>
+Date: Wed, 12 Jan 2011 11:21:18 +0000
+Subject: [PATCH 23/23] bna: Remove unnecessary memset(,0,)
+
+commit 35ab7b798a2dc4a9b19bd85833f83a19736bcfd8 upstream.
+
+kzalloc'd memory doesn't need a memset to 0.
+
+Signed-off-by: Joe Perches <joe at perches.com>
+Acked-by: Rasesh Mody <rmody at brocade.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/bna/bnad_ethtool.c |    1 -
+ 1 files changed, 0 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
+index 99be5ae..142d604 100644
+--- a/drivers/net/bna/bnad_ethtool.c
++++ b/drivers/net/bna/bnad_ethtool.c
+@@ -275,7 +275,6 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+ 
+ 	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
+ 	if (ioc_attr) {
+-		memset(ioc_attr, 0, sizeof(*ioc_attr));
+ 		spin_lock_irqsave(&bnad->bna_lock, flags);
+ 		bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
+ 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
+-- 
+1.7.4.4
+

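The hunk above is the entire point of the patch: kzalloc() already
returns zeroed memory, so zeroing it again was redundant.  For
reference, a sketch of the two equivalent patterns (not driver code):

	/* before: allocate zeroed memory, then zero it again */
	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
	if (ioc_attr)
		memset(ioc_attr, 0, sizeof(*ioc_attr));	/* redundant */

	/* kzalloc() behaves like kmalloc() followed by the memset */
	ioc_attr = kmalloc(sizeof(*ioc_attr), GFP_KERNEL);
	if (ioc_attr)
		memset(ioc_attr, 0, sizeof(*ioc_attr));
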
Added: dists/squeeze/linux-2.6/debian/patches/features/all/bnx2x-Add-support-for-BCM84823.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bnx2x-Add-support-for-BCM84823.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,142 @@
+From: Yaniv Rosner <yanivr at broadcom.com>
+Date: Thu, 5 Nov 2009 19:18:23 +0200
+Subject: [PATCH] bnx2x: Add support for BCM84823
+
+commit 4f60dab113230943fb1bc7969053d9a1b6578339 upstream.
+
+Add support for new phy type BCM84823 (Dual copper-port phy)
+
+Signed-off-by: Yaniv Rosner <yanivr at broadcom.com>
+Signed-off-by: Eilon Greenstein <eilong at broadcom.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/bnx2x_hsi.h  |    1 +
+ drivers/net/bnx2x_link.c |   44 +++++++++++++++++++++++++++++++++++++++++---
+ 2 files changed, 42 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
+index dc2f8ed..5258533 100644
+--- a/drivers/net/bnx2x_hsi.h
++++ b/drivers/net/bnx2x_hsi.h
+@@ -264,6 +264,7 @@ struct port_hw_cfg {			    /* port 0: 0x12c  port 1: 0x2bc */
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101	    0x00000800
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727	    0x00000900
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC   0x00000a00
++#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823	    0x00000b00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE	    0x0000fd00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN	    0x0000ff00
+ 
+diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
+index b08ff77..7897fe1 100644
+--- a/drivers/net/bnx2x_link.c
++++ b/drivers/net/bnx2x_link.c
+@@ -2200,6 +2200,8 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
+ 				       MDIO_PMA_REG_CTRL,
+ 				       1<<15);
+ 			break;
++		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
++			break;
+ 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+ 			DP(NETIF_MSG_LINK, "XGXS PHY Failure detected\n");
+ 			break;
+@@ -4373,6 +4375,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
+ 			break;
+ 		}
+ 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
++		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
+ 			/* This phy uses the NIG latch mechanism since link
+ 				indication arrives through its LED4 and not via
+ 				its LASI signal, so we get steady signal
+@@ -4380,6 +4383,12 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
+ 			bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
+ 				    1 << NIG_LATCH_BC_ENABLE_MI_INT);
+ 
++			bnx2x_cl45_write(bp, params->port,
++				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
++				       ext_phy_addr,
++				       MDIO_PMA_DEVAD,
++				       MDIO_PMA_REG_CTRL, 0x0000);
++
+ 			bnx2x_8481_set_led4(params, ext_phy_type, ext_phy_addr);
+ 			if (params->req_line_speed == SPEED_AUTO_NEG) {
+ 
+@@ -5230,6 +5239,7 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
+ 			}
+ 			break;
+ 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
++		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
+ 			/* Check 10G-BaseT link status */
+ 			/* Check PMD signal ok */
+ 			bnx2x_cl45_read(bp, params->port, ext_phy_type,
+@@ -5445,8 +5455,10 @@ static void bnx2x_link_int_ack(struct link_params *params,
+ 		     (NIG_STATUS_XGXS0_LINK10G |
+ 		      NIG_STATUS_XGXS0_LINK_STATUS |
+ 		      NIG_STATUS_SERDES0_LINK_STATUS));
+-	if (XGXS_EXT_PHY_TYPE(params->ext_phy_config)
+-	    == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481) {
++	if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config)
++		== PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481) ||
++	(XGXS_EXT_PHY_TYPE(params->ext_phy_config)
++		== PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823)) {
+ 		bnx2x_8481_rearm_latch_signal(bp, port, is_mi_int);
+ 	}
+ 	if (vars->phy_link_up) {
+@@ -5559,6 +5571,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
+ 		status = bnx2x_format_ver(spirom_ver, version, len);
+ 		break;
+ 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
++	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
+ 		spirom_ver = ((spirom_ver & 0xF80) >> 7) << 16 |
+ 			(spirom_ver & 0x7F);
+ 		status = bnx2x_format_ver(spirom_ver, version, len);
+@@ -6250,6 +6263,22 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
+ 			bnx2x_8726_reset_phy(bp, params->port, ext_phy_addr);
+ 			break;
+ 		}
++		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
++		{
++			u8 ext_phy_addr =
++				XGXS_EXT_PHY_ADDR(params->ext_phy_config);
++			bnx2x_cl45_write(bp, port,
++				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
++				       ext_phy_addr,
++				       MDIO_AN_DEVAD,
++				       MDIO_AN_REG_CTRL, 0x0000);
++			bnx2x_cl45_write(bp, port,
++				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
++				       ext_phy_addr,
++				       MDIO_PMA_DEVAD,
++				       MDIO_PMA_REG_CTRL, 1);
++			break;
++		}
+ 		default:
+ 			/* HW reset */
+ 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+@@ -6661,6 +6690,13 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base)
+ 	return 0;
+ }
+ 
++
++static u8 bnx2x_84823_common_init_phy(struct bnx2x *bp, u32 shmem_base)
++{
++	/* HW reset */
++	bnx2x_ext_phy_hw_reset(bp, 1);
++	return 0;
++}
+ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
+ {
+ 	u8 rc = 0;
+@@ -6690,7 +6726,9 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
+ 		/* GPIO1 affects both ports, so there's need to pull
+ 		it for single port alone */
+ 		rc = bnx2x_8726_common_init_phy(bp, shmem_base);
+-
++		break;
++	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
++		rc = bnx2x_84823_common_init_phy(bp, shmem_base);
+ 		break;
+ 	default:
+ 		DP(NETIF_MSG_LINK,
+-- 
+1.7.4.4
+

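The pattern in this patch is worth noting: the BCM84823 is supported by
adding it to the existing BCM8481 case arms, and the new clause-45
writes pass PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 as the phy-type
argument, so the 8481 access helpers are reused for the new part.
Condensed, the dispatch looks like this (illustrative sketch;
handle_8481_family is a hypothetical helper, not a driver function):

	switch (XGXS_EXT_PHY_TYPE(params->ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
		/* Both parts signal link via LED4 and the NIG latch
		 * rather than via LASI, and share a register layout.
		 */
		handle_8481_family(params, vars);
		break;
	default:
		break;
	}
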
Added: dists/squeeze/linux-2.6/debian/patches/features/all/bnx2x-Fix-10G-mode-in-BCM8481-BCM84823.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/bnx2x-Fix-10G-mode-in-BCM8481-BCM84823.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,105 @@
+From: Yaniv Rosner <yanivr at broadcom.com>
+Date: Thu, 5 Nov 2009 19:18:30 +0200
+Subject: [PATCH] bnx2x: Fix 10G mode in BCM8481/BCM84823
+
+commit 46d15cc7a09d6a7f96908b2cd812744c483893b4 upstream.
+
+Signed-off-by: Yaniv Rosner <yanivr at broadcom.com>
+Signed-off-by: Eilon Greenstein <eilong at broadcom.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ drivers/net/bnx2x_link.c |   38 ++++++++------------------------------
+ drivers/net/bnx2x_reg.h  |    1 +
+ 2 files changed, 9 insertions(+), 30 deletions(-)
+
+diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
+index 1b73c1d..d2c9e19 100644
+--- a/drivers/net/bnx2x_link.c
++++ b/drivers/net/bnx2x_link.c
+@@ -3533,8 +3533,8 @@ static void bnx2x_8481_set_10G_led_mode(struct link_params *params,
+ 		       MDIO_PMA_REG_8481_LINK_SIGNAL,
+ 		       &val1);
+ 	/* Set bit 2 to 0, and bits [1:0] to 10 */
+-	val1 &= ~((1<<0) | (1<<2)); /* Clear bits 0,2*/
+-	val1 |= (1<<1); /* Set bit 1 */
++	val1 &= ~((1<<0) | (1<<2) | (1<<7)); /* Clear bits 0,2,7*/
++	val1 |= ((1<<1) | (1<<6)); /* Set bit 1, 6 */
+ 
+ 	bnx2x_cl45_write(bp, params->port,
+ 		       ext_phy_type,
+@@ -3568,36 +3568,19 @@ static void bnx2x_8481_set_10G_led_mode(struct link_params *params,
+ 		       MDIO_PMA_REG_8481_LED2_MASK,
+ 		       0);
+ 
+-	/* LED3 (10G/1G/100/10G Activity) */
+-	bnx2x_cl45_read(bp, params->port,
+-		      ext_phy_type,
+-		      ext_phy_addr,
+-		      MDIO_PMA_DEVAD,
+-		      MDIO_PMA_REG_8481_LINK_SIGNAL,
+-		      &val1);
+-	/* Enable blink based on source 4(Activity) */
+-	val1 &= ~((1<<7) | (1<<8)); /* Clear bits 7,8 */
+-	val1 |= (1<<6); /* Set only bit 6 */
++	/* Unmask LED3 for 10G link */
+ 	bnx2x_cl45_write(bp, params->port,
+ 		       ext_phy_type,
+ 		       ext_phy_addr,
+ 		       MDIO_PMA_DEVAD,
+-		       MDIO_PMA_REG_8481_LINK_SIGNAL,
+-		       val1);
+-
+-	bnx2x_cl45_read(bp, params->port,
+-		      ext_phy_type,
+-		      ext_phy_addr,
+-		      MDIO_PMA_DEVAD,
+ 		      MDIO_PMA_REG_8481_LED3_MASK,
+-		      &val1);
+-	val1 |= (1<<4); /* Unmask LED3 for 10G link */
++		       0x6);
+ 	bnx2x_cl45_write(bp, params->port,
+ 		       ext_phy_type,
+ 		       ext_phy_addr,
+ 		       MDIO_PMA_DEVAD,
+-		       MDIO_PMA_REG_8481_LED3_MASK,
+-		       val1);
++		       MDIO_PMA_REG_8481_LED3_BLINK,
++		       0);
+ }
+ 
+ 
+@@ -4476,17 +4459,12 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
+ 				    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
+ 					DP(NETIF_MSG_LINK, "Advertising 10G\n");
+ 					/* Restart autoneg for 10G*/
+-			bnx2x_cl45_read(bp, params->port,
+-				      ext_phy_type,
+-				      ext_phy_addr,
+-				      MDIO_AN_DEVAD,
+-				      MDIO_AN_REG_CTRL, &val);
+-			val |= 0x200;
++
+ 			bnx2x_cl45_write(bp, params->port,
+ 				       ext_phy_type,
+ 				       ext_phy_addr,
+ 				       MDIO_AN_DEVAD,
+-				       MDIO_AN_REG_CTRL, val);
++				       MDIO_AN_REG_CTRL, 0x3200);
+ 				}
+ 			} else {
+ 				/* Force speed */
+diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
+index 4be9bab..b668173 100644
+--- a/drivers/net/bnx2x_reg.h
++++ b/drivers/net/bnx2x_reg.h
+@@ -5129,6 +5129,7 @@ Theotherbitsarereservedandshouldbezero*/
+ #define MDIO_PMA_REG_8481_LED1_MASK	0xa82c
+ #define MDIO_PMA_REG_8481_LED2_MASK	0xa82f
+ #define MDIO_PMA_REG_8481_LED3_MASK	0xa832
++#define MDIO_PMA_REG_8481_LED3_BLINK	0xa834
+ #define MDIO_PMA_REG_8481_SIGNAL_MASK	0xa835
+ #define MDIO_PMA_REG_8481_LINK_SIGNAL	0xa83b
+ 
+-- 
+1.7.4.4
+

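One detail in the hunk above: the old code read the AN control
register, OR'd in the restart bit (0x200) and wrote it back, while the
new code writes 0x3200 in a single shot.  Decoding that constant
against the clause-45 AN control register layout (our reading, not
stated in the commit message):

	/* 0x3200 written to MDIO_AN_REG_CTRL (AN devad, register 0):
	 *   bit 13 (0x2000) - extended next page control
	 *   bit 12 (0x1000) - auto-negotiation enable
	 *   bit  9 (0x0200) - restart auto-negotiation
	 */
	bnx2x_cl45_write(bp, params->port, ext_phy_type, ext_phy_addr,
			 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x3200);
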
Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0001-SCSI-hpsa-add-driver-for-HP-Smart-Array-controllers.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0001-SCSI-hpsa-add-driver-for-HP-Smart-Array-controllers.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,4213 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Tue, 8 Dec 2009 14:09:11 -0800
+Subject: [PATCH 01/79] [SCSI] hpsa: add driver for HP Smart Array
+ controllers.
+
+commit edd163687ea59f01d6b43c9e1fdaa0126fa30191 upstream.
+
+This driver supports a subset of HP Smart Array Controllers.
+It is a SCSI alternative to the cciss driver.
+
+[akpm at linux-foundation.org: avoid helpful cleanup patches]
+[achiang at hp.com: make device attrs static]
+[akpm at linux-foundation.org: msleep() does set_current_state() itself]
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: Mike Miller <mikem at beardog.cce.hp.com>
+Signed-off-by: Alex Chiang <achiang at hp.com>
+Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/Kconfig    |   10 +
+ drivers/scsi/Makefile   |    1 +
+ drivers/scsi/hpsa.c     | 3531 +++++++++++++++++++++++++++++++++++++++++++++++
+ drivers/scsi/hpsa.h     |  273 ++++
+ drivers/scsi/hpsa_cmd.h |  326 +++++
+ 5 files changed, 4141 insertions(+), 0 deletions(-)
+ create mode 100644 drivers/scsi/hpsa.c
+ create mode 100644 drivers/scsi/hpsa.h
+ create mode 100644 drivers/scsi/hpsa_cmd.h
+
+diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
+index 36900c7..9191d1e 100644
+--- a/drivers/scsi/Kconfig
++++ b/drivers/scsi/Kconfig
+@@ -388,6 +388,16 @@ config BLK_DEV_3W_XXXX_RAID
+ 	  Please read the comments at the top of
+ 	  <file:drivers/scsi/3w-xxxx.c>.
+ 
++config SCSI_HPSA
++	tristate "HP Smart Array SCSI driver"
++	depends on PCI && SCSI
++	help
++	  This driver supports HP Smart Array Controllers (circa 2009).
++	  It is a SCSI alternative to the cciss driver, which is a block
++	  driver.  Anyone wishing to use HP Smart Array controllers who
++	  would prefer the devices be presented to linux as SCSI devices,
++	  rather than as generic block devices, should say Y here.
++
+ config SCSI_3W_9XXX
+ 	tristate "3ware 9xxx SATA-RAID support"
+ 	depends on PCI && SCSI
+diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
+index 280d3c6..92a8c50 100644
+--- a/drivers/scsi/Makefile
++++ b/drivers/scsi/Makefile
+@@ -91,6 +91,7 @@ obj-$(CONFIG_SCSI_BFA_FC)	+= bfa/
+ obj-$(CONFIG_SCSI_PAS16)	+= pas16.o
+ obj-$(CONFIG_SCSI_T128)		+= t128.o
+ obj-$(CONFIG_SCSI_DMX3191D)	+= dmx3191d.o
++obj-$(CONFIG_SCSI_HPSA)		+= hpsa.o
+ obj-$(CONFIG_SCSI_DTC3280)	+= dtc.o
+ obj-$(CONFIG_SCSI_SYM53C8XX_2)	+= sym53c8xx_2/
+ obj-$(CONFIG_SCSI_ZALON)	+= zalon7xx.o
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+new file mode 100644
+index 0000000..bb96fdd
+--- /dev/null
++++ b/drivers/scsi/hpsa.c
+@@ -0,0 +1,3531 @@
++/*
++ *    Disk Array driver for HP Smart Array SAS controllers
++ *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
++ *
++ *    This program is free software; you can redistribute it and/or modify
++ *    it under the terms of the GNU General Public License as published by
++ *    the Free Software Foundation; version 2 of the License.
++ *
++ *    This program is distributed in the hope that it will be useful,
++ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
++ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
++ *
++ *    You should have received a copy of the GNU General Public License
++ *    along with this program; if not, write to the Free Software
++ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ *    Questions/Comments/Bugfixes to iss_storagedev at hp.com
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/types.h>
++#include <linux/pci.h>
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/timer.h>
++#include <linux/seq_file.h>
++#include <linux/init.h>
++#include <linux/spinlock.h>
++#include <linux/smp_lock.h>
++#include <linux/compat.h>
++#include <linux/blktrace_api.h>
++#include <linux/uaccess.h>
++#include <linux/io.h>
++#include <linux/dma-mapping.h>
++#include <linux/completion.h>
++#include <linux/moduleparam.h>
++#include <scsi/scsi.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_device.h>
++#include <scsi/scsi_host.h>
++#include <linux/cciss_ioctl.h>
++#include <linux/string.h>
++#include <linux/bitmap.h>
++#include <asm/atomic.h>
++#include <linux/kthread.h>
++#include "hpsa_cmd.h"
++#include "hpsa.h"
++
++/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
++#define HPSA_DRIVER_VERSION "1.0.0"
++#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
++
++/* How long to wait (in milliseconds) for board to go into simple mode */
++#define MAX_CONFIG_WAIT 30000
++#define MAX_IOCTL_CONFIG_WAIT 1000
++
++/*define how many times we will try a command because of bus resets */
++#define MAX_CMD_RETRIES 3
++
++/* Embedded module documentation macros - see modules.h */
++MODULE_AUTHOR("Hewlett-Packard Company");
++MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
++	HPSA_DRIVER_VERSION);
++MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
++MODULE_VERSION(HPSA_DRIVER_VERSION);
++MODULE_LICENSE("GPL");
++
++static int hpsa_allow_any;
++module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
++MODULE_PARM_DESC(hpsa_allow_any,
++		"Allow hpsa driver to access unknown HP Smart Array hardware");
++
++/* define the PCI info for the cards we can control */
++static const struct pci_device_id hpsa_pci_device_id[] = {
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3223},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3234},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x323D},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324a},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324b},
++	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,             PCI_ANY_ID, PCI_ANY_ID,
++		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
++	{0,}
++};
++
++MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
++
++/*  board_id = Subsystem Device ID & Vendor ID
++ *  product = Marketing Name for the board
++ *  access = Address of the struct of function pointers
++ */
++static struct board_type products[] = {
++	{0x3223103C, "Smart Array P800", &SA5_access},
++	{0x3234103C, "Smart Array P400", &SA5_access},
++	{0x323d103c, "Smart Array P700M", &SA5_access},
++	{0x3241103C, "Smart Array P212", &SA5_access},
++	{0x3243103C, "Smart Array P410", &SA5_access},
++	{0x3245103C, "Smart Array P410i", &SA5_access},
++	{0x3247103C, "Smart Array P411", &SA5_access},
++	{0x3249103C, "Smart Array P812", &SA5_access},
++	{0x324a103C, "Smart Array P712m", &SA5_access},
++	{0x324b103C, "Smart Array P711m", &SA5_access},
++	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
++};
++
++static int number_of_controllers;
++
++static irqreturn_t do_hpsa_intr(int irq, void *dev_id);
++static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
++static void start_io(struct ctlr_info *h);
++
++#ifdef CONFIG_COMPAT
++static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
++#endif
++
++static void cmd_free(struct ctlr_info *h, struct CommandList *c);
++static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
++static struct CommandList *cmd_alloc(struct ctlr_info *h);
++static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
++static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
++	void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
++	int cmd_type);
++
++static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
++		void (*done)(struct scsi_cmnd *));
++
++static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
++static int hpsa_slave_alloc(struct scsi_device *sdev);
++static void hpsa_slave_destroy(struct scsi_device *sdev);
++
++static ssize_t raid_level_show(struct device *dev,
++	struct device_attribute *attr, char *buf);
++static ssize_t lunid_show(struct device *dev,
++	struct device_attribute *attr, char *buf);
++static ssize_t unique_id_show(struct device *dev,
++	struct device_attribute *attr, char *buf);
++static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
++static ssize_t host_store_rescan(struct device *dev,
++	 struct device_attribute *attr, const char *buf, size_t count);
++static int check_for_unit_attention(struct ctlr_info *h,
++	struct CommandList *c);
++static void check_ioctl_unit_attention(struct ctlr_info *h,
++	struct CommandList *c);
++
++static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
++static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
++static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
++static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
++
++static struct device_attribute *hpsa_sdev_attrs[] = {
++	&dev_attr_raid_level,
++	&dev_attr_lunid,
++	&dev_attr_unique_id,
++	NULL,
++};
++
++static struct device_attribute *hpsa_shost_attrs[] = {
++	&dev_attr_rescan,
++	NULL,
++};
++
++static struct scsi_host_template hpsa_driver_template = {
++	.module			= THIS_MODULE,
++	.name			= "hpsa",
++	.proc_name		= "hpsa",
++	.queuecommand		= hpsa_scsi_queue_command,
++	.can_queue		= 512,
++	.this_id		= -1,
++	.sg_tablesize		= MAXSGENTRIES,
++	.cmd_per_lun		= 512,
++	.use_clustering		= ENABLE_CLUSTERING,
++	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
++	.ioctl			= hpsa_ioctl,
++	.slave_alloc		= hpsa_slave_alloc,
++	.slave_destroy		= hpsa_slave_destroy,
++#ifdef CONFIG_COMPAT
++	.compat_ioctl		= hpsa_compat_ioctl,
++#endif
++	.sdev_attrs = hpsa_sdev_attrs,
++	.shost_attrs = hpsa_shost_attrs,
++};
++
++static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
++{
++	unsigned long *priv = shost_priv(sdev->host);
++	return (struct ctlr_info *) *priv;
++}
++
++static struct task_struct *hpsa_scan_thread;
++static DEFINE_MUTEX(hpsa_scan_mutex);
++static LIST_HEAD(hpsa_scan_q);
++static int hpsa_scan_func(void *data);
++
++/**
++ * add_to_scan_list() - add controller to rescan queue
++ * @h:		      Pointer to the controller.
++ *
++ * Adds the controller to the rescan queue if not already on the queue.
++ *
++ * returns 1 if added to the queue, 0 if skipped (could be on the
++ * queue already, or the controller could be initializing or shutting
++ * down).
++ **/
++static int add_to_scan_list(struct ctlr_info *h)
++{
++	struct ctlr_info *test_h;
++	int found = 0;
++	int ret = 0;
++
++	if (h->busy_initializing)
++		return 0;
++
++	/*
++	 * If we don't get the lock, it means the driver is unloading
++	 * and there's no point in scheduling a new scan.
++	 */
++	if (!mutex_trylock(&h->busy_shutting_down))
++		return 0;
++
++	mutex_lock(&hpsa_scan_mutex);
++	list_for_each_entry(test_h, &hpsa_scan_q, scan_list) {
++		if (test_h == h) {
++			found = 1;
++			break;
++		}
++	}
++	if (!found && !h->busy_scanning) {
++		INIT_COMPLETION(h->scan_wait);
++		list_add_tail(&h->scan_list, &hpsa_scan_q);
++		ret = 1;
++	}
++	mutex_unlock(&hpsa_scan_mutex);
++	mutex_unlock(&h->busy_shutting_down);
++
++	return ret;
++}
++
++/**
++ * remove_from_scan_list() - remove controller from rescan queue
++ * @h:			   Pointer to the controller.
++ *
++ * Removes the controller from the rescan queue if present. Blocks if
++ * the controller is currently conducting a rescan.  The controller
++ * can be in one of three states:
++ * 1. Doesn't need a scan
++ * 2. On the scan list, but not scanning yet (we remove it)
++ * 3. Busy scanning (and not on the list). In this case we want to wait for
++ *    the scan to complete to make sure the scanning thread for this
++ *    controller is completely idle.
++ **/
++static void remove_from_scan_list(struct ctlr_info *h)
++{
++	struct ctlr_info *test_h, *tmp_h;
++
++	mutex_lock(&hpsa_scan_mutex);
++	list_for_each_entry_safe(test_h, tmp_h, &hpsa_scan_q, scan_list) {
++		if (test_h == h) { /* state 2. */
++			list_del(&h->scan_list);
++			complete_all(&h->scan_wait);
++			mutex_unlock(&hpsa_scan_mutex);
++			return;
++		}
++	}
++	if (h->busy_scanning) { /* state 3. */
++		mutex_unlock(&hpsa_scan_mutex);
++		wait_for_completion(&h->scan_wait);
++	} else { /* state 1, nothing to do. */
++		mutex_unlock(&hpsa_scan_mutex);
++	}
++}
++
++/* hpsa_scan_func() - kernel thread used to rescan controllers
++ * @data:	 Ignored.
++ *
++ * A kernel thread used to scan for drive topology changes on
++ * controllers. The thread processes only one controller at a time
++ * using a queue.  Controllers are added to the queue using
++ * add_to_scan_list() and removed from the queue either after done
++ * processing or using remove_from_scan_list().
++ *
++ * returns 0.
++ **/
++static int hpsa_scan_func(__attribute__((unused)) void *data)
++{
++	struct ctlr_info *h;
++	int host_no;
++
++	while (1) {
++		set_current_state(TASK_INTERRUPTIBLE);
++		schedule();
++		if (kthread_should_stop())
++			break;
++
++		while (1) {
++			mutex_lock(&hpsa_scan_mutex);
++			if (list_empty(&hpsa_scan_q)) {
++				mutex_unlock(&hpsa_scan_mutex);
++				break;
++			}
++			h = list_entry(hpsa_scan_q.next, struct ctlr_info,
++					scan_list);
++			list_del(&h->scan_list);
++			h->busy_scanning = 1;
++			mutex_unlock(&hpsa_scan_mutex);
++			host_no = h->scsi_host ?  h->scsi_host->host_no : -1;
++			hpsa_update_scsi_devices(h, host_no);
++			complete_all(&h->scan_wait);
++			mutex_lock(&hpsa_scan_mutex);
++			h->busy_scanning = 0;
++			mutex_unlock(&hpsa_scan_mutex);
++		}
++	}
++	return 0;
++}
++
++static int check_for_unit_attention(struct ctlr_info *h,
++	struct CommandList *c)
++{
++	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
++		return 0;
++
++	switch (c->err_info->SenseInfo[12]) {
++	case STATE_CHANGED:
++		dev_warn(&h->pdev->dev, "hpsa%d: a state change "
++			"detected, command retried\n", h->ctlr);
++		break;
++	case LUN_FAILED:
++		dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
++			"detected, action required\n", h->ctlr);
++		break;
++	case REPORT_LUNS_CHANGED:
++		dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
++			"changed\n", h->ctlr);
++	/*
++	 * Here, we could call add_to_scan_list and wake up the scan thread,
++	 * except that it's quite likely that we will get more than one
++	 * REPORT_LUNS_CHANGED condition in quick succession, which means
++	 * that those which occur after the first one will likely happen
++	 * *during* the hpsa_scan_thread's rescan.  And the rescan code is not
++	 * robust enough to restart in the middle, undoing what it has already
++	 * done, and it's not clear that it's even possible to do this, since
++	 * part of what it does is notify the SCSI mid layer, which starts
++	 * doing its own i/o to read partition tables and so on, and the
++	 * driver doesn't have visibility to know what might need undoing.
++	 * In any event, if possible, it is horribly complicated to get right
++	 * so we just don't do it for now.
++	 *
++	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
++	 */
++		break;
++	case POWER_OR_RESET:
++		dev_warn(&h->pdev->dev, "hpsa%d: a power on "
++			"or device reset detected\n", h->ctlr);
++		break;
++	case UNIT_ATTENTION_CLEARED:
++		dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
++		    "cleared by another initiator\n", h->ctlr);
++		break;
++	default:
++		dev_warn(&h->pdev->dev, "hpsa%d: unknown "
++			"unit attention detected\n", h->ctlr);
++		break;
++	}
++	return 1;
++}
++
++static ssize_t host_store_rescan(struct device *dev,
++				 struct device_attribute *attr,
++				 const char *buf, size_t count)
++{
++	struct ctlr_info *h;
++	struct Scsi_Host *shost = class_to_shost(dev);
++	unsigned long *priv = shost_priv(shost);
++	h = (struct ctlr_info *) *priv;
++	if (add_to_scan_list(h)) {
++		wake_up_process(hpsa_scan_thread);
++		wait_for_completion_interruptible(&h->scan_wait);
++	}
++	return count;
++}
++
++/* Enqueuing and dequeuing functions for cmdlists. */
++static inline void addQ(struct hlist_head *list, struct CommandList *c)
++{
++	hlist_add_head(&c->list, list);
++}
++
++static void enqueue_cmd_and_start_io(struct ctlr_info *h,
++	struct CommandList *c)
++{
++	unsigned long flags;
++	spin_lock_irqsave(&h->lock, flags);
++	addQ(&h->reqQ, c);
++	h->Qdepth++;
++	start_io(h);
++	spin_unlock_irqrestore(&h->lock, flags);
++}
++
++static inline void removeQ(struct CommandList *c)
++{
++	if (WARN_ON(hlist_unhashed(&c->list)))
++		return;
++	hlist_del_init(&c->list);
++}
++
++static inline int is_hba_lunid(unsigned char scsi3addr[])
++{
++	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
++}
++
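++/* CISS 8-byte LUN addressing: bits 7:6 of byte 3 select the address
++ * mode; 01b (hence the 0x40 test below) marks a logical volume.
++ */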
++static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
++{
++	return (scsi3addr[3] & 0xC0) == 0x40;
++}
++
++static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
++	"UNKNOWN"
++};
++#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
++
++static ssize_t raid_level_show(struct device *dev,
++	     struct device_attribute *attr, char *buf)
++{
++	ssize_t l = 0;
++	int rlevel;
++	struct ctlr_info *h;
++	struct scsi_device *sdev;
++	struct hpsa_scsi_dev_t *hdev;
++	unsigned long flags;
++
++	sdev = to_scsi_device(dev);
++	h = sdev_to_hba(sdev);
++	spin_lock_irqsave(&h->lock, flags);
++	hdev = sdev->hostdata;
++	if (!hdev) {
++		spin_unlock_irqrestore(&h->lock, flags);
++		return -ENODEV;
++	}
++
++	/* Is this even a logical drive? */
++	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
++		spin_unlock_irqrestore(&h->lock, flags);
++		l = snprintf(buf, PAGE_SIZE, "N/A\n");
++		return l;
++	}
++
++	rlevel = hdev->raid_level;
++	spin_unlock_irqrestore(&h->lock, flags);
++	if (rlevel < 0 || rlevel > RAID_UNKNOWN)
++		rlevel = RAID_UNKNOWN;
++	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
++	return l;
++}
++
++static ssize_t lunid_show(struct device *dev,
++	     struct device_attribute *attr, char *buf)
++{
++	struct ctlr_info *h;
++	struct scsi_device *sdev;
++	struct hpsa_scsi_dev_t *hdev;
++	unsigned long flags;
++	unsigned char lunid[8];
++
++	sdev = to_scsi_device(dev);
++	h = sdev_to_hba(sdev);
++	spin_lock_irqsave(&h->lock, flags);
++	hdev = sdev->hostdata;
++	if (!hdev) {
++		spin_unlock_irqrestore(&h->lock, flags);
++		return -ENODEV;
++	}
++	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
++	spin_unlock_irqrestore(&h->lock, flags);
++	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
++		lunid[0], lunid[1], lunid[2], lunid[3],
++		lunid[4], lunid[5], lunid[6], lunid[7]);
++}
++
++static ssize_t unique_id_show(struct device *dev,
++	     struct device_attribute *attr, char *buf)
++{
++	struct ctlr_info *h;
++	struct scsi_device *sdev;
++	struct hpsa_scsi_dev_t *hdev;
++	unsigned long flags;
++	unsigned char sn[16];
++
++	sdev = to_scsi_device(dev);
++	h = sdev_to_hba(sdev);
++	spin_lock_irqsave(&h->lock, flags);
++	hdev = sdev->hostdata;
++	if (!hdev) {
++		spin_unlock_irqrestore(&h->lock, flags);
++		return -ENODEV;
++	}
++	memcpy(sn, hdev->device_id, sizeof(sn));
++	spin_unlock_irqrestore(&h->lock, flags);
++	return snprintf(buf, 16 * 2 + 2,
++			"%02X%02X%02X%02X%02X%02X%02X%02X"
++			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
++			sn[0], sn[1], sn[2], sn[3],
++			sn[4], sn[5], sn[6], sn[7],
++			sn[8], sn[9], sn[10], sn[11],
++			sn[12], sn[13], sn[14], sn[15]);
++}
++
++static int hpsa_find_target_lun(struct ctlr_info *h,
++	unsigned char scsi3addr[], int bus, int *target, int *lun)
++{
++	/* finds an unused bus, target, lun for a new physical device
++	 * assumes h->devlock is held
++	 */
++	int i, found = 0;
++	DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
++
++	memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);
++
++	for (i = 0; i < h->ndevices; i++) {
++		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
++			set_bit(h->dev[i]->target, lun_taken);
++	}
++
++	for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
++		if (!test_bit(i, lun_taken)) {
++			/* *bus = 1; */
++			*target = i;
++			*lun = 0;
++			found = 1;
++			break;
++		}
++	}
++	return !found;
++}
++
++/* Add an entry into h->dev[] array. */
++static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
++		struct hpsa_scsi_dev_t *device,
++		struct hpsa_scsi_dev_t *added[], int *nadded)
++{
++	/* assumes h->devlock is held */
++	int n = h->ndevices;
++	int i;
++	unsigned char addr1[8], addr2[8];
++	struct hpsa_scsi_dev_t *sd;
++
++	if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
++		dev_err(&h->pdev->dev, "too many devices, some will be "
++			"inaccessible.\n");
++		return -1;
++	}
++
++	/* physical devices do not have lun or target assigned until now. */
++	if (device->lun != -1)
++		/* Logical device, lun is already assigned. */
++		goto lun_assigned;
++
++	/* If this device is a non-zero lun of a multi-lun device,
++	 * byte 4 of the 8-byte LUN addr will contain the logical
++	 * unit no, zero otherwise.
++	 */
++	if (device->scsi3addr[4] == 0) {
++		/* This is not a non-zero lun of a multi-lun device */
++		if (hpsa_find_target_lun(h, device->scsi3addr,
++			device->bus, &device->target, &device->lun) != 0)
++			return -1;
++		goto lun_assigned;
++	}
++
++	/* This is a non-zero lun of a multi-lun device.
++	 * Search through our list and find the device which
++	 * has the same 8 byte LUN address, excepting byte 4.
++	 * Assign the same bus and target for this new LUN.
++	 * Use the logical unit number from the firmware.
++	 */
++	memcpy(addr1, device->scsi3addr, 8);
++	addr1[4] = 0;
++	for (i = 0; i < n; i++) {
++		sd = h->dev[i];
++		memcpy(addr2, sd->scsi3addr, 8);
++		addr2[4] = 0;
++		/* differ only in byte 4? */
++		if (memcmp(addr1, addr2, 8) == 0) {
++			device->bus = sd->bus;
++			device->target = sd->target;
++			device->lun = device->scsi3addr[4];
++			break;
++		}
++	}
++	if (device->lun == -1) {
++		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
++			" suspect firmware bug or unsupported hardware "
++			"configuration.\n");
++		return -1;
++	}
++
++lun_assigned:
++
++	h->dev[n] = device;
++	h->ndevices++;
++	added[*nadded] = device;
++	(*nadded)++;
++
++	/* initially, (before registering with scsi layer) we don't
++	 * know our hostno and we don't want to print anything first
++	 * time anyway (the scsi layer's inquiries will show that info)
++	 */
++	/* if (hostno != -1) */
++		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
++			scsi_device_type(device->devtype), hostno,
++			device->bus, device->target, device->lun);
++	return 0;
++}
++
++/* Remove an entry from h->dev[] array. */
++static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
++	struct hpsa_scsi_dev_t *removed[], int *nremoved)
++{
++	/* assumes h->devlock is held */
++	int i;
++	struct hpsa_scsi_dev_t *sd;
++
++	if (entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA)
++		BUG();
++
++	sd = h->dev[entry];
++	removed[*nremoved] = h->dev[entry];
++	(*nremoved)++;
++
++	for (i = entry; i < h->ndevices-1; i++)
++		h->dev[i] = h->dev[i+1];
++	h->ndevices--;
++	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
++		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
++		sd->lun);
++}
++
++#define SCSI3ADDR_EQ(a, b) ( \
++	(a)[7] == (b)[7] && \
++	(a)[6] == (b)[6] && \
++	(a)[5] == (b)[5] && \
++	(a)[4] == (b)[4] && \
++	(a)[3] == (b)[3] && \
++	(a)[2] == (b)[2] && \
++	(a)[1] == (b)[1] && \
++	(a)[0] == (b)[0])
++
++static void fixup_botched_add(struct ctlr_info *h,
++	struct hpsa_scsi_dev_t *added)
++{
++	/* called when scsi_add_device fails in order to re-adjust
++	 * h->dev[] to match the mid layer's view.
++	 */
++	unsigned long flags;
++	int i, j;
++
++	spin_lock_irqsave(&h->lock, flags);
++	for (i = 0; i < h->ndevices; i++) {
++		if (h->dev[i] == added) {
++			for (j = i; j < h->ndevices-1; j++)
++				h->dev[j] = h->dev[j+1];
++			h->ndevices--;
++			break;
++		}
++	}
++	spin_unlock_irqrestore(&h->lock, flags);
++	kfree(added);
++}
++
++static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
++	struct hpsa_scsi_dev_t *dev2)
++{
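++	/* 0x0C is the RAID (controller) peripheral device type; those
++	 * fall through to the field-by-field comparison below.
++	 */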
++	if ((is_logical_dev_addr_mode(dev1->scsi3addr) ||
++		(dev1->lun != -1 && dev2->lun != -1)) &&
++		dev1->devtype != 0x0C)
++		return (memcmp(dev1, dev2, sizeof(*dev1)) == 0);
++
++	/* we compare everything except lun and target as these
++	 * are not yet assigned.  Compare parts likely
++	 * to differ first
++	 */
++	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
++		sizeof(dev1->scsi3addr)) != 0)
++		return 0;
++	if (memcmp(dev1->device_id, dev2->device_id,
++		sizeof(dev1->device_id)) != 0)
++		return 0;
++	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
++		return 0;
++	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
++		return 0;
++	if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0)
++		return 0;
++	if (dev1->devtype != dev2->devtype)
++		return 0;
++	if (dev1->raid_level != dev2->raid_level)
++		return 0;
++	if (dev1->bus != dev2->bus)
++		return 0;
++	return 1;
++}
++
++/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
++ * and return needle location in *index.  If scsi3addr matches, but not
++ * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
++ * location in *index.  If needle not found, return DEVICE_NOT_FOUND.
++ */
++static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
++	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
++	int *index)
++{
++	int i;
++#define DEVICE_NOT_FOUND 0
++#define DEVICE_CHANGED 1
++#define DEVICE_SAME 2
++	for (i = 0; i < haystack_size; i++) {
++		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
++			*index = i;
++			if (device_is_the_same(needle, haystack[i]))
++				return DEVICE_SAME;
++			else
++				return DEVICE_CHANGED;
++		}
++	}
++	*index = -1;
++	return DEVICE_NOT_FOUND;
++}
++
++static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
++	struct hpsa_scsi_dev_t *sd[], int nsds)
++{
++	/* sd contains scsi3 addresses and devtypes, and inquiry
++	 * data.  This function takes what's in sd to be the current
++	 * reality and updates h->dev[] to reflect that reality.
++	 */
++	int i, entry, device_change, changes = 0;
++	struct hpsa_scsi_dev_t *csd;
++	unsigned long flags;
++	struct hpsa_scsi_dev_t **added, **removed;
++	int nadded, nremoved;
++	struct Scsi_Host *sh = NULL;
++
++	added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
++		GFP_KERNEL);
++	removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
++		GFP_KERNEL);
++
++	if (!added || !removed) {
++		dev_warn(&h->pdev->dev, "out of memory in "
++			"adjust_hpsa_scsi_table\n");
++		goto free_and_out;
++	}
++
++	spin_lock_irqsave(&h->devlock, flags);
++
++	/* find any devices in h->dev[] that are not in
++	 * sd[] and remove them from h->dev[], and for any
++	 * devices which have changed, remove the old device
++	 * info and add the new device info.
++	 */
++	i = 0;
++	nremoved = 0;
++	nadded = 0;
++	while (i < h->ndevices) {
++		csd = h->dev[i];
++		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
++		if (device_change == DEVICE_NOT_FOUND) {
++			changes++;
++			hpsa_scsi_remove_entry(h, hostno, i,
++				removed, &nremoved);
++			continue; /* remove ^^^, hence i not incremented */
++		} else if (device_change == DEVICE_CHANGED) {
++			changes++;
++			hpsa_scsi_remove_entry(h, hostno, i,
++				removed, &nremoved);
++			(void) hpsa_scsi_add_entry(h, hostno, sd[entry],
++				added, &nadded);
++			/* add can't fail, we just removed one. */
++			sd[entry] = NULL; /* prevent it from being freed */
++		}
++		i++;
++	}
++
++	/* Now, make sure every device listed in sd[] is also
++	 * listed in h->dev[], adding them if they aren't found
++	 */
++
++	for (i = 0; i < nsds; i++) {
++		if (!sd[i]) /* if already added above. */
++			continue;
++		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
++					h->ndevices, &entry);
++		if (device_change == DEVICE_NOT_FOUND) {
++			changes++;
++			if (hpsa_scsi_add_entry(h, hostno, sd[i],
++				added, &nadded) != 0)
++				break;
++			sd[i] = NULL; /* prevent from being freed later. */
++		} else if (device_change == DEVICE_CHANGED) {
++			/* should never happen... */
++			changes++;
++			dev_warn(&h->pdev->dev,
++				"device unexpectedly changed.\n");
++			/* but if it does happen, we just ignore that device */
++		}
++	}
++	spin_unlock_irqrestore(&h->devlock, flags);
++
++	/* Don't notify the scsi mid layer of any changes the first time
++	 * through (or if there are no changes); scsi_scan_host will do it
++	 * later the first time through.
++	 */
++	if (hostno == -1 || !changes)
++		goto free_and_out;
++
++	sh = h->scsi_host;
++	/* Notify scsi mid layer of any removed devices */
++	for (i = 0; i < nremoved; i++) {
++		struct scsi_device *sdev =
++			scsi_device_lookup(sh, removed[i]->bus,
++				removed[i]->target, removed[i]->lun);
++		if (sdev != NULL) {
++			scsi_remove_device(sdev);
++			scsi_device_put(sdev);
++		} else {
++			/* We don't expect to get here.
++			 * Future cmds to this device will get a selection
++			 * timeout as if the device were gone.
++			 */
++			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
++				"for removal\n", hostno, removed[i]->bus,
++				removed[i]->target, removed[i]->lun);
++		}
++		kfree(removed[i]);
++		removed[i] = NULL;
++	}
++
++	/* Notify scsi mid layer of any added devices */
++	for (i = 0; i < nadded; i++) {
++		if (scsi_add_device(sh, added[i]->bus,
++			added[i]->target, added[i]->lun) == 0)
++			continue;
++		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
++			"device not added.\n", hostno, added[i]->bus,
++			added[i]->target, added[i]->lun);
++		/* now we have to remove it from h->dev,
++		 * since it didn't get added to scsi mid layer
++		 */
++		fixup_botched_add(h, added[i]);
++	}
++
++free_and_out:
++	kfree(added);
++	kfree(removed);
++	return 0;
++}
++
++/*
++ * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
++ * Assumes h->devlock is held.
++ */
++static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
++	int bus, int target, int lun)
++{
++	int i;
++	struct hpsa_scsi_dev_t *sd;
++
++	for (i = 0; i < h->ndevices; i++) {
++		sd = h->dev[i];
++		if (sd->bus == bus && sd->target == target && sd->lun == lun)
++			return sd;
++	}
++	return NULL;
++}
++
++/* link sdev->hostdata to our per-device structure. */
++static int hpsa_slave_alloc(struct scsi_device *sdev)
++{
++	struct hpsa_scsi_dev_t *sd;
++	unsigned long flags;
++	struct ctlr_info *h;
++
++	h = sdev_to_hba(sdev);
++	spin_lock_irqsave(&h->devlock, flags);
++	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
++		sdev_id(sdev), sdev->lun);
++	if (sd != NULL)
++		sdev->hostdata = sd;
++	spin_unlock_irqrestore(&h->devlock, flags);
++	return 0;
++}
++
++static void hpsa_slave_destroy(struct scsi_device *sdev)
++{
++	return; /* nothing to do. */
++}
++
++static void hpsa_scsi_setup(struct ctlr_info *h)
++{
++	h->ndevices = 0;
++	h->scsi_host = NULL;
++	spin_lock_init(&h->devlock);
++	return;
++}
++
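++/* Translate the controller's ErrorInfo into a scsi mid layer result.
++ * cmd->result packs (host byte << 16) | (msg byte << 8) | SAM status,
++ * so, for example, DID_OK with CHECK CONDITION comes out as 0x000002.
++ */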
++static void complete_scsi_command(struct CommandList *cp,
++	int timeout, __u32 tag)
++{
++	struct scsi_cmnd *cmd;
++	struct ctlr_info *h;
++	struct ErrorInfo *ei;
++
++	unsigned char sense_key;
++	unsigned char asc;      /* additional sense code */
++	unsigned char ascq;     /* additional sense code qualifier */
++
++	ei = cp->err_info;
++	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
++	h = cp->h;
++
++	scsi_dma_unmap(cmd); /* undo the DMA mappings */
++
++	cmd->result = (DID_OK << 16); 		/* host byte */
++	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
++	cmd->result |= ei->ScsiStatus;		/* SAM status in the low byte */
++
++	/* copy the sense data whether we need to or not. */
++	memcpy(cmd->sense_buffer, ei->SenseInfo,
++		ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
++			SCSI_SENSE_BUFFERSIZE :
++			ei->SenseLen);
++	scsi_set_resid(cmd, ei->ResidualCnt);
++
++	if (ei->CommandStatus == 0) {
++		cmd->scsi_done(cmd);
++		cmd_free(h, cp);
++		return;
++	}
++
++	/* an error has occurred */
++	switch (ei->CommandStatus) {
++
++	case CMD_TARGET_STATUS:
++		if (ei->ScsiStatus) {
++			/* Get sense key */
++			sense_key = 0xf & ei->SenseInfo[2];
++			/* Get additional sense code */
++			asc = ei->SenseInfo[12];
++			/* Get additional sense code qualifier */
++			ascq = ei->SenseInfo[13];
++		}
++
++		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
++			if (check_for_unit_attention(h, cp)) {
++				cmd->result = DID_SOFT_ERROR << 16;
++				break;
++			}
++			if (sense_key == ILLEGAL_REQUEST) {
++				/*
++				 * SCSI REPORT_LUNS is commonly unsupported on
++				 * Smart Array.  Suppress noisy complaint.
++				 */
++				if (cp->Request.CDB[0] == REPORT_LUNS)
++					break;
++
++				/* ASC/ASCQ 0x25/0x00 indicates a Logical
++				 * Unit Not Supported condition.
++				 */
++				if ((asc == 0x25) && (ascq == 0x0)) {
++					dev_warn(&h->pdev->dev, "cp %p "
++						"has check condition\n", cp);
++					break;
++				}
++			}
++
++			if (sense_key == NOT_READY) {
++				/* ASC/ASCQ 0x04/0x03: Logical Unit Not
++				 * Ready, Manual Intervention Required.
++				 */
++				if ((asc == 0x04) && (ascq == 0x03)) {
++					cmd->result = DID_NO_CONNECT << 16;
++					dev_warn(&h->pdev->dev, "cp %p "
++						"has check condition: unit "
++						"not ready, manual "
++						"intervention required\n", cp);
++					break;
++				}
++			}
++
++			/* Must be some other type of check condition */
++			dev_warn(&h->pdev->dev, "cp %p has check condition: "
++					"unknown type: "
++					"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
++					"Returning result: 0x%x, "
++					"cmd=[%02x %02x %02x %02x %02x "
++					"%02x %02x %02x %02x %02x]\n",
++					cp, sense_key, asc, ascq,
++					cmd->result,
++					cmd->cmnd[0], cmd->cmnd[1],
++					cmd->cmnd[2], cmd->cmnd[3],
++					cmd->cmnd[4], cmd->cmnd[5],
++					cmd->cmnd[6], cmd->cmnd[7],
++					cmd->cmnd[8], cmd->cmnd[9]);
++			break;
++		}
++
++		/* Problem was not a check condition
++		 * Pass it up to the upper layers...
++		 */
++		if (ei->ScsiStatus) {
++			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
++				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
++				"Returning result: 0x%x\n",
++				cp, ei->ScsiStatus,
++				sense_key, asc, ascq,
++				cmd->result);
++		} else {  /* scsi status is zero??? How??? */
++			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
++				"Returning no connection.\n", cp);
++
++			/* Ordinarily, this case should never happen,
++			 * but there is a bug in some released firmware
++			 * revisions that allows it to happen if, for
++			 * example, a 4100 backplane loses power and
++			 * the tape drive is in it.  We assume that
++			 * it's a fatal error of some kind because we
++			 * can't show that it wasn't. We will make it
++			 * look like selection timeout since that is
++			 * the most common reason for this to occur,
++			 * and it's severe enough.
++			 */
++
++			cmd->result = DID_NO_CONNECT << 16;
++		}
++		break;
++
++	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
++		break;
++	case CMD_DATA_OVERRUN:
++		dev_warn(&h->pdev->dev, "cp %p has"
++			" completed with data overrun "
++			"reported\n", cp);
++		break;
++	case CMD_INVALID: {
++		/* print_bytes(cp, sizeof(*cp), 1, 0);
++		print_cmd(cp); */
++		/* We get CMD_INVALID if you address a non-existent device
++		 * instead of a selection timeout (no response).  You will
++		 * see this if you yank out a drive, then try to access it.
++		 * This is kind of a shame because it means that any other
++		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
++		 * missing target. */
++		cmd->result = DID_NO_CONNECT << 16;
++	}
++		break;
++	case CMD_PROTOCOL_ERR:
++		dev_warn(&h->pdev->dev, "cp %p has "
++			"protocol error \n", cp);
++		break;
++	case CMD_HARDWARE_ERR:
++		cmd->result = DID_ERROR << 16;
++		dev_warn(&h->pdev->dev, "cp %p had  hardware error\n", cp);
++		break;
++	case CMD_CONNECTION_LOST:
++		cmd->result = DID_ERROR << 16;
++		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
++		break;
++	case CMD_ABORTED:
++		cmd->result = DID_ABORT << 16;
++		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
++				cp, ei->ScsiStatus);
++		break;
++	case CMD_ABORT_FAILED:
++		cmd->result = DID_ERROR << 16;
++		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
++		break;
++	case CMD_UNSOLICITED_ABORT:
++		cmd->result = DID_ABORT << 16;
++		dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited "
++			"abort\n", cp);
++		break;
++	case CMD_TIMEOUT:
++		cmd->result = DID_TIME_OUT << 16;
++		dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
++		break;
++	default:
++		cmd->result = DID_ERROR << 16;
++		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
++				cp, ei->CommandStatus);
++	}
++	cmd->scsi_done(cmd);
++	cmd_free(h, cp);
++}
++
++static int hpsa_scsi_detect(struct ctlr_info *h)
++{
++	struct Scsi_Host *sh;
++	int error;
++
++	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
++	if (sh == NULL)
++		goto fail;
++
++	sh->io_port = 0;
++	sh->n_io_port = 0;
++	sh->this_id = -1;
++	sh->max_channel = 3;
++	sh->max_cmd_len = MAX_COMMAND_SIZE;
++	sh->max_lun = HPSA_MAX_LUN;
++	sh->max_id = HPSA_MAX_LUN;
++	h->scsi_host = sh;
++	sh->hostdata[0] = (unsigned long) h;
++	sh->irq = h->intr[SIMPLE_MODE_INT];
++	sh->unique_id = sh->irq;
++	error = scsi_add_host(sh, &h->pdev->dev);
++	if (error)
++		goto fail_host_put;
++	scsi_scan_host(sh);
++	return 0;
++
++ fail_host_put:
++	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
++		" failed for controller %d\n", h->ctlr);
++	scsi_host_put(sh);
++	return -1;
++ fail:
++	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
++		" failed for controller %d\n", h->ctlr);
++	return -1;
++}
++
++static void hpsa_pci_unmap(struct pci_dev *pdev,
++	struct CommandList *c, int sg_used, int data_direction)
++{
++	int i;
++	union u64bit addr64;
++
++	for (i = 0; i < sg_used; i++) {
++		addr64.val32.lower = c->SG[i].Addr.lower;
++		addr64.val32.upper = c->SG[i].Addr.upper;
++		pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
++			data_direction);
++	}
++}
++
++static void hpsa_map_one(struct pci_dev *pdev,
++		struct CommandList *cp,
++		unsigned char *buf,
++		size_t buflen,
++		int data_direction)
++{
++	__u64 addr64;
++
++	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
++		cp->Header.SGList = 0;
++		cp->Header.SGTotal = 0;
++		return;
++	}
++
++	addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
++	cp->SG[0].Addr.lower =
++	  (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
++	cp->SG[0].Addr.upper =
++	  (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
++	cp->SG[0].Len = buflen;
++	cp->Header.SGList = (__u8) 1;   /* no. SGs contig in this cmd */
++	cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
++}
++
++static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
++	struct CommandList *c)
++{
++	DECLARE_COMPLETION_ONSTACK(wait);
++
++	c->waiting = &wait;
++	enqueue_cmd_and_start_io(h, c);
++	wait_for_completion(&wait);
++}
++
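++/* Issue c synchronously, re-issuing it (up to three retries) for as
++ * long as the target keeps reporting a unit attention, e.g. right
++ * after a reset.  The one data buffer mapped by hpsa_map_one() is
++ * unmapped on the way out.
++ */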
++static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
++	struct CommandList *c, int data_direction)
++{
++	int retry_count = 0;
++
++	do {
++		memset(c->err_info, 0, sizeof(*c->err_info));
++		hpsa_scsi_do_simple_cmd_core(h, c);
++		retry_count++;
++	} while (check_for_unit_attention(h, c) && retry_count <= 3);
++	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
++}
++
++static void hpsa_scsi_interpret_error(struct CommandList *cp)
++{
++	struct ErrorInfo *ei;
++	struct device *d = &cp->h->pdev->dev;
++
++	ei = cp->err_info;
++	switch (ei->CommandStatus) {
++	case CMD_TARGET_STATUS:
++		dev_warn(d, "cmd %p has completed with errors\n", cp);
++		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
++				ei->ScsiStatus);
++		if (ei->ScsiStatus == 0)
++			dev_warn(d, "SCSI status is abnormally zero.  "
++			"(probably indicates selection timeout "
++			"reported incorrectly due to a known "
++			"firmware bug, circa July, 2001.)\n");
++		break;
++	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
++			dev_info(d, "UNDERRUN\n");
++		break;
++	case CMD_DATA_OVERRUN:
++		dev_warn(d, "cp %p has completed with data overrun\n", cp);
++		break;
++	case CMD_INVALID: {
++		/* controller unfortunately reports SCSI passthru's
++		 * to non-existent targets as invalid commands.
++		 */
++		dev_warn(d, "cp %p is reported invalid (probably means "
++			"target device no longer present)\n", cp);
++		/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
++		print_cmd(cp);  */
++		}
++		break;
++	case CMD_PROTOCOL_ERR:
++		dev_warn(d, "cp %p has protocol error \n", cp);
++		break;
++	case CMD_HARDWARE_ERR:
++		/* cmd->result = DID_ERROR << 16; */
++		dev_warn(d, "cp %p had hardware error\n", cp);
++		break;
++	case CMD_CONNECTION_LOST:
++		dev_warn(d, "cp %p had connection lost\n", cp);
++		break;
++	case CMD_ABORTED:
++		dev_warn(d, "cp %p was aborted\n", cp);
++		break;
++	case CMD_ABORT_FAILED:
++		dev_warn(d, "cp %p reports abort failed\n", cp);
++		break;
++	case CMD_UNSOLICITED_ABORT:
++		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
++		break;
++	case CMD_TIMEOUT:
++		dev_warn(d, "cp %p timed out\n", cp);
++		break;
++	default:
++		dev_warn(d, "cp %p returned unknown status %x\n", cp,
++				ei->CommandStatus);
++	}
++}
++
++static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
++			unsigned char page, unsigned char *buf,
++			unsigned char bufsize)
++{
++	int rc = IO_OK;
++	struct CommandList *c;
++	struct ErrorInfo *ei;
++
++	c = cmd_special_alloc(h);
++
++	if (c == NULL) {			/* trouble... */
++		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
++		return -1;
++	}
++
++	fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
++	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
++	ei = c->err_info;
++	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
++		hpsa_scsi_interpret_error(c);
++		rc = -1;
++	}
++	cmd_special_free(h, c);
++	return rc;
++}
++
++static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
++{
++	int rc = IO_OK;
++	struct CommandList *c;
++	struct ErrorInfo *ei;
++
++	c = cmd_special_alloc(h);
++
++	if (c == NULL) {			/* trouble... */
++		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
++		return -1;
++	}
++
++	fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
++	hpsa_scsi_do_simple_cmd_core(h, c);
++	/* no unmap needed here because no data xfer. */
++
++	ei = c->err_info;
++	if (ei->CommandStatus != 0) {
++		hpsa_scsi_interpret_error(c);
++		rc = -1;
++	}
++	cmd_special_free(h, c);
++	return rc;
++}
++
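++/* Read the raid level from VPD page 0xC1 (a page in the SPC
++ * vendor-specific range); byte 8 of the response carries the level,
++ * and anything out of range is clamped to RAID_UNKNOWN.
++ */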
++static void hpsa_get_raid_level(struct ctlr_info *h,
++	unsigned char *scsi3addr, unsigned char *raid_level)
++{
++	int rc;
++	unsigned char *buf;
++
++	*raid_level = RAID_UNKNOWN;
++	buf = kzalloc(64, GFP_KERNEL);
++	if (!buf)
++		return;
++	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
++	if (rc == 0)
++		*raid_level = buf[8];
++	if (*raid_level > RAID_UNKNOWN)
++		*raid_level = RAID_UNKNOWN;
++	kfree(buf);
++	return;
++}
++
++/* Get the device id from inquiry page 0x83 */
++static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
++	unsigned char *device_id, int buflen)
++{
++	int rc;
++	unsigned char *buf;
++
++	if (buflen > 16)
++		buflen = 16;
++	buf = kzalloc(64, GFP_KERNEL);
++	if (!buf)
++		return -1;
++	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
++	if (rc == 0)
++		memcpy(device_id, &buf[8], buflen);
++	kfree(buf);
++	return rc != 0;
++}
++
++static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
++		struct ReportLUNdata *buf, int bufsize,
++		int extended_response)
++{
++	int rc = IO_OK;
++	struct CommandList *c;
++	unsigned char scsi3addr[8];
++	struct ErrorInfo *ei;
++
++	c = cmd_special_alloc(h);
++	if (c == NULL) {			/* trouble... */
++		dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
++		return -1;
++	}
++
++	memset(&scsi3addr[0], 0, 8); /* address the controller */
++
++	fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
++		buf, bufsize, 0, scsi3addr, TYPE_CMD);
++	if (extended_response)
++		c->Request.CDB[1] = extended_response;
++	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
++	ei = c->err_info;
++	if (ei->CommandStatus != 0 &&
++	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
++		hpsa_scsi_interpret_error(c);
++		rc = -1;
++	}
++	cmd_special_free(h, c);
++	return rc;
++}
++
++static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
++		struct ReportLUNdata *buf,
++		int bufsize, int extended_response)
++{
++	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
++}
++
++static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
++		struct ReportLUNdata *buf, int bufsize)
++{
++	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
++}
++
++static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
++	int bus, int target, int lun)
++{
++	device->bus = bus;
++	device->target = target;
++	device->lun = lun;
++}
++
++static int hpsa_update_device_info(struct ctlr_info *h,
++	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
++{
++#define OBDR_TAPE_INQ_SIZE 49
++	unsigned char *inq_buff = NULL;
++
++	inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
++	if (!inq_buff)
++		goto bail_out;
++
++	memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
++	/* Do an inquiry to the device to see what it is. */
++	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
++		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
++		/* Inquiry failed (msg printed already) */
++		dev_err(&h->pdev->dev,
++			"hpsa_update_device_info: inquiry failed\n");
++		goto bail_out;
++	}
++
++	/* As a side effect, record the firmware version number
++	 * if we happen to be talking to the RAID controller.
++	 */
++	if (is_hba_lunid(scsi3addr))
++		memcpy(h->firm_ver, &inq_buff[32], 4);
++
++	this_device->devtype = (inq_buff[0] & 0x1f);
++	memcpy(this_device->scsi3addr, scsi3addr, 8);
++	memcpy(this_device->vendor, &inq_buff[8],
++		sizeof(this_device->vendor));
++	memcpy(this_device->model, &inq_buff[16],
++		sizeof(this_device->model));
++	memcpy(this_device->revision, &inq_buff[32],
++		sizeof(this_device->revision));
++	memset(this_device->device_id, 0,
++		sizeof(this_device->device_id));
++	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
++		sizeof(this_device->device_id));
++
++	if (this_device->devtype == TYPE_DISK &&
++		is_logical_dev_addr_mode(scsi3addr))
++		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
++	else
++		this_device->raid_level = RAID_UNKNOWN;
++
++	kfree(inq_buff);
++	return 0;
++
++bail_out:
++	kfree(inq_buff);
++	return 1;
++}
++
++static unsigned char *msa2xxx_model[] = {
++	"MSA2012",
++	"MSA2024",
++	"MSA2312",
++	"MSA2324",
++	NULL,
++};
++
++static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
++{
++	int i;
++
++	for (i = 0; msa2xxx_model[i]; i++)
++		if (strncmp(device->model, msa2xxx_model[i],
++			strlen(msa2xxx_model[i])) == 0)
++			return 1;
++	return 0;
++}
++
++/* Helper function to assign bus, target, lun mapping of devices.
++ * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
++ * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
++ * Logical drive target and lun are assigned at this time, but
++ * physical device lun and target assignment are deferred (assigned
++ * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
++ */
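++/* For example, an MSA2xxx logical lunid of 0x00020005 maps to bus 1,
++ * target 2, lun 5, while a plain logical volume lunid of 0x00000003
++ * maps to bus 0, target 3, lun 0 (lunid is read little-endian from
++ * the first four LUN address bytes).
++ */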
++static void figure_bus_target_lun(struct ctlr_info *h,
++	__u8 *lunaddrbytes, int *bus, int *target, int *lun,
++	struct hpsa_scsi_dev_t *device)
++{
++	__u32 lunid;
++
++	if (is_logical_dev_addr_mode(lunaddrbytes)) {
++		/* logical device */
++		memcpy(&lunid, lunaddrbytes, sizeof(lunid));
++		lunid = le32_to_cpu(lunid);
++
++		if (is_msa2xxx(h, device)) {
++			*bus = 1;
++			*target = (lunid >> 16) & 0x3fff;
++			*lun = lunid & 0x00ff;
++		} else {
++			*bus = 0;
++			*lun = 0;
++			*target = lunid & 0x3fff;
++		}
++	} else {
++		/* physical device */
++		if (is_hba_lunid(lunaddrbytes))
++			*bus = 3;
++		else
++			*bus = 2;
++		*target = -1;
++		*lun = -1; /* we will fill these in later. */
++	}
++}
++
++/*
++ * If there is no lun 0 on a target, linux won't find any devices.
++ * For the MSA2xxx boxes, we have to manually detect the enclosure
++ * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
++ * it for some reason.  *tmpdevice is the target we're adding,
++ * this_device is a pointer into the current element of currentsd[]
++ * that we're building up in update_scsi_devices(), below.
++ * lunzerobits is a bitmap that tracks which targets already have a
++ * lun 0 assigned.
++ * Returns 1 if an enclosure was added, 0 if not.
++ */
++static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
++	struct hpsa_scsi_dev_t *tmpdevice,
++	struct hpsa_scsi_dev_t *this_device, __u8 *lunaddrbytes,
++	int bus, int target, int lun, unsigned long lunzerobits[],
++	int *nmsa2xxx_enclosures)
++{
++	unsigned char scsi3addr[8];
++
++	if (test_bit(target, lunzerobits))
++		return 0; /* There is already a lun 0 on this target. */
++
++	if (!is_logical_dev_addr_mode(lunaddrbytes))
++		return 0; /* It's the logical targets that may lack lun 0. */
++
++	if (!is_msa2xxx(h, tmpdevice))
++		return 0; /* It's only the MSA2xxx that have this problem. */
++
++	if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
++		return 0;
++
++	memset(scsi3addr, 0, 8);	/* build the target's lun 0 address */
++	scsi3addr[3] = target;
++	if (is_hba_lunid(scsi3addr))
++		return 0; /* Don't add the RAID controller here. */
++
++#define MAX_MSA2XXX_ENCLOSURES 32
++	if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
++		dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
++			"enclosures exceeded.  Check your hardware "
++			"configuration.");
++		return 0;
++	}
++
++	if (hpsa_update_device_info(h, scsi3addr, this_device))
++		return 0;
++	(*nmsa2xxx_enclosures)++;
++	hpsa_set_bus_target_lun(this_device, bus, target, 0);
++	set_bit(target, lunzerobits);
++	return 1;
++}
++
++/*
++ * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
++ * logdev.  The number of luns in physdev and logdev are returned in
++ * *nphysicals and *nlogicals, respectively.
++ * Returns 0 on success, -1 otherwise.
++ */
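++/* LUNListLength in the returned header is a big-endian byte count of
++ * the 8-byte LUN entries that follow, hence the be32_to_cpu(...) / 8
++ * below; e.g. a raw length of 0x18 means three LUNs were reported.
++ */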
++static int hpsa_gather_lun_info(struct ctlr_info *h,
++	int reportlunsize,
++	struct ReportLUNdata *physdev, __u32 *nphysicals,
++	struct ReportLUNdata *logdev, __u32 *nlogicals)
++{
++	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
++		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
++		return -1;
++	}
++	memcpy(nphysicals, &physdev->LUNListLength[0], sizeof(*nphysicals));
++	*nphysicals = be32_to_cpu(*nphysicals) / 8;
++#ifdef DEBUG
++	dev_info(&h->pdev->dev, "number of physical luns is %d\n", *nphysicals);
++#endif
++	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
++		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
++			"  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
++			*nphysicals - HPSA_MAX_PHYS_LUN);
++		*nphysicals = HPSA_MAX_PHYS_LUN;
++	}
++	if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
++		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
++		return -1;
++	}
++	memcpy(nlogicals, &logdev->LUNListLength[0], sizeof(*nlogicals));
++	*nlogicals = be32_to_cpu(*nlogicals) / 8;
++#ifdef DEBUG
++	dev_info(&h->pdev->dev, "number of logical luns is %d\n", *nlogicals);
++#endif
++	/* Reject Logicals in excess of our max capability. */
++	if (*nlogicals > HPSA_MAX_LUN) {
++		dev_warn(&h->pdev->dev,
++			"maximum logical LUNs (%d) exceeded.  "
++			"%d LUNs ignored.\n", HPSA_MAX_LUN,
++			*nlogicals - HPSA_MAX_LUN);
++		*nlogicals = HPSA_MAX_LUN;
++	}
++	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
++		dev_warn(&h->pdev->dev,
++			"maximum logical + physical LUNs (%d) exceeded. "
++			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
++			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
++		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
++	}
++	return 0;
++}
++
++static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
++{
++	/* the idea here is we could get notified
++	 * that some devices have changed, so we do a report
++	 * physical luns and report logical luns cmd, and adjust
++	 * our list of devices accordingly.
++	 *
++	 * The scsi3addr's of devices won't change so long as the
++	 * adapter is not reset.  That means we can rescan and
++	 * tell which devices we already know about, vs. new
++	 * devices, vs.  disappearing devices.
++	 */
++	struct ReportLUNdata *physdev_list = NULL;
++	struct ReportLUNdata *logdev_list = NULL;
++	unsigned char *inq_buff = NULL;
++	__u32 nphysicals = 0;
++	__u32 nlogicals = 0;
++	__u32 ndev_allocated = 0;
++	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
++	int ncurrent = 0;
++	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
++	int i, nmsa2xxx_enclosures, ndevs_to_allocate;
++	int bus, target, lun;
++	DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
++
++	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
++		GFP_KERNEL);
++	physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
++	logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
++	inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
++	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
++
++	if (!currentsd || !physdev_list || !logdev_list ||
++		!inq_buff || !tmpdevice) {
++		dev_err(&h->pdev->dev, "out of memory\n");
++		goto out;
++	}
++	memset(lunzerobits, 0, sizeof(lunzerobits));
++
++	if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
++			logdev_list, &nlogicals))
++		goto out;
++
++	/* We might see up to 32 MSA2xxx enclosures, actually 8 of them
++	 * but each of them 4 times through different paths.  The plus 1
++	 * is for the RAID controller.
++	 */
++	ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;
++
++	/* Allocate the per device structures */
++	for (i = 0; i < ndevs_to_allocate; i++) {
++		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
++		if (!currentsd[i]) {
++			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
++				__FILE__, __LINE__);
++			goto out;
++		}
++		ndev_allocated++;
++	}
++
++	/* adjust our table of devices */
++	nmsa2xxx_enclosures = 0;
++	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
++		__u8 *lunaddrbytes;
++
++		/* Figure out where the LUN ID info is coming from */
++		if (i < nphysicals)
++			lunaddrbytes = &physdev_list->LUN[i][0];
++		else
++			if (i < nphysicals + nlogicals)
++				lunaddrbytes =
++					&logdev_list->LUN[i-nphysicals][0];
++			else /* jam in the RAID controller at the end */
++				lunaddrbytes = RAID_CTLR_LUNID;
++
++		/* skip masked physical devices. */
++		if (lunaddrbytes[3] & 0xC0 && i < nphysicals)
++			continue;
++
++		/* Get device type, vendor, model, device id */
++		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
++			continue; /* skip it if we can't talk to it. */
++		figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
++			tmpdevice);
++		this_device = currentsd[ncurrent];
++
++		/*
++		 * For the msa2xxx boxes, we have to insert a LUN 0 which
++		 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
++		 * is nonetheless an enclosure device there.  We have to
++		 * present that otherwise linux won't find anything if
++		 * there is no lun 0.
++		 */
++		if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
++				lunaddrbytes, bus, target, lun, lunzerobits,
++				&nmsa2xxx_enclosures)) {
++			ncurrent++;
++			this_device = currentsd[ncurrent];
++		}
++
++		*this_device = *tmpdevice;
++		hpsa_set_bus_target_lun(this_device, bus, target, lun);
++
++		switch (this_device->devtype) {
++		case TYPE_ROM: {
++			/* We don't *really* support actual CD-ROM devices,
++			 * just "One Button Disaster Recovery" tape drive
++			 * which temporarily pretends to be a CD-ROM drive.
++			 * So we check that the device is really an OBDR tape
++			 * device by checking for "$DR-10" in bytes 43-48 of
++			 * the inquiry data.
++			 */
++				char obdr_sig[7];
++#define OBDR_TAPE_SIG "$DR-10"
++				strncpy(obdr_sig, &inq_buff[43], 6);
++				obdr_sig[6] = '\0';
++				if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
++					/* Not OBDR device, ignore it. */
++					break;
++			}
++			ncurrent++;
++			break;
++		case TYPE_DISK:
++			if (i < nphysicals)
++				break;
++			ncurrent++;
++			break;
++		case TYPE_TAPE:
++		case TYPE_MEDIUM_CHANGER:
++			ncurrent++;
++			break;
++		case TYPE_RAID:
++			/* Only present the Smartarray HBA as a RAID controller.
++			 * If it's a RAID controller other than the HBA itself
++			 * (an external RAID controller, MSA500 or similar)
++			 * don't present it.
++			 */
++			if (!is_hba_lunid(lunaddrbytes))
++				break;
++			ncurrent++;
++			break;
++		default:
++			break;
++		}
++		if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
++			break;
++	}
++	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
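++	/* Entries kept by adjust_hpsa_scsi_table() were NULLed out in
++	 * currentsd[] above, and kfree(NULL) is a no-op, so the cleanup
++	 * below frees exactly the entries we still own.
++	 */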
++out:
++	kfree(tmpdevice);
++	for (i = 0; i < ndev_allocated; i++)
++		kfree(currentsd[i]);
++	kfree(currentsd);
++	kfree(inq_buff);
++	kfree(physdev_list);
++	kfree(logdev_list);
++	return;
++}
++
++/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
++ * dma mapping  and fills in the scatter gather entries of the
++ * hpsa command, cp.
++ */
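++/* Each SG entry splits the 64-bit DMA address into two 32-bit halves;
++ * e.g. a mapping at 0x123456000 becomes Addr.lower = 0x23456000 and
++ * Addr.upper = 0x1, with Ext left zero since no chaining is done.
++ */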
++static int hpsa_scatter_gather(struct pci_dev *pdev,
++		struct CommandList *cp,
++		struct scsi_cmnd *cmd)
++{
++	unsigned int len;
++	struct scatterlist *sg;
++	__u64 addr64;
++	int use_sg, i;
++
++	BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
++
++	use_sg = scsi_dma_map(cmd);
++	if (use_sg < 0)
++		return use_sg;
++
++	if (!use_sg)
++		goto sglist_finished;
++
++	scsi_for_each_sg(cmd, sg, use_sg, i) {
++		addr64 = (__u64) sg_dma_address(sg);
++		len  = sg_dma_len(sg);
++		cp->SG[i].Addr.lower =
++			(__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
++		cp->SG[i].Addr.upper =
++			(__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
++		cp->SG[i].Len = len;
++		cp->SG[i].Ext = 0;  /* we are not chaining */
++	}
++
++sglist_finished:
++
++	cp->Header.SGList = (__u8) use_sg;   /* no. SGs contig in this cmd */
++	cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */
++	return 0;
++}
++
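++/* queuecommand: translate a scsi_cmnd into a CommandList: copy the
++ * CDB, pick the transfer direction, build the SG list and post it to
++ * the controller.  The completion path runs through do_hpsa_intr()
++ * and complete_scsi_command().
++ */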
++static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
++	void (*done)(struct scsi_cmnd *))
++{
++	struct ctlr_info *h;
++	struct hpsa_scsi_dev_t *dev;
++	unsigned char scsi3addr[8];
++	struct CommandList *c;
++	unsigned long flags;
++
++	/* Get the ptr to our adapter structure out of cmd->host. */
++	h = sdev_to_hba(cmd->device);
++	dev = cmd->device->hostdata;
++	if (!dev) {
++		cmd->result = DID_NO_CONNECT << 16;
++		done(cmd);
++		return 0;
++	}
++	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
++
++	/* Need a lock as this is being allocated from the pool */
++	spin_lock_irqsave(&h->lock, flags);
++	c = cmd_alloc(h);
++	spin_unlock_irqrestore(&h->lock, flags);
++	if (c == NULL) {			/* trouble... */
++		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
++		return SCSI_MLQUEUE_HOST_BUSY;
++	}
++
++	/* Fill in the command list header */
++
++	cmd->scsi_done = done;    /* save this for use by completion code */
++
++	/* save c in case we have to abort it  */
++	cmd->host_scribble = (unsigned char *) c;
++
++	c->cmd_type = CMD_SCSI;
++	c->scsi_cmd = cmd;
++	c->Header.ReplyQueue = 0;  /* unused in simple mode */
++	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
++	c->Header.Tag.lower = c->busaddr;  /* use the cmd's bus address as tag */
++
++	/* Fill in the request block... */
++
++	c->Request.Timeout = 0;
++	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
++	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
++	c->Request.CDBLen = cmd->cmd_len;
++	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
++	c->Request.Type.Type = TYPE_CMD;
++	c->Request.Type.Attribute = ATTR_SIMPLE;
++	switch (cmd->sc_data_direction) {
++	case DMA_TO_DEVICE:
++		c->Request.Type.Direction = XFER_WRITE;
++		break;
++	case DMA_FROM_DEVICE:
++		c->Request.Type.Direction = XFER_READ;
++		break;
++	case DMA_NONE:
++		c->Request.Type.Direction = XFER_NONE;
++		break;
++	case DMA_BIDIRECTIONAL:
++		/* This can happen if a buggy application does a scsi passthru
++		 * and sets both inlen and outlen to non-zero.  (See
++		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command().)
++		 */
++
++		c->Request.Type.Direction = XFER_RSVD;
++		/* This is technically wrong, and hpsa controllers should
++		 * reject it with CMD_INVALID, which is the most correct
++		 * response, but non-fibre backends appear to let it
++		 * slide by, and give the same results as if this field
++		 * were set correctly.  Either way is acceptable for
++		 * our purposes here.
++		 */
++
++		break;
++
++	default:
++		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
++			cmd->sc_data_direction);
++		BUG();
++		break;
++	}
++
++	if (hpsa_scatter_gather(h->pdev, c, cmd) < 0) { /* Fill SG list */
++		cmd_free(h, c);
++		return SCSI_MLQUEUE_HOST_BUSY;
++	}
++	enqueue_cmd_and_start_io(h, c);
++	/* the cmd'll come back via intr handler in complete_scsi_command()  */
++	return 0;
++}
++
++static void hpsa_unregister_scsi(struct ctlr_info *h)
++{
++	/* we are being forcibly unloaded, and may not refuse. */
++	scsi_remove_host(h->scsi_host);
++	scsi_host_put(h->scsi_host);
++	h->scsi_host = NULL;
++}
++
++static int hpsa_register_scsi(struct ctlr_info *h)
++{
++	int rc;
++
++	hpsa_update_scsi_devices(h, -1);
++	rc = hpsa_scsi_detect(h);
++	if (rc != 0)
++		dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
++			" hpsa_scsi_detect(), rc is %d\n", rc);
++	return rc;
++}
++
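++/* Poll the device with TEST UNIT READY, sleeping first so that a
++ * fresh reset isn't aborted by the TUR itself.  The sleep doubles on
++ * each try (1s, 2s, 4s, ... capped at HPSA_MAX_WAIT_INTERVAL_SECS)
++ * for up to HPSA_TUR_RETRY_LIMIT attempts; the device counts as ready
++ * on CMD_SUCCESS, or on a check condition whose sense key is NO SENSE
++ * or UNIT ATTENTION.
++ */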
++static int wait_for_device_to_become_ready(struct ctlr_info *h,
++	unsigned char lunaddr[])
++{
++	int rc = 0;
++	int count = 0;
++	int waittime = 1; /* seconds */
++	struct CommandList *c;
++
++	c = cmd_special_alloc(h);
++	if (!c) {
++		dev_warn(&h->pdev->dev, "out of memory in "
++			"wait_for_device_to_become_ready.\n");
++		return IO_ERROR;
++	}
++
++	/* Send test unit ready until device ready, or give up. */
++	while (count < HPSA_TUR_RETRY_LIMIT) {
++
++		/* Wait for a bit.  Do this first, because if we send
++		 * the TUR right away, the reset will just abort it.
++		 */
++		msleep(1000 * waittime);
++		count++;
++
++		/* Increase wait time with each try, up to a point. */
++		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
++			waittime = waittime * 2;
++
++		/* Send the Test Unit Ready */
++		fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
++		hpsa_scsi_do_simple_cmd_core(h, c);
++		/* no unmap needed here because no data xfer. */
++
++		if (c->err_info->CommandStatus == CMD_SUCCESS)
++			break;
++
++		if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
++			c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
++			(c->err_info->SenseInfo[2] == NO_SENSE ||
++			c->err_info->SenseInfo[2] == UNIT_ATTENTION))
++			break;
++
++		dev_warn(&h->pdev->dev, "waiting %d secs "
++			"for device to become ready.\n", waittime);
++		rc = 1; /* device not ready. */
++	}
++
++	if (rc)
++		dev_warn(&h->pdev->dev, "giving up on device.\n");
++	else
++		dev_warn(&h->pdev->dev, "device is ready.\n");
++
++	cmd_special_free(h, c);
++	return rc;
++}
++
++/* Need at least one of these error handlers to keep ../scsi/hosts.c from
++ * complaining.  Doing a host- or bus-reset can't do anything good here.
++ */
++static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
++{
++	int rc;
++	struct ctlr_info *h;
++	struct hpsa_scsi_dev_t *dev;
++
++	/* find the controller to which the command to be aborted was sent */
++	h = sdev_to_hba(scsicmd->device);
++	if (h == NULL) /* paranoia */
++		return FAILED;
++	dev_warn(&h->pdev->dev, "resetting drive\n");
++
++	dev = scsicmd->device->hostdata;
++	if (!dev) {
++		dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
++			"device lookup failed.\n");
++		return FAILED;
++	}
++	/* send a reset to the SCSI LUN which the command was sent to */
++	rc = hpsa_send_reset(h, dev->scsi3addr);
++	if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
++		return SUCCESS;
++
++	dev_warn(&h->pdev->dev, "resetting device failed.\n");
++	return FAILED;
++}
++
++/*
++ * For operations that cannot sleep, a command block is allocated at init,
++ * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
++ * which ones are free or in use.  Lock must be held when calling this.
++ * cmd_free() is the complement.
++ */
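++/* The allocation bitmap is an array of longs: command i is tracked by
++ * bit (i & (BITS_PER_LONG - 1)) of word (i / BITS_PER_LONG), so on a
++ * 64-bit kernel command 70 lives at bit 6 of word 1.
++ */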
++static struct CommandList *cmd_alloc(struct ctlr_info *h)
++{
++	struct CommandList *c;
++	int i;
++	union u64bit temp64;
++	dma_addr_t cmd_dma_handle, err_dma_handle;
++
++	do {
++		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
++		if (i == h->nr_cmds)
++			return NULL;
++	} while (test_and_set_bit
++		 (i & (BITS_PER_LONG - 1),
++		  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
++	c = h->cmd_pool + i;
++	memset(c, 0, sizeof(*c));
++	cmd_dma_handle = h->cmd_pool_dhandle
++	    + i * sizeof(*c);
++	c->err_info = h->errinfo_pool + i;
++	memset(c->err_info, 0, sizeof(*c->err_info));
++	err_dma_handle = h->errinfo_pool_dhandle
++	    + i * sizeof(*c->err_info);
++	h->nr_allocs++;
++
++	c->cmdindex = i;
++
++	INIT_HLIST_NODE(&c->list);
++	c->busaddr = (__u32) cmd_dma_handle;
++	temp64.val = (__u64) err_dma_handle;
++	c->ErrDesc.Addr.lower = temp64.val32.lower;
++	c->ErrDesc.Addr.upper = temp64.val32.upper;
++	c->ErrDesc.Len = sizeof(*c->err_info);
++
++	c->h = h;
++	return c;
++}
++
++/* For operations where kmalloc may sleep, this routine can be called.
++ * The lock need not be held to call cmd_special_alloc;
++ * cmd_special_free() is the complement.
++ */
++static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
++{
++	struct CommandList *c;
++	union u64bit temp64;
++	dma_addr_t cmd_dma_handle, err_dma_handle;
++
++	c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
++	if (c == NULL)
++		return NULL;
++	memset(c, 0, sizeof(*c));
++
++	c->cmdindex = -1;
++
++	c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
++		    &err_dma_handle);
++
++	if (c->err_info == NULL) {
++		pci_free_consistent(h->pdev,
++			sizeof(*c), c, cmd_dma_handle);
++		return NULL;
++	}
++	memset(c->err_info, 0, sizeof(*c->err_info));
++
++	INIT_HLIST_NODE(&c->list);
++	c->busaddr = (__u32) cmd_dma_handle;
++	temp64.val = (__u64) err_dma_handle;
++	c->ErrDesc.Addr.lower = temp64.val32.lower;
++	c->ErrDesc.Addr.upper = temp64.val32.upper;
++	c->ErrDesc.Len = sizeof(*c->err_info);
++
++	c->h = h;
++	return c;
++}
++
++static void cmd_free(struct ctlr_info *h, struct CommandList *c)
++{
++	int i;
++
++	i = c - h->cmd_pool;
++	clear_bit(i & (BITS_PER_LONG - 1),
++		  h->cmd_pool_bits + (i / BITS_PER_LONG));
++	h->nr_frees++;
++}
++
++static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
++{
++	union u64bit temp64;
++
++	temp64.val32.lower = c->ErrDesc.Addr.lower;
++	temp64.val32.upper = c->ErrDesc.Addr.upper;
++	pci_free_consistent(h->pdev, sizeof(*c->err_info),
++			    c->err_info, (dma_addr_t) temp64.val);
++	pci_free_consistent(h->pdev, sizeof(*c),
++			    c, (dma_addr_t) c->busaddr);
++}
++
++#ifdef CONFIG_COMPAT
++
++static int do_ioctl(struct scsi_device *dev, int cmd, void *arg)
++{
++	int ret;
++
++	lock_kernel();
++	ret = hpsa_ioctl(dev, cmd, arg);
++	unlock_kernel();
++	return ret;
++}
++
++static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg);
++static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
++	int cmd, void *arg);
++
++static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
++{
++	switch (cmd) {
++	case CCISS_GETPCIINFO:
++	case CCISS_GETINTINFO:
++	case CCISS_SETINTINFO:
++	case CCISS_GETNODENAME:
++	case CCISS_SETNODENAME:
++	case CCISS_GETHEARTBEAT:
++	case CCISS_GETBUSTYPES:
++	case CCISS_GETFIRMVER:
++	case CCISS_GETDRIVVER:
++	case CCISS_REVALIDVOLS:
++	case CCISS_DEREGDISK:
++	case CCISS_REGNEWDISK:
++	case CCISS_REGNEWD:
++	case CCISS_RESCANDISK:
++	case CCISS_GETLUNINFO:
++		return do_ioctl(dev, cmd, arg);
++
++	case CCISS_PASSTHRU32:
++		return hpsa_ioctl32_passthru(dev, cmd, arg);
++	case CCISS_BIG_PASSTHRU32:
++		return hpsa_ioctl32_big_passthru(dev, cmd, arg);
++
++	default:
++		return -ENOIOCTLCMD;
++	}
++}
++
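++/* The 32-bit passthru structs differ from the native ones only in the
++ * width of the user buffer pointer, so each shim copies the fields
++ * into a native struct built with compat_alloc_user_space(), widens
++ * the pointer with compat_ptr(), and re-enters the native ioctl path.
++ */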
++static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
++{
++	IOCTL32_Command_struct __user *arg32 =
++	    (IOCTL32_Command_struct __user *) arg;
++	IOCTL_Command_struct arg64;
++	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
++	int err;
++	u32 cp;
++
++	err = 0;
++	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
++			   sizeof(arg64.LUN_info));
++	err |= copy_from_user(&arg64.Request, &arg32->Request,
++			   sizeof(arg64.Request));
++	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
++			   sizeof(arg64.error_info));
++	err |= get_user(arg64.buf_size, &arg32->buf_size);
++	err |= get_user(cp, &arg32->buf);
++	arg64.buf = compat_ptr(cp);
++	err |= copy_to_user(p, &arg64, sizeof(arg64));
++
++	if (err)
++		return -EFAULT;
++
++	err = do_ioctl(dev, CCISS_PASSTHRU, (void *)p);
++	if (err)
++		return err;
++	err |= copy_in_user(&arg32->error_info, &p->error_info,
++			 sizeof(arg32->error_info));
++	if (err)
++		return -EFAULT;
++	return err;
++}
++
++static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
++	int cmd, void *arg)
++{
++	BIG_IOCTL32_Command_struct __user *arg32 =
++	    (BIG_IOCTL32_Command_struct __user *) arg;
++	BIG_IOCTL_Command_struct arg64;
++	BIG_IOCTL_Command_struct __user *p =
++	    compat_alloc_user_space(sizeof(arg64));
++	int err;
++	u32 cp;
++
++	err = 0;
++	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
++			   sizeof(arg64.LUN_info));
++	err |= copy_from_user(&arg64.Request, &arg32->Request,
++			   sizeof(arg64.Request));
++	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
++			   sizeof(arg64.error_info));
++	err |= get_user(arg64.buf_size, &arg32->buf_size);
++	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
++	err |= get_user(cp, &arg32->buf);
++	arg64.buf = compat_ptr(cp);
++	err |= copy_to_user(p, &arg64, sizeof(arg64));
++
++	if (err)
++		return -EFAULT;
++
++	err = do_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
++	if (err)
++		return err;
++	err |= copy_in_user(&arg32->error_info, &p->error_info,
++			 sizeof(arg32->error_info));
++	if (err)
++		return -EFAULT;
++	return err;
++}
++#endif
++
++static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
++{
++	struct hpsa_pci_info pciinfo;
++
++	if (!argp)
++		return -EINVAL;
++	pciinfo.domain = pci_domain_nr(h->pdev->bus);
++	pciinfo.bus = h->pdev->bus->number;
++	pciinfo.dev_fn = h->pdev->devfn;
++	pciinfo.board_id = h->board_id;
++	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
++		return -EFAULT;
++	return 0;
++}
++
++static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
++{
++	DriverVer_type DriverVer;
++	unsigned char vmaj, vmin, vsubmin;
++	int rc;
++
++	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
++		&vmaj, &vmin, &vsubmin);
++	if (rc != 3) {
++		dev_info(&h->pdev->dev, "driver version string '%s' "
++			"unrecognized.", HPSA_DRIVER_VERSION);
++		vmaj = 0;
++		vmin = 0;
++		vsubmin = 0;
++	}
++	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
++	if (!argp)
++		return -EINVAL;
++	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
++		return -EFAULT;
++	return 0;
++}
++
++static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
++{
++	IOCTL_Command_struct iocommand;
++	struct CommandList *c;
++	char *buff = NULL;
++	union u64bit temp64;
++
++	if (!argp)
++		return -EINVAL;
++	if (!capable(CAP_SYS_RAWIO))
++		return -EPERM;
++	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
++		return -EFAULT;
++	if ((iocommand.buf_size < 1) &&
++	    (iocommand.Request.Type.Direction != XFER_NONE)) {
++		return -EINVAL;
++	}
++	if (iocommand.buf_size > 0) {
++		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
++		if (buff == NULL)
++			return -ENOMEM;
++	}
++	if (iocommand.Request.Type.Direction == XFER_WRITE) {
++		/* Copy the data into the buffer we created */
++		if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
++			kfree(buff);
++			return -EFAULT;
++		}
++	} else
++		memset(buff, 0, iocommand.buf_size);
++	c = cmd_special_alloc(h);
++	if (c == NULL) {
++		kfree(buff);
++		return -ENOMEM;
++	}
++	/* Fill in the command type */
++	c->cmd_type = CMD_IOCTL_PEND;
++	/* Fill in Command Header */
++	c->Header.ReplyQueue = 0; /* unused in simple mode */
++	if (iocommand.buf_size > 0) {	/* buffer to fill */
++		c->Header.SGList = 1;
++		c->Header.SGTotal = 1;
++	} else	{ /* no buffers to fill */
++		c->Header.SGList = 0;
++		c->Header.SGTotal = 0;
++	}
++	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
++	/* use the cmd block's bus address as the tag */
++	c->Header.Tag.lower = c->busaddr;
++
++	/* Fill in Request block */
++	memcpy(&c->Request, &iocommand.Request,
++		sizeof(c->Request));
++
++	/* Fill in the scatter gather information */
++	if (iocommand.buf_size > 0) {
++		temp64.val = pci_map_single(h->pdev, buff,
++			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
++		c->SG[0].Addr.lower = temp64.val32.lower;
++		c->SG[0].Addr.upper = temp64.val32.upper;
++		c->SG[0].Len = iocommand.buf_size;
++		c->SG[0].Ext = 0; /* we are not chaining*/
++	}
++	hpsa_scsi_do_simple_cmd_core(h, c);
++	hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
++	check_ioctl_unit_attention(h, c);
++
++	/* Copy the error information out */
++	memcpy(&iocommand.error_info, c->err_info,
++		sizeof(iocommand.error_info));
++	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
++		kfree(buff);
++		cmd_special_free(h, c);
++		return -EFAULT;
++	}
++
++	if (iocommand.Request.Type.Direction == XFER_READ) {
++		/* Copy the data out of the buffer we created */
++		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
++			kfree(buff);
++			cmd_special_free(h, c);
++			return -EFAULT;
++		}
++	}
++	kfree(buff);
++	cmd_special_free(h, c);
++	return 0;
++}
++
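++/* Like hpsa_passthru_ioctl(), but the user buffer may be bigger than
++ * one kmalloc, so it is staged through up to MAXSGENTRIES chunks of
++ * at most ioc->malloc_size bytes each; e.g. a 130 KiB transfer with a
++ * 64 KiB malloc_size would use three chunks (64 + 64 + 2 KiB).
++ */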
++static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
++{
++	BIG_IOCTL_Command_struct *ioc;
++	struct CommandList *c;
++	unsigned char **buff = NULL;
++	int *buff_size = NULL;
++	union u64bit temp64;
++	BYTE sg_used = 0;
++	int status = 0;
++	int i;
++	__u32 left;
++	__u32 sz;
++	BYTE __user *data_ptr;
++
++	if (!argp)
++		return -EINVAL;
++	if (!capable(CAP_SYS_RAWIO))
++		return -EPERM;
++	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
++	if (!ioc) {
++		status = -ENOMEM;
++		goto cleanup1;
++	}
++	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
++		status = -EFAULT;
++		goto cleanup1;
++	}
++	if ((ioc->buf_size < 1) &&
++	    (ioc->Request.Type.Direction != XFER_NONE)) {
++		status = -EINVAL;
++		goto cleanup1;
++	}
++	/* Check kmalloc limits  using all SGs */
++	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
++		status = -EINVAL;
++		goto cleanup1;
++	}
++	if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
++		status = -EINVAL;
++		goto cleanup1;
++	}
++	buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
++	if (!buff) {
++		status = -ENOMEM;
++		goto cleanup1;
++	}
++	buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
++	if (!buff_size) {
++		status = -ENOMEM;
++		goto cleanup1;
++	}
++	left = ioc->buf_size;
++	data_ptr = ioc->buf;
++	while (left) {
++		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
++		buff_size[sg_used] = sz;
++		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
++		if (buff[sg_used] == NULL) {
++			status = -ENOMEM;
++			goto cleanup1;
++		}
++		if (ioc->Request.Type.Direction == XFER_WRITE) {
++			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
++				status = -EFAULT;
++				goto cleanup1;
++			}
++		} else
++			memset(buff[sg_used], 0, sz);
++		left -= sz;
++		data_ptr += sz;
++		sg_used++;
++	}
++	c = cmd_special_alloc(h);
++	if (c == NULL) {
++		status = -ENOMEM;
++		goto cleanup1;
++	}
++	c->cmd_type = CMD_IOCTL_PEND;
++	c->Header.ReplyQueue = 0;
++
++	if (ioc->buf_size > 0) {
++		c->Header.SGList = sg_used;
++		c->Header.SGTotal = sg_used;
++	} else {
++		c->Header.SGList = 0;
++		c->Header.SGTotal = 0;
++	}
++	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
++	c->Header.Tag.lower = c->busaddr;
++	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
++	if (ioc->buf_size > 0) {
++		int i;
++		for (i = 0; i < sg_used; i++) {
++			temp64.val = pci_map_single(h->pdev, buff[i],
++				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
++			c->SG[i].Addr.lower = temp64.val32.lower;
++			c->SG[i].Addr.upper = temp64.val32.upper;
++			c->SG[i].Len = buff_size[i];
++			/* we are not chaining */
++			c->SG[i].Ext = 0;
++		}
++	}
++	hpsa_scsi_do_simple_cmd_core(h, c);
++	hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
++	check_ioctl_unit_attention(h, c);
++	/* Copy the error information out */
++	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
++	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
++		cmd_special_free(h, c);
++		status = -EFAULT;
++		goto cleanup1;
++	}
++	if (ioc->Request.Type.Direction == XFER_READ) {
++		/* Copy the data out of the buffer we created */
++		BYTE __user *ptr = ioc->buf;
++		for (i = 0; i < sg_used; i++) {
++			if (copy_to_user(ptr, buff[i], buff_size[i])) {
++				cmd_special_free(h, c);
++				status = -EFAULT;
++				goto cleanup1;
++			}
++			ptr += buff_size[i];
++		}
++	}
++	cmd_special_free(h, c);
++	status = 0;
++cleanup1:
++	if (buff) {
++		for (i = 0; i < sg_used; i++)
++			kfree(buff[i]);
++		kfree(buff);
++	}
++	kfree(buff_size);
++	kfree(ioc);
++	return status;
++}
++
++static void check_ioctl_unit_attention(struct ctlr_info *h,
++	struct CommandList *c)
++{
++	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
++			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
++		(void) check_for_unit_attention(h, c);
++}
++/*
++ * ioctl
++ */
++static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
++{
++	struct ctlr_info *h;
++	void __user *argp = (void __user *)arg;
++
++	h = sdev_to_hba(dev);
++
++	switch (cmd) {
++	case CCISS_DEREGDISK:
++	case CCISS_REGNEWDISK:
++	case CCISS_REGNEWD:
++		hpsa_update_scsi_devices(h, dev->host->host_no);
++		return 0;
++	case CCISS_GETPCIINFO:
++		return hpsa_getpciinfo_ioctl(h, argp);
++	case CCISS_GETDRIVVER:
++		return hpsa_getdrivver_ioctl(h, argp);
++	case CCISS_PASSTHRU:
++		return hpsa_passthru_ioctl(h, argp);
++	case CCISS_BIG_PASSTHRU:
++		return hpsa_big_passthru_ioctl(h, argp);
++	default:
++		return -ENOTTY;
++	}
++}
++
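++/* Build the CommandList for a one-off internal command.  For TYPE_CMD
++ * this fills in a real CDB (e.g. HPSA_INQUIRY with a nonzero
++ * page_code sets the EVPD bit in CDB[1] and the page in CDB[2]); for
++ * TYPE_MSG it encodes a controller message.  The single data buffer,
++ * if any, is DMA-mapped at the end via hpsa_map_one().
++ */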
++static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
++	void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
++	int cmd_type)
++{
++	int pci_dir = XFER_NONE;
++
++	c->cmd_type = CMD_IOCTL_PEND;
++	c->Header.ReplyQueue = 0;
++	if (buff != NULL && size > 0) {
++		c->Header.SGList = 1;
++		c->Header.SGTotal = 1;
++	} else {
++		c->Header.SGList = 0;
++		c->Header.SGTotal = 0;
++	}
++	c->Header.Tag.lower = c->busaddr;
++	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
++
++	c->Request.Type.Type = cmd_type;
++	if (cmd_type == TYPE_CMD) {
++		switch (cmd) {
++		case HPSA_INQUIRY:
++			/* are we trying to read a vital product page */
++			if (page_code != 0) {
++				c->Request.CDB[1] = 0x01;
++				c->Request.CDB[2] = page_code;
++			}
++			c->Request.CDBLen = 6;
++			c->Request.Type.Attribute = ATTR_SIMPLE;
++			c->Request.Type.Direction = XFER_READ;
++			c->Request.Timeout = 0;
++			c->Request.CDB[0] = HPSA_INQUIRY;
++			c->Request.CDB[4] = size & 0xFF;
++			break;
++		case HPSA_REPORT_LOG:
++		case HPSA_REPORT_PHYS:
++			/* Talking to controller so It's a physical command
++			   mode = 00 target = 0.  Nothing to write.
++			 */
++			c->Request.CDBLen = 12;
++			c->Request.Type.Attribute = ATTR_SIMPLE;
++			c->Request.Type.Direction = XFER_READ;
++			c->Request.Timeout = 0;
++			c->Request.CDB[0] = cmd;
++			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
++			c->Request.CDB[7] = (size >> 16) & 0xFF;
++			c->Request.CDB[8] = (size >> 8) & 0xFF;
++			c->Request.CDB[9] = size & 0xFF;
++			break;
++
++		case HPSA_READ_CAPACITY:
++			c->Request.CDBLen = 10;
++			c->Request.Type.Attribute = ATTR_SIMPLE;
++			c->Request.Type.Direction = XFER_READ;
++			c->Request.Timeout = 0;
++			c->Request.CDB[0] = cmd;
++			break;
++		case HPSA_CACHE_FLUSH:
++			c->Request.CDBLen = 12;
++			c->Request.Type.Attribute = ATTR_SIMPLE;
++			c->Request.Type.Direction = XFER_WRITE;
++			c->Request.Timeout = 0;
++			c->Request.CDB[0] = BMIC_WRITE;
++			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
++			break;
++		case TEST_UNIT_READY:
++			c->Request.CDBLen = 6;
++			c->Request.Type.Attribute = ATTR_SIMPLE;
++			c->Request.Type.Direction = XFER_NONE;
++			c->Request.Timeout = 0;
++			break;
++		default:
++			dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
++			BUG();
++			return;
++		}
++	} else if (cmd_type == TYPE_MSG) {
++		switch (cmd) {
++
++		case  HPSA_DEVICE_RESET_MSG:
++			c->Request.CDBLen = 16;
++			c->Request.Type.Type =  1; /* It is a MSG not a CMD */
++			c->Request.Type.Attribute = ATTR_SIMPLE;
++			c->Request.Type.Direction = XFER_NONE;
++			c->Request.Timeout = 0; /* Don't time out */
++			c->Request.CDB[0] =  0x01; /* RESET_MSG is 0x01 */
++			c->Request.CDB[1] = 0x03;  /* Reset target above */
++			/* If bytes 4-7 are zero, it means reset the */
++			/* LunID device */
++			c->Request.CDB[4] = 0x00;
++			c->Request.CDB[5] = 0x00;
++			c->Request.CDB[6] = 0x00;
++			c->Request.CDB[7] = 0x00;
++		break;
++
++		default:
++			dev_warn(&h->pdev->dev, "unknown message type %d\n",
++				cmd);
++			BUG();
++		}
++	} else {
++		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
++		BUG();
++	}
++
++	switch (c->Request.Type.Direction) {
++	case XFER_READ:
++		pci_dir = PCI_DMA_FROMDEVICE;
++		break;
++	case XFER_WRITE:
++		pci_dir = PCI_DMA_TODEVICE;
++		break;
++	case XFER_NONE:
++		pci_dir = PCI_DMA_NONE;
++		break;
++	default:
++		pci_dir = PCI_DMA_BIDIRECTIONAL;
++	}
++
++	hpsa_map_one(h->pdev, c, buff, size, pci_dir);
++
++	return;
++}
++
++/*
++ * Map (physical) PCI mem into (virtual) kernel space
++ */
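++/* The base is aligned down to a page boundary before ioremap() and
++ * the intra-page offset added back; e.g. base 0xfebf0100 maps the
++ * page at 0xfebf0000 and returns that mapping + 0x100.
++ */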
++static void __iomem *remap_pci_mem(ulong base, ulong size)
++{
++	ulong page_base = ((ulong) base) & PAGE_MASK;
++	ulong page_offs = ((ulong) base) - page_base;
++	void __iomem *page_remapped = ioremap(page_base, page_offs + size);
++
++	return page_remapped ? (page_remapped + page_offs) : NULL;
++}
++
++/* Takes cmds off the submission queue and sends them to the hardware,
++ * then puts them on the queue of cmds waiting for completion.
++ */
++static void start_io(struct ctlr_info *h)
++{
++	struct CommandList *c;
++
++	while (!hlist_empty(&h->reqQ)) {
++		c = hlist_entry(h->reqQ.first, struct CommandList, list);
++		/* can't do anything if fifo is full */
++		if ((h->access.fifo_full(h))) {
++			dev_warn(&h->pdev->dev, "fifo full\n");
++			break;
++		}
++
++		/* Get the first entry from the Request Q */
++		removeQ(c);
++		h->Qdepth--;
++
++		/* Tell the controller execute command */
++		h->access.submit_command(h, c);
++
++		/* Put job onto the completed Q */
++		addQ(&h->cmpQ, c);
++	}
++}
++
++static inline unsigned long get_next_completion(struct ctlr_info *h)
++{
++	return h->access.command_completed(h);
++}
++
++static inline int interrupt_pending(struct ctlr_info *h)
++{
++	return h->access.intr_pending(h);
++}
++
++static inline long interrupt_not_for_us(struct ctlr_info *h)
++{
++	return ((h->access.intr_pending(h) == 0) ||
++		 (h->interrupts_enabled == 0));
++}
++
++static inline int bad_tag(struct ctlr_info *h, __u32 tag_index,
++	__u32 raw_tag)
++{
++	if (unlikely(tag_index >= h->nr_cmds)) {
++		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
++		return 1;
++	}
++	return 0;
++}
++
++static inline void finish_cmd(struct CommandList *c, __u32 raw_tag)
++{
++	removeQ(c);
++	if (likely(c->cmd_type == CMD_SCSI))
++		complete_scsi_command(c, 0, raw_tag);
++	else if (c->cmd_type == CMD_IOCTL_PEND)
++		complete(c->waiting);
++}
++
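++/* Completions arrive in one of two tag formats: tags that embed the
++ * command-pool index are decoded directly with HPSA_TAG_TO_INDEX(),
++ * while plain bus-address tags (error bits masked off) are matched by
++ * walking the completion queue for the CommandList with that busaddr.
++ */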
++static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
++{
++	struct ctlr_info *h = dev_id;
++	struct CommandList *c;
++	unsigned long flags;
++	__u32 raw_tag, tag, tag_index;
++	struct hlist_node *tmp;
++
++	if (interrupt_not_for_us(h))
++		return IRQ_NONE;
++	spin_lock_irqsave(&h->lock, flags);
++	while (interrupt_pending(h)) {
++		while ((raw_tag = get_next_completion(h)) != FIFO_EMPTY) {
++			if (likely(HPSA_TAG_CONTAINS_INDEX(raw_tag))) {
++				tag_index = HPSA_TAG_TO_INDEX(raw_tag);
++				if (bad_tag(h, tag_index, raw_tag))
++					return IRQ_HANDLED;
++				c = h->cmd_pool + tag_index;
++				finish_cmd(c, raw_tag);
++				continue;
++			}
++			tag = HPSA_TAG_DISCARD_ERROR_BITS(raw_tag);
++			c = NULL;
++			hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
++				if (c->busaddr == tag) {
++					finish_cmd(c, raw_tag);
++					break;
++				}
++			}
++		}
++	}
++	spin_unlock_irqrestore(&h->lock, flags);
++	return IRQ_HANDLED;
++}
++
++/* Send a message CDB to the firmware. */
++static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
++						unsigned char type)
++{
++	struct Command {
++		struct CommandListHeader CommandHeader;
++		struct RequestBlock Request;
++		struct ErrDescriptor ErrorDescriptor;
++	};
++	struct Command *cmd;
++	static const size_t cmd_sz = sizeof(*cmd) +
++					sizeof(cmd->ErrorDescriptor);
++	dma_addr_t paddr64;
++	uint32_t paddr32, tag;
++	void __iomem *vaddr;
++	int i, err;
++
++	vaddr = pci_ioremap_bar(pdev, 0);
++	if (vaddr == NULL)
++		return -ENOMEM;
++
++	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
++	 * CCISS commands, so they must be allocated from the lower 4GiB of
++	 * memory.
++	 */
++	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
++	if (err) {
++		iounmap(vaddr);
++		return -ENOMEM;
++	}
++
++	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
++	if (cmd == NULL) {
++		iounmap(vaddr);
++		return -ENOMEM;
++	}
++
++	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
++	 * although there's no guarantee, we assume that the address is at
++	 * least 4-byte aligned (most likely, it's page-aligned).
++	 */
++	paddr32 = paddr64;
++
++	cmd->CommandHeader.ReplyQueue = 0;
++	cmd->CommandHeader.SGList = 0;
++	cmd->CommandHeader.SGTotal = 0;
++	cmd->CommandHeader.Tag.lower = paddr32;
++	cmd->CommandHeader.Tag.upper = 0;
++	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
++
++	cmd->Request.CDBLen = 16;
++	cmd->Request.Type.Type = TYPE_MSG;
++	cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
++	cmd->Request.Type.Direction = XFER_NONE;
++	cmd->Request.Timeout = 0; /* Don't time out */
++	cmd->Request.CDB[0] = opcode;
++	cmd->Request.CDB[1] = type;
++	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
++	cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
++	cmd->ErrorDescriptor.Addr.upper = 0;
++	cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
++
++	writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
++
++	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
++		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
++		if (HPSA_TAG_DISCARD_ERROR_BITS(tag) == paddr32)
++			break;
++		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
++	}
++
++	iounmap(vaddr);
++
++	/* we leak the DMA buffer here ... no choice since the controller could
++	 *  still complete the command.
++	 */
++	if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
++		dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
++			opcode, type);
++		return -ETIMEDOUT;
++	}
++
++	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
++
++	if (tag & HPSA_ERROR_BIT) {
++		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
++			opcode, type);
++		return -EIO;
++	}
++
++	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
++		opcode, type);
++	return 0;
++}
++
++#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
++#define hpsa_noop(p) hpsa_message(p, 3, 0)
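++/* Worst-case cost of an unacknowledged message, from the limits defined
++ * in hpsa.h: HPSA_MSG_SEND_RETRY_LIMIT (10) polls spaced
++ * HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (1000) apart, i.e. roughly ten
++ * seconds before hpsa_message() gives up with -ETIMEDOUT.
++ */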
++
++static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
++{
++/* the #defines are stolen from drivers/pci/msi.h. */
++#define msi_control_reg(base)		(base + PCI_MSI_FLAGS)
++#define PCI_MSIX_FLAGS_ENABLE		(1 << 15)
++
++	int pos;
++	u16 control = 0;
++
++	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
++	if (pos) {
++		pci_read_config_word(pdev, msi_control_reg(pos), &control);
++		if (control & PCI_MSI_FLAGS_ENABLE) {
++			dev_info(&pdev->dev, "resetting MSI\n");
++			pci_write_config_word(pdev, msi_control_reg(pos),
++					control & ~PCI_MSI_FLAGS_ENABLE);
++		}
++	}
++
++	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
++	if (pos) {
++		pci_read_config_word(pdev, msi_control_reg(pos), &control);
++		if (control & PCI_MSIX_FLAGS_ENABLE) {
++			dev_info(&pdev->dev, "resetting MSI-X\n");
++			pci_write_config_word(pdev, msi_control_reg(pos),
++					control & ~PCI_MSIX_FLAGS_ENABLE);
++		}
++	}
++
++	return 0;
++}
++
++/* This does a hard reset of the controller using PCI power management
++ * states.
++ */
++static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev)
++{
++	u16 pmcsr, saved_config_space[32];
++	int i, pos;
++
++	dev_info(&pdev->dev, "using PCI PM to reset controller\n");
++
++	/* This is very nearly the same thing as
++	 *
++	 * pci_save_state(pci_dev);
++	 * pci_set_power_state(pci_dev, PCI_D3hot);
++	 * pci_set_power_state(pci_dev, PCI_D0);
++	 * pci_restore_state(pci_dev);
++	 *
++	 * but we can't use these nice canned kernel routines on
++	 * kexec, because they also check the MSI/MSI-X state in PCI
++	 * configuration space and do the wrong thing when it is
++	 * set/cleared.  Also, the pci_save/restore_state functions
++	 * violate the ordering requirements for restoring the
++	 * configuration space from the CCISS document (see the
++	 * comment below).  So we roll our own ....
++	 */
++
++	for (i = 0; i < 32; i++)
++		pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
++
++	pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
++	if (pos == 0) {
++		dev_err(&pdev->dev,
++			"hpsa_reset_controller: PCI PM not supported\n");
++		return -ENODEV;
++	}
++
++	/* Quoting from the Open CISS Specification: "The Power
++	 * Management Control/Status Register (CSR) controls the power
++	 * state of the device.  The normal operating state is D0,
++	 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
++	 * the controller, place the interface device in D3 then to
++	 * D0, this causes a secondary PCI reset which will reset the
++	 * controller."
++	 */
++
++	/* enter the D3hot power management state */
++	pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
++	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
++	pmcsr |= PCI_D3hot;
++	pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
++
++	msleep(500);
++
++	/* enter the D0 power management state */
++	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
++	pmcsr |= PCI_D0;
++	pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
++
++	msleep(500);
++
++	/* Restore the PCI configuration space.  The Open CISS
++	 * Specification says, "Restore the PCI Configuration
++	 * Registers, offsets 00h through 60h. It is important to
++	 * restore the command register, 16-bits at offset 04h,
++	 * last. Do not restore the configuration status register,
++	 * 16-bits at offset 06h."  Note that the offset is 2*i.
++	 */
++	for (i = 0; i < 32; i++) {
++		if (i == 2 || i == 3)
++			continue;
++		pci_write_config_word(pdev, 2*i, saved_config_space[i]);
++	}
++	wmb();
++	pci_write_config_word(pdev, 4, saved_config_space[2]);
++
++	return 0;
++}
++
++/*
++ * We cannot read the structure directly; for portability we must use
++ * the I/O functions.  This is for debug only.
++ */
++#ifdef HPSA_DEBUG
++static void print_cfg_table(struct device *dev, struct CfgTable *tb)
++{
++	int i;
++	char temp_name[17];
++
++	dev_info(dev, "Controller Configuration information\n");
++	dev_info(dev, "------------------------------------\n");
++	for (i = 0; i < 4; i++)
++		temp_name[i] = readb(&(tb->Signature[i]));
++	temp_name[4] = '\0';
++	dev_info(dev, "   Signature = %s\n", temp_name);
++	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
++	dev_info(dev, "   Transport methods supported = 0x%x\n",
++	       readl(&(tb->TransportSupport)));
++	dev_info(dev, "   Transport methods active = 0x%x\n",
++	       readl(&(tb->TransportActive)));
++	dev_info(dev, "   Requested transport Method = 0x%x\n",
++	       readl(&(tb->HostWrite.TransportRequest)));
++	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
++	       readl(&(tb->HostWrite.CoalIntDelay)));
++	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
++	       readl(&(tb->HostWrite.CoalIntCount)));
++	dev_info(dev, "   Max outstanding commands = %d\n",
++	       readl(&(tb->CmdsOutMax)));
++	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
++	for (i = 0; i < 16; i++)
++		temp_name[i] = readb(&(tb->ServerName[i]));
++	temp_name[16] = '\0';
++	dev_info(dev, "   Server Name = %s\n", temp_name);
++	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
++		readl(&(tb->HeartBeat)));
++}
++#endif				/* HPSA_DEBUG */
++
++static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
++{
++	int i, offset, mem_type, bar_type;
++
++	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
++		return 0;
++	offset = 0;
++	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
++		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
++		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
++			offset += 4;
++		else {
++			mem_type = pci_resource_flags(pdev, i) &
++			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
++			switch (mem_type) {
++			case PCI_BASE_ADDRESS_MEM_TYPE_32:
++			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
++				offset += 4;	/* 32 bit */
++				break;
++			case PCI_BASE_ADDRESS_MEM_TYPE_64:
++				offset += 8;
++				break;
++			default:	/* reserved in PCI 2.2 */
++				dev_warn(&pdev->dev,
++				       "base address is invalid\n");
++				return -1;
++			}
++		}
++		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
++			return i + 1;
++	}
++	return -1;
++}
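++/* Worked example (hypothetical BAR layout): the config table address is
++ * given as a config-space offset.  If BAR 0 is a 64-bit memory BAR it
++ * occupies 8 bytes, so an address of PCI_BASE_ADDRESS_0 + 8 (0x18)
++ * resolves to resource index 1; with two 32-bit BARs the same address
++ * would resolve to index 2.
++ */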
++
++/* If MSI/MSI-X is supported by the kernel we will try to enable it on
++ * controllers that are capable. If not, we use IO-APIC mode.
++ */
++
++static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
++					   struct pci_dev *pdev, __u32 board_id)
++{
++#ifdef CONFIG_PCI_MSI
++	int err;
++	struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
++	{0, 2}, {0, 3}
++	};
++
++	/* Some boards advertise MSI but don't really support it */
++	if ((board_id == 0x40700E11) ||
++	    (board_id == 0x40800E11) ||
++	    (board_id == 0x40820E11) || (board_id == 0x40830E11))
++		goto default_int_mode;
++	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
++		dev_info(&pdev->dev, "MSIX\n");
++		err = pci_enable_msix(pdev, hpsa_msix_entries, 4);
++		if (!err) {
++			h->intr[0] = hpsa_msix_entries[0].vector;
++			h->intr[1] = hpsa_msix_entries[1].vector;
++			h->intr[2] = hpsa_msix_entries[2].vector;
++			h->intr[3] = hpsa_msix_entries[3].vector;
++			h->msix_vector = 1;
++			return;
++		}
++		if (err > 0) {
++			dev_warn(&pdev->dev, "only %d MSI-X vectors "
++			       "available\n", err);
++			goto default_int_mode;
++		} else {
++			dev_warn(&pdev->dev, "MSI-X init failed %d\n",
++			       err);
++			goto default_int_mode;
++		}
++	}
++	if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
++		dev_info(&pdev->dev, "MSI\n");
++		if (!pci_enable_msi(pdev))
++			h->msi_vector = 1;
++		else
++			dev_warn(&pdev->dev, "MSI init failed\n");
++	}
++default_int_mode:
++#endif				/* CONFIG_PCI_MSI */
++	/* if we get here we're going to use the default interrupt mode */
++	h->intr[SIMPLE_MODE_INT] = pdev->irq;
++	return;
++}
++
++static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
++{
++	ushort subsystem_vendor_id, subsystem_device_id, command;
++	__u32 board_id, scratchpad = 0;
++	__u64 cfg_offset;
++	__u32 cfg_base_addr;
++	__u64 cfg_base_addr_index;
++	int i, prod_index, err;
++
++	subsystem_vendor_id = pdev->subsystem_vendor;
++	subsystem_device_id = pdev->subsystem_device;
++	board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
++		    subsystem_vendor_id);
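++	/* e.g. subsystem vendor 0x103C (HP) with subsystem device 0x3225
++	 * yields board_id 0x3225103C, the P600 special-cased below.
++	 */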
++
++	for (i = 0; i < ARRAY_SIZE(products); i++)
++		if (board_id == products[i].board_id)
++			break;
++
++	prod_index = i;
++
++	if (prod_index == ARRAY_SIZE(products)) {
++		prod_index--;
++		if (subsystem_vendor_id != PCI_VENDOR_ID_HP ||
++				!hpsa_allow_any) {
++			dev_warn(&pdev->dev, "unrecognized board ID:"
++				" 0x%08lx, ignoring.\n",
++				(unsigned long) board_id);
++			return -ENODEV;
++		}
++	}
++	/* check to see if controller has been disabled
++	 * BEFORE trying to enable it
++	 */
++	(void)pci_read_config_word(pdev, PCI_COMMAND, &command);
++	if (!(command & 0x02)) {
++		dev_warn(&pdev->dev, "controller appears to be disabled\n");
++		return -ENODEV;
++	}
++
++	err = pci_enable_device(pdev);
++	if (err) {
++		dev_warn(&pdev->dev, "unable to enable PCI device\n");
++		return err;
++	}
++
++	err = pci_request_regions(pdev, "hpsa");
++	if (err) {
++		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
++		return err;
++	}
++
++	/* If the kernel supports MSI/MSI-X we will try to enable that,
++	 * else we use the IO-APIC interrupt assigned to us by system ROM.
++	 */
++	hpsa_interrupt_mode(h, pdev, board_id);
++
++	/* find the memory BAR */
++	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
++		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM)
++			break;
++	}
++	if (i == DEVICE_COUNT_RESOURCE) {
++		dev_warn(&pdev->dev, "no memory BAR found\n");
++		err = -ENODEV;
++		goto err_out_free_res;
++	}
++
++	h->paddr = pci_resource_start(pdev, i); /* addressing mode bits
++						 * already removed
++						 */
++
++	h->vaddr = remap_pci_mem(h->paddr, 0x250);
++
++	/* Wait for the board to become ready.  */
++	for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
++		scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
++		if (scratchpad == HPSA_FIRMWARE_READY)
++			break;
++		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
++	}
++	if (scratchpad != HPSA_FIRMWARE_READY) {
++		dev_warn(&pdev->dev, "board not ready, timed out.\n");
++		err = -ENODEV;
++		goto err_out_free_res;
++	}
++
++	/* get the address index number */
++	cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET);
++	cfg_base_addr &= (__u32) 0x0000ffff;
++	cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
++	if (cfg_base_addr_index == -1) {
++		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
++		err = -ENODEV;
++		goto err_out_free_res;
++	}
++
++	cfg_offset = readl(h->vaddr + SA5_CTMEM_OFFSET);
++	h->cfgtable = remap_pci_mem(pci_resource_start(pdev,
++			       cfg_base_addr_index) + cfg_offset,
++				sizeof(h->cfgtable));
++	h->board_id = board_id;
++
++	/* Query controller for max supported commands: */
++	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
++
++	h->product_name = products[prod_index].product_name;
++	h->access = *(products[prod_index].access);
++	/* Allow room for some ioctls */
++	h->nr_cmds = h->max_commands - 4;
++
++	if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
++	    (readb(&h->cfgtable->Signature[1]) != 'I') ||
++	    (readb(&h->cfgtable->Signature[2]) != 'S') ||
++	    (readb(&h->cfgtable->Signature[3]) != 'S')) {
++		dev_warn(&pdev->dev, "not a valid CISS config table\n");
++		err = -ENODEV;
++		goto err_out_free_res;
++	}
++#ifdef CONFIG_X86
++	{
++		/* Need to enable prefetch in the SCSI core for 6400 in x86 */
++		__u32 prefetch;
++		prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
++		prefetch |= 0x100;
++		writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
++	}
++#endif
++
++	/* Disabling DMA prefetch for the P600
++	 * An ASIC bug may result in a prefetch beyond
++	 * physical memory.
++	 */
++	if (board_id == 0x3225103C) {
++		__u32 dma_prefetch;
++		dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
++		dma_prefetch |= 0x8000;
++		writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
++	}
++
++	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
++	/* Update the field, and then ring the doorbell */
++	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
++	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
++
++	/* Under certain very rare conditions, this can take a while
++	 * (e.g. hot-replacing a failed 144GB drive in a RAID 5 set right
++	 * as we enter this code).
++	 */
++	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
++		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
++			break;
++		/* delay and try again */
++		msleep(10);
++	}
++
++#ifdef HPSA_DEBUG
++	print_cfg_table(&pdev->dev, h->cfgtable);
++#endif				/* HPSA_DEBUG */
++
++	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
++		dev_warn(&pdev->dev, "unable to get board into simple mode\n");
++		err = -ENODEV;
++		goto err_out_free_res;
++	}
++	return 0;
++
++err_out_free_res:
++	/*
++	 * Deliberately omit pci_disable_device(): it does something nasty to
++	 * Smart Array controllers that pci_enable_device does not undo
++	 */
++	pci_release_regions(pdev);
++	return err;
++}
++
++static int __devinit hpsa_init_one(struct pci_dev *pdev,
++				    const struct pci_device_id *ent)
++{
++	int i;
++	int dac;
++	struct ctlr_info *h;
++
++	if (number_of_controllers == 0)
++		printk(KERN_INFO DRIVER_NAME "\n");
++	if (reset_devices) {
++		/* Reset the controller with a PCI power-cycle */
++		if (hpsa_hard_reset_controller(pdev) || hpsa_reset_msi(pdev))
++			return -ENODEV;
++
++		/* Some devices (notably the HP Smart Array 5i Controller)
++		   need a little pause here */
++		msleep(HPSA_POST_RESET_PAUSE_MSECS);
++
++		/* Now try to get the controller to respond to a no-op */
++		for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
++			if (hpsa_noop(pdev) == 0)
++				break;
++			else
++				dev_warn(&pdev->dev, "no-op failed%s\n",
++						(i < 11 ? "; re-trying" : ""));
++		}
++	}
++
++	BUILD_BUG_ON(sizeof(struct CommandList) % 8);
++	h = kzalloc(sizeof(*h), GFP_KERNEL);
++	if (!h)
++		return -1;
++
++	h->busy_initializing = 1;
++	INIT_HLIST_HEAD(&h->cmpQ);
++	INIT_HLIST_HEAD(&h->reqQ);
++	mutex_init(&h->busy_shutting_down);
++	init_completion(&h->scan_wait);
++	if (hpsa_pci_init(h, pdev) != 0)
++		goto clean1;
++
++	sprintf(h->devname, "hpsa%d", number_of_controllers);
++	h->ctlr = number_of_controllers;
++	number_of_controllers++;
++	h->pdev = pdev;
++
++	/* configure PCI DMA stuff */
++	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
++		dac = 1;
++	else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
++		dac = 0;
++	else {
++		dev_err(&pdev->dev, "no suitable DMA available\n");
++		goto clean1;
++	}
++
++	/* make sure the board interrupts are off */
++	h->access.set_intr_mask(h, HPSA_INTR_OFF);
++	if (request_irq(h->intr[SIMPLE_MODE_INT], do_hpsa_intr,
++			IRQF_DISABLED | IRQF_SHARED, h->devname, h)) {
++		dev_err(&pdev->dev, "unable to get irq %d for %s\n",
++		       h->intr[SIMPLE_MODE_INT], h->devname);
++		goto clean2;
++	}
++
++	dev_info(&pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
++	       h->devname, pdev->device, pci_name(pdev),
++	       h->intr[SIMPLE_MODE_INT], dac ? "" : " not");
++
++	h->cmd_pool_bits =
++	    kmalloc(((h->nr_cmds + BITS_PER_LONG -
++		      1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
++	h->cmd_pool = pci_alloc_consistent(h->pdev,
++		    h->nr_cmds * sizeof(*h->cmd_pool),
++		    &(h->cmd_pool_dhandle));
++	h->errinfo_pool = pci_alloc_consistent(h->pdev,
++		    h->nr_cmds * sizeof(*h->errinfo_pool),
++		    &(h->errinfo_pool_dhandle));
++	if ((h->cmd_pool_bits == NULL)
++	    || (h->cmd_pool == NULL)
++	    || (h->errinfo_pool == NULL)) {
++		dev_err(&pdev->dev, "out of memory\n");
++		goto clean4;
++	}
++	spin_lock_init(&h->lock);
++
++	pci_set_drvdata(pdev, h);
++	memset(h->cmd_pool_bits, 0,
++	       ((h->nr_cmds + BITS_PER_LONG -
++		 1) / BITS_PER_LONG) * sizeof(unsigned long));
++
++	hpsa_scsi_setup(h);
++
++	/* Turn the interrupts on so we can service requests */
++	h->access.set_intr_mask(h, HPSA_INTR_ON);
++
++	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
++	h->busy_initializing = 0;
++	return 1;
++
++clean4:
++	kfree(h->cmd_pool_bits);
++	if (h->cmd_pool)
++		pci_free_consistent(h->pdev,
++			    h->nr_cmds * sizeof(struct CommandList),
++			    h->cmd_pool, h->cmd_pool_dhandle);
++	if (h->errinfo_pool)
++		pci_free_consistent(h->pdev,
++			    h->nr_cmds * sizeof(struct ErrorInfo),
++			    h->errinfo_pool,
++			    h->errinfo_pool_dhandle);
++	free_irq(h->intr[SIMPLE_MODE_INT], h);
++clean2:
++clean1:
++	h->busy_initializing = 0;
++	kfree(h);
++	return -1;
++}
++
++static void hpsa_flush_cache(struct ctlr_info *h)
++{
++	char *flush_buf;
++	struct CommandList *c;
++
++	flush_buf = kzalloc(4, GFP_KERNEL);
++	if (!flush_buf)
++		return;
++
++	c = cmd_special_alloc(h);
++	if (!c) {
++		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
++		goto out_of_memory;
++	}
++	fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
++		RAID_CTLR_LUNID, TYPE_CMD);
++	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
++	if (c->err_info->CommandStatus != 0)
++		dev_warn(&h->pdev->dev,
++			"error flushing cache on controller\n");
++	cmd_special_free(h, c);
++out_of_memory:
++	kfree(flush_buf);
++}
++
++static void hpsa_shutdown(struct pci_dev *pdev)
++{
++	struct ctlr_info *h;
++
++	h = pci_get_drvdata(pdev);
++	/* Send the flush-cache command so that all data in the
++	 * battery-backed cache is written out to the disks, then turn
++	 * the board interrupts off and release the IRQ.
++	 */
++	hpsa_flush_cache(h);
++	h->access.set_intr_mask(h, HPSA_INTR_OFF);
++	free_irq(h->intr[SIMPLE_MODE_INT], h);
++#ifdef CONFIG_PCI_MSI
++	if (h->msix_vector)
++		pci_disable_msix(h->pdev);
++	else if (h->msi_vector)
++		pci_disable_msi(h->pdev);
++#endif				/* CONFIG_PCI_MSI */
++}
++
++static void __devexit hpsa_remove_one(struct pci_dev *pdev)
++{
++	struct ctlr_info *h;
++
++	if (pci_get_drvdata(pdev) == NULL) {
++		dev_err(&pdev->dev, "unable to remove device\n");
++		return;
++	}
++	h = pci_get_drvdata(pdev);
++	mutex_lock(&h->busy_shutting_down);
++	remove_from_scan_list(h);
++	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
++	hpsa_shutdown(pdev);
++	iounmap(h->vaddr);
++	pci_free_consistent(h->pdev,
++		h->nr_cmds * sizeof(struct CommandList),
++		h->cmd_pool, h->cmd_pool_dhandle);
++	pci_free_consistent(h->pdev,
++		h->nr_cmds * sizeof(struct ErrorInfo),
++		h->errinfo_pool, h->errinfo_pool_dhandle);
++	kfree(h->cmd_pool_bits);
++	/*
++	 * Deliberately omit pci_disable_device(): it does something nasty to
++	 * Smart Array controllers that pci_enable_device does not undo
++	 */
++	pci_release_regions(pdev);
++	pci_set_drvdata(pdev, NULL);
++	mutex_unlock(&h->busy_shutting_down);
++	kfree(h);
++}
++
++static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
++	__attribute__((unused)) pm_message_t state)
++{
++	return -ENOSYS;
++}
++
++static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
++{
++	return -ENOSYS;
++}
++
++static struct pci_driver hpsa_pci_driver = {
++	.name = "hpsa",
++	.probe = hpsa_init_one,
++	.remove = __devexit_p(hpsa_remove_one),
++	.id_table = hpsa_pci_device_id,
++	.shutdown = hpsa_shutdown,
++	.suspend = hpsa_suspend,
++	.resume = hpsa_resume,
++};
++
++/*
++ *  This is it.  Register the PCI driver information for the cards we
++ *  control; the OS will call our registered routines when it finds one
++ *  of our cards.
++ */
++static int __init hpsa_init(void)
++{
++	int err;
++	/* Start the scan thread */
++	hpsa_scan_thread = kthread_run(hpsa_scan_func, NULL, "hpsa_scan");
++	if (IS_ERR(hpsa_scan_thread)) {
++		err = PTR_ERR(hpsa_scan_thread);
++		return err;
++	}
++	err = pci_register_driver(&hpsa_pci_driver);
++	if (err)
++		kthread_stop(hpsa_scan_thread);
++	return err;
++}
++
++static void __exit hpsa_cleanup(void)
++{
++	pci_unregister_driver(&hpsa_pci_driver);
++	kthread_stop(hpsa_scan_thread);
++}
++
++module_init(hpsa_init);
++module_exit(hpsa_cleanup);
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+new file mode 100644
+index 0000000..6bd1949
+--- /dev/null
++++ b/drivers/scsi/hpsa.h
+@@ -0,0 +1,273 @@
++/*
++ *    Disk Array driver for HP Smart Array SAS controllers
++ *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
++ *
++ *    This program is free software; you can redistribute it and/or modify
++ *    it under the terms of the GNU General Public License as published by
++ *    the Free Software Foundation; version 2 of the License.
++ *
++ *    This program is distributed in the hope that it will be useful,
++ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
++ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
++ *
++ *    You should have received a copy of the GNU General Public License
++ *    along with this program; if not, write to the Free Software
++ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ *    Questions/Comments/Bugfixes to iss_storagedev at hp.com
++ *
++ */
++#ifndef HPSA_H
++#define HPSA_H
++
++#include <scsi/scsicam.h>
++
++#define IO_OK		0
++#define IO_ERROR	1
++
++struct ctlr_info;
++
++struct access_method {
++	void (*submit_command)(struct ctlr_info *h,
++		struct CommandList *c);
++	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
++	unsigned long (*fifo_full)(struct ctlr_info *h);
++	unsigned long (*intr_pending)(struct ctlr_info *h);
++	unsigned long (*command_completed)(struct ctlr_info *h);
++};
++
++struct hpsa_scsi_dev_t {
++	int devtype;
++	int bus, target, lun;		/* as presented to the OS */
++	unsigned char scsi3addr[8];	/* as presented to the HW */
++#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
++	unsigned char device_id[16];    /* from inquiry pg. 0x83 */
++	unsigned char vendor[8];        /* bytes 8-15 of inquiry data */
++	unsigned char model[16];        /* bytes 16-31 of inquiry data */
++	unsigned char revision[4];      /* bytes 32-35 of inquiry data */
++	unsigned char raid_level;	/* from inquiry page 0xC1 */
++};
++
++struct ctlr_info {
++	int	ctlr;
++	char	devname[8];
++	char    *product_name;
++	char	firm_ver[4]; /* Firmware version */
++	struct pci_dev *pdev;
++	__u32	board_id;
++	void __iomem *vaddr;
++	unsigned long paddr;
++	int 	nr_cmds; /* Number of commands allowed on this controller */
++	struct CfgTable __iomem *cfgtable;
++	int	interrupts_enabled;
++	int	major;
++	int 	max_commands;
++	int	commands_outstanding;
++	int 	max_outstanding; /* Debug */
++	int	usage_count;  /* number of opens on all minor devices */
++#	define DOORBELL_INT	0
++#	define PERF_MODE_INT	1
++#	define SIMPLE_MODE_INT	2
++#	define MEMQ_MODE_INT	3
++	unsigned int intr[4];
++	unsigned int msix_vector;
++	unsigned int msi_vector;
++	struct access_method access;
++
++	/* queue and queue Info */
++	struct hlist_head reqQ;
++	struct hlist_head cmpQ;
++	unsigned int Qdepth;
++	unsigned int maxQsinceinit;
++	unsigned int maxSG;
++	spinlock_t lock;
++
++	/* pointers to command and error info pool */
++	struct CommandList 	*cmd_pool;
++	dma_addr_t		cmd_pool_dhandle;
++	struct ErrorInfo 	*errinfo_pool;
++	dma_addr_t		errinfo_pool_dhandle;
++	unsigned long  		*cmd_pool_bits;
++	int			nr_allocs;
++	int			nr_frees;
++	int			busy_initializing;
++	int			busy_scanning;
++	struct mutex		busy_shutting_down;
++	struct list_head	scan_list;
++	struct completion	scan_wait;
++
++	struct Scsi_Host *scsi_host;
++	spinlock_t devlock; /* to protect hba[ctlr]->dev[];  */
++	int ndevices; /* number of used elements in .dev[] array. */
++#define HPSA_MAX_SCSI_DEVS_PER_HBA 256
++	struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA];
++};
++#define HPSA_ABORT_MSG 0
++#define HPSA_DEVICE_RESET_MSG 1
++#define HPSA_BUS_RESET_MSG 2
++#define HPSA_HOST_RESET_MSG 3
++#define HPSA_MSG_SEND_RETRY_LIMIT 10
++#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS 1000
++
++/* Maximum time in seconds the driver will wait for command completions
++ * when polling before giving up.
++ */
++#define HPSA_MAX_POLL_TIME_SECS (20)
++
++/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
++ * how many times to retry TEST UNIT READY on a device
++ * while waiting for it to become ready before giving up.
++ * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
++ * between sending TURs while waiting for a device
++ * to become ready.
++ */
++#define HPSA_TUR_RETRY_LIMIT (20)
++#define HPSA_MAX_WAIT_INTERVAL_SECS (30)
++
++/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
++ * to become ready, in seconds, before giving up on it.
++ * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
++ * between polling the board to see if it is ready, in
++ * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
++ * HPSA_BOARD_READY_ITERATIONS are derived from those.
++ */
++#define HPSA_BOARD_READY_WAIT_SECS (120)
++#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
++#define HPSA_BOARD_READY_POLL_INTERVAL \
++	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
++#define HPSA_BOARD_READY_ITERATIONS \
++	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
++		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
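++/* With the values above that works out to (120 * 1000) / 100 = 1200
++ * polls, each separated by (100 * HZ) / 1000 = HZ / 10 jiffies,
++ * i.e. 100 ms.
++ */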
++#define HPSA_POST_RESET_PAUSE_MSECS (3000)
++#define HPSA_POST_RESET_NOOP_RETRIES (12)
++
++/*  Defining the different access_methods */
++/*
++ * Memory mapped FIFO interface (SMART 53xx cards)
++ */
++#define SA5_DOORBELL	0x20
++#define SA5_REQUEST_PORT_OFFSET	0x40
++#define SA5_REPLY_INTR_MASK_OFFSET	0x34
++#define SA5_REPLY_PORT_OFFSET		0x44
++#define SA5_INTR_STATUS		0x30
++#define SA5_SCRATCHPAD_OFFSET	0xB0
++
++#define SA5_CTCFG_OFFSET	0xB4
++#define SA5_CTMEM_OFFSET	0xB8
++
++#define SA5_INTR_OFF		0x08
++#define SA5B_INTR_OFF		0x04
++#define SA5_INTR_PENDING	0x08
++#define SA5B_INTR_PENDING	0x04
++#define FIFO_EMPTY		0xffffffff
++#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */
++
++#define HPSA_ERROR_BIT		0x02
++#define HPSA_TAG_CONTAINS_INDEX(tag) ((tag) & 0x04)
++#define HPSA_TAG_TO_INDEX(tag) ((tag) >> 3)
++#define HPSA_TAG_DISCARD_ERROR_BITS(tag) ((tag) & ~3)
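++/* Worked example (values for illustration only): a completion tag of
++ * 0x2c has bit 2 set, so it carries a command index:
++ * HPSA_TAG_TO_INDEX(0x2c) == 5, i.e. h->cmd_pool[5].  A tag with bit 2
++ * clear is a command bus address; the controller reports status in
++ * bits 1:0, which HPSA_TAG_DISCARD_ERROR_BITS() masks off before the
++ * tag is compared against c->busaddr in do_hpsa_intr().
++ */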
++
++#define HPSA_INTR_ON 	1
++#define HPSA_INTR_OFF	0
++/*
++ * Send the command to the hardware.
++ */
++static void SA5_submit_command(struct ctlr_info *h,
++	struct CommandList *c)
++{
++#ifdef HPSA_DEBUG
++	 printk(KERN_WARNING "hpsa: Sending %x - down to controller\n",
++		c->busaddr);
++#endif /* HPSA_DEBUG */
++	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
++	h->commands_outstanding++;
++	if (h->commands_outstanding > h->max_outstanding)
++		h->max_outstanding = h->commands_outstanding;
++}
++
++/*
++ *  This card is the opposite of the other cards.
++ *   0 turns interrupts on...
++ *   0x08 turns them off...
++ */
++static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
++{
++	if (val) { /* Turn interrupts on */
++		h->interrupts_enabled = 1;
++		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++	} else { /* Turn them off */
++		h->interrupts_enabled = 0;
++		writel(SA5_INTR_OFF,
++			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++	}
++}
++/*
++ *  Returns true if the fifo is full.
++ */
++static unsigned long SA5_fifo_full(struct ctlr_info *h)
++{
++	return h->commands_outstanding >= h->max_commands;
++}
++/*
++ *   returns value read from hardware.
++ *     returns FIFO_EMPTY if there is nothing to read
++ */
++static unsigned long SA5_completed(struct ctlr_info *h)
++{
++	unsigned long register_value
++		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
++
++	if (register_value != FIFO_EMPTY)
++		h->commands_outstanding--;
++
++#ifdef HPSA_DEBUG
++	if (register_value != FIFO_EMPTY)
++		printk(KERN_INFO "hpsa:  Read %lx back from board\n",
++			register_value);
++	else
++		printk(KERN_INFO "hpsa:  FIFO Empty read\n");
++#endif
++
++	return register_value;
++}
++/*
++ *	Returns true if an interrupt is pending.
++ */
++static unsigned long SA5_intr_pending(struct ctlr_info *h)
++{
++	unsigned long register_value  =
++		readl(h->vaddr + SA5_INTR_STATUS);
++#ifdef HPSA_DEBUG
++	printk(KERN_INFO "hpsa: intr_pending %lx\n", register_value);
++#endif  /* HPSA_DEBUG */
++	if (register_value & SA5_INTR_PENDING)
++		return 1;
++	return 0;
++}
++
++
++static struct access_method SA5_access = {
++	SA5_submit_command,
++	SA5_intr_mask,
++	SA5_fifo_full,
++	SA5_intr_pending,
++	SA5_completed,
++};
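++/* The same table written with C99 designated initializers, for clarity
++ * only (the driver uses the positional form above):
++ *
++ *	static struct access_method SA5_access = {
++ *		.submit_command    = SA5_submit_command,
++ *		.set_intr_mask     = SA5_intr_mask,
++ *		.fifo_full         = SA5_fifo_full,
++ *		.intr_pending      = SA5_intr_pending,
++ *		.command_completed = SA5_completed,
++ *	};
++ */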
++
++struct board_type {
++	__u32	board_id;
++	char	*product_name;
++	struct access_method *access;
++};
++
++
++/* end of old hpsa_scsi.h file */
++
++#endif /* HPSA_H */
++
+diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
+new file mode 100644
+index 0000000..12d7138
+--- /dev/null
++++ b/drivers/scsi/hpsa_cmd.h
+@@ -0,0 +1,326 @@
++/*
++ *    Disk Array driver for HP Smart Array SAS controllers
++ *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
++ *
++ *    This program is free software; you can redistribute it and/or modify
++ *    it under the terms of the GNU General Public License as published by
++ *    the Free Software Foundation; version 2 of the License.
++ *
++ *    This program is distributed in the hope that it will be useful,
++ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
++ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
++ *
++ *    You should have received a copy of the GNU General Public License
++ *    along with this program; if not, write to the Free Software
++ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ *    Questions/Comments/Bugfixes to iss_storagedev at hp.com
++ *
++ */
++#ifndef HPSA_CMD_H
++#define HPSA_CMD_H
++
++/* general boundary definitions */
++#define SENSEINFOBYTES          32 /* may vary between hbas */
++#define MAXSGENTRIES            31
++#define MAXREPLYQS              256
++
++/* Command Status value */
++#define CMD_SUCCESS             0x0000
++#define CMD_TARGET_STATUS       0x0001
++#define CMD_DATA_UNDERRUN       0x0002
++#define CMD_DATA_OVERRUN        0x0003
++#define CMD_INVALID             0x0004
++#define CMD_PROTOCOL_ERR        0x0005
++#define CMD_HARDWARE_ERR        0x0006
++#define CMD_CONNECTION_LOST     0x0007
++#define CMD_ABORTED             0x0008
++#define CMD_ABORT_FAILED        0x0009
++#define CMD_UNSOLICITED_ABORT   0x000A
++#define CMD_TIMEOUT             0x000B
++#define CMD_UNABORTABLE		0x000C
++
++/* Unit Attention ASCs as defined for the MSA2012sa */
++#define POWER_OR_RESET			0x29
++#define STATE_CHANGED			0x2a
++#define UNIT_ATTENTION_CLEARED		0x2f
++#define LUN_FAILED			0x3e
++#define REPORT_LUNS_CHANGED		0x3f
++
++/* Unit Attention ASCQs as defined for the MSA2012sa */
++
++	/* These ASCQs are defined for ASC = POWER_OR_RESET */
++#define POWER_ON_RESET			0x00
++#define POWER_ON_REBOOT			0x01
++#define SCSI_BUS_RESET			0x02
++#define MSA_TARGET_RESET		0x03
++#define CONTROLLER_FAILOVER		0x04
++#define TRANSCEIVER_SE			0x05
++#define TRANSCEIVER_LVD			0x06
++
++	/* These ASCQs are defined for ASC = STATE_CHANGED */
++#define RESERVATION_PREEMPTED		0x03
++#define ASYM_ACCESS_CHANGED		0x06
++#define LUN_CAPACITY_CHANGED		0x09
++
++/* transfer direction */
++#define XFER_NONE               0x00
++#define XFER_WRITE              0x01
++#define XFER_READ               0x02
++#define XFER_RSVD               0x03
++
++/* task attribute */
++#define ATTR_UNTAGGED           0x00
++#define ATTR_SIMPLE             0x04
++#define ATTR_HEADOFQUEUE        0x05
++#define ATTR_ORDERED            0x06
++#define ATTR_ACA                0x07
++
++/* cdb type */
++#define TYPE_CMD				0x00
++#define TYPE_MSG				0x01
++
++/* config space register offsets */
++#define CFG_VENDORID            0x00
++#define CFG_DEVICEID            0x02
++#define CFG_I2OBAR              0x10
++#define CFG_MEM1BAR             0x14
++
++/* i2o space register offsets */
++#define I2O_IBDB_SET            0x20
++#define I2O_IBDB_CLEAR          0x70
++#define I2O_INT_STATUS          0x30
++#define I2O_INT_MASK            0x34
++#define I2O_IBPOST_Q            0x40
++#define I2O_OBPOST_Q            0x44
++#define I2O_DMA1_CFG		0x214
++
++/* Configuration Table */
++#define CFGTBL_ChangeReq        0x00000001l
++#define CFGTBL_AccCmds          0x00000001l
++
++#define CFGTBL_Trans_Simple     0x00000002l
++
++#define CFGTBL_BusType_Ultra2   0x00000001l
++#define CFGTBL_BusType_Ultra3   0x00000002l
++#define CFGTBL_BusType_Fibre1G  0x00000100l
++#define CFGTBL_BusType_Fibre2G  0x00000200l
++struct vals32 {
++	__u32   lower;
++	__u32   upper;
++};
++
++union u64bit {
++	struct vals32 val32;
++	__u64 val;
++};
++
++/* FIXME this is a per controller value (barf!) */
++#define HPSA_MAX_TARGETS_PER_CTLR 16
++#define HPSA_MAX_LUN 256
++#define HPSA_MAX_PHYS_LUN 1024
++
++/* SCSI-3 Commands */
++#pragma pack(1)
++
++#define HPSA_INQUIRY 0x12
++struct InquiryData {
++	__u8 data_byte[36];
++};
++
++#define HPSA_REPORT_LOG 0xc2    /* Report Logical LUNs */
++#define HPSA_REPORT_PHYS 0xc3   /* Report Physical LUNs */
++struct ReportLUNdata {
++	__u8 LUNListLength[4];
++	__u32 reserved;
++	__u8 LUN[HPSA_MAX_LUN][8];
++};
++
++struct ReportExtendedLUNdata {
++	__u8 LUNListLength[4];
++	__u8 extended_response_flag;
++	__u8 reserved[3];
++	__u8 LUN[HPSA_MAX_LUN][24];
++};
++
++struct SenseSubsystem_info {
++	__u8 reserved[36];
++	__u8 portname[8];
++	__u8 reserved1[1108];
++};
++
++#define HPSA_READ_CAPACITY 0x25 /* Read Capacity */
++struct ReadCapdata {
++	__u8 total_size[4];	/* Total size in blocks */
++	__u8 block_size[4];	/* Size of blocks in bytes */
++};
++
++#if 0
++/* 12 byte commands not implemented in firmware yet. */
++#define HPSA_READ 	0xa8
++#define HPSA_WRITE	0xaa
++#endif
++
++#define HPSA_READ   0x28    /* Read(10) */
++#define HPSA_WRITE  0x2a    /* Write(10) */
++
++/* BMIC commands */
++#define BMIC_READ 0x26
++#define BMIC_WRITE 0x27
++#define BMIC_CACHE_FLUSH 0xc2
++#define HPSA_CACHE_FLUSH 0x01	/* C2 was already being used by HPSA */
++
++/* Command List Structure */
++union SCSI3Addr {
++	struct {
++		__u8 Dev;
++		__u8 Bus:6;
++		__u8 Mode:2;        /* b00 */
++	} PeripDev;
++	struct {
++		__u8 DevLSB;
++		__u8 DevMSB:6;
++		__u8 Mode:2;        /* b01 */
++	} LogDev;
++	struct {
++		__u8 Dev:5;
++		__u8 Bus:3;
++		__u8 Targ:6;
++		__u8 Mode:2;        /* b10 */
++	} LogUnit;
++};
++
++struct PhysDevAddr {
++	__u32             TargetId:24;
++	__u32             Bus:6;
++	__u32             Mode:2;
++	/* 2 level target device addr */
++	union SCSI3Addr  Target[2];
++};
++
++struct LogDevAddr {
++	__u32            VolId:30;
++	__u32            Mode:2;
++	__u8             reserved[4];
++};
++
++union LUNAddr {
++	__u8               LunAddrBytes[8];
++	union SCSI3Addr    SCSI3Lun[4];
++	struct PhysDevAddr PhysDev;
++	struct LogDevAddr  LogDev;
++};
++
++struct CommandListHeader {
++	__u8              ReplyQueue;
++	__u8              SGList;
++	__u16             SGTotal;
++	struct vals32     Tag;
++	union LUNAddr     LUN;
++};
++
++struct RequestBlock {
++	__u8   CDBLen;
++	struct {
++		__u8 Type:3;
++		__u8 Attribute:3;
++		__u8 Direction:2;
++	} Type;
++	__u16  Timeout;
++	__u8   CDB[16];
++};
++
++struct ErrDescriptor {
++	struct vals32 Addr;
++	__u32  Len;
++};
++
++struct SGDescriptor {
++	struct vals32 Addr;
++	__u32  Len;
++	__u32  Ext;
++};
++
++union MoreErrInfo {
++	struct {
++		__u8  Reserved[3];
++		__u8  Type;
++		__u32 ErrorInfo;
++	} Common_Info;
++	struct {
++		__u8  Reserved[2];
++		__u8  offense_size; /* size of offending entry */
++		__u8  offense_num;  /* byte # of offense 0-base */
++		__u32 offense_value;
++	} Invalid_Cmd;
++};
++struct ErrorInfo {
++	__u8               ScsiStatus;
++	__u8               SenseLen;
++	__u16              CommandStatus;
++	__u32              ResidualCnt;
++	union MoreErrInfo  MoreErrInfo;
++	__u8               SenseInfo[SENSEINFOBYTES];
++};
++/* Command types */
++#define CMD_IOCTL_PEND  0x01
++#define CMD_SCSI	0x03
++
++struct ctlr_info; /* defined in hpsa.h */
++/* The size of this structure needs to be divisible by 8
++ * od on all architectures, because the controller uses 2
++ * lower bits of the address, and the driver uses 1 lower
++ * bit (3 bits total.)
++ */
++struct CommandList {
++	struct CommandListHeader Header;
++	struct RequestBlock      Request;
++	struct ErrDescriptor     ErrDesc;
++	struct SGDescriptor      SG[MAXSGENTRIES];
++	/* information associated with the command */
++	__u32			   busaddr; /* physical addr of this record */
++	struct ErrorInfo *err_info; /* pointer to the allocated mem */
++	struct ctlr_info	   *h;
++	int			   cmd_type;
++	long			   cmdindex;
++	struct hlist_node list;
++	struct CommandList *prev;
++	struct CommandList *next;
++	struct request *rq;
++	struct completion *waiting;
++	int	 retry_count;
++	void   *scsi_cmd;
++};
++
++/* Configuration Table Structure */
++struct HostWrite {
++	__u32 TransportRequest;
++	__u32 Reserved;
++	__u32 CoalIntDelay;
++	__u32 CoalIntCount;
++};
++
++struct CfgTable {
++	__u8             Signature[4];
++	__u32            SpecValence;
++	__u32            TransportSupport;
++	__u32            TransportActive;
++	struct HostWrite HostWrite;
++	__u32            CmdsOutMax;
++	__u32            BusTypes;
++	__u32            Reserved;
++	__u8             ServerName[16];
++	__u32            HeartBeat;
++	__u32            SCSI_Prefetch;
++};
++
++struct hpsa_pci_info {
++	unsigned char	bus;
++	unsigned char	dev_fn;
++	unsigned short	domain;
++	__u32		board_id;
++};
++
++#pragma pack()
++#endif /* HPSA_CMD_H */
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0002-SCSI-hpsa-fix-typo-in-comments.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0002-SCSI-hpsa-fix-typo-in-comments.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,28 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:41:28 -0600
+Subject: [PATCH 02/79] [SCSI] hpsa: fix typo in comments
+
+commit 466dc22409b97343c6b2168094d5f867093a70c2 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa_cmd.h |    2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
+index 12d7138..2df1c66 100644
+--- a/drivers/scsi/hpsa_cmd.h
++++ b/drivers/scsi/hpsa_cmd.h
+@@ -269,7 +269,7 @@ struct ErrorInfo {
+ 
+ struct ctlr_info; /* defined in hpsa.h */
+ /* The size of this structure needs to be divisible by 8
+- * od on all architectures, because the controller uses 2
++ * on all architectures, because the controller uses 2
+  * lower bits of the address, and the driver uses 1 lower
+  * bit (3 bits total.)
+  */
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0003-SCSI-hpsa-Use-kernel-integer-types-not-userland-ones.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0003-SCSI-hpsa-Use-kernel-integer-types-not-userland-ones.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,577 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:41:33 -0600
+Subject: [PATCH 03/79] [SCSI] hpsa: Use kernel integer types, not userland
+ ones
+
+commit 01a02ffcd55b74e3459bb7358140970e126d4731 upstream.
+
+That is, use u64, u32, u16 and u8 rather than __u64, __u32, __u16 and __u8.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c     |   90 ++++++++++++++--------------
+ drivers/scsi/hpsa.h     |    4 +-
+ drivers/scsi/hpsa_cmd.h |  146 +++++++++++++++++++++++-----------------------
+ 3 files changed, 120 insertions(+), 120 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index bb96fdd..0f4a1f3 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -126,8 +126,8 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c);
+ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
+ static struct CommandList *cmd_alloc(struct ctlr_info *h);
+ static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
+-static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
+-	void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
++static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
++	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
+ 	int cmd_type);
+ 
+ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
+@@ -912,7 +912,7 @@ static void hpsa_scsi_setup(struct ctlr_info *h)
+ }
+ 
+ static void complete_scsi_command(struct CommandList *cp,
+-	int timeout, __u32 tag)
++	int timeout, u32 tag)
+ {
+ 	struct scsi_cmnd *cmd;
+ 	struct ctlr_info *h;
+@@ -1160,7 +1160,7 @@ static void hpsa_map_one(struct pci_dev *pdev,
+ 		size_t buflen,
+ 		int data_direction)
+ {
+-	__u64 addr64;
++	u64 addr64;
+ 
+ 	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
+ 		cp->Header.SGList = 0;
+@@ -1168,14 +1168,14 @@ static void hpsa_map_one(struct pci_dev *pdev,
+ 		return;
+ 	}
+ 
+-	addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
++	addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
+ 	cp->SG[0].Addr.lower =
+-	  (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
++	  (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
+ 	cp->SG[0].Addr.upper =
+-	  (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
++	  (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
+ 	cp->SG[0].Len = buflen;
+-	cp->Header.SGList = (__u8) 1;   /* no. SGs contig in this cmd */
+-	cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
++	cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
++	cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
+ }
+ 
+ static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
+@@ -1485,11 +1485,11 @@ static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
+  * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
+  */
+ static void figure_bus_target_lun(struct ctlr_info *h,
+-	__u8 *lunaddrbytes, int *bus, int *target, int *lun,
++	u8 *lunaddrbytes, int *bus, int *target, int *lun,
+ 	struct hpsa_scsi_dev_t *device)
+ {
+ 
+-	__u32 lunid;
++	u32 lunid;
+ 
+ 	if (is_logical_dev_addr_mode(lunaddrbytes)) {
+ 		/* logical device */
+@@ -1529,7 +1529,7 @@ static void figure_bus_target_lun(struct ctlr_info *h,
+  */
+ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
+ 	struct hpsa_scsi_dev_t *tmpdevice,
+-	struct hpsa_scsi_dev_t *this_device, __u8 *lunaddrbytes,
++	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
+ 	int bus, int target, int lun, unsigned long lunzerobits[],
+ 	int *nmsa2xxx_enclosures)
+ {
+@@ -1576,8 +1576,8 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
+  */
+ static int hpsa_gather_lun_info(struct ctlr_info *h,
+ 	int reportlunsize,
+-	struct ReportLUNdata *physdev, __u32 *nphysicals,
+-	struct ReportLUNdata *logdev, __u32 *nlogicals)
++	struct ReportLUNdata *physdev, u32 *nphysicals,
++	struct ReportLUNdata *logdev, u32 *nlogicals)
+ {
+ 	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
+ 		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
+@@ -1636,9 +1636,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
+ 	struct ReportLUNdata *physdev_list = NULL;
+ 	struct ReportLUNdata *logdev_list = NULL;
+ 	unsigned char *inq_buff = NULL;
+-	__u32 nphysicals = 0;
+-	__u32 nlogicals = 0;
+-	__u32 ndev_allocated = 0;
++	u32 nphysicals = 0;
++	u32 nlogicals = 0;
++	u32 ndev_allocated = 0;
+ 	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
+ 	int ncurrent = 0;
+ 	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
+@@ -1684,7 +1684,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
+ 	/* adjust our table of devices */
+ 	nmsa2xxx_enclosures = 0;
+ 	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
+-		__u8 *lunaddrbytes;
++		u8 *lunaddrbytes;
+ 
+ 		/* Figure out where the LUN ID info is coming from */
+ 		if (i < nphysicals)
+@@ -1790,7 +1790,7 @@ static int hpsa_scatter_gather(struct pci_dev *pdev,
+ {
+ 	unsigned int len;
+ 	struct scatterlist *sg;
+-	__u64 addr64;
++	u64 addr64;
+ 	int use_sg, i;
+ 
+ 	BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
+@@ -1803,20 +1803,20 @@ static int hpsa_scatter_gather(struct pci_dev *pdev,
+ 		goto sglist_finished;
+ 
+ 	scsi_for_each_sg(cmd, sg, use_sg, i) {
+-		addr64 = (__u64) sg_dma_address(sg);
++		addr64 = (u64) sg_dma_address(sg);
+ 		len  = sg_dma_len(sg);
+ 		cp->SG[i].Addr.lower =
+-			(__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
++			(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
+ 		cp->SG[i].Addr.upper =
+-			(__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
++			(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
+ 		cp->SG[i].Len = len;
+ 		cp->SG[i].Ext = 0;  /* we are not chaining */
+ 	}
+ 
+ sglist_finished:
+ 
+-	cp->Header.SGList = (__u8) use_sg;   /* no. SGs contig in this cmd */
+-	cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */
++	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
++	cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
+ 	return 0;
+ }
+ 
+@@ -2053,8 +2053,8 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
+ 	c->cmdindex = i;
+ 
+ 	INIT_HLIST_NODE(&c->list);
+-	c->busaddr = (__u32) cmd_dma_handle;
+-	temp64.val = (__u64) err_dma_handle;
++	c->busaddr = (u32) cmd_dma_handle;
++	temp64.val = (u64) err_dma_handle;
+ 	c->ErrDesc.Addr.lower = temp64.val32.lower;
+ 	c->ErrDesc.Addr.upper = temp64.val32.upper;
+ 	c->ErrDesc.Len = sizeof(*c->err_info);
+@@ -2091,8 +2091,8 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
+ 	memset(c->err_info, 0, sizeof(*c->err_info));
+ 
+ 	INIT_HLIST_NODE(&c->list);
+-	c->busaddr = (__u32) cmd_dma_handle;
+-	temp64.val = (__u64) err_dma_handle;
++	c->busaddr = (u32) cmd_dma_handle;
++	temp64.val = (u64) err_dma_handle;
+ 	c->ErrDesc.Addr.lower = temp64.val32.lower;
+ 	c->ErrDesc.Addr.upper = temp64.val32.upper;
+ 	c->ErrDesc.Len = sizeof(*c->err_info);
+@@ -2378,8 +2378,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
+ 	BYTE sg_used = 0;
+ 	int status = 0;
+ 	int i;
+-	__u32 left;
+-	__u32 sz;
++	u32 left;
++	u32 sz;
+ 	BYTE __user *data_ptr;
+ 
+ 	if (!argp)
+@@ -2542,8 +2542,8 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
+ 	}
+ }
+ 
+-static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
+-	void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
++static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
++	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
+ 	int cmd_type)
+ {
+ 	int pci_dir = XFER_NONE;
+@@ -2721,8 +2721,8 @@ static inline long interrupt_not_for_us(struct ctlr_info *h)
+ 		 (h->interrupts_enabled == 0));
+ }
+ 
+-static inline int bad_tag(struct ctlr_info *h, __u32 tag_index,
+-	__u32 raw_tag)
++static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
++	u32 raw_tag)
+ {
+ 	if (unlikely(tag_index >= h->nr_cmds)) {
+ 		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
+@@ -2731,7 +2731,7 @@ static inline int bad_tag(struct ctlr_info *h, __u32 tag_index,
+ 	return 0;
+ }
+ 
+-static inline void finish_cmd(struct CommandList *c, __u32 raw_tag)
++static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
+ {
+ 	removeQ(c);
+ 	if (likely(c->cmd_type == CMD_SCSI))
+@@ -2745,7 +2745,7 @@ static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
+ 	struct ctlr_info *h = dev_id;
+ 	struct CommandList *c;
+ 	unsigned long flags;
+-	__u32 raw_tag, tag, tag_index;
++	u32 raw_tag, tag, tag_index;
+ 	struct hlist_node *tmp;
+ 
+ 	if (interrupt_not_for_us(h))
+@@ -3063,7 +3063,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
+  */
+ 
+ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
+-					   struct pci_dev *pdev, __u32 board_id)
++					   struct pci_dev *pdev, u32 board_id)
+ {
+ #ifdef CONFIG_PCI_MSI
+ 	int err;
+@@ -3114,15 +3114,15 @@ default_int_mode:
+ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+ {
+ 	ushort subsystem_vendor_id, subsystem_device_id, command;
+-	__u32 board_id, scratchpad = 0;
+-	__u64 cfg_offset;
+-	__u32 cfg_base_addr;
+-	__u64 cfg_base_addr_index;
++	u32 board_id, scratchpad = 0;
++	u64 cfg_offset;
++	u32 cfg_base_addr;
++	u64 cfg_base_addr_index;
+ 	int i, prod_index, err;
+ 
+ 	subsystem_vendor_id = pdev->subsystem_vendor;
+ 	subsystem_device_id = pdev->subsystem_device;
+-	board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
++	board_id = (((u32) (subsystem_device_id << 16) & 0xffff0000) |
+ 		    subsystem_vendor_id);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(products); i++)
+@@ -3199,7 +3199,7 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+ 
+ 	/* get the address index number */
+ 	cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET);
+-	cfg_base_addr &= (__u32) 0x0000ffff;
++	cfg_base_addr &= (u32) 0x0000ffff;
+ 	cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
+ 	if (cfg_base_addr_index == -1) {
+ 		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
+@@ -3232,7 +3232,7 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+ #ifdef CONFIG_X86
+ 	{
+ 		/* Need to enable prefetch in the SCSI core for 6400 in x86 */
+-		__u32 prefetch;
++		u32 prefetch;
+ 		prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
+ 		prefetch |= 0x100;
+ 		writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
+@@ -3244,7 +3244,7 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+ 	 * physical memory.
+ 	 */
+ 	if (board_id == 0x3225103C) {
+-		__u32 dma_prefetch;
++		u32 dma_prefetch;
+ 		dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
+ 		dma_prefetch |= 0x8000;
+ 		writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index 6bd1949..194968e 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -55,7 +55,7 @@ struct ctlr_info {
+ 	char    *product_name;
+ 	char	firm_ver[4]; /* Firmware version */
+ 	struct pci_dev *pdev;
+-	__u32	board_id;
++	u32	board_id;
+ 	void __iomem *vaddr;
+ 	unsigned long paddr;
+ 	int 	nr_cmds; /* Number of commands allowed on this controller */
+@@ -261,7 +261,7 @@ static struct access_method SA5_access = {
+ };
+ 
+ struct board_type {
+-	__u32	board_id;
++	u32	board_id;
+ 	char	*product_name;
+ 	struct access_method *access;
+ };
+diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
+index 2df1c66..88bb3b0 100644
+--- a/drivers/scsi/hpsa_cmd.h
++++ b/drivers/scsi/hpsa_cmd.h
+@@ -107,13 +107,13 @@
+ #define CFGTBL_BusType_Fibre1G  0x00000100l
+ #define CFGTBL_BusType_Fibre2G  0x00000200l
+ struct vals32 {
+-	__u32   lower;
+-	__u32   upper;
++	u32   lower;
++	u32   upper;
+ };
+ 
+ union u64bit {
+ 	struct vals32 val32;
+-	__u64 val;
++	u64 val;
+ };
+ 
+ /* FIXME this is a per controller value (barf!) */
+@@ -126,34 +126,34 @@ union u64bit {
+ 
+ #define HPSA_INQUIRY 0x12
+ struct InquiryData {
+-	__u8 data_byte[36];
++	u8 data_byte[36];
+ };
+ 
+ #define HPSA_REPORT_LOG 0xc2    /* Report Logical LUNs */
+ #define HPSA_REPORT_PHYS 0xc3   /* Report Physical LUNs */
+ struct ReportLUNdata {
+-	__u8 LUNListLength[4];
+-	__u32 reserved;
+-	__u8 LUN[HPSA_MAX_LUN][8];
++	u8 LUNListLength[4];
++	u32 reserved;
++	u8 LUN[HPSA_MAX_LUN][8];
+ };
+ 
+ struct ReportExtendedLUNdata {
+-	__u8 LUNListLength[4];
+-	__u8 extended_response_flag;
+-	__u8 reserved[3];
+-	__u8 LUN[HPSA_MAX_LUN][24];
++	u8 LUNListLength[4];
++	u8 extended_response_flag;
++	u8 reserved[3];
++	u8 LUN[HPSA_MAX_LUN][24];
+ };
+ 
+ struct SenseSubsystem_info {
+-	__u8 reserved[36];
+-	__u8 portname[8];
+-	__u8 reserved1[1108];
++	u8 reserved[36];
++	u8 portname[8];
++	u8 reserved1[1108];
+ };
+ 
+ #define HPSA_READ_CAPACITY 0x25 /* Read Capacity */
+ struct ReadCapdata {
+-	__u8 total_size[4];	/* Total size in blocks */
+-	__u8 block_size[4];	/* Size of blocks in bytes */
++	u8 total_size[4];	/* Total size in blocks */
++	u8 block_size[4];	/* Size of blocks in bytes */
+ };
+ 
+ #if 0
+@@ -174,94 +174,94 @@ struct ReadCapdata {
+ /* Command List Structure */
+ union SCSI3Addr {
+ 	struct {
+-		__u8 Dev;
+-		__u8 Bus:6;
+-		__u8 Mode:2;        /* b00 */
++		u8 Dev;
++		u8 Bus:6;
++		u8 Mode:2;        /* b00 */
+ 	} PeripDev;
+ 	struct {
+-		__u8 DevLSB;
+-		__u8 DevMSB:6;
+-		__u8 Mode:2;        /* b01 */
++		u8 DevLSB;
++		u8 DevMSB:6;
++		u8 Mode:2;        /* b01 */
+ 	} LogDev;
+ 	struct {
+-		__u8 Dev:5;
+-		__u8 Bus:3;
+-		__u8 Targ:6;
+-		__u8 Mode:2;        /* b10 */
++		u8 Dev:5;
++		u8 Bus:3;
++		u8 Targ:6;
++		u8 Mode:2;        /* b10 */
+ 	} LogUnit;
+ };
+ 
+ struct PhysDevAddr {
+-	__u32             TargetId:24;
+-	__u32             Bus:6;
+-	__u32             Mode:2;
++	u32             TargetId:24;
++	u32             Bus:6;
++	u32             Mode:2;
+ 	/* 2 level target device addr */
+ 	union SCSI3Addr  Target[2];
+ };
+ 
+ struct LogDevAddr {
+-	__u32            VolId:30;
+-	__u32            Mode:2;
+-	__u8             reserved[4];
++	u32            VolId:30;
++	u32            Mode:2;
++	u8             reserved[4];
+ };
+ 
+ union LUNAddr {
+-	__u8               LunAddrBytes[8];
++	u8               LunAddrBytes[8];
+ 	union SCSI3Addr    SCSI3Lun[4];
+ 	struct PhysDevAddr PhysDev;
+ 	struct LogDevAddr  LogDev;
+ };
+ 
+ struct CommandListHeader {
+-	__u8              ReplyQueue;
+-	__u8              SGList;
+-	__u16             SGTotal;
++	u8              ReplyQueue;
++	u8              SGList;
++	u16             SGTotal;
+ 	struct vals32     Tag;
+ 	union LUNAddr     LUN;
+ };
+ 
+ struct RequestBlock {
+-	__u8   CDBLen;
++	u8   CDBLen;
+ 	struct {
+-		__u8 Type:3;
+-		__u8 Attribute:3;
+-		__u8 Direction:2;
++		u8 Type:3;
++		u8 Attribute:3;
++		u8 Direction:2;
+ 	} Type;
+-	__u16  Timeout;
+-	__u8   CDB[16];
++	u16  Timeout;
++	u8   CDB[16];
+ };
+ 
+ struct ErrDescriptor {
+ 	struct vals32 Addr;
+-	__u32  Len;
++	u32  Len;
+ };
+ 
+ struct SGDescriptor {
+ 	struct vals32 Addr;
+-	__u32  Len;
+-	__u32  Ext;
++	u32  Len;
++	u32  Ext;
+ };
+ 
+ union MoreErrInfo {
+ 	struct {
+-		__u8  Reserved[3];
+-		__u8  Type;
+-		__u32 ErrorInfo;
++		u8  Reserved[3];
++		u8  Type;
++		u32 ErrorInfo;
+ 	} Common_Info;
+ 	struct {
+-		__u8  Reserved[2];
+-		__u8  offense_size; /* size of offending entry */
+-		__u8  offense_num;  /* byte # of offense 0-base */
+-		__u32 offense_value;
++		u8  Reserved[2];
++		u8  offense_size; /* size of offending entry */
++		u8  offense_num;  /* byte # of offense 0-base */
++		u32 offense_value;
+ 	} Invalid_Cmd;
+ };
+ struct ErrorInfo {
+-	__u8               ScsiStatus;
+-	__u8               SenseLen;
+-	__u16              CommandStatus;
+-	__u32              ResidualCnt;
++	u8               ScsiStatus;
++	u8               SenseLen;
++	u16              CommandStatus;
++	u32              ResidualCnt;
+ 	union MoreErrInfo  MoreErrInfo;
+-	__u8               SenseInfo[SENSEINFOBYTES];
++	u8               SenseInfo[SENSEINFOBYTES];
+ };
+ /* Command types */
+ #define CMD_IOCTL_PEND  0x01
+@@ -279,7 +279,7 @@ struct CommandList {
+ 	struct ErrDescriptor     ErrDesc;
+ 	struct SGDescriptor      SG[MAXSGENTRIES];
+ 	/* information associated with the command */
+-	__u32			   busaddr; /* physical addr of this record */
++	u32			   busaddr; /* physical addr of this record */
+ 	struct ErrorInfo *err_info; /* pointer to the allocated mem */
+ 	struct ctlr_info	   *h;
+ 	int			   cmd_type;
+@@ -295,31 +295,31 @@ struct CommandList {
+ 
+ /* Configuration Table Structure */
+ struct HostWrite {
+-	__u32 TransportRequest;
+-	__u32 Reserved;
+-	__u32 CoalIntDelay;
+-	__u32 CoalIntCount;
++	u32 TransportRequest;
++	u32 Reserved;
++	u32 CoalIntDelay;
++	u32 CoalIntCount;
+ };
+ 
+ struct CfgTable {
+-	__u8             Signature[4];
+-	__u32            SpecValence;
+-	__u32            TransportSupport;
+-	__u32            TransportActive;
++	u8             Signature[4];
++	u32            SpecValence;
++	u32            TransportSupport;
++	u32            TransportActive;
+ 	struct HostWrite HostWrite;
+-	__u32            CmdsOutMax;
+-	__u32            BusTypes;
+-	__u32            Reserved;
+-	__u8             ServerName[16];
+-	__u32            HeartBeat;
+-	__u32            SCSI_Prefetch;
++	u32            CmdsOutMax;
++	u32            BusTypes;
++	u32            Reserved;
++	u8             ServerName[16];
++	u32            HeartBeat;
++	u32            SCSI_Prefetch;
+ };
+ 
+ struct hpsa_pci_info {
+ 	unsigned char	bus;
+ 	unsigned char	dev_fn;
+ 	unsigned short	domain;
+-	__u32		board_id;
++	u32		board_id;
+ };
+ 
+ #pragma pack()
+-- 
+1.7.4.4
+
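
For context on the conversion above: the double-underscore types (__u8,
__u32, __u64) from <linux/types.h> are the exported spellings reserved for
structures that cross the user/kernel boundary, while plain u8/u32/u64 are
for kernel-internal code such as these driver headers. A minimal sketch of
the convention (illustrative only, not part of the patch; the struct names
are made up):

    #include <linux/types.h>

    /* crosses the user/kernel ABI (e.g. an ioctl argument): __u32 */
    struct hypothetical_ioctl_arg {
            __u32 flags;
            __u8  pad[4];
    };

    /* kernel-internal, as hpsa_cmd.h is after this patch: u32 */
    struct hypothetical_driver_state {
            u32 flags;
            u8  pad[4];
    };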

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0004-SCSI-hpsa-avoid-unwanted-promotion-from-unsigned-to-.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0004-SCSI-hpsa-avoid-unwanted-promotion-from-unsigned-to-.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,38 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:41:38 -0600
+Subject: [PATCH 04/79] [SCSI] hpsa: avoid unwanted promotion from unsigned to
+ signed for raid level index
+
+commit 82a72c0a12326a56a323093297e2bad29fe6c29d upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 0f4a1f3..ee9db5e 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -431,7 +431,7 @@ static ssize_t raid_level_show(struct device *dev,
+ 	     struct device_attribute *attr, char *buf)
+ {
+ 	ssize_t l = 0;
+-	int rlevel;
++	unsigned char rlevel;
+ 	struct ctlr_info *h;
+ 	struct scsi_device *sdev;
+ 	struct hpsa_scsi_dev_t *hdev;
+@@ -455,7 +455,7 @@ static ssize_t raid_level_show(struct device *dev,
+ 
+ 	rlevel = hdev->raid_level;
+ 	spin_unlock_irqrestore(&h->lock, flags);
+-	if (rlevel < 0 || rlevel > RAID_UNKNOWN)
++	if (rlevel > RAID_UNKNOWN)
+ 		rlevel = RAID_UNKNOWN;
+ 	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
+ 	return l;
+-- 
+1.7.4.4
+
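
The deleted `rlevel < 0' test above could never fire: hdev->raid_level is an
unsigned char, and promoting an unsigned char to int is value-preserving, so
the promoted value is always in 0..255. A standalone sketch of the promotion
(illustrative, not driver code):

    #include <stdio.h>

    int main(void)
    {
            unsigned char raid_level = 0xff; /* hypothetical raw value */
            int promoted = raid_level;       /* value-preserving: 255 */

            if (promoted < 0)                /* dead code, never true */
                    printf("negative\n");
            else
                    printf("promoted = %d\n", promoted); /* prints 255 */
            return 0;
    }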

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0005-SCSI-hpsa-Use-BUG_ON-instead-of-an-if-statement.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0005-SCSI-hpsa-Use-BUG_ON-instead-of-an-if-statement.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,29 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:41:44 -0600
+Subject: [PATCH 05/79] [SCSI] hpsa: Use BUG_ON instead of an if statement.
+
+commit b2ed4f79194e06766327ae581b063f8747d94ea9 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    3 +--
+ 1 files changed, 1 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index ee9db5e..2c256e3 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -628,8 +628,7 @@ static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
+ 	int i;
+ 	struct hpsa_scsi_dev_t *sd;
+ 
+-	if (entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA)
+-		BUG();
++	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
+ 
+ 	sd = h->dev[entry];
+ 	removed[*nremoved] = h->dev[entry];
+-- 
+1.7.4.4
+
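
BUG_ON(cond) is the idiomatic spelling of `if (cond) BUG();': a single
greppable expression, typically wrapped in unlikely() for the compiler.
Sketch of the pattern (illustrative names, not driver code):

    #include <linux/bug.h>

    static void hypothetical_remove_entry(int entry, int max_entries)
    {
            /* halt on a driver bug instead of corrupting the table */
            BUG_ON(entry < 0 || entry >= max_entries);
            /* ... entry is known to be in range from here on ... */
    }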

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0006-SCSI-hpsa-make-adjust_hpsa_scsi_table-return-void.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0006-SCSI-hpsa-make-adjust_hpsa_scsi_table-return-void.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,36 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:41:49 -0600
+Subject: [PATCH 06/79] [SCSI] hpsa: make adjust_hpsa_scsi_table return void.
+
+commit 4967bd3e5caa87da43c251ae32504230259b18c6 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    3 +--
+ 1 files changed, 1 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 2c256e3..6fe5142 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -733,7 +733,7 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
+ 	return DEVICE_NOT_FOUND;
+ }
+ 
+-static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
++static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
+ 	struct hpsa_scsi_dev_t *sd[], int nsds)
+ {
+ 	/* sd contains scsi3 addresses and devtypes, and inquiry
+@@ -859,7 +859,6 @@ static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
+ free_and_out:
+ 	kfree(added);
+ 	kfree(removed);
+-	return 0;
+ }
+ 
+ /*
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0007-SCSI-hpsa-remove-superfluous-returns-from-void-funct.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0007-SCSI-hpsa-remove-superfluous-returns-from-void-funct.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,53 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:41:54 -0600
+Subject: [PATCH 07/79] [SCSI] hpsa: remove superfluous returns from void
+ functions.
+
+commit bcc4425548ada73475606c9c5d7cfce7eae0eb64 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    5 +----
+ 1 files changed, 1 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 6fe5142..9017e64 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -898,7 +898,7 @@ static int hpsa_slave_alloc(struct scsi_device *sdev)
+ 
+ static void hpsa_slave_destroy(struct scsi_device *sdev)
+ {
+-	return; /* nothing to do. */
++	/* nothing to do. */
+ }
+ 
+ static void hpsa_scsi_setup(struct ctlr_info *h)
+@@ -906,7 +906,6 @@ static void hpsa_scsi_setup(struct ctlr_info *h)
+ 	h->ndevices = 0;
+ 	h->scsi_host = NULL;
+ 	spin_lock_init(&h->devlock);
+-	return;
+ }
+ 
+ static void complete_scsi_command(struct CommandList *cp,
+@@ -1775,7 +1774,6 @@ out:
+ 	kfree(inq_buff);
+ 	kfree(physdev_list);
+ 	kfree(logdev_list);
+-	return;
+ }
+ 
+ /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
+@@ -3106,7 +3104,6 @@ default_int_mode:
+ #endif				/* CONFIG_PCI_MSI */
+ 	/* if we get here we're going to use the default interrupt mode */
+ 	h->intr[SIMPLE_MODE_INT] = pdev->irq;
+-	return;
+ }
+ 
+ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0008-SCSI-hpsa-return-proper-error-codes-not-minus-one.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0008-SCSI-hpsa-return-proper-error-codes-not-minus-one.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,119 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:41:59 -0600
+Subject: [PATCH 08/79] [SCSI] hpsa: return proper error codes not minus one.
+
+commit ecd9aad402765abce04a96b8d1ed15163ca6c8a1 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   37 ++++++++++++++++++++++---------------
+ 1 files changed, 22 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 9017e64..8389cce 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -1130,11 +1130,11 @@ static int hpsa_scsi_detect(struct ctlr_info *h)
+ 	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
+ 		" failed for controller %d\n", h->ctlr);
+ 	scsi_host_put(sh);
+-	return -1;
++	return error;
+  fail:
+ 	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
+ 		" failed for controller %d\n", h->ctlr);
+-	return -1;
++	return -ENOMEM;
+ }
+ 
+ static void hpsa_pci_unmap(struct pci_dev *pdev,
+@@ -1271,7 +1271,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
+ 
+ 	if (c == NULL) {			/* trouble... */
+ 		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+-		return -1;
++		return -ENOMEM;
+ 	}
+ 
+ 	fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
+@@ -3284,7 +3284,7 @@ err_out_free_res:
+ static int __devinit hpsa_init_one(struct pci_dev *pdev,
+ 				    const struct pci_device_id *ent)
+ {
+-	int i;
++	int i, rc;
+ 	int dac;
+ 	struct ctlr_info *h;
+ 
+@@ -3312,14 +3312,15 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
+ 	BUILD_BUG_ON(sizeof(struct CommandList) % 8);
+ 	h = kzalloc(sizeof(*h), GFP_KERNEL);
+ 	if (!h)
+-		return -1;
++		return -ENOMEM;
+ 
+ 	h->busy_initializing = 1;
+ 	INIT_HLIST_HEAD(&h->cmpQ);
+ 	INIT_HLIST_HEAD(&h->reqQ);
+ 	mutex_init(&h->busy_shutting_down);
+ 	init_completion(&h->scan_wait);
+-	if (hpsa_pci_init(h, pdev) != 0)
++	rc = hpsa_pci_init(h, pdev);
++	if (rc != 0)
+ 		goto clean1;
+ 
+ 	sprintf(h->devname, "hpsa%d", number_of_controllers);
+@@ -3328,19 +3329,24 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
+ 	h->pdev = pdev;
+ 
+ 	/* configure PCI DMA stuff */
+-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
++	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
++	if (rc == 0) {
+ 		dac = 1;
+-	else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+-		dac = 0;
+-	else {
+-		dev_err(&pdev->dev, "no suitable DMA available\n");
+-		goto clean1;
++	} else {
++		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
++		if (rc == 0) {
++			dac = 0;
++		} else {
++			dev_err(&pdev->dev, "no suitable DMA available\n");
++			goto clean1;
++		}
+ 	}
+ 
+ 	/* make sure the board interrupts are off */
+ 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
+-	if (request_irq(h->intr[SIMPLE_MODE_INT], do_hpsa_intr,
+-			IRQF_DISABLED | IRQF_SHARED, h->devname, h)) {
++	rc = request_irq(h->intr[SIMPLE_MODE_INT], do_hpsa_intr,
++			IRQF_DISABLED | IRQF_SHARED, h->devname, h);
++	if (rc) {
+ 		dev_err(&pdev->dev, "unable to get irq %d for %s\n",
+ 		       h->intr[SIMPLE_MODE_INT], h->devname);
+ 		goto clean2;
+@@ -3363,6 +3369,7 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
+ 	    || (h->cmd_pool == NULL)
+ 	    || (h->errinfo_pool == NULL)) {
+ 		dev_err(&pdev->dev, "out of memory");
++		rc = -ENOMEM;
+ 		goto clean4;
+ 	}
+ 	spin_lock_init(&h->lock);
+@@ -3397,7 +3404,7 @@ clean2:
+ clean1:
+ 	h->busy_initializing = 0;
+ 	kfree(h);
+-	return -1;
++	return rc;
+ }
+ 
+ static void hpsa_flush_cache(struct ctlr_info *h)
+-- 
+1.7.4.4
+
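
The convention this patch enforces: kernel functions report failure as a
negative errno value (-ENOMEM, -ENODEV, ...) rather than a bare -1, so each
caller can propagate the real reason out to user space. A minimal sketch of
the pattern (illustrative names):

    #include <linux/errno.h>
    #include <linux/slab.h>

    static int hypothetical_init(void)
    {
            void *buf = kzalloc(128, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;   /* a specific code, not -1 */
            kfree(buf);
            return 0;
    }

    static int hypothetical_probe(void)
    {
            int rc = hypothetical_init();

            if (rc)
                    return rc;        /* the reason propagates unchanged */
            return 0;
    }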

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0009-SCSI-hpsa-use-sizeof-not-an-inline-constant-in-memse.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0009-SCSI-hpsa-use-sizeof-not-an-inline-constant-in-memse.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,32 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:42:04 -0600
+Subject: [PATCH 09/79] [SCSI] hpsa: use sizeof() not an inline constant in
+ memset.
+
+commit e89c0ae7babc3e702a9da128b3ac1eb04ed73c2c upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    5 ++---
+ 1 files changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 8389cce..add2ed5 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -1363,9 +1363,8 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
+ 		dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ 		return -1;
+ 	}
+-
+-	memset(&scsi3addr[0], 0, 8); /* address the controller */
+-
++	/* address the controller */
++	memset(scsi3addr, 0, sizeof(scsi3addr));
+ 	fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
+ 		buf, bufsize, 0, scsi3addr, TYPE_CMD);
+ 	if (extended_response)
+-- 
+1.7.4.4
+
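
One caveat the sizeof() idiom above relies on: scsi3addr is a true local
array in that function, so sizeof yields 8. An array-typed parameter, by
contrast, decays to a pointer, where sizeof silently gives the pointer size
instead. Standalone illustration (not driver code):

    #include <stdio.h>

    static void takes_array(unsigned char addr[8])
    {
            /* the parameter decays to a pointer: prints 4 or 8
             * (the pointer size), never the array length */
            printf("param: %zu\n", sizeof(addr));
    }

    int main(void)
    {
            unsigned char scsi3addr[8];

            printf("local: %zu\n", sizeof(scsi3addr)); /* prints 8 */
            takes_array(scsi3addr);
            return 0;
    }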

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0010-SCSI-hpsa-use-kzalloc-not-kmalloc-plus-memset.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0010-SCSI-hpsa-use-kzalloc-not-kmalloc-plus-memset.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,35 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:42:09 -0600
+Subject: [PATCH 10/79] [SCSI] hpsa: use kzalloc not kmalloc plus memset
+
+commit ea6d3bc3143a21ec5e1c9330555b36d67979431d upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    5 ++---
+ 1 files changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index add2ed5..bd081fe 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -1405,13 +1405,12 @@ static int hpsa_update_device_info(struct ctlr_info *h,
+ 	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
+ {
+ #define OBDR_TAPE_INQ_SIZE 49
+-	unsigned char *inq_buff = NULL;
++	unsigned char *inq_buff;
+ 
+-	inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
++	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
+ 	if (!inq_buff)
+ 		goto bail_out;
+ 
+-	memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
+ 	/* Do an inquiry to the device to see what it is. */
+ 	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
+ 		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
+-- 
+1.7.4.4
+
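
kzalloc(n, flags) is exactly kmalloc(n, flags) plus memset(p, 0, n) folded
into one call, so the cleared size can never drift out of sync with the
allocated size. Shape of the change (illustrative sketch, made-up names):

    #include <linux/slab.h>

    #define HYPOTHETICAL_INQ_SIZE 49

    static unsigned char *alloc_inq_buf(void)
    {
            /* before: kmalloc() followed by memset(..., 0, size) */
            return kzalloc(HYPOTHETICAL_INQ_SIZE, GFP_KERNEL);
    }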

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0011-SCSI-hpsa-remove-unwanted-debug-code.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0011-SCSI-hpsa-remove-unwanted-debug-code.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,39 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:42:14 -0600
+Subject: [PATCH 11/79] [SCSI] hpsa: remove unwanted debug code
+
+commit a9a44cbd4b41bf4b9535c1eb7f5e942d8b2e9d54 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    6 ------
+ 1 files changed, 0 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index bd081fe..4594200 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -1580,9 +1580,6 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
+ 	}
+ 	memcpy(nphysicals, &physdev->LUNListLength[0], sizeof(*nphysicals));
+ 	*nphysicals = be32_to_cpu(*nphysicals) / 8;
+-#ifdef DEBUG
+-	dev_info(&h->pdev->dev, "number of physical luns is %d\n", *nphysicals);
+-#endif
+ 	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
+ 		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
+ 			"  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
+@@ -1595,9 +1592,6 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
+ 	}
+ 	memcpy(nlogicals, &logdev->LUNListLength[0], sizeof(*nlogicals));
+ 	*nlogicals = be32_to_cpu(*nlogicals) / 8;
+-#ifdef DEBUG
+-	dev_info(&h->pdev->dev, "number of logical luns is %d\n", *nlogicals);
+-#endif
+ 	/* Reject Logicals in excess of our max capability. */
+ 	if (*nlogicals > HPSA_MAX_LUN) {
+ 		dev_warn(&h->pdev->dev,
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0012-SCSI-hpsa-eliminate-unnecessary-memcpys.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0012-SCSI-hpsa-eliminate-unnecessary-memcpys.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,55 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:42:19 -0600
+Subject: [PATCH 12/79] [SCSI] hpsa: eliminate unnecessary memcpys
+
+commit 6df1e95496f8dfe08a520756187be59f7896f589 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   11 +++--------
+ 1 files changed, 3 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 4594200..ba3dead 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -1483,14 +1483,11 @@ static void figure_bus_target_lun(struct ctlr_info *h,
+ 	u8 *lunaddrbytes, int *bus, int *target, int *lun,
+ 	struct hpsa_scsi_dev_t *device)
+ {
+-
+ 	u32 lunid;
+ 
+ 	if (is_logical_dev_addr_mode(lunaddrbytes)) {
+ 		/* logical device */
+-		memcpy(&lunid, lunaddrbytes, sizeof(lunid));
+-		lunid = le32_to_cpu(lunid);
+-
++		lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
+ 		if (is_msa2xxx(h, device)) {
+ 			*bus = 1;
+ 			*target = (lunid >> 16) & 0x3fff;
+@@ -1578,8 +1575,7 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
+ 		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
+ 		return -1;
+ 	}
+-	memcpy(nphysicals, &physdev->LUNListLength[0], sizeof(*nphysicals));
+-	*nphysicals = be32_to_cpu(*nphysicals) / 8;
++	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
+ 	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
+ 		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
+ 			"  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
+@@ -1590,8 +1586,7 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
+ 		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
+ 		return -1;
+ 	}
+-	memcpy(nlogicals, &logdev->LUNListLength[0], sizeof(*nlogicals));
+-	*nlogicals = be32_to_cpu(*nlogicals) / 8;
++	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
+ 	/* Reject Logicals in excess of our max capability. */
+ 	if (*nlogicals > HPSA_MAX_LUN) {
+ 		dev_warn(&h->pdev->dev,
+-- 
+1.7.4.4
+
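
The replacement pattern above reads a fixed-endian 32-bit field straight out
of a byte buffer through a __be32 cast instead of memcpy-ing into a
temporary: byte-for-byte the result is identical, and sparse can now check
the endianness annotation. The cast does assume the buffer is 4-byte aligned
(it is here, at offset 0 of an allocated struct); unaligned callers would
want get_unaligned_be32() instead. Sketch (illustrative, not driver code):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* LUNListLength is a big-endian byte count; each entry is 8 bytes */
    static u32 lun_count(const u8 *lun_list_length)
    {
            return be32_to_cpu(*(const __be32 *)lun_list_length) / 8;
    }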

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0013-SCSI-hpsa-make-tag-macros-into-functions.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0013-SCSI-hpsa-make-tag-macros-into-functions.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,87 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:42:24 -0600
+Subject: [PATCH 13/79] [SCSI] hpsa: make tag macros into functions
+
+commit a104c99f386191706a11d39be81b8f03cd4f2719 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   26 ++++++++++++++++++++++----
+ drivers/scsi/hpsa.h |    3 ---
+ 2 files changed, 22 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index ba3dead..82987e1 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -2723,6 +2723,24 @@ static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
+ 		complete(c->waiting);
+ }
+ 
++static inline u32 hpsa_tag_contains_index(u32 tag)
++{
++#define DIRECT_LOOKUP_BIT 0x04
++	return tag & DIRECT_LOOKUP_BIT;
++}
++
++static inline u32 hpsa_tag_to_index(u32 tag)
++{
++#define DIRECT_LOOKUP_SHIFT 3
++	return tag >> DIRECT_LOOKUP_SHIFT;
++}
++
++static inline u32 hpsa_tag_discard_error_bits(u32 tag)
++{
++#define HPSA_ERROR_BITS 0x03
++	return tag & ~HPSA_ERROR_BITS;
++}
++
+ static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
+ {
+ 	struct ctlr_info *h = dev_id;
+@@ -2736,15 +2754,15 @@ static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
+ 	spin_lock_irqsave(&h->lock, flags);
+ 	while (interrupt_pending(h)) {
+ 		while ((raw_tag = get_next_completion(h)) != FIFO_EMPTY) {
+-			if (likely(HPSA_TAG_CONTAINS_INDEX(raw_tag))) {
+-				tag_index = HPSA_TAG_TO_INDEX(raw_tag);
++			if (likely(hpsa_tag_contains_index(raw_tag))) {
++				tag_index = hpsa_tag_to_index(raw_tag);
+ 				if (bad_tag(h, tag_index, raw_tag))
+ 					return IRQ_HANDLED;
+ 				c = h->cmd_pool + tag_index;
+ 				finish_cmd(c, raw_tag);
+ 				continue;
+ 			}
+-			tag = HPSA_TAG_DISCARD_ERROR_BITS(raw_tag);
++			tag = hpsa_tag_discard_error_bits(raw_tag);
+ 			c = NULL;
+ 			hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+ 				if (c->busaddr == tag) {
+@@ -2824,7 +2842,7 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
+ 
+ 	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
+ 		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
+-		if (HPSA_TAG_DISCARD_ERROR_BITS(tag) == paddr32)
++		if (hpsa_tag_discard_error_bits(tag) == paddr32)
+ 			break;
+ 		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
+ 	}
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index 194968e..da8dd3e 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -164,9 +164,6 @@ struct ctlr_info {
+ #define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */
+ 
+ #define HPSA_ERROR_BIT		0x02
+-#define HPSA_TAG_CONTAINS_INDEX(tag) ((tag) & 0x04)
+-#define HPSA_TAG_TO_INDEX(tag) ((tag) >> 3)
+-#define HPSA_TAG_DISCARD_ERROR_BITS(tag) ((tag) & ~3)
+ 
+ #define HPSA_INTR_ON 	1
+ #define HPSA_INTR_OFF	0
+-- 
+1.7.4.4
+
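
Turning the tag macros into static inline functions costs nothing at
runtime, but the compiler now type-checks the u32 argument and evaluates it
exactly once. The general idiom (illustrative, not the patch's code):

    #include <linux/types.h>

    /* macro form: no type checking, argument pasted textually */
    #define TAG_TO_INDEX(tag) ((tag) >> 3)

    /* inline form: identical generated code, checked argument */
    static inline u32 tag_to_index(u32 tag)
    {
            return tag >> 3;
    }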

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0014-SCSI-hpsa-fix-some-debug-printks-to-use-dev_dbg-inst.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0014-SCSI-hpsa-fix-some-debug-printks-to-use-dev_dbg-inst.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,56 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:42:30 -0600
+Subject: [PATCH 14/79] [SCSI] hpsa: fix some debug printks to use dev_dbg
+ instead
+
+commit 84ca0be2a2cd9730683310b831db9d2fa60b3b0b upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.h |   13 ++++---------
+ 1 files changed, 4 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index da8dd3e..cdac95b 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -173,10 +173,7 @@ struct ctlr_info {
+ static void SA5_submit_command(struct ctlr_info *h,
+ 	struct CommandList *c)
+ {
+-#ifdef HPSA_DEBUG
+-	 printk(KERN_WARNING "hpsa: Sending %x - down to controller\n",
+-		c->busaddr);
+-#endif /* HPSA_DEBUG */
++	dev_dbg(&h->pdev->dev, "Sending %x\n", c->busaddr);
+ 	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+ 	h->commands_outstanding++;
+ 	if (h->commands_outstanding > h->max_outstanding)
+@@ -225,10 +222,10 @@ static unsigned long SA5_completed(struct ctlr_info *h)
+ 
+ #ifdef HPSA_DEBUG
+ 	if (register_value != FIFO_EMPTY)
+-		printk(KERN_INFO "hpsa:  Read %lx back from board\n",
++		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
+ 			register_value);
+ 	else
+-		printk(KERN_INFO "hpsa:  FIFO Empty read\n");
++		dev_dbg(&h->pdev->dev, "hpsa: FIFO Empty read\n");
+ #endif
+ 
+ 	return register_value;
+@@ -240,9 +237,7 @@ static unsigned long SA5_intr_pending(struct ctlr_info *h)
+ {
+ 	unsigned long register_value  =
+ 		readl(h->vaddr + SA5_INTR_STATUS);
+-#ifdef HPSA_DEBUG
+-	printk(KERN_INFO "hpsa: intr_pending %lx\n", register_value);
+-#endif  /* HPSA_DEBUG */
++	dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value);
+ 	if (register_value &  SA5_INTR_PENDING)
+ 		return  1;
+ 	return 0 ;
+-- 
+1.7.4.4
+
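
dev_dbg() replaces the hand-rolled `#ifdef HPSA_DEBUG printk(...)' blocks:
it prefixes each message with the driver and device name and compiles to
nothing unless DEBUG (or dynamic debug) is enabled. Usage sketch
(illustrative names):

    #include <linux/device.h>
    #include <linux/types.h>

    static void hypothetical_submit(struct device *dev, u32 busaddr)
    {
            /* emitted as e.g. "hpsa 0000:03:00.0: Sending 1f00" */
            dev_dbg(dev, "Sending %x\n", busaddr);
    }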

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0015-SCSI-hpsa-interrupt-pending-function-should-return-b.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0015-SCSI-hpsa-interrupt-pending-function-should-return-b.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,60 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:42:35 -0600
+Subject: [PATCH 15/79] [SCSI] hpsa: interrupt pending function should return
+ bool not unsigned long
+
+commit 900c54404a9456b3ff10745e5e8f64b12c3a6ef7 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    2 +-
+ drivers/scsi/hpsa.h |    8 +++-----
+ 2 files changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 82987e1..314854b 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -2693,7 +2693,7 @@ static inline unsigned long get_next_completion(struct ctlr_info *h)
+ 	return h->access.command_completed(h);
+ }
+ 
+-static inline int interrupt_pending(struct ctlr_info *h)
++static inline bool interrupt_pending(struct ctlr_info *h)
+ {
+ 	return h->access.intr_pending(h);
+ }
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index cdac95b..0eab386 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -33,7 +33,7 @@ struct access_method {
+ 		struct CommandList *c);
+ 	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
+ 	unsigned long (*fifo_full)(struct ctlr_info *h);
+-	unsigned long (*intr_pending)(struct ctlr_info *h);
++	bool (*intr_pending)(struct ctlr_info *h);
+ 	unsigned long (*command_completed)(struct ctlr_info *h);
+ };
+ 
+@@ -233,14 +233,12 @@ static unsigned long SA5_completed(struct ctlr_info *h)
+ /*
+  *	Returns true if an interrupt is pending..
+  */
+-static unsigned long SA5_intr_pending(struct ctlr_info *h)
++static bool SA5_intr_pending(struct ctlr_info *h)
+ {
+ 	unsigned long register_value  =
+ 		readl(h->vaddr + SA5_INTR_STATUS);
+ 	dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value);
+-	if (register_value &  SA5_INTR_PENDING)
+-		return  1;
+-	return 0 ;
++	return register_value & SA5_INTR_PENDING;
+ }
+ 
+ 
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0016-SCSI-hpsa-Allow-multiple-command-completions-per-int.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0016-SCSI-hpsa-Allow-multiple-command-completions-per-int.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,758 @@
+From: Don Brace <brace at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:42:40 -0600
+Subject: [PATCH 16/79] [SCSI] hpsa: Allow multiple command completions per
+ interrupt.
+
+commit 303932fd4ff63e8650d5d5da6cc286a8b5f8318d upstream.
+
+This is done by adding support for the so-called "performant mode"
+(that's really what they called it).  Smart Array controllers
+have a mode which enables multiple command completions to be
+delivered with a single interrupt, "performant" mode.  We want to use
+that mode, as some newer controllers will be requiring this mode.
+
+Signed-off-by: Don Brace <brace at beardog.cce.hp.com>
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: Mike Miller <mikem at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c     |  286 +++++++++++++++++++++++++++++++++++++++--------
+ drivers/scsi/hpsa.h     |  106 ++++++++++++++++-
+ drivers/scsi/hpsa_cmd.h |   78 +++++++++++---
+ 3 files changed, 404 insertions(+), 66 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 314854b..e518766 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -150,6 +150,11 @@ static int check_for_unit_attention(struct ctlr_info *h,
+ 	struct CommandList *c);
+ static void check_ioctl_unit_attention(struct ctlr_info *h,
+ 	struct CommandList *c);
++/* performant mode helper functions */
++static void calc_bucket_map(int *bucket, int num_buckets,
++	int nsgs, int *bucket_map);
++static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
++static inline u32 next_command(struct ctlr_info *h);
+ 
+ static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
+ static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
+@@ -173,10 +178,8 @@ static struct scsi_host_template hpsa_driver_template = {
+ 	.name			= "hpsa",
+ 	.proc_name		= "hpsa",
+ 	.queuecommand		= hpsa_scsi_queue_command,
+-	.can_queue		= 512,
+ 	.this_id		= -1,
+ 	.sg_tablesize		= MAXSGENTRIES,
+-	.cmd_per_lun		= 512,
+ 	.use_clustering		= ENABLE_CLUSTERING,
+ 	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
+ 	.ioctl			= hpsa_ioctl,
+@@ -394,10 +397,44 @@ static inline void addQ(struct hlist_head *list, struct CommandList *c)
+ 	hlist_add_head(&c->list, list);
+ }
+ 
++static inline u32 next_command(struct ctlr_info *h)
++{
++	u32 a;
++
++	if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
++		return h->access.command_completed(h);
++
++	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
++		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
++		(h->reply_pool_head)++;
++		h->commands_outstanding--;
++	} else {
++		a = FIFO_EMPTY;
++	}
++	/* Check for wraparound */
++	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
++		h->reply_pool_head = h->reply_pool;
++		h->reply_pool_wraparound ^= 1;
++	}
++	return a;
++}
++
++/* set_performant_mode: Modify the tag for cciss performant
++ * set bit 0 for pull model, bits 3-1 for block fetch
++ * register number
++ */
++static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
++{
++	if (likely(h->transMethod == CFGTBL_Trans_Performant))
++		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
++}
++
+ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
+ 	struct CommandList *c)
+ {
+ 	unsigned long flags;
++
++	set_performant_mode(h, c);
+ 	spin_lock_irqsave(&h->lock, flags);
+ 	addQ(&h->reqQ, c);
+ 	h->Qdepth++;
+@@ -1116,9 +1153,11 @@ static int hpsa_scsi_detect(struct ctlr_info *h)
+ 	sh->max_cmd_len = MAX_COMMAND_SIZE;
+ 	sh->max_lun = HPSA_MAX_LUN;
+ 	sh->max_id = HPSA_MAX_LUN;
++	sh->can_queue = h->nr_cmds;
++	sh->cmd_per_lun = h->nr_cmds;
+ 	h->scsi_host = sh;
+ 	sh->hostdata[0] = (unsigned long) h;
+-	sh->irq = h->intr[SIMPLE_MODE_INT];
++	sh->irq = h->intr[PERF_MODE_INT];
+ 	sh->unique_id = sh->irq;
+ 	error = scsi_add_host(sh, &h->pdev->dev);
+ 	if (error)
+@@ -1843,7 +1882,8 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
+ 	c->scsi_cmd = cmd;
+ 	c->Header.ReplyQueue = 0;  /* unused in simple mode */
+ 	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
+-	c->Header.Tag.lower = c->busaddr;  /* Use k. address of cmd as tag */
++	c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
++	c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
+ 
+ 	/* Fill in the request block... */
+ 
+@@ -2700,8 +2740,9 @@ static inline bool interrupt_pending(struct ctlr_info *h)
+ 
+ static inline long interrupt_not_for_us(struct ctlr_info *h)
+ {
+-	return ((h->access.intr_pending(h) == 0) ||
+-		 (h->interrupts_enabled == 0));
++	return !(h->msi_vector || h->msix_vector) &&
++		((h->access.intr_pending(h) == 0) ||
++		(h->interrupts_enabled == 0));
+ }
+ 
+ static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
+@@ -2725,13 +2766,13 @@ static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
+ 
+ static inline u32 hpsa_tag_contains_index(u32 tag)
+ {
+-#define DIRECT_LOOKUP_BIT 0x04
++#define DIRECT_LOOKUP_BIT 0x10
+ 	return tag & DIRECT_LOOKUP_BIT;
+ }
+ 
+ static inline u32 hpsa_tag_to_index(u32 tag)
+ {
+-#define DIRECT_LOOKUP_SHIFT 3
++#define DIRECT_LOOKUP_SHIFT 5
+ 	return tag >> DIRECT_LOOKUP_SHIFT;
+ }
+ 
+@@ -2741,42 +2782,61 @@ static inline u32 hpsa_tag_discard_error_bits(u32 tag)
+ 	return tag & ~HPSA_ERROR_BITS;
+ }
+ 
++/* process completion of an indexed ("direct lookup") command */
++static inline u32 process_indexed_cmd(struct ctlr_info *h,
++	u32 raw_tag)
++{
++	u32 tag_index;
++	struct CommandList *c;
++
++	tag_index = hpsa_tag_to_index(raw_tag);
++	if (bad_tag(h, tag_index, raw_tag))
++		return next_command(h);
++	c = h->cmd_pool + tag_index;
++	finish_cmd(c, raw_tag);
++	return next_command(h);
++}
++
++/* process completion of a non-indexed command */
++static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
++	u32 raw_tag)
++{
++	u32 tag;
++	struct CommandList *c = NULL;
++	struct hlist_node *tmp;
++
++	tag = hpsa_tag_discard_error_bits(raw_tag);
++	hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
++		if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
++			finish_cmd(c, raw_tag);
++			return next_command(h);
++		}
++	}
++	bad_tag(h, h->nr_cmds + 1, raw_tag);
++	return next_command(h);
++}
++
+ static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
+ {
+ 	struct ctlr_info *h = dev_id;
+-	struct CommandList *c;
+ 	unsigned long flags;
+-	u32 raw_tag, tag, tag_index;
+-	struct hlist_node *tmp;
++	u32 raw_tag;
+ 
+ 	if (interrupt_not_for_us(h))
+ 		return IRQ_NONE;
+ 	spin_lock_irqsave(&h->lock, flags);
+-	while (interrupt_pending(h)) {
+-		while ((raw_tag = get_next_completion(h)) != FIFO_EMPTY) {
+-			if (likely(hpsa_tag_contains_index(raw_tag))) {
+-				tag_index = hpsa_tag_to_index(raw_tag);
+-				if (bad_tag(h, tag_index, raw_tag))
+-					return IRQ_HANDLED;
+-				c = h->cmd_pool + tag_index;
+-				finish_cmd(c, raw_tag);
+-				continue;
+-			}
+-			tag = hpsa_tag_discard_error_bits(raw_tag);
+-			c = NULL;
+-			hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+-				if (c->busaddr == tag) {
+-					finish_cmd(c, raw_tag);
+-					break;
+-				}
+-			}
+-		}
++	raw_tag = get_next_completion(h);
++	while (raw_tag != FIFO_EMPTY) {
++		if (hpsa_tag_contains_index(raw_tag))
++			raw_tag = process_indexed_cmd(h, raw_tag);
++		else
++			raw_tag = process_nonindexed_cmd(h, raw_tag);
+ 	}
+ 	spin_unlock_irqrestore(&h->lock, flags);
+ 	return IRQ_HANDLED;
+ }
+ 
+-/* Send a message CDB to the firmware. */
++/* Send a message CDB to the firmware. */
+ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
+ 						unsigned char type)
+ {
+@@ -3108,7 +3168,7 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
+ default_int_mode:
+ #endif				/* CONFIG_PCI_MSI */
+ 	/* if we get here we're going to use the default interrupt mode */
+-	h->intr[SIMPLE_MODE_INT] = pdev->irq;
++	h->intr[PERF_MODE_INT] = pdev->irq;
+ }
+ 
+ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+@@ -3118,6 +3178,7 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+ 	u64 cfg_offset;
+ 	u32 cfg_base_addr;
+ 	u64 cfg_base_addr_index;
++	u32 trans_offset;
+ 	int i, prod_index, err;
+ 
+ 	subsystem_vendor_id = pdev->subsystem_vendor;
+@@ -3211,11 +3272,14 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+ 	h->cfgtable = remap_pci_mem(pci_resource_start(pdev,
+ 			       cfg_base_addr_index) + cfg_offset,
+ 				sizeof(h->cfgtable));
+-	h->board_id = board_id;
+-
+-	/* Query controller for max supported commands: */
+-	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
++	/* Find performant mode table. */
++	trans_offset = readl(&(h->cfgtable->TransMethodOffset));
++	h->transtable = remap_pci_mem(pci_resource_start(pdev,
++				cfg_base_addr_index)+cfg_offset+trans_offset,
++				sizeof(*h->transtable));
+ 
++	h->board_id = board_id;
++	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+ 	h->product_name = products[prod_index].product_name;
+ 	h->access = *(products[prod_index].access);
+ 	/* Allow room for some ioctls */
+@@ -3314,7 +3378,12 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
+ 		}
+ 	}
+ 
+-	BUILD_BUG_ON(sizeof(struct CommandList) % 8);
++	/* Command structures must be aligned on a 32-byte boundary because
++	 * the 5 lower bits of the address are used by the hardware. and by
++	 * the driver.  See comments in hpsa.h for more info.
++	 */
++#define COMMANDLIST_ALIGNMENT 32
++	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
+ 	h = kzalloc(sizeof(*h), GFP_KERNEL);
+ 	if (!h)
+ 		return -ENOMEM;
+@@ -3349,17 +3418,17 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
+ 
+ 	/* make sure the board interrupts are off */
+ 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
+-	rc = request_irq(h->intr[SIMPLE_MODE_INT], do_hpsa_intr,
+-			IRQF_DISABLED | IRQF_SHARED, h->devname, h);
++	rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr,
++			IRQF_DISABLED, h->devname, h);
+ 	if (rc) {
+ 		dev_err(&pdev->dev, "unable to get irq %d for %s\n",
+-		       h->intr[SIMPLE_MODE_INT], h->devname);
++		       h->intr[PERF_MODE_INT], h->devname);
+ 		goto clean2;
+ 	}
+ 
+-	dev_info(&pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
+-	       h->devname, pdev->device, pci_name(pdev),
+-	       h->intr[SIMPLE_MODE_INT], dac ? "" : " not");
++	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
++	       h->devname, pdev->device,
++	       h->intr[PERF_MODE_INT], dac ? "" : " not");
+ 
+ 	h->cmd_pool_bits =
+ 	    kmalloc(((h->nr_cmds + BITS_PER_LONG -
+@@ -3389,6 +3458,7 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
+ 	/* Turn the interrupts on so we can service requests */
+ 	h->access.set_intr_mask(h, HPSA_INTR_ON);
+ 
++	hpsa_put_ctlr_into_performant_mode(h);
+ 	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
+ 	h->busy_initializing = 0;
+ 	return 1;
+@@ -3404,7 +3474,7 @@ clean4:
+ 			    h->nr_cmds * sizeof(struct ErrorInfo),
+ 			    h->errinfo_pool,
+ 			    h->errinfo_pool_dhandle);
+-	free_irq(h->intr[SIMPLE_MODE_INT], h);
++	free_irq(h->intr[PERF_MODE_INT], h);
+ clean2:
+ clean1:
+ 	h->busy_initializing = 0;
+@@ -3448,7 +3518,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
+ 	 */
+ 	hpsa_flush_cache(h);
+ 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
+-	free_irq(h->intr[2], h);
++	free_irq(h->intr[PERF_MODE_INT], h);
+ #ifdef CONFIG_PCI_MSI
+ 	if (h->msix_vector)
+ 		pci_disable_msix(h->pdev);
+@@ -3477,7 +3547,10 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
+ 	pci_free_consistent(h->pdev,
+ 		h->nr_cmds * sizeof(struct ErrorInfo),
+ 		h->errinfo_pool, h->errinfo_pool_dhandle);
++	pci_free_consistent(h->pdev, h->reply_pool_size,
++		h->reply_pool, h->reply_pool_dhandle);
+ 	kfree(h->cmd_pool_bits);
++	kfree(h->blockFetchTable);
+ 	/*
+ 	 * Deliberately omit pci_disable_device(): it does something nasty to
+ 	 * Smart Array controllers that pci_enable_device does not undo
+@@ -3509,6 +3582,129 @@ static struct pci_driver hpsa_pci_driver = {
+ 	.resume = hpsa_resume,
+ };
+ 
++/* Fill in bucket_map[], given nsgs (the max number of
++ * scatter gather elements supported) and bucket[],
++ * which is an array of 8 integers.  The bucket[] array
++ * contains 8 different DMA transfer sizes (in 16
++ * byte increments) which the controller uses to fetch
++ * commands.  This function fills in bucket_map[], which
++ * maps a given number of scatter gather elements to one of
++ * the 8 DMA transfer sizes.  The point of it is to allow the
++ * controller to only do as much DMA as needed to fetch the
++ * command, with the DMA transfer size encoded in the lower
++ * bits of the command address.
++ */
++static void  calc_bucket_map(int bucket[], int num_buckets,
++	int nsgs, int *bucket_map)
++{
++	int i, j, b, size;
++
++	/* even a command with 0 SGs requires 4 blocks */
++#define MINIMUM_TRANSFER_BLOCKS 4
++#define NUM_BUCKETS 8
++	/* Note, bucket_map must have nsgs+1 entries. */
++	for (i = 0; i <= nsgs; i++) {
++		/* Compute size of a command with i SG entries */
++		size = i + MINIMUM_TRANSFER_BLOCKS;
++		b = num_buckets; /* Assume the biggest bucket */
++		/* Find the bucket that is just big enough */
++		for (j = 0; j < 8; j++) {
++			if (bucket[j] >= size) {
++				b = j;
++				break;
++			}
++		}
++		/* for a command with i SG entries, use bucket b. */
++		bucket_map[i] = b;
++	}
++}
++
++static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
++{
++	u32 trans_support;
++	u64 trans_offset;
++	/*  5 = 1 s/g entry or 4k
++	 *  6 = 2 s/g entry or 8k
++	 *  8 = 4 s/g entry or 16k
++	 * 10 = 6 s/g entry or 24k
++	 */
++	int bft[8] = {5, 6, 8, 10, 12, 20, 28, 35}; /* for scatter/gathers */
++	int i = 0;
++	int l = 0;
++	unsigned long register_value;
++
++	trans_support = readl(&(h->cfgtable->TransportSupport));
++	if (!(trans_support & PERFORMANT_MODE))
++		return;
++
++	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
++	h->max_sg_entries = 32;
++	/* Performant mode ring buffer and supporting data structures */
++	h->reply_pool_size = h->max_commands * sizeof(u64);
++	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
++				&(h->reply_pool_dhandle));
++
++	/* Need a block fetch table for performant mode */
++	h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
++				sizeof(u32)), GFP_KERNEL);
++
++	if ((h->reply_pool == NULL)
++		|| (h->blockFetchTable == NULL))
++		goto clean_up;
++
++	h->reply_pool_wraparound = 1; /* spec: init to 1 */
++
++	/* Controller spec: zero out this buffer. */
++	memset(h->reply_pool, 0, h->reply_pool_size);
++	h->reply_pool_head = h->reply_pool;
++
++	trans_offset = readl(&(h->cfgtable->TransMethodOffset));
++	bft[7] = h->max_sg_entries + 4;
++	calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
++	for (i = 0; i < 8; i++)
++		writel(bft[i], &h->transtable->BlockFetch[i]);
++
++	/* size of controller ring buffer */
++	writel(h->max_commands, &h->transtable->RepQSize);
++	writel(1, &h->transtable->RepQCount);
++	writel(0, &h->transtable->RepQCtrAddrLow32);
++	writel(0, &h->transtable->RepQCtrAddrHigh32);
++	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
++	writel(0, &h->transtable->RepQAddr0High32);
++	writel(CFGTBL_Trans_Performant,
++		&(h->cfgtable->HostWrite.TransportRequest));
++	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
++	/* under certain very rare conditions, this can take awhile.
++	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
++	 * as we enter this code.) */
++	for (l = 0; l < MAX_CONFIG_WAIT; l++) {
++		register_value = readl(h->vaddr + SA5_DOORBELL);
++		if (!(register_value & CFGTBL_ChangeReq))
++			break;
++		/* delay and try again */
++		set_current_state(TASK_INTERRUPTIBLE);
++		schedule_timeout(10);
++	}
++	register_value = readl(&(h->cfgtable->TransportActive));
++	if (!(register_value & CFGTBL_Trans_Performant)) {
++		dev_warn(&h->pdev->dev, "unable to get board into"
++					" performant mode\n");
++		return;
++	}
++
++	/* Change the access methods to the performant access methods */
++	h->access = SA5_performant_access;
++	h->transMethod = CFGTBL_Trans_Performant;
++
++	return;
++
++clean_up:
++	if (h->reply_pool)
++		pci_free_consistent(h->pdev, h->reply_pool_size,
++			h->reply_pool, h->reply_pool_dhandle);
++	kfree(h->blockFetchTable);
++}
++
+ /*
+  *  This is it.  Register the PCI driver information for the cards we control
+  *  the OS will call our registered routines when it finds one of our cards.
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index 0eab386..0ba1aa3 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -60,14 +60,15 @@ struct ctlr_info {
+ 	unsigned long paddr;
+ 	int 	nr_cmds; /* Number of commands allowed on this controller */
+ 	struct CfgTable __iomem *cfgtable;
++	int     max_sg_entries;
+ 	int	interrupts_enabled;
+ 	int	major;
+ 	int 	max_commands;
+ 	int	commands_outstanding;
+ 	int 	max_outstanding; /* Debug */
+ 	int	usage_count;  /* number of opens all all minor devices */
+-#	define DOORBELL_INT	0
+-#	define PERF_MODE_INT	1
++#	define PERF_MODE_INT	0
++#	define DOORBELL_INT	1
+ #	define SIMPLE_MODE_INT	2
+ #	define MEMQ_MODE_INT	3
+ 	unsigned int intr[4];
+@@ -102,6 +103,23 @@ struct ctlr_info {
+ 	int ndevices; /* number of used elements in .dev[] array. */
+ #define HPSA_MAX_SCSI_DEVS_PER_HBA 256
+ 	struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA];
++	/*
++	 * Performant mode tables.
++	 */
++	u32 trans_support;
++	u32 trans_offset;
++	struct TransTable_struct *transtable;
++	unsigned long transMethod;
++
++	/*
++	 * Performant mode completion buffer
++	 */
++	u64 *reply_pool;
++	dma_addr_t reply_pool_dhandle;
++	u64 *reply_pool_head;
++	size_t reply_pool_size;
++	unsigned char reply_pool_wraparound;
++	u32 *blockFetchTable;
+ };
+ #define HPSA_ABORT_MSG 0
+ #define HPSA_DEVICE_RESET_MSG 1
+@@ -165,6 +183,16 @@ struct ctlr_info {
+ 
+ #define HPSA_ERROR_BIT		0x02
+ 
++/* Performant mode flags */
++#define SA5_PERF_INTR_PENDING   0x04
++#define SA5_PERF_INTR_OFF       0x05
++#define SA5_OUTDB_STATUS_PERF_BIT       0x01
++#define SA5_OUTDB_CLEAR_PERF_BIT        0x01
++#define SA5_OUTDB_CLEAR         0xA0
++#define SA5_OUTDB_CLEAR_PERF_BIT        0x01
++#define SA5_OUTDB_STATUS        0x9C
++
++
+ #define HPSA_INTR_ON 	1
+ #define HPSA_INTR_OFF	0
+ /*
+@@ -173,7 +201,8 @@ struct ctlr_info {
+ static void SA5_submit_command(struct ctlr_info *h,
+ 	struct CommandList *c)
+ {
+-	dev_dbg(&h->pdev->dev, "Sending %x\n", c->busaddr);
++	dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
++		c->Header.Tag.lower);
+ 	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+ 	h->commands_outstanding++;
+ 	if (h->commands_outstanding > h->max_outstanding)
+@@ -196,6 +225,52 @@ static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
+ 			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ 	}
+ }
++
++static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
++{
++	if (val) { /* turn on interrupts */
++		h->interrupts_enabled = 1;
++		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++	} else {
++		h->interrupts_enabled = 0;
++		writel(SA5_PERF_INTR_OFF,
++			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
++	}
++}
++
++static unsigned long SA5_performant_completed(struct ctlr_info *h)
++{
++	unsigned long register_value = FIFO_EMPTY;
++
++	/* flush the controller write of the reply queue by reading
++	 * outbound doorbell status register.
++	 */
++	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
++	/* msi auto clears the interrupt pending bit. */
++	if (!(h->msi_vector || h->msix_vector)) {
++		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
++		/* Do a read in order to flush the write to the controller
++		 * (as per spec.)
++		 */
++		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
++	}
++
++	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
++		register_value = *(h->reply_pool_head);
++		(h->reply_pool_head)++;
++		h->commands_outstanding--;
++	} else {
++		register_value = FIFO_EMPTY;
++	}
++	/* Check for wraparound */
++	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
++		h->reply_pool_head = h->reply_pool;
++		h->reply_pool_wraparound ^= 1;
++	}
++
++	return register_value;
++}
++
+ /*
+  *  Returns true if fifo is full.
+  *
+@@ -241,6 +316,20 @@ static bool SA5_intr_pending(struct ctlr_info *h)
+ 	return register_value & SA5_INTR_PENDING;
+ }
+ 
++static bool SA5_performant_intr_pending(struct ctlr_info *h)
++{
++	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
++
++	if (!register_value)
++		return false;
++
++	if (h->msi_vector || h->msix_vector)
++		return true;
++
++	/* Read outbound doorbell to flush */
++	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
++	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
++}
+ 
+ static struct access_method SA5_access = {
+ 	SA5_submit_command,
+@@ -250,14 +339,19 @@ static struct access_method SA5_access = {
+ 	SA5_completed,
+ };
+ 
++static struct access_method SA5_performant_access = {
++	SA5_submit_command,
++	SA5_performant_intr_mask,
++	SA5_fifo_full,
++	SA5_performant_intr_pending,
++	SA5_performant_completed,
++};
++
+ struct board_type {
+ 	u32	board_id;
+ 	char	*product_name;
+ 	struct access_method *access;
+ };
+ 
+-
+-/* end of old hpsa_scsi.h file */
+-
+ #endif /* HPSA_H */
+ 
+diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
+index 88bb3b0..3e0abdf 100644
+--- a/drivers/scsi/hpsa_cmd.h
++++ b/drivers/scsi/hpsa_cmd.h
+@@ -101,6 +101,7 @@
+ #define CFGTBL_AccCmds          0x00000001l
+ 
+ #define CFGTBL_Trans_Simple     0x00000002l
++#define CFGTBL_Trans_Performant 0x00000004l
+ 
+ #define CFGTBL_BusType_Ultra2   0x00000001l
+ #define CFGTBL_BusType_Ultra3   0x00000002l
+@@ -267,12 +268,31 @@ struct ErrorInfo {
+ #define CMD_IOCTL_PEND  0x01
+ #define CMD_SCSI	0x03
+ 
++/* This structure needs to be divisible by 32 for new
++ * indexing method and performant mode.
++ */
++#define PAD32 32
++#define PAD64DIFF 0
++#define USEEXTRA ((sizeof(void *) - 4)/4)
++#define PADSIZE (PAD32 + PAD64DIFF * USEEXTRA)
++
++#define DIRECT_LOOKUP_SHIFT 5
++#define DIRECT_LOOKUP_BIT 0x10
++
++#define HPSA_ERROR_BIT          0x02
+ struct ctlr_info; /* defined in hpsa.h */
+-/* The size of this structure needs to be divisible by 8
+- * on all architectures, because the controller uses 2
+- * lower bits of the address, and the driver uses 1 lower
+- * bit (3 bits total.)
++/* The size of this structure needs to be divisible by 32
++ * on all architectures because low 5 bits of the addresses
++ * are used as follows:
++ *
++ * bit 0: to device, used to indicate "performant mode" command
++ *        from device, indicates error status.
++ * bit 1-3: to device, indicates block fetch table entry for
++ *          reducing DMA in fetching commands from host memory.
++ * bit 4: used to indicate whether tag is "direct lookup" (index),
++ *        or a bus address.
+  */
++
+ struct CommandList {
+ 	struct CommandListHeader Header;
+ 	struct RequestBlock      Request;
+@@ -291,6 +311,14 @@ struct CommandList {
+ 	struct completion *waiting;
+ 	int	 retry_count;
+ 	void   *scsi_cmd;
++
++/* on 64 bit architectures, to get this to be 32-byte-aligned
++ * it so happens we need no padding, on 32 bit systems,
++ * we need 8 bytes of padding.   This does that.
++ */
++#define COMMANDLIST_PAD ((8 - sizeof(long))/4 * 8)
++	u8 pad[COMMANDLIST_PAD];
++
+ };
+ 
+ /* Configuration Table Structure */
+@@ -301,18 +329,38 @@ struct HostWrite {
+ 	u32 CoalIntCount;
+ };
+ 
++#define SIMPLE_MODE     0x02
++#define PERFORMANT_MODE 0x04
++#define MEMQ_MODE       0x08
++
+ struct CfgTable {
+-	u8             Signature[4];
+-	u32            SpecValence;
+-	u32            TransportSupport;
+-	u32            TransportActive;
+-	struct HostWrite HostWrite;
+-	u32            CmdsOutMax;
+-	u32            BusTypes;
+-	u32            Reserved;
+-	u8             ServerName[16];
+-	u32            HeartBeat;
+-	u32            SCSI_Prefetch;
++	u8            Signature[4];
++	u32		SpecValence;
++	u32           TransportSupport;
++	u32           TransportActive;
++	struct 		HostWrite HostWrite;
++	u32           CmdsOutMax;
++	u32           BusTypes;
++	u32           TransMethodOffset;
++	u8            ServerName[16];
++	u32           HeartBeat;
++	u32           SCSI_Prefetch;
++	u32	 	MaxScatterGatherElements;
++	u32		MaxLogicalUnits;
++	u32		MaxPhysicalDevices;
++	u32		MaxPhysicalDrivesPerLogicalUnit;
++	u32		MaxPerformantModeCommands;
++};
++
++#define NUM_BLOCKFETCH_ENTRIES 8
++struct TransTable_struct {
++	u32            BlockFetch[NUM_BLOCKFETCH_ENTRIES];
++	u32            RepQSize;
++	u32            RepQCount;
++	u32            RepQCtrAddrLow32;
++	u32            RepQCtrAddrHigh32;
++	u32            RepQAddr0Low32;
++	u32            RepQAddr0High32;
+ };
+ 
+ struct hpsa_pci_info {
+-- 
+1.7.4.4
+
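
The heart of the performant-mode path added above is the reply ring: the
controller DMA-writes 64-bit tags into reply_pool, using bit 0 as a phase
bit that flips on every pass around the ring, so the host can tell fresh
entries from stale ones without any producer/consumer registers
(reply_pool_wraparound starts at 1 against a zeroed ring, per the spec
comment). A stripped-down, standalone model of that consume logic
(illustrative only, not driver code):

    #include <stdint.h>

    #define RING_ENTRIES 4
    #define FIFO_EMPTY UINT64_MAX

    static uint64_t ring[RING_ENTRIES];   /* starts zeroed */
    static uint64_t *head = ring;
    static unsigned int phase = 1;        /* spec: init to 1 */

    /* Return the next completed tag, or FIFO_EMPTY if none pending. */
    static uint64_t next_completion(void)
    {
            uint64_t v = *head;

            if ((v & 1) != phase)         /* not yet written this pass */
                    return FIFO_EMPTY;
            if (++head == ring + RING_ENTRIES) {
                    head = ring;          /* wrap around ... */
                    phase ^= 1;           /* ... and flip the phase */
            }
            return v;
    }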

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0017-SCSI-hpsa-add-pci-ids-for-storageworks-1210m-remove-.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0017-SCSI-hpsa-add-pci-ids-for-storageworks-1210m-remove-.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,62 @@
+From: Mike Miller <mikem at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:42:45 -0600
+Subject: [PATCH 17/79] [SCSI] hpsa: add pci ids for storageworks 1210m,
+ remove p400, p800, p700m
+
+commit f8b01eb9049113920f4eb2f944a0c713ce597673 upstream.
+
+and update pci_ids.h to include new PCI ID for StorageWorks 1210m variant.
+
+Signed-off-by: Mike Miller <mikem at beardog.cce.hp.com>
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   11 +++++------
+ 1 files changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index e518766..46055e2 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -77,9 +77,6 @@ MODULE_PARM_DESC(hpsa_allow_any,
+ 
+ /* define the PCI info for the cards we can control */
+ static const struct pci_device_id hpsa_pci_device_id[] = {
+-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3223},
+-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3234},
+-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x323D},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
+@@ -87,6 +84,9 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324a},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324b},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
++#define PCI_DEVICE_ID_HP_CISSF 0x333f
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x333F},
+ 	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,             PCI_ANY_ID, PCI_ANY_ID,
+ 		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
+ 	{0,}
+@@ -99,9 +99,6 @@ MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
+  *  access = Address of the struct of function pointers
+  */
+ static struct board_type products[] = {
+-	{0x3223103C, "Smart Array P800", &SA5_access},
+-	{0x3234103C, "Smart Array P400", &SA5_access},
+-	{0x323d103c, "Smart Array P700M", &SA5_access},
+ 	{0x3241103C, "Smart Array P212", &SA5_access},
+ 	{0x3243103C, "Smart Array P410", &SA5_access},
+ 	{0x3245103C, "Smart Array P410i", &SA5_access},
+@@ -109,6 +106,8 @@ static struct board_type products[] = {
+ 	{0x3249103C, "Smart Array P812", &SA5_access},
+ 	{0x324a103C, "Smart Array P712m", &SA5_access},
+ 	{0x324b103C, "Smart Array P711m", &SA5_access},
++	{0x3233103C, "StorageWorks P1210m", &SA5_access},
++	{0x333F103C, "StorageWorks P1210m", &SA5_access},
+ 	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
+ };
+ 
+-- 
+1.7.4.4
+
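The 32-bit keys in the products[] table above put the PCI subsystem
device id in the high half and the subsystem vendor id (0x103C for HP)
in the low half. Roughly, the driver composes the board id like this
(a sketch of the composition done in hpsa_pci_init(), not a verbatim
quote):

    u32 board_id = ((u32) subsystem_device_id << 16) | subsystem_vendor_id;
    /* e.g. subsystem device 0x3233, vendor 0x103C -> 0x3233103C,
     * matching the new StorageWorks P1210m entry above.
     */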

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0018-SCSI-hpsa-Fix-p1210m-LUN-assignment.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0018-SCSI-hpsa-Fix-p1210m-LUN-assignment.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,230 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:42:50 -0600
+Subject: [PATCH 18/79] [SCSI] hpsa: Fix p1210m LUN assignment.
+
+commit 339b2b14c634da58626eb742370d915591c2fb6d upstream.
+
+The p1210m responds to SCSI REPORT LUNS, unlike traditional Smart
+Array controllers.  This means that the bus, target, and lun
+assignments done by the driver cannot be arbitrary, but must match
+what SCSI REPORT LUNS returns.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |  117 ++++++++++++++++++++++++++++++++++++++++++---------
+ drivers/scsi/hpsa.h |    1 +
+ 2 files changed, 98 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 46055e2..cc9e92a 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -458,6 +458,15 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
+ 	return (scsi3addr[3] & 0xC0) == 0x40;
+ }
+ 
++static inline int is_scsi_rev_5(struct ctlr_info *h)
++{
++	if (!h->hba_inquiry_data)
++		return 0;
++	if ((h->hba_inquiry_data[2] & 0x07) == 5)
++		return 1;
++	return 0;
++}
++
+ static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
+ 	"UNKNOWN"
+ };
+@@ -1525,22 +1534,44 @@ static void figure_bus_target_lun(struct ctlr_info *h,
+ 
+ 	if (is_logical_dev_addr_mode(lunaddrbytes)) {
+ 		/* logical device */
+-		lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
+-		if (is_msa2xxx(h, device)) {
+-			*bus = 1;
+-			*target = (lunid >> 16) & 0x3fff;
+-			*lun = lunid & 0x00ff;
+-		} else {
++		if (unlikely(is_scsi_rev_5(h))) {
++			/* p1210m, logical drives lun assignments
++			 * match SCSI REPORT LUNS data.
++			 */
++			lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
+ 			*bus = 0;
+-			*lun = 0;
+-			*target = lunid & 0x3fff;
++			*target = 0;
++			*lun = (lunid & 0x3fff) + 1;
++		} else {
++			/* not p1210m... */
++			lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
++			if (is_msa2xxx(h, device)) {
++				/* msa2xxx way, put logicals on bus 1
++				 * and match target/lun numbers box
++				 * reports.
++				 */
++				*bus = 1;
++				*target = (lunid >> 16) & 0x3fff;
++				*lun = lunid & 0x00ff;
++			} else {
++				/* Traditional smart array way. */
++				*bus = 0;
++				*lun = 0;
++				*target = lunid & 0x3fff;
++			}
+ 		}
+ 	} else {
+ 		/* physical device */
+ 		if (is_hba_lunid(lunaddrbytes))
+-			*bus = 3;
++			if (unlikely(is_scsi_rev_5(h))) {
++				*bus = 0; /* put p1210m ctlr at 0,0,0 */
++				*target = 0;
++				*lun = 0;
++				return;
++			} else
++				*bus = 3; /* traditional smartarray */
+ 		else
+-			*bus = 2;
++			*bus = 2; /* physical disk */
+ 		*target = -1;
+ 		*lun = -1; /* we will fill these in later. */
+ 	}
+@@ -1580,6 +1611,9 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
+ 	if (is_hba_lunid(scsi3addr))
+ 		return 0; /* Don't add the RAID controller here. */
+ 
++	if (is_scsi_rev_5(h))
++		return 0; /* p1210m doesn't need to do this. */
++
+ #define MAX_MSA2XXX_ENCLOSURES 32
+ 	if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
+ 		dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
+@@ -1643,6 +1677,31 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
+ 	return 0;
+ }
+ 
++u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
++	int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
++	struct ReportLUNdata *logdev_list)
++{
++	/* Helper function, figure out where the LUN ID info is coming from
++	 * given index i, lists of physical and logical devices, where in
++	 * the list the raid controller is supposed to appear (first or last)
++	 */
++
++	int logicals_start = nphysicals + (raid_ctlr_position == 0);
++	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
++
++	if (i == raid_ctlr_position)
++		return RAID_CTLR_LUNID;
++
++	if (i < logicals_start)
++		return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
++
++	if (i < last_device)
++		return &logdev_list->LUN[i - nphysicals -
++			(raid_ctlr_position == 0)][0];
++	BUG();
++	return NULL;
++}
++
+ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
+ {
+ 	/* the idea here is we could get notified
+@@ -1666,6 +1725,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
+ 	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
+ 	int i, nmsa2xxx_enclosures, ndevs_to_allocate;
+ 	int bus, target, lun;
++	int raid_ctlr_position;
+ 	DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
+ 
+ 	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
+@@ -1703,23 +1763,22 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
+ 		ndev_allocated++;
+ 	}
+ 
++	if (unlikely(is_scsi_rev_5(h)))
++		raid_ctlr_position = 0;
++	else
++		raid_ctlr_position = nphysicals + nlogicals;
++
+ 	/* adjust our table of devices */
+ 	nmsa2xxx_enclosures = 0;
+ 	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
+ 		u8 *lunaddrbytes;
+ 
+ 		/* Figure out where the LUN ID info is coming from */
+-		if (i < nphysicals)
+-			lunaddrbytes = &physdev_list->LUN[i][0];
+-		else
+-			if (i < nphysicals + nlogicals)
+-				lunaddrbytes =
+-					&logdev_list->LUN[i-nphysicals][0];
+-			else /* jam in the RAID controller at the end */
+-				lunaddrbytes = RAID_CTLR_LUNID;
+-
++		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
++			i, nphysicals, nlogicals, physdev_list, logdev_list);
+ 		/* skip masked physical devices. */
+-		if (lunaddrbytes[3] & 0xC0 && i < nphysicals)
++		if (lunaddrbytes[3] & 0xC0 &&
++			i < nphysicals + (raid_ctlr_position == 0))
+ 			continue;
+ 
+ 		/* Get device type, vendor, model, device id */
+@@ -3349,6 +3408,22 @@ err_out_free_res:
+ 	return err;
+ }
+ 
++static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
++{
++	int rc;
++
++#define HBA_INQUIRY_BYTE_COUNT 64
++	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
++	if (!h->hba_inquiry_data)
++		return;
++	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
++		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
++	if (rc != 0) {
++		kfree(h->hba_inquiry_data);
++		h->hba_inquiry_data = NULL;
++	}
++}
++
+ static int __devinit hpsa_init_one(struct pci_dev *pdev,
+ 				    const struct pci_device_id *ent)
+ {
+@@ -3458,6 +3533,7 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
+ 	h->access.set_intr_mask(h, HPSA_INTR_ON);
+ 
+ 	hpsa_put_ctlr_into_performant_mode(h);
++	hpsa_hba_inquiry(h);
+ 	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
+ 	h->busy_initializing = 0;
+ 	return 1;
+@@ -3550,6 +3626,7 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
+ 		h->reply_pool, h->reply_pool_dhandle);
+ 	kfree(h->cmd_pool_bits);
+ 	kfree(h->blockFetchTable);
++	kfree(h->hba_inquiry_data);
+ 	/*
+ 	 * Deliberately omit pci_disable_device(): it does something nasty to
+ 	 * Smart Array controllers that pci_enable_device does not undo
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index 0ba1aa3..1ab0c1b 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -120,6 +120,7 @@ struct ctlr_info {
+ 	size_t reply_pool_size;
+ 	unsigned char reply_pool_wraparound;
+ 	u32 *blockFetchTable;
++	unsigned char *hba_inquiry_data;
+ };
+ #define HPSA_ABORT_MSG 0
+ #define HPSA_DEVICE_RESET_MSG 1
+-- 
+1.7.4.4
+
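Condensed from the figure_bus_target_lun() hunk above, the p1210m
(SCSI revision 5) mapping for logical drives reduces to the following
(a restatement for clarity, not additional driver code):

    /* Logical drive LUN ids must track SCSI REPORT LUNS, so every
     * logical drive lands on bus 0, target 0; the +1 leaves 0:0:0
     * free for the controller itself (see the physical branch).
     */
    *bus = 0;
    *target = 0;
    *lun = (lunid & 0x3fff) + 1;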

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0019-SCSI-hpsa-Return-DID_RESET-for-commands-which-comple.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0019-SCSI-hpsa-Return-DID_RESET-for-commands-which-comple.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,33 @@
+From: Matt Gates <matthew.gates at hp.com>
+Date: Thu, 4 Feb 2010 08:42:55 -0600
+Subject: [PATCH 19/79] [SCSI] hpsa: Return DID_RESET for commands which
+ complete with status of UNSOLICITED ABORT
+
+commit 5f0325ab280e92c023a5610dae4a6afb6c1ef151 upstream.
+
+The commands should be retried, and this will make that happen,
+instead of resulting in an i/o error.
+
+Signed-off-by: Matt Gates <matthew.gates at hp.com>
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index cc9e92a..bcc51f9 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -1128,7 +1128,7 @@ static void complete_scsi_command(struct CommandList *cp,
+ 		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
+ 		break;
+ 	case CMD_UNSOLICITED_ABORT:
+-		cmd->result = DID_ABORT << 16;
++		cmd->result = DID_RESET << 16;
+		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
+ 			"abort\n", cp);
+ 		break;
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0020-SCSI-hpsa-Retry-commands-completing-with-a-sense-key.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0020-SCSI-hpsa-Retry-commands-completing-with-a-sense-key.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,39 @@
+From: Matt Gates <matthew.gates at hp.com>
+Date: Thu, 4 Feb 2010 08:43:00 -0600
+Subject: [PATCH 20/79] [SCSI] hpsa: Retry commands completing with a sense
+ key of ABORTED_COMMAND
+
+commit 1d3b36090551a31b8b8c4ba972aac95eaa8f77dd upstream.
+
+Signed-off-by: Matt Gates <matthew.gates at hp.com>
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   11 +++++++++--
+ 1 files changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index bcc51f9..d07b3d3 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -1037,8 +1037,15 @@ static void complete_scsi_command(struct CommandList *cp,
+ 					break;
+ 				}
+ 			}
+-
+-
++			if (sense_key == ABORTED_COMMAND) {
++				/* Aborted command is retryable */
++				dev_warn(&h->pdev->dev, "cp %p "
++					"has check condition: aborted command: "
++					"ASC: 0x%x, ASCQ: 0x%x\n",
++					cp, asc, ascq);
++				cmd->result = DID_SOFT_ERROR << 16;
++				break;
++			}
+ 			/* Must be some other type of check condition */
+ 			dev_warn(&h->pdev->dev, "cp %p has check condition: "
+ 					"unknown type: "
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0021-SCSI-hpsa-Don-t-return-DID_NO_CONNECT-when-a-device-.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0021-SCSI-hpsa-Don-t-return-DID_NO_CONNECT-when-a-device-.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,29 @@
+From: Matt Gates <matthew.gates at hp.com>
+Date: Thu, 4 Feb 2010 08:43:05 -0600
+Subject: [PATCH 21/79] [SCSI] hpsa: Don't return DID_NO_CONNECT when a device
+ is merely not ready
+
+commit 01fb21870d96c83ae01072674e380ac51ebc58c8 upstream.
+
+Signed-off-by: Matt Gates <matthew.gates at hp.com>
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    1 -
+ 1 files changed, 0 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index d07b3d3..320d686 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -1029,7 +1029,6 @@ static void complete_scsi_command(struct CommandList *cp,
+ 				 * required
+ 				 */
+ 				if ((asc == 0x04) && (ascq == 0x03)) {
+-					cmd->result = DID_NO_CONNECT << 16;
+ 					dev_warn(&h->pdev->dev, "cp %p "
+ 						"has check condition: unit "
+ 						"not ready, manual "
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0022-SCSI-hpsa-Add-an-shost_to_hba-helper-function.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0022-SCSI-hpsa-Add-an-shost_to_hba-helper-function.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,42 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:43:11 -0600
+Subject: [PATCH 22/79] [SCSI] hpsa: Add an shost_to_hba helper function.
+
+commit a23513e8413e02b7e34e96a03d6bfd1c1948ac00 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    9 +++++++--
+ 1 files changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 320d686..e81df76 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -197,6 +197,12 @@ static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
+ 	return (struct ctlr_info *) *priv;
+ }
+ 
++static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
++{
++	unsigned long *priv = shost_priv(sh);
++	return (struct ctlr_info *) *priv;
++}
++
+ static struct task_struct *hpsa_scan_thread;
+ static DEFINE_MUTEX(hpsa_scan_mutex);
+ static LIST_HEAD(hpsa_scan_q);
+@@ -381,8 +387,7 @@ static ssize_t host_store_rescan(struct device *dev,
+ {
+ 	struct ctlr_info *h;
+ 	struct Scsi_Host *shost = class_to_shost(dev);
+-	unsigned long *priv = shost_priv(shost);
+-	h = (struct ctlr_info *) *priv;
++	h = shost_to_hba(shost);
+ 	if (add_to_scan_list(h)) {
+ 		wake_up_process(hpsa_scan_thread);
+ 		wait_for_completion_interruptible(&h->scan_wait);
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0023-SCSI-hpsa-use-scan_start-and-scan_finished-entry-poi.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0023-SCSI-hpsa-use-scan_start-and-scan_finished-entry-poi.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,143 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:43:16 -0600
+Subject: [PATCH 23/79] [SCSI] hpsa: use scan_start and scan_finished entry
+ points for scanning
+
+commit a08a8471b7aed3d50df8e9c852dc2baa08ec8b01 upstream.
+
+Use the scan_start and scan_finished entry points for scanning, and
+route the CCISS_REGNEWD ioctl and the sysfs trigger of the same
+functionality through hpsa_scan_start.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   55 ++++++++++++++++++++++++++++++++++++++++++++++++--
+ drivers/scsi/hpsa.h |    3 ++
+ 2 files changed, 55 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index e81df76..f889ec8 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -131,6 +131,9 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+ 
+ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
+ 		void (*done)(struct scsi_cmnd *));
++static void hpsa_scan_start(struct Scsi_Host *);
++static int hpsa_scan_finished(struct Scsi_Host *sh,
++	unsigned long elapsed_time);
+ 
+ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
+ static int hpsa_slave_alloc(struct scsi_device *sdev);
+@@ -177,6 +180,8 @@ static struct scsi_host_template hpsa_driver_template = {
+ 	.name			= "hpsa",
+ 	.proc_name		= "hpsa",
+ 	.queuecommand		= hpsa_scsi_queue_command,
++	.scan_start		= hpsa_scan_start,
++	.scan_finished		= hpsa_scan_finished,
+ 	.this_id		= -1,
+ 	.sg_tablesize		= MAXSGENTRIES,
+ 	.use_clustering		= ENABLE_CLUSTERING,
+@@ -320,7 +325,7 @@ static int hpsa_scan_func(__attribute__((unused)) void *data)
+ 			h->busy_scanning = 1;
+ 			mutex_unlock(&hpsa_scan_mutex);
+ 			host_no = h->scsi_host ?  h->scsi_host->host_no : -1;
+-			hpsa_update_scsi_devices(h, host_no);
++			hpsa_scan_start(h->scsi_host);
+ 			complete_all(&h->scan_wait);
+ 			mutex_lock(&hpsa_scan_mutex);
+ 			h->busy_scanning = 0;
+@@ -2006,6 +2011,48 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
+ 	return 0;
+ }
+ 
++static void hpsa_scan_start(struct Scsi_Host *sh)
++{
++	struct ctlr_info *h = shost_to_hba(sh);
++	unsigned long flags;
++
++	/* wait until any scan already in progress is finished. */
++	while (1) {
++		spin_lock_irqsave(&h->scan_lock, flags);
++		if (h->scan_finished)
++			break;
++		spin_unlock_irqrestore(&h->scan_lock, flags);
++		wait_event(h->scan_wait_queue, h->scan_finished);
++		/* Note: We don't need to worry about a race between this
++		 * thread and driver unload because the midlayer will
++		 * have incremented the reference count, so unload won't
++		 * happen if we're in here.
++		 */
++	}
++	h->scan_finished = 0; /* mark scan as in progress */
++	spin_unlock_irqrestore(&h->scan_lock, flags);
++
++	hpsa_update_scsi_devices(h, h->scsi_host->host_no);
++
++	spin_lock_irqsave(&h->scan_lock, flags);
++	h->scan_finished = 1; /* mark scan as finished. */
++	wake_up_all(&h->scan_wait_queue);
++	spin_unlock_irqrestore(&h->scan_lock, flags);
++}
++
++static int hpsa_scan_finished(struct Scsi_Host *sh,
++	unsigned long elapsed_time)
++{
++	struct ctlr_info *h = shost_to_hba(sh);
++	unsigned long flags;
++	int finished;
++
++	spin_lock_irqsave(&h->scan_lock, flags);
++	finished = h->scan_finished;
++	spin_unlock_irqrestore(&h->scan_lock, flags);
++	return finished;
++}
++
+ static void hpsa_unregister_scsi(struct ctlr_info *h)
+ {
+ 	/* we are being forcibly unloaded, and may not refuse. */
+@@ -2018,7 +2065,6 @@ static int hpsa_register_scsi(struct ctlr_info *h)
+ {
+ 	int rc;
+ 
+-	hpsa_update_scsi_devices(h, -1);
+ 	rc = hpsa_scsi_detect(h);
+ 	if (rc != 0)
+ 		dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
+@@ -2619,7 +2665,7 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
+ 	case CCISS_DEREGDISK:
+ 	case CCISS_REGNEWDISK:
+ 	case CCISS_REGNEWD:
+-		hpsa_update_scsi_devices(h, dev->host->host_no);
++		hpsa_scan_start(h->scsi_host);
+ 		return 0;
+ 	case CCISS_GETPCIINFO:
+ 		return hpsa_getpciinfo_ioctl(h, argp);
+@@ -3532,6 +3578,9 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
+ 		goto clean4;
+ 	}
+ 	spin_lock_init(&h->lock);
++	spin_lock_init(&h->scan_lock);
++	init_waitqueue_head(&h->scan_wait_queue);
++	h->scan_finished = 1; /* no scan currently in progress */
+ 
+ 	pci_set_drvdata(pdev, h);
+ 	memset(h->cmd_pool_bits, 0,
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index 1ab0c1b..a0502b3 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -94,6 +94,9 @@ struct ctlr_info {
+ 	int			nr_frees;
+ 	int			busy_initializing;
+ 	int			busy_scanning;
++	int			scan_finished;
++	spinlock_t		scan_lock;
++	wait_queue_head_t	scan_wait_queue;
+ 	struct mutex		busy_shutting_down;
+ 	struct list_head	scan_list;
+ 	struct completion	scan_wait;
+-- 
+1.7.4.4
+
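For context on how these entry points are used: the midlayer starts an
asynchronous scan by calling ->scan_start() and then polls
->scan_finished() with the elapsed time until it returns nonzero.
Reduced to the essentials of the contract (an illustration, not the
actual scsi_scan.c loop):

    shost->hostt->scan_start(shost);
    while (!shost->hostt->scan_finished(shost, jiffies - start_jiffies))
            msleep(10); /* the midlayer reschedules and retries */

hpsa_scan_start() also doubles as the synchronous rescan path for the
sysfs and ioctl triggers, which is why it first waits under scan_lock
for any scan already in flight before starting its own.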

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0024-SCSI-hpsa-when-resetting-devices-print-out-which-dev.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0024-SCSI-hpsa-when-resetting-devices-print-out-which-dev.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,37 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:43:21 -0600
+Subject: [PATCH 24/79] [SCSI] hpsa: when resetting devices, print out which
+ device
+
+commit d416b0c75fc52e1ac840e6c8c1857ac52d1d7132 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index f889ec8..db12bc2 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -2141,14 +2141,14 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
+ 	h = sdev_to_hba(scsicmd->device);
+ 	if (h == NULL) /* paranoia */
+ 		return FAILED;
+-	dev_warn(&h->pdev->dev, "resetting drive\n");
+-
+ 	dev = scsicmd->device->hostdata;
+ 	if (!dev) {
+ 		dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
+ 			"device lookup failed.\n");
+ 		return FAILED;
+ 	}
++	dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
++		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
+ 	/* send a reset to the SCSI LUN which the command was sent to */
+ 	rc = hpsa_send_reset(h, dev->scsi3addr);
+ 	if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0025-SCSI-hpsa-print-all-the-bytes-of-the-CDB-not-just-th.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0025-SCSI-hpsa-print-all-the-bytes-of-the-CDB-not-just-th.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,41 @@
+From: Mike Miller <mikem at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:43:26 -0600
+Subject: [PATCH 25/79] [SCSI] hpsa: print all the bytes of the CDB, not just
+ the first one.
+
+commit 807be732f928c75b3dfb4273fe5f61b34f05df86 upstream.
+
+Signed-off-by: Mike Miller <mikem at beardog.cce.hp.com>
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    6 +++++-
+ 1 files changed, 5 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index db12bc2..b91ccd4 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -1061,6 +1061,7 @@ static void complete_scsi_command(struct CommandList *cp,
+ 					"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
+ 					"Returning result: 0x%x, "
+ 					"cmd=[%02x %02x %02x %02x %02x "
++					"%02x %02x %02x %02x %02x %02x "
+ 					"%02x %02x %02x %02x %02x]\n",
+ 					cp, sense_key, asc, ascq,
+ 					cmd->result,
+@@ -1068,7 +1069,10 @@ static void complete_scsi_command(struct CommandList *cp,
+ 					cmd->cmnd[2], cmd->cmnd[3],
+ 					cmd->cmnd[4], cmd->cmnd[5],
+ 					cmd->cmnd[6], cmd->cmnd[7],
+-					cmd->cmnd[8], cmd->cmnd[9]);
++					cmd->cmnd[8], cmd->cmnd[9],
++					cmd->cmnd[10], cmd->cmnd[11],
++					cmd->cmnd[12], cmd->cmnd[13],
++					cmd->cmnd[14], cmd->cmnd[15]);
+ 			break;
+ 		}
+ 
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0026-SCSI-hpsa-clarify-obscure-comment-in-adjust_hpsa_scs.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0026-SCSI-hpsa-clarify-obscure-comment-in-adjust_hpsa_scs.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,33 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:43:31 -0600
+Subject: [PATCH 26/79] [SCSI] hpsa: clarify obscure comment in
+ adjust_hpsa_scsi_table
+
+commit c7f172dca210bbd105aee02353c6b385c774caac upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    6 +++++-
+ 1 files changed, 5 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index b91ccd4..42295c4 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -838,7 +838,11 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
+ 			(void) hpsa_scsi_add_entry(h, hostno, sd[entry],
+ 				added, &nadded);
+ 			/* add can't fail, we just removed one. */
+-			sd[entry] = NULL; /* prevent it from being freed */
++
++			/* Set it to NULL to prevent it from being freed
++			 * at the bottom of hpsa_update_scsi_devices()
++			 */
++			sd[entry] = NULL;
+ 		}
+ 		i++;
+ 	}
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0027-SCSI-hpsa-Fix-hpsa_find_scsi_entry-so-that-it-doesn-.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0027-SCSI-hpsa-Fix-hpsa_find_scsi_entry-so-that-it-doesn-.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,29 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:43:36 -0600
+Subject: [PATCH 27/79] [SCSI] hpsa: Fix hpsa_find_scsi_entry so that it
+ doesn't try to dereference NULL pointers
+
+commit 23231048309ea8eed0189f1eb8f870f08703cac0 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 42295c4..2764cb6 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -776,6 +776,8 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
+ #define DEVICE_CHANGED 1
+ #define DEVICE_SAME 2
+ 	for (i = 0; i < haystack_size; i++) {
++		if (haystack[i] == NULL) /* previously removed. */
++			continue;
+ 		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
+ 			*index = i;
+ 			if (device_is_the_same(needle, haystack[i]))
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0028-SCSI-hpsa-fix-bug-in-adjust_hpsa_scsi_table.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0028-SCSI-hpsa-fix-bug-in-adjust_hpsa_scsi_table.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,93 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:43:41 -0600
+Subject: [PATCH 28/79] [SCSI] hpsa: fix bug in adjust_hpsa_scsi_table
+
+commit 2a8ccf3187aff6defed72f7d8fa562ff2f69ef2a upstream.
+
+Fix a bug in adjust_hpsa_scsi_table which caused devices that have
+changed size, etc. to do the wrong thing.
+
+The problem was as follows:
+
+The driver maintains its current idea of what devices are present
+in the h->dev[] array.  When it updates this array, it scans the
+hardware, and produces a new list of devices, call it sd[], for
+scsi devices.
+
+Then, it compares each item in h->dev[] vs. sd[]; any items which
+are not present in sd[] it removes from h->dev[], and any items
+present in sd[], but different, it modifies in h->dev[].
+
+Then, it looks for items in sd[] which are not present in h->dev[],
+and adds those items into h->dev[].  All the while, it keeps track
+of what items were added and removed to/from h->dev[].
+
+Finally, it updates the SCSI mid-layer by removing and adding
+the same devices it removed and added to/from h->dev[]. (modified
+devices count as a remove then add.)
+
+Originally, when a "changed" device was discovered, it was
+removed then added to h->dev[].  The item was added to the *end*
+of h->dev[].  And, the item was removed from sd[] as well
+(nulled out).  As it processed h->dev[], these newly added items
+at the end of the list were encountered, and sd[] was searched,
+but those items were nulled out.  So they ended up getting removed
+immediately after they were added.
+
+The solution is to have a way to replace items in the h->dev[]
+array instead of doing a remove + add.  Then the "changed" items
+are not encountered a second time and removed.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   26 ++++++++++++++++++++------
+ 1 files changed, 20 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 2764cb6..6b40221 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -675,6 +675,24 @@ lun_assigned:
+ 	return 0;
+ }
+ 
++/* Replace an entry from h->dev[] array. */
++static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
++	int entry, struct hpsa_scsi_dev_t *new_entry,
++	struct hpsa_scsi_dev_t *added[], int *nadded,
++	struct hpsa_scsi_dev_t *removed[], int *nremoved)
++{
++	/* assumes h->devlock is held */
++	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
++	removed[*nremoved] = h->dev[entry];
++	(*nremoved)++;
++	h->dev[entry] = new_entry;
++	added[*nadded] = new_entry;
++	(*nadded)++;
++	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
++		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
++			new_entry->target, new_entry->lun);
++}
++
+ /* Remove an entry from h->dev[] array. */
+ static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
+ 	struct hpsa_scsi_dev_t *removed[], int *nremoved)
+@@ -835,12 +853,8 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
+ 			continue; /* remove ^^^, hence i not incremented */
+ 		} else if (device_change == DEVICE_CHANGED) {
+ 			changes++;
+-			hpsa_scsi_remove_entry(h, hostno, i,
+-				removed, &nremoved);
+-			(void) hpsa_scsi_add_entry(h, hostno, sd[entry],
+-				added, &nadded);
+-			/* add can't fail, we just removed one. */
+-
++			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
++				added, &nadded, removed, &nremoved);
+ 			/* Set it to NULL to prevent it from being freed
+ 			 * at the bottom of hpsa_update_scsi_devices()
+ 			 */
+-- 
+1.7.4.4
+
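In array terms, the fix boils down to updating the slot in place rather
than deleting it and appending a fresh entry that the same cleanup loop
will visit again. The core of hpsa_scsi_replace_entry() above, reduced
to its essentials:

    removed[(*nremoved)++] = h->dev[entry]; /* old version: removed list */
    h->dev[entry] = new_entry;              /* same index, no tail append */
    added[(*nadded)++] = new_entry;         /* so the loop never re-visits it */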

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0029-SCSI-hpsa-eliminate-lock_kernel-in-compat_ioctl.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0029-SCSI-hpsa-eliminate-lock_kernel-in-compat_ioctl.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,66 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:43:46 -0600
+Subject: [PATCH 29/79] [SCSI] hpsa: eliminate lock_kernel in compat_ioctl
+
+commit e39eeaed1f75fcd46783aad34cb9ab8a6046bb01 upstream.
+
+The use of the big kernel lock here appears
+to be ancient cruft that is no longer needed.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   16 +++-------------
+ 1 files changed, 3 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 6b40221..225a787 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -2287,16 +2287,6 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
+ 
+ #ifdef CONFIG_COMPAT
+ 
+-static int do_ioctl(struct scsi_device *dev, int cmd, void *arg)
+-{
+-	int ret;
+-
+-	lock_kernel();
+-	ret = hpsa_ioctl(dev, cmd, arg);
+-	unlock_kernel();
+-	return ret;
+-}
+-
+ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg);
+ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
+ 	int cmd, void *arg);
+@@ -2319,7 +2309,7 @@ static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
+ 	case CCISS_REGNEWD:
+ 	case CCISS_RESCANDISK:
+ 	case CCISS_GETLUNINFO:
+-		return do_ioctl(dev, cmd, arg);
++		return hpsa_ioctl(dev, cmd, arg);
+ 
+ 	case CCISS_PASSTHRU32:
+ 		return hpsa_ioctl32_passthru(dev, cmd, arg);
+@@ -2355,7 +2345,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
+ 	if (err)
+ 		return -EFAULT;
+ 
+-	err = do_ioctl(dev, CCISS_PASSTHRU, (void *)p);
++	err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
+ 	if (err)
+ 		return err;
+ 	err |= copy_in_user(&arg32->error_info, &p->error_info,
+@@ -2392,7 +2382,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
+ 	if (err)
+ 		return -EFAULT;
+ 
+-	err = do_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
++	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
+ 	if (err)
+ 		return err;
+ 	err |= copy_in_user(&arg32->error_info, &p->error_info,
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0030-SCSI-hpsa-Reorder-compat-ioctl-functions-to-eliminat.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0030-SCSI-hpsa-Reorder-compat-ioctl-functions-to-eliminat.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,98 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:43:51 -0600
+Subject: [PATCH 30/79] [SCSI] hpsa: Reorder compat ioctl functions to
+ eliminate some forward declarations.
+
+commit 71fe75a705aa4eabda23334095c382ad8c48f2d1 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   64 ++++++++++++++++++++++++---------------------------
+ 1 files changed, 30 insertions(+), 34 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 225a787..af20dbd 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -2287,40 +2287,6 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
+ 
+ #ifdef CONFIG_COMPAT
+ 
+-static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg);
+-static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
+-	int cmd, void *arg);
+-
+-static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
+-{
+-	switch (cmd) {
+-	case CCISS_GETPCIINFO:
+-	case CCISS_GETINTINFO:
+-	case CCISS_SETINTINFO:
+-	case CCISS_GETNODENAME:
+-	case CCISS_SETNODENAME:
+-	case CCISS_GETHEARTBEAT:
+-	case CCISS_GETBUSTYPES:
+-	case CCISS_GETFIRMVER:
+-	case CCISS_GETDRIVVER:
+-	case CCISS_REVALIDVOLS:
+-	case CCISS_DEREGDISK:
+-	case CCISS_REGNEWDISK:
+-	case CCISS_REGNEWD:
+-	case CCISS_RESCANDISK:
+-	case CCISS_GETLUNINFO:
+-		return hpsa_ioctl(dev, cmd, arg);
+-
+-	case CCISS_PASSTHRU32:
+-		return hpsa_ioctl32_passthru(dev, cmd, arg);
+-	case CCISS_BIG_PASSTHRU32:
+-		return hpsa_ioctl32_big_passthru(dev, cmd, arg);
+-
+-	default:
+-		return -ENOIOCTLCMD;
+-	}
+-}
+-
+ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
+ {
+ 	IOCTL32_Command_struct __user *arg32 =
+@@ -2391,6 +2357,36 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
+ 		return -EFAULT;
+ 	return err;
+ }
++
++static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
++{
++	switch (cmd) {
++	case CCISS_GETPCIINFO:
++	case CCISS_GETINTINFO:
++	case CCISS_SETINTINFO:
++	case CCISS_GETNODENAME:
++	case CCISS_SETNODENAME:
++	case CCISS_GETHEARTBEAT:
++	case CCISS_GETBUSTYPES:
++	case CCISS_GETFIRMVER:
++	case CCISS_GETDRIVVER:
++	case CCISS_REVALIDVOLS:
++	case CCISS_DEREGDISK:
++	case CCISS_REGNEWDISK:
++	case CCISS_REGNEWD:
++	case CCISS_RESCANDISK:
++	case CCISS_GETLUNINFO:
++		return hpsa_ioctl(dev, cmd, arg);
++
++	case CCISS_PASSTHRU32:
++		return hpsa_ioctl32_passthru(dev, cmd, arg);
++	case CCISS_BIG_PASSTHRU32:
++		return hpsa_ioctl32_big_passthru(dev, cmd, arg);
++
++	default:
++		return -ENOIOCTLCMD;
++	}
++}
+ #endif
+ 
+ static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0031-SCSI-hpsa-update-driver-version-to-2.0.1-3.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0031-SCSI-hpsa-update-driver-version-to-2.0.1-3.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,28 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 4 Feb 2010 08:43:57 -0600
+Subject: [PATCH 31/79] [SCSI] hpsa: update driver version to 2.0.1-3
+
+commit 4b5aa7cff0e2bd1a9c81b59553ba8ecfa3aa7e1b upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index af20dbd..03697ba 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -52,7 +52,7 @@
+ #include "hpsa.h"
+ 
+ /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
+-#define HPSA_DRIVER_VERSION "1.0.0"
++#define HPSA_DRIVER_VERSION "2.0.1-3"
+ #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
+ 
+ /* How long to wait (in milliseconds) for board to go into simple mode */
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0033-SCSI-hpsa-fix-firmwart-typo.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0033-SCSI-hpsa-fix-firmwart-typo.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,28 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 25 Feb 2010 14:02:56 -0600
+Subject: [PATCH 33/79] [SCSI] hpsa: fix firmwart typo
+
+commit f0edafc6628f924a424ab4059df74f46f4f4241e upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 745c624..3734f31 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -2982,7 +2982,7 @@ static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
+ 	return IRQ_HANDLED;
+ }
+ 
+-/* Send a message CDB to the firmwart. */
++/* Send a message CDB to the firmware. */
+ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
+ 						unsigned char type)
+ {
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0034-SCSI-hpsa-fix-scsi-status-mis-shift.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0034-SCSI-hpsa-fix-scsi-status-mis-shift.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,30 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 25 Feb 2010 14:03:01 -0600
+Subject: [PATCH 34/79] [SCSI] hpsa: fix scsi status mis-shift
+
+commit 5512672f75611e9239669c6a4dce648b8d60fedd upstream.
+
+The SCSI status does not need to be shifted.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 3734f31..604b4c9 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -1006,7 +1006,7 @@ static void complete_scsi_command(struct CommandList *cp,
+ 
+ 	cmd->result = (DID_OK << 16); 		/* host byte */
+ 	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
+-	cmd->result |= (ei->ScsiStatus << 1);
++	cmd->result |= ei->ScsiStatus;
+ 
+ 	/* copy the sense data whether we need to or not. */
+ 	memcpy(cmd->sense_buffer, ei->SenseInfo,
+-- 
+1.7.4.4
+
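The one-liner above is easier to follow with the scsi_cmnd result
layout of kernels of this vintage in mind: host byte in bits 16-23,
message byte in bits 8-15, and the raw SCSI status byte, unshifted, in
bits 0-7. The word assembled in complete_scsi_command() is therefore:

    cmd->result = (DID_OK << 16)          /* host byte */
                | (COMMAND_COMPLETE << 8) /* msg byte */
                | ei->ScsiStatus;         /* raw SAM status, e.g. 0x02 */

With the extra << 1, a CHECK CONDITION (raw 0x02) came out of the
midlayer's status_byte() macro (which itself shifts right by one) as
0x02 (CONDITION_GOOD) instead of 0x01 (CHECK_CONDITION), so check
conditions were misclassified.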

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0035-SCSI-hpsa-return-ENOMEM-not-1.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0035-SCSI-hpsa-return-ENOMEM-not-1.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,28 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 25 Feb 2010 14:03:06 -0600
+Subject: [PATCH 35/79] [SCSI] hpsa: return -ENOMEM, not -1
+
+commit e9ea04a65ad842452cbee92b5c865af7fed17f63 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 604b4c9..a72a18e 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -1386,7 +1386,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
+ 
+ 	if (c == NULL) {			/* trouble... */
+ 		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+-		return -1;
++		return -ENOMEM;
+ 	}
+ 
+ 	fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0036-SCSI-hpsa-remove-scan-thread.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0036-SCSI-hpsa-remove-scan-thread.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,274 @@
+From: Mike Miller <mikem at beardog.cce.hp.com>
+Date: Thu, 25 Feb 2010 14:03:12 -0600
+Subject: [PATCH 36/79] [SCSI] hpsa: remove scan thread
+
+commit 31468401ccf64322ca99fe05fbe64f1551240f57 upstream.
+
+The intent of the scan thread was to allow a UNIT ATTENTION/LUN
+DATA CHANGED condition encountered in the interrupt handler
+to trigger a rescan of devices, which can't be done in interrupt
+context.  However, we weren't able to get this to work, due to
+multiple such UNIT ATTENTION conditions arriving during the rescan,
+during updating of the SCSI mid layer, etc.  There's no way to tell
+the devices, "stand still while I scan you!"  Since it doesn't work,
+there's no point in having the thread, as the rescan triggered via
+ioctl or sysfs can be done without such a thread.
+
+Signed-off-by: Mike Miller <mikem at beardog.cce.hp.com>
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |  167 +-------------------------------------------------
+ drivers/scsi/hpsa.h |    3 -
+ 2 files changed, 4 insertions(+), 166 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index a72a18e..3d43bb2 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -53,7 +53,7 @@
+ #include "hpsa.h"
+ 
+ /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
+-#define HPSA_DRIVER_VERSION "2.0.1-3"
++#define HPSA_DRIVER_VERSION "2.0.2-1"
+ #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
+ 
+ /* How long to wait (in milliseconds) for board to go into simple mode */
+@@ -212,133 +212,6 @@ static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
+ 	return (struct ctlr_info *) *priv;
+ }
+ 
+-static struct task_struct *hpsa_scan_thread;
+-static DEFINE_MUTEX(hpsa_scan_mutex);
+-static LIST_HEAD(hpsa_scan_q);
+-static int hpsa_scan_func(void *data);
+-
+-/**
+- * add_to_scan_list() - add controller to rescan queue
+- * @h:		      Pointer to the controller.
+- *
+- * Adds the controller to the rescan queue if not already on the queue.
+- *
+- * returns 1 if added to the queue, 0 if skipped (could be on the
+- * queue already, or the controller could be initializing or shutting
+- * down).
+- **/
+-static int add_to_scan_list(struct ctlr_info *h)
+-{
+-	struct ctlr_info *test_h;
+-	int found = 0;
+-	int ret = 0;
+-
+-	if (h->busy_initializing)
+-		return 0;
+-
+-	/*
+-	 * If we don't get the lock, it means the driver is unloading
+-	 * and there's no point in scheduling a new scan.
+-	 */
+-	if (!mutex_trylock(&h->busy_shutting_down))
+-		return 0;
+-
+-	mutex_lock(&hpsa_scan_mutex);
+-	list_for_each_entry(test_h, &hpsa_scan_q, scan_list) {
+-		if (test_h == h) {
+-			found = 1;
+-			break;
+-		}
+-	}
+-	if (!found && !h->busy_scanning) {
+-		INIT_COMPLETION(h->scan_wait);
+-		list_add_tail(&h->scan_list, &hpsa_scan_q);
+-		ret = 1;
+-	}
+-	mutex_unlock(&hpsa_scan_mutex);
+-	mutex_unlock(&h->busy_shutting_down);
+-
+-	return ret;
+-}
+-
+-/**
+- * remove_from_scan_list() - remove controller from rescan queue
+- * @h:			   Pointer to the controller.
+- *
+- * Removes the controller from the rescan queue if present. Blocks if
+- * the controller is currently conducting a rescan.  The controller
+- * can be in one of three states:
+- * 1. Doesn't need a scan
+- * 2. On the scan list, but not scanning yet (we remove it)
+- * 3. Busy scanning (and not on the list). In this case we want to wait for
+- *    the scan to complete to make sure the scanning thread for this
+- *    controller is completely idle.
+- **/
+-static void remove_from_scan_list(struct ctlr_info *h)
+-{
+-	struct ctlr_info *test_h, *tmp_h;
+-
+-	mutex_lock(&hpsa_scan_mutex);
+-	list_for_each_entry_safe(test_h, tmp_h, &hpsa_scan_q, scan_list) {
+-		if (test_h == h) { /* state 2. */
+-			list_del(&h->scan_list);
+-			complete_all(&h->scan_wait);
+-			mutex_unlock(&hpsa_scan_mutex);
+-			return;
+-		}
+-	}
+-	if (h->busy_scanning) { /* state 3. */
+-		mutex_unlock(&hpsa_scan_mutex);
+-		wait_for_completion(&h->scan_wait);
+-	} else { /* state 1, nothing to do. */
+-		mutex_unlock(&hpsa_scan_mutex);
+-	}
+-}
+-
+-/* hpsa_scan_func() - kernel thread used to rescan controllers
+- * @data:	 Ignored.
+- *
+- * A kernel thread used to scan for drive topology changes on
+- * controllers. The thread processes only one controller at a time
+- * using a queue.  Controllers are added to the queue using
+- * add_to_scan_list() and removed from the queue either after done
+- * processing or using remove_from_scan_list().
+- *
+- * returns 0.
+- **/
+-static int hpsa_scan_func(__attribute__((unused)) void *data)
+-{
+-	struct ctlr_info *h;
+-	int host_no;
+-
+-	while (1) {
+-		set_current_state(TASK_INTERRUPTIBLE);
+-		schedule();
+-		if (kthread_should_stop())
+-			break;
+-
+-		while (1) {
+-			mutex_lock(&hpsa_scan_mutex);
+-			if (list_empty(&hpsa_scan_q)) {
+-				mutex_unlock(&hpsa_scan_mutex);
+-				break;
+-			}
+-			h = list_entry(hpsa_scan_q.next, struct ctlr_info,
+-					scan_list);
+-			list_del(&h->scan_list);
+-			h->busy_scanning = 1;
+-			mutex_unlock(&hpsa_scan_mutex);
+-			host_no = h->scsi_host ?  h->scsi_host->host_no : -1;
+-			hpsa_scan_start(h->scsi_host);
+-			complete_all(&h->scan_wait);
+-			mutex_lock(&hpsa_scan_mutex);
+-			h->busy_scanning = 0;
+-			mutex_unlock(&hpsa_scan_mutex);
+-		}
+-	}
+-	return 0;
+-}
+-
+ static int check_for_unit_attention(struct ctlr_info *h,
+ 	struct CommandList *c)
+ {
+@@ -356,21 +229,8 @@ static int check_for_unit_attention(struct ctlr_info *h,
+ 		break;
+ 	case REPORT_LUNS_CHANGED:
+ 		dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
+-			"changed\n", h->ctlr);
++			"changed, action required\n", h->ctlr);
+ 	/*
+-	 * Here, we could call add_to_scan_list and wake up the scan thread,
+-	 * except that it's quite likely that we will get more than one
+-	 * REPORT_LUNS_CHANGED condition in quick succession, which means
+-	 * that those which occur after the first one will likely happen
+-	 * *during* the hpsa_scan_thread's rescan.  And the rescan code is not
+-	 * robust enough to restart in the middle, undoing what it has already
+-	 * done, and it's not clear that it's even possible to do this, since
+-	 * part of what it does is notify the SCSI mid layer, which starts
+-	 * doing its own i/o to read partition tables and so on, and the
+-	 * driver doesn't have visibility to know what might need undoing.
+-	 * In any event, if possible, it is horribly complicated to get right
+-	 * so we just don't do it for now.
+-	 *
+ 	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
+ 	 */
+ 		break;
+@@ -397,10 +257,7 @@ static ssize_t host_store_rescan(struct device *dev,
+ 	struct ctlr_info *h;
+ 	struct Scsi_Host *shost = class_to_shost(dev);
+ 	h = shost_to_hba(shost);
+-	if (add_to_scan_list(h)) {
+-		wake_up_process(hpsa_scan_thread);
+-		wait_for_completion_interruptible(&h->scan_wait);
+-	}
++	hpsa_scan_start(h->scsi_host);
+ 	return count;
+ }
+ 
+@@ -3553,8 +3410,6 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
+ 	h->busy_initializing = 1;
+ 	INIT_HLIST_HEAD(&h->cmpQ);
+ 	INIT_HLIST_HEAD(&h->reqQ);
+-	mutex_init(&h->busy_shutting_down);
+-	init_completion(&h->scan_wait);
+ 	rc = hpsa_pci_init(h, pdev);
+ 	if (rc != 0)
+ 		goto clean1;
+@@ -3702,8 +3557,6 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
+ 		return;
+ 	}
+ 	h = pci_get_drvdata(pdev);
+-	mutex_lock(&h->busy_shutting_down);
+-	remove_from_scan_list(h);
+ 	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
+ 	hpsa_shutdown(pdev);
+ 	iounmap(h->vaddr);
+@@ -3724,7 +3577,6 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
+ 	 */
+ 	pci_release_regions(pdev);
+ 	pci_set_drvdata(pdev, NULL);
+-	mutex_unlock(&h->busy_shutting_down);
+ 	kfree(h);
+ }
+ 
+@@ -3878,23 +3730,12 @@ clean_up:
+  */
+ static int __init hpsa_init(void)
+ {
+-	int err;
+-	/* Start the scan thread */
+-	hpsa_scan_thread = kthread_run(hpsa_scan_func, NULL, "hpsa_scan");
+-	if (IS_ERR(hpsa_scan_thread)) {
+-		err = PTR_ERR(hpsa_scan_thread);
+-		return -ENODEV;
+-	}
+-	err = pci_register_driver(&hpsa_pci_driver);
+-	if (err)
+-		kthread_stop(hpsa_scan_thread);
+-	return err;
++	return pci_register_driver(&hpsa_pci_driver);
+ }
+ 
+ static void __exit hpsa_cleanup(void)
+ {
+ 	pci_unregister_driver(&hpsa_pci_driver);
+-	kthread_stop(hpsa_scan_thread);
+ }
+ 
+ module_init(hpsa_init);
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index a0502b3..fc15215 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -97,9 +97,6 @@ struct ctlr_info {
+ 	int			scan_finished;
+ 	spinlock_t		scan_lock;
+ 	wait_queue_head_t	scan_wait_queue;
+-	struct mutex		busy_shutting_down;
+-	struct list_head	scan_list;
+-	struct completion	scan_wait;
+ 
+ 	struct Scsi_Host *scsi_host;
+ 	spinlock_t devlock; /* to protect hba[ctlr]->dev[];  */
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0037-SCSI-hpsa-mark-hpsa_pci_init-as-__devinit.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0037-SCSI-hpsa-mark-hpsa_pci_init-as-__devinit.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,28 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 25 Feb 2010 14:03:17 -0600
+Subject: [PATCH 37/79] [SCSI] hpsa: mark hpsa_pci_init as __devinit
+
+commit ff9fea94546afa2a496c15354533f06088347f6e upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 3d43bb2..2e1edce 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3174,7 +3174,7 @@ default_int_mode:
+ 	h->intr[PERF_MODE_INT] = pdev->irq;
+ }
+ 
+-static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
++static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+ {
+ 	ushort subsystem_vendor_id, subsystem_device_id, command;
+ 	u32 board_id, scratchpad = 0;
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0038-SCSI-hpsa-Clarify-calculation-of-padding-for-command.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0038-SCSI-hpsa-Clarify-calculation-of-padding-for-command.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,43 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 25 Feb 2010 14:03:22 -0600
+Subject: [PATCH 38/79] [SCSI] hpsa: Clarify calculation of padding for
+ commandlist structure
+
+commit db61bfcfe2a68dc71402c270686cd73b80971efc upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa_cmd.h |   14 ++++++++++----
+ 1 files changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
+index 3e0abdf..43b6f1c 100644
+--- a/drivers/scsi/hpsa_cmd.h
++++ b/drivers/scsi/hpsa_cmd.h
+@@ -313,12 +313,18 @@ struct CommandList {
+ 	void   *scsi_cmd;
+ 
+ /* on 64 bit architectures, to get this to be 32-byte-aligned
+- * it so happens we need no padding; on 32 bit systems,
+- * we need 8 bytes of padding.  This does that.
++ * it so happens we need PAD_64 bytes of padding; on 32 bit systems,
++ * we need PAD_32 bytes of padding (see below).  This does that.
++ * If it happens that 64 bit and 32 bit systems need different
++ * padding, PAD_32 and PAD_64 can be set independently, and
++ * the code below will do the right thing.
+  */
+-#define COMMANDLIST_PAD ((8 - sizeof(long))/4 * 8)
++#define IS_32_BIT ((8 - sizeof(long))/4)
++#define IS_64_BIT (!IS_32_BIT)
++#define PAD_32 (8)
++#define PAD_64 (0)
++#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
+ 	u8 pad[COMMANDLIST_PAD];
+-
+ };
+ 
+ /* Configuration Table Structure */
+-- 
+1.7.4.4
+
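Worked through, the new macros reduce to the same constant as before:
on an LP64 build sizeof(long) == 8, so IS_32_BIT = (8 - 8)/4 = 0,
IS_64_BIT = 1 and COMMANDLIST_PAD = 0*PAD_32 + 1*PAD_64 = 0; on a
32-bit build sizeof(long) == 4, so IS_32_BIT = 1 and COMMANDLIST_PAD =
8, exactly the old ((8 - sizeof(long))/4 * 8). A compile-time check one
could place next to the struct (illustrative; the kernel's own idiom
for this would be BUILD_BUG_ON):

    /* Commands are carved out of a contiguous pool, so the structure
     * size must stay a multiple of 32 for each entry to be aligned.
     */
    typedef char commandlist_size_ok[
            (sizeof(struct CommandList) % 32 == 0) ? 1 : -1];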

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0039-SCSI-hpsa-Increase-the-number-of-scatter-gather-elem.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0039-SCSI-hpsa-Increase-the-number-of-scatter-gather-elem.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,292 @@
+From 33a2ffce51d9598380d73c515a27fc6cff3bd9c4 Mon Sep 17 00:00:00 2001
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 25 Feb 2010 14:03:27 -0600
+Subject: [PATCH 39/79] [SCSI] hpsa: Increase the number of scatter gather
+ elements supported.
+
+This uses the scatter-gather chaining feature of Smart Array
+controllers.  32 scatter-gather elements are embedded in the
+"command list", and the last element in the list may be marked
+as a "chain pointer" that points to an additional block of
+scatter-gather elements.  The precise number of scatter-gather
+elements supported depends on the particular kind of
+Smart Array, and is determined at runtime by querying the
+hardware.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+[bwh: Adjust context for omission of commit 667e23d]
+---
+ drivers/scsi/hpsa.c     |  134 +++++++++++++++++++++++++++++++++++++++++++----
+ drivers/scsi/hpsa.h     |    4 ++
+ drivers/scsi/hpsa_cmd.h |    7 ++-
+ 3 files changed, 131 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 2e1edce..183d3a4 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -186,7 +186,6 @@ static struct scsi_host_template hpsa_driver_template = {
+ 	.scan_start		= hpsa_scan_start,
+ 	.scan_finished		= hpsa_scan_finished,
+ 	.this_id		= -1,
+-	.sg_tablesize		= MAXSGENTRIES,
+ 	.use_clustering		= ENABLE_CLUSTERING,
+ 	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
+ 	.ioctl			= hpsa_ioctl,
+@@ -844,6 +843,76 @@ static void hpsa_scsi_setup(struct ctlr_info *h)
+ 	spin_lock_init(&h->devlock);
+ }
+ 
++static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
++{
++	int i;
++
++	if (!h->cmd_sg_list)
++		return;
++	for (i = 0; i < h->nr_cmds; i++) {
++		kfree(h->cmd_sg_list[i]);
++		h->cmd_sg_list[i] = NULL;
++	}
++	kfree(h->cmd_sg_list);
++	h->cmd_sg_list = NULL;
++}
++
++static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
++{
++	int i;
++
++	if (h->chainsize <= 0)
++		return 0;
++
++	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
++				GFP_KERNEL);
++	if (!h->cmd_sg_list)
++		return -ENOMEM;
++	for (i = 0; i < h->nr_cmds; i++) {
++		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
++						h->chainsize, GFP_KERNEL);
++		if (!h->cmd_sg_list[i])
++			goto clean;
++	}
++	return 0;
++
++clean:
++	hpsa_free_sg_chain_blocks(h);
++	return -ENOMEM;
++}
++
++static void hpsa_map_sg_chain_block(struct ctlr_info *h,
++	struct CommandList *c)
++{
++	struct SGDescriptor *chain_sg, *chain_block;
++	u64 temp64;
++
++	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
++	chain_block = h->cmd_sg_list[c->cmdindex];
++	chain_sg->Ext = HPSA_SG_CHAIN;
++	chain_sg->Len = sizeof(*chain_sg) *
++		(c->Header.SGTotal - h->max_cmd_sg_entries);
++	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
++				PCI_DMA_TODEVICE);
++	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
++	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
++}
++
++static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
++	struct CommandList *c)
++{
++	struct SGDescriptor *chain_sg;
++	union u64bit temp64;
++
++	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
++		return;
++
++	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
++	temp64.val32.lower = chain_sg->Addr.lower;
++	temp64.val32.upper = chain_sg->Addr.upper;
++	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
++}
++
+ static void complete_scsi_command(struct CommandList *cp,
+ 	int timeout, u32 tag)
+ {
+@@ -860,6 +929,8 @@ static void complete_scsi_command(struct CommandList *cp,
+ 	h = cp->h;
+ 
+ 	scsi_dma_unmap(cmd); /* undo the DMA mappings */
++	if (cp->Header.SGTotal > h->max_cmd_sg_entries)
++		hpsa_unmap_sg_chain_block(h, cp);
+ 
+ 	cmd->result = (DID_OK << 16); 		/* host byte */
+ 	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
+@@ -1064,6 +1135,7 @@ static int hpsa_scsi_detect(struct ctlr_info *h)
+ 	sh->max_id = HPSA_MAX_LUN;
+ 	sh->can_queue = h->nr_cmds;
+ 	sh->cmd_per_lun = h->nr_cmds;
++	sh->sg_tablesize = h->maxsgentries;
+ 	h->scsi_host = sh;
+ 	sh->hostdata[0] = (unsigned long) h;
+ 	sh->irq = h->intr[PERF_MODE_INT];
+@@ -1765,16 +1837,17 @@ out:
+  * dma mapping  and fills in the scatter gather entries of the
+  * hpsa command, cp.
+  */
+-static int hpsa_scatter_gather(struct pci_dev *pdev,
++static int hpsa_scatter_gather(struct ctlr_info *h,
+ 		struct CommandList *cp,
+ 		struct scsi_cmnd *cmd)
+ {
+ 	unsigned int len;
+ 	struct scatterlist *sg;
+ 	u64 addr64;
+-	int use_sg, i;
++	int use_sg, i, sg_index, chained;
++	struct SGDescriptor *curr_sg;
+ 
+-	BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
++	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
+ 
+ 	use_sg = scsi_dma_map(cmd);
+ 	if (use_sg < 0)
+@@ -1783,15 +1856,33 @@ static int hpsa_scatter_gather(struct pci_dev *pdev,
+ 	if (!use_sg)
+ 		goto sglist_finished;
+ 
++	curr_sg = cp->SG;
++	chained = 0;
++	sg_index = 0;
+ 	scsi_for_each_sg(cmd, sg, use_sg, i) {
++		if (i == h->max_cmd_sg_entries - 1 &&
++			use_sg > h->max_cmd_sg_entries) {
++			chained = 1;
++			curr_sg = h->cmd_sg_list[cp->cmdindex];
++			sg_index = 0;
++		}
+ 		addr64 = (u64) sg_dma_address(sg);
+ 		len  = sg_dma_len(sg);
+-		cp->SG[i].Addr.lower =
+-			(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
+-		cp->SG[i].Addr.upper =
+-			(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
+-		cp->SG[i].Len = len;
+-		cp->SG[i].Ext = 0;  /* we are not chaining */
++		curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
++		curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
++		curr_sg->Len = len;
++		curr_sg->Ext = 0;  /* we are not chaining */
++		curr_sg++;
++	}
++
++	if (use_sg + chained > h->maxSG)
++		h->maxSG = use_sg + chained;
++
++	if (chained) {
++		cp->Header.SGList = h->max_cmd_sg_entries;
++		cp->Header.SGTotal = (u16) (use_sg + 1);
++		hpsa_map_sg_chain_block(h, cp);
++		return 0;
+ 	}
+ 
+ sglist_finished:
+@@ -1887,7 +1978,7 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
+ 		break;
+ 	}
+ 
+-	if (hpsa_scatter_gather(h->pdev, c, cmd) < 0) { /* Fill SG list */
++	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
+ 		cmd_free(h, c);
+ 		return SCSI_MLQUEUE_HOST_BUSY;
+ 	}
+@@ -3283,6 +3374,23 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+ 
+ 	h->board_id = board_id;
+ 	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
++	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
++
++	/*
++	 * Limit in-command s/g elements to 32 to save dma'able memory.
++	 * However, the spec says if 0, use 31
++	 */
++
++	h->max_cmd_sg_entries = 31;
++	if (h->maxsgentries > 512) {
++		h->max_cmd_sg_entries = 32;
++		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
++		h->maxsgentries--; /* save one for chain pointer */
++	} else {
++		h->maxsgentries = 31; /* default to traditional values */
++		h->chainsize = 0;
++	}
++
+ 	h->product_name = products[prod_index].product_name;
+ 	h->access = *(products[prod_index].access);
+ 	/* Allow room for some ioctls */
+@@ -3463,6 +3571,8 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
+ 		rc = -ENOMEM;
+ 		goto clean4;
+ 	}
++	if (hpsa_allocate_sg_chain_blocks(h))
++		goto clean4;
+ 	spin_lock_init(&h->lock);
+ 	spin_lock_init(&h->scan_lock);
+ 	init_waitqueue_head(&h->scan_wait_queue);
+@@ -3485,6 +3595,7 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
+ 	return 1;
+ 
+ clean4:
++	hpsa_free_sg_chain_blocks(h);
+ 	kfree(h->cmd_pool_bits);
+ 	if (h->cmd_pool)
+ 		pci_free_consistent(h->pdev,
+@@ -3560,6 +3671,7 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
+ 	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
+ 	hpsa_shutdown(pdev);
+ 	iounmap(h->vaddr);
++	hpsa_free_sg_chain_blocks(h);
+ 	pci_free_consistent(h->pdev,
+ 		h->nr_cmds * sizeof(struct CommandList),
+ 		h->cmd_pool, h->cmd_pool_dhandle);
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index fc15215..1bb5233 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -83,6 +83,10 @@ struct ctlr_info {
+ 	unsigned int maxQsinceinit;
+ 	unsigned int maxSG;
+ 	spinlock_t lock;
++	int maxsgentries;
++	u8 max_cmd_sg_entries;
++	int chainsize;
++	struct SGDescriptor **cmd_sg_list;
+ 
+ 	/* pointers to command and error info pool */
+ 	struct CommandList 	*cmd_pool;
+diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
+index 43b6f1c..cb0c238 100644
+--- a/drivers/scsi/hpsa_cmd.h
++++ b/drivers/scsi/hpsa_cmd.h
+@@ -23,7 +23,8 @@
+ 
+ /* general boundary defintions */
+ #define SENSEINFOBYTES          32 /* may vary between hbas */
+-#define MAXSGENTRIES            31
++#define MAXSGENTRIES            32
++#define HPSA_SG_CHAIN		0x80000000
+ #define MAXREPLYQS              256
+ 
+ /* Command Status value */
+@@ -321,8 +322,8 @@ struct CommandList {
+  */
+ #define IS_32_BIT ((8 - sizeof(long))/4)
+ #define IS_64_BIT (!IS_32_BIT)
+-#define PAD_32 (8)
+-#define PAD_64 (0)
++#define PAD_32 (24)
++#define PAD_64 (16)
+ #define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
+ 	u8 pad[COMMANDLIST_PAD];
+ };
+-- 
+1.7.4.4
+

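To make the descriptor placement in hpsa_scatter_gather() concrete: with the
32-slot embedded array, an overflowing request keeps 31 descriptors in the
command itself, repurposes the last embedded slot as the chain descriptor,
and puts the remainder in the per-command chain block. A stand-alone model
of that partitioning (ours, not driver code):

#include <stdio.h>

/* Mirrors the loop condition in the patch: the walk switches to the chain
 * block at index max_cmd_sg_entries - 1, but only when the request does not
 * fit entirely in the embedded array.
 */
static void place_sg_entries(int use_sg, int max_cmd_sg_entries)
{
        int i, embedded = 0, chained = 0;

        for (i = 0; i < use_sg; i++) {
                if (i >= max_cmd_sg_entries - 1 &&
                    use_sg > max_cmd_sg_entries)
                        chained++;      /* lands in the chain block */
                else
                        embedded++;     /* lands in the command list */
        }
        printf("%3d entries: %2d embedded, %2d chained\n",
               use_sg, embedded, chained);
}

int main(void)
{
        place_sg_entries(16, 32);       /* fits: all 16 embedded */
        place_sg_entries(32, 32);       /* exact fit: no chaining */
        place_sg_entries(100, 32);      /* 31 embedded + 69 chained */
        return 0;
}
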
Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0040-SCSI-hpsa-remove-unused-members-next-prev-and-retry_.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0040-SCSI-hpsa-remove-unused-members-next-prev-and-retry_.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,43 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 25 Feb 2010 14:03:32 -0600
+Subject: [PATCH 40/79] [SCSI] hpsa: remove unused members next, prev, and
+ retry_count from command list structure.
+
+commit 43aebfa12e7631124472237dc945c27af54ca646 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa_cmd.h |    7 ++-----
+ 1 files changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
+index cb0c238..56fb982 100644
+--- a/drivers/scsi/hpsa_cmd.h
++++ b/drivers/scsi/hpsa_cmd.h
+@@ -306,11 +306,8 @@ struct CommandList {
+ 	int			   cmd_type;
+ 	long			   cmdindex;
+ 	struct hlist_node list;
+-	struct CommandList *prev;
+-	struct CommandList *next;
+ 	struct request *rq;
+ 	struct completion *waiting;
+-	int	 retry_count;
+ 	void   *scsi_cmd;
+ 
+ /* on 64 bit architectures, to get this to be 32-byte-aligned
+@@ -322,8 +319,8 @@ struct CommandList {
+  */
+ #define IS_32_BIT ((8 - sizeof(long))/4)
+ #define IS_64_BIT (!IS_32_BIT)
+-#define PAD_32 (24)
+-#define PAD_64 (16)
++#define PAD_32 (4)
++#define PAD_64 (4)
+ #define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
+ 	u8 pad[COMMANDLIST_PAD];
+ };
+-- 
+1.7.4.4
+

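Pad constants like these are easy to get wrong by hand; a compile-time size
check (ours, a sketch only; the driver itself carries no such assertion)
fails the build as soon as a pad value stops producing a 32-byte multiple:

/* C89-style static assertion: the typedef declares a negative-sized array,
 * and therefore fails to compile, unless the struct size is a multiple of
 * 32 bytes, which is what COMMANDLIST_PAD must guarantee.
 */
struct example_cmd {
        long   cmdindex;
        void  *scsi_cmd;
#define EXAMPLE_PAD ((32 - (sizeof(long) + sizeof(void *)) % 32) % 32)
        unsigned char pad[EXAMPLE_PAD];
};

typedef char example_cmd_size_ok
        [(sizeof(struct example_cmd) % 32) == 0 ? 1 : -1];
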
Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0041-SCSI-hpsa-remove-unneeded-defines.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0041-SCSI-hpsa-remove-unneeded-defines.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,64 @@
+From: Mike Miller <mike.miller at hp.com>
+Date: Fri, 16 Apr 2010 13:28:41 -0500
+Subject: [PATCH 41/79] [SCSI] hpsa: remove unneeded defines
+
+commit 859e816704b4139d15b1ec6a3505f12faef5333a upstream.
+
+This patch removes unnecessary #define's from hpsa. The SCSI midlayer
+handles all this for us.
+
+Signed-off-by: Mike Miller <mike.miller at hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c     |    8 --------
+ drivers/scsi/hpsa_cmd.h |   15 ---------------
+ 2 files changed, 0 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 183d3a4..c016426b 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -2708,14 +2708,6 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+ 			c->Request.CDB[8] = (size >> 8) & 0xFF;
+ 			c->Request.CDB[9] = size & 0xFF;
+ 			break;
+-
+-		case HPSA_READ_CAPACITY:
+-			c->Request.CDBLen = 10;
+-			c->Request.Type.Attribute = ATTR_SIMPLE;
+-			c->Request.Type.Direction = XFER_READ;
+-			c->Request.Timeout = 0;
+-			c->Request.CDB[0] = cmd;
+-			break;
+ 		case HPSA_CACHE_FLUSH:
+ 			c->Request.CDBLen = 12;
+ 			c->Request.Type.Attribute = ATTR_SIMPLE;
+diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
+index 56fb982..78de9b6 100644
+--- a/drivers/scsi/hpsa_cmd.h
++++ b/drivers/scsi/hpsa_cmd.h
+@@ -152,21 +152,6 @@ struct SenseSubsystem_info {
+ 	u8 reserved1[1108];
+ };
+ 
+-#define HPSA_READ_CAPACITY 0x25 /* Read Capacity */
+-struct ReadCapdata {
+-	u8 total_size[4];	/* Total size in blocks */
+-	u8 block_size[4];	/* Size of blocks in bytes */
+-};
+-
+-#if 0
+-/* 12 byte commands not implemented in firmware yet. */
+-#define HPSA_READ 	0xa8
+-#define HPSA_WRITE	0xaa
+-#endif
+-
+-#define HPSA_READ   0x28    /* Read(10) */
+-#define HPSA_WRITE  0x2a    /* Write(10) */
+-
+ /* BMIC commands */
+ #define BMIC_READ 0x26
+ #define BMIC_WRITE 0x27
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0042-SCSI-hpsa-save-pdev-pointer-in-per-hba-structure-ear.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0042-SCSI-hpsa-save-pdev-pointer-in-per-hba-structure-ear.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,241 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:12:46 -0500
+Subject: [PATCH 42/79] [SCSI] hpsa: save pdev pointer in per hba structure
+ early to avoid passing it around so much.
+
+commit 55c06c7171f63aaac1f9009729d1cb5107fa0626 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   75 ++++++++++++++++++++++++++-------------------------
+ 1 files changed, 38 insertions(+), 37 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index c016426b..d6f8970 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3209,8 +3209,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
+  * controllers that are capable. If not, we use IO-APIC mode.
+  */
+ 
+-static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
+-					   struct pci_dev *pdev, u32 board_id)
++static void __devinit hpsa_interrupt_mode(struct ctlr_info *h, u32 board_id)
+ {
+ #ifdef CONFIG_PCI_MSI
+ 	int err;
+@@ -3223,9 +3222,9 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
+ 	    (board_id == 0x40800E11) ||
+ 	    (board_id == 0x40820E11) || (board_id == 0x40830E11))
+ 		goto default_int_mode;
+-	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
+-		dev_info(&pdev->dev, "MSIX\n");
+-		err = pci_enable_msix(pdev, hpsa_msix_entries, 4);
++	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
++		dev_info(&h->pdev->dev, "MSIX\n");
++		err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4);
+ 		if (!err) {
+ 			h->intr[0] = hpsa_msix_entries[0].vector;
+ 			h->intr[1] = hpsa_msix_entries[1].vector;
+@@ -3235,29 +3234,29 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
+ 			return;
+ 		}
+ 		if (err > 0) {
+-			dev_warn(&pdev->dev, "only %d MSI-X vectors "
++			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
+ 			       "available\n", err);
+ 			goto default_int_mode;
+ 		} else {
+-			dev_warn(&pdev->dev, "MSI-X init failed %d\n",
++			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
+ 			       err);
+ 			goto default_int_mode;
+ 		}
+ 	}
+-	if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
+-		dev_info(&pdev->dev, "MSI\n");
+-		if (!pci_enable_msi(pdev))
++	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
++		dev_info(&h->pdev->dev, "MSI\n");
++		if (!pci_enable_msi(h->pdev))
+ 			h->msi_vector = 1;
+ 		else
+-			dev_warn(&pdev->dev, "MSI init failed\n");
++			dev_warn(&h->pdev->dev, "MSI init failed\n");
+ 	}
+ default_int_mode:
+ #endif				/* CONFIG_PCI_MSI */
+ 	/* if we get here we're going to use the default interrupt mode */
+-	h->intr[PERF_MODE_INT] = pdev->irq;
++	h->intr[PERF_MODE_INT] = h->pdev->irq;
+ }
+ 
+-static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
++static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ {
+ 	ushort subsystem_vendor_id, subsystem_device_id, command;
+ 	u32 board_id, scratchpad = 0;
+@@ -3267,8 +3266,8 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+ 	u32 trans_offset;
+ 	int i, prod_index, err;
+ 
+-	subsystem_vendor_id = pdev->subsystem_vendor;
+-	subsystem_device_id = pdev->subsystem_device;
++	subsystem_vendor_id = h->pdev->subsystem_vendor;
++	subsystem_device_id = h->pdev->subsystem_device;
+ 	board_id = (((u32) (subsystem_device_id << 16) & 0xffff0000) |
+ 		    subsystem_vendor_id);
+ 
+@@ -3282,7 +3281,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+ 		prod_index--;
+ 		if (subsystem_vendor_id != PCI_VENDOR_ID_HP ||
+ 				!hpsa_allow_any) {
+-			dev_warn(&pdev->dev, "unrecognized board ID:"
++			dev_warn(&h->pdev->dev, "unrecognized board ID:"
+ 				" 0x%08lx, ignoring.\n",
+ 				(unsigned long) board_id);
+ 			return -ENODEV;
+@@ -3291,41 +3290,42 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+ 	/* check to see if controller has been disabled
+ 	 * BEFORE trying to enable it
+ 	 */
+-	(void)pci_read_config_word(pdev, PCI_COMMAND, &command);
++	(void)pci_read_config_word(h->pdev, PCI_COMMAND, &command);
+ 	if (!(command & 0x02)) {
+-		dev_warn(&pdev->dev, "controller appears to be disabled\n");
++		dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
+ 		return -ENODEV;
+ 	}
+ 
+-	err = pci_enable_device(pdev);
++	err = pci_enable_device(h->pdev);
+ 	if (err) {
+-		dev_warn(&pdev->dev, "unable to enable PCI device\n");
++		dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
+ 		return err;
+ 	}
+ 
+-	err = pci_request_regions(pdev, "hpsa");
++	err = pci_request_regions(h->pdev, "hpsa");
+ 	if (err) {
+-		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
++		dev_err(&h->pdev->dev,
++			"cannot obtain PCI resources, aborting\n");
+ 		return err;
+ 	}
+ 
+ 	/* If the kernel supports MSI/MSI-X we will try to enable that,
+ 	 * else we use the IO-APIC interrupt assigned to us by system ROM.
+ 	 */
+-	hpsa_interrupt_mode(h, pdev, board_id);
++	hpsa_interrupt_mode(h, board_id);
+ 
+ 	/* find the memory BAR */
+ 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+-		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM)
++		if (pci_resource_flags(h->pdev, i) & IORESOURCE_MEM)
+ 			break;
+ 	}
+ 	if (i == DEVICE_COUNT_RESOURCE) {
+-		dev_warn(&pdev->dev, "no memory BAR found\n");
++		dev_warn(&h->pdev->dev, "no memory BAR found\n");
+ 		err = -ENODEV;
+ 		goto err_out_free_res;
+ 	}
+ 
+-	h->paddr = pci_resource_start(pdev, i); /* addressing mode bits
++	h->paddr = pci_resource_start(h->pdev, i); /* addressing mode bits
+ 						 * already removed
+ 						 */
+ 
+@@ -3339,7 +3339,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+ 		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
+ 	}
+ 	if (scratchpad != HPSA_FIRMWARE_READY) {
+-		dev_warn(&pdev->dev, "board not ready, timed out.\n");
++		dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
+ 		err = -ENODEV;
+ 		goto err_out_free_res;
+ 	}
+@@ -3347,20 +3347,20 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+ 	/* get the address index number */
+ 	cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET);
+ 	cfg_base_addr &= (u32) 0x0000ffff;
+-	cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
++	cfg_base_addr_index = find_PCI_BAR_index(h->pdev, cfg_base_addr);
+ 	if (cfg_base_addr_index == -1) {
+-		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
++		dev_warn(&h->pdev->dev, "cannot find cfg_base_addr_index\n");
+ 		err = -ENODEV;
+ 		goto err_out_free_res;
+ 	}
+ 
+ 	cfg_offset = readl(h->vaddr + SA5_CTMEM_OFFSET);
+-	h->cfgtable = remap_pci_mem(pci_resource_start(pdev,
++	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
+ 			       cfg_base_addr_index) + cfg_offset,
+ 				sizeof(h->cfgtable));
+ 	/* Find performant mode table. */
+ 	trans_offset = readl(&(h->cfgtable->TransMethodOffset));
+-	h->transtable = remap_pci_mem(pci_resource_start(pdev,
++	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
+ 				cfg_base_addr_index)+cfg_offset+trans_offset,
+ 				sizeof(*h->transtable));
+ 
+@@ -3392,7 +3392,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+ 	    (readb(&h->cfgtable->Signature[1]) != 'I') ||
+ 	    (readb(&h->cfgtable->Signature[2]) != 'S') ||
+ 	    (readb(&h->cfgtable->Signature[3]) != 'S')) {
+-		dev_warn(&pdev->dev, "not a valid CISS config table\n");
++		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
+ 		err = -ENODEV;
+ 		goto err_out_free_res;
+ 	}
+@@ -3434,11 +3434,12 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+ 	}
+ 
+ #ifdef HPSA_DEBUG
+-	print_cfg_table(&pdev->dev, h->cfgtable);
++	print_cfg_table(&h->pdev->dev, h->cfgtable);
+ #endif				/* HPSA_DEBUG */
+ 
+ 	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
+-		dev_warn(&pdev->dev, "unable to get board into simple mode\n");
++		dev_warn(&h->pdev->dev,
++			"unable to get board into simple mode\n");
+ 		err = -ENODEV;
+ 		goto err_out_free_res;
+ 	}
+@@ -3449,7 +3450,7 @@ err_out_free_res:
+ 	 * Deliberately omit pci_disable_device(): it does something nasty to
+ 	 * Smart Array controllers that pci_enable_device does not undo
+ 	 */
+-	pci_release_regions(pdev);
++	pci_release_regions(h->pdev);
+ 	return err;
+ }
+ 
+@@ -3507,17 +3508,17 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
+ 	if (!h)
+ 		return -ENOMEM;
+ 
++	h->pdev = pdev;
+ 	h->busy_initializing = 1;
+ 	INIT_HLIST_HEAD(&h->cmpQ);
+ 	INIT_HLIST_HEAD(&h->reqQ);
+-	rc = hpsa_pci_init(h, pdev);
++	rc = hpsa_pci_init(h);
+ 	if (rc != 0)
+ 		goto clean1;
+ 
+ 	sprintf(h->devname, "hpsa%d", number_of_controllers);
+ 	h->ctlr = number_of_controllers;
+ 	number_of_controllers++;
+-	h->pdev = pdev;
+ 
+ 	/* configure PCI DMA stuff */
+ 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0043-SCSI-hpsa-factor-out-hpsa_lookup_board_id.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0043-SCSI-hpsa-factor-out-hpsa_lookup_board_id.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,121 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:12:52 -0500
+Subject: [PATCH 43/79] [SCSI] hpsa: factor out hpsa_lookup_board_id
+
+commit e5c880d1d5923c341ac7ba537fb1e6b73c5977a2 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   58 +++++++++++++++++++++++++++-----------------------
+ 1 files changed, 31 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index d6f8970..b6c6e7f 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3256,37 +3256,44 @@ default_int_mode:
+ 	h->intr[PERF_MODE_INT] = h->pdev->irq;
+ }
+ 
++static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
++{
++	int i;
++	u32 subsystem_vendor_id, subsystem_device_id;
++
++	subsystem_vendor_id = pdev->subsystem_vendor;
++	subsystem_device_id = pdev->subsystem_device;
++	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
++		    subsystem_vendor_id;
++
++	for (i = 0; i < ARRAY_SIZE(products); i++)
++		if (*board_id == products[i].board_id)
++			return i;
++
++	if (subsystem_vendor_id != PCI_VENDOR_ID_HP || !hpsa_allow_any) {
++		dev_warn(&pdev->dev, "unrecognized board ID: "
++			"0x%08x, ignoring.\n", *board_id);
++		return -ENODEV;
++	}
++	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
++}
++
+ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ {
+-	ushort subsystem_vendor_id, subsystem_device_id, command;
+-	u32 board_id, scratchpad = 0;
++	ushort command;
++	u32 scratchpad = 0;
+ 	u64 cfg_offset;
+ 	u32 cfg_base_addr;
+ 	u64 cfg_base_addr_index;
+ 	u32 trans_offset;
+ 	int i, prod_index, err;
+ 
+-	subsystem_vendor_id = h->pdev->subsystem_vendor;
+-	subsystem_device_id = h->pdev->subsystem_device;
+-	board_id = (((u32) (subsystem_device_id << 16) & 0xffff0000) |
+-		    subsystem_vendor_id);
+-
+-	for (i = 0; i < ARRAY_SIZE(products); i++)
+-		if (board_id == products[i].board_id)
+-			break;
+-
+-	prod_index = i;
++	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
++	if (prod_index < 0)
++		return -ENODEV;
++	h->product_name = products[prod_index].product_name;
++	h->access = *(products[prod_index].access);
+ 
+-	if (prod_index == ARRAY_SIZE(products)) {
+-		prod_index--;
+-		if (subsystem_vendor_id != PCI_VENDOR_ID_HP ||
+-				!hpsa_allow_any) {
+-			dev_warn(&h->pdev->dev, "unrecognized board ID:"
+-				" 0x%08lx, ignoring.\n",
+-				(unsigned long) board_id);
+-			return -ENODEV;
+-		}
+-	}
+ 	/* check to see if controller has been disabled
+ 	 * BEFORE trying to enable it
+ 	 */
+@@ -3312,7 +3319,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ 	/* If the kernel supports MSI/MSI-X we will try to enable that,
+ 	 * else we use the IO-APIC interrupt assigned to us by system ROM.
+ 	 */
+-	hpsa_interrupt_mode(h, board_id);
++	hpsa_interrupt_mode(h, h->board_id);
+ 
+ 	/* find the memory BAR */
+ 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+@@ -3364,7 +3371,6 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ 				cfg_base_addr_index)+cfg_offset+trans_offset,
+ 				sizeof(*h->transtable));
+ 
+-	h->board_id = board_id;
+ 	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+ 	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
+ 
+@@ -3383,8 +3389,6 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ 		h->chainsize = 0;
+ 	}
+ 
+-	h->product_name = products[prod_index].product_name;
+-	h->access = *(products[prod_index].access);
+ 	/* Allow room for some ioctls */
+ 	h->nr_cmds = h->max_commands - 4;
+ 
+@@ -3410,7 +3414,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ 	 * An ASIC bug may result in a prefetch beyond
+ 	 * physical memory.
+ 	 */
+-	if (board_id == 0x3225103C) {
++	if (h->board_id == 0x3225103C) {
+ 		u32 dma_prefetch;
+ 		dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
+ 		dma_prefetch |= 0x8000;
+-- 
+1.7.4.4
+

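The packing in hpsa_lookup_board_id() is straightforward to verify in
isolation: subsystem device ID in the high 16 bits, subsystem vendor ID in
the low 16. A stand-alone sketch (ours), using HP's PCI vendor ID and the
P600 device ID that the DMA-prefetch quirk later in this series matches
against:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t subsystem_vendor_id = 0x103C;  /* PCI_VENDOR_ID_HP */
        uint16_t subsystem_device_id = 0x3225;  /* Smart Array P600 */
        uint32_t board_id = ((uint32_t)subsystem_device_id << 16) |
                            subsystem_vendor_id;

        printf("board_id = 0x%08X\n", (unsigned)board_id); /* 0x3225103C */
        return 0;
}
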
Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0044-SCSI-hpsa-factor-out-hpsa_board_disabled.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0044-SCSI-hpsa-factor-out-hpsa_board_disabled.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,54 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:12:57 -0500
+Subject: [PATCH 44/79] [SCSI] hpsa: factor out hpsa_board_disabled
+
+commit 85bdbabbd97ff797f91e6ec839ab053776bc72b4 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   16 +++++++++-------
+ 1 files changed, 9 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index b6c6e7f..4233b93 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3278,9 +3278,16 @@ static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
+ 	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
+ }
+ 
++static inline bool hpsa_board_disabled(struct pci_dev *pdev)
++{
++	u16 command;
++
++	(void) pci_read_config_word(pdev, PCI_COMMAND, &command);
++	return ((command & PCI_COMMAND_MEMORY) == 0);
++}
++
+ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ {
+-	ushort command;
+ 	u32 scratchpad = 0;
+ 	u64 cfg_offset;
+ 	u32 cfg_base_addr;
+@@ -3294,15 +3301,10 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ 	h->product_name = products[prod_index].product_name;
+ 	h->access = *(products[prod_index].access);
+ 
+-	/* check to see if controller has been disabled
+-	 * BEFORE trying to enable it
+-	 */
+-	(void)pci_read_config_word(h->pdev, PCI_COMMAND, &command);
+-	if (!(command & 0x02)) {
++	if (hpsa_board_disabled(h->pdev)) {
+ 		dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
+ 		return -ENODEV;
+ 	}
+-
+ 	err = pci_enable_device(h->pdev);
+ 	if (err) {
+ 		dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
+-- 
+1.7.4.4
+

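The helper above gives the bare 0x02 test its proper name: PCI_COMMAND_MEMORY
is the memory-space enable bit of the standard PCI command register at
config-space offset 0x04, so behaviour is unchanged. The same bit can be read
from user space through sysfs; a rough sketch (ours; the device path is a
placeholder):

#include <stdio.h>
#include <stdint.h>

#define PCI_COMMAND        0x04 /* command register offset in config space */
#define PCI_COMMAND_MEMORY 0x2  /* memory-space enable bit */

int main(void)
{
        /* substitute a real device address, e.g. from lspci -D */
        FILE *f = fopen("/sys/bus/pci/devices/0000:03:00.0/config", "rb");
        uint16_t command;

        if (!f || fseek(f, PCI_COMMAND, SEEK_SET) != 0 ||
            fread(&command, sizeof(command), 1, f) != 1) {
                if (f)
                        fclose(f);
                return 1;
        }
        fclose(f);
        /* config space is little-endian; read as-is on x86 */
        printf("memory space %sabled\n",
               (command & PCI_COMMAND_MEMORY) ? "en" : "dis");
        return 0;
}
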
Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0045-SCSI-hpsa-remove-redundant-board_id-parameter-from-h.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0045-SCSI-hpsa-remove-redundant-board_id-parameter-from-h.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,56 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:13:02 -0500
+Subject: [PATCH 45/79] [SCSI] hpsa: remove redundant board_id parameter from
+ hpsa_interrupt_mode
+
+commit 6b3f4c52b29eee17285a6cd57071c9ac7736d172 upstream.
+
+and delete duplicated comment
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   13 ++++---------
+ 1 files changed, 4 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 4233b93..66b7dcf 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3209,7 +3209,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
+  * controllers that are capable. If not, we use IO-APIC mode.
+  */
+ 
+-static void __devinit hpsa_interrupt_mode(struct ctlr_info *h, u32 board_id)
++static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
+ {
+ #ifdef CONFIG_PCI_MSI
+ 	int err;
+@@ -3218,9 +3218,8 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h, u32 board_id)
+ 	};
+ 
+ 	/* Some boards advertise MSI but don't really support it */
+-	if ((board_id == 0x40700E11) ||
+-	    (board_id == 0x40800E11) ||
+-	    (board_id == 0x40820E11) || (board_id == 0x40830E11))
++	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
++	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
+ 		goto default_int_mode;
+ 	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
+ 		dev_info(&h->pdev->dev, "MSIX\n");
+@@ -3317,11 +3316,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ 			"cannot obtain PCI resources, aborting\n");
+ 		return err;
+ 	}
+-
+-	/* If the kernel supports MSI/MSI-X we will try to enable that,
+-	 * else we use the IO-APIC interrupt assigned to us by system ROM.
+-	 */
+-	hpsa_interrupt_mode(h, h->board_id);
++	hpsa_interrupt_mode(h);
+ 
+ 	/* find the memory BAR */
+ 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0046-SCSI-hpsa-factor-out-hpsa_find_memory_BAR.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0046-SCSI-hpsa-factor-out-hpsa_find_memory_BAR.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,68 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:13:07 -0500
+Subject: [PATCH 46/79] [SCSI] hpsa: factor out hpsa_find_memory_BAR
+
+commit 3a7774ceb89f02f78e269b5c900096b066b66c3c upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   34 +++++++++++++++++++---------------
+ 1 files changed, 19 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 66b7dcf..0582f2f 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3285,6 +3285,23 @@ static inline bool hpsa_board_disabled(struct pci_dev *pdev)
+ 	return ((command & PCI_COMMAND_MEMORY) == 0);
+ }
+ 
++static int __devinit hpsa_pci_find_memory_BAR(struct ctlr_info *h,
++	unsigned long *memory_bar)
++{
++	int i;
++
++	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++		if (pci_resource_flags(h->pdev, i) & IORESOURCE_MEM) {
++			/* addressing mode bits already removed */
++			*memory_bar = pci_resource_start(h->pdev, i);
++			dev_dbg(&h->pdev->dev, "memory BAR = %lx\n",
++				*memory_bar);
++			return 0;
++		}
++	dev_warn(&h->pdev->dev, "no memory BAR found\n");
++	return -ENODEV;
++}
++
+ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ {
+ 	u32 scratchpad = 0;
+@@ -3317,22 +3334,9 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ 		return err;
+ 	}
+ 	hpsa_interrupt_mode(h);
+-
+-	/* find the memory BAR */
+-	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+-		if (pci_resource_flags(h->pdev, i) & IORESOURCE_MEM)
+-			break;
+-	}
+-	if (i == DEVICE_COUNT_RESOURCE) {
+-		dev_warn(&h->pdev->dev, "no memory BAR found\n");
+-		err = -ENODEV;
++	err = hpsa_pci_find_memory_BAR(h, &h->paddr);
++	if (err)
+ 		goto err_out_free_res;
+-	}
+-
+-	h->paddr = pci_resource_start(h->pdev, i); /* addressing mode bits
+-						 * already removed
+-						 */
+-
+ 	h->vaddr = remap_pci_mem(h->paddr, 0x250);
+ 
+ 	/* Wait for the board to become ready.  */
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0047-SCSI-hpsa-factor-out-hpsa_wait_for_board_ready.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0047-SCSI-hpsa-factor-out-hpsa_wait_for_board_ready.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,65 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:13:12 -0500
+Subject: [PATCH 47/79] [SCSI] hpsa: factor out hpsa_wait_for_board_ready
+
+commit 2c4c8c8b662286230a798c60408d217aeab55f7f upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   29 +++++++++++++++++------------
+ 1 files changed, 17 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 0582f2f..66c4fc3 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3302,9 +3302,23 @@ static int __devinit hpsa_pci_find_memory_BAR(struct ctlr_info *h,
+ 	return -ENODEV;
+ }
+ 
++static int __devinit hpsa_wait_for_board_ready(struct ctlr_info *h)
++{
++	int i;
++	u32 scratchpad;
++
++	for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
++		scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
++		if (scratchpad == HPSA_FIRMWARE_READY)
++			return 0;
++		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
++	}
++	dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
++	return -ENODEV;
++}
++
+ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ {
+-	u32 scratchpad = 0;
+ 	u64 cfg_offset;
+ 	u32 cfg_base_addr;
+ 	u64 cfg_base_addr_index;
+@@ -3339,18 +3353,9 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ 		goto err_out_free_res;
+ 	h->vaddr = remap_pci_mem(h->paddr, 0x250);
+ 
+-	/* Wait for the board to become ready.  */
+-	for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
+-		scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+-		if (scratchpad == HPSA_FIRMWARE_READY)
+-			break;
+-		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
+-	}
+-	if (scratchpad != HPSA_FIRMWARE_READY) {
+-		dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
+-		err = -ENODEV;
++	err = hpsa_wait_for_board_ready(h);
++	if (err)
+ 		goto err_out_free_res;
+-	}
+ 
+ 	/* get the address index number */
+ 	cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET);
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0048-SCSI-hpsa-factor-out-hpsa_find_cfgtables.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0048-SCSI-hpsa-factor-out-hpsa_find_cfgtables.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,89 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:13:17 -0500
+Subject: [PATCH 48/79] [SCSI] hpsa: factor out hpsa_find_cfgtables
+
+commit 77c4495c17d7508bdef1cfd2c3c933ff5379908b upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   50 ++++++++++++++++++++++++++++++--------------------
+ 1 files changed, 30 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 66c4fc3..cb0cc09 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3317,12 +3317,39 @@ static int __devinit hpsa_wait_for_board_ready(struct ctlr_info *h)
+ 	return -ENODEV;
+ }
+ 
+-static int __devinit hpsa_pci_init(struct ctlr_info *h)
++static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
+ {
+ 	u64 cfg_offset;
+ 	u32 cfg_base_addr;
+ 	u64 cfg_base_addr_index;
+ 	u32 trans_offset;
++
++	/* get the address index number */
++	cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET);
++	cfg_base_addr &= (u32) 0x0000ffff;
++	cfg_base_addr_index = find_PCI_BAR_index(h->pdev, cfg_base_addr);
++	if (cfg_base_addr_index == -1) {
++		dev_warn(&h->pdev->dev, "cannot find cfg_base_addr_index\n");
++		return -ENODEV;
++	}
++	cfg_offset = readl(h->vaddr + SA5_CTMEM_OFFSET);
++	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
++			       cfg_base_addr_index) + cfg_offset,
++				sizeof(h->cfgtable));
++	if (!h->cfgtable)
++		return -ENOMEM;
++	/* Find performant mode table. */
++	trans_offset = readl(&(h->cfgtable->TransMethodOffset));
++	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
++				cfg_base_addr_index)+cfg_offset+trans_offset,
++				sizeof(*h->transtable));
++	if (!h->transtable)
++		return -ENOMEM;
++	return 0;
++}
++
++static int __devinit hpsa_pci_init(struct ctlr_info *h)
++{
+ 	int i, prod_index, err;
+ 
+ 	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
+@@ -3356,26 +3383,9 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ 	err = hpsa_wait_for_board_ready(h);
+ 	if (err)
+ 		goto err_out_free_res;
+-
+-	/* get the address index number */
+-	cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET);
+-	cfg_base_addr &= (u32) 0x0000ffff;
+-	cfg_base_addr_index = find_PCI_BAR_index(h->pdev, cfg_base_addr);
+-	if (cfg_base_addr_index == -1) {
+-		dev_warn(&h->pdev->dev, "cannot find cfg_base_addr_index\n");
+-		err = -ENODEV;
++	err = hpsa_find_cfgtables(h);
++	if (err)
+ 		goto err_out_free_res;
+-	}
+-
+-	cfg_offset = readl(h->vaddr + SA5_CTMEM_OFFSET);
+-	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
+-			       cfg_base_addr_index) + cfg_offset,
+-				sizeof(h->cfgtable));
+-	/* Find performant mode table. */
+-	trans_offset = readl(&(h->cfgtable->TransMethodOffset));
+-	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
+-				cfg_base_addr_index)+cfg_offset+trans_offset,
+-				sizeof(*h->transtable));
+ 
+ 	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+ 	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0049-SCSI-hpsa-fix-leak-of-ioremapped-memory-in-hpsa_pci_.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0049-SCSI-hpsa-fix-leak-of-ioremapped-memory-in-hpsa_pci_.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,54 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:13:22 -0500
+Subject: [PATCH 49/79] [SCSI] hpsa: fix leak of ioremapped memory in
+ hpsa_pci_init error path.
+
+commit 204892e9717790cd17689aaebf2790a477492734 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   13 ++++++++++++-
+ 1 files changed, 12 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index cb0cc09..4983f34 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3379,7 +3379,10 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ 	if (err)
+ 		goto err_out_free_res;
+ 	h->vaddr = remap_pci_mem(h->paddr, 0x250);
+-
++	if (!h->vaddr) {
++		err = -ENOMEM;
++		goto err_out_free_res;
++	}
+ 	err = hpsa_wait_for_board_ready(h);
+ 	if (err)
+ 		goto err_out_free_res;
+@@ -3466,6 +3469,12 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ 	return 0;
+ 
+ err_out_free_res:
++	if (h->transtable)
++		iounmap(h->transtable);
++	if (h->cfgtable)
++		iounmap(h->cfgtable);
++	if (h->vaddr)
++		iounmap(h->vaddr);
+ 	/*
+ 	 * Deliberately omit pci_disable_device(): it does something nasty to
+ 	 * Smart Array controllers that pci_enable_device does not undo
+@@ -3684,6 +3693,8 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
+ 	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
+ 	hpsa_shutdown(pdev);
+ 	iounmap(h->vaddr);
++	iounmap(h->transtable);
++	iounmap(h->cfgtable);
+ 	hpsa_free_sg_chain_blocks(h);
+ 	pci_free_consistent(h->pdev,
+ 		h->nr_cmds * sizeof(struct CommandList),
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0050-SCSI-hpsa-hpsa-factor-out-hpsa_find_board_params.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0050-SCSI-hpsa-hpsa-factor-out-hpsa_find_board_params.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,79 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:13:27 -0500
+Subject: [PATCH 50/79] [SCSI] hpsa: hpsa factor out hpsa_find_board_params
+
+commit b93d7536eaa1206ad4a00ad8ea700ff0bd75a0da upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   46 +++++++++++++++++++++++++---------------------
+ 1 files changed, 25 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 4983f34..dcbe54b 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3348,6 +3348,30 @@ static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
+ 	return 0;
+ }
+ 
++/* Interrogate the hardware for some limits:
++ * max commands, max SG elements without chaining, and with chaining,
++ * SG chain block size, etc.
++ */
++static void __devinit hpsa_find_board_params(struct ctlr_info *h)
++{
++	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
++	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
++	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
++	/*
++	 * Limit in-command s/g elements to 32 to save dma'able memory.
++	 * However, the spec says if 0, use 31
++	 */
++	h->max_cmd_sg_entries = 31;
++	if (h->maxsgentries > 512) {
++		h->max_cmd_sg_entries = 32;
++		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
++		h->maxsgentries--; /* save one for chain pointer */
++	} else {
++		h->maxsgentries = 31; /* default to traditional values */
++		h->chainsize = 0;
++	}
++}
++
+ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ {
+ 	int i, prod_index, err;
+@@ -3389,27 +3413,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ 	err = hpsa_find_cfgtables(h);
+ 	if (err)
+ 		goto err_out_free_res;
+-
+-	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+-	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
+-
+-	/*
+-	 * Limit in-command s/g elements to 32 to save dma'able memory.
+-	 * However, the spec says if 0, use 31
+-	 */
+-
+-	h->max_cmd_sg_entries = 31;
+-	if (h->maxsgentries > 512) {
+-		h->max_cmd_sg_entries = 32;
+-		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
+-		h->maxsgentries--; /* save one for chain pointer */
+-	} else {
+-		h->maxsgentries = 31; /* default to traditional values */
+-		h->chainsize = 0;
+-	}
+-
+-	/* Allow room for some ioctls */
+-	h->nr_cmds = h->max_commands - 4;
++	hpsa_find_board_params(h);
+ 
+ 	if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
+ 	    (readb(&h->cfgtable->Signature[1]) != 'I') ||
+-- 
+1.7.4.4
+

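Worked through with sample firmware values (ours, for illustration): a
controller reporting MaxScatterGatherElements = 1024 ends up with
max_cmd_sg_entries = 32, chainsize = 1024 - 32 + 1 = 993, and
maxsgentries = 1023, one embedded slot having been reserved for the chain
pointer; a controller reporting anything up to 512, including the 0 the
comment mentions, falls back to the traditional 31 entries with chaining
disabled.
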
Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0051-SCSI-hpsa-factor-out-hpsa-CISS-signature-present.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0051-SCSI-hpsa-factor-out-hpsa-CISS-signature-present.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,51 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:13:32 -0500
+Subject: [PATCH 51/79] [SCSI] hpsa: factor out hpsa-CISS-signature-present
+
+commit 76c46e4970f7ee6d8c54220a767e93d67b74cd33 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   18 +++++++++++++-----
+ 1 files changed, 13 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index dcbe54b..f2a9af6 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3372,6 +3372,18 @@ static void __devinit hpsa_find_board_params(struct ctlr_info *h)
+ 	}
+ }
+ 
++static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
++{
++	if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
++	    (readb(&h->cfgtable->Signature[1]) != 'I') ||
++	    (readb(&h->cfgtable->Signature[2]) != 'S') ||
++	    (readb(&h->cfgtable->Signature[3]) != 'S')) {
++		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
++		return false;
++	}
++	return true;
++}
++
+ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ {
+ 	int i, prod_index, err;
+@@ -3415,11 +3427,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ 		goto err_out_free_res;
+ 	hpsa_find_board_params(h);
+ 
+-	if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
+-	    (readb(&h->cfgtable->Signature[1]) != 'I') ||
+-	    (readb(&h->cfgtable->Signature[2]) != 'S') ||
+-	    (readb(&h->cfgtable->Signature[3]) != 'S')) {
+-		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
++	if (!hpsa_CISS_signature_present(h)) {
+ 		err = -ENODEV;
+ 		goto err_out_free_res;
+ 	}
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0052-SCSI-hpsa-factor-out-hpsa_enable_scsi_prefetch.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0052-SCSI-hpsa-factor-out-hpsa_enable_scsi_prefetch.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,55 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:13:38 -0500
+Subject: [PATCH 52/79] [SCSI] hpsa: factor out hpsa_enable_scsi_prefetch
+
+commit f7c391015ab64c835a9bb403626b38a51d6432cc upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   22 +++++++++++++---------
+ 1 files changed, 13 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index f2a9af6..62f9784 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3384,6 +3384,18 @@ static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
+ 	return true;
+ }
+ 
++/* Need to enable prefetch in the SCSI core for 6400 in x86 */
++static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
++{
++#ifdef CONFIG_X86
++	u32 prefetch;
++
++	prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
++	prefetch |= 0x100;
++	writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
++#endif
++}
++
+ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ {
+ 	int i, prod_index, err;
+@@ -3431,15 +3443,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ 		err = -ENODEV;
+ 		goto err_out_free_res;
+ 	}
+-#ifdef CONFIG_X86
+-	{
+-		/* Need to enable prefetch in the SCSI core for 6400 in x86 */
+-		u32 prefetch;
+-		prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
+-		prefetch |= 0x100;
+-		writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
+-	}
+-#endif
++	hpsa_enable_scsi_prefetch(h);
+ 
+ 	/* Disabling DMA prefetch for the P600
+ 	 * An ASIC bug may result in a prefetch beyond
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0053-SCSI-hpsa-factor-out-hpsa_p600_dma_prefetch_quirk.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0053-SCSI-hpsa-factor-out-hpsa_p600_dma_prefetch_quirk.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,59 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:13:43 -0500
+Subject: [PATCH 53/79] [SCSI] hpsa: factor out hpsa_p600_dma_prefetch_quirk
+
+commit 3d0eab67cf556db4430a42fbb45dc90ef690d30c upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   26 +++++++++++++++-----------
+ 1 files changed, 15 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 62f9784..9e81e0b 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3396,6 +3396,20 @@ static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
+ #endif
+ }
+ 
++/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
++ * in a prefetch beyond physical memory.
++ */
++static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
++{
++	u32 dma_prefetch;
++
++	if (h->board_id != 0x3225103C)
++		return;
++	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
++	dma_prefetch |= 0x8000;
++	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
++}
++
+ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ {
+ 	int i, prod_index, err;
+@@ -3444,17 +3458,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ 		goto err_out_free_res;
+ 	}
+ 	hpsa_enable_scsi_prefetch(h);
+-
+-	/* Disabling DMA prefetch for the P600
+-	 * An ASIC bug may result in a prefetch beyond
+-	 * physical memory.
+-	 */
+-	if (h->board_id == 0x3225103C) {
+-		u32 dma_prefetch;
+-		dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
+-		dma_prefetch |= 0x8000;
+-		writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
+-	}
++	hpsa_p600_dma_prefetch_quirk(h);
+ 
+ 	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
+ 	/* Update the field, and then ring the doorbell */
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0054-SCSI-hpsa-factor-out-hpsa_enter_simple_mode.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0054-SCSI-hpsa-factor-out-hpsa_enter_simple_mode.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,98 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:13:48 -0500
+Subject: [PATCH 54/79] [SCSI] hpsa: factor out hpsa_enter_simple_mode
+
+commit eb6b2ae9058accd183fe8b31f1985312bf333624 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   62 ++++++++++++++++++++++++++++----------------------
+ 1 files changed, 35 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 9e81e0b..4d4ecca 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3410,9 +3410,41 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
+ 	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
+ }
+ 
++static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
++{
++	int i;
++
++	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
++	/* Update the field, and then ring the doorbell */
++	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
++	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
++
++	/* under certain very rare conditions, this can take awhile.
++	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
++	 * as we enter this code.)
++	 */
++	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
++		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
++			break;
++		/* delay and try again */
++		msleep(10);
++	}
++
++#ifdef HPSA_DEBUG
++	print_cfg_table(&h->pdev->dev, h->cfgtable);
++#endif				/* HPSA_DEBUG */
++
++	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
++		dev_warn(&h->pdev->dev,
++			"unable to get board into simple mode\n");
++		return -ENODEV;
++	}
++	return 0;
++}
++
+ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ {
+-	int i, prod_index, err;
++	int prod_index, err;
+ 
+ 	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
+ 	if (prod_index < 0)
+@@ -3459,33 +3491,9 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ 	}
+ 	hpsa_enable_scsi_prefetch(h);
+ 	hpsa_p600_dma_prefetch_quirk(h);
+-
+-	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
+-	/* Update the field, and then ring the doorbell */
+-	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
+-	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+-
+-	/* under certain very rare conditions, this can take awhile.
+-	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
+-	 * as we enter this code.)
+-	 */
+-	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
+-		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
+-			break;
+-		/* delay and try again */
+-		msleep(10);
+-	}
+-
+-#ifdef HPSA_DEBUG
+-	print_cfg_table(&h->pdev->dev, h->cfgtable);
+-#endif				/* HPSA_DEBUG */
+-
+-	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
+-		dev_warn(&h->pdev->dev,
+-			"unable to get board into simple mode\n");
+-		err = -ENODEV;
++	err = hpsa_enter_simple_mode(h);
++	if (err)
+ 		goto err_out_free_res;
+-	}
+ 	return 0;
+ 
+ err_out_free_res:
+-- 
+1.7.4.4
+

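The function factored out above implements a request/acknowledge doorbell
protocol; a comment-only summary (ours) of the sequence, for reference:

/* Doorbell handshake in hpsa_enter_simple_mode():
 * 1. write CFGTBL_Trans_Simple into cfgtable->HostWrite.TransportRequest;
 * 2. ring the doorbell by writing CFGTBL_ChangeReq to SA5_DOORBELL;
 * 3. poll SA5_DOORBELL until the controller clears CFGTBL_ChangeReq,
 *    sleeping 10 ms between reads, for at most MAX_CONFIG_WAIT tries;
 * 4. verify that cfgtable->TransportActive now reports
 *    CFGTBL_Trans_Simple, otherwise return -ENODEV.
 */
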
Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0055-SCSI-hpsa-check-that-simple-mode-is-supported.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0055-SCSI-hpsa-check-that-simple-mode-is-supported.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,33 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:13:53 -0500
+Subject: [PATCH 55/79] [SCSI] hpsa: check that simple mode is supported
+
+commit cda7612d4b96d51324c6fc4d5e47d629da6cb500 upstream.
+
+before trying to enter simple mode transport method.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    5 +++++
+ 1 files changed, 5 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 4d4ecca..57d0380 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3413,6 +3413,11 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
+ static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
+ {
+ 	int i;
++	u32 trans_support;
++
++	trans_support = readl(&(h->cfgtable->TransportSupport));
++	if (!(trans_support & SIMPLE_MODE))
++		return -ENOTSUPP;
+ 
+ 	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
+ 	/* Update the field, and then ring the doorbell */
+-- 
+1.7.4.4
+
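
The guard added here is a plain capability-bit test: read the transport
methods the controller advertises and refuse to request a switch the
firmware cannot honour. A minimal standalone restatement in C -- the
SIMPLE_MODE value mirrors the CFGTBL_Trans_Simple bit from hpsa_cmd.h,
and everything else is illustrative:

    #include <stdint.h>

    #define SIMPLE_MODE 0x02u   /* CFGTBL_Trans_Simple capability bit */

    /* Nonzero if the controller claims to support the simple transport
     * method; callers should bail out (e.g. with -ENOTSUPP) otherwise. */
    int simple_mode_supported(uint32_t trans_support)
    {
            return (trans_support & SIMPLE_MODE) != 0;
    }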

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0056-SCSI-hpsa-clean-up-debug-ifdefs.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0056-SCSI-hpsa-clean-up-debug-ifdefs.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,52 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:13:58 -0500
+Subject: [PATCH 56/79] [SCSI] hpsa: clean up debug ifdefs
+
+commit 58f8665cc369b9633af072afb741b8f0a01622fa upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    8 ++------
+ 1 files changed, 2 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 57d0380..7e602d2 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3135,9 +3135,9 @@ static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev)
+  *   the io functions.
+  *   This is for debug only.
+  */
+-#ifdef HPSA_DEBUG
+ static void print_cfg_table(struct device *dev, struct CfgTable *tb)
+ {
++#ifdef HPSA_DEBUG
+ 	int i;
+ 	char temp_name[17];
+ 
+@@ -3167,8 +3167,8 @@ static void print_cfg_table(struct device *dev, struct CfgTable *tb)
+ 	dev_info(dev, "   Server Name = %s\n", temp_name);
+ 	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
+ 		readl(&(tb->HeartBeat)));
+-}
+ #endif				/* HPSA_DEBUG */
++}
+ 
+ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
+ {
+@@ -3434,11 +3434,7 @@ static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
+ 		/* delay and try again */
+ 		msleep(10);
+ 	}
+-
+-#ifdef HPSA_DEBUG
+ 	print_cfg_table(&h->pdev->dev, h->cfgtable);
+-#endif				/* HPSA_DEBUG */
+-
+ 	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
+ 		dev_warn(&h->pdev->dev,
+ 			"unable to get board into simple mode\n");
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0057-SCSI-hpsa-mark-hpsa_mark_hpsa_put_ctlr_into_performa.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0057-SCSI-hpsa-mark-hpsa_mark_hpsa_put_ctlr_into_performa.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,39 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:14:03 -0500
+Subject: [PATCH 57/79] [SCSI] hpsa: mark
+ hpsa_mark_hpsa_put_ctlr_into_performant_mode as
+ __devinit
+
+commit 7136f9a78eece43226dee1a46ec6fc144f561239 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 7e602d2..d495d8b 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -158,7 +158,7 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
+ /* performant mode helper functions */
+ static void calc_bucket_map(int *bucket, int num_buckets,
+ 	int nsgs, int *bucket_map);
+-static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
++static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
+ static inline u32 next_command(struct ctlr_info *h);
+ 
+ static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
+@@ -3803,7 +3803,7 @@ static void  calc_bucket_map(int bucket[], int num_buckets,
+ 	}
+ }
+ 
+-static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
++static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
+ {
+ 	u32 trans_support;
+ 	u64 trans_offset;
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0058-SCSI-hpsa-factor-out-hpsa_wait_for_mode_change_ack.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0058-SCSI-hpsa-factor-out-hpsa_wait_for_mode_change_ack.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,89 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:14:08 -0500
+Subject: [PATCH 58/79] [SCSI] hpsa: factor out hpsa_wait_for_mode_change_ack
+
+commit 3f4336f33314e7d3687ff46af1fcaa970e3f4e00 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   40 +++++++++++++++++-----------------------
+ 1 files changed, 17 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index d495d8b..e89c40a 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3410,19 +3410,9 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
+ 	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
+ }
+ 
+-static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
++static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
+ {
+ 	int i;
+-	u32 trans_support;
+-
+-	trans_support = readl(&(h->cfgtable->TransportSupport));
+-	if (!(trans_support & SIMPLE_MODE))
+-		return -ENOTSUPP;
+-
+-	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
+-	/* Update the field, and then ring the doorbell */
+-	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
+-	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+ 
+ 	/* under certain very rare conditions, this can take awhile.
+ 	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
+@@ -3434,6 +3424,21 @@ static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
+ 		/* delay and try again */
+ 		msleep(10);
+ 	}
++}
++
++static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
++{
++	u32 trans_support;
++
++	trans_support = readl(&(h->cfgtable->TransportSupport));
++	if (!(trans_support & SIMPLE_MODE))
++		return -ENOTSUPP;
++
++	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
++	/* Update the field, and then ring the doorbell */
++	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
++	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
++	hpsa_wait_for_mode_change_ack(h);
+ 	print_cfg_table(&h->pdev->dev, h->cfgtable);
+ 	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
+ 		dev_warn(&h->pdev->dev,
+@@ -3814,7 +3819,6 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
+ 	 */
+ 	int bft[8] = {5, 6, 8, 10, 12, 20, 28, 35}; /* for scatter/gathers */
+ 	int i = 0;
+-	int l = 0;
+ 	unsigned long register_value;
+ 
+ 	trans_support = readl(&(h->cfgtable->TransportSupport));
+@@ -3858,17 +3862,7 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
+ 	writel(CFGTBL_Trans_Performant,
+ 		&(h->cfgtable->HostWrite.TransportRequest));
+ 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+-	/* under certain very rare conditions, this can take awhile.
+-	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
+-	 * as we enter this code.) */
+-	for (l = 0; l < MAX_CONFIG_WAIT; l++) {
+-		register_value = readl(h->vaddr + SA5_DOORBELL);
+-		if (!(register_value & CFGTBL_ChangeReq))
+-			break;
+-		/* delay and try again */
+-		set_current_state(TASK_INTERRUPTIBLE);
+-		schedule_timeout(10);
+-	}
++	hpsa_wait_for_mode_change_ack(h);
+ 	register_value = readl(&(h->cfgtable->TransportActive));
+ 	if (!(register_value & CFGTBL_Trans_Performant)) {
+ 		dev_warn(&h->pdev->dev, "unable to get board into"
+-- 
+1.7.4.4
+
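
The helper factored out above is a standard bounded-poll pattern: after
ringing the doorbell, sleep in short increments until the controller
clears the change-request bit or the retry budget runs out. A user-space
sketch of the same shape -- read_doorbell() is a hypothetical stand-in
for readl() on SA5_DOORBELL, and the retry bound is illustrative rather
than the driver's actual MAX_CONFIG_WAIT:

    #include <unistd.h>

    #define CFGTBL_ChangeReq    0x00000001u
    #define MAX_POLL_ITERATIONS 1000        /* illustrative bound */

    extern unsigned int read_doorbell(void); /* hypothetical MMIO read */

    void wait_for_mode_change_ack(void)
    {
            int i;

            for (i = 0; i < MAX_POLL_ITERATIONS; i++) {
                    if (!(read_doorbell() & CFGTBL_ChangeReq))
                            break;          /* controller acknowledged */
                    usleep(10 * 1000);      /* 10 ms, then try again */
            }
    }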

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0059-SCSI-hpsa-remove-unused-variable-trans_offset.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0059-SCSI-hpsa-remove-unused-variable-trans_offset.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,35 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:14:13 -0500
+Subject: [PATCH 59/79] [SCSI] hpsa: remove unused variable trans_offset
+
+commit ec18d2abad04091c5125b0a37ad80a00099d8ac0 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    2 --
+ 1 files changed, 0 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index e89c40a..ad70f3e 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3811,7 +3811,6 @@ static void  calc_bucket_map(int bucket[], int num_buckets,
+ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
+ {
+ 	u32 trans_support;
+-	u64 trans_offset;
+ 	/*  5 = 1 s/g entry or 4k
+ 	 *  6 = 2 s/g entry or 8k
+ 	 *  8 = 4 s/g entry or 16k
+@@ -3846,7 +3845,6 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
+ 	memset(h->reply_pool, 0, h->reply_pool_size);
+ 	h->reply_pool_head = h->reply_pool;
+ 
+-	trans_offset = readl(&(h->cfgtable->TransMethodOffset));
+ 	bft[7] = h->max_sg_entries + 4;
+ 	calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
+ 	for (i = 0; i < 8; i++)
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0060-SCSI-hpsa-factor-out-hpsa_enter_performant_mode.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0060-SCSI-hpsa-factor-out-hpsa_enter_performant_mode.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,93 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:14:19 -0500
+Subject: [PATCH 60/79] [SCSI] hpsa: factor out hpsa_enter_performant_mode
+
+commit 6c311b5725b9500bdd0f527cd97496b11999fbbd upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   54 ++++++++++++++++++++++++++++----------------------
+ 1 files changed, 30 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index ad70f3e..3c51544 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3808,36 +3808,16 @@ static void  calc_bucket_map(int bucket[], int num_buckets,
+ 	}
+ }
+ 
+-static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
++static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
+ {
+-	u32 trans_support;
++	int i;
++	unsigned long register_value;
++	int bft[8] = {5, 6, 8, 10, 12, 20, 28, 35}; /* for scatter/gathers */
+ 	/*  5 = 1 s/g entry or 4k
+ 	 *  6 = 2 s/g entry or 8k
+ 	 *  8 = 4 s/g entry or 16k
+ 	 * 10 = 6 s/g entry or 24k
+ 	 */
+-	int bft[8] = {5, 6, 8, 10, 12, 20, 28, 35}; /* for scatter/gathers */
+-	int i = 0;
+-	unsigned long register_value;
+-
+-	trans_support = readl(&(h->cfgtable->TransportSupport));
+-	if (!(trans_support & PERFORMANT_MODE))
+-		return;
+-
+-	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+-	h->max_sg_entries = 32;
+-	/* Performant mode ring buffer and supporting data structures */
+-	h->reply_pool_size = h->max_commands * sizeof(u64);
+-	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
+-				&(h->reply_pool_dhandle));
+-
+-	/* Need a block fetch table for performant mode */
+-	h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
+-				sizeof(u32)), GFP_KERNEL);
+-
+-	if ((h->reply_pool == NULL)
+-		|| (h->blockFetchTable == NULL))
+-		goto clean_up;
+ 
+ 	h->reply_pool_wraparound = 1; /* spec: init to 1 */
+ 
+@@ -3867,6 +3847,32 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
+ 					" performant mode\n");
+ 		return;
+ 	}
++}
++
++static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
++{
++	u32 trans_support;
++
++	trans_support = readl(&(h->cfgtable->TransportSupport));
++	if (!(trans_support & PERFORMANT_MODE))
++		return;
++
++	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
++	h->max_sg_entries = 32;
++	/* Performant mode ring buffer and supporting data structures */
++	h->reply_pool_size = h->max_commands * sizeof(u64);
++	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
++				&(h->reply_pool_dhandle));
++
++	/* Need a block fetch table for performant mode */
++	h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
++				sizeof(u32)), GFP_KERNEL);
++
++	if ((h->reply_pool == NULL)
++		|| (h->blockFetchTable == NULL))
++		goto clean_up;
++
++	hpsa_enter_performant_mode(h);
+ 
+ 	/* Change the access methods to the performant access methods */
+ 	h->access = SA5_performant_access;
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0061-SCSI-hpsa-remove-unused-firm_ver-member-of-the-per-h.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0061-SCSI-hpsa-remove-unused-firm_ver-member-of-the-per-h.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,46 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:14:24 -0500
+Subject: [PATCH 61/79] [SCSI] hpsa: remove unused firm_ver member of the
+ per-hba structure
+
+commit 873f339fc53750c1e715f5e1d2dfdb9869b7ea3f upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    6 ------
+ drivers/scsi/hpsa.h |    1 -
+ 2 files changed, 0 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 3c51544..50fddf8 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -1440,12 +1440,6 @@ static int hpsa_update_device_info(struct ctlr_info *h,
+ 		goto bail_out;
+ 	}
+ 
+-	/* As a side effect, record the firmware version number
+-	 * if we happen to be talking to the RAID controller.
+-	 */
+-	if (is_hba_lunid(scsi3addr))
+-		memcpy(h->firm_ver, &inq_buff[32], 4);
+-
+ 	this_device->devtype = (inq_buff[0] & 0x1f);
+ 	memcpy(this_device->scsi3addr, scsi3addr, 8);
+ 	memcpy(this_device->vendor, &inq_buff[8],
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index 1bb5233..a203ef6 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -53,7 +53,6 @@ struct ctlr_info {
+ 	int	ctlr;
+ 	char	devname[8];
+ 	char    *product_name;
+-	char	firm_ver[4]; /* Firmware version */
+ 	struct pci_dev *pdev;
+ 	u32	board_id;
+ 	void __iomem *vaddr;
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0062-SCSI-hpsa-Add-hpsa.txt-to-Documentation-scsi.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0062-SCSI-hpsa-Add-hpsa.txt-to-Documentation-scsi.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,122 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:14:29 -0500
+Subject: [PATCH 62/79] [SCSI] hpsa: Add hpsa.txt to Documentation/scsi
+
+commit 992ebcf14f3cf029b8d0da4f479d752c19d8c726 upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ Documentation/scsi/hpsa.txt |  100 +++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 100 insertions(+), 0 deletions(-)
+ create mode 100644 Documentation/scsi/hpsa.txt
+
+diff --git a/Documentation/scsi/hpsa.txt b/Documentation/scsi/hpsa.txt
+new file mode 100644
+index 0000000..4055657
+--- /dev/null
++++ b/Documentation/scsi/hpsa.txt
+@@ -0,0 +1,100 @@
++
++HPSA - Hewlett Packard Smart Array driver
++-----------------------------------------
++
++This file describes the hpsa SCSI driver for HP Smart Array controllers.
++The hpsa driver is intended to supplant the cciss driver for newer
++Smart Array controllers.  The hpsa driver is a SCSI driver, while the
++cciss driver is a "block" driver.  Actually cciss is both a block
++driver (for logical drives) AND a SCSI driver (for tape drives). This
++"split-brained" design of the cciss driver is a source of excess
++complexity and eliminating that complexity is one of the reasons
++for hpsa to exist.
++
++Supported devices:
++------------------
++
++Smart Array P212
++Smart Array P410
++Smart Array P410i
++Smart Array P411
++Smart Array P812
++Smart Array P712m
++Smart Array P711m
++StorageWorks P1210m
++
++Additionally, older Smart Arrays may work with the hpsa driver if the kernel
++boot parameter "hpsa_allow_any=1" is specified, however these are not tested
++nor supported by HP with this driver.  For older Smart Arrays, the cciss
++driver should still be used.
++
++HPSA specific entries in /sys
++-----------------------------
++
++  In addition to the generic SCSI attributes available in /sys, hpsa supports
++  the following attributes:
++
++  HPSA specific host attributes:
++  ------------------------------
++
++  /sys/class/scsi_host/host*/rescan
++
++  the host "rescan" attribute is a write only attribute.  Writing to this
++  attribute will cause the driver to scan for new, changed, or removed devices
++  (e.g. hot-plugged tape drives, or newly configured or deleted logical drives,
++  etc.) and notify the SCSI midlayer of any changes detected.  Normally this is
++  triggered automatically by HP's Array Configuration Utility (either the GUI or
++  command line variety) so for logical drive changes, the user should not
++  normally have to use this.  It may be useful when hot plugging devices like
++  tape drives, or entire storage boxes containing pre-configured logical drives.
++
++  HPSA specific disk attributes:
++  ------------------------------
++
++  /sys/class/scsi_disk/c:b:t:l/device/unique_id
++  /sys/class/scsi_disk/c:b:t:l/device/raid_level
++  /sys/class/scsi_disk/c:b:t:l/device/lunid
++
++  (where c:b:t:l are the controller, bus, target and lun of the device)
++
++  For example:
++
++	root at host:/sys/class/scsi_disk/4:0:0:0/device# cat unique_id
++	600508B1001044395355323037570F77
++	root at host:/sys/class/scsi_disk/4:0:0:0/device# cat lunid
++	0x0000004000000000
++	root at host:/sys/class/scsi_disk/4:0:0:0/device# cat raid_level
++	RAID 0
++
++HPSA specific ioctls:
++---------------------
++
++  For compatibility with applications written for the cciss driver, many, but
++  not all of the ioctls supported by the cciss driver are also supported by the
++  hpsa driver.  The data structures used by these are described in
++  include/linux/cciss_ioctl.h
++
++  CCISS_DEREGDISK
++  CCISS_REGNEWDISK
++  CCISS_REGNEWD
++
++  The above three ioctls all do exactly the same thing, which is to cause the driver
++  to rescan for new devices.  This does exactly the same thing as writing to the
++  hpsa specific host "rescan" attribute.
++
++  CCISS_GETPCIINFO
++
++	Returns PCI domain, bus, device and function and "board ID" (PCI subsystem ID).
++
++  CCISS_GETDRIVVER
++
++	Returns driver version in three bytes encoded as:
++		(major_version << 16) | (minor_version << 8) | (subminor_version)
++
++  CCISS_PASSTHRU
++  CCISS_BIG_PASSTHRU
++
++	Allows "BMIC" and "CISS" commands to be passed through to the Smart Array.
++	These are used extensively by the HP Array Configuration Utility, SNMP storage
++	agents, etc.  See cciss_vol_status at http://cciss.sf.net for some examples.
++
+-- 
+1.7.4.4
+
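
The CCISS_GETDRIVVER encoding documented in the new hpsa.txt packs three
version components into a single integer. A small self-contained decoder
showing the unpacking (the sample value is made up for illustration):

    #include <stdio.h>

    void print_driver_version(unsigned int v)
    {
            printf("%u.%u.%u\n",
                   (v >> 16) & 0xff,        /* major    */
                   (v >> 8) & 0xff,         /* minor    */
                   v & 0xff);               /* subminor */
    }

    int main(void)
    {
            print_driver_version((2u << 16) | (4u << 8) | 1u); /* 2.4.1 */
            return 0;
    }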

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0063-SCSI-hpsa-expose-controller-firmware-revision-via-sy.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0063-SCSI-hpsa-expose-controller-firmware-revision-via-sy.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,94 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:14:34 -0500
+Subject: [PATCH 63/79] [SCSI] hpsa: expose controller firmware revision via
+ /sys.
+
+commit d28ce020fb0ef9254fc9e0bd07f5898c69af9f7d upstream.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ Documentation/scsi/hpsa.txt |    7 +++++++
+ drivers/scsi/hpsa.c         |   20 ++++++++++++++++++++
+ 2 files changed, 27 insertions(+), 0 deletions(-)
+
+diff --git a/Documentation/scsi/hpsa.txt b/Documentation/scsi/hpsa.txt
+index 4055657..dca6583 100644
+--- a/Documentation/scsi/hpsa.txt
++++ b/Documentation/scsi/hpsa.txt
+@@ -38,6 +38,7 @@ HPSA specific entries in /sys
+   ------------------------------
+ 
+   /sys/class/scsi_host/host*/rescan
++  /sys/class/scsi_host/host*/firmware_revision
+ 
+   the host "rescan" attribute is a write only attribute.  Writing to this
+   attribute will cause the driver to scan for new, changed, or removed devices
+@@ -48,6 +49,12 @@ HPSA specific entries in /sys
+   normally have to use this.  It may be useful when hot plugging devices like
+   tape drives, or entire storage boxes containing pre-configured logical drives.
+ 
++  The "firmware_revision" attribute contains the firmware version of the Smart Array.
++  For example:
++
++	root at host:/sys/class/scsi_host/host4# cat firmware_revision
++	7.14
++
+   HPSA specific disk attributes:
+   ------------------------------
+ 
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 50fddf8..4109107 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -148,6 +148,8 @@ static ssize_t lunid_show(struct device *dev,
+ 	struct device_attribute *attr, char *buf);
+ static ssize_t unique_id_show(struct device *dev,
+ 	struct device_attribute *attr, char *buf);
++static ssize_t host_show_firmware_revision(struct device *dev,
++	     struct device_attribute *attr, char *buf);
+ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
+ static ssize_t host_store_rescan(struct device *dev,
+ 	 struct device_attribute *attr, const char *buf, size_t count);
+@@ -165,6 +167,8 @@ static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
+ static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
+ static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
+ static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
++static DEVICE_ATTR(firmware_revision, S_IRUGO,
++	host_show_firmware_revision, NULL);
+ 
+ static struct device_attribute *hpsa_sdev_attrs[] = {
+ 	&dev_attr_raid_level,
+@@ -175,6 +179,7 @@ static struct device_attribute *hpsa_sdev_attrs[] = {
+ 
+ static struct device_attribute *hpsa_shost_attrs[] = {
+ 	&dev_attr_rescan,
++	&dev_attr_firmware_revision,
+ 	NULL,
+ };
+ 
+@@ -260,6 +265,21 @@ static ssize_t host_store_rescan(struct device *dev,
+ 	return count;
+ }
+ 
++static ssize_t host_show_firmware_revision(struct device *dev,
++	     struct device_attribute *attr, char *buf)
++{
++	struct ctlr_info *h;
++	struct Scsi_Host *shost = class_to_shost(dev);
++	unsigned char *fwrev;
++
++	h = shost_to_hba(shost);
++	if (!h->hba_inquiry_data)
++		return 0;
++	fwrev = &h->hba_inquiry_data[32];
++	return snprintf(buf, 20, "%c%c%c%c\n",
++		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
++}
++
+ /* Enqueuing and dequeuing functions for cmdlists. */
+ static inline void addQ(struct hlist_head *list, struct CommandList *c)
+ {
+-- 
+1.7.4.4
+
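
From user space the new attribute is just a sysfs file. A minimal reader,
assuming host4 as in the documentation example above (the host number
varies per system):

    #include <stdio.h>

    int main(void)
    {
            char buf[32];
            FILE *f = fopen("/sys/class/scsi_host/host4/firmware_revision",
                            "r");

            if (!f) {
                    perror("firmware_revision");
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("firmware revision: %s", buf); /* e.g. "7.14" */
            fclose(f);
            return 0;
    }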

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0064-SCSI-hpsa-fix-block-fetch-table-problem.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0064-SCSI-hpsa-fix-block-fetch-table-problem.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,60 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 27 May 2010 15:14:39 -0500
+Subject: [PATCH 64/79] [SCSI] hpsa: fix block fetch table problem.
+
+commit def342bd745d88ed73541b9c07cefb13d8c576fd upstream.
+
+We have 32 (MAXSGENTRIES) scatter gather elements embedded
+in the command.  With all these, the total command size is
+about 576 bytes.  However, the last entry in the block fetch table
+is 35.  (the block fetch table contains the number of 16-byte chunks
+the firmware needs to fetch for a given number of scatter gather
+elements.)  35 * 16 = 560 bytes, which isn't enough.  It needs to be
+36. (36 * 16 == 576) or, MAXSGENTRIES + 4.  (plus 4 because there's a
+bunch of stuff at the front of the command before the first scatter
+gather element that takes up 4 * 16 bytes.)  Without this fix, the
+controller may have to perform two DMA operations to fetch the
+command since the first one may not get the whole thing.
+
+Signed-off-by: Don Brace <brace at beardog.cce.hp.com>
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   21 ++++++++++++++++++++-
+ 1 files changed, 20 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 4109107..1133b5f 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3826,7 +3826,26 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
+ {
+ 	int i;
+ 	unsigned long register_value;
+-	int bft[8] = {5, 6, 8, 10, 12, 20, 28, 35}; /* for scatter/gathers */
++
++	/* This is a bit complicated.  There are 8 registers on
++	 * the controller which we write to to tell it 8 different
++	 * sizes of commands which there may be.  It's a way of
++	 * reducing the DMA done to fetch each command.  Encoded into
++	 * each command's tag are 3 bits which communicate to the controller
++	 * which of the eight sizes that command fits within.  The size of
++	 * each command depends on how many scatter gather entries there are.
++	 * Each SG entry requires 16 bytes.  The eight registers are programmed
++	 * with the number of 16-byte blocks a command of that size requires.
++	 * The smallest command possible requires 5 such 16 byte blocks.
++	 * the largest command possible requires MAXSGENTRIES + 4 16-byte
++	 * blocks.  Note, this only extends to the SG entries contained
++	 * within the command block, and does not extend to chained blocks
++	 * of SG elements.   bft[] contains the eight values we write to
++	 * the registers.  They are not evenly distributed, but have more
++	 * sizes for small commands, and fewer sizes for larger commands.
++	 */
++	int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
++	BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
+ 	/*  5 = 1 s/g entry or 4k
+ 	 *  6 = 2 s/g entry or 8k
+ 	 *  8 = 4 s/g entry or 16k
+-- 
+1.7.4.4
+
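
The arithmetic in the commit message is easy to verify directly: with 32
embedded SG entries at 16 bytes each plus 4 chunks of command header, the
largest command spans 36 sixteen-byte blocks, so a final bucket of 35
left the last 16 bytes unfetched. A throwaway check, with constants
mirroring the values quoted above:

    #include <assert.h>
    #include <stdio.h>

    #define MAXSGENTRIES  32    /* embedded scatter-gather entries */
    #define BLOCK_BYTES   16    /* firmware fetches 16-byte chunks */
    #define HEADER_BLOCKS  4    /* command bytes before the SG list */

    int main(void)
    {
            int cmd = (MAXSGENTRIES + HEADER_BLOCKS) * BLOCK_BYTES;

            printf("command size: %d bytes\n", cmd);              /* 576 */
            printf("old bucket:   %d bytes\n", 35 * BLOCK_BYTES); /* 560 */
            printf("new bucket:   %d bytes\n",
                   (MAXSGENTRIES + 4) * BLOCK_BYTES);             /* 576 */
            assert((MAXSGENTRIES + 4) * BLOCK_BYTES >= cmd);
            return 0;
    }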

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0065-SCSI-hpsa-add-new-controllers.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0065-SCSI-hpsa-add-new-controllers.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,45 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Wed, 16 Jun 2010 13:51:15 -0500
+Subject: [PATCH 65/79] [SCSI] hpsa: add new controllers
+
+commit 2e931f3176d61c693ace27498fdb823ef605e619 upstream.
+
+Add 5 CCISSE Smart Array controllers
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   10 ++++++++++
+ 1 files changed, 10 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 1133b5f..ec9b3a2 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -86,6 +86,11 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324a},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324b},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3250},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3251},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3252},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3253},
++	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3254},
+ #define PCI_DEVICE_ID_HP_CISSF 0x333f
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x333F},
+ 	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,             PCI_ANY_ID, PCI_ANY_ID,
+@@ -109,6 +114,11 @@ static struct board_type products[] = {
+ 	{0x324b103C, "Smart Array P711m", &SA5_access},
+ 	{0x3233103C, "StorageWorks P1210m", &SA5_access},
+ 	{0x333F103C, "StorageWorks P1210m", &SA5_access},
++	{0x3250103C, "Smart Array", &SA5_access},
++	{0x3250113C, "Smart Array", &SA5_access},
++	{0x3250123C, "Smart Array", &SA5_access},
++	{0x3250133C, "Smart Array", &SA5_access},
++	{0x3250143C, "Smart Array", &SA5_access},
+ 	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
+ };
+ 
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0066-SCSI-hpsa-Make-hpsa_allow_any-1-boot-param-enable-Co.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0066-SCSI-hpsa-Make-hpsa_allow_any-1-boot-param-enable-Co.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,42 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Wed, 16 Jun 2010 13:51:20 -0500
+Subject: [PATCH 66/79] [SCSI] hpsa: Make "hpsa_allow_any=1" boot param enable
+ Compaq Smart Arrays.
+
+commit 6798cc0a4964ceabc27de762c41929f8a875e3fd upstream.
+
+We were previously only accepting HP boards.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    6 +++++-
+ 1 files changed, 5 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index ec9b3a2..25faaae 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -95,6 +95,8 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x333F},
+ 	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,             PCI_ANY_ID, PCI_ANY_ID,
+ 		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
++	{PCI_VENDOR_ID_COMPAQ,     PCI_ANY_ID,             PCI_ANY_ID, PCI_ANY_ID,
++		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
+ 	{0,}
+ };
+ 
+@@ -3293,7 +3295,9 @@ static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
+ 		if (*board_id == products[i].board_id)
+ 			return i;
+ 
+-	if (subsystem_vendor_id != PCI_VENDOR_ID_HP || !hpsa_allow_any) {
++	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
++		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
++		!hpsa_allow_any) {
+ 		dev_warn(&pdev->dev, "unrecognized board ID: "
+ 			"0x%08x, ignoring.\n", *board_id);
+ 			return -ENODEV;
+-- 
+1.7.4.4
+
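
After this change the acceptance test for boards not in the ID table is:
the subsystem vendor must be HP or Compaq, and the user must have passed
hpsa_allow_any=1. Expressed as a standalone predicate (vendor IDs are the
standard PCI values; the sketch is illustrative, not the driver's code):

    #include <stdbool.h>
    #include <stdint.h>

    #define PCI_VENDOR_ID_HP     0x103c
    #define PCI_VENDOR_ID_COMPAQ 0x0e11

    bool accept_unknown_board(uint16_t subsys_vendor, bool allow_any)
    {
            if (!allow_any)
                    return false;
            return subsys_vendor == PCI_VENDOR_ID_HP ||
                   subsys_vendor == PCI_VENDOR_ID_COMPAQ;
    }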

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0067-SCSI-hpsa-make-hpsa_find_memory_BAR-not-require-the-.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0067-SCSI-hpsa-make-hpsa_find_memory_BAR-not-require-the-.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,59 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Wed, 16 Jun 2010 13:51:25 -0500
+Subject: [PATCH 67/79] [SCSI] hpsa: make hpsa_find_memory_BAR not require the
+ per HBA structure.
+
+commit 12d2cd4711b9e3ddcf911281ec4fe511b5cfff63 upstream.
+
+Rationale for this is that in order to fix the hard reset code used
+by kdump, we need to use this function before we even have the per
+HBA structure.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   12 ++++++------
+ 1 files changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 25faaae..f5305a4 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3313,20 +3313,20 @@ static inline bool hpsa_board_disabled(struct pci_dev *pdev)
+ 	return ((command & PCI_COMMAND_MEMORY) == 0);
+ }
+ 
+-static int __devinit hpsa_pci_find_memory_BAR(struct ctlr_info *h,
++static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
+ 	unsigned long *memory_bar)
+ {
+ 	int i;
+ 
+ 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+-		if (pci_resource_flags(h->pdev, i) & IORESOURCE_MEM) {
++		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ 			/* addressing mode bits already removed */
+-			*memory_bar = pci_resource_start(h->pdev, i);
+-			dev_dbg(&h->pdev->dev, "memory BAR = %lx\n",
++			*memory_bar = pci_resource_start(pdev, i);
++			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
+ 				*memory_bar);
+ 			return 0;
+ 		}
+-	dev_warn(&h->pdev->dev, "no memory BAR found\n");
++	dev_warn(&pdev->dev, "no memory BAR found\n");
+ 	return -ENODEV;
+ }
+ 
+@@ -3503,7 +3503,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
+ 		return err;
+ 	}
+ 	hpsa_interrupt_mode(h);
+-	err = hpsa_pci_find_memory_BAR(h, &h->paddr);
++	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
+ 	if (err)
+ 		goto err_out_free_res;
+ 	h->vaddr = remap_pci_mem(h->paddr, 0x250);
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0068-SCSI-hpsa-factor-out-hpsa_find_cfg_addrs.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0068-SCSI-hpsa-factor-out-hpsa_find_cfg_addrs.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,74 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Wed, 16 Jun 2010 13:51:30 -0500
+Subject: [PATCH 68/79] [SCSI] hpsa: factor out hpsa_find_cfg_addrs.
+
+commit a51fd47f1b8f2b9937011c433269d2ec182b9879 upstream.
+
+Rationale for this is that I will also need to use this code
+in fixing kdump host reset code prior to having the hba structure.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   34 ++++++++++++++++++++++------------
+ 1 files changed, 22 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index f5305a4..44f81a0 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3345,29 +3345,39 @@ static int __devinit hpsa_wait_for_board_ready(struct ctlr_info *h)
+ 	return -ENODEV;
+ }
+ 
++static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
++	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
++	u64 *cfg_offset)
++{
++	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
++	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
++	*cfg_base_addr &= (u32) 0x0000ffff;
++	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
++	if (*cfg_base_addr_index == -1) {
++		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
++		return -ENODEV;
++	}
++	return 0;
++}
++
+ static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
+ {
+ 	u64 cfg_offset;
+ 	u32 cfg_base_addr;
+ 	u64 cfg_base_addr_index;
+ 	u32 trans_offset;
++	int rc;
+ 
+-	/* get the address index number */
+-	cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET);
+-	cfg_base_addr &= (u32) 0x0000ffff;
+-	cfg_base_addr_index = find_PCI_BAR_index(h->pdev, cfg_base_addr);
+-	if (cfg_base_addr_index == -1) {
+-		dev_warn(&h->pdev->dev, "cannot find cfg_base_addr_index\n");
+-		return -ENODEV;
+-	}
+-	cfg_offset = readl(h->vaddr + SA5_CTMEM_OFFSET);
++	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
++		&cfg_base_addr_index, &cfg_offset);
++	if (rc)
++		return rc;
+ 	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
+-			       cfg_base_addr_index) + cfg_offset,
+-				sizeof(h->cfgtable));
++		       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
+ 	if (!h->cfgtable)
+ 		return -ENOMEM;
+ 	/* Find performant mode table. */
+-	trans_offset = readl(&(h->cfgtable->TransMethodOffset));
++	trans_offset = readl(&h->cfgtable->TransMethodOffset);
+ 	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
+ 				cfg_base_addr_index)+cfg_offset+trans_offset,
+ 				sizeof(*h->transtable));
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0069-SCSI-hpsa-factor-out-the-code-to-reset-controllers-o.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0069-SCSI-hpsa-factor-out-the-code-to-reset-controllers-o.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,86 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Wed, 16 Jun 2010 13:51:35 -0500
+Subject: [PATCH 69/79] [SCSI] hpsa: factor out the code to reset controllers
+ on driver load
+
+commit 4c2a8c40d877effc25774f00406a4a7df1967102 upstream.
+
+for kdump support
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   49 ++++++++++++++++++++++++++++++-------------------
+ 1 files changed, 30 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 44f81a0..b2f4785 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3571,33 +3571,44 @@ static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
+ 	}
+ }
+ 
++static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
++{
++	int i;
++
++	if (!reset_devices)
++		return 0;
++
++	/* Reset the controller with a PCI power-cycle */
++	if (hpsa_hard_reset_controller(pdev) || hpsa_reset_msi(pdev))
++		return -ENODEV;
++
++	/* Some devices (notably the HP Smart Array 5i Controller)
++	   need a little pause here */
++	msleep(HPSA_POST_RESET_PAUSE_MSECS);
++
++	/* Now try to get the controller to respond to a no-op */
++	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
++		if (hpsa_noop(pdev) == 0)
++			break;
++		else
++			dev_warn(&pdev->dev, "no-op failed%s\n",
++					(i < 11 ? "; re-trying" : ""));
++	}
++	return 0;
++}
++
+ static int __devinit hpsa_init_one(struct pci_dev *pdev,
+ 				    const struct pci_device_id *ent)
+ {
+-	int i, rc;
+-	int dac;
++	int dac, rc;
+ 	struct ctlr_info *h;
+ 
+ 	if (number_of_controllers == 0)
+ 		printk(KERN_INFO DRIVER_NAME "\n");
+-	if (reset_devices) {
+-		/* Reset the controller with a PCI power-cycle */
+-		if (hpsa_hard_reset_controller(pdev) || hpsa_reset_msi(pdev))
+-			return -ENODEV;
+-
+-		/* Some devices (notably the HP Smart Array 5i Controller)
+-		   need a little pause here */
+-		msleep(HPSA_POST_RESET_PAUSE_MSECS);
+ 
+-		/* Now try to get the controller to respond to a no-op */
+-		for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
+-			if (hpsa_noop(pdev) == 0)
+-				break;
+-			else
+-				dev_warn(&pdev->dev, "no-op failed%s\n",
+-						(i < 11 ? "; re-trying" : ""));
+-		}
+-	}
++	rc = hpsa_init_reset_devices(pdev);
++	if (rc)
++		return rc;
+ 
+ 	/* Command structures must be aligned on a 32-byte boundary because
+ 	 * the 5 lower bits of the address are used by the hardware. and by
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0070-SCSI-hpsa-Fix-hard-reset-code.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0070-SCSI-hpsa-Fix-hard-reset-code.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,293 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Wed, 16 Jun 2010 13:51:40 -0500
+Subject: [PATCH 70/79] [SCSI] hpsa: Fix hard reset code.
+
+commit 1df8552abf36519ca8b9e2a8d1e204bac2076d51 upstream.
+
+Smart Array controllers newer than the P600 do not honor the
+PCI power state method of resetting the controllers.  Instead,
+in these cases we can get them to reset via the "doorbell" register.
+
+This escaped notice until we began using "performant" mode because
+the fact that the controllers did not reset did not normally
+impede subsequent operation, and so things generally appeared to
+"work".  Once the performant mode code was added, if the controller
+does not reset, it remains in performant mode.  The code immediately
+after the reset presumes the controller is in "simple" mode
+(previously, it had remained in simple mode the whole time).
+If the controller remains in performant mode any code which presumes
+it is in simple mode will not work.  So the reset needs to be fixed.
+
+Unfortunately there are some controllers which cannot be reset by
+either method (e.g. the P800).  We detect these cases by noticing that
+the controller seems to remain in performant mode even after a
+reset has been attempted.  In those cases, we proceed anyway,
+as if the reset has happened (and skip the step of waiting for
+the controller to become ready -- which expects it to be in
+"simple" mode.)  To sum up, we try to do a better job of resetting
+the controller if "reset_devices" is set, and if it doesn't work,
+we print a message and try to continue anyway.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c     |  174 ++++++++++++++++++++++++++++++++++++-----------
+ drivers/scsi/hpsa_cmd.h |    4 +
+ 2 files changed, 137 insertions(+), 41 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index b2f4785..f57d533 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -174,6 +174,11 @@ static void calc_bucket_map(int *bucket, int num_buckets,
+ 	int nsgs, int *bucket_map);
+ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
+ static inline u32 next_command(struct ctlr_info *h);
++static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
++	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
++	u64 *cfg_offset);
++static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
++	unsigned long *memory_bar);
+ 
+ static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
+ static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
+@@ -3078,17 +3083,75 @@ static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
+ 	return 0;
+ }
+ 
++static int hpsa_controller_hard_reset(struct pci_dev *pdev,
++	void * __iomem vaddr, bool use_doorbell)
++{
++	u16 pmcsr;
++	int pos;
++
++	if (use_doorbell) {
++		/* For everything after the P600, the PCI power state method
++		 * of resetting the controller doesn't work, so we have this
++		 * other way using the doorbell register.
++		 */
++		dev_info(&pdev->dev, "using doorbell to reset controller\n");
++		writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL);
++		msleep(1000);
++	} else { /* Try to do it the PCI power state way */
++
++		/* Quoting from the Open CISS Specification: "The Power
++		 * Management Control/Status Register (CSR) controls the power
++		 * state of the device.  The normal operating state is D0,
++		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
++		 * the controller, place the interface device in D3 then to D0,
++		 * this causes a secondary PCI reset which will reset the
++		 * controller." */
++
++		pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
++		if (pos == 0) {
++			dev_err(&pdev->dev,
++				"hpsa_reset_controller: "
++				"PCI PM not supported\n");
++			return -ENODEV;
++		}
++		dev_info(&pdev->dev, "using PCI PM to reset controller\n");
++		/* enter the D3hot power management state */
++		pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
++		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
++		pmcsr |= PCI_D3hot;
++		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
++
++		msleep(500);
++
++		/* enter the D0 power management state */
++		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
++		pmcsr |= PCI_D0;
++		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
++
++		msleep(500);
++	}
++	return 0;
++}
++
+ /* This does a hard reset of the controller using PCI power management
+- * states.
++ * states or the using the doorbell register.
+  */
+-static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev)
++static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
+ {
+-	u16 pmcsr, saved_config_space[32];
+-	int i, pos;
++	u16 saved_config_space[32];
++	u64 cfg_offset;
++	u32 cfg_base_addr;
++	u64 cfg_base_addr_index;
++	void __iomem *vaddr;
++	unsigned long paddr;
++	u32 misc_fw_support, active_transport;
++	int rc, i;
++	struct CfgTable __iomem *cfgtable;
++	bool use_doorbell;
+ 
+-	dev_info(&pdev->dev, "using PCI PM to reset controller\n");
+ 
+-	/* This is very nearly the same thing as
++	/* For controllers as old as the P600, this is very nearly
++	 * the same thing as
+ 	 *
+ 	 * pci_save_state(pci_dev);
+ 	 * pci_set_power_state(pci_dev, PCI_D3hot);
+@@ -3102,41 +3165,42 @@ static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev)
+ 	 * violate the ordering requirements for restoring the
+ 	 * configuration space from the CCISS document (see the
+ 	 * comment below).  So we roll our own ....
++	 *
++	 * For controllers newer than the P600, the pci power state
++	 * method of resetting doesn't work so we have another way
++	 * using the doorbell register.
+ 	 */
+-
+ 	for (i = 0; i < 32; i++)
+ 		pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
+ 
+-	pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
+-	if (pos == 0) {
+-		dev_err(&pdev->dev,
+-			"hpsa_reset_controller: PCI PM not supported\n");
+-		return -ENODEV;
+-	}
+-
+-	/* Quoting from the Open CISS Specification: "The Power
+-	 * Management Control/Status Register (CSR) controls the power
+-	 * state of the device.  The normal operating state is D0,
+-	 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
+-	 * the controller, place the interface device in D3 then to
+-	 * D0, this causes a secondary PCI reset which will reset the
+-	 * controller."
+-	 */
+ 
+-	/* enter the D3hot power management state */
+-	pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
+-	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+-	pmcsr |= PCI_D3hot;
+-	pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
++	/* find the first memory BAR, so we can find the cfg table */
++	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
++	if (rc)
++		return rc;
++	vaddr = remap_pci_mem(paddr, 0x250);
++	if (!vaddr)
++		return -ENOMEM;
+ 
+-	msleep(500);
++	/* find cfgtable in order to check if reset via doorbell is supported */
++	rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
++					&cfg_base_addr_index, &cfg_offset);
++	if (rc)
++		goto unmap_vaddr;
++	cfgtable = remap_pci_mem(pci_resource_start(pdev,
++		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
++	if (!cfgtable) {
++		rc = -ENOMEM;
++		goto unmap_vaddr;
++	}
+ 
+-	/* enter the D0 power management state */
+-	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+-	pmcsr |= PCI_D0;
+-	pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
++	/* If reset via doorbell register is supported, use that. */
++	misc_fw_support = readl(&cfgtable->misc_fw_support);
++	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
+ 
+-	msleep(500);
++	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
++	if (rc)
++		goto unmap_cfgtable;
+ 
+ 	/* Restore the PCI configuration space.  The Open CISS
+ 	 * Specification says, "Restore the PCI Configuration
+@@ -3153,7 +3217,29 @@ static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev)
+ 	wmb();
+ 	pci_write_config_word(pdev, 4, saved_config_space[2]);
+ 
+-	return 0;
++	/* Some devices (notably the HP Smart Array 5i Controller)
++	   need a little pause here */
++	msleep(HPSA_POST_RESET_PAUSE_MSECS);
++
++	/* Controller should be in simple mode at this point.  If it's not,
++	 * It means we're on one of those controllers which doesn't support
++	 * the doorbell reset method and on which the PCI power management reset
++	 * method doesn't work (P800, for example.)
++	 * In those cases, pretend the reset worked and hope for the best.
++	 */
++	active_transport = readl(&cfgtable->TransportActive);
++	if (active_transport & PERFORMANT_MODE) {
++		dev_warn(&pdev->dev, "Unable to successfully reset controller,"
++			" proceeding anyway.\n");
++		rc = -ENOTSUPP;
++	}
++
++unmap_cfgtable:
++	iounmap(cfgtable);
++
++unmap_vaddr:
++	iounmap(vaddr);
++	return rc;
+ }
+ 
+ /*
+@@ -3573,18 +3659,24 @@ static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
+ 
+ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
+ {
+-	int i;
++	int rc, i;
+ 
+ 	if (!reset_devices)
+ 		return 0;
+ 
+-	/* Reset the controller with a PCI power-cycle */
+-	if (hpsa_hard_reset_controller(pdev) || hpsa_reset_msi(pdev))
+-		return -ENODEV;
++	/* Reset the controller with a PCI power-cycle or via doorbell */
++	rc = hpsa_kdump_hard_reset_controller(pdev);
+ 
+-	/* Some devices (notably the HP Smart Array 5i Controller)
+-	   need a little pause here */
+-	msleep(HPSA_POST_RESET_PAUSE_MSECS);
++	/* -ENOTSUPP here means we cannot reset the controller
++	 * but it's already (and still) up and running in
++	 * "performant mode".
++	 */
++	if (rc == -ENOTSUPP)
++		return 0; /* just try to do the kdump anyhow. */
++	if (rc)
++		return -ENODEV;
++	if (hpsa_reset_msi(pdev))
++		return -ENODEV;
+ 
+ 	/* Now try to get the controller to respond to a no-op */
+ 	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
+diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
+index 78de9b6..f5c4c3c 100644
+--- a/drivers/scsi/hpsa_cmd.h
++++ b/drivers/scsi/hpsa_cmd.h
+@@ -100,6 +100,7 @@
+ /* Configuration Table */
+ #define CFGTBL_ChangeReq        0x00000001l
+ #define CFGTBL_AccCmds          0x00000001l
++#define DOORBELL_CTLR_RESET	0x00000004l
+ 
+ #define CFGTBL_Trans_Simple     0x00000002l
+ #define CFGTBL_Trans_Performant 0x00000004l
+@@ -339,6 +340,9 @@ struct CfgTable {
+ 	u32		MaxPhysicalDevices;
+ 	u32		MaxPhysicalDrivesPerLogicalUnit;
+ 	u32		MaxPerformantModeCommands;
++	u8		reserved[0x78 - 0x58];
++	u32		misc_fw_support; /* offset 0x78 */
++#define			MISC_FW_DOORBELL_RESET (0x02)
+ };
+ 
+ #define NUM_BLOCKFETCH_ENTRIES 8
+-- 
+1.7.4.4
+
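
The heart of the new reset path is a capability probe followed by a
choice of mechanism: if the config table's misc firmware support word
advertises doorbell reset, write the reset bit to the doorbell register;
otherwise fall back to the D3hot-to-D0 power-state cycle. A compact
restatement of just that decision -- the two constants are the ones this
patch adds, while write_doorbell() and pci_pm_reset() are hypothetical
stand-ins for the MMIO write and the config-space sequence:

    #include <stdint.h>

    #define MISC_FW_DOORBELL_RESET 0x02u  /* bit in misc_fw_support */
    #define DOORBELL_CTLR_RESET    0x04u  /* written to SA5_DOORBELL */

    extern void write_doorbell(uint32_t val); /* hypothetical MMIO write */
    extern int pci_pm_reset(void);            /* hypothetical D3hot->D0 */

    int hard_reset(uint32_t misc_fw_support)
    {
            if (misc_fw_support & MISC_FW_DOORBELL_RESET) {
                    write_doorbell(DOORBELL_CTLR_RESET); /* post-P600 */
                    return 0;
            }
            return pci_pm_reset(); /* older boards: PCI power states */
    }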

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0071-SCSI-hpsa-forbid-hard-reset-of-640x-boards.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0071-SCSI-hpsa-forbid-hard-reset-of-640x-boards.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,73 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Wed, 16 Jun 2010 13:51:45 -0500
+Subject: [PATCH 71/79] [SCSI] hpsa: forbid hard reset of 640x boards
+
+commit 1886765906686cdb08c35afae20e4ad8f82367f5 upstream.
+
+The 6402/6404 are two PCI devices -- two Smart Array controllers
+-- that fit into one slot.  It is possible to reset them independently;
+however, they share a battery-backed cache module.  One of the pair
+controls the cache and the second one accesses the cache through the
+first one.  If you reset the one controlling the cache, the other will
+not be a happy camper.  So we just forbid resetting this conjoined
+mess.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   18 ++++++++++++++++--
+ 1 files changed, 16 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index f57d533..d903cc6 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -179,6 +179,7 @@ static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
+ 	u64 *cfg_offset);
+ static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
+ 	unsigned long *memory_bar);
++static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
+ 
+ static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
+ static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
+@@ -3148,7 +3149,7 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
+ 	int rc, i;
+ 	struct CfgTable __iomem *cfgtable;
+ 	bool use_doorbell;
+-
++	u32 board_id;
+ 
+ 	/* For controllers as old as the P600, this is very nearly
+ 	 * the same thing as
+@@ -3170,6 +3171,18 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
+ 	 * method of resetting doesn't work so we have another way
+ 	 * using the doorbell register.
+ 	 */
++
++	/* Exclude 640x boards.  These are two pci devices in one slot
++	 * which share a battery backed cache module.  One controls the
++	 * cache, the other accesses the cache through the one that controls
++	 * it.  If we reset the one controlling the cache, the other will
++	 * likely not be happy.  Just forbid resetting this conjoined mess.
++	 * The 640x isn't really supported by hpsa anyway.
++	 */
++	hpsa_lookup_board_id(pdev, &board_id);
++	if (board_id == 0x409C0E11 || board_id == 0x409D0E11)
++		return -ENOTSUPP;
++
+ 	for (i = 0; i < 32; i++)
+ 		pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
+ 
+@@ -3669,7 +3682,8 @@ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
+ 
+ 	/* -ENOTSUPP here means we cannot reset the controller
+ 	 * but it's already (and still) up and running in
+-	 * "performant mode".
++	 * "performant mode".  Or, it might be 640x, which can't reset
++	 * due to concerns about shared bbwc between 6402/6404 pair.
+ 	 */
+ 	if (rc == -ENOTSUPP)
+ 		return 0; /* just try to do the kdump anyhow. */
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0072-SCSI-hpsa-separate-intx-and-msi-msix-interrupt-handl.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0072-SCSI-hpsa-separate-intx-and-msi-msix-interrupt-handl.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,103 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Wed, 16 Jun 2010 13:51:50 -0500
+Subject: [PATCH 72/79] [SCSI] hpsa: separate intx and msi/msix interrupt
+ handlers
+
+commit 10f66018088fd0c9fe81b1e328e3264c7b10caa5 upstream.
+
+Some things need to be done in the intx interrupt handler
+that do not need to be done in the msi/msix handler: checking
+that the interrupt is actually for us, and checking that the
+hardware's interrupt-pending bit is set (which we weren't
+previously doing at all, meaning old controllers wouldn't
+work).  So it makes sense to separate these into two
+functions.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   39 ++++++++++++++++++++++++++++++++-------
+ 1 files changed, 32 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index d903cc6..f8b614b 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -126,7 +126,8 @@ static struct board_type products[] = {
+ 
+ static int number_of_controllers;
+ 
+-static irqreturn_t do_hpsa_intr(int irq, void *dev_id);
++static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
++static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
+ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
+ static void start_io(struct ctlr_info *h);
+ 
+@@ -2858,9 +2859,8 @@ static inline bool interrupt_pending(struct ctlr_info *h)
+ 
+ static inline long interrupt_not_for_us(struct ctlr_info *h)
+ {
+-	return !(h->msi_vector || h->msix_vector) &&
+-		((h->access.intr_pending(h) == 0) ||
+-		(h->interrupts_enabled == 0));
++	return (h->access.intr_pending(h) == 0) ||
++		(h->interrupts_enabled == 0);
+ }
+ 
+ static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
+@@ -2934,7 +2934,7 @@ static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
+ 	return next_command(h);
+ }
+ 
+-static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
++static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
+ {
+ 	struct ctlr_info *h = dev_id;
+ 	unsigned long flags;
+@@ -2943,6 +2943,26 @@ static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
+ 	if (interrupt_not_for_us(h))
+ 		return IRQ_NONE;
+ 	spin_lock_irqsave(&h->lock, flags);
++	while (interrupt_pending(h)) {
++		raw_tag = get_next_completion(h);
++		while (raw_tag != FIFO_EMPTY) {
++			if (hpsa_tag_contains_index(raw_tag))
++				raw_tag = process_indexed_cmd(h, raw_tag);
++			else
++				raw_tag = process_nonindexed_cmd(h, raw_tag);
++		}
++	}
++	spin_unlock_irqrestore(&h->lock, flags);
++	return IRQ_HANDLED;
++}
++
++static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
++{
++	struct ctlr_info *h = dev_id;
++	unsigned long flags;
++	u32 raw_tag;
++
++	spin_lock_irqsave(&h->lock, flags);
+ 	raw_tag = get_next_completion(h);
+ 	while (raw_tag != FIFO_EMPTY) {
+ 		if (hpsa_tag_contains_index(raw_tag))
+@@ -3754,8 +3774,13 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
+ 
+ 	/* make sure the board interrupts are off */
+ 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
+-	rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr,
+-			IRQF_DISABLED, h->devname, h);
++
++	if (h->msix_vector || h->msi_vector)
++		rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_msi,
++				IRQF_DISABLED, h->devname, h);
++	else
++		rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_intx,
++				IRQF_DISABLED, h->devname, h);
+ 	if (rc) {
+ 		dev_err(&pdev->dev, "unable to get irq %d for %s\n",
+ 		       h->intr[PERF_MODE_INT], h->devname);
+-- 
+1.7.4.4
+
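
The reason for the split is worth spelling out: a legacy INTx line may be
shared with other devices, so its handler has to confirm the interrupt really
came from this controller before reaping completions, whereas an MSI/MSI-X
vector is exclusive and can skip that check (and the register read it costs).
A compilable sketch of the control flow, using illustrative types rather than
the real driver structures:

#include <stdbool.h>
#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

struct ctlr {
	bool intx_pending;	/* hardware "interrupt pending" bit */
	int completions;	/* completions waiting to be reaped */
};

static void process_completions(struct ctlr *h)
{
	h->completions = 0;	/* reap everything queued */
}

/* INTx: the line may be shared, so verify the source first. */
static enum irqreturn intr_intx(struct ctlr *h)
{
	if (!h->intx_pending)
		return IRQ_NONE;	/* someone else's interrupt */
	process_completions(h);
	return IRQ_HANDLED;
}

/* MSI/MSI-X: the vector is ours alone; no pending check needed. */
static enum irqreturn intr_msi(struct ctlr *h)
{
	process_completions(h);
	return IRQ_HANDLED;
}

int main(void)
{
	struct ctlr h = { .intx_pending = false, .completions = 3 };

	printf("%d\n", intr_intx(&h));	/* 0: IRQ_NONE, nothing touched */
	h.intx_pending = true;
	printf("%d\n", intr_intx(&h));	/* 1: IRQ_HANDLED */
	printf("%d\n", intr_msi(&h));	/* 1: always handles its vector */
	return 0;
}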

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0073-SCSI-hpsa-sanitize-max-commands.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0073-SCSI-hpsa-sanitize-max-commands.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,61 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Wed, 16 Jun 2010 13:51:56 -0500
+Subject: [PATCH 73/79] [SCSI] hpsa: sanitize max commands
+
+commit cba3d38b6cf85bd91b7c6f65f43863d1fd19259c upstream.
+
+Some controllers might try to tell us they support 0 commands
+in performant mode.  This is a lie told by buggy firmware.
+We have to be wary of this lest we try to allocate a negative
+number of command blocks, which will be treated as unsigned,
+and get an out of memory condition.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |   16 ++++++++++++++--
+ 1 files changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index f8b614b..4f5551b 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3505,13 +3505,25 @@ static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
+ 	return 0;
+ }
+ 
++static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
++{
++	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
++	if (h->max_commands < 16) {
++		dev_warn(&h->pdev->dev, "Controller reports "
++			"max supported commands of %d, an obvious lie. "
++			"Using 16.  Ensure that firmware is up to date.\n",
++			h->max_commands);
++		h->max_commands = 16;
++	}
++}
++
+ /* Interrogate the hardware for some limits:
+  * max commands, max SG elements without chaining, and with chaining,
+  * SG chain block size, etc.
+  */
+ static void __devinit hpsa_find_board_params(struct ctlr_info *h)
+ {
+-	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
++	hpsa_get_max_perf_mode_cmds(h);
+ 	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
+ 	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
+ 	/*
+@@ -4056,7 +4068,7 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
+ 	if (!(trans_support & PERFORMANT_MODE))
+ 		return;
+ 
+-	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
++	hpsa_get_max_perf_mode_cmds(h);
+ 	h->max_sg_entries = 32;
+ 	/* Performant mode ring buffer and supporting data structures */
+ 	h->reply_pool_size = h->max_commands * sizeof(u64);
+-- 
+1.7.4.4
+
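
The "negative number treated as unsigned" hazard mentioned above is concrete:
nr_cmds is derived as max_commands - 4 in unsigned arithmetic, so a bogus 0
from firmware wraps around to a huge allocation count.  A small demonstration
(field names follow the patch; the wraparound itself is just standard C):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t max_commands = 0;		/* buggy firmware's claim */
	uint32_t nr_cmds = max_commands - 4;	/* wraps: u32 has no negatives */

	printf("nr_cmds = %u\n", nr_cmds);	/* 4294967292 */

	/* the fix clamps the firmware value before any arithmetic */
	if (max_commands < 16)
		max_commands = 16;
	nr_cmds = max_commands - 4;
	printf("after clamp: %u\n", nr_cmds);	/* 12 */
	return 0;
}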

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0074-SCSI-hpsa-disable-doorbell-reset-on-reset_devices.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0074-SCSI-hpsa-disable-doorbell-reset-on-reset_devices.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,42 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Wed, 25 Aug 2010 10:44:14 -0500
+Subject: [PATCH 74/79] [SCSI] hpsa: disable doorbell reset on reset_devices
+
+commit 36ed2176fedaa180b8ea3cdacf68c958e0090a3c upstream.
+
+The doorbell reset initially appears to work correctly:
+the controller resets, comes up, and some i/o can even be
+done.  But on at least some Smart Arrays in some servers,
+it eventually causes a subsequent controller lockup due
+to some kind of PCIe error, and kdump can end up leaving
+the root filesystem in an unbootable state.  For this
+reason, until the problem is fixed, or at least isolated to
+certain hardware well enough to be avoided, the doorbell
+reset should not be used at all.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    6 ++++++
+ 1 files changed, 6 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 4f5551b..c5d0606 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -3231,6 +3231,12 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
+ 	misc_fw_support = readl(&cfgtable->misc_fw_support);
+ 	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
+ 
++	/* The doorbell reset seems to cause lockups on some Smart
++	 * Arrays (e.g. P410, P410i, maybe others).  Until this is
++	 * fixed or at least isolated, avoid the doorbell reset.
++	 */
++	use_doorbell = 0;
++
+ 	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
+ 	if (rc)
+ 		goto unmap_cfgtable;
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0077-SCSI-hpsa-fix-redefinition-of-PCI_DEVICE_ID_CISSF.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0077-SCSI-hpsa-fix-redefinition-of-PCI_DEVICE_ID_CISSF.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,47 @@
+From: Mike Miller <mike.miller at hp.com>
+Date: Wed, 1 Dec 2010 11:16:07 -0600
+Subject: [PATCH 77/79] [SCSI] hpsa: fix redefinition of PCI_DEVICE_ID_CISSF
+
+commit 7c03b87048f2467087ae7e51392e5fb57ea8f58b upstream.
+
+PCI_DEVICE_ID_CISSF is defined as 323b in pci_ids.h but redefined as 333f in
+hpsa.c. The ID of 333f will _never_ ship as a standalone controller. It is
+intended only as part of a complete storage solution. As such, this patch
+removes the redefinition and the StorageWorks P1210m from the product table.
+
+It also removes a duplicate line for the "unknown" controller support.
+
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    8 +-------
+ 1 files changed, 1 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index b2fb2b2..a6dea08 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -90,11 +90,7 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3252},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3253},
+ 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3254},
+-#define PCI_DEVICE_ID_HP_CISSF 0x333f
+-	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x333F},
+-	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,             PCI_ANY_ID, PCI_ANY_ID,
+-		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
+-	{PCI_VENDOR_ID_COMPAQ,     PCI_ANY_ID,             PCI_ANY_ID, PCI_ANY_ID,
++	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
+ 		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
+ 	{0,}
+ };
+@@ -113,8 +109,6 @@ static struct board_type products[] = {
+ 	{0x3249103C, "Smart Array P812", &SA5_access},
+ 	{0x324a103C, "Smart Array P712m", &SA5_access},
+ 	{0x324b103C, "Smart Array P711m", &SA5_access},
+-	{0x3233103C, "StorageWorks P1210m", &SA5_access},
+-	{0x333F103C, "StorageWorks P1210m", &SA5_access},
+ 	{0x3250103C, "Smart Array", &SA5_access},
+ 	{0x3250113C, "Smart Array", &SA5_access},
+ 	{0x3250123C, "Smart Array", &SA5_access},
+-- 
+1.7.4.4
+
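
The surviving catch-all entries depend on the PCI core's class matching: with
device set to PCI_ANY_ID, an entry still only matches when
(dev->class & class_mask) == class.  The class value is 24 bits (base class,
subclass, prog-if), so PCI_CLASS_STORAGE_RAID (0x0104) shifted left by 8 with
a mask of 0xffff << 8 matches any HP device in the RAID class, whatever its
programming interface.  A simplified sketch of the predicate (the real
pci_match_one_device() also checks subvendor and subdevice):

#include <stdio.h>
#include <stdint.h>

#define PCI_ANY_ID		(~0u)
#define PCI_CLASS_STORAGE_RAID	0x0104	/* base class 01, subclass 04 */

struct id_entry {
	uint32_t vendor, device;
	uint32_t class_code, class_mask;
};

/* simplified class/vendor/device match, as done by the PCI core */
static int matches(const struct id_entry *e,
		   uint32_t vendor, uint32_t device, uint32_t class)
{
	return (e->vendor == PCI_ANY_ID || e->vendor == vendor) &&
	       (e->device == PCI_ANY_ID || e->device == device) &&
	       (class & e->class_mask) == e->class_code;
}

int main(void)
{
	/* the hpsa catch-all: any HP device whose class is RAID */
	struct id_entry e = {
		.vendor		= 0x103C,	/* PCI_VENDOR_ID_HP */
		.device		= PCI_ANY_ID,
		.class_code	= PCI_CLASS_STORAGE_RAID << 8,
		.class_mask	= 0xffff << 8,	/* ignore the prog-if byte */
	};

	printf("%d\n", matches(&e, 0x103C, 0x3241, 0x010400));	/* 1 */
	printf("%d\n", matches(&e, 0x103C, 0x3241, 0x010600));	/* 0: SATA, not RAID */
	return 0;
}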

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0078-SCSI-hpsa-do-not-consider-firmware-revision-when-loo.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0078-SCSI-hpsa-do-not-consider-firmware-revision-when-loo.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,67 @@
+From: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Date: Thu, 16 Dec 2010 13:00:58 -0600
+Subject: [PATCH 78/79] [SCSI] hpsa: do not consider firmware revision when
+ looking for device changes.
+
+commit a0b89872b305bd0f6f5af1dd26274a3f057a2303 upstream.
+
+The firmware may have been updated, in which case it is still the same
+device; we do not want to remove and re-add the device, we want to
+let it continue as is.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    9 ---------
+ drivers/scsi/hpsa.h |    1 -
+ 2 files changed, 0 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index a6dea08..a2408e5 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -641,11 +641,6 @@ static void fixup_botched_add(struct ctlr_info *h,
+ static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
+ 	struct hpsa_scsi_dev_t *dev2)
+ {
+-	if ((is_logical_dev_addr_mode(dev1->scsi3addr) ||
+-		(dev1->lun != -1 && dev2->lun != -1)) &&
+-		dev1->devtype != 0x0C)
+-		return (memcmp(dev1, dev2, sizeof(*dev1)) == 0);
+-
+ 	/* we compare everything except lun and target as these
+ 	 * are not yet assigned.  Compare parts likely
+ 	 * to differ first
+@@ -660,8 +655,6 @@ static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
+ 		return 0;
+ 	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
+ 		return 0;
+-	if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0)
+-		return 0;
+ 	if (dev1->devtype != dev2->devtype)
+ 		return 0;
+ 	if (dev1->raid_level != dev2->raid_level)
+@@ -1477,8 +1470,6 @@ static int hpsa_update_device_info(struct ctlr_info *h,
+ 		sizeof(this_device->vendor));
+ 	memcpy(this_device->model, &inq_buff[16],
+ 		sizeof(this_device->model));
+-	memcpy(this_device->revision, &inq_buff[32],
+-		sizeof(this_device->revision));
+ 	memset(this_device->device_id, 0,
+ 		sizeof(this_device->device_id));
+ 	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index a203ef6..19586e1 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -45,7 +45,6 @@ struct hpsa_scsi_dev_t {
+ 	unsigned char device_id[16];    /* from inquiry pg. 0x83 */
+ 	unsigned char vendor[8];        /* bytes 8-15 of inquiry data */
+ 	unsigned char model[16];        /* bytes 16-31 of inquiry data */
+-	unsigned char revision[4];      /* bytes 32-35 of inquiry data */
+ 	unsigned char raid_level;	/* from inquiry page 0xC1 */
+ };
+ 
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0079-SCSI-hpsa-do-not-consider-RAID-level-to-be-part-of-d.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/hpsa/0079-SCSI-hpsa-do-not-consider-RAID-level-to-be-part-of-d.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,32 @@
+From: Stephen M. Cameron <StephenM.Cameron>
+Date: Thu, 16 Dec 2010 13:01:03 -0600
+Subject: [PATCH 79/79] [SCSI] hpsa: do not consider RAID level to be part of
+ device identity
+
+commit 35dd3039e09cd46ca3a8733ff1c817bf7b7b19ce upstream.
+
+Otherwise, after doing a RAID level migration, the disk will be
+disruptively removed and re-added as a different disk on rescan.
+
+Signed-off-by: Stephen M. Cameron <scameron at beardog.cce.hp.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/hpsa.c |    2 --
+ 1 files changed, 0 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index a2408e5..12deffc 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -657,8 +657,6 @@ static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
+ 		return 0;
+ 	if (dev1->devtype != dev2->devtype)
+ 		return 0;
+-	if (dev1->raid_level != dev2->raid_level)
+-		return 0;
+ 	if (dev1->bus != dev2->bus)
+ 		return 0;
+ 	return 1;
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/kernel.h-add-pr_warn-for-symmetry-to-dev_warn-netdev.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/kernel.h-add-pr_warn-for-symmetry-to-dev_warn-netdev.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,45 @@
+From: Joe Perches <joe at perches.com>
+Date: Mon, 24 May 2010 14:33:08 -0700
+Subject: [PATCH] kernel.h: add pr_warn for symmetry to dev_warn, netdev_warn
+
+commit fc62f2f19edf46c9bdbd1a54725b56b18c43e94f upstream.
+
+The current logging macros are
+pr_<level>, dev_<level>, netdev_<level>, and netif_<level>.
+pr_ uses warning, the other use warn.
+
+Standardize these logging macros a bit more by adding pr_warn and
+pr_warn_ratelimited.
+
+Right now, there are:
+
+$ for level in emerg alert crit err warn warning notice info ; do \
+    for prefix in pr dev netdev netif ; do \
+      echo -n "${prefix}_${level}:	`git grep -w "${prefix}_${level}" | wc -l`	" ; \
+    done ; \
+    echo ; \
+  done
+pr_emerg: 	45	dev_emerg: 	4	netdev_emerg: 	1	netif_emerg: 	4
+pr_alert: 	24	dev_alert: 	36	netdev_alert: 	1	netif_alert: 	6
+pr_crit: 	24	dev_crit: 	22	netdev_crit: 	1	netif_crit: 	4
+pr_err:  	2013	dev_err: 	8467	netdev_err: 	267	netif_err: 	240
+pr_warn: 	0	dev_warn: 	1818	netdev_warn: 	126	netif_warn: 	23
+pr_warning: 	773	dev_warning: 	0	netdev_warning:	0	netif_warning: 	0
+pr_notice: 	148	dev_notice: 	111	netdev_notice: 	9	netif_notice: 	3
+pr_info: 	1717	dev_info: 	3007	netdev_info: 	101	netif_info: 	85
+
+Signed-off-by: Joe Perches <joe at perches.com>
+Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+[bwh: Drop pr_warn_ratelimited as there are no rate-limited versions in 2.6.32]
+---
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -389,6 +389,7 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
+         printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+ #define pr_warning(fmt, ...) \
+         printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
++#define pr_warn pr_warning
+ #define pr_notice(fmt, ...) \
+         printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+ #define pr_info(fmt, ...) \
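
With the alias in place, pr_warn() and pr_warning() are interchangeable: both
expand to printk(KERN_WARNING pr_fmt(fmt), ...).  A userspace approximation of
that expansion, with printk and KERN_WARNING mocked out (the macro bodies
follow the patch):

#include <stdio.h>

/* userspace stand-ins for the kernel pieces */
#define KERN_WARNING "<4>"
#define printk(fmt, ...) printf(fmt, ##__VA_ARGS__)
#ifndef pr_fmt
#define pr_fmt(fmt) fmt	/* a driver may redefine this to add a prefix */
#endif

/* the macros as they stand after the patch */
#define pr_warning(fmt, ...) \
	printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
#define pr_warn pr_warning

int main(void)
{
	pr_warn("temperature high: %d C\n", 91);
	/* prints: <4>temperature high: 91 C */
	return 0;
}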

Added: dists/squeeze/linux-2.6/debian/patches/features/all/net-use-helpers-to-access-mc-list-V2.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/net-use-helpers-to-access-mc-list-V2.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,43 @@
+From: Jiri Pirko <jpirko at redhat.com>
+Date: Thu, 4 Feb 2010 10:22:25 -0800
+Subject: [PATCH] net: use helpers to access mc list V2
+
+commit 6683ece36e3531fc8c75f69e7165c5f20930be88 upstream.
+
+This patch introduces helpers similar to those already added for the uc list.
+However, multicast lists are not list_head lists but are made manually.  The
+three macros added by this patch will make the transition of mc_list to
+list_head smooth, in two steps:
+
+1) convert all drivers to use these macros (with the original iterator of type
+   "struct dev_mc_list")
+2) once all drivers are converted, convert list type and iterators to "struct
+   netdev_hw_addr" in one patch.
+
+From now on, drivers can (and should) use "netdev_for_each_mc_addr" to iterate
+over the addresses with an iterator of type "struct netdev_hw_addr", and the
+macros "netdev_mc_count" and "netdev_mc_empty" to read the list's length and
+test for emptiness. This is the state which should be reached in all drivers.
+
+Signed-off-by: Jiri Pirko <jpirko at redhat.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+[bwh: Adjust context for lack of unicast list helpers]
+---
+ include/linux/netdevice.h |    6 ++++++
+ 1 files changed, 6 insertions(+), 0 deletions(-)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -227,6 +227,12 @@ struct netdev_hw_addr_list {
+ 	int			count;
+ };
+ 
++#define netdev_mc_count(dev) ((dev)->mc_count)
++#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
++
++#define netdev_for_each_mc_addr(mclist, dev) \
++	for (mclist = dev->mc_list; mclist; mclist = mclist->next)
++
+ struct hh_cache
+ {
+ 	struct hh_cache *hh_next;	/* Next entry			     */
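
Step 1 of the transition looks like this inside a driver's multicast handler:
the iterator is still a struct dev_mc_list, but every list access goes through
the new macros, so step 2 can later change the underlying type without
touching the driver again.  A self-contained sketch with mocked-up 2.6.32-era
structures (the macro bodies are the ones added above):

#include <stdio.h>

/* minimal mock of the 2.6.32-era structures */
struct dev_mc_list {
	struct dev_mc_list *next;
	unsigned char dmi_addr[6];
};

struct net_device {
	struct dev_mc_list *mc_list;
	int mc_count;
};

/* the three helpers added by the patch */
#define netdev_mc_count(dev) ((dev)->mc_count)
#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
#define netdev_for_each_mc_addr(mclist, dev) \
	for (mclist = dev->mc_list; mclist; mclist = mclist->next)

/* step-1 style driver code: new macros, old iterator type */
static void program_mc_filter(struct net_device *dev)
{
	struct dev_mc_list *mclist;

	if (netdev_mc_empty(dev))
		return;
	printf("%d multicast entries\n", netdev_mc_count(dev));
	netdev_for_each_mc_addr(mclist, dev)
		printf("  add filter for %02x:...\n", mclist->dmi_addr[0]);
}

int main(void)
{
	struct dev_mc_list b = { NULL, { 0x33 } };
	struct dev_mc_list a = { &b, { 0x01 } };
	struct net_device dev = { &a, 2 };

	program_mc_filter(&dev);
	return 0;
}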

Added: dists/squeeze/linux-2.6/debian/patches/features/all/netdevice.h-Add-netdev_printk-helpers-like-dev_printk.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/netdevice.h-Add-netdev_printk-helpers-like-dev_printk.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,105 @@
+From: Joe Perches <joe at perches.com>
+Date: Tue, 9 Feb 2010 11:49:47 +0000
+Subject: [PATCH] netdevice.h: Add netdev_printk helpers like dev_printk
+
+commit 571ba42303813106d533bf6bda929d8e289f51bf upstream.
+
+These netdev_printk routines take a struct net_device * and emit
+dev_printk logging messages against netdev->dev.parent, prepending
+"%s: " and the netdev's name to the dev_printk format and arguments.
+
+This can create some uniformity in the output message log.
+
+These helpers should not be used until a successful alloc_netdev.
+
+Signed-off-by: Joe Perches <joe at perches.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ include/linux/netdevice.h |   71 +++++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 71 insertions(+), 0 deletions(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index cdf53a8..a51228a 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2095,6 +2095,77 @@ static inline u32 dev_ethtool_get_flags(struct net_device *dev)
+ 		return 0;
+ 	return dev->ethtool_ops->get_flags(dev);
+ }
++
++/* Logging, debugging and troubleshooting/diagnostic helpers. */
++
++/* netdev_printk helpers, similar to dev_printk */
++
++static inline const char *netdev_name(const struct net_device *dev)
++{
++	if (dev->reg_state != NETREG_REGISTERED)
++		return "(unregistered net_device)";
++	return dev->name;
++}
++
++#define netdev_printk(level, netdev, format, args...)		\
++	dev_printk(level, (netdev)->dev.parent,			\
++		   "%s: " format,				\
++		   netdev_name(netdev), ##args)
++
++#define netdev_emerg(dev, format, args...)			\
++	netdev_printk(KERN_EMERG, dev, format, ##args)
++#define netdev_alert(dev, format, args...)			\
++	netdev_printk(KERN_ALERT, dev, format, ##args)
++#define netdev_crit(dev, format, args...)			\
++	netdev_printk(KERN_CRIT, dev, format, ##args)
++#define netdev_err(dev, format, args...)			\
++	netdev_printk(KERN_ERR, dev, format, ##args)
++#define netdev_warn(dev, format, args...)			\
++	netdev_printk(KERN_WARNING, dev, format, ##args)
++#define netdev_notice(dev, format, args...)			\
++	netdev_printk(KERN_NOTICE, dev, format, ##args)
++#define netdev_info(dev, format, args...)			\
++	netdev_printk(KERN_INFO, dev, format, ##args)
++
++#if defined(DEBUG)
++#define netdev_dbg(__dev, format, args...)			\
++	netdev_printk(KERN_DEBUG, __dev, format, ##args)
++#elif defined(CONFIG_DYNAMIC_DEBUG)
++#define netdev_dbg(__dev, format, args...)			\
++do {								\
++	dynamic_dev_dbg((__dev)->dev.parent, "%s: " format,	\
++			netdev_name(__dev), ##args);		\
++} while (0)
++#else
++#define netdev_dbg(__dev, format, args...)			\
++({								\
++	if (0)							\
++		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
++	0;							\
++})
++#endif
++
++#if defined(VERBOSE_DEBUG)
++#define netdev_vdbg	netdev_dbg
++#else
++
++#define netdev_vdbg(dev, format, args...)			\
++({								\
++	if (0)							\
++		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
++	0;							\
++})
++#endif
++
++/*
++ * netdev_WARN() acts like dev_printk(), but with the key difference
++ * of using a WARN/WARN_ON to get the message out, including the
++ * file/line information and a backtrace.
++ */
++#define netdev_WARN(dev, format, args...)			\
++	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);
++
++
+ #endif /* __KERNEL__ */
+ 
+ #endif	/* _LINUX_NETDEVICE_H */
+-- 
+1.7.4.4
+
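
In a driver this replaces hand-rolled constructions such as
dev_err(netdev->dev.parent, "%s: link down\n", netdev->name): one call
supplies both the parent-device prefix and the interface name.  A userspace
mock that keeps only the shape of the patch's macros (dev_printk is reduced
to printf here):

#include <stdio.h>

/* mock net_device: just a name and a registration flag */
struct net_device {
	const char *name;
	int registered;
};

static const char *netdev_name(const struct net_device *dev)
{
	if (!dev->registered)
		return "(unregistered net_device)";
	return dev->name;
}

/* shape of the patch's netdev_printk, with dev_printk mocked away */
#define netdev_printk(level, netdev, format, args...) \
	printf(level "%s: " format, netdev_name(netdev), ##args)
#define netdev_err(dev, format, args...) \
	netdev_printk("<3>", dev, format, ##args)

int main(void)
{
	struct net_device dev = { "eth0", 1 };

	netdev_err(&dev, "link down on port %d\n", 2);
	/* prints: <3>eth0: link down on port 2 */
	return 0;
}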

Added: dists/squeeze/linux-2.6/debian/patches/features/all/netdevice.h-Add-netif_printk-helpers.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/netdevice.h-Add-netif_printk-helpers.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,84 @@
+From: Joe Perches <joe at perches.com>
+Date: Tue, 9 Feb 2010 11:49:49 +0000
+Subject: [PATCH] include/linux/netdevice.h: Add netif_printk helpers
+
+commit b3d95c5c93d4b57eaea0ad3f582b08a6b5fb3eb1 upstream.
+
+Add macros that use netif_msg_##type to test a private structure's
+msg_enable bits and call netdev_printk if the relevant bit is set.
+
+This simplifies logic in callers and adds message logging consistency.
+
+Signed-off-by: Joe Perches <joe at perches.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ include/linux/netdevice.h |   53 +++++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 53 insertions(+), 0 deletions(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index a51228a..1412dde 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2165,6 +2165,59 @@ do {								\
+ #define netdev_WARN(dev, format, args...)			\
+ 	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);
+ 
++/* netif printk helpers, similar to netdev_printk */
++
++#define netif_printk(priv, type, level, dev, fmt, args...)	\
++do {					  			\
++	if (netif_msg_##type(priv))				\
++		netdev_printk(level, (dev), fmt, ##args);	\
++} while (0)
++
++#define netif_emerg(priv, type, dev, fmt, args...)		\
++	netif_printk(priv, type, KERN_EMERG, dev, fmt, ##args)
++#define netif_alert(priv, type, dev, fmt, args...)		\
++	netif_printk(priv, type, KERN_ALERT, dev, fmt, ##args)
++#define netif_crit(priv, type, dev, fmt, args...)		\
++	netif_printk(priv, type, KERN_CRIT, dev, fmt, ##args)
++#define netif_err(priv, type, dev, fmt, args...)		\
++	netif_printk(priv, type, KERN_ERR, dev, fmt, ##args)
++#define netif_warn(priv, type, dev, fmt, args...)		\
++	netif_printk(priv, type, KERN_WARNING, dev, fmt, ##args)
++#define netif_notice(priv, type, dev, fmt, args...)		\
++	netif_printk(priv, type, KERN_NOTICE, dev, fmt, ##args)
++#define netif_info(priv, type, dev, fmt, args...)		\
++	netif_printk(priv, type, KERN_INFO, (dev), fmt, ##args)
++
++#if defined(DEBUG)
++#define netif_dbg(priv, type, dev, format, args...)		\
++	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
++#elif defined(CONFIG_DYNAMIC_DEBUG)
++#define netif_dbg(priv, type, netdev, format, args...)		\
++do {								\
++	if (netif_msg_##type(priv))				\
++		dynamic_dev_dbg((netdev)->dev.parent,		\
++				"%s: " format,			\
++				netdev_name(netdev), ##args);	\
++} while (0)
++#else
++#define netif_dbg(priv, type, dev, format, args...)			\
++({									\
++	if (0)								\
++		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
++	0;								\
++})
++#endif
++
++#if defined(VERBOSE_DEBUG)
++#define netif_vdbg	netif_dbg
++#else
++#define netif_vdbg(priv, type, dev, format, args...)		\
++({								\
++	if (0)							\
++		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
++	0;							\
++})
++#endif
+ 
+ #endif /* __KERNEL__ */
+ 
+-- 
+1.7.4.4
+
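
The netif_ variants put one more gate in front of netdev_printk: the driver's
msg_enable bitmap, usually controlled through ethtool.  netif_msg_##type(priv)
expands to a bit test such as (priv)->msg_enable & NETIF_MSG_LINK.  A
simplified sketch (the real macros also route a net_device through
netdev_printk; this mock prints directly):

#include <stdio.h>

#define NETIF_MSG_LINK 0x0004	/* one bit of the standard msg_enable map */

struct priv {
	unsigned int msg_enable;
	const char *name;
};

/* the netif_msg_<type> predicate a driver normally gets for free */
#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)

/* shape of netif_printk: test the bit, then log */
#define netif_info(priv, type, fmt, args...)			\
do {								\
	if (netif_msg_##type(priv))				\
		printf("%s: " fmt, (priv)->name, ##args);	\
} while (0)

int main(void)
{
	struct priv quiet = { 0, "eth0" };
	struct priv chatty = { NETIF_MSG_LINK, "eth1" };

	netif_info(&quiet, link, "link up, %d Mb/s\n", 1000);	/* suppressed */
	netif_info(&chatty, link, "link up, %d Mb/s\n", 1000);	/* printed */
	return 0;
}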

Added: dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0001-SCSI-pm8001-add-SAS-SATA-HBA-driver.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0001-SCSI-pm8001-add-SAS-SATA-HBA-driver.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,8873 @@
+From: jack wang <jack_wang at usish.com>
+Date: Wed, 14 Oct 2009 16:19:21 +0800
+Subject: [PATCH 01/25] [SCSI] pm8001: add SAS/SATA HBA driver
+
+commit dbf9bfe615717d1145f263c0049fe2328e6ed395 upstream.
+
+This driver supports PMC-Sierra PCIe SAS/SATA 8x6G SPC 8001 chip based
+host adapters.
+
+Signed-off-by: Jack Wang <jack_wang at usish.com>
+Signed-off-by: Lindar Liu <lindar_liu at usish.com>
+Signed-off-by: Tom Peng <tom_peng at usish.com>
+Signed-off-by: Kevin Ao <aoqingyun at usish.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ MAINTAINERS                        |    7 +
+ drivers/scsi/Kconfig               |    8 +
+ drivers/scsi/Makefile              |    1 +
+ drivers/scsi/pm8001/Makefile       |   12 +
+ drivers/scsi/pm8001/pm8001_chips.h |   89 +
+ drivers/scsi/pm8001/pm8001_ctl.c   |  573 +++++
+ drivers/scsi/pm8001/pm8001_ctl.h   |   67 +
+ drivers/scsi/pm8001/pm8001_defs.h  |  112 +
+ drivers/scsi/pm8001/pm8001_hwi.c   | 4371 ++++++++++++++++++++++++++++++++++++
+ drivers/scsi/pm8001/pm8001_hwi.h   | 1011 +++++++++
+ drivers/scsi/pm8001/pm8001_init.c  |  888 ++++++++
+ drivers/scsi/pm8001/pm8001_sas.c   | 1104 +++++++++
+ drivers/scsi/pm8001/pm8001_sas.h   |  480 ++++
+ include/linux/pci_ids.h            |    2 +
+ 14 files changed, 8725 insertions(+), 0 deletions(-)
+ create mode 100644 drivers/scsi/pm8001/Makefile
+ create mode 100644 drivers/scsi/pm8001/pm8001_chips.h
+ create mode 100644 drivers/scsi/pm8001/pm8001_ctl.c
+ create mode 100644 drivers/scsi/pm8001/pm8001_ctl.h
+ create mode 100644 drivers/scsi/pm8001/pm8001_defs.h
+ create mode 100644 drivers/scsi/pm8001/pm8001_hwi.c
+ create mode 100644 drivers/scsi/pm8001/pm8001_hwi.h
+ create mode 100644 drivers/scsi/pm8001/pm8001_init.c
+ create mode 100644 drivers/scsi/pm8001/pm8001_sas.c
+ create mode 100644 drivers/scsi/pm8001/pm8001_sas.h
+
+diff --git a/MAINTAINERS b/MAINTAINERS
+index a1a2ace..016411c 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -4116,6 +4116,13 @@ W:	http://www.pmc-sierra.com/
+ S:	Supported
+ F:	drivers/scsi/pmcraid.*
+ 
++PMC SIERRA PM8001 DRIVER
++M:	jack_wang at usish.com
++M:	lindar_liu at usish.com
++L:	linux-scsi at vger.kernel.org
++S:	Supported
++F:	drivers/scsi/pm8001/
++
+ POSIX CLOCKS and TIMERS
+ M:	Thomas Gleixner <tglx at linutronix.de>
+ S:	Supported
+diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
+index e11cca4..2e4f7d0 100644
+--- a/drivers/scsi/Kconfig
++++ b/drivers/scsi/Kconfig
+@@ -1818,6 +1818,14 @@ config SCSI_PMCRAID
+ 	---help---
+ 	  This driver supports the PMC SIERRA MaxRAID adapters.
+ 
++config SCSI_PM8001
++	tristate "PMC-Sierra SPC 8001 SAS/SATA Based Host Adapter driver"
++	depends on PCI && SCSI
++	select SCSI_SAS_LIBSAS
++	help
++	  This driver supports PMC-Sierra PCIE SAS/SATA 8x6G SPC 8001 chip
++	  based host adapters.
++
+ config SCSI_SRP
+ 	tristate "SCSI RDMA Protocol helper library"
+ 	depends on SCSI && PCI
+diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
+index 3ad61db..53b1dac 100644
+--- a/drivers/scsi/Makefile
++++ b/drivers/scsi/Makefile
+@@ -70,6 +70,7 @@ obj-$(CONFIG_SCSI_AIC79XX)	+= aic7xxx/
+ obj-$(CONFIG_SCSI_AACRAID)	+= aacraid/
+ obj-$(CONFIG_SCSI_AIC7XXX_OLD)	+= aic7xxx_old.o
+ obj-$(CONFIG_SCSI_AIC94XX)	+= aic94xx/
++obj-$(CONFIG_SCSI_PM8001)	+= pm8001/
+ obj-$(CONFIG_SCSI_IPS)		+= ips.o
+ obj-$(CONFIG_SCSI_FD_MCS)	+= fd_mcs.o
+ obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
+diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile
+new file mode 100644
+index 0000000..52f0429
+--- /dev/null
++++ b/drivers/scsi/pm8001/Makefile
+@@ -0,0 +1,12 @@
++#
++# Kernel configuration file for the PM8001 SAS/SATA 8x6G based HBA driver
++#
++# Copyright (C) 2008-2009  USI Co., Ltd.
++
++
++obj-$(CONFIG_SCSI_PM8001) += pm8001.o
++pm8001-y += pm8001_init.o \
++		pm8001_sas.o  \
++		pm8001_ctl.o  \
++		pm8001_hwi.o
++
+diff --git a/drivers/scsi/pm8001/pm8001_chips.h b/drivers/scsi/pm8001/pm8001_chips.h
+new file mode 100644
+index 0000000..4efa4d0
+--- /dev/null
++++ b/drivers/scsi/pm8001/pm8001_chips.h
+@@ -0,0 +1,89 @@
++/*
++ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
++ *
++ * Copyright (c) 2008-2009 USI Co., Ltd.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions, and the following disclaimer,
++ *    without modification.
++ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
++ *    substantially similar to the "NO WARRANTY" disclaimer below
++ *    ("Disclaimer") and any redistribution must be conditioned upon
++ *    including a substantially similar Disclaimer requirement for further
++ *    binary redistribution.
++ * 3. Neither the names of the above-listed copyright holders nor the names
++ *    of any contributors may be used to endorse or promote products derived
++ *    from this software without specific prior written permission.
++ *
++ * Alternatively, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") version 2 as published by the Free
++ * Software Foundation.
++ *
++ * NO WARRANTY
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
++ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
++ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGES.
++ *
++ */
++
++#ifndef _PM8001_CHIPS_H_
++#define _PM8001_CHIPS_H_
++
++static inline u32 pm8001_read_32(void *virt_addr)
++{
++	return *((u32 *)virt_addr);
++}
++
++static inline void pm8001_write_32(void *addr, u32 offset, u32 val)
++{
++	*((u32 *)(addr + offset)) = val;
++}
++
++static inline u32 pm8001_cr32(struct pm8001_hba_info *pm8001_ha, u32 bar,
++		u32 offset)
++{
++	return readl(pm8001_ha->io_mem[bar].memvirtaddr + offset);
++}
++
++static inline void pm8001_cw32(struct pm8001_hba_info *pm8001_ha, u32 bar,
++		u32 addr, u32 val)
++{
++	writel(val, pm8001_ha->io_mem[bar].memvirtaddr + addr);
++}
++static inline u32 pm8001_mr32(void __iomem *addr, u32 offset)
++{
++	return readl(addr + offset);
++}
++static inline void pm8001_mw32(void __iomem *addr, u32 offset, u32 val)
++{
++	writel(val, addr + offset);
++}
++static inline u32 get_pci_bar_index(u32 pcibar)
++{
++		switch (pcibar) {
++		case 0x18:
++		case 0x1C:
++			return 1;
++		case 0x20:
++			return 2;
++		case 0x24:
++			return 3;
++		default:
++			return 0;
++	}
++}
++
++#endif  /* _PM8001_CHIPS_H_ */
++
+diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
+new file mode 100644
+index 0000000..14b13ac
+--- /dev/null
++++ b/drivers/scsi/pm8001/pm8001_ctl.c
+@@ -0,0 +1,573 @@
++/*
++ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
++ *
++ * Copyright (c) 2008-2009 USI Co., Ltd.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions, and the following disclaimer,
++ *    without modification.
++ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
++ *    substantially similar to the "NO WARRANTY" disclaimer below
++ *    ("Disclaimer") and any redistribution must be conditioned upon
++ *    including a substantially similar Disclaimer requirement for further
++ *    binary redistribution.
++ * 3. Neither the names of the above-listed copyright holders nor the names
++ *    of any contributors may be used to endorse or promote products derived
++ *    from this software without specific prior written permission.
++ *
++ * Alternatively, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") version 2 as published by the Free
++ * Software Foundation.
++ *
++ * NO WARRANTY
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
++ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
++ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGES.
++ *
++ */
++#include <linux/firmware.h>
++#include "pm8001_sas.h"
++#include "pm8001_ctl.h"
++
++/* scsi host attributes */
++
++/**
++ * pm8001_ctl_mpi_interface_rev_show - MPI interface revision number
++ * @cdev: pointer to embedded class device
++ * @buf: the buffer returned
++ *
++ * A sysfs 'read-only' shost attribute.
++ */
++static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev,
++	struct device_attribute *attr, char *buf)
++{
++	struct Scsi_Host *shost = class_to_shost(cdev);
++	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
++	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
++
++	return snprintf(buf, PAGE_SIZE, "%d\n",
++		pm8001_ha->main_cfg_tbl.interface_rev);
++}
++static
++DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
++
++/**
++ * pm8001_ctl_fw_version_show - firmware version
++ * @cdev: pointer to embedded class device
++ * @buf: the buffer returned
++ *
++ * A sysfs 'read-only' shost attribute.
++ */
++static ssize_t pm8001_ctl_fw_version_show(struct device *cdev,
++	struct device_attribute *attr, char *buf)
++{
++	struct Scsi_Host *shost = class_to_shost(cdev);
++	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
++	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
++
++	return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
++		       (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 24),
++		       (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 16),
++		       (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 8),
++		       (u8)(pm8001_ha->main_cfg_tbl.firmware_rev));
++}
++static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL);
++/**
++ * pm8001_ctl_max_out_io_show - max outstanding io supported
++ * @cdev: pointer to embedded class device
++ * @buf: the buffer returned
++ *
++ * A sysfs 'read-only' shost attribute.
++ */
++static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev,
++	struct device_attribute *attr, char *buf)
++{
++	struct Scsi_Host *shost = class_to_shost(cdev);
++	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
++	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
++
++	return snprintf(buf, PAGE_SIZE, "%d\n",
++			pm8001_ha->main_cfg_tbl.max_out_io);
++}
++static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL);
++/**
++ * pm8001_ctl_max_devices_show - max devices supported
++ * @cdev: pointer to embedded class device
++ * @buf: the buffer returned
++ *
++ * A sysfs 'read-only' shost attribute.
++ */
++static ssize_t pm8001_ctl_max_devices_show(struct device *cdev,
++	struct device_attribute *attr, char *buf)
++{
++	struct Scsi_Host *shost = class_to_shost(cdev);
++	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
++	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
++
++	return snprintf(buf, PAGE_SIZE, "%04d\n",
++			(u16)(pm8001_ha->main_cfg_tbl.max_sgl >> 16));
++}
++static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL);
++/**
++ * pm8001_ctl_max_sg_list_show - max sg list supported, if not 0 (0 means
++ * no hardware limitation)
++ * @cdev: pointer to embedded class device
++ * @buf: the buffer returned
++ *
++ * A sysfs 'read-only' shost attribute.
++ */
++static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev,
++	struct device_attribute *attr, char *buf)
++{
++	struct Scsi_Host *shost = class_to_shost(cdev);
++	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
++	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
++
++	return snprintf(buf, PAGE_SIZE, "%04d\n",
++			pm8001_ha->main_cfg_tbl.max_sgl & 0x0000FFFF);
++}
++static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL);
++
++#define SAS_1_0 0x1
++#define SAS_1_1 0x2
++#define SAS_2_0 0x4
++
++static ssize_t
++show_sas_spec_support_status(unsigned int mode, char *buf)
++{
++	ssize_t len = 0;
++
++	if (mode & SAS_1_1)
++		len = sprintf(buf, "%s", "SAS1.1");
++	if (mode & SAS_2_0)
++		len += sprintf(buf + len, "%s%s", len ? ", " : "", "SAS2.0");
++	len += sprintf(buf + len, "\n");
++
++	return len;
++}
++
++/**
++ * pm8001_ctl_sas_spec_support_show - sas spec supported
++ * @cdev: pointer to embedded class device
++ * @buf: the buffer returned
++ *
++ * A sysfs 'read-only' shost attribute.
++ */
++static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev,
++	struct device_attribute *attr, char *buf)
++{
++	unsigned int mode;
++	struct Scsi_Host *shost = class_to_shost(cdev);
++	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
++	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
++	mode = (pm8001_ha->main_cfg_tbl.ctrl_cap_flag & 0xfe000000)>>25;
++	return show_sas_spec_support_status(mode, buf);
++}
++static DEVICE_ATTR(sas_spec_support, S_IRUGO,
++		   pm8001_ctl_sas_spec_support_show, NULL);
++
++/**
++ * pm8001_ctl_host_sas_address_show - sas address
++ * @cdev: pointer to embedded class device
++ * @buf: the buffer returned
++ *
++ * This is the controller sas address
++ *
++ * A sysfs 'read-only' shost attribute.
++ */
++static ssize_t pm8001_ctl_host_sas_address_show(struct device *cdev,
++	struct device_attribute *attr, char *buf)
++{
++	struct Scsi_Host *shost = class_to_shost(cdev);
++	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
++	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
++	return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
++			be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr));
++}
++static DEVICE_ATTR(host_sas_address, S_IRUGO,
++		   pm8001_ctl_host_sas_address_show, NULL);
++
++/**
++ * pm8001_ctl_logging_level_show - logging level
++ * @cdev: pointer to embedded class device
++ * @buf: the buffer returned
++ *
++ * A sysfs 'read/write' shost attribute.
++ */
++static ssize_t pm8001_ctl_logging_level_show(struct device *cdev,
++	struct device_attribute *attr, char *buf)
++{
++	struct Scsi_Host *shost = class_to_shost(cdev);
++	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
++	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
++
++	return snprintf(buf, PAGE_SIZE, "%08xh\n", pm8001_ha->logging_level);
++}
++static ssize_t pm8001_ctl_logging_level_store(struct device *cdev,
++	struct device_attribute *attr, const char *buf, size_t count)
++{
++	struct Scsi_Host *shost = class_to_shost(cdev);
++	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
++	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
++	int val = 0;
++
++	if (sscanf(buf, "%x", &val) != 1)
++		return -EINVAL;
++
++	pm8001_ha->logging_level = val;
++	return strlen(buf);
++}
++
++static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR,
++	pm8001_ctl_logging_level_show, pm8001_ctl_logging_level_store);
++/**
++ * pm8001_ctl_aap_log_show - aap1 event log
++ * @cdev: pointer to embedded class device
++ * @buf: the buffer returned
++ *
++ * A sysfs 'read-only' shost attribute.
++ */
++static ssize_t pm8001_ctl_aap_log_show(struct device *cdev,
++	struct device_attribute *attr, char *buf)
++{
++	struct Scsi_Host *shost = class_to_shost(cdev);
++	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
++	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
++	int i;
++#define AAP1_MEMMAP(r, c) \
++	(*(u32 *)((u8*)pm8001_ha->memoryMap.region[AAP1].virt_ptr + (r) * 32 \
++	+ (c)))
++
++	char *str = buf;
++	int max = 2;
++	for (i = 0; i < max; i++) {
++		str += sprintf(str, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x"
++			       "0x%08x 0x%08x\n",
++			       AAP1_MEMMAP(i, 0),
++			       AAP1_MEMMAP(i, 4),
++			       AAP1_MEMMAP(i, 8),
++			       AAP1_MEMMAP(i, 12),
++			       AAP1_MEMMAP(i, 16),
++			       AAP1_MEMMAP(i, 20),
++			       AAP1_MEMMAP(i, 24),
++			       AAP1_MEMMAP(i, 28));
++	}
++
++	return str - buf;
++}
++static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL);
++/**
++ * pm8001_ctl_iop_log_show - IOP event log
++ * @cdev: pointer to embedded class device
++ * @buf: the buffer returned
++ *
++ * A sysfs 'read-only' shost attribute.
++ */
++static ssize_t pm8001_ctl_iop_log_show(struct device *cdev,
++	struct device_attribute *attr, char *buf)
++{
++	struct Scsi_Host *shost = class_to_shost(cdev);
++	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
++	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
++#define IOP_MEMMAP(r, c) \
++	(*(u32 *)((u8*)pm8001_ha->memoryMap.region[IOP].virt_ptr + (r) * 32 \
++	+ (c)))
++	int i;
++	char *str = buf;
++	int max = 2;
++	for (i = 0; i < max; i++) {
++		str += sprintf(str, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x"
++			       "0x%08x 0x%08x\n",
++			       IOP_MEMMAP(i, 0),
++			       IOP_MEMMAP(i, 4),
++			       IOP_MEMMAP(i, 8),
++			       IOP_MEMMAP(i, 12),
++			       IOP_MEMMAP(i, 16),
++			       IOP_MEMMAP(i, 20),
++			       IOP_MEMMAP(i, 24),
++			       IOP_MEMMAP(i, 28));
++	}
++
++	return str - buf;
++}
++static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
++
++#define FLASH_CMD_NONE      0x00
++#define FLASH_CMD_UPDATE    0x01
++#define FLASH_CMD_SET_NVMD    0x02
++
++struct flash_command {
++     u8      command[8];
++     int     code;
++};
++
++static struct flash_command flash_command_table[] =
++{
++     {"set_nvmd",    FLASH_CMD_SET_NVMD},
++     {"update",      FLASH_CMD_UPDATE},
++     {"",            FLASH_CMD_NONE} /* Last entry should be NULL. */
++};
++
++struct error_fw {
++     char    *reason;
++     int     err_code;
++};
++
++static struct error_fw flash_error_table[] =
++{
++     {"Failed to open fw image file",	FAIL_OPEN_BIOS_FILE},
++     {"image header mismatch",		FLASH_UPDATE_HDR_ERR},
++     {"image offset mismatch",		FLASH_UPDATE_OFFSET_ERR},
++     {"image CRC Error",		FLASH_UPDATE_CRC_ERR},
++     {"image length Error.",		FLASH_UPDATE_LENGTH_ERR},
++     {"Failed to program flash chip",	FLASH_UPDATE_HW_ERR},
++     {"Flash chip not supported.",	FLASH_UPDATE_DNLD_NOT_SUPPORTED},
++     {"Flash update disabled.",		FLASH_UPDATE_DISABLED},
++     {"Flash in progress",		FLASH_IN_PROGRESS},
++     {"Image file size Error",		FAIL_FILE_SIZE},
++     {"Input parameter error",		FAIL_PARAMETERS},
++     {"Out of memory",			FAIL_OUT_MEMORY},
++     {"OK", 0}	/* Last entry err_code = 0. */
++};
++
++static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
++{
++	struct pm8001_ioctl_payload	*payload;
++	DECLARE_COMPLETION_ONSTACK(completion);
++	u8		*ioctlbuffer = NULL;
++	u32		length = 0;
++	u32		ret = 0;
++
++	length = 1024 * 5 + sizeof(*payload) - 1;
++	ioctlbuffer = kzalloc(length, GFP_KERNEL);
++	if (!ioctlbuffer)
++		return -ENOMEM;
++	if ((pm8001_ha->fw_image->size <= 0) ||
++	    (pm8001_ha->fw_image->size > 4096)) {
++		ret = FAIL_FILE_SIZE;
++		goto out;
++	}
++	payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
++	memcpy((u8 *)payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
++				pm8001_ha->fw_image->size);
++	payload->length = pm8001_ha->fw_image->size;
++	payload->id = 0;
++	pm8001_ha->nvmd_completion = &completion;
++	ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload);
++	wait_for_completion(&completion);
++out:
++	kfree(ioctlbuffer);
++	return ret;
++}
++
++static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
++{
++	struct pm8001_ioctl_payload	*payload;
++	DECLARE_COMPLETION_ONSTACK(completion);
++	u8		*ioctlbuffer = NULL;
++	u32		length = 0;
++	struct fw_control_info	*fwControl;
++	u32		loopNumber, loopcount = 0;
++	u32		sizeRead = 0;
++	u32		partitionSize, partitionSizeTmp;
++	u32		ret = 0;
++	u32		partitionNumber = 0;
++	struct pm8001_fw_image_header *image_hdr;
++
++	length = 1024 * 16 + sizeof(*payload) - 1;
++	ioctlbuffer = kzalloc(length, GFP_KERNEL);
++	image_hdr = (struct pm8001_fw_image_header *)pm8001_ha->fw_image->data;
++	if (!ioctlbuffer)
++		return -ENOMEM;
++	if (pm8001_ha->fw_image->size < 28) {
++		ret = FAIL_FILE_SIZE;
++		goto out;
++	}
++
++	while (sizeRead < pm8001_ha->fw_image->size) {
++		partitionSizeTmp =
++			*(u32 *)((u8 *)&image_hdr->image_length + sizeRead);
++		partitionSize = be32_to_cpu(partitionSizeTmp);
++		loopcount = (partitionSize + HEADER_LEN)/IOCTL_BUF_SIZE;
++		if (loopcount % IOCTL_BUF_SIZE)
++			loopcount++;
++		if (loopcount == 0)
++			loopcount++;
++		for (loopNumber = 0; loopNumber < loopcount; loopNumber++) {
++			payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
++			payload->length = 1024*16;
++			payload->id = 0;
++			fwControl =
++			      (struct fw_control_info *)payload->func_specific;
++			fwControl->len = IOCTL_BUF_SIZE;   /* IN */
++			fwControl->size = partitionSize + HEADER_LEN;/* IN */
++			fwControl->retcode = 0;/* OUT */
++			fwControl->offset = loopNumber * IOCTL_BUF_SIZE;/*OUT */
++
++		/* For the last chunk of data, in case the file size is not a
++		   multiple of 4k, load only the rest. */
++		if (((loopcount-loopNumber) == 1) &&
++			((partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE)) {
++			fwControl->len =
++				(partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
++			memcpy((u8 *)fwControl->buffer,
++				(u8 *)pm8001_ha->fw_image->data + sizeRead,
++				(partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE);
++			sizeRead +=
++				(partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
++		} else {
++			memcpy((u8 *)fwControl->buffer,
++				(u8 *)pm8001_ha->fw_image->data + sizeRead,
++				IOCTL_BUF_SIZE);
++			sizeRead += IOCTL_BUF_SIZE;
++		}
++
++		pm8001_ha->nvmd_completion = &completion;
++		ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload);
++		wait_for_completion(&completion);
++		if (ret || (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS)) {
++			ret = fwControl->retcode;
++			kfree(ioctlbuffer);
++			ioctlbuffer = NULL;
++			break;
++		}
++	}
++	if (ret)
++		break;
++	partitionNumber++;
++}
++out:
++	kfree(ioctlbuffer);
++	return ret;
++}
++static ssize_t pm8001_store_update_fw(struct device *cdev,
++				      struct device_attribute *attr,
++				      const char *buf, size_t count)
++{
++	struct Scsi_Host *shost = class_to_shost(cdev);
++	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
++	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
++	char *cmd_ptr, *filename_ptr;
++	int res, i;
++	int flash_command = FLASH_CMD_NONE;
++	int err = 0;
++	if (!capable(CAP_SYS_ADMIN))
++		return -EACCES;
++
++	cmd_ptr = kzalloc(count*2, GFP_KERNEL);
++
++	if (!cmd_ptr) {
++		err = FAIL_OUT_MEMORY;
++		goto out;
++	}
++
++	filename_ptr = cmd_ptr + count;
++	res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr);
++	if (res != 2) {
++		err = FAIL_PARAMETERS;
++		goto out1;
++	}
++
++	for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) {
++		if (!memcmp(flash_command_table[i].command,
++				 cmd_ptr, strlen(cmd_ptr))) {
++			flash_command = flash_command_table[i].code;
++			break;
++		}
++	}
++	if (flash_command == FLASH_CMD_NONE) {
++		err = FAIL_PARAMETERS;
++		goto out1;
++	}
++
++	if (pm8001_ha->fw_status == FLASH_IN_PROGRESS) {
++		err = FLASH_IN_PROGRESS;
++		goto out1;
++	}
++	err = request_firmware(&pm8001_ha->fw_image,
++			       filename_ptr,
++			       pm8001_ha->dev);
++
++	if (err) {
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("Failed to load firmware image file %s,"
++			" error %d\n", filename_ptr, err));
++		err = FAIL_OPEN_BIOS_FILE;
++		goto out1;
++	}
++
++	switch (flash_command) {
++	case FLASH_CMD_UPDATE:
++		pm8001_ha->fw_status = FLASH_IN_PROGRESS;
++		err = pm8001_update_flash(pm8001_ha);
++		break;
++	case FLASH_CMD_SET_NVMD:
++		pm8001_ha->fw_status = FLASH_IN_PROGRESS;
++		err = pm8001_set_nvmd(pm8001_ha);
++		break;
++	default:
++		pm8001_ha->fw_status = FAIL_PARAMETERS;
++		err = FAIL_PARAMETERS;
++		break;
++	}
++	release_firmware(pm8001_ha->fw_image);
++out1:
++	kfree(cmd_ptr);
++out:
++	pm8001_ha->fw_status = err;
++
++	if (!err)
++		return count;
++	else
++		return -err;
++}
++
++static ssize_t pm8001_show_update_fw(struct device *cdev,
++				     struct device_attribute *attr, char *buf)
++{
++	int i;
++	struct Scsi_Host *shost = class_to_shost(cdev);
++	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
++	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
++
++	for (i = 0; flash_error_table[i].err_code != 0; i++) {
++		if (flash_error_table[i].err_code == pm8001_ha->fw_status)
++			break;
++	}
++	if (pm8001_ha->fw_status != FLASH_IN_PROGRESS)
++		pm8001_ha->fw_status = FLASH_OK;
++
++	return snprintf(buf, PAGE_SIZE, "status=%x %s\n",
++			flash_error_table[i].err_code,
++			flash_error_table[i].reason);
++}
++
++static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUGO,
++	pm8001_show_update_fw, pm8001_store_update_fw);
++struct device_attribute *pm8001_host_attrs[] = {
++	&dev_attr_interface_rev,
++	&dev_attr_fw_version,
++	&dev_attr_update_fw,
++	&dev_attr_aap_log,
++	&dev_attr_iop_log,
++	&dev_attr_max_out_io,
++	&dev_attr_max_devices,
++	&dev_attr_max_sg_list,
++	&dev_attr_sas_spec_support,
++	&dev_attr_logging_level,
++	&dev_attr_host_sas_address,
++	NULL,
++};
++
+diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h
+new file mode 100644
+index 0000000..22644de
+--- /dev/null
++++ b/drivers/scsi/pm8001/pm8001_ctl.h
+@@ -0,0 +1,67 @@
++ /*
++  * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
++  *
++  * Copyright (c) 2008-2009 USI Co., Ltd.
++  * All rights reserved.
++  *
++  * Redistribution and use in source and binary forms, with or without
++  * modification, are permitted provided that the following conditions
++  * are met:
++  * 1. Redistributions of source code must retain the above copyright
++  *    notice, this list of conditions, and the following disclaimer,
++  *    without modification.
++  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
++  *    substantially similar to the "NO WARRANTY" disclaimer below
++  *    ("Disclaimer") and any redistribution must be conditioned upon
++  *    including a substantially similar Disclaimer requirement for further
++  *    binary redistribution.
++  * 3. Neither the names of the above-listed copyright holders nor the names
++  *    of any contributors may be used to endorse or promote products derived
++  *    from this software without specific prior written permission.
++  *
++  * Alternatively, this software may be distributed under the terms of the
++  * GNU General Public License ("GPL") version 2 as published by the Free
++  * Software Foundation.
++  *
++  * NO WARRANTY
++  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
++  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
++  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
++  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++  * POSSIBILITY OF SUCH DAMAGES.
++  *
++  */
++
++#ifndef PM8001_CTL_H_INCLUDED
++#define PM8001_CTL_H_INCLUDED
++
++#define IOCTL_BUF_SIZE		4096
++#define HEADER_LEN			28
++#define SIZE_OFFSET			16
++
++struct pm8001_ioctl_payload {
++	u32	signature;
++	u16	major_function;
++	u16	minor_function;
++	u16	length;
++	u16	status;
++	u16	offset;
++	u16	id;
++	u8	func_specific[1];
++};
++
++#define FLASH_OK                        0x000000
++#define FAIL_OPEN_BIOS_FILE             0x000100
++#define FAIL_FILE_SIZE                  0x000a00
++#define FAIL_PARAMETERS                 0x000b00
++#define FAIL_OUT_MEMORY                 0x000c00
++#define FLASH_IN_PROGRESS               0x001000
++
++#endif /* PM8001_CTL_H_INCLUDED */
++
+diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
+new file mode 100644
+index 0000000..944afad
+--- /dev/null
++++ b/drivers/scsi/pm8001/pm8001_defs.h
+@@ -0,0 +1,112 @@
++/*
++ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
++ *
++ * Copyright (c) 2008-2009 USI Co., Ltd.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions, and the following disclaimer,
++ *    without modification.
++ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
++ *    substantially similar to the "NO WARRANTY" disclaimer below
++ *    ("Disclaimer") and any redistribution must be conditioned upon
++ *    including a substantially similar Disclaimer requirement for further
++ *    binary redistribution.
++ * 3. Neither the names of the above-listed copyright holders nor the names
++ *    of any contributors may be used to endorse or promote products derived
++ *    from this software without specific prior written permission.
++ *
++ * Alternatively, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") version 2 as published by the Free
++ * Software Foundation.
++ *
++ * NO WARRANTY
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
++ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
++ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGES.
++ *
++ */
++
++#ifndef _PM8001_DEFS_H_
++#define _PM8001_DEFS_H_
++
++enum chip_flavors {
++	chip_8001,
++};
++#define USI_MAX_MEMCNT			9
++#define PM8001_MAX_DMA_SG		SG_ALL
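++/* phy_speed encodes the negotiated SAS link rate: 1.5, 3.0 or 6.0 Gbps. */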
++enum phy_speed {
++	PHY_SPEED_15 = 0x01,
++	PHY_SPEED_30 = 0x02,
++	PHY_SPEED_60 = 0x04,
++};
++
++enum data_direction {
++	DATA_DIR_NONE = 0x0,	/* NO TRANSFER */
++	DATA_DIR_IN = 0x01,	/* INBOUND */
++	DATA_DIR_OUT = 0x02,	/* OUTBOUND */
++	DATA_DIR_BYRECIPIENT = 0x04, /* UNSPECIFIED */
++};
++
++enum port_type {
++	PORT_TYPE_SAS = (1L << 1),
++	PORT_TYPE_SATA = (1L << 0),
++};
++
++/* driver compile-time configuration */
++#define	PM8001_MAX_CCB		 512	/* max ccbs supported */
++#define	PM8001_MAX_INB_NUM	 1
++#define	PM8001_MAX_OUTB_NUM	 1
++#define	PM8001_CAN_QUEUE	 128	/* SCSI Queue depth */
++
++/* unchangeable hardware details */
++#define	PM8001_MAX_PHYS		 8	/* max. possible phys */
++#define	PM8001_MAX_PORTS	 8	/* max. possible ports */
++#define	PM8001_MAX_DEVICES	 1024	/* max supported device */
++
++enum memory_region_num {
++	AAP1 = 0x0, /* application acceleration processor */
++	IOP,	    /* IO processor */
++	CI,	    /* consumer index */
++	PI,	    /* producer index */
++	IB,	    /* inbound queue */
++	OB,	    /* outbound queue */
++	NVMD,	    /* NVM device */
++	DEV_MEM,    /* memory for devices */
++	CCB_MEM,    /* memory for command control block */
++};
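++/* The nine regions above account for USI_MAX_MEMCNT entries. */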
++#define	PM8001_EVENT_LOG_SIZE	 (128 * 1024)
++
++/* error codes */
++enum mpi_err {
++	MPI_IO_STATUS_SUCCESS = 0x0,
++	MPI_IO_STATUS_BUSY = 0x01,
++	MPI_IO_STATUS_FAIL = 0x02,
++};
++
++/**
++ * Phy Control constants
++ */
++enum phy_control_type {
++	PHY_LINK_RESET = 0x01,
++	PHY_HARD_RESET = 0x02,
++	PHY_NOTIFY_ENABLE_SPINUP = 0x10,
++};
++
++enum pm8001_hba_info_flags {
++	PM8001F_INIT_TIME	= (1U << 0),
++	PM8001F_RUN_TIME	= (1U << 1),
++};
++
++#endif
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+new file mode 100644
+index 0000000..aa5756f
+--- /dev/null
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -0,0 +1,4371 @@
++/*
++ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
++ *
++ * Copyright (c) 2008-2009 USI Co., Ltd.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions, and the following disclaimer,
++ *    without modification.
++ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
++ *    substantially similar to the "NO WARRANTY" disclaimer below
++ *    ("Disclaimer") and any redistribution must be conditioned upon
++ *    including a substantially similar Disclaimer requirement for further
++ *    binary redistribution.
++ * 3. Neither the names of the above-listed copyright holders nor the names
++ *    of any contributors may be used to endorse or promote products derived
++ *    from this software without specific prior written permission.
++ *
++ * Alternatively, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") version 2 as published by the Free
++ * Software Foundation.
++ *
++ * NO WARRANTY
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
++ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
++ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGES.
++ *
++ */
++ #include "pm8001_sas.h"
++ #include "pm8001_hwi.h"
++ #include "pm8001_chips.h"
++ #include "pm8001_ctl.h"
++
++/**
++ * read_main_config_table - read the configuration table and save it.
++ * @pm8001_ha: our hba card information
++ */
++static void __devinit read_main_config_table(struct pm8001_hba_info *pm8001_ha)
++{
++	void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
++	pm8001_ha->main_cfg_tbl.signature	= pm8001_mr32(address, 0x00);
++	pm8001_ha->main_cfg_tbl.interface_rev	= pm8001_mr32(address, 0x04);
++	pm8001_ha->main_cfg_tbl.firmware_rev	= pm8001_mr32(address, 0x08);
++	pm8001_ha->main_cfg_tbl.max_out_io	= pm8001_mr32(address, 0x0C);
++	pm8001_ha->main_cfg_tbl.max_sgl		= pm8001_mr32(address, 0x10);
++	pm8001_ha->main_cfg_tbl.ctrl_cap_flag	= pm8001_mr32(address, 0x14);
++	pm8001_ha->main_cfg_tbl.gst_offset	= pm8001_mr32(address, 0x18);
++	pm8001_ha->main_cfg_tbl.inbound_queue_offset =
++		pm8001_mr32(address, 0x1C);
++	pm8001_ha->main_cfg_tbl.outbound_queue_offset =
++		pm8001_mr32(address, 0x20);
++	pm8001_ha->main_cfg_tbl.hda_mode_flag	=
++		pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET);
++
++	/* read the analog setting offset from the configuration table */
++	pm8001_ha->main_cfg_tbl.anolog_setup_table_offset =
++		pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);
++
++	/* read Error Dump Offset and Length */
++	pm8001_ha->main_cfg_tbl.fatal_err_dump_offset0 =
++		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
++	pm8001_ha->main_cfg_tbl.fatal_err_dump_length0 =
++		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
++	pm8001_ha->main_cfg_tbl.fatal_err_dump_offset1 =
++		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
++	pm8001_ha->main_cfg_tbl.fatal_err_dump_length1 =
++		pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
++}
++
++/**
++ * read_general_status_table - read the general status table and save it.
++ * @pm8001_ha: our hba card information
++ */
++static void __devinit
++read_general_status_table(struct pm8001_hba_info *pm8001_ha)
++{
++	void __iomem *address = pm8001_ha->general_stat_tbl_addr;
++	pm8001_ha->gs_tbl.gst_len_mpistate	= pm8001_mr32(address, 0x00);
++	pm8001_ha->gs_tbl.iq_freeze_state0	= pm8001_mr32(address, 0x04);
++	pm8001_ha->gs_tbl.iq_freeze_state1	= pm8001_mr32(address, 0x08);
++	pm8001_ha->gs_tbl.msgu_tcnt		= pm8001_mr32(address, 0x0C);
++	pm8001_ha->gs_tbl.iop_tcnt		= pm8001_mr32(address, 0x10);
++	pm8001_ha->gs_tbl.reserved		= pm8001_mr32(address, 0x14);
++	pm8001_ha->gs_tbl.phy_state[0]	= pm8001_mr32(address, 0x18);
++	pm8001_ha->gs_tbl.phy_state[1]	= pm8001_mr32(address, 0x1C);
++	pm8001_ha->gs_tbl.phy_state[2]	= pm8001_mr32(address, 0x20);
++	pm8001_ha->gs_tbl.phy_state[3]	= pm8001_mr32(address, 0x24);
++	pm8001_ha->gs_tbl.phy_state[4]	= pm8001_mr32(address, 0x28);
++	pm8001_ha->gs_tbl.phy_state[5]	= pm8001_mr32(address, 0x2C);
++	pm8001_ha->gs_tbl.phy_state[6]	= pm8001_mr32(address, 0x30);
++	pm8001_ha->gs_tbl.phy_state[7]	= pm8001_mr32(address, 0x34);
++	pm8001_ha->gs_tbl.reserved1		= pm8001_mr32(address, 0x38);
++	pm8001_ha->gs_tbl.reserved2		= pm8001_mr32(address, 0x3C);
++	pm8001_ha->gs_tbl.reserved3		= pm8001_mr32(address, 0x40);
++	pm8001_ha->gs_tbl.recover_err_info[0]	= pm8001_mr32(address, 0x44);
++	pm8001_ha->gs_tbl.recover_err_info[1]	= pm8001_mr32(address, 0x48);
++	pm8001_ha->gs_tbl.recover_err_info[2]	= pm8001_mr32(address, 0x4C);
++	pm8001_ha->gs_tbl.recover_err_info[3]	= pm8001_mr32(address, 0x50);
++	pm8001_ha->gs_tbl.recover_err_info[4]	= pm8001_mr32(address, 0x54);
++	pm8001_ha->gs_tbl.recover_err_info[5]	= pm8001_mr32(address, 0x58);
++	pm8001_ha->gs_tbl.recover_err_info[6]	= pm8001_mr32(address, 0x5C);
++	pm8001_ha->gs_tbl.recover_err_info[7]	= pm8001_mr32(address, 0x60);
++}
++
++/**
++ * read_inbnd_queue_table - read the inbound queue table and save it.
++ * @pm8001_ha: our hba card information
++ */
++static void __devinit
++read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
++{
++	int inbQ_num = 1;
++	int i;
++	void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
++	for (i = 0; i < inbQ_num; i++) {
++		u32 offset = i * 0x24;
++		pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
++		      get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
++		pm8001_ha->inbnd_q_tbl[i].pi_offset =
++			pm8001_mr32(address, (offset + 0x18));
++	}
++}
++
++/**
++ * read_outbnd_queue_table - read the outbound queue table and save it.
++ * @pm8001_ha: our hba card information
++ */
++static void __devinit
++read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
++{
++	int outbQ_num = 1;
++	int i;
++	void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
++	for (i = 0; i < outbQ_num; i++) {
++		u32 offset = i * 0x24;
++		pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
++		      get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
++		pm8001_ha->outbnd_q_tbl[i].ci_offset =
++			pm8001_mr32(address, (offset + 0x18));
++	}
++}
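++
++/*
++ * Both queue counts above are fixed at 1, matching PM8001_MAX_INB_NUM
++ * and PM8001_MAX_OUTB_NUM in pm8001_defs.h.
++ */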
++
++/**
++ * init_default_table_values - init the default table.
++ * @pm8001_ha: our hba card information
++ */
++static void __devinit
++init_default_table_values(struct pm8001_hba_info *pm8001_ha)
++{
++	int qn = 1;
++	int i;
++	u32 offsetib, offsetob;
++	void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
++	void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
++
++	pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd			= 0;
++	pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3 		= 0;
++	pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7		= 0;
++	pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3		= 0;
++	pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7		= 0;
++	pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3	= 0;
++	pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7	= 0;
++	pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3	= 0;
++	pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7	= 0;
++	pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3	= 0;
++	pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7	= 0;
++
++	pm8001_ha->main_cfg_tbl.upper_event_log_addr		=
++		pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
++	pm8001_ha->main_cfg_tbl.lower_event_log_addr		=
++		pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
++	pm8001_ha->main_cfg_tbl.event_log_size	= PM8001_EVENT_LOG_SIZE;
++	pm8001_ha->main_cfg_tbl.event_log_option		= 0x01;
++	pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr	=
++		pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
++	pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr	=
++		pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
++	pm8001_ha->main_cfg_tbl.iop_event_log_size	= PM8001_EVENT_LOG_SIZE;
++	pm8001_ha->main_cfg_tbl.iop_event_log_option		= 0x01;
++	pm8001_ha->main_cfg_tbl.fatal_err_interrupt		= 0x01;
++	for (i = 0; i < qn; i++) {
++		pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt	=
++			0x00000100 | (0x00000040 << 16) | (0x00<<30);
++		pm8001_ha->inbnd_q_tbl[i].upper_base_addr	=
++			pm8001_ha->memoryMap.region[IB].phys_addr_hi;
++		pm8001_ha->inbnd_q_tbl[i].lower_base_addr	=
++			pm8001_ha->memoryMap.region[IB].phys_addr_lo;
++		pm8001_ha->inbnd_q_tbl[i].base_virt		=
++			(u8 *)pm8001_ha->memoryMap.region[IB].virt_ptr;
++		pm8001_ha->inbnd_q_tbl[i].total_length		=
++			pm8001_ha->memoryMap.region[IB].total_len;
++		pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr	=
++			pm8001_ha->memoryMap.region[CI].phys_addr_hi;
++		pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr	=
++			pm8001_ha->memoryMap.region[CI].phys_addr_lo;
++		pm8001_ha->inbnd_q_tbl[i].ci_virt		=
++			pm8001_ha->memoryMap.region[CI].virt_ptr;
++		offsetib = i * 0x20;
++		pm8001_ha->inbnd_q_tbl[i].pi_pci_bar		=
++			get_pci_bar_index(pm8001_mr32(addressib,
++				(offsetib + 0x14)));
++		pm8001_ha->inbnd_q_tbl[i].pi_offset		=
++			pm8001_mr32(addressib, (offsetib + 0x18));
++		pm8001_ha->inbnd_q_tbl[i].producer_idx		= 0;
++		pm8001_ha->inbnd_q_tbl[i].consumer_index	= 0;
++	}
++	for (i = 0; i < qn; i++) {
++		pm8001_ha->outbnd_q_tbl[i].element_size_cnt	=
++			256 | (64 << 16) | (1<<30);
++		pm8001_ha->outbnd_q_tbl[i].upper_base_addr	=
++			pm8001_ha->memoryMap.region[OB].phys_addr_hi;
++		pm8001_ha->outbnd_q_tbl[i].lower_base_addr	=
++			pm8001_ha->memoryMap.region[OB].phys_addr_lo;
++		pm8001_ha->outbnd_q_tbl[i].base_virt		=
++			(u8 *)pm8001_ha->memoryMap.region[OB].virt_ptr;
++		pm8001_ha->outbnd_q_tbl[i].total_length		=
++			pm8001_ha->memoryMap.region[OB].total_len;
++		pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr	=
++			pm8001_ha->memoryMap.region[PI].phys_addr_hi;
++		pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr	=
++			pm8001_ha->memoryMap.region[PI].phys_addr_lo;
++		pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay	=
++			0 | (0 << 16) | (0 << 24);
++		pm8001_ha->outbnd_q_tbl[i].pi_virt		=
++			pm8001_ha->memoryMap.region[PI].virt_ptr;
++		offsetob = i * 0x24;
++		pm8001_ha->outbnd_q_tbl[i].ci_pci_bar		=
++			get_pci_bar_index(pm8001_mr32(addressob,
++			offsetob + 0x14));
++		pm8001_ha->outbnd_q_tbl[i].ci_offset		=
++			pm8001_mr32(addressob, (offsetob + 0x18));
++		pm8001_ha->outbnd_q_tbl[i].consumer_idx		= 0;
++		pm8001_ha->outbnd_q_tbl[i].producer_index	= 0;
++	}
++}
++
++/**
++ * update_main_config_table - update the main default table to the HBA.
++ * @pm8001_ha: our hba card information
++ */
++static void __devinit
++update_main_config_table(struct pm8001_hba_info *pm8001_ha)
++{
++	void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
++	pm8001_mw32(address, 0x24,
++		pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd);
++	pm8001_mw32(address, 0x28,
++		pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3);
++	pm8001_mw32(address, 0x2C,
++		pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7);
++	pm8001_mw32(address, 0x30,
++		pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3);
++	pm8001_mw32(address, 0x34,
++		pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7);
++	pm8001_mw32(address, 0x38,
++		pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3);
++	pm8001_mw32(address, 0x3C,
++		pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7);
++	pm8001_mw32(address, 0x40,
++		pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3);
++	pm8001_mw32(address, 0x44,
++		pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7);
++	pm8001_mw32(address, 0x48,
++		pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3);
++	pm8001_mw32(address, 0x4C,
++		pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7);
++	pm8001_mw32(address, 0x50,
++		pm8001_ha->main_cfg_tbl.upper_event_log_addr);
++	pm8001_mw32(address, 0x54,
++		pm8001_ha->main_cfg_tbl.lower_event_log_addr);
++	pm8001_mw32(address, 0x58, pm8001_ha->main_cfg_tbl.event_log_size);
++	pm8001_mw32(address, 0x5C, pm8001_ha->main_cfg_tbl.event_log_option);
++	pm8001_mw32(address, 0x60,
++		pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr);
++	pm8001_mw32(address, 0x64,
++		pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr);
++	pm8001_mw32(address, 0x68, pm8001_ha->main_cfg_tbl.iop_event_log_size);
++	pm8001_mw32(address, 0x6C,
++		pm8001_ha->main_cfg_tbl.iop_event_log_option);
++	pm8001_mw32(address, 0x70,
++		pm8001_ha->main_cfg_tbl.fatal_err_interrupt);
++}
++
++/**
++ * update_inbnd_queue_table - update the inbound queue table to the HBA.
++ * @pm8001_ha: our hba card information
++ * @number: the inbound queue number
++ */
++static void __devinit
++update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number)
++{
++	void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
++	u16 offset = number * 0x20;
++	pm8001_mw32(address, offset + 0x00,
++		pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
++	pm8001_mw32(address, offset + 0x04,
++		pm8001_ha->inbnd_q_tbl[number].upper_base_addr);
++	pm8001_mw32(address, offset + 0x08,
++		pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
++	pm8001_mw32(address, offset + 0x0C,
++		pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
++	pm8001_mw32(address, offset + 0x10,
++		pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
++}
++
++/**
++ * update_outbnd_queue_table - update the outbound queue table to the HBA.
++ * @pm8001_ha: our hba card information
++ * @number: the outbound queue number
++ */
++static void __devinit
++update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number)
++{
++	void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
++	u16 offset = number * 0x24;
++	pm8001_mw32(address, offset + 0x00,
++		pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
++	pm8001_mw32(address, offset + 0x04,
++		pm8001_ha->outbnd_q_tbl[number].upper_base_addr);
++	pm8001_mw32(address, offset + 0x08,
++		pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
++	pm8001_mw32(address, offset + 0x0C,
++		pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr);
++	pm8001_mw32(address, offset + 0x10,
++		pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
++	pm8001_mw32(address, offset + 0x1C,
++		pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
++}
++
++/**
++ * bar4_shift - shift the BAR4 base address
++ * @pm8001_ha: our hba card information
++ * @shiftValue: shifting value for the memory BAR.
++ */
++static u32 bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
++{
++	u32 regVal;
++	u32 max_wait_count;
++
++	/* program the inbound AXI translation Lower Address */
++	pm8001_cw32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW, shiftValue);
++
++	/* confirm the setting is written */
++	max_wait_count = 1 * 1000 * 1000;  /* 1 sec */
++	do {
++		udelay(1);
++		regVal = pm8001_cr32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW);
++	} while ((regVal != shiftValue) && (--max_wait_count));
++
++	if (!max_wait_count) {
++		PM8001_INIT_DBG(pm8001_ha,
++			pm8001_printk("TIMEOUT:SPC_IBW_AXI_TRANSLATION_LOW"
++			" = 0x%x\n", regVal));
++		return -1;
++	}
++	return 0;
++}
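++
++/*
++ * Sketch of the typical call pattern (as used by the helpers below):
++ * shift BAR4 to the target window, access the registers through logical
++ * bar 2, then shift back to 0x0 when done:
++ *
++ *	if (-1 == bar4_shift(pm8001_ha, SHIFT_ADDR))
++ *		return;
++ *	value = pm8001_cr32(pm8001_ha, 2, offset);
++ *	...
++ *	bar4_shift(pm8001_ha, 0x0);
++ *
++ * SHIFT_ADDR and offset here are placeholders for the per-window
++ * constants defined by each caller.
++ */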
++
++/**
++ * mpi_set_phys_g3_with_ssc - set or clear SSC on all phys
++ * @pm8001_ha: our hba card information
++ * @SSCbit: 0 to disable SSC on all phys; 1 to enable SSC on all phys.
++ */
++static void __devinit
++mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
++{
++	u32 offset;
++	u32 value;
++	u32 i;
++
++#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
++#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
++#define SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET 0x1074
++#define SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET 0x1074
++#define PHY_SSC_BIT_SHIFT 13
++
++	/*
++	 * Using shifted destination address 0x3_0000:0x1074 + 0x4000*N (N=0:3)
++	 * Using shifted destination address 0x4_0000:0x1074 + 0x4000*(N-4) (N=4:7)
++	 */
++	if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR))
++		return;
++	/* set SSC bit of PHY 0 - 3 */
++	for (i = 0; i < 4; i++) {
++		offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
++		value = pm8001_cr32(pm8001_ha, 2, offset);
++		if (SSCbit)
++			value = value | (0x00000001 << PHY_SSC_BIT_SHIFT);
++		else
++			value = value & (~(0x00000001<<PHY_SSC_BIT_SHIFT));
++		pm8001_cw32(pm8001_ha, 2, offset, value);
++	}
++
++	/* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */
++	if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR))
++		return;
++
++	/* set SSC bit of PHY 4 - 7 */
++	for (i = 4; i < 8; i++) {
++		offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
++		value = pm8001_cr32(pm8001_ha, 2, offset);
++		if (SSCbit)
++			value = value | (0x00000001 << PHY_SSC_BIT_SHIFT);
++		else
++			value = value & (~(0x00000001<<PHY_SSC_BIT_SHIFT));
++		pm8001_cw32(pm8001_ha, 2, offset, value);
++	}
++
++	/* set the shifted destination address back to 0x0 to avoid erroneous operation */
++	bar4_shift(pm8001_ha, 0x0);
++	return;
++}
++
++/**
++ * mpi_set_open_retry_interval_reg - set the OPEN_REJECT (RETRY) interval
++ * @pm8001_ha: our hba card information
++ * @interval: interval time for each OPEN_REJECT (RETRY), in units of 1 us.
++ */
++static void __devinit
++mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
++				u32 interval)
++{
++	u32 offset;
++	u32 value;
++	u32 i;
++
++#define OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR 0x00030000
++#define OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR 0x00040000
++#define OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET 0x30B4
++#define OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET 0x30B4
++#define OPEN_RETRY_INTERVAL_REG_MASK 0x0000FFFF
++
++	value = interval & OPEN_RETRY_INTERVAL_REG_MASK;
++	/* shift bar and set the OPEN_REJECT(RETRY) interval time of PHY 0 -3.*/
++	if (-1 == bar4_shift(pm8001_ha,
++			     OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR))
++		return;
++	for (i = 0; i < 4; i++) {
++		offset = OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET + 0x4000 * i;
++		pm8001_cw32(pm8001_ha, 2, offset, value);
++	}
++
++	if (-1 == bar4_shift(pm8001_ha,
++			     OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR))
++		return;
++	for (i = 4; i < 8; i++) {
++		offset = OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
++		pm8001_cw32(pm8001_ha, 2, offset, value);
++	}
++	/* set the shifted destination address back to 0x0 to avoid erroneous operation */
++	bar4_shift(pm8001_ha, 0x0);
++	return;
++}
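++
++/*
++ * Only the low 16 bits of the interval are programmed (see
++ * OPEN_RETRY_INTERVAL_REG_MASK above), so with 1 us units the effective
++ * range is 0-65535 us; pm8001_chip_init() below passes a value of 7.
++ */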
++
++/**
++ * mpi_init_check - check firmware initialization status.
++ * @pm8001_ha: our hba card information
++ */
++static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
++{
++	u32 max_wait_count;
++	u32 value;
++	u32 gst_len_mpistate;
++	/* Write bit0=1 to the Inbound DoorBell Register to tell the SPC FW
++	that the table has been updated */
++	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_UPDATE);
++	/* wait until Inbound DoorBell Clear Register toggled */
++	max_wait_count = 1 * 1000 * 1000;/* 1 sec */
++	do {
++		udelay(1);
++		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
++		value &= SPC_MSGU_CFG_TABLE_UPDATE;
++	} while ((value != 0) && (--max_wait_count));
++
++	if (!max_wait_count)
++		return -1;
++	/* check the MPI-State for initialization */
++	gst_len_mpistate =
++		pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
++		GST_GSTLEN_MPIS_OFFSET);
++	if (GST_MPI_STATE_INIT != (gst_len_mpistate & GST_MPI_STATE_MASK))
++		return -1;
++	/* check MPI Initialization error */
++	gst_len_mpistate = gst_len_mpistate >> 16;
++	if (0x0000 != gst_len_mpistate)
++		return -1;
++	return 0;
++}
++
++/**
++ * check_fw_ready - the LLDD checks whether the FW is ready; if not, return an error.
++ * @pm8001_ha: our hba card information
++ */
++static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
++{
++	u32 value, value1;
++	u32 max_wait_count;
++	/* check error state */
++	value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
++	value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
++	/* check AAP error */
++	if (SCRATCH_PAD1_ERR == (value & SCRATCH_PAD_STATE_MASK)) {
++		/* error state */
++		value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
++		return -1;
++	}
++
++	/* check IOP error */
++	if (SCRATCH_PAD2_ERR == (value1 & SCRATCH_PAD_STATE_MASK)) {
++		/* error state */
++		value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
++		return -1;
++	}
++
++	/* bits 4-31 of scratch pad1 should be zero if it is not
++	in an error state */
++	if (value & SCRATCH_PAD1_STATE_MASK) {
++		/* error case */
++		pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
++		return -1;
++	}
++
++	/* bits 2 and 4-31 of scratch pad2 should be zero if it is not
++	in an error state */
++	if (value1 & SCRATCH_PAD2_STATE_MASK) {
++		/* error case */
++		return -1;
++	}
++
++	max_wait_count = 1 * 1000 * 1000;/* 1 sec timeout */
++
++	/* wait until scratch pad 1 and 2 registers are in the ready state */
++	do {
++		udelay(1);
++		value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)
++			& SCRATCH_PAD1_RDY;
++		value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)
++			& SCRATCH_PAD2_RDY;
++		if ((--max_wait_count) == 0)
++			return -1;
++	} while ((value != SCRATCH_PAD1_RDY) || (value1 != SCRATCH_PAD2_RDY));
++	return 0;
++}
++
++static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
++{
++	void __iomem *base_addr;
++	u32	value;
++	u32	offset;
++	u32	pcibar;
++	u32	pcilogic;
++
++	value = pm8001_cr32(pm8001_ha, 0, 0x44);
++	offset = value & 0x03FFFFFF;
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("Scratchpad 0 Offset: %x \n", offset));
++	pcilogic = (value & 0xFC000000) >> 26;
++	pcibar = get_pci_bar_index(pcilogic);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("Scratchpad 0 PCI BAR: %d \n", pcibar));
++	pm8001_ha->main_cfg_tbl_addr = base_addr =
++		pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
++	pm8001_ha->general_stat_tbl_addr =
++		base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x18);
++	pm8001_ha->inbnd_q_tbl_addr =
++		base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C);
++	pm8001_ha->outbnd_q_tbl_addr =
++		base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x20);
++}
++
++/**
++ * pm8001_chip_init - the main init function that initializes the whole PM8001 chip.
++ * @pm8001_ha: our hba card information
++ */
++static int __devinit pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
++{
++	/* check the firmware status */
++	if (-1 == check_fw_ready(pm8001_ha)) {
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("Firmware is not ready!\n"));
++		return -EBUSY;
++	}
++
++	/* Initialize pci space address eg: mpi offset */
++	init_pci_device_addresses(pm8001_ha);
++	init_default_table_values(pm8001_ha);
++	read_main_config_table(pm8001_ha);
++	read_general_status_table(pm8001_ha);
++	read_inbnd_queue_table(pm8001_ha);
++	read_outbnd_queue_table(pm8001_ha);
++	/* update the main config table, inbound table and outbound table */
++	update_main_config_table(pm8001_ha);
++	update_inbnd_queue_table(pm8001_ha, 0);
++	update_outbnd_queue_table(pm8001_ha, 0);
++	mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
++	mpi_set_open_retry_interval_reg(pm8001_ha, 7);
++	/* notify firmware update finished and check initialization status */
++	if (0 == mpi_init_check(pm8001_ha)) {
++		PM8001_INIT_DBG(pm8001_ha,
++			pm8001_printk("MPI initialize successful!\n"));
++	} else
++		return -EBUSY;
++	/* This register is a 16-bit timer with a resolution of 1us. This is
++	the timer used for interrupt delay/coalescing in the PCIe Application
++	Layer. Zero is not a valid value. A value of 1 in the register will
++	cause the interrupts to be normal. A value greater than 1 will cause
++	coalescing delays. */
++	pm8001_cw32(pm8001_ha, 1, 0x0033c0, 0x1);
++	pm8001_cw32(pm8001_ha, 1, 0x0033c4, 0x0);
++	return 0;
++}
++
++static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
++{
++	u32 max_wait_count;
++	u32 value;
++	u32 gst_len_mpistate;
++	init_pci_device_addresses(pm8001_ha);
++	/* Write bit1=1 to the Inbound DoorBell Register to tell the SPC FW
++	that the table is being reset */
++	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_RESET);
++
++	/* wait until Inbound DoorBell Clear Register toggled */
++	max_wait_count = 1 * 1000 * 1000;/* 1 sec */
++	do {
++		udelay(1);
++		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
++		value &= SPC_MSGU_CFG_TABLE_RESET;
++	} while ((value != 0) && (--max_wait_count));
++
++	if (!max_wait_count) {
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("TIMEOUT:IBDB value/=0x%x\n", value));
++		return -1;
++	}
++
++	/* check the MPI-State for termination in progress */
++	/* wait until Inbound DoorBell Clear Register toggled */
++	max_wait_count = 1 * 1000 * 1000;  /* 1 sec */
++	do {
++		udelay(1);
++		gst_len_mpistate =
++			pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
++			GST_GSTLEN_MPIS_OFFSET);
++		if (GST_MPI_STATE_UNINIT ==
++			(gst_len_mpistate & GST_MPI_STATE_MASK))
++			break;
++	} while (--max_wait_count);
++	if (!max_wait_count) {
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk(" TIME OUT MPI State = 0x%x\n",
++				gst_len_mpistate & GST_MPI_STATE_MASK));
++		return -1;
++	}
++	return 0;
++}
++
++/**
++ * soft_reset_ready_check - check whether the FW is ready for soft reset.
++ * @pm8001_ha: our hba card information
++ */
++static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
++{
++	u32 regVal, regVal1, regVal2;
++	if (mpi_uninit_check(pm8001_ha) != 0) {
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("MPI state is not ready\n"));
++		return -1;
++	}
++	/* read the scratch pad 2 register bit 2 */
++	regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)
++		& SCRATCH_PAD2_FWRDY_RST;
++	if (regVal == SCRATCH_PAD2_FWRDY_RST) {
++		PM8001_INIT_DBG(pm8001_ha,
++			pm8001_printk("Firmware is ready for reset .\n"));
++	} else {
++		/* Trigger NMI twice via RB6 */
++		if (-1 == bar4_shift(pm8001_ha, RB6_ACCESS_REG)) {
++			PM8001_FAIL_DBG(pm8001_ha,
++				pm8001_printk("Shift Bar4 to 0x%x failed\n",
++					RB6_ACCESS_REG));
++			return -1;
++		}
++		pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET,
++			RB6_MAGIC_NUMBER_RST);
++		pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET, RB6_MAGIC_NUMBER_RST);
++		/* wait for 100 ms */
++		mdelay(100);
++		regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2) &
++			SCRATCH_PAD2_FWRDY_RST;
++		if (regVal != SCRATCH_PAD2_FWRDY_RST) {
++			regVal1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
++			regVal2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
++			PM8001_FAIL_DBG(pm8001_ha,
++				pm8001_printk("TIMEOUT:MSGU_SCRATCH_PAD1"
++				"=0x%x, MSGU_SCRATCH_PAD2=0x%x\n",
++				regVal1, regVal2));
++			PM8001_FAIL_DBG(pm8001_ha,
++				pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
++				pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)));
++			PM8001_FAIL_DBG(pm8001_ha,
++				pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
++				pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
++			return -1;
++		}
++	}
++	return 0;
++}
++
++/**
++ * pm8001_chip_soft_rst - soft reset the PM8001 chip, clearing all
++ * FW register state back to its original status.
++ * @pm8001_ha: our hba card information
++ * @signature: signature in host scratch pad0 register.
++ */
++static int
++pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
++{
++	u32	regVal, toggleVal;
++	u32	max_wait_count;
++	u32	regVal1, regVal2, regVal3;
++
++	/* step1: Check FW is ready for soft reset */
++	if (soft_reset_ready_check(pm8001_ha) != 0) {
++		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("FW is not ready\n"));
++		return -1;
++	}
++
++	/* step 2: clear NMI status register on AAP1 and IOP, write the same
++	value to clear */
++	/* map 0x60000 to BAR4(0x20), BAR2(win) */
++	if (-1 == bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) {
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("Shift Bar4 to 0x%x failed\n",
++			MBIC_AAP1_ADDR_BASE));
++		return -1;
++	}
++	regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("MBIC - NMI Enable VPE0 (IOP)= 0x%x\n", regVal));
++	pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP, 0x0);
++	/* map 0x70000 to BAR4(0x20), BAR2(win) */
++	if (-1 == bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) {
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("Shift Bar4 to 0x%x failed\n",
++			MBIC_IOP_ADDR_BASE));
++		return -1;
++	}
++	regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("MBIC - NMI Enable VPE0 (AAP1)= 0x%x\n", regVal));
++	pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1, 0x0);
++
++	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("PCIE -Event Interrupt Enable = 0x%x\n", regVal));
++	pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE, 0x0);
++
++	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("PCIE - Event Interrupt  = 0x%x\n", regVal));
++	pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT, regVal);
++
++	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("PCIE -Error Interrupt Enable = 0x%x\n", regVal));
++	pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE, 0x0);
++
++	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("PCIE - Error Interrupt = 0x%x\n", regVal));
++	pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT, regVal);
++
++	/* read the scratch pad 1 register bit 2 */
++	regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)
++		& SCRATCH_PAD1_RST;
++	toggleVal = regVal ^ SCRATCH_PAD1_RST;
++
++	/* set signature in host scratch pad0 register to tell SPC that the
++	host performs the soft reset */
++	pm8001_cw32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0, signature);
++
++	/* read required registers for confirming */
++	/* map 0x0700000 to BAR4(0x20), BAR2(win) */
++	if (-1 == bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("Shift Bar4 to 0x%x failed\n",
++			GSM_ADDR_BASE));
++		return -1;
++	}
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("GSM 0x0(0x00007b88)-GSM Configuration and"
++		" Reset = 0x%x\n",
++		pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
++
++	/* step 3: host read GSM Configuration and Reset register */
++	regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
++	/* Put those bits to low */
++	/* GSM XCBI offset = 0x70 0000
++	0x00 Bit 13 COM_SLV_SW_RSTB 1
++	0x00 Bit 12 QSSP_SW_RSTB 1
++	0x00 Bit 11 RAAE_SW_RSTB 1
++	0x00 Bit 9 RB_1_SW_RSTB 1
++	0x00 Bit 8 SM_SW_RSTB 1
++	*/
++	regVal &= ~(0x00003b00);
++	/* host write GSM Configuration and Reset register */
++	pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("GSM 0x0 (0x00007b88 ==> 0x00004088) - GSM "
++		"Configuration and Reset is set to = 0x%x\n",
++		pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
++
++	/* step 4: */
++	/* disable GSM - Read Address Parity Check */
++	regVal1 = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("GSM 0x700038 - Read Address Parity Check "
++		"Enable = 0x%x\n", regVal1));
++	pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, 0x0);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("GSM 0x700038 - Read Address Parity Check Enable"
++		"is set to = 0x%x\n",
++		pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)));
++
++	/* disable GSM - Write Address Parity Check */
++	regVal2 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("GSM 0x700040 - Write Address Parity Check"
++		" Enable = 0x%x\n", regVal2));
++	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, 0x0);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("GSM 0x700040 - Write Address Parity Check "
++		"Enable is set to = 0x%x\n",
++		pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)));
++
++	/* disable GSM - Write Data Parity Check */
++	regVal3 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("GSM 0x300048 - Write Data Parity Check"
++		" Enable = 0x%x\n", regVal3));
++	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, 0x0);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("GSM 0x300048 - Write Data Parity Check Enable"
++		"is set to = 0x%x\n",
++	pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)));
++
++	/* step 5: delay 10 usec */
++	udelay(10);
++	/* step 5-b: set GPIO-0 output control to tristate anyway */
++	if (-1 == bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) {
++		PM8001_INIT_DBG(pm8001_ha,
++				pm8001_printk("Shift Bar4 to 0x%x failed\n",
++				GPIO_ADDR_BASE));
++		return -1;
++	}
++	regVal = pm8001_cr32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("GPIO Output Control Register:"
++		" = 0x%x\n", regVal));
++	/* set GPIO-0 output control to tri-state */
++	regVal &= 0xFFFFFFFC;
++	pm8001_cw32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET, regVal);
++
++	/* Step 6: Reset the IOP and AAP1 */
++	/* map 0x00000 to BAR4(0x20), BAR2(win) */
++	if (-1 == bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
++			SPC_TOP_LEVEL_ADDR_BASE));
++		return -1;
++	}
++	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("Top Register before resetting IOP/AAP1"
++		":= 0x%x\n", regVal));
++	regVal &= ~(SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS);
++	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
++
++	/* step 7: Reset the BDMA/OSSP */
++	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("Top Register before resetting BDMA/OSSP"
++		": = 0x%x\n", regVal));
++	regVal &= ~(SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
++	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
++
++	/* step 8: delay 10 usec */
++	udelay(10);
++
++	/* step 9: bring the BDMA and OSSP out of reset */
++	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("Top Register before bringing up BDMA/OSSP"
++		":= 0x%x\n", regVal));
++	regVal |= (SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
++	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
++
++	/* step 10: delay 10 usec */
++	udelay(10);
++
++	/* step 11: reads and sets the GSM Configuration and Reset Register */
++	/* map 0x0700000 to BAR4(0x20), BAR2(win) */
++	if (-1 == bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
++			GSM_ADDR_BASE));
++		return -1;
++	}
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("GSM 0x0 (0x00007b88)-GSM Configuration and "
++		"Reset = 0x%x\n", pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
++	regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
++	/* Put those bits to high */
++	/* GSM XCBI offset = 0x70 0000
++	0x00 Bit 13 COM_SLV_SW_RSTB 1
++	0x00 Bit 12 QSSP_SW_RSTB 1
++	0x00 Bit 11 RAAE_SW_RSTB 1
++	0x00 Bit 9   RB_1_SW_RSTB 1
++	0x00 Bit 8   SM_SW_RSTB 1
++	*/
++	regVal |= (GSM_CONFIG_RESET_VALUE);
++	pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("GSM (0x00004088 ==> 0x00007b88) - GSM"
++		" Configuration and Reset is set to = 0x%x\n",
++		pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
++
++	/* step 12: Restore GSM - Read Address Parity Check */
++	regVal = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
++	/* just for debugging */
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("GSM 0x700038 - Read Address Parity Check Enable"
++		" = 0x%x\n", regVal));
++	pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, regVal1);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("GSM 0x700038 - Read Address Parity"
++		" Check Enable is set to = 0x%x\n",
++		pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)));
++	/* Restore GSM - Write Address Parity Check */
++	regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
++	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, regVal2);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("GSM 0x700040 - Write Address Parity Check"
++		" Enable is set to = 0x%x\n",
++		pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)));
++	/* Restore GSM - Write Data Parity Check */
++	regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
++	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, regVal3);
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("GSM 0x700048 - Write Data Parity Check Enable"
++		"is set to = 0x%x\n",
++		pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)));
++
++	/* step 13: bring the IOP and AAP1 out of reset */
++	/* map 0x00000 to BAR4(0x20), BAR2(win) */
++	if (-1 == bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("Shift Bar4 to 0x%x failed\n",
++			SPC_TOP_LEVEL_ADDR_BASE));
++		return -1;
++	}
++	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
++	regVal |= (SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS);
++	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);
++
++	/* step 14: delay 10 usec - Normal Mode */
++	udelay(10);
++	/* check Soft Reset Normal mode or Soft Reset HDA mode */
++	if (signature == SPC_SOFT_RESET_SIGNATURE) {
++		/* step 15 (Normal Mode): wait until scratch pad1 register
++		bit 2 toggled */
++		max_wait_count = 2 * 1000 * 1000;/* 2 sec */
++		do {
++			udelay(1);
++			regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) &
++				SCRATCH_PAD1_RST;
++		} while ((regVal != toggleVal) && (--max_wait_count));
++
++		if (!max_wait_count) {
++			regVal = pm8001_cr32(pm8001_ha, 0,
++				MSGU_SCRATCH_PAD_1);
++			PM8001_FAIL_DBG(pm8001_ha,
++				pm8001_printk("TIMEOUT : ToggleVal 0x%x,"
++				"MSGU_SCRATCH_PAD1 = 0x%x\n",
++				toggleVal, regVal));
++			PM8001_FAIL_DBG(pm8001_ha,
++				pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
++				pm8001_cr32(pm8001_ha, 0,
++				MSGU_SCRATCH_PAD_0)));
++			PM8001_FAIL_DBG(pm8001_ha,
++				pm8001_printk("SCRATCH_PAD2 value = 0x%x\n",
++				pm8001_cr32(pm8001_ha, 0,
++				MSGU_SCRATCH_PAD_2)));
++			PM8001_FAIL_DBG(pm8001_ha,
++				pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
++				pm8001_cr32(pm8001_ha, 0,
++				MSGU_SCRATCH_PAD_3)));
++			return -1;
++		}
++
++		/* step 16 (Normal) - Clear ODMR and ODCR */
++		pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
++		pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
++
++		/* step 17 (Normal Mode): wait for the FW and IOP to get
++		ready - 1 sec timeout */
++		/* Wait for the SPC Configuration Table to be ready */
++		if (check_fw_ready(pm8001_ha) == -1) {
++			regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
++			/* return error if MPI Configuration Table not ready */
++			PM8001_INIT_DBG(pm8001_ha,
++				pm8001_printk("FW not ready SCRATCH_PAD1"
++				" = 0x%x\n", regVal));
++			regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
++			/* return error if MPI Configuration Table not ready */
++			PM8001_INIT_DBG(pm8001_ha,
++				pm8001_printk("FW not ready SCRATCH_PAD2"
++				" = 0x%x\n", regVal));
++			PM8001_INIT_DBG(pm8001_ha,
++				pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
++				pm8001_cr32(pm8001_ha, 0,
++				MSGU_SCRATCH_PAD_0)));
++			PM8001_INIT_DBG(pm8001_ha,
++				pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
++				pm8001_cr32(pm8001_ha, 0,
++				MSGU_SCRATCH_PAD_3)));
++			return -1;
++		}
++	}
++
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("SPC soft reset Complete\n"));
++	return 0;
++}
++
++static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
++{
++	u32 i;
++	u32 regVal;
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("chip reset start\n"));
++
++	/* do SPC chip reset. */
++	regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET);
++	regVal &= ~(SPC_REG_RESET_DEVICE);
++	pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal);
++
++	/* delay 10 usec */
++	udelay(10);
++
++	/* bring chip reset out of reset */
++	regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET);
++	regVal |= SPC_REG_RESET_DEVICE;
++	pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal);
++
++	/* delay 10 usec */
++	udelay(10);
++
++	/* wait for 20 msec until the firmware gets reloaded */
++	i = 20;
++	do {
++		mdelay(1);
++	} while ((--i) != 0);
++
++	PM8001_INIT_DBG(pm8001_ha,
++		pm8001_printk("chip reset finished\n"));
++}
++
++/**
++ * pm8001_chip_iounmap - unmap the I/O regions that were mapped at init time.
++ * @pm8001_ha: our hba card information
++ */
++static void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
++{
++	s8 bar, logical = 0;
++	for (bar = 0; bar < 6; bar++) {
++		/*
++		** logical BARs for SPC:
++		** bar 0 and 1 - logical BAR0
++		** bar 2 and 3 - logical BAR1
++		** bar4 - logical BAR2
++		** bar5 - logical BAR3
++		** Skip the appropriate assignments:
++		*/
++		if ((bar == 1) || (bar == 3))
++			continue;
++		if (pm8001_ha->io_mem[logical].memvirtaddr) {
++			iounmap(pm8001_ha->io_mem[logical].memvirtaddr);
++			logical++;
++		}
++	}
++}
++
++/**
++ * pm8001_chip_intx_interrupt_enable - enable PM8001 chip INT-X interrupt
++ * @pm8001_ha: our hba card information
++ */
++static void
++pm8001_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
++{
++	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
++	pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
++}
++
++/**
++ * pm8001_chip_intx_interrupt_disable - disable PM8001 chip INT-X interrupt
++ * @pm8001_ha: our hba card information
++ */
++static void
++pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
++{
++	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_MASK_ALL);
++}
++
++/**
++ * pm8001_chip_msix_interrupt_enable - enable PM8001 chip MSI-X interrupt
++ * @pm8001_ha: our hba card information
++ */
++static void
++pm8001_chip_msix_interrupt_enable(struct pm8001_hba_info *pm8001_ha,
++	u32 int_vec_idx)
++{
++	u32 msi_index;
++	u32 value;
++	msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
++	msi_index += MSIX_TABLE_BASE;
++	pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_ENABLE);
++	value = (1 << int_vec_idx);
++	pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, value);
++}
++
++/**
++ * pm8001_chip_msix_interrupt_disable - disable PM8001 chip MSI-X interrupt
++ * @pm8001_ha: our hba card information
++ */
++static void
++pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
++	u32 int_vec_idx)
++{
++	u32 msi_index;
++	msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
++	msi_index += MSIX_TABLE_BASE;
++	pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_DISABLE);
++}
++
++/**
++ * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
++ * @pm8001_ha: our hba card information
++ */
++static void
++pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
++{
++#ifdef PM8001_USE_MSIX
++	pm8001_chip_msix_interrupt_enable(pm8001_ha, 0);
++	return;
++#endif
++	pm8001_chip_intx_interrupt_enable(pm8001_ha);
++}
++
++/**
++ * pm8001_chip_interrupt_disable - disable PM8001 chip interrupt
++ * @pm8001_ha: our hba card information
++ */
++static void
++pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
++{
++#ifdef PM8001_USE_MSIX
++	pm8001_chip_msix_interrupt_disable(pm8001_ha, 0);
++	return;
++#endif
++	pm8001_chip_intx_interrupt_disable(pm8001_ha);
++}
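++
++/*
++ * With PM8001_USE_MSIX defined, the early returns above make the INT-X
++ * calls unreachable, so only one of the two interrupt schemes is ever
++ * exercised by these enable/disable wrappers.
++ */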
++
++/**
++ * mpi_msg_free_get - get a free message buffer from the inbound queue.
++ * @circularQ: the inbound queue we want to transfer to the HBA.
++ * @messageSize: the message size of this transfer, normally 64 bytes
++ * @messagePtr: the pointer to the message.
++ */
++static u32 mpi_msg_free_get(struct inbound_queue_table *circularQ,
++			    u16 messageSize, void **messagePtr)
++{
++	u32 offset, consumer_index;
++	struct mpi_msg_hdr *msgHeader;
++	u8 bcCount = 1; /* only support single buffer */
++
++	/* Check whether the requested message size can be allocated in this queue */
++	if (messageSize > 64) {
++		*messagePtr = NULL;
++		return -1;
++	}
++
++	/* Refresh the consumer index from the ci_virt mirror */
++	consumer_index = pm8001_read_32(circularQ->ci_virt);
++	circularQ->consumer_index = cpu_to_le32(consumer_index);
++	if (((circularQ->producer_idx + bcCount) % 256) ==
++		circularQ->consumer_index) {
++		*messagePtr = NULL;
++		return -1;
++	}
++	/* get memory IOMB buffer address */
++	offset = circularQ->producer_idx * 64;
++	/* increment to next bcCount element */
++	circularQ->producer_idx = (circularQ->producer_idx + bcCount) % 256;
++	/* Add that offset to the region's base virtual address; the
++	message body follows the message header */
++	msgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + offset);
++	*messagePtr = ((void *)msgHeader) + sizeof(struct mpi_msg_hdr);
++	return 0;
++}
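++
++/*
++ * The ring arithmetic above assumes 256 elements of 64 bytes each,
++ * matching the element size/count programmed by
++ * init_default_table_values(); the queue is treated as full when
++ * advancing the producer index would make it equal to the consumer
++ * index.
++ */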
++
++/**
++ * mpi_build_cmd - build a message for the inbound queue and update the PI
++ * to tell the FW to fetch this message from the IOMB.
++ * @pm8001_ha: our hba card information
++ * @circularQ: the inbound queue we want to transfer to HBA.
++ * @opCode: the operation code for a command both the LLDD and FW recognize.
++ * @payload: the command payload of each operation command.
++ */
++static u32 mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
++			 struct inbound_queue_table *circularQ,
++			 u32 opCode, void *payload)
++{
++	u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
++	u32 responseQueue = 0;
++	void *pMessage;
++
++	if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) {
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("No free mpi buffer \n"));
++		return -1;
++	}
++
++	/*Copy to the payload*/
++	memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr)));
++
++	/*Build the header*/
++	Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24)
++		| ((responseQueue & 0x3F) << 16)
++		| ((category & 0xF) << 12) | (opCode & 0xFFF));
++
++	pm8001_write_32((pMessage - 4), 0, cpu_to_le32(Header));
++	/*Update the PI to the firmware*/
++	pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
++		circularQ->pi_offset, circularQ->producer_idx);
++	PM8001_IO_DBG(pm8001_ha,
++		pm8001_printk("after PI= %d CI= %d \n", circularQ->producer_idx,
++		circularQ->consumer_index));
++	return 0;
++}
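++
++/*
++ * Worked example of the header encoding above: with the defaults
++ * hpriority = 0, bc = 1, responseQueue = 0 and category = 0x02, the
++ * header reduces to 0x80000000 | 0x01000000 | 0x00002000 | opCode,
++ * so an opCode of 0x04 would yield Header = 0x81002004.
++ */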
++
++static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha,
++			    struct outbound_queue_table *circularQ, u8 bc)
++{
++	u32 producer_index;
++	/* free the circular queue buffer elements associated with the message*/
++	circularQ->consumer_idx = (circularQ->consumer_idx + bc) % 256;
++	/* update the CI of outbound queue */
++	pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, circularQ->ci_offset,
++		circularQ->consumer_idx);
++	/* Update the producer index from SPC*/
++	producer_index = pm8001_read_32(circularQ->pi_virt);
++	circularQ->producer_index = cpu_to_le32(producer_index);
++	PM8001_IO_DBG(pm8001_ha,
++		pm8001_printk(" CI=%d PI=%d\n", circularQ->consumer_idx,
++		circularQ->producer_index));
++	return 0;
++}
++
++/**
++ * mpi_msg_consume - get the MPI message from the outbound queue message table.
++ * @pm8001_ha: our hba card information
++ * @circularQ: the outbound queue table.
++ * @messagePtr1: the message contents of this outbound message.
++ * @pBC: the buffer count of this message.
++ */
++static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
++			   struct outbound_queue_table *circularQ,
++			   void **messagePtr1, u8 *pBC)
++{
++	struct mpi_msg_hdr	*msgHeader;
++	__le32	msgHeader_tmp;
++	u32 header_tmp;
++	do {
++		/* If there are not-yet-delivered messages ... */
++		if (circularQ->producer_index != circularQ->consumer_idx) {
++			PM8001_IO_DBG(pm8001_ha,
++				pm8001_printk("process an IOMB\n"));
++			/*Get the pointer to the circular queue buffer element*/
++			msgHeader = (struct mpi_msg_hdr *)
++				(circularQ->base_virt +
++				circularQ->consumer_idx * 64);
++			/* read header */
++			header_tmp = pm8001_read_32(msgHeader);
++			msgHeader_tmp = cpu_to_le32(header_tmp);
++			if (0 != (msgHeader_tmp & 0x80000000)) {
++				if (OPC_OUB_SKIP_ENTRY !=
++					(msgHeader_tmp & 0xfff)) {
++					*messagePtr1 =
++						((u8 *)msgHeader) +
++						sizeof(struct mpi_msg_hdr);
++					*pBC = (u8)((msgHeader_tmp >> 24) &
++						0x1f);
++					PM8001_IO_DBG(pm8001_ha,
++						pm8001_printk("mpi_msg_consume"
++						": CI=%d PI=%d msgHeader=%x\n",
++						circularQ->consumer_idx,
++						circularQ->producer_index,
++						msgHeader_tmp));
++					return MPI_IO_STATUS_SUCCESS;
++				} else {
++					u32 producer_index;
++					void *pi_virt = circularQ->pi_virt;
++					/* free the circular queue buffer
++					elements associated with the message*/
++					circularQ->consumer_idx =
++						(circularQ->consumer_idx +
++						((msgHeader_tmp >> 24) & 0x1f))
++						% 256;
++					/* update the CI of outbound queue */
++					pm8001_cw32(pm8001_ha,
++						circularQ->ci_pci_bar,
++						circularQ->ci_offset,
++						circularQ->consumer_idx);
++					/* Update the producer index from SPC */
++					producer_index =
++						pm8001_read_32(pi_virt);
++					circularQ->producer_index =
++						cpu_to_le32(producer_index);
++				}
++			} else
++				return MPI_IO_STATUS_FAIL;
++		}
++	} while (circularQ->producer_index != circularQ->consumer_idx);
++	/* until there are no more not-yet-delivered messages */
++	/* report empty */
++	return MPI_IO_STATUS_BUSY;
++}
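++
++/*
++ * Note on the header test above: bit 31 of the IOMB header marks the
++ * entry as valid, bits 24-28 carry the buffer count, and entries whose
++ * opcode is OPC_OUB_SKIP_ENTRY carry no payload and are consumed
++ * silently.
++ */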
++
++static void pm8001_work_queue(struct work_struct *work)
++{
++	struct delayed_work *dw = container_of(work, struct delayed_work, work);
++	struct pm8001_wq *wq = container_of(dw, struct pm8001_wq, work_q);
++	struct pm8001_device *pm8001_dev;
++	struct domain_device	*dev;
++
++	switch (wq->handler) {
++	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
++		pm8001_dev = wq->data;
++		dev = pm8001_dev->sas_device;
++		pm8001_I_T_nexus_reset(dev);
++		break;
++	case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
++		pm8001_dev = wq->data;
++		dev = pm8001_dev->sas_device;
++		pm8001_I_T_nexus_reset(dev);
++		break;
++	case IO_DS_IN_ERROR:
++		pm8001_dev = wq->data;
++		dev = pm8001_dev->sas_device;
++		pm8001_I_T_nexus_reset(dev);
++		break;
++	case IO_DS_NON_OPERATIONAL:
++		pm8001_dev = wq->data;
++		dev = pm8001_dev->sas_device;
++		pm8001_I_T_nexus_reset(dev);
++		break;
++	}
++	list_del(&wq->entry);
++	kfree(wq);
++}
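++
++/*
++ * All four events handled above currently take the same action, an
++ * I_T nexus reset on the affected device.
++ */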
++
++static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
++			       int handler)
++{
++	struct pm8001_wq *wq;
++	int ret = 0;
++
++	wq = kmalloc(sizeof(struct pm8001_wq), GFP_ATOMIC);
++	if (wq) {
++		wq->pm8001_ha = pm8001_ha;
++		wq->data = data;
++		wq->handler = handler;
++		INIT_DELAYED_WORK(&wq->work_q, pm8001_work_queue);
++		list_add_tail(&wq->entry, &pm8001_ha->wq_list);
++		schedule_delayed_work(&wq->work_q, 0);
++	} else
++		ret = -ENOMEM;
++
++	return ret;
++}
++
++/**
++ * mpi_ssp_completion - process the FW response to an SSP request.
++ * @pm8001_ha: our hba card information
++ * @piomb: the message contents of this outbound message.
++ *
++ * When the FW has completed an SSP request (for example an IO request) and
++ * has filled the SG buffers with data, it raises this event to signal that
++ * the job is finished and the corresponding buffer should be checked.
++ * We then notify the caller, which may be waiting on the result, so it can
++ * tell the upper layer that the task has finished.
++ */
++static int
++mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
++{
++	struct sas_task *t;
++	struct pm8001_ccb_info *ccb;
++	unsigned long flags;
++	u32 status;
++	u32 param;
++	u32 tag;
++	struct ssp_completion_resp *psspPayload;
++	struct task_status_struct *ts;
++	struct ssp_response_iu *iu;
++	struct pm8001_device *pm8001_dev;
++	psspPayload = (struct ssp_completion_resp *)(piomb + 4);
++	status = le32_to_cpu(psspPayload->status);
++	tag = le32_to_cpu(psspPayload->tag);
++	ccb = &pm8001_ha->ccb_info[tag];
++	pm8001_dev = ccb->device;
++	param = le32_to_cpu(psspPayload->param);
++
++	PM8001_IO_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SSP_COMP\n"));
++	t = ccb->task;
++
++	if (status)
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("sas IO status 0x%x\n", status));
++	if (unlikely(!t || !t->lldd_task || !t->dev))
++		return -1;
++	ts = &t->task_status;
++	switch (status) {
++	case IO_SUCCESS:
++		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS"
++			",param = %d \n", param));
++		if (param == 0) {
++			ts->resp = SAS_TASK_COMPLETE;
++			ts->stat = SAM_GOOD;
++		} else {
++			ts->resp = SAS_TASK_COMPLETE;
++			ts->stat = SAS_PROTO_RESPONSE;
++			ts->residual = param;
++			iu = &psspPayload->ssp_resp_iu;
++			sas_ssp_task_response(pm8001_ha->dev, t, iu);
++		}
++		if (pm8001_dev)
++			pm8001_dev->running_req--;
++		break;
++	case IO_ABORTED:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_ABORTED IOMB Tag \n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_ABORTED_TASK;
++		break;
++	case IO_UNDERFLOW:
++		/* SSP Completion with error */
++		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW"
++			",param = %d \n", param));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DATA_UNDERRUN;
++		ts->residual = param;
++		if (pm8001_dev)
++			pm8001_dev->running_req--;
++		break;
++	case IO_NO_DEVICE:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_NO_DEVICE\n"));
++		ts->resp = SAS_TASK_UNDELIVERED;
++		ts->stat = SAS_PHY_DOWN;
++		break;
++	case IO_XFER_ERROR_BREAK:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		break;
++	case IO_XFER_ERROR_PHY_NOT_READY:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
++		break;
++	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
++		PM8001_IO_DBG(pm8001_ha,
++		pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_EPROTO;
++		break;
++	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
++		break;
++	case IO_OPEN_CNX_ERROR_BREAK:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
++		break;
++	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
++		if (!t->uldd_task)
++			pm8001_handle_event(pm8001_ha,
++				pm8001_dev,
++				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
++		break;
++	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
++		break;
++	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
++			"NOT_SUPPORTED\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
++		break;
++	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
++		ts->resp = SAS_TASK_UNDELIVERED;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
++		break;
++	case IO_XFER_ERROR_NAK_RECEIVED:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		break;
++	case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_NAK_R_ERR;
++		break;
++	case IO_XFER_ERROR_DMA:
++		PM8001_IO_DBG(pm8001_ha,
++		pm8001_printk("IO_XFER_ERROR_DMA\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		break;
++	case IO_XFER_OPEN_RETRY_TIMEOUT:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
++		break;
++	case IO_XFER_ERROR_OFFSET_MISMATCH:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		break;
++	case IO_PORT_IN_RESET:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_PORT_IN_RESET\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		break;
++	case IO_DS_NON_OPERATIONAL:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		if (!t->uldd_task)
++			pm8001_handle_event(pm8001_ha,
++				pm8001_dev,
++				IO_DS_NON_OPERATIONAL);
++		break;
++	case IO_DS_IN_RECOVERY:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_DS_IN_RECOVERY\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		break;
++	case IO_TM_TAG_NOT_FOUND:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_TM_TAG_NOT_FOUND\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		break;
++	case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		break;
++	case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
++		break;
++	default:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("Unknown status 0x%x\n", status));
++		/* not allowed case. Therefore, return failed status */
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		break;
++	}
++	PM8001_IO_DBG(pm8001_ha,
++		pm8001_printk("scsi_status = 0x%x\n",
++		psspPayload->ssp_resp_iu.status));
++	spin_lock_irqsave(&t->task_state_lock, flags);
++	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
++	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
++	t->task_state_flags |= SAS_TASK_STATE_DONE;
++	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
++		spin_unlock_irqrestore(&t->task_state_lock, flags);
++		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
++			" io_status 0x%x resp 0x%x "
++			"stat 0x%x but aborted by upper layer!\n",
++			t, status, ts->resp, ts->stat));
++		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++	} else {
++		spin_unlock_irqrestore(&t->task_state_lock, flags);
++		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++		mb();/* in order to force CPU ordering */
++		t->task_done(t);
++	}
++	return 0;
++}
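++/*
++ * All the completion handlers in this file end the same way: with
++ * task_state_lock held the task is flagged SAS_TASK_STATE_DONE; if the
++ * upper layer has already aborted it, only the ccb is freed, otherwise the
++ * ccb is freed and t->task_done(t) is called after an mb() so that the
++ * flag updates are ordered before libsas sees the completion.
++ */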
++
++/*See the comments for mpi_ssp_completion */
++static int mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
++{
++	struct sas_task *t;
++	unsigned long flags;
++	struct task_status_struct *ts;
++	struct pm8001_ccb_info *ccb;
++	struct pm8001_device *pm8001_dev;
++	struct ssp_event_resp *psspPayload =
++		(struct ssp_event_resp *)(piomb + 4);
++	u32 event = le32_to_cpu(psspPayload->event);
++	u32 tag = le32_to_cpu(psspPayload->tag);
++	u32 port_id = le32_to_cpu(psspPayload->port_id);
++	u32 dev_id = le32_to_cpu(psspPayload->device_id);
++
++	ccb = &pm8001_ha->ccb_info[tag];
++	t = ccb->task;
++	pm8001_dev = ccb->device;
++	if (event)
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("sas IO status 0x%x\n", event));
++	if (unlikely(!t || !t->lldd_task || !t->dev))
++		return -1;
++	ts = &t->task_status;
++	PM8001_IO_DBG(pm8001_ha,
++		pm8001_printk("port_id = %x,device_id = %x\n",
++		port_id, dev_id));
++	switch (event) {
++	case IO_OVERFLOW:
++		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OVERFLOW\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DATA_OVERRUN;
++		ts->residual = 0;
++		if (pm8001_dev)
++			pm8001_dev->running_req--;
++		break;
++	case IO_XFER_ERROR_BREAK:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_INTERRUPTED;
++		break;
++	case IO_XFER_ERROR_PHY_NOT_READY:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
++		break;
++	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT"
++			"_SUPPORTED\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_EPROTO;
++		break;
++	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
++		break;
++	case IO_OPEN_CNX_ERROR_BREAK:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
++		break;
++	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
++		if (!t->uldd_task)
++			pm8001_handle_event(pm8001_ha,
++				pm8001_dev,
++				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
++		break;
++	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
++		break;
++	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
++			"NOT_SUPPORTED\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
++		break;
++	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
++		PM8001_IO_DBG(pm8001_ha,
++		       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
++		break;
++	case IO_XFER_ERROR_NAK_RECEIVED:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		break;
++	case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_NAK_R_ERR;
++		break;
++	case IO_XFER_OPEN_RETRY_TIMEOUT:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
++		break;
++	case IO_XFER_ERROR_UNEXPECTED_PHASE:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DATA_OVERRUN;
++		break;
++	case IO_XFER_ERROR_XFER_RDY_OVERRUN:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DATA_OVERRUN;
++		break;
++	case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
++		PM8001_IO_DBG(pm8001_ha,
++		       pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DATA_OVERRUN;
++		break;
++	case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
++		PM8001_IO_DBG(pm8001_ha,
++		pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DATA_OVERRUN;
++		break;
++	case IO_XFER_ERROR_OFFSET_MISMATCH:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DATA_OVERRUN;
++		break;
++	case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DATA_OVERRUN;
++		break;
++	case IO_XFER_CMD_FRAME_ISSUED:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("  IO_XFER_CMD_FRAME_ISSUED\n"));
++		return 0;
++	default:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("Unknown status 0x%x\n", event));
++		/* not allowed case. Therefore, return failed status */
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DATA_OVERRUN;
++		break;
++	}
++	spin_lock_irqsave(&t->task_state_lock, flags);
++	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
++	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
++	t->task_state_flags |= SAS_TASK_STATE_DONE;
++	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
++		spin_unlock_irqrestore(&t->task_state_lock, flags);
++		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
++			" event 0x%x resp 0x%x "
++			"stat 0x%x but aborted by upper layer!\n",
++			t, event, ts->resp, ts->stat));
++		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++	} else {
++		spin_unlock_irqrestore(&t->task_state_lock, flags);
++		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++		mb();/* in order to force CPU ordering */
++		t->task_done(t);
++	}
++	return 0;
++}
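++/*
++ * Note that IO_XFER_CMD_FRAME_ISSUED above returns early: it is an
++ * informational event rather than a completion, so the task must remain
++ * pending and none of the DONE bookkeeping after the switch may run.
++ */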
++
++/*See the comments for mpi_ssp_completion */
++static int
++mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
++{
++	struct sas_task *t;
++	struct pm8001_ccb_info *ccb;
++	unsigned long flags;
++	u32 param;
++	u32 status;
++	u32 tag;
++	struct sata_completion_resp *psataPayload;
++	struct task_status_struct *ts;
++	struct ata_task_resp *resp;
++	u32 *sata_resp;
++	struct pm8001_device *pm8001_dev;
++
++	psataPayload = (struct sata_completion_resp *)(piomb + 4);
++	status = le32_to_cpu(psataPayload->status);
++	tag = le32_to_cpu(psataPayload->tag);
++
++	ccb = &pm8001_ha->ccb_info[tag];
++	param = le32_to_cpu(psataPayload->param);
++	t = ccb->task;
++	pm8001_dev = ccb->device;
++	if (status)
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("sata IO status 0x%x\n", status));
++	if (unlikely(!t || !t->lldd_task || !t->dev))
++		return -1;
++	ts = &t->task_status;
++
++	switch (status) {
++	case IO_SUCCESS:
++		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
++		if (param == 0) {
++			ts->resp = SAS_TASK_COMPLETE;
++			ts->stat = SAM_GOOD;
++		} else {
++			u8 len;
++			ts->resp = SAS_TASK_COMPLETE;
++			ts->stat = SAS_PROTO_RESPONSE;
++			ts->residual = param;
++			PM8001_IO_DBG(pm8001_ha,
++				pm8001_printk("SAS_PROTO_RESPONSE len = %d\n",
++				param));
++			sata_resp = &psataPayload->sata_resp[0];
++			resp = (struct ata_task_resp *)ts->buf;
++			if (t->ata_task.dma_xfer == 0 &&
++			t->data_dir == PCI_DMA_FROMDEVICE) {
++				len = sizeof(struct pio_setup_fis);
++				PM8001_IO_DBG(pm8001_ha,
++				pm8001_printk("PIO read len = %d\n", len));
++			} else if (t->ata_task.use_ncq) {
++				len = sizeof(struct set_dev_bits_fis);
++				PM8001_IO_DBG(pm8001_ha,
++					pm8001_printk("FPDMA len = %d\n", len));
++			} else {
++				len = sizeof(struct dev_to_host_fis);
++				PM8001_IO_DBG(pm8001_ha,
++				pm8001_printk("other len = %d\n", len));
++			}
++			if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
++				resp->frame_len = len;
++				memcpy(&resp->ending_fis[0], sata_resp, len);
++				ts->buf_valid_size = sizeof(*resp);
++			} else
++				PM8001_IO_DBG(pm8001_ha,
++					pm8001_printk("response too large\n"));
++		}
++		if (pm8001_dev)
++			pm8001_dev->running_req--;
++		break;
++	case IO_ABORTED:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_ABORTED IOMB Tag\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_ABORTED_TASK;
++		if (pm8001_dev)
++			pm8001_dev->running_req--;
++		break;
++		/* the following cases remain to be handled */
++	case IO_UNDERFLOW:
++		/* SATA Completion with error */
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_UNDERFLOW param = %d\n", param));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DATA_UNDERRUN;
++		ts->residual =  param;
++		if (pm8001_dev)
++			pm8001_dev->running_req--;
++		break;
++	case IO_NO_DEVICE:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_NO_DEVICE\n"));
++		ts->resp = SAS_TASK_UNDELIVERED;
++		ts->stat = SAS_PHY_DOWN;
++		break;
++	case IO_XFER_ERROR_BREAK:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_INTERRUPTED;
++		break;
++	case IO_XFER_ERROR_PHY_NOT_READY:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
++		break;
++	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT"
++			"_SUPPORTED\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_EPROTO;
++		break;
++	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
++		break;
++	case IO_OPEN_CNX_ERROR_BREAK:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
++		break;
++	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DEV_NO_RESPONSE;
++		if (!t->uldd_task) {
++			pm8001_handle_event(pm8001_ha,
++				pm8001_dev,
++				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
++			ts->resp = SAS_TASK_UNDELIVERED;
++			ts->stat = SAS_QUEUE_FULL;
++			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++			mb();/*in order to force CPU ordering*/
++			t->task_done(t);
++			return 0;
++		}
++		break;
++	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
++		ts->resp = SAS_TASK_UNDELIVERED;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
++		if (!t->uldd_task) {
++			pm8001_handle_event(pm8001_ha,
++				pm8001_dev,
++				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
++			ts->resp = SAS_TASK_UNDELIVERED;
++			ts->stat = SAS_QUEUE_FULL;
++			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++			mb();/*ditto*/
++			t->task_done(t);
++			return 0;
++		}
++		break;
++	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
++			"NOT_SUPPORTED\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
++		break;
++	case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_STP_RESOURCES"
++			"_BUSY\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DEV_NO_RESPONSE;
++		if (!t->uldd_task) {
++			pm8001_handle_event(pm8001_ha,
++				pm8001_dev,
++				IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
++			ts->resp = SAS_TASK_UNDELIVERED;
++			ts->stat = SAS_QUEUE_FULL;
++			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++			mb();/* ditto*/
++			t->task_done(t);
++			return 0;
++		}
++		break;
++	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
++		PM8001_IO_DBG(pm8001_ha,
++		       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
++		break;
++	case IO_XFER_ERROR_NAK_RECEIVED:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_NAK_R_ERR;
++		break;
++	case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_NAK_R_ERR;
++		break;
++	case IO_XFER_ERROR_DMA:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_DMA\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_ABORTED_TASK;
++		break;
++	case IO_XFER_ERROR_SATA_LINK_TIMEOUT:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"));
++		ts->resp = SAS_TASK_UNDELIVERED;
++		ts->stat = SAS_DEV_NO_RESPONSE;
++		break;
++	case IO_XFER_ERROR_REJECTED_NCQ_MODE:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DATA_UNDERRUN;
++		break;
++	case IO_XFER_OPEN_RETRY_TIMEOUT:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_TO;
++		break;
++	case IO_PORT_IN_RESET:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_PORT_IN_RESET\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DEV_NO_RESPONSE;
++		break;
++	case IO_DS_NON_OPERATIONAL:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DEV_NO_RESPONSE;
++		if (!t->uldd_task) {
++			pm8001_handle_event(pm8001_ha, pm8001_dev,
++				    IO_DS_NON_OPERATIONAL);
++			ts->resp = SAS_TASK_UNDELIVERED;
++			ts->stat = SAS_QUEUE_FULL;
++			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++			mb();/*ditto*/
++			t->task_done(t);
++			return 0;
++		}
++		break;
++	case IO_DS_IN_RECOVERY:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("  IO_DS_IN_RECOVERY\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DEV_NO_RESPONSE;
++		break;
++	case IO_DS_IN_ERROR:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_DS_IN_ERROR\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DEV_NO_RESPONSE;
++		if (!t->uldd_task) {
++			pm8001_handle_event(pm8001_ha, pm8001_dev,
++				    IO_DS_IN_ERROR);
++			ts->resp = SAS_TASK_UNDELIVERED;
++			ts->stat = SAS_QUEUE_FULL;
++			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++			mb();/*ditto*/
++			t->task_done(t);
++			return 0;
++		}
++		break;
++	case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
++		break;
++	default:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("Unknown status 0x%x\n", status));
++		/* not allowed case. Therefore, return failed status */
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DEV_NO_RESPONSE;
++		break;
++	}
++	spin_lock_irqsave(&t->task_state_lock, flags);
++	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
++	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
++	t->task_state_flags |= SAS_TASK_STATE_DONE;
++	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
++		spin_unlock_irqrestore(&t->task_state_lock, flags);
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("task 0x%p done with io_status 0x%x"
++			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
++			t, status, ts->resp, ts->stat));
++		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++	} else {
++		spin_unlock_irqrestore(&t->task_state_lock, flags);
++		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++		mb();/* ditto */
++		t->task_done(t);
++	}
++	return 0;
++}
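++/*
++ * The copy length for a SATA SAS_PROTO_RESPONSE above follows the FIS the
++ * device is expected to have returned: a PIO Setup FIS for non-DMA reads,
++ * a Set Device Bits FIS for NCQ commands, and a plain D2H Register FIS for
++ * everything else; the sizes come from the FIS structs (20, 8 and 20 bytes
++ * respectively per the ATA spec), not from hard-coded constants.
++ */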
++
++/*See the comments for mpi_ssp_completion */
++static int mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
++{
++	struct sas_task *t;
++	unsigned long flags;
++	struct task_status_struct *ts;
++	struct pm8001_ccb_info *ccb;
++	struct pm8001_device *pm8001_dev;
++	struct sata_event_resp *psataPayload =
++		(struct sata_event_resp *)(piomb + 4);
++	u32 event = le32_to_cpu(psataPayload->event);
++	u32 tag = le32_to_cpu(psataPayload->tag);
++	u32 port_id = le32_to_cpu(psataPayload->port_id);
++	u32 dev_id = le32_to_cpu(psataPayload->device_id);
++
++	ccb = &pm8001_ha->ccb_info[tag];
++	t = ccb->task;
++	pm8001_dev = ccb->device;
++	if (event)
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("sata IO status 0x%x\n", event));
++	if (unlikely(!t || !t->lldd_task || !t->dev))
++		return -1;
++	ts = &t->task_status;
++	PM8001_IO_DBG(pm8001_ha,
++		pm8001_printk("port_id = %x,device_id = %x\n",
++		port_id, dev_id));
++	switch (event) {
++	case IO_OVERFLOW:
++		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OVERFLOW\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DATA_OVERRUN;
++		ts->residual = 0;
++		if (pm8001_dev)
++			pm8001_dev->running_req--;
++		break;
++	case IO_XFER_ERROR_BREAK:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_INTERRUPTED;
++		break;
++	case IO_XFER_ERROR_PHY_NOT_READY:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
++		break;
++	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT"
++			"_SUPPORTED\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_EPROTO;
++		break;
++	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
++		break;
++	case IO_OPEN_CNX_ERROR_BREAK:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
++		break;
++	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
++		ts->resp = SAS_TASK_UNDELIVERED;
++		ts->stat = SAS_DEV_NO_RESPONSE;
++		if (!t->uldd_task) {
++			pm8001_handle_event(pm8001_ha,
++				pm8001_dev,
++				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
++			ts->resp = SAS_TASK_COMPLETE;
++			ts->stat = SAS_QUEUE_FULL;
++			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++			mb();/*ditto*/
++			t->task_done(t);
++			return 0;
++		}
++		break;
++	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
++		ts->resp = SAS_TASK_UNDELIVERED;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
++		break;
++	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
++			"NOT_SUPPORTED\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
++		break;
++	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
++		PM8001_IO_DBG(pm8001_ha,
++		       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
++		break;
++	case IO_XFER_ERROR_NAK_RECEIVED:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_NAK_R_ERR;
++		break;
++	case IO_XFER_ERROR_PEER_ABORTED:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_NAK_R_ERR;
++		break;
++	case IO_XFER_ERROR_REJECTED_NCQ_MODE:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DATA_UNDERRUN;
++		break;
++	case IO_XFER_OPEN_RETRY_TIMEOUT:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_TO;
++		break;
++	case IO_XFER_ERROR_UNEXPECTED_PHASE:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_TO;
++		break;
++	case IO_XFER_ERROR_XFER_RDY_OVERRUN:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_TO;
++		break;
++	case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
++		PM8001_IO_DBG(pm8001_ha,
++		       pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_TO;
++		break;
++	case IO_XFER_ERROR_OFFSET_MISMATCH:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_TO;
++		break;
++	case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_TO;
++		break;
++	case IO_XFER_CMD_FRAME_ISSUED:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
++		break;
++	case IO_XFER_PIO_SETUP_ERROR:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_TO;
++		break;
++	default:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("Unknown status 0x%x\n", event));
++		/* not allowed case. Therefore, return failed status */
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_TO;
++		break;
++	}
++	spin_lock_irqsave(&t->task_state_lock, flags);
++	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
++	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
++	t->task_state_flags |= SAS_TASK_STATE_DONE;
++	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
++		spin_unlock_irqrestore(&t->task_state_lock, flags);
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("task 0x%p done with io_status 0x%x"
++			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
++			t, event, ts->resp, ts->stat));
++		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++	} else {
++		spin_unlock_irqrestore(&t->task_state_lock, flags);
++		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++		mb();/* in order to force CPU ordering */
++		t->task_done(t);
++	}
++	return 0;
++}
++
++/*See the comments for mpi_ssp_completion */
++static int
++mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
++{
++	u32 param;
++	struct sas_task *t;
++	struct pm8001_ccb_info *ccb;
++	unsigned long flags;
++	u32 status;
++	u32 tag;
++	struct smp_completion_resp *psmpPayload;
++	struct task_status_struct *ts;
++	struct pm8001_device *pm8001_dev;
++
++	psmpPayload = (struct smp_completion_resp *)(piomb + 4);
++	status = le32_to_cpu(psmpPayload->status);
++	tag = le32_to_cpu(psmpPayload->tag);
++
++	ccb = &pm8001_ha->ccb_info[tag];
++	param = le32_to_cpu(psmpPayload->param);
++	t = ccb->task;
++	pm8001_dev = ccb->device;
++	if (status)
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("smp IO status 0x%x\n", status));
++	if (unlikely(!t || !t->lldd_task || !t->dev))
++		return -1;
++	ts = &t->task_status;
++
++	switch (status) {
++	case IO_SUCCESS:
++		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAM_GOOD;
++		if (pm8001_dev)
++			pm8001_dev->running_req--;
++		break;
++	case IO_ABORTED:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_ABORTED IOMB\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_ABORTED_TASK;
++		if (pm8001_dev)
++			pm8001_dev->running_req--;
++		break;
++	case IO_OVERFLOW:
++		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OVERFLOW\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DATA_OVERRUN;
++		ts->residual = 0;
++		if (pm8001_dev)
++			pm8001_dev->running_req--;
++		break;
++	case IO_NO_DEVICE:
++		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_PHY_DOWN;
++		break;
++	case IO_ERROR_HW_TIMEOUT:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_ERROR_HW_TIMEOUT\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAM_BUSY;
++		break;
++	case IO_XFER_ERROR_BREAK:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAM_BUSY;
++		break;
++	case IO_XFER_ERROR_PHY_NOT_READY:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAM_BUSY;
++		break;
++	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
++		PM8001_IO_DBG(pm8001_ha,
++		pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
++		break;
++	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
++		break;
++	case IO_OPEN_CNX_ERROR_BREAK:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
++		break;
++	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
++		pm8001_handle_event(pm8001_ha,
++				pm8001_dev,
++				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
++		break;
++	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
++		break;
++	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
++			"NOT_SUPPORTED\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
++		break;
++	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
++		PM8001_IO_DBG(pm8001_ha,
++		       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
++		break;
++	case IO_XFER_ERROR_RX_FRAME:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_ERROR_RX_FRAME\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DEV_NO_RESPONSE;
++		break;
++	case IO_XFER_OPEN_RETRY_TIMEOUT:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
++		break;
++	case IO_ERROR_INTERNAL_SMP_RESOURCE:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_QUEUE_FULL;
++		break;
++	case IO_PORT_IN_RESET:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_PORT_IN_RESET\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
++		break;
++	case IO_DS_NON_OPERATIONAL:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DEV_NO_RESPONSE;
++		break;
++	case IO_DS_IN_RECOVERY:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_DS_IN_RECOVERY\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
++		break;
++	case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
++		break;
++	default:
++		PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("Unknown status 0x%x\n", status));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAS_DEV_NO_RESPONSE;
++		/* not allowed case. Therefore, return failed status */
++		break;
++	}
++	spin_lock_irqsave(&t->task_state_lock, flags);
++	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
++	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
++	t->task_state_flags |= SAS_TASK_STATE_DONE;
++	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
++		spin_unlock_irqrestore(&t->task_state_lock, flags);
++		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
++			" io_status 0x%x resp 0x%x "
++			"stat 0x%x but aborted by upper layer!\n",
++			t, status, ts->resp, ts->stat));
++		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++	} else {
++		spin_unlock_irqrestore(&t->task_state_lock, flags);
++		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++		mb();/* in order to force CPU ordering */
++		t->task_done(t);
++	}
++	return 0;
++}
++
++static void
++mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
++{
++	struct set_dev_state_resp *pPayload =
++		(struct set_dev_state_resp *)(piomb + 4);
++	u32 tag = le32_to_cpu(pPayload->tag);
++	struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
++	struct pm8001_device *pm8001_dev = ccb->device;
++	u32 status = le32_to_cpu(pPayload->status);
++	u32 device_id = le32_to_cpu(pPayload->device_id);
++	u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS;
++	u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS;
++	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set device id = 0x%x state "
++		"from 0x%x to 0x%x status = 0x%x!\n",
++		device_id, pds, nds, status));
++	complete(pm8001_dev->setds_completion);
++	ccb->task = NULL;
++	ccb->ccb_tag = 0xFFFFFFFF;
++	pm8001_ccb_free(pm8001_ha, tag);
++}
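++/*
++ * pds_nds carries both the previous and the next device state in one le32;
++ * PDS_BITS and NDS_BITS are taken here to be the masks that select each
++ * field, which is why the extraction uses & rather than |.
++ */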
++
++static void
++mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
++{
++	struct get_nvm_data_resp *pPayload =
++		(struct get_nvm_data_resp *)(piomb + 4);
++	u32 tag = le32_to_cpu(pPayload->tag);
++	struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
++	u32 dlen_status = le32_to_cpu(pPayload->dlen_status);
++	complete(pm8001_ha->nvmd_completion);
++	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set nvm data complete!\n"));
++	if ((dlen_status & NVMD_STAT) != 0) {
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("Set nvm data error!\n"));
++		return;
++	}
++	ccb->task = NULL;
++	ccb->ccb_tag = 0xFFFFFFFF;
++	pm8001_ccb_free(pm8001_ha, tag);
++}
++
++static void
++mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
++{
++	struct fw_control_ex	*fw_control_context;
++	struct get_nvm_data_resp *pPayload =
++		(struct get_nvm_data_resp *)(piomb + 4);
++	u32 tag = le32_to_cpu(pPayload->tag);
++	struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
++	u32 dlen_status = le32_to_cpu(pPayload->dlen_status);
++	u32 ir_tds_bn_dps_das_nvm =
++		le32_to_cpu(pPayload->ir_tda_bn_dps_das_nvm);
++	void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
++	fw_control_context = ccb->fw_control_context;
++
++	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Get nvm data complete!\n"));
++	if ((dlen_status & NVMD_STAT) != 0) {
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("Get nvm data error!\n"));
++		complete(pm8001_ha->nvmd_completion);
++		return;
++	}
++
++	if (ir_tds_bn_dps_das_nvm & IPMode) {
++		/* indirect mode - IR bit set */
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("Get NVMD success, IR=1\n"));
++		if ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == TWI_DEVICE) {
++			if (ir_tds_bn_dps_das_nvm == 0x80a80200) {
++				memcpy(pm8001_ha->sas_addr,
++				      ((u8 *)virt_addr + 4),
++				       SAS_ADDR_SIZE);
++				PM8001_MSG_DBG(pm8001_ha,
++					pm8001_printk("Get SAS address"
++					" from VPD successfully!\n"));
++			}
++		} else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == C_SEEPROM)
++			|| ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == VPD_FLASH) ||
++			((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == EXPAN_ROM)) {
++				;
++		} else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == AAP1_RDUMP)
++			|| ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == IOP_RDUMP)) {
++			;
++		} else {
++			/* Should not happen */
++			PM8001_MSG_DBG(pm8001_ha,
++				pm8001_printk("(IR=1)Wrong Device type 0x%x\n",
++				ir_tds_bn_dps_das_nvm));
++		}
++	} else { /* direct mode */
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("Get NVMD success, IR=0, dataLen=%d\n",
++			(dlen_status & NVMD_LEN) >> 24));
++	}
++	memcpy((void *)(fw_control_context->usrAddr),
++		(void *)(pm8001_ha->memoryMap.region[NVMD].virt_ptr),
++		fw_control_context->len);
++	complete(pm8001_ha->nvmd_completion);
++	ccb->task = NULL;
++	ccb->ccb_tag = 0xFFFFFFFF;
++	pm8001_ccb_free(pm8001_ha, tag);
++}
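++/*
++ * GET_NVMD data arrives in one of two modes selected by the IR bit of
++ * ir_tds_bn_dps_das_nvm: indirect (IR=1), where the FW has DMAed the data
++ * into the NVMD memory region -- and, for the TWI VPD device, the SAS
++ * address sits at offset 4 of that region -- or direct (IR=0), where only
++ * the length encoded in dlen_status is reported. In both modes the region
++ * is then copied to the user buffer recorded in the ccb's
++ * fw_control_context.
++ */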
++
++static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
++{
++	struct local_phy_ctl_resp *pPayload =
++		(struct local_phy_ctl_resp *)(piomb + 4);
++	u32 status = le32_to_cpu(pPayload->status);
++	u32 phy_id = le32_to_cpu(pPayload->phyop_phyid) & ID_BITS;
++	u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS;
++	if (status != 0) {
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("phy %x: phy op %x failed!\n",
++			phy_id, phy_op));
++	} else {
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("phy %x: phy op %x succeeded!\n",
++			phy_id, phy_op));
++	}
++	return 0;
++}
++
++/**
++ * pm8001_bytes_dmaed - one of the interface functions for communicating
++ * with libsas
++ * @pm8001_ha: our hba card information
++ * @i: which phy received the event.
++ *
++ * When the HBA driver receives the identify-done event, or an initial FIS
++ * received event (for SATA), it invokes this function to notify the sas
++ * layer that the sas topology has formed, so that it discovers the whole
++ * sas domain. When a broadcast (change) primitive is received instead, we
++ * only tell the sas layer to discover the changed part of the domain rather
++ * than the whole domain.
++ */
++static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
++{
++	struct pm8001_phy *phy = &pm8001_ha->phy[i];
++	struct asd_sas_phy *sas_phy = &phy->sas_phy;
++	struct sas_ha_struct *sas_ha;
++	if (!phy->phy_attached)
++		return;
++
++	sas_ha = pm8001_ha->sas;
++	if (sas_phy->phy) {
++		struct sas_phy *sphy = sas_phy->phy;
++		sphy->negotiated_linkrate = sas_phy->linkrate;
++		sphy->minimum_linkrate = phy->minimum_linkrate;
++		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
++		sphy->maximum_linkrate = phy->maximum_linkrate;
++		sphy->maximum_linkrate_hw = phy->maximum_linkrate;
++	}
++
++	if (phy->phy_type & PORT_TYPE_SAS) {
++		struct sas_identify_frame *id;
++		id = (struct sas_identify_frame *)phy->frame_rcvd;
++		id->dev_type = phy->identify.device_type;
++		id->initiator_bits = SAS_PROTOCOL_ALL;
++		id->target_bits = phy->identify.target_port_protocols;
++	} else if (phy->phy_type & PORT_TYPE_SATA) {
++		/*Nothing*/
++	}
++	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("phy %d bytes dmaed.\n", i));
++
++	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
++	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
++}
++
++/* Get the link rate speed  */
++static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
++{
++	struct sas_phy *sas_phy = phy->sas_phy.phy;
++
++	switch (link_rate) {
++	case PHY_SPEED_60:
++		phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
++		phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
++		break;
++	case PHY_SPEED_30:
++		phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS;
++		phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
++		break;
++	case PHY_SPEED_15:
++		phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS;
++		phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
++		break;
++	}
++	sas_phy->negotiated_linkrate = phy->sas_phy.linkrate;
++	sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_6_0_GBPS;
++	sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
++	sas_phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
++	sas_phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
++}
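++/*
++ * PHY_SPEED_15/30/60 map one-to-one onto SAS_LINK_RATE_1_5/3_0/6_0_GBPS,
++ * e.g. PHY_SPEED_30 -> SAS_LINK_RATE_3_0_GBPS. The hw minimum/maximum
++ * reported to sysfs are pinned to the 1.5-6.0 Gbps range this controller
++ * negotiates.
++ */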
++
++/**
++ * pm8001_get_attached_sas_addr -- extract/generate attached SAS address
++ * @phy: pointer to the pm8001_phy
++ * @sas_addr: pointer to buffer where the SAS address is to be written
++ *
++ * This function extracts the SAS address from a received IDENTIFY frame.
++ * If OOB is SATA, then a SAS address is generated from the
++ * HA tables.
++ *
++ * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
++ * buffer.
++ */
++static void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
++	u8 *sas_addr)
++{
++	if (phy->sas_phy.frame_rcvd[0] == 0x34
++		&& phy->sas_phy.oob_mode == SATA_OOB_MODE) {
++		struct pm8001_hba_info *pm8001_ha = phy->sas_phy.ha->lldd_ha;
++		/* FIS device-to-host */
++		u64 addr = be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr);
++		addr += phy->sas_phy.id;
++		*(__be64 *)sas_addr = cpu_to_be64(addr);
++	} else {
++		struct sas_identify_frame *idframe =
++			(void *) phy->sas_phy.frame_rcvd;
++		memcpy(sas_addr, idframe->sas_addr, SAS_ADDR_SIZE);
++	}
++}
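++/*
++ * 0x34 is the FIS type of a SATA device-to-host register FIS, so the first
++ * branch above detects a directly attached SATA disk. Such a disk has no
++ * SAS address of its own, so one is synthesized from the HBA address plus
++ * the phy id (e.g. phy 2 yields hba_addr + 2), keeping it unique per phy.
++ */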
++
++/**
++ * pm8001_hw_event_ack_req - for the PM8001, some events must be
++ * acknowledged to the FW.
++ * @pm8001_ha: our hba card information
++ * @Qnum: the outbound queue message number.
++ * @SEA: source of event to ack
++ * @port_id: port id.
++ * @phyId: phy id.
++ * @param0: parameter 0.
++ * @param1: parameter 1.
++ */
++static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
++	u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1)
++{
++	struct hw_event_ack_req	 payload;
++	u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
++
++	struct inbound_queue_table *circularQ;
++
++	memset((u8 *)&payload, 0, sizeof(payload));
++	circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
++	payload.tag = cpu_to_le32(1);
++	payload.sea_phyid_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
++		((phyId & 0x0F) << 4) | (port_id & 0x0F));
++	payload.param0 = cpu_to_le32(param0);
++	payload.param1 = cpu_to_le32(param1);
++	mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
++}
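++/*
++ * sea_phyid_portid packs three fields into one le32: port id in bits 0-3,
++ * phy id in bits 4-7 and the event source (SEA) from bit 8 up. Acking, say,
++ * HW_EVENT_PHY_DOWN on port 1, phy 3 therefore encodes as
++ * (HW_EVENT_PHY_DOWN << 8) | (3 << 4) | 1.
++ */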
++
++static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
++	u32 phyId, u32 phy_op);
++
++/**
++ * hw_event_sas_phy_up - the FW tells us a SAS phy is up.
++ * @pm8001_ha: our hba card information
++ * @piomb: IO message buffer
++ */
++static void
++hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
++{
++	struct hw_event_resp *pPayload =
++		(struct hw_event_resp *)(piomb + 4);
++	u32 lr_evt_status_phyid_portid =
++		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
++	u8 link_rate =
++		(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
++	u8 phy_id =
++		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
++	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
++	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
++	unsigned long flags;
++	u8 deviceType = pPayload->sas_identify.dev_type;
++
++	PM8001_MSG_DBG(pm8001_ha,
++		pm8001_printk("HW_EVENT_SAS_PHY_UP\n"));
++
++	switch (deviceType) {
++	case SAS_PHY_UNUSED:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("device type no device.\n"));
++		break;
++	case SAS_END_DEVICE:
++		PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
++		pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
++			PHY_NOTIFY_ENABLE_SPINUP);
++		get_lrate_mode(phy, link_rate);
++		break;
++	case SAS_EDGE_EXPANDER_DEVICE:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("expander device.\n"));
++		get_lrate_mode(phy, link_rate);
++		break;
++	case SAS_FANOUT_EXPANDER_DEVICE:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("fanout expander device.\n"));
++		get_lrate_mode(phy, link_rate);
++		break;
++	default:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("unknown device type (%x)\n", deviceType));
++		break;
++	}
++	phy->phy_type |= PORT_TYPE_SAS;
++	phy->identify.device_type = deviceType;
++	phy->phy_attached = 1;
++	if (phy->identify.device_type == SAS_END_DEV)
++		phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
++	else if (phy->identify.device_type != NO_DEVICE)
++		phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
++	phy->sas_phy.oob_mode = SAS_OOB_MODE;
++	sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
++	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
++	memcpy(phy->frame_rcvd, &pPayload->sas_identify,
++		sizeof(struct sas_identify_frame)-4);
++	phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4;
++	pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
++	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
++	if (pm8001_ha->flags == PM8001F_RUN_TIME)
++		mdelay(200); /* wait a moment for the disk to spin up */
++	pm8001_bytes_dmaed(pm8001_ha, phy_id);
++}
++
++/**
++ * hw_event_sata_phy_up - the FW tells us a SATA phy is up.
++ * @pm8001_ha: our hba card information
++ * @piomb: IO message buffer
++ */
++static void
++hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
++{
++	struct hw_event_resp *pPayload =
++		(struct hw_event_resp *)(piomb + 4);
++	u32 lr_evt_status_phyid_portid =
++		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
++	u8 link_rate =
++		(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
++	u8 phy_id =
++		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
++	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
++	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
++	unsigned long flags;
++	get_lrate_mode(phy, link_rate);
++	phy->phy_type |= PORT_TYPE_SATA;
++	phy->phy_attached = 1;
++	phy->sas_phy.oob_mode = SATA_OOB_MODE;
++	sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
++	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
++	memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
++		sizeof(struct dev_to_host_fis));
++	phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
++	phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
++	phy->identify.device_type = SATA_DEV;
++	pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
++	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
++	pm8001_bytes_dmaed(pm8001_ha, phy_id);
++}
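++/*
++ * For SATA the "frame" handed to libsas is really the initial device-to-
++ * host register FIS; the -4 in the memcpy above is presumably adjusting for
++ * a 4-byte offset in the IOMB layout so that frame_rcvd[0] lands on the FIS
++ * type byte (0x34), which is what pm8001_get_attached_sas_addr tests for.
++ */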
++
++/**
++ * hw_event_phy_down - notify libsas that the phy is down.
++ * @pm8001_ha: our hba card information
++ * @piomb: IO message buffer
++ */
++static void
++hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
++{
++	struct hw_event_resp *pPayload =
++		(struct hw_event_resp *)(piomb + 4);
++	u32 lr_evt_status_phyid_portid =
++		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
++	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
++	u8 phy_id =
++		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
++	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
++	u8 portstate = (u8)(npip_portstate & 0x0000000F);
++
++	switch (portstate) {
++	case PORT_VALID:
++		break;
++	case PORT_INVALID:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk(" PortInvalid portID %d\n", port_id));
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk(" Last phy Down and port invalid\n"));
++		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
++			port_id, phy_id, 0, 0);
++		break;
++	case PORT_IN_RESET:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk(" PortInReset portID %d\n", port_id));
++		break;
++	case PORT_NOT_ESTABLISHED:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
++		break;
++	case PORT_LOSTCOMM:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk(" Last phy Down and port invalid\n"));
++		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
++			port_id, phy_id, 0, 0);
++		break;
++	default:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk(" phy Down and(default) = %x\n",
++			portstate));
++		break;
++
++	}
++}
++
++/**
++ * mpi_reg_resp - process the register device ID response.
++ * @pm8001_ha: our hba card information
++ * @piomb: IO message buffer
++ *
++ * When the sas layer finds a device it notifies the LLDD, and the driver
++ * registers the domain device with the FW. This event returns the device ID
++ * that the FW has assigned; from then on, communication with the FW uses
++ * this device ID rather than the SAS address.
++ */
++static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
++{
++	u32 status;
++	u32 device_id;
++	u32 htag;
++	struct pm8001_ccb_info *ccb;
++	struct pm8001_device *pm8001_dev;
++	struct dev_reg_resp *registerRespPayload =
++		(struct dev_reg_resp *)(piomb + 4);
++
++	htag = le32_to_cpu(registerRespPayload->tag);
++	ccb = &pm8001_ha->ccb_info[htag];
++	pm8001_dev = ccb->device;
++	status = le32_to_cpu(registerRespPayload->status);
++	device_id = le32_to_cpu(registerRespPayload->device_id);
++	PM8001_MSG_DBG(pm8001_ha,
++		pm8001_printk(" register device status = %d\n", status));
++	switch (status) {
++	case DEVREG_SUCCESS:
++		PM8001_MSG_DBG(pm8001_ha, pm8001_printk("DEVREG_SUCCESS\n"));
++		pm8001_dev->device_id = device_id;
++		break;
++	case DEVREG_FAILURE_OUT_OF_RESOURCE:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("DEVREG_FAILURE_OUT_OF_RESOURCE\n"));
++		break;
++	case DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED:
++		PM8001_MSG_DBG(pm8001_ha,
++		   pm8001_printk("DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED\n"));
++		break;
++	case DEVREG_FAILURE_INVALID_PHY_ID:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("DEVREG_FAILURE_INVALID_PHY_ID\n"));
++		break;
++	case DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED:
++		PM8001_MSG_DBG(pm8001_ha,
++		   pm8001_printk("DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED\n"));
++		break;
++	case DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE\n"));
++		break;
++	case DEVREG_FAILURE_PORT_NOT_VALID_STATE:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("DEVREG_FAILURE_PORT_NOT_VALID_STATE\n"));
++		break;
++	case DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID:
++		PM8001_MSG_DBG(pm8001_ha,
++		       pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID\n"));
++		break;
++	default:
++		PM8001_MSG_DBG(pm8001_ha,
++		 pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_SUPPORTED\n"));
++		break;
++	}
++	complete(pm8001_dev->dcompletion);
++	ccb->task = NULL;
++	ccb->ccb_tag = 0xFFFFFFFF;
++	pm8001_ccb_free(pm8001_ha, htag);
++	return 0;
++}
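++/*
++ * On DEVREG_SUCCESS the FW-assigned handle is stored in
++ * pm8001_dev->device_id and all later IOMBs for this device are addressed
++ * by that handle instead of the SAS address. Note that dcompletion is
++ * completed on failure too, so the waiter presumably checks the outcome
++ * separately.
++ */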
++
++static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
++{
++	u32 status;
++	u32 device_id;
++	struct dev_reg_resp *registerRespPayload =
++		(struct dev_reg_resp *)(piomb + 4);
++
++	status = le32_to_cpu(registerRespPayload->status);
++	device_id = le32_to_cpu(registerRespPayload->device_id);
++	if (status != 0)
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk(" deregister device failed, status = %x"
++			", device_id = %x\n", status, device_id));
++	return 0;
++}
++
++static int
++mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
++{
++	u32 status;
++	struct fw_control_ex	fw_control_context;
++	struct fw_flash_Update_resp *ppayload =
++		(struct fw_flash_Update_resp *)(piomb + 4);
++	u32 tag = le32_to_cpu(ppayload->tag);
++	struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
++	status = le32_to_cpu(ppayload->status);
++	memcpy(&fw_control_context,
++		ccb->fw_control_context,
++		sizeof(fw_control_context));
++	switch (status) {
++	case FLASH_UPDATE_COMPLETE_PENDING_REBOOT:
++		PM8001_MSG_DBG(pm8001_ha,
++		pm8001_printk(": FLASH_UPDATE_COMPLETE_PENDING_REBOOT\n"));
++		break;
++	case FLASH_UPDATE_IN_PROGRESS:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk(": FLASH_UPDATE_IN_PROGRESS\n"));
++		break;
++	case FLASH_UPDATE_HDR_ERR:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk(": FLASH_UPDATE_HDR_ERR\n"));
++		break;
++	case FLASH_UPDATE_OFFSET_ERR:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk(": FLASH_UPDATE_OFFSET_ERR\n"));
++		break;
++	case FLASH_UPDATE_CRC_ERR:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk(": FLASH_UPDATE_CRC_ERR\n"));
++		break;
++	case FLASH_UPDATE_LENGTH_ERR:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk(": FLASH_UPDATE_LENGTH_ERR\n"));
++		break;
++	case FLASH_UPDATE_HW_ERR:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk(": FLASH_UPDATE_HW_ERR\n"));
++		break;
++	case FLASH_UPDATE_DNLD_NOT_SUPPORTED:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk(": FLASH_UPDATE_DNLD_NOT_SUPPORTED\n"));
++		break;
++	case FLASH_UPDATE_DISABLED:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk(": FLASH_UPDATE_DISABLED\n"));
++		break;
++	default:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("No matched status = %d\n", status));
++		break;
++	}
++	ccb->fw_control_context->fw_control->retcode = status;
++	pci_free_consistent(pm8001_ha->pdev,
++			fw_control_context.len,
++			fw_control_context.virtAddr,
++			fw_control_context.phys_addr);
++	complete(pm8001_ha->nvmd_completion);
++	ccb->task = NULL;
++	ccb->ccb_tag = 0xFFFFFFFF;
++	pm8001_ccb_free(pm8001_ha, tag);
++	return 0;
++}
++
++static int
++mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
++{
++	u32 status;
++	int i;
++	struct general_event_resp *pPayload =
++		(struct general_event_resp *)(piomb + 4);
++	status = le32_to_cpu(pPayload->status);
++	PM8001_MSG_DBG(pm8001_ha,
++		pm8001_printk(" status = 0x%x\n", status));
++	for (i = 0; i < GENERAL_EVENT_PAYLOAD; i++)
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("inb_IOMB_payload[0x%x] 0x%x\n", i,
++			pPayload->inb_IOMB_payload[i]));
++	return 0;
++}
++
++static int
++mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
++{
++	struct sas_task *t;
++	struct pm8001_ccb_info *ccb;
++	unsigned long flags;
++	u32 status;
++	u32 tag, scp;
++	struct task_status_struct *ts;
++
++	struct task_abort_resp *pPayload =
++		(struct task_abort_resp *)(piomb + 4);
++	tag = le32_to_cpu(pPayload->tag);
++	ccb = &pm8001_ha->ccb_info[tag];
++	t = ccb->task;
++
++	if (t == NULL)
++		return -1;
++	ts = &t->task_status;
++
++	status = le32_to_cpu(pPayload->status);
++	scp = le32_to_cpu(pPayload->scp);
++	PM8001_IO_DBG(pm8001_ha,
++		pm8001_printk(" status = 0x%x\n", status));
++	if (status != 0)
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("task abort failed tag = 0x%x,"
++			" scp= 0x%x\n", tag, scp));
++	switch (status) {
++	case IO_SUCCESS:
++		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
++		ts->resp = SAS_TASK_COMPLETE;
++		ts->stat = SAM_GOOD;
++		break;
++	case IO_NOT_VALID:
++		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NOT_VALID\n"));
++		ts->resp = TMF_RESP_FUNC_FAILED;
++		break;
++	}
++	spin_lock_irqsave(&t->task_state_lock, flags);
++	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
++	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
++	t->task_state_flags |= SAS_TASK_STATE_DONE;
++	spin_unlock_irqrestore(&t->task_state_lock, flags);
++	pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++	mb();
++	t->task_done(t);
++	return 0;
++}
++
++/**
++ * mpi_hw_event - handle a hardware event reported by the controller.
++ * @pm8001_ha: our hba card information
++ * @piomb: IO message buffer
++ */
++static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
++{
++	unsigned long flags;
++	struct hw_event_resp *pPayload =
++		(struct hw_event_resp *)(piomb + 4);
++	u32 lr_evt_status_phyid_portid =
++		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
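++	/* lr_evt_status_phyid_portid packs: bits [3:0] port id,
++	[7:4] phy id, [23:8] event type, [27:24] event status */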
++	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
++	u8 phy_id =
++		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
++	u16 eventType =
++		(u16)((lr_evt_status_phyid_portid & 0x00FFFF00) >> 8);
++	u8 status =
++		(u8)((lr_evt_status_phyid_portid & 0x0F000000) >> 24);
++	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
++	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
++	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
++	PM8001_MSG_DBG(pm8001_ha,
++		pm8001_printk("outbound queue HW event & event type : "));
++	switch (eventType) {
++	case HW_EVENT_PHY_START_STATUS:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_PHY_START_STATUS"
++			" status = %x\n", status));
++		if (status == 0) {
++			phy->phy_state = 1;
++			if (pm8001_ha->flags == PM8001F_RUN_TIME)
++				complete(phy->enable_completion);
++		}
++		break;
++	case HW_EVENT_SAS_PHY_UP:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_SAS_PHY_UP\n"));
++		hw_event_sas_phy_up(pm8001_ha, piomb);
++		break;
++	case HW_EVENT_SATA_PHY_UP:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_SATA_PHY_UP \n"));
++		hw_event_sata_phy_up(pm8001_ha, piomb);
++		break;
++	case HW_EVENT_PHY_STOP_STATUS:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_PHY_STOP_STATUS "
++			"status = %x\n", status));
++		if (status == 0)
++			phy->phy_state = 0;
++		break;
++	case HW_EVENT_SATA_SPINUP_HOLD:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD \n"));
++		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
++		break;
++	case HW_EVENT_PHY_DOWN:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_PHY_DOWN \n"));
++		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
++		phy->phy_attached = 0;
++		phy->phy_state = 0;
++		hw_event_phy_down(pm8001_ha, piomb);
++		break;
++	case HW_EVENT_PORT_INVALID:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_PORT_INVALID\n"));
++		sas_phy_disconnected(sas_phy);
++		phy->phy_attached = 0;
++		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		break;
++	/* a broadcast change primitive was received; tell libsas so it
++	can revalidate the SAS domain */
++	case HW_EVENT_BROADCAST_CHANGE:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n"));
++		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE,
++			port_id, phy_id, 1, 0);
++		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
++		sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
++		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
++		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++		break;
++	case HW_EVENT_PHY_ERROR:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_PHY_ERROR\n"));
++		sas_phy_disconnected(&phy->sas_phy);
++		phy->phy_attached = 0;
++		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
++		break;
++	case HW_EVENT_BROADCAST_EXP:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_BROADCAST_EXP\n"));
++		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
++		sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
++		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
++		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++		break;
++	case HW_EVENT_LINK_ERR_INVALID_DWORD:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
++		pm8001_hw_event_ack_req(pm8001_ha, 0,
++			HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
++		sas_phy_disconnected(sas_phy);
++		phy->phy_attached = 0;
++		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		break;
++	case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"));
++		pm8001_hw_event_ack_req(pm8001_ha, 0,
++			HW_EVENT_LINK_ERR_DISPARITY_ERROR,
++			port_id, phy_id, 0, 0);
++		sas_phy_disconnected(sas_phy);
++		phy->phy_attached = 0;
++		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		break;
++	case HW_EVENT_LINK_ERR_CODE_VIOLATION:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n"));
++		pm8001_hw_event_ack_req(pm8001_ha, 0,
++			HW_EVENT_LINK_ERR_CODE_VIOLATION,
++			port_id, phy_id, 0, 0);
++		sas_phy_disconnected(sas_phy);
++		phy->phy_attached = 0;
++		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		break;
++	case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
++		PM8001_MSG_DBG(pm8001_ha,
++		      pm8001_printk("HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"));
++		pm8001_hw_event_ack_req(pm8001_ha, 0,
++			HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
++			port_id, phy_id, 0, 0);
++		sas_phy_disconnected(sas_phy);
++		phy->phy_attached = 0;
++		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		break;
++	case HW_EVENT_MALFUNCTION:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_MALFUNCTION\n"));
++		break;
++	case HW_EVENT_BROADCAST_SES:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_BROADCAST_SES\n"));
++		spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
++		sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
++		spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
++		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++		break;
++	case HW_EVENT_INBOUND_CRC_ERROR:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n"));
++		pm8001_hw_event_ack_req(pm8001_ha, 0,
++			HW_EVENT_INBOUND_CRC_ERROR,
++			port_id, phy_id, 0, 0);
++		break;
++	case HW_EVENT_HARD_RESET_RECEIVED:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n"));
++		sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
++		break;
++	case HW_EVENT_ID_FRAME_TIMEOUT:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n"));
++		sas_phy_disconnected(sas_phy);
++		phy->phy_attached = 0;
++		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		break;
++	case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED \n"));
++		pm8001_hw_event_ack_req(pm8001_ha, 0,
++			HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
++			port_id, phy_id, 0, 0);
++		sas_phy_disconnected(sas_phy);
++		phy->phy_attached = 0;
++		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		break;
++	case HW_EVENT_PORT_RESET_TIMER_TMO:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO \n"));
++		sas_phy_disconnected(sas_phy);
++		phy->phy_attached = 0;
++		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		break;
++	case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO \n"));
++		sas_phy_disconnected(sas_phy);
++		phy->phy_attached = 0;
++		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++		break;
++	case HW_EVENT_PORT_RECOVER:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_PORT_RECOVER \n"));
++		break;
++	case HW_EVENT_PORT_RESET_COMPLETE:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE \n"));
++		break;
++	case EVENT_BROADCAST_ASYNCH_EVENT:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
++		break;
++	default:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("Unknown event type = %x\n", eventType));
++		break;
++	}
++	return 0;
++}
++
++/**
++ * process_one_iomb - process one outbound queue memory block
++ * @pm8001_ha: our hba card information
++ * @piomb: IO message buffer
++ */
++static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
++{
++	u32 pHeader = (u32)*(u32 *)piomb;
++	u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF);
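++	/* the low 12 bits of the IOMB header hold the opcode (OPCODE_BITS) */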
++
++	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:\n"));
++
++	switch (opc) {
++	case OPC_OUB_ECHO:
++		PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO \n"));
++		break;
++	case OPC_OUB_HW_EVENT:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_HW_EVENT \n"));
++		mpi_hw_event(pm8001_ha, piomb);
++		break;
++	case OPC_OUB_SSP_COMP:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_SSP_COMP \n"));
++		mpi_ssp_completion(pm8001_ha, piomb);
++		break;
++	case OPC_OUB_SMP_COMP:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_SMP_COMP \n"));
++		mpi_smp_completion(pm8001_ha, piomb);
++		break;
++	case OPC_OUB_LOCAL_PHY_CNTRL:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
++		mpi_local_phy_ctl(pm8001_ha, piomb);
++		break;
++	case OPC_OUB_DEV_REGIST:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_DEV_REGIST \n"));
++		mpi_reg_resp(pm8001_ha, piomb);
++		break;
++	case OPC_OUB_DEREG_DEV:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("unregister the device\n"));
++		mpi_dereg_resp(pm8001_ha, piomb);
++		break;
++	case OPC_OUB_GET_DEV_HANDLE:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_GET_DEV_HANDLE \n"));
++		break;
++	case OPC_OUB_SATA_COMP:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_SATA_COMP \n"));
++		mpi_sata_completion(pm8001_ha, piomb);
++		break;
++	case OPC_OUB_SATA_EVENT:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_SATA_EVENT \n"));
++		mpi_sata_event(pm8001_ha, piomb);
++		break;
++	case OPC_OUB_SSP_EVENT:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_SSP_EVENT\n"));
++		mpi_ssp_event(pm8001_ha, piomb);
++		break;
++	case OPC_OUB_DEV_HANDLE_ARRIV:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n"));
++		/*This is for target*/
++		break;
++	case OPC_OUB_SSP_RECV_EVENT:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n"));
++		/*This is for target*/
++		break;
++	case OPC_OUB_DEV_INFO:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_DEV_INFO\n"));
++		break;
++	case OPC_OUB_FW_FLASH_UPDATE:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
++		mpi_fw_flash_update_resp(pm8001_ha, piomb);
++		break;
++	case OPC_OUB_GPIO_RESPONSE:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_GPIO_RESPONSE\n"));
++		break;
++	case OPC_OUB_GPIO_EVENT:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_GPIO_EVENT\n"));
++		break;
++	case OPC_OUB_GENERAL_EVENT:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
++		mpi_general_event(pm8001_ha, piomb);
++		break;
++	case OPC_OUB_SSP_ABORT_RSP:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
++		mpi_task_abort_resp(pm8001_ha, piomb);
++		break;
++	case OPC_OUB_SATA_ABORT_RSP:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
++		mpi_task_abort_resp(pm8001_ha, piomb);
++		break;
++	case OPC_OUB_SAS_DIAG_MODE_START_END:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n"));
++		break;
++	case OPC_OUB_SAS_DIAG_EXECUTE:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n"));
++		break;
++	case OPC_OUB_GET_TIME_STAMP:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_GET_TIME_STAMP\n"));
++		break;
++	case OPC_OUB_SAS_HW_EVENT_ACK:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n"));
++		break;
++	case OPC_OUB_PORT_CONTROL:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_PORT_CONTROL\n"));
++		break;
++	case OPC_OUB_SMP_ABORT_RSP:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
++		mpi_task_abort_resp(pm8001_ha, piomb);
++		break;
++	case OPC_OUB_GET_NVMD_DATA:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
++		mpi_get_nvmd_resp(pm8001_ha, piomb);
++		break;
++	case OPC_OUB_SET_NVMD_DATA:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
++		mpi_set_nvmd_resp(pm8001_ha, piomb);
++		break;
++	case OPC_OUB_DEVICE_HANDLE_REMOVAL:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n"));
++		break;
++	case OPC_OUB_SET_DEVICE_STATE:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
++		mpi_set_dev_state_resp(pm8001_ha, piomb);
++		break;
++	case OPC_OUB_GET_DEVICE_STATE:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n"));
++		break;
++	case OPC_OUB_SET_DEV_INFO:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_SET_DEV_INFO\n"));
++		break;
++	case OPC_OUB_SAS_RE_INITIALIZE:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("OPC_OUB_SAS_RE_INITIALIZE\n"));
++		break;
++	default:
++		PM8001_MSG_DBG(pm8001_ha,
++			pm8001_printk("Unknown outbound Queue IOMB OPC = %x\n",
++			opc));
++		break;
++	}
++}
++
++static int process_oq(struct pm8001_hba_info *pm8001_ha)
++{
++	struct outbound_queue_table *circularQ;
++	void *pMsg1 = NULL;
++	u8 bc = 0;
++	u32 ret = MPI_IO_STATUS_FAIL, processedMsgCount = 0;
++
++	circularQ = &pm8001_ha->outbnd_q_tbl[0];
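++	/* drain outbound queue 0: consume IOMBs until the queue is empty
++	or the per-call cap of 100 messages is hit */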
++	do {
++		ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
++		if (MPI_IO_STATUS_SUCCESS == ret) {
++			/* process the outbound message */
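++			/* mpi_msg_consume returns a pointer just past the
++			4-byte IOMB header, so step back to hand the full
++			IOMB to process_one_iomb() */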
++			process_one_iomb(pm8001_ha, (void *)((u8 *)pMsg1 - 4));
++			/* free the message from the outbound circular buffer */
++			mpi_msg_free_set(pm8001_ha, circularQ, bc);
++			processedMsgCount++;
++		}
++		if (MPI_IO_STATUS_BUSY == ret) {
++			u32 producer_idx;
++			/* Update the producer index from SPC */
++			producer_idx = pm8001_read_32(circularQ->pi_virt);
++			circularQ->producer_index = cpu_to_le32(producer_idx);
++			if (circularQ->producer_index ==
++				circularQ->consumer_idx)
++				/* OQ is empty */
++				break;
++		}
++	} while (100 > processedMsgCount); /* cap messages processed per call */
++	return ret;
++}
++
++/* PCI_DMA_... to our direction translation. */
++static const u8 data_dir_flags[] = {
++	[PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
++	[PCI_DMA_TODEVICE]	= DATA_DIR_OUT,/* OUTBOUND */
++	[PCI_DMA_FROMDEVICE]	= DATA_DIR_IN,/* INBOUND */
++	[PCI_DMA_NONE]		= DATA_DIR_NONE,/* NO TRANSFER */
++};
++static void
++pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
++{
++	int i;
++	struct scatterlist *sg;
++	struct pm8001_prd *buf_prd = prd;
++
++	for_each_sg(scatter, sg, nr, i) {
++		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
++		buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
++		buf_prd->im_len.e = 0;
++		buf_prd++;
++	}
++}
++
++static void build_smp_cmd(u32 deviceID, u32 hTag, struct smp_req *psmp_cmd)
++{
++	psmp_cmd->tag = cpu_to_le32(hTag);
++	psmp_cmd->device_id = cpu_to_le32(deviceID);
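++	/* len_ip_ir: bit 0 = Indirect Response, bit 1 = Indirect Payload;
++	both are set here for the long (indirect) SMP form */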
++	psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1));
++}
++
++/**
++ * pm8001_chip_smp_req - send a SMP task to FW
++ * @pm8001_ha: our hba card information.
++ * @ccb: the ccb information this request used.
++ */
++static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
++	struct pm8001_ccb_info *ccb)
++{
++	int elem, rc;
++	struct sas_task *task = ccb->task;
++	struct domain_device *dev = task->dev;
++	struct pm8001_device *pm8001_dev = dev->lldd_dev;
++	struct scatterlist *sg_req, *sg_resp;
++	u32 req_len, resp_len;
++	struct smp_req smp_cmd;
++	u32 opc;
++	struct inbound_queue_table *circularQ;
++
++	memset(&smp_cmd, 0, sizeof(smp_cmd));
++	/*
++	 * DMA-map SMP request, response buffers
++	 */
++	sg_req = &task->smp_task.smp_req;
++	elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
++	if (!elem)
++		return -ENOMEM;
++	req_len = sg_dma_len(sg_req);
++
++	sg_resp = &task->smp_task.smp_resp;
++	elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
++	if (!elem) {
++		rc = -ENOMEM;
++		goto err_out;
++	}
++	resp_len = sg_dma_len(sg_resp);
++	/* must be in dwords */
++	if ((req_len & 0x3) || (resp_len & 0x3)) {
++		rc = -EINVAL;
++		goto err_out_2;
++	}
++
++	opc = OPC_INB_SMP_REQUEST;
++	circularQ = &pm8001_ha->inbnd_q_tbl[0];
++	smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
++	smp_cmd.long_smp_req.long_req_addr =
++		cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
++	smp_cmd.long_smp_req.long_req_size =
++		cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
++	smp_cmd.long_smp_req.long_resp_addr =
++		cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp));
++	smp_cmd.long_smp_req.long_resp_size =
++		cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
++	build_smp_cmd(pm8001_dev->device_id, ccb->ccb_tag, &smp_cmd);
++	mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd);
++	return 0;
++
++err_out_2:
++	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
++			PCI_DMA_FROMDEVICE);
++err_out:
++	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
++			PCI_DMA_TODEVICE);
++	return rc;
++}
++
++/**
++ * pm8001_chip_ssp_io_req - send a SSP task to FW
++ * @pm8001_ha: our hba card information.
++ * @ccb: the ccb information this request used.
++ */
++static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
++	struct pm8001_ccb_info *ccb)
++{
++	struct sas_task *task = ccb->task;
++	struct domain_device *dev = task->dev;
++	struct pm8001_device *pm8001_dev = dev->lldd_dev;
++	struct ssp_ini_io_start_req ssp_cmd;
++	u32 tag = ccb->ccb_tag;
++	__le64 phys_addr;
++	struct inbound_queue_table *circularQ;
++	u32 opc = OPC_INB_SSPINIIOSTART;
++	memset(&ssp_cmd, 0, sizeof(ssp_cmd));
++	memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
++	ssp_cmd.dir_m_tlr = data_dir_flags[task->data_dir] << 8 | 0x0;/*0 for
++	SAS 1.1 compatible TLR*/
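++	/* the data direction code from data_dir_flags sits at bit 8 of
++	dir_m_tlr; the low bits select TLR handling (0 = SAS 1.1) */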
++	ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
++	ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
++	ssp_cmd.tag = cpu_to_le32(tag);
++	if (task->ssp_task.enable_first_burst)
++		ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
++	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
++	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
++	memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16);
++	circularQ = &pm8001_ha->inbnd_q_tbl[0];
++
++	/* fill in PRD (scatter/gather) table, if any */
++	if (task->num_scatter > 1) {
++		pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
++		phys_addr = cpu_to_le64(ccb->ccb_dma_handle +
++				offsetof(struct pm8001_ccb_info, buf_prd[0]));
++		ssp_cmd.addr_low = lower_32_bits(phys_addr);
++		ssp_cmd.addr_high = upper_32_bits(phys_addr);
++		ssp_cmd.esgl = cpu_to_le32(1<<31);
++	} else if (task->num_scatter == 1) {
++		__le64 dma_addr = cpu_to_le64(sg_dma_address(task->scatter));
++		ssp_cmd.addr_low = lower_32_bits(dma_addr);
++		ssp_cmd.addr_high = upper_32_bits(dma_addr);
++		ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
++		ssp_cmd.esgl = 0;
++	} else if (task->num_scatter == 0) {
++		ssp_cmd.addr_low = 0;
++		ssp_cmd.addr_high = 0;
++		ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
++		ssp_cmd.esgl = 0;
++	}
++	mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd);
++	return 0;
++}
++
++static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
++	struct pm8001_ccb_info *ccb)
++{
++	struct sas_task *task = ccb->task;
++	struct domain_device *dev = task->dev;
++	struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
++	u32 tag = ccb->ccb_tag;
++	struct sata_start_req sata_cmd;
++	u32 hdr_tag, ncg_tag = 0;
++	__le64 phys_addr;
++	u32 ATAP = 0x0;
++	u32 dir;
++	struct inbound_queue_table *circularQ;
++	u32  opc = OPC_INB_SATA_HOST_OPSTART;
++	memset(&sata_cmd, 0, sizeof(sata_cmd));
++	circularQ = &pm8001_ha->inbnd_q_tbl[0];
++	if (task->data_dir == PCI_DMA_NONE) {
++		ATAP = 0x04;  /* no data*/
++		PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data \n"));
++	} else if (likely(!task->ata_task.device_control_reg_update)) {
++		if (task->ata_task.dma_xfer) {
++			ATAP = 0x06; /* DMA */
++			PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA \n"));
++		} else {
++			ATAP = 0x05; /* PIO*/
++			PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO \n"));
++		}
++		if (task->ata_task.use_ncq &&
++			dev->sata_dev.command_set != ATAPI_COMMAND_SET) {
++			ATAP = 0x07; /* FPDMA */
++			PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA \n"));
++		}
++	}
++	if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag))
++		ncg_tag = hdr_tag;
++	dir = data_dir_flags[task->data_dir] << 8;
++	sata_cmd.tag = cpu_to_le32(tag);
++	sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
++	sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
++	sata_cmd.ncqtag_atap_dir_m =
++		cpu_to_le32(((ncg_tag & 0xff)<<16)|((ATAP & 0x3f) << 10) | dir);
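++	/* ncqtag_atap_dir_m packs: bits [23:16] NCQ tag, [15:10] ATAP
++	protocol code, [9:8] data direction */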
++	sata_cmd.sata_fis = task->ata_task.fis;
++	if (likely(!task->ata_task.device_control_reg_update))
++		sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
++	sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */
++	/* fill in PRD (scatter/gather) table, if any */
++	if (task->num_scatter > 1) {
++		pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
++		phys_addr = cpu_to_le64(ccb->ccb_dma_handle +
++				offsetof(struct pm8001_ccb_info, buf_prd[0]));
++		sata_cmd.addr_low = lower_32_bits(phys_addr);
++		sata_cmd.addr_high = upper_32_bits(phys_addr);
++		sata_cmd.esgl = cpu_to_le32(1 << 31);
++	} else if (task->num_scatter == 1) {
++		__le64 dma_addr = cpu_to_le64(sg_dma_address(task->scatter));
++		sata_cmd.addr_low = lower_32_bits(dma_addr);
++		sata_cmd.addr_high = upper_32_bits(dma_addr);
++		sata_cmd.len = cpu_to_le32(task->total_xfer_len);
++		sata_cmd.esgl = 0;
++	} else if (task->num_scatter == 0) {
++		sata_cmd.addr_low = 0;
++		sata_cmd.addr_high = 0;
++		sata_cmd.len = cpu_to_le32(task->total_xfer_len);
++		sata_cmd.esgl = 0;
++	}
++	mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd);
++	return 0;
++}
++
++/**
++ * pm8001_chip_phy_start_req - start phy via PHY_START COMMAND
++ * @pm8001_ha: our hba card information.
++ * @phy_id: the phy id which we wanted to start up.
++ */
++static int
++pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
++{
++	struct phy_start_req payload;
++	struct inbound_queue_table *circularQ;
++	u32 tag = 0x01;
++	u32 opcode = OPC_INB_PHYSTART;
++	circularQ = &pm8001_ha->inbnd_q_tbl[0];
++	memset(&payload, 0, sizeof(payload));
++	payload.tag = cpu_to_le32(tag);
++	/*
++	 ** [0:7]   PHY Identifier
++	 ** [8:11]  link rate 1.5G, 3G, 6G
++	 ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b both
++	 ** [14]    0b disable spin up hold; 1b enable spin up hold
++	 */
++	payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
++		LINKMODE_AUTO |	LINKRATE_15 |
++		LINKRATE_30 | LINKRATE_60 | phy_id);
++	payload.sas_identify.dev_type = SAS_END_DEV;
++	payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
++	memcpy(payload.sas_identify.sas_addr,
++		pm8001_ha->sas_addr, SAS_ADDR_SIZE);
++	payload.sas_identify.phy_id = phy_id;
++	mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
++	return 0;
++}
++
++/**
++ * pm8001_chip_phy_stop_req - stop phy via PHY_STOP COMMAND
++ * @pm8001_ha: our hba card information.
++ * @phy_id: the phy id which we wanted to stop.
++ */
++static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
++	u8 phy_id)
++{
++	struct phy_stop_req payload;
++	struct inbound_queue_table *circularQ;
++	u32 tag = 0x01;
++	u32 opcode = OPC_INB_PHYSTOP;
++	circularQ = &pm8001_ha->inbnd_q_tbl[0];
++	memset(&payload, 0, sizeof(payload));
++	payload.tag = cpu_to_le32(tag);
++	payload.phy_id = cpu_to_le32(phy_id);
++	mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
++	return 0;
++}
++
++/**
++ * see comments on mpi_reg_resp.
++ */
++static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
++	struct pm8001_device *pm8001_dev, u32 flag)
++{
++	struct reg_dev_req payload;
++	u32	opc;
++	u32 stp_sspsmp_sata = 0x4;
++	struct inbound_queue_table *circularQ;
++	u32 linkrate, phy_id;
++	u32 rc, tag = 0xdeadbeef;
++	struct pm8001_ccb_info *ccb;
++	u8 retryFlag = 0x1;
++	u16 firstBurstSize = 0;
++	u16 ITNT = 2000;
++	struct domain_device *dev = pm8001_dev->sas_device;
++	struct domain_device *parent_dev = dev->parent;
++	circularQ = &pm8001_ha->inbnd_q_tbl[0];
++
++	memset(&payload, 0, sizeof(payload));
++	rc = pm8001_tag_alloc(pm8001_ha, &tag);
++	if (rc)
++		return rc;
++	ccb = &pm8001_ha->ccb_info[tag];
++	ccb->device = pm8001_dev;
++	ccb->ccb_tag = tag;
++	payload.tag = cpu_to_le32(tag);
++	if (flag == 1)
++		stp_sspsmp_sata = 0x02; /*direct attached sata */
++	else {
++		if (pm8001_dev->dev_type == SATA_DEV)
++			stp_sspsmp_sata = 0x00; /* stp*/
++		else if (pm8001_dev->dev_type == SAS_END_DEV ||
++			pm8001_dev->dev_type == EDGE_DEV ||
++			pm8001_dev->dev_type == FANOUT_DEV)
++			stp_sspsmp_sata = 0x01; /*ssp or smp*/
++	}
++	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
++		phy_id = parent_dev->ex_dev.ex_phy->phy_id;
++	else
++		phy_id = pm8001_dev->attached_phy;
++	opc = OPC_INB_REG_DEV;
++	linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
++			pm8001_dev->sas_device->linkrate : dev->port->linkrate;
++	payload.phyid_portid =
++		cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0x0F) |
++		((phy_id & 0x0F) << 4));
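++	/* dtype_dlr_retry packs: bit 0 retry flag, bits [27:24] link rate,
++	bits [29:28] attached device type */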
++	payload.dtype_dlr_retry = cpu_to_le32((retryFlag & 0x01) |
++		((linkrate & 0x0F) * 0x1000000) |
++		((stp_sspsmp_sata & 0x03) * 0x10000000));
++	payload.firstburstsize_ITNexustimeout =
++		cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
++	memcpy(&payload.sas_addr_hi, pm8001_dev->sas_device->sas_addr,
++		SAS_ADDR_SIZE);
++	mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
++	return 0;
++}
++
++/**
++ * see comments on mpi_reg_resp.
++ */
++static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
++	u32 device_id)
++{
++	struct dereg_dev_req payload;
++	u32 opc = OPC_INB_DEREG_DEV_HANDLE;
++	struct inbound_queue_table *circularQ;
++
++	circularQ = &pm8001_ha->inbnd_q_tbl[0];
++	memset((u8 *)&payload, 0, sizeof(payload));
++	payload.tag = cpu_to_le32(1);
++	payload.device_id = cpu_to_le32(device_id);
++	PM8001_MSG_DBG(pm8001_ha,
++		pm8001_printk("unregister device device_id = %d\n", device_id));
++	mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
++	return 0;
++}
++
++/**
++ * pm8001_chip_phy_ctl_req - support the local phy operation
++ * @pm8001_ha: our hba card information.
++ * @phyId: the phy id which we want to operate on.
++ * @phy_op: the phy operation to carry out.
++ */
++static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
++	u32 phyId, u32 phy_op)
++{
++	struct local_phy_ctl_req payload;
++	struct inbound_queue_table *circularQ;
++	u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
++	memset((u8 *)&payload, 0, sizeof(payload));
++	circularQ = &pm8001_ha->inbnd_q_tbl[0];
++	payload.tag = cpu_to_le32(1);
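++	/* phyop_phyid packs: bits [15:8] phy operation (OP_BITS),
++	bits [3:0] phy id (ID_BITS) */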
++	payload.phyop_phyid =
++		cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
++	mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
++	return 0;
++}
++
++static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
++{
++	u32 value;
++#ifdef PM8001_USE_MSIX
++	return 1;
++#endif
++	value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
++	if (value)
++		return 1;
++	return 0;
++
++}
++
++/**
++ * pm8001_chip_isr - PM8001 isr handler.
++ * @pm8001_ha: our hba card information.
++ */
++static void
++pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha)
++{
++	pm8001_chip_interrupt_disable(pm8001_ha);
++	process_oq(pm8001_ha);
++	pm8001_chip_interrupt_enable(pm8001_ha);
++}
++
++static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
++	u32 dev_id, u8 flag, u32 task_tag, u32 cmd_tag)
++{
++	struct task_abort_req task_abort;
++	struct inbound_queue_table *circularQ;
++
++	circularQ = &pm8001_ha->inbnd_q_tbl[0];
++	memset(&task_abort, 0, sizeof(task_abort));
++	if (ABORT_SINGLE == (flag & ABORT_MASK)) {
++		task_abort.abort_all = 0;
++		task_abort.device_id = cpu_to_le32(dev_id);
++		task_abort.tag_to_abort = cpu_to_le32(task_tag);
++		task_abort.tag = cpu_to_le32(cmd_tag);
++	} else if (ABORT_ALL == (flag & ABORT_MASK)) {
++		task_abort.abort_all = cpu_to_le32(1);
++		task_abort.device_id = cpu_to_le32(dev_id);
++		task_abort.tag = cpu_to_le32(cmd_tag);
++	}
++	mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort);
++	return 0;
++}
++
++/**
++ * pm8001_chip_abort_task - abort a SAS task when an error or exception occurs.
++ * @pm8001_ha: our hba card information.
++ * @pm8001_dev: the device the task to be aborted was issued to.
++ * @flag: the abort flag.
++ * @task_tag: the tag of the task to abort.
++ * @cmd_tag: the tag for the abort command itself.
++ */
++static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
++	struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag)
++{
++	u32 opc, device_id;
++	int rc = TMF_RESP_FUNC_FAILED;
++	PM8001_IO_DBG(pm8001_ha, pm8001_printk("Abort tag[%x]", task_tag));
++	if (pm8001_dev->dev_type == SAS_END_DEV)
++		opc = OPC_INB_SSP_ABORT;
++	else if (pm8001_dev->dev_type == SATA_DEV)
++		opc = OPC_INB_SATA_ABORT;
++	else
++		opc = OPC_INB_SMP_ABORT;/* SMP */
++	device_id = pm8001_dev->device_id;
++	rc = send_task_abort(pm8001_ha, opc, device_id, flag,
++		task_tag, cmd_tag);
++	if (rc != TMF_RESP_FUNC_COMPLETE)
++		PM8001_IO_DBG(pm8001_ha, pm8001_printk("rc= %d\n", rc));
++	return rc;
++}
++
++/**
++ * pm8001_chip_ssp_tm_req - build the task management command.
++ * @pm8001_ha: our hba card information.
++ * @ccb: the ccb information.
++ * @tmf: task management function.
++ */
++static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
++	struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
++{
++	struct sas_task *task = ccb->task;
++	struct domain_device *dev = task->dev;
++	struct pm8001_device *pm8001_dev = dev->lldd_dev;
++	u32 opc = OPC_INB_SSPINITMSTART;
++	struct inbound_queue_table *circularQ;
++	struct ssp_ini_tm_start_req sspTMCmd;
++
++	memset(&sspTMCmd, 0, sizeof(sspTMCmd));
++	sspTMCmd.device_id = cpu_to_le32(pm8001_dev->device_id);
++	sspTMCmd.relate_tag = cpu_to_le32(tmf->tag_of_task_to_be_managed);
++	sspTMCmd.tmf = cpu_to_le32(tmf->tmf);
++	sspTMCmd.ds_ads_m = cpu_to_le32(1 << 2);
++	memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
++	sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
++	circularQ = &pm8001_ha->inbnd_q_tbl[0];
++	mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd);
++	return 0;
++}
++
++static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
++	void *payload)
++{
++	u32 opc = OPC_INB_GET_NVMD_DATA;
++	u32 nvmd_type;
++	u32 rc;
++	u32 tag;
++	struct pm8001_ccb_info *ccb;
++	struct inbound_queue_table *circularQ;
++	struct get_nvm_data_req nvmd_req;
++	struct fw_control_ex *fw_control_context;
++	struct pm8001_ioctl_payload *ioctl_payload = payload;
++
++	nvmd_type = ioctl_payload->minor_function;
++	fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
++	if (!fw_control_context)
++		return -ENOMEM;
++	fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0];
++	fw_control_context->len = ioctl_payload->length;
++	circularQ = &pm8001_ha->inbnd_q_tbl[0];
++	memset(&nvmd_req, 0, sizeof(nvmd_req));
++	rc = pm8001_tag_alloc(pm8001_ha, &tag);
++	if (rc)
++		return rc;
++	ccb = &pm8001_ha->ccb_info[tag];
++	ccb->ccb_tag = tag;
++	ccb->fw_control_context = fw_control_context;
++	nvmd_req.tag = cpu_to_le32(tag);
++
++	switch (nvmd_type) {
++	case TWI_DEVICE: {
++		u32 twi_addr, twi_page_size;
++		twi_addr = 0xa8;
++		twi_page_size = 2;
++
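++		/* len_ir_vpdd packs: bit 31 indirect mode (IPMode),
++		bits [23:16] TWI bus address, bits [15:8] TWI page size,
++		bits [3:0] NVMD device type */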
++		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 |
++			twi_page_size << 8 | TWI_DEVICE);
++		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
++		nvmd_req.resp_addr_hi =
++		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
++		nvmd_req.resp_addr_lo =
++		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
++		break;
++	}
++	case C_SEEPROM: {
++		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM);
++		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
++		nvmd_req.resp_addr_hi =
++		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
++		nvmd_req.resp_addr_lo =
++		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
++		break;
++	}
++	case VPD_FLASH: {
++		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH);
++		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
++		nvmd_req.resp_addr_hi =
++		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
++		nvmd_req.resp_addr_lo =
++		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
++		break;
++	}
++	case EXPAN_ROM: {
++		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM);
++		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
++		nvmd_req.resp_addr_hi =
++		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
++		nvmd_req.resp_addr_lo =
++		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
++		break;
++	}
++	default:
++		break;
++	}
++	mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
++	return 0;
++}
++
++static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
++	void *payload)
++{
++	u32 opc = OPC_INB_SET_NVMD_DATA;
++	u32 nvmd_type;
++	u32 rc;
++	u32 tag;
++	struct pm8001_ccb_info *ccb;
++	struct inbound_queue_table *circularQ;
++	struct set_nvm_data_req nvmd_req;
++	struct fw_control_ex *fw_control_context;
++	struct pm8001_ioctl_payload *ioctl_payload = payload;
++
++	nvmd_type = ioctl_payload->minor_function;
++	fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
++	if (!fw_control_context)
++		return -ENOMEM;
++	circularQ = &pm8001_ha->inbnd_q_tbl[0];
++	memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
++		ioctl_payload->func_specific,
++		ioctl_payload->length);
++	memset(&nvmd_req, 0, sizeof(nvmd_req));
++	rc = pm8001_tag_alloc(pm8001_ha, &tag);
++	if (rc)
++		return rc;
++	ccb = &pm8001_ha->ccb_info[tag];
++	ccb->fw_control_context = fw_control_context;
++	ccb->ccb_tag = tag;
++	nvmd_req.tag = cpu_to_le32(tag);
++	switch (nvmd_type) {
++	case TWI_DEVICE: {
++		u32 twi_addr, twi_page_size;
++		twi_addr = 0xa8;
++		twi_page_size = 2;
++		nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
++		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 |
++			twi_page_size << 8 | TWI_DEVICE);
++		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
++		nvmd_req.resp_addr_hi =
++		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
++		nvmd_req.resp_addr_lo =
++		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
++		break;
++	}
++	case C_SEEPROM:
++		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM);
++		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
++		nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
++		nvmd_req.resp_addr_hi =
++		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
++		nvmd_req.resp_addr_lo =
++		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
++		break;
++	case VPD_FLASH:
++		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH);
++		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
++		nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
++		nvmd_req.resp_addr_hi =
++		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
++		nvmd_req.resp_addr_lo =
++		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
++		break;
++	case EXPAN_ROM:
++		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM);
++		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
++		nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98);
++		nvmd_req.resp_addr_hi =
++		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
++		nvmd_req.resp_addr_lo =
++		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
++		break;
++	default:
++		break;
++	}
++	mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
++	return 0;
++}
++
++/**
++ * pm8001_chip_fw_flash_update_build - support the firmware update operation
++ * @pm8001_ha: our hba card information.
++ * @fw_flash_updata_info: firmware flash update param
++ * @tag: the command tag for this request.
++ */
++static int
++pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
++	void *fw_flash_updata_info, u32 tag)
++{
++	struct fw_flash_Update_req payload;
++	struct fw_flash_updata_info *info;
++	struct inbound_queue_table *circularQ;
++	u32 opc = OPC_INB_FW_FLASH_UPDATE;
++
++	memset((u8 *)&payload, 0, sizeof(struct fw_flash_Update_req));
++	circularQ = &pm8001_ha->inbnd_q_tbl[0];
++	info = fw_flash_updata_info;
++	payload.tag = cpu_to_le32(tag);
++	payload.cur_image_len = cpu_to_le32(info->cur_image_len);
++	payload.cur_image_offset = cpu_to_le32(info->cur_image_offset);
++	payload.total_image_len = cpu_to_le32(info->total_image_len);
++	payload.len = info->sgl.im_len.len;
++	payload.sgl_addr_lo = lower_32_bits(info->sgl.addr);
++	payload.sgl_addr_hi = upper_32_bits(info->sgl.addr);
++	mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
++	return 0;
++}
++
++static int
++pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
++	void *payload)
++{
++	struct fw_flash_updata_info flash_update_info;
++	struct fw_control_info *fw_control;
++	struct fw_control_ex *fw_control_context;
++	u32 rc;
++	u32 tag;
++	struct pm8001_ccb_info *ccb;
++	void *buffer = NULL;
++	dma_addr_t phys_addr;
++	u32 phys_addr_hi;
++	u32 phys_addr_lo;
++	struct pm8001_ioctl_payload *ioctl_payload = payload;
++
++	fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
++	if (!fw_control_context)
++		return -ENOMEM;
++	fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0];
++	if (fw_control->len != 0) {
++		if (pm8001_mem_alloc(pm8001_ha->pdev,
++			(void **)&buffer,
++			&phys_addr,
++			&phys_addr_hi,
++			&phys_addr_lo,
++			fw_control->len, 0) != 0) {
++				PM8001_FAIL_DBG(pm8001_ha,
++					pm8001_printk("Mem alloc failure\n"));
++				return -ENOMEM;
++		}
++	}
++	memset((void *)buffer, 0, fw_control->len);
++	memcpy((void *)buffer, fw_control->buffer, fw_control->len);
++	flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
++	flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
++	flash_update_info.sgl.im_len.e = 0;
++	flash_update_info.cur_image_offset = fw_control->offset;
++	flash_update_info.cur_image_len = fw_control->len;
++	flash_update_info.total_image_len = fw_control->size;
++	fw_control_context->fw_control = fw_control;
++	fw_control_context->virtAddr = buffer;
++	fw_control_context->len = fw_control->len;
++	rc = pm8001_tag_alloc(pm8001_ha, &tag);
++	if (rc)
++		return rc;
++	ccb = &pm8001_ha->ccb_info[tag];
++	ccb->fw_control_context = fw_control_context;
++	ccb->ccb_tag = tag;
++	pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info, tag);
++	return 0;
++}
++
++static int
++pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
++	struct pm8001_device *pm8001_dev, u32 state)
++{
++	struct set_dev_state_req payload;
++	struct inbound_queue_table *circularQ;
++	struct pm8001_ccb_info *ccb;
++	u32 rc;
++	u32 tag;
++	u32 opc = OPC_INB_SET_DEVICE_STATE;
++	memset((u8 *)&payload, 0, sizeof(payload));
++	rc = pm8001_tag_alloc(pm8001_ha, &tag);
++	if (rc)
++		return -1;
++	ccb = &pm8001_ha->ccb_info[tag];
++	ccb->ccb_tag = tag;
++	ccb->device = pm8001_dev;
++	circularQ = &pm8001_ha->inbnd_q_tbl[0];
++	payload.tag = cpu_to_le32(tag);
++	payload.device_id = cpu_to_le32(pm8001_dev->device_id);
++	payload.nds = cpu_to_le32(state);
++	mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
++	return 0;
++
++}
++
++const struct pm8001_dispatch pm8001_8001_dispatch = {
++	.name			= "pmc8001",
++	.chip_init		= pm8001_chip_init,
++	.chip_soft_rst		= pm8001_chip_soft_rst,
++	.chip_rst		= pm8001_hw_chip_rst,
++	.chip_iounmap		= pm8001_chip_iounmap,
++	.isr			= pm8001_chip_isr,
++	.is_our_interupt	= pm8001_chip_is_our_interupt,
++	.isr_process_oq		= process_oq,
++	.interrupt_enable 	= pm8001_chip_interrupt_enable,
++	.interrupt_disable	= pm8001_chip_interrupt_disable,
++	.make_prd		= pm8001_chip_make_sg,
++	.smp_req		= pm8001_chip_smp_req,
++	.ssp_io_req		= pm8001_chip_ssp_io_req,
++	.sata_req		= pm8001_chip_sata_req,
++	.phy_start_req		= pm8001_chip_phy_start_req,
++	.phy_stop_req		= pm8001_chip_phy_stop_req,
++	.reg_dev_req		= pm8001_chip_reg_dev_req,
++	.dereg_dev_req		= pm8001_chip_dereg_dev_req,
++	.phy_ctl_req		= pm8001_chip_phy_ctl_req,
++	.task_abort		= pm8001_chip_abort_task,
++	.ssp_tm_req		= pm8001_chip_ssp_tm_req,
++	.get_nvmd_req		= pm8001_chip_get_nvmd_req,
++	.set_nvmd_req		= pm8001_chip_set_nvmd_req,
++	.fw_flash_update_req	= pm8001_chip_fw_flash_update_req,
++	.set_dev_state_req	= pm8001_chip_set_dev_state_req,
++};
++
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
+new file mode 100644
+index 0000000..3690a2b
+--- /dev/null
++++ b/drivers/scsi/pm8001/pm8001_hwi.h
+@@ -0,0 +1,1011 @@
++/*
++ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
++ *
++ * Copyright (c) 2008-2009 USI Co., Ltd.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions, and the following disclaimer,
++ *    without modification.
++ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
++ *    substantially similar to the "NO WARRANTY" disclaimer below
++ *    ("Disclaimer") and any redistribution must be conditioned upon
++ *    including a substantially similar Disclaimer requirement for further
++ *    binary redistribution.
++ * 3. Neither the names of the above-listed copyright holders nor the names
++ *    of any contributors may be used to endorse or promote products derived
++ *    from this software without specific prior written permission.
++ *
++ * Alternatively, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") version 2 as published by the Free
++ * Software Foundation.
++ *
++ * NO WARRANTY
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
++ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
++ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGES.
++ *
++ */
++#ifndef _PMC8001_REG_H_
++#define _PMC8001_REG_H_
++
++#include <linux/types.h>
++#include <scsi/libsas.h>
++
++
++/* for Request Opcode of IOMB */
++#define OPC_INB_ECHO				1	/* 0x001 */
++#define OPC_INB_PHYSTART			4	/* 0x004 */
++#define OPC_INB_PHYSTOP				5	/* 0x005 */
++#define OPC_INB_SSPINIIOSTART			6	/* 0x006 */
++#define OPC_INB_SSPINITMSTART			7	/* 0x007 */
++#define OPC_INB_SSPINIEXTIOSTART		8	/* 0x008 */
++#define OPC_INB_DEV_HANDLE_ACCEPT		9	/* 0x009 */
++#define OPC_INB_SSPTGTIOSTART			10	/* 0x00A */
++#define OPC_INB_SSPTGTRSPSTART			11	/* 0x00B */
++#define OPC_INB_SSPINIEDCIOSTART		12	/* 0x00C */
++#define OPC_INB_SSPINIEXTEDCIOSTART		13	/* 0x00D */
++#define OPC_INB_SSPTGTEDCIOSTART		14	/* 0x00E */
++#define OPC_INB_SSP_ABORT			15	/* 0x00F */
++#define OPC_INB_DEREG_DEV_HANDLE		16	/* 0x010 */
++#define OPC_INB_GET_DEV_HANDLE			17	/* 0x011 */
++#define OPC_INB_SMP_REQUEST			18	/* 0x012 */
++/* SMP_RESPONSE is removed */
++#define OPC_INB_SMP_RESPONSE			19	/* 0x013 */
++#define OPC_INB_SMP_ABORT			20	/* 0x014 */
++#define OPC_INB_REG_DEV				22	/* 0x016 */
++#define OPC_INB_SATA_HOST_OPSTART		23	/* 0x017 */
++#define OPC_INB_SATA_ABORT			24	/* 0x018 */
++#define OPC_INB_LOCAL_PHY_CONTROL		25	/* 0x019 */
++#define OPC_INB_GET_DEV_INFO			26	/* 0x01A */
++#define OPC_INB_FW_FLASH_UPDATE			32	/* 0x020 */
++#define OPC_INB_GPIO				34	/* 0x022 */
++#define OPC_INB_SAS_DIAG_MODE_START_END		35	/* 0x023 */
++#define OPC_INB_SAS_DIAG_EXECUTE		36	/* 0x024 */
++#define OPC_INB_SAS_HW_EVENT_ACK		37	/* 0x025 */
++#define OPC_INB_GET_TIME_STAMP			38	/* 0x026 */
++#define OPC_INB_PORT_CONTROL			39	/* 0x027 */
++#define OPC_INB_GET_NVMD_DATA			40	/* 0x028 */
++#define OPC_INB_SET_NVMD_DATA			41	/* 0x029 */
++#define OPC_INB_SET_DEVICE_STATE		42	/* 0x02A */
++#define OPC_INB_GET_DEVICE_STATE		43	/* 0x02B */
++#define OPC_INB_SET_DEV_INFO			44	/* 0x02C */
++#define OPC_INB_SAS_RE_INITIALIZE		45	/* 0x02D */
++
++/* for Response Opcode of IOMB */
++#define OPC_OUB_ECHO				1	/* 0x001 */
++#define OPC_OUB_HW_EVENT			4	/* 0x004 */
++#define OPC_OUB_SSP_COMP			5	/* 0x005 */
++#define OPC_OUB_SMP_COMP			6	/* 0x006 */
++#define OPC_OUB_LOCAL_PHY_CNTRL			7	/* 0x007 */
++#define OPC_OUB_DEV_REGIST			10	/* 0x00A */
++#define OPC_OUB_DEREG_DEV			11	/* 0x00B */
++#define OPC_OUB_GET_DEV_HANDLE			12	/* 0x00C */
++#define OPC_OUB_SATA_COMP			13	/* 0x00D */
++#define OPC_OUB_SATA_EVENT			14	/* 0x00E */
++#define OPC_OUB_SSP_EVENT			15	/* 0x00F */
++#define OPC_OUB_DEV_HANDLE_ARRIV		16	/* 0x010 */
++/* SMP_RECEIVED Notification is removed */
++#define OPC_OUB_SMP_RECV_EVENT			17	/* 0x011 */
++#define OPC_OUB_SSP_RECV_EVENT			18	/* 0x012 */
++#define OPC_OUB_DEV_INFO			19	/* 0x013 */
++#define OPC_OUB_FW_FLASH_UPDATE			20	/* 0x014 */
++#define OPC_OUB_GPIO_RESPONSE			22	/* 0x016 */
++#define OPC_OUB_GPIO_EVENT			23	/* 0x017 */
++#define OPC_OUB_GENERAL_EVENT			24	/* 0x018 */
++#define OPC_OUB_SSP_ABORT_RSP			26	/* 0x01A */
++#define OPC_OUB_SATA_ABORT_RSP			27	/* 0x01B */
++#define OPC_OUB_SAS_DIAG_MODE_START_END		28	/* 0x01C */
++#define OPC_OUB_SAS_DIAG_EXECUTE		29	/* 0x01D */
++#define OPC_OUB_GET_TIME_STAMP			30	/* 0x01E */
++#define OPC_OUB_SAS_HW_EVENT_ACK		31	/* 0x01F */
++#define OPC_OUB_PORT_CONTROL			32	/* 0x020 */
++#define OPC_OUB_SKIP_ENTRY			33	/* 0x021 */
++#define OPC_OUB_SMP_ABORT_RSP			34	/* 0x022 */
++#define OPC_OUB_GET_NVMD_DATA			35	/* 0x023 */
++#define OPC_OUB_SET_NVMD_DATA			36	/* 0x024 */
++#define OPC_OUB_DEVICE_HANDLE_REMOVAL		37	/* 0x025 */
++#define OPC_OUB_SET_DEVICE_STATE		38	/* 0x026 */
++#define OPC_OUB_GET_DEVICE_STATE		39	/* 0x027 */
++#define OPC_OUB_SET_DEV_INFO			40	/* 0x028 */
++#define OPC_OUB_SAS_RE_INITIALIZE		41	/* 0x029 */
++
++/* for phy start*/
++#define SPINHOLD_DISABLE		(0x00 << 14)
++#define SPINHOLD_ENABLE			(0x01 << 14)
++#define LINKMODE_SAS			(0x01 << 12)
++#define LINKMODE_DSATA			(0x02 << 12)
++#define LINKMODE_AUTO			(0x03 << 12)
++#define LINKRATE_15			(0x01 << 8)
++#define LINKRATE_30			(0x02 << 8)
++#define LINKRATE_60			(0x04 << 8)
++
++struct mpi_msg_hdr {
++	__le32	header;	/* Bits [11:0]  - Message operation code */
++	/* Bits [15:12] - Message Category */
++	/* Bits [21:16] - Outbound queue ID for the
++	operation completion message */
++	/* Bits [23:22] - Reserved */
++	/* Bits [28:24] - Buffer Count, indicates how
++	many buffers are allocated for the message */
++	/* Bits [30:29] - Reserved */
++	/* Bits [31] - Message Valid bit */
++} __attribute__((packed, aligned(4)));
++
++
++/*
++ * brief the data structure of PHY Start Command
++ * used to describe enabling the phy (64 bytes)
++ */
++struct phy_start_req {
++	__le32	tag;
++	__le32	ase_sh_lm_slr_phyid;
++	struct sas_identify_frame sas_identify;
++	u32	reserved[5];
++} __attribute__((packed, aligned(4)));
++
++
++/*
++ * brief the data structure of PHY Stop Command
++ * used to disable the phy (64 bytes)
++ */
++struct phy_stop_req {
++	__le32	tag;
++	__le32	phy_id;
++	u32	reserved[13];
++} __attribute__((packed, aligned(4)));
++
++
++/* set device bits fis - device to host */
++struct  set_dev_bits_fis {
++	u8	fis_type;	/* 0xA1*/
++	u8	n_i_pmport;
++	/* b7 : n Bit. Notification bit. If set device needs attention. */
++	/* b6 : i Bit. Interrupt Bit */
++	/* b5-b4: reserved2 */
++	/* b3-b0: PM Port */
++	u8 	status;
++	u8	error;
++	u32	_r_a;
++} __attribute__ ((packed));
++/* PIO setup FIS - device to host */
++struct  pio_setup_fis {
++	u8	fis_type;	/* 0x5f */
++	u8	i_d_pmPort;
++	/* b7 : reserved */
++	/* b6 : i bit. Interrupt bit */
++	/* b5 : d bit. data transfer direction. set to 1 for device to host
++	xfer */
++	/* b4 : reserved */
++	/* b3-b0: PM Port */
++	u8	status;
++	u8	error;
++	u8	lbal;
++	u8	lbam;
++	u8	lbah;
++	u8	device;
++	u8	lbal_exp;
++	u8	lbam_exp;
++	u8	lbah_exp;
++	u8	_r_a;
++	u8	sector_count;
++	u8	sector_count_exp;
++	u8	_r_b;
++	u8	e_status;
++	u8	_r_c[2];
++	u8	transfer_count;
++} __attribute__ ((packed));
++
++/*
++ * brief the data structure of SATA Completion Response
++ * used to describe the sata task response (64 bytes)
++ */
++struct sata_completion_resp {
++	__le32	tag;
++	__le32	status;
++	__le32	param;
++	u32	sata_resp[12];
++} __attribute__((packed, aligned(4)));
++
++
++/*
++ * brief the data structure of SAS HW Event Notification
++ * used to alert the host about a hardware event (64 bytes)
++ */
++struct hw_event_resp {
++	__le32	lr_evt_status_phyid_portid;
++	__le32	evt_param;
++	__le32	npip_portstate;
++	struct sas_identify_frame	sas_identify;
++	struct dev_to_host_fis	sata_fis;
++} __attribute__((packed, aligned(4)));
++
++
++/*
++ * brief the data structure of  REGISTER DEVICE Command
++ * use to describe MPI REGISTER DEVICE Command (64 bytes)
++ */
++
++struct reg_dev_req {
++	__le32	tag;
++	__le32	phyid_portid;
++	__le32	dtype_dlr_retry;
++	__le32	firstburstsize_ITNexustimeout;
++	u32	sas_addr_hi;
++	u32	sas_addr_low;
++	__le32	upper_device_id;
++	u32	reserved[8];
++} __attribute__((packed, aligned(4)));
++
++
++/*
++ * brief the data structure of  DEREGISTER DEVICE Command
++ * use to request spc to remove all internal resources associated
++ * with the device id (64 bytes)
++ */
++
++struct dereg_dev_req {
++	__le32	tag;
++	__le32	device_id;
++	u32	reserved[13];
++} __attribute__((packed, aligned(4)));
++
++
++/*
++ * brief the data structure of DEVICE_REGISTRATION Response
++ * use to notify the completion of the device registration  (64 bytes)
++ */
++
++struct dev_reg_resp {
++	__le32	tag;
++	__le32	status;
++	__le32	device_id;
++	u32	reserved[12];
++} __attribute__((packed, aligned(4)));
++
++
++/*
++ * brief the data structure of Local PHY Control Command
++ * use to issue PHY CONTROL to local phy (64 bytes)
++ */
++struct local_phy_ctl_req {
++	__le32	tag;
++	__le32	phyop_phyid;
++	u32	reserved1[13];
++} __attribute__((packed, aligned(4)));
++
++
++/**
++ * brief the data structure of Local Phy Control Response
++ * use to describe MPI Local Phy Control Response (64 bytes)
++ */
++struct local_phy_ctl_resp {
++	__le32	tag;
++	__le32	phyop_phyid;
++	__le32	status;
++	u32	reserved[12];
++} __attribute__((packed, aligned(4)));
++
++
++#define OP_BITS 0x0000FF00
++#define ID_BITS 0x0000000F
++
++/*
++ * brief the data structure of PORT Control Command
++ * use to control port properties (64 bytes)
++ */
++
++struct port_ctl_req {
++	__le32	tag;
++	__le32	portop_portid;
++	__le32	param0;
++	__le32	param1;
++	u32	reserved1[11];
++} __attribute__((packed, aligned(4)));
++
++
++/*
++ * brief the data structure of HW Event Ack Command
++ * use to acknowledge receive HW event (64 bytes)
++ */
++
++struct hw_event_ack_req {
++	__le32	tag;
++	__le32	sea_phyid_portid;
++	__le32	param0;
++	__le32	param1;
++	u32	reserved1[11];
++} __attribute__((packed, aligned(4)));
++
++
++/*
++ * brief the data structure of SSP Completion Response
++ * use to indicate a SSP Completion  (n bytes)
++ */
++struct ssp_completion_resp {
++	__le32	tag;
++	__le32	status;
++	__le32	param;
++	__le32	ssptag_rescv_rescpad;
++	struct ssp_response_iu  ssp_resp_iu;
++	__le32	residual_count;
++} __attribute__((packed, aligned(4)));
++
++
++#define SSP_RESCV_BIT	0x00010000
++
++/*
++ * brief the data structure of SATA EVENT Response
++ * used to indicate a SATA Completion (64 bytes)
++ */
++
++struct sata_event_resp {
++	__le32	tag;
++	__le32	event;
++	__le32	port_id;
++	__le32	device_id;
++	u32	reserved[11];
++} __attribute__((packed, aligned(4)));
++
++/*
++ * brief the data structure of SSP EVENT Response
++ * used to indicate an SSP Completion (64 bytes)
++ */
++
++struct ssp_event_resp {
++	__le32	tag;
++	__le32	event;
++	__le32	port_id;
++	__le32	device_id;
++	u32	reserved[11];
++} __attribute__((packed, aligned(4)));
++
++/**
++ * brief the data structure of General Event Notification Response
++ * use to describe MPI General Event Notification Response (64 bytes)
++ */
++struct general_event_resp {
++	__le32	status;
++	__le32	inb_IOMB_payload[14];
++} __attribute__((packed, aligned(4)));
++
++
++#define GENERAL_EVENT_PAYLOAD	14
++#define OPCODE_BITS	0x00000fff
++
++/*
++ * brief the data structure of SMP Request Command
++ * use to describe MPI SMP REQUEST Command (64 bytes)
++ */
++struct smp_req {
++	__le32	tag;
++	__le32	device_id;
++	__le32	len_ip_ir;
++	/* Bits [0]  - Indirect response */
++	/* Bits [1] - Indirect Payload */
++	/* Bits [15:2] - Reserved */
++	/* Bits [23:16] - direct payload Len */
++	/* Bits [31:24] - Reserved */
++	u8	smp_req16[16];
++	union {
++		u8	smp_req[32];
++		struct {
++			__le64 long_req_addr;/* sg dma address, LE */
++			__le32 long_req_size;/* LE */
++			u32	_r_a;
++			__le64 long_resp_addr;/* sg dma address, LE */
++			__le32 long_resp_size;/* LE */
++			u32	_r_b;
++			} long_smp_req;/* sequencer extension */
++	};
++} __attribute__((packed, aligned(4)));
++/*
++ * brief the data structure of SMP Completion Response
++ * use to describe MPI SMP Completion Response (64 bytes)
++ */
++struct smp_completion_resp {
++	__le32	tag;
++	__le32	status;
++	__le32	param;
++	__le32	_r_a[12];
++} __attribute__((packed, aligned(4)));
++
++/*
++ * brief the data structure of SSP SMP SATA Abort Command
++ * use to describe MPI SSP SMP & SATA Abort Command (64 bytes)
++ */
++struct task_abort_req {
++	__le32	tag;
++	__le32	device_id;
++	__le32	tag_to_abort;
++	__le32	abort_all;
++	u32	reserved[11];
++} __attribute__((packed, aligned(4)));
++
++/* These flags used for SSP SMP & SATA Abort */
++#define ABORT_MASK		0x3
++#define ABORT_SINGLE		0x0
++#define ABORT_ALL		0x1
++
++/**
++ * brief the data structure of SSP SATA SMP Abort Response
++ * use to describe SSP SMP & SATA Abort Response ( 64 bytes)
++ */
++struct task_abort_resp {
++	__le32	tag;
++	__le32	status;
++	__le32	scp;
++	u32	reserved[12];
++} __attribute__((packed, aligned(4)));
++
++
++/**
++ * brief the data structure of SAS Diagnostic Start/End Command
++ * use to describe MPI SAS Diagnostic Start/End Command (64 bytes)
++ */
++struct sas_diag_start_end_req {
++	__le32	tag;
++	__le32	operation_phyid;
++	u32	reserved[13];
++} __attribute__((packed, aligned(4)));
++
++
++/**
++ * brief the data structure of SAS Diagnostic Execute Command
++ * use to describe MPI SAS Diagnostic Execute Command (64 bytes)
++ */
++struct sas_diag_execute_req {
++	__le32	tag;
++	__le32	cmdtype_cmddesc_phyid;
++	__le32	pat1_pat2;
++	__le32	threshold;
++	__le32	codepat_errmsk;
++	__le32	pmon;
++	__le32	pERF1CTL;
++	u32	reserved[8];
++} __attribute__((packed, aligned(4)));
++
++
++#define SAS_DIAG_PARAM_BYTES 24
++
++/*
++ * brief the data structure of Set Device State Command
++ * use to describe MPI Set Device State Command (64 bytes)
++ */
++struct set_dev_state_req {
++	__le32	tag;
++	__le32	device_id;
++	__le32	nds;
++	u32	reserved[12];
++} __attribute__((packed, aligned(4)));
++
++
++/*
++ * brief the data structure of SATA Start Command
++ * use to describe MPI SATA IO Start Command (64 bytes)
++ */
++
++struct sata_start_req {
++	__le32	tag;
++	__le32	device_id;
++	__le32	data_len;
++	__le32	ncqtag_atap_dir_m;
++	struct host_to_dev_fis	sata_fis;
++	u32	reserved1;
++	u32	reserved2;
++	u32	addr_low;
++	u32	addr_high;
++	__le32	len;
++	__le32	esgl;
++} __attribute__((packed, aligned(4)));
++
++/**
++ * brief the data structure of SSP INI TM Start Command
++ * use to describe MPI SSP INI TM Start Command (64 bytes)
++ */
++struct ssp_ini_tm_start_req {
++	__le32	tag;
++	__le32	device_id;
++	__le32	relate_tag;
++	__le32	tmf;
++	u8	lun[8];
++	__le32	ds_ads_m;
++	u32	reserved[8];
++} __attribute__((packed, aligned(4)));
++
++
++struct ssp_info_unit {
++	u8	lun[8];/* SCSI Logical Unit Number */
++	u8	reserved1;/* reserved */
++	u8	efb_prio_attr;
++	/* B7   : enabledFirstBurst */
++	/* B6-3 : taskPriority */
++	/* B2-0 : taskAttribute */
++	u8	reserved2;	/* reserved */
++	u8	additional_cdb_len;
++	/* B7-2 : additional_cdb_len */
++	/* B1-0 : reserved */
++	u8	cdb[16];/* The SCSI CDB up to 16 bytes length */
++} __attribute__((packed, aligned(4)));
++
++
++/**
++ * brief the data structure of SSP INI IO Start Command
++ * use to describe MPI SSP INI IO Start Command (64 bytes)
++ */
++struct ssp_ini_io_start_req {
++	__le32	tag;
++	__le32	device_id;
++	__le32	data_len;
++	__le32	dir_m_tlr;
++	struct ssp_info_unit	ssp_iu;
++	__le32	addr_low;
++	__le32	addr_high;
++	__le32	len;
++	__le32	esgl;
++} __attribute__((packed, aligned(4)));
++
++
++/**
++ * brief: the data structure of the Firmware Download command,
++ * used to describe the MPI FW DOWNLOAD Command (64 bytes)
++ */
++struct fw_flash_Update_req {
++	__le32	tag;
++	__le32	cur_image_offset;
++	__le32	cur_image_len;
++	__le32	total_image_len;
++	u32	reserved0[7];
++	__le32	sgl_addr_lo;
++	__le32	sgl_addr_hi;
++	__le32	len;
++	__le32	ext_reserved;
++} __attribute__((packed, aligned(4)));
++
++
++#define FWFLASH_IOMB_RESERVED_LEN 0x07
++/**
++ * brief: the data structure of the FW_FLASH_UPDATE Response,
++ * used to describe the MPI FW_FLASH_UPDATE Response (64 bytes)
++ *
++ */
++struct fw_flash_Update_resp {
++	dma_addr_t	tag;
++	__le32	status;
++	u32	reserved[13];
++} __attribute__((packed, aligned(4)));
++
++
++/**
++ * brief: the data structure of the Get NVM Data Command,
++ * used to get data from the NVM in the HBA (64 bytes)
++ */
++struct get_nvm_data_req {
++	__le32	tag;
++	__le32	len_ir_vpdd;
++	__le32	vpd_offset;
++	u32	reserved[8];
++	__le32	resp_addr_lo;
++	__le32	resp_addr_hi;
++	__le32	resp_len;
++	u32	reserved1;
++} __attribute__((packed, aligned(4)));
++
++
++struct set_nvm_data_req {
++	__le32	tag;
++	__le32	len_ir_vpdd;
++	__le32	vpd_offset;
++	u32	reserved[8];
++	__le32	resp_addr_lo;
++	__le32	resp_addr_hi;
++	__le32	resp_len;
++	u32	reserved1;
++} __attribute__((packed, aligned(4)));
++
++
++#define TWI_DEVICE	0x0
++#define C_SEEPROM	0x1
++#define VPD_FLASH	0x4
++#define AAP1_RDUMP	0x5
++#define IOP_RDUMP	0x6
++#define EXPAN_ROM	0x7
++
++#define IPMode		0x80000000
++#define NVMD_TYPE	0x0000000F
++#define NVMD_STAT	0x0000FFFF
++#define NVMD_LEN	0xFF000000
++/**
++ * brief: the data structure of the Get NVMD Data Response,
++ * used to describe the MPI Get NVMD Data Response (64 bytes)
++ */
++struct get_nvm_data_resp {
++	__le32		tag;
++	__le32		ir_tda_bn_dps_das_nvm;
++	__le32		dlen_status;
++	__le32		nvm_data[12];
++} __attribute__((packed, aligned(4)));
++
++
++/**
++ * brief: the data structure of the SAS Diagnostic Start/End Response,
++ * used to describe the MPI SAS Diagnostic Start/End Response (64 bytes)
++ *
++ */
++struct sas_diag_start_end_resp {
++	__le32		tag;
++	__le32		status;
++	u32		reserved[13];
++} __attribute__((packed, aligned(4)));
++
++
++/**
++ * brief: the data structure of the SAS Diagnostic Execute Response,
++ * used to describe the MPI SAS Diagnostic Execute Response (64 bytes)
++ *
++ */
++struct sas_diag_execute_resp {
++	__le32		tag;
++	__le32		cmdtype_cmddesc_phyid;
++	__le32		Status;
++	__le32		ReportData;
++	u32		reserved[11];
++} __attribute__((packed, aligned(4)));
++
++
++/**
++ * brief: the data structure of the Set Device State Response,
++ * used to describe the MPI Set Device State Response (64 bytes)
++ *
++ */
++struct set_dev_state_resp {
++	__le32		tag;
++	__le32		status;
++	__le32		device_id;
++	__le32		pds_nds;
++	u32		reserved[11];
++} __attribute__((packed, aligned(4)));
++
++
++#define NDS_BITS 0x0F
++#define PDS_BITS 0xF0
++
++/*
++ * HW Events type
++ */
++
++#define HW_EVENT_RESET_START			0x01
++#define HW_EVENT_CHIP_RESET_COMPLETE		0x02
++#define HW_EVENT_PHY_STOP_STATUS		0x03
++#define HW_EVENT_SAS_PHY_UP			0x04
++#define HW_EVENT_SATA_PHY_UP			0x05
++#define HW_EVENT_SATA_SPINUP_HOLD		0x06
++#define HW_EVENT_PHY_DOWN			0x07
++#define HW_EVENT_PORT_INVALID			0x08
++#define HW_EVENT_BROADCAST_CHANGE		0x09
++#define HW_EVENT_PHY_ERROR			0x0A
++#define HW_EVENT_BROADCAST_SES			0x0B
++#define HW_EVENT_INBOUND_CRC_ERROR		0x0C
++#define HW_EVENT_HARD_RESET_RECEIVED		0x0D
++#define HW_EVENT_MALFUNCTION			0x0E
++#define HW_EVENT_ID_FRAME_TIMEOUT		0x0F
++#define HW_EVENT_BROADCAST_EXP			0x10
++#define HW_EVENT_PHY_START_STATUS		0x11
++#define HW_EVENT_LINK_ERR_INVALID_DWORD		0x12
++#define HW_EVENT_LINK_ERR_DISPARITY_ERROR	0x13
++#define HW_EVENT_LINK_ERR_CODE_VIOLATION	0x14
++#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH	0x15
++#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED	0x16
++#define HW_EVENT_PORT_RECOVERY_TIMER_TMO	0x17
++#define HW_EVENT_PORT_RECOVER			0x18
++#define HW_EVENT_PORT_RESET_TIMER_TMO		0x19
++#define HW_EVENT_PORT_RESET_COMPLETE		0x20
++#define EVENT_BROADCAST_ASYNCH_EVENT		0x21
++
++/* port state */
++#define PORT_NOT_ESTABLISHED			0x00
++#define PORT_VALID				0x01
++#define PORT_LOSTCOMM				0x02
++#define PORT_IN_RESET				0x04
++#define PORT_INVALID				0x08
++
++/*
++ * SSP/SMP/SATA IO Completion Status values
++ */
++
++#define IO_SUCCESS				0x00
++#define IO_ABORTED				0x01
++#define IO_OVERFLOW				0x02
++#define IO_UNDERFLOW				0x03
++#define IO_FAILED				0x04
++#define IO_ABORT_RESET				0x05
++#define IO_NOT_VALID				0x06
++#define IO_NO_DEVICE				0x07
++#define IO_ILLEGAL_PARAMETER			0x08
++#define IO_LINK_FAILURE				0x09
++#define IO_PROG_ERROR				0x0A
++#define IO_EDC_IN_ERROR				0x0B
++#define IO_EDC_OUT_ERROR			0x0C
++#define IO_ERROR_HW_TIMEOUT			0x0D
++#define IO_XFER_ERROR_BREAK			0x0E
++#define IO_XFER_ERROR_PHY_NOT_READY		0x0F
++#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED	0x10
++#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION		0x11
++#define IO_OPEN_CNX_ERROR_BREAK				0x12
++#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS			0x13
++#define IO_OPEN_CNX_ERROR_BAD_DESTINATION		0x14
++#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED	0x15
++#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY		0x16
++#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION		0x17
++#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR			0x18
++#define IO_XFER_ERROR_NAK_RECEIVED			0x19
++#define IO_XFER_ERROR_ACK_NAK_TIMEOUT			0x1A
++#define IO_XFER_ERROR_PEER_ABORTED			0x1B
++#define IO_XFER_ERROR_RX_FRAME				0x1C
++#define IO_XFER_ERROR_DMA				0x1D
++#define IO_XFER_ERROR_CREDIT_TIMEOUT			0x1E
++#define IO_XFER_ERROR_SATA_LINK_TIMEOUT			0x1F
++#define IO_XFER_ERROR_SATA				0x20
++#define IO_XFER_ERROR_REJECTED_NCQ_MODE			0x21
++#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST		0x22
++#define IO_XFER_ERROR_ABORTED_NCQ_MODE			0x23
++#define IO_XFER_OPEN_RETRY_TIMEOUT			0x24
++#define IO_XFER_SMP_RESP_CONNECTION_ERROR		0x25
++#define IO_XFER_ERROR_UNEXPECTED_PHASE			0x26
++#define IO_XFER_ERROR_XFER_RDY_OVERRUN			0x27
++#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED		0x28
++
++#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT		0x30
++#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK	0x31
++#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK	0x32
++
++#define IO_XFER_ERROR_OFFSET_MISMATCH			0x34
++#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN		0x35
++#define IO_XFER_CMD_FRAME_ISSUED			0x36
++#define IO_ERROR_INTERNAL_SMP_RESOURCE			0x37
++#define IO_PORT_IN_RESET				0x38
++#define IO_DS_NON_OPERATIONAL				0x39
++#define IO_DS_IN_RECOVERY				0x3A
++#define IO_TM_TAG_NOT_FOUND				0x3B
++#define IO_XFER_PIO_SETUP_ERROR				0x3C
++#define IO_SSP_EXT_IU_ZERO_LEN_ERROR			0x3D
++#define IO_DS_IN_ERROR					0x3E
++#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY		0x3F
++#define IO_ABORT_IN_PROGRESS				0x40
++#define IO_ABORT_DELAYED				0x41
++#define IO_INVALID_LENGTH				0x42
++
++/* WARNING: This error code must always be the last number.
++ * If you add an error code, update this value as well;
++ * it is used as an index.
++ */
++#define IO_ERROR_UNKNOWN_GENERIC			0x43
++
++/* MSGU CONFIGURATION  TABLE*/
++
++#define SPC_MSGU_CFG_TABLE_UPDATE		0x01/* Inbound doorbell bit0 */
++#define SPC_MSGU_CFG_TABLE_RESET		0x02/* Inbound doorbell bit1 */
++#define SPC_MSGU_CFG_TABLE_FREEZE		0x04/* Inbound doorbell bit2 */
++#define SPC_MSGU_CFG_TABLE_UNFREEZE		0x08/* Inbound doorbell bit3 */
++#define MSGU_IBDB_SET				0x04
++#define MSGU_HOST_INT_STATUS			0x08
++#define MSGU_HOST_INT_MASK			0x0C
++#define MSGU_IOPIB_INT_STATUS			0x18
++#define MSGU_IOPIB_INT_MASK			0x1C
++#define MSGU_IBDB_CLEAR				0x20/* RevB - Host not use */
++#define MSGU_MSGU_CONTROL			0x24
++#define MSGU_ODR				0x3C/* RevB */
++#define MSGU_ODCR				0x40/* RevB */
++#define MSGU_SCRATCH_PAD_0			0x44
++#define MSGU_SCRATCH_PAD_1			0x48
++#define MSGU_SCRATCH_PAD_2			0x4C
++#define MSGU_SCRATCH_PAD_3			0x50
++#define MSGU_HOST_SCRATCH_PAD_0			0x54
++#define MSGU_HOST_SCRATCH_PAD_1			0x58
++#define MSGU_HOST_SCRATCH_PAD_2			0x5C
++#define MSGU_HOST_SCRATCH_PAD_3			0x60
++#define MSGU_HOST_SCRATCH_PAD_4			0x64
++#define MSGU_HOST_SCRATCH_PAD_5			0x68
++#define MSGU_HOST_SCRATCH_PAD_6			0x6C
++#define MSGU_HOST_SCRATCH_PAD_7			0x70
++#define MSGU_ODMR				0x74/* RevB */
++
++/* bit definition for ODMR register */
++#define ODMR_MASK_ALL		0xFFFFFFFF	/* mask all interrupt vectors */
++#define ODMR_CLEAR_ALL		0		/* unmask all interrupt vectors */
++/* bit definition for ODCR register */
++#define ODCR_CLEAR_ALL		0xFFFFFFFF	/* clear all interrupt vectors */
++/* MSI-X Interrupts */
++#define MSIX_TABLE_OFFSET		0x2000
++#define MSIX_TABLE_ELEMENT_SIZE		0x10
++#define MSIX_INTERRUPT_CONTROL_OFFSET	0xC
++#define MSIX_TABLE_BASE	  (MSIX_TABLE_OFFSET + MSIX_INTERRUPT_CONTROL_OFFSET)
++#define MSIX_INTERRUPT_DISABLE		0x1
++#define MSIX_INTERRUPT_ENABLE		0x0
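++/*
++ * Per the defines above, the interrupt-control dword for MSI-X vector n
++ * sits at MSIX_TABLE_BASE + n * MSIX_TABLE_ELEMENT_SIZE; writing
++ * MSIX_INTERRUPT_DISABLE there masks that vector.
++ */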
++
++
++/* state definition for Scratch Pad1 register */
++#define SCRATCH_PAD1_POR		0x00  /* power on reset state */
++#define SCRATCH_PAD1_SFR		0x01  /* soft reset state */
++#define SCRATCH_PAD1_ERR		0x02  /* error state */
++#define SCRATCH_PAD1_RDY		0x03  /* ready state */
++#define SCRATCH_PAD1_RST		0x04  /* soft reset toggle flag */
++#define SCRATCH_PAD1_AAP1RDY_RST	0x08  /* AAP1 ready for soft reset */
++#define SCRATCH_PAD1_STATE_MASK		0xFFFFFFF0   /* ScratchPad1
++ Mask, bit1-0 State, bit2 Soft Reset, bit3 FW RDY for Soft Reset */
++#define SCRATCH_PAD1_RESERVED		0x000003F8   /* Scratch Pad1
++ Reserved bit 3 to 9 */
++
++ /* state definition for Scratch Pad2 register */
++#define SCRATCH_PAD2_POR		0x00  /* power on state */
++#define SCRATCH_PAD2_SFR		0x01  /* soft reset state */
++#define SCRATCH_PAD2_ERR		0x02  /* error state */
++#define SCRATCH_PAD2_RDY		0x03  /* ready state */
++#define SCRATCH_PAD2_FWRDY_RST		0x04  /* FW ready for soft reset flag*/
++#define SCRATCH_PAD2_IOPRDY_RST		0x08  /* IOP ready for soft reset */
++#define SCRATCH_PAD2_STATE_MASK		0xFFFFFFF4 /* ScratchPad 2
++ Mask, bit1-0 State */
++#define SCRATCH_PAD2_RESERVED		0x000003FC   /* Scratch Pad2
++ Reserved bit 2 to 9 */
++
++#define SCRATCH_PAD_ERROR_MASK		0xFFFFFC00   /* Error mask bits */
++#define SCRATCH_PAD_STATE_MASK		0x00000003   /* State Mask bits */
++
++/* main configuration offset - byte offset */
++#define MAIN_SIGNATURE_OFFSET		0x00/* DWORD 0x00 */
++#define MAIN_INTERFACE_REVISION		0x04/* DWORD 0x01 */
++#define MAIN_FW_REVISION		0x08/* DWORD 0x02 */
++#define MAIN_MAX_OUTSTANDING_IO_OFFSET	0x0C/* DWORD 0x03 */
++#define MAIN_MAX_SGL_OFFSET		0x10/* DWORD 0x04 */
++#define MAIN_CNTRL_CAP_OFFSET		0x14/* DWORD 0x05 */
++#define MAIN_GST_OFFSET			0x18/* DWORD 0x06 */
++#define MAIN_IBQ_OFFSET			0x1C/* DWORD 0x07 */
++#define MAIN_OBQ_OFFSET			0x20/* DWORD 0x08 */
++#define MAIN_IQNPPD_HPPD_OFFSET		0x24/* DWORD 0x09 */
++#define MAIN_OB_HW_EVENT_PID03_OFFSET	0x28/* DWORD 0x0A */
++#define MAIN_OB_HW_EVENT_PID47_OFFSET	0x2C/* DWORD 0x0B */
++#define MAIN_OB_NCQ_EVENT_PID03_OFFSET	0x30/* DWORD 0x0C */
++#define MAIN_OB_NCQ_EVENT_PID47_OFFSET	0x34/* DWORD 0x0D */
++#define MAIN_TITNX_EVENT_PID03_OFFSET	0x38/* DWORD 0x0E */
++#define MAIN_TITNX_EVENT_PID47_OFFSET	0x3C/* DWORD 0x0F */
++#define MAIN_OB_SSP_EVENT_PID03_OFFSET	0x40/* DWORD 0x10 */
++#define MAIN_OB_SSP_EVENT_PID47_OFFSET	0x44/* DWORD 0x11 */
++#define MAIN_OB_SMP_EVENT_PID03_OFFSET	0x48/* DWORD 0x12 */
++#define MAIN_OB_SMP_EVENT_PID47_OFFSET	0x4C/* DWORD 0x13 */
++#define MAIN_EVENT_LOG_ADDR_HI		0x50/* DWORD 0x14 */
++#define MAIN_EVENT_LOG_ADDR_LO		0x54/* DWORD 0x15 */
++#define MAIN_EVENT_LOG_BUFF_SIZE	0x58/* DWORD 0x16 */
++#define MAIN_EVENT_LOG_OPTION		0x5C/* DWORD 0x17 */
++#define MAIN_IOP_EVENT_LOG_ADDR_HI	0x60/* DWORD 0x18 */
++#define MAIN_IOP_EVENT_LOG_ADDR_LO	0x64/* DWORD 0x19 */
++#define MAIN_IOP_EVENT_LOG_BUFF_SIZE	0x68/* DWORD 0x1A */
++#define MAIN_IOP_EVENT_LOG_OPTION	0x6C/* DWORD 0x1B */
++#define MAIN_FATAL_ERROR_INTERRUPT	0x70/* DWORD 0x1C */
++#define MAIN_FATAL_ERROR_RDUMP0_OFFSET	0x74/* DWORD 0x1D */
++#define MAIN_FATAL_ERROR_RDUMP0_LENGTH	0x78/* DWORD 0x1E */
++#define MAIN_FATAL_ERROR_RDUMP1_OFFSET	0x7C/* DWORD 0x1F */
++#define MAIN_FATAL_ERROR_RDUMP1_LENGTH	0x80/* DWORD 0x20 */
++#define MAIN_HDA_FLAGS_OFFSET		0x84/* DWORD 0x21 */
++#define MAIN_ANALOG_SETUP_OFFSET	0x88/* DWORD 0x22 */
++
++/* General Status Table offset - byte offset */
++#define GST_GSTLEN_MPIS_OFFSET		0x00
++#define GST_IQ_FREEZE_STATE0_OFFSET	0x04
++#define GST_IQ_FREEZE_STATE1_OFFSET	0x08
++#define GST_MSGUTCNT_OFFSET		0x0C
++#define GST_IOPTCNT_OFFSET		0x10
++#define GST_PHYSTATE_OFFSET		0x18
++#define GST_PHYSTATE0_OFFSET		0x18
++#define GST_PHYSTATE1_OFFSET		0x1C
++#define GST_PHYSTATE2_OFFSET		0x20
++#define GST_PHYSTATE3_OFFSET		0x24
++#define GST_PHYSTATE4_OFFSET		0x28
++#define GST_PHYSTATE5_OFFSET		0x2C
++#define GST_PHYSTATE6_OFFSET		0x30
++#define GST_PHYSTATE7_OFFSET		0x34
++#define GST_RERRINFO_OFFSET		0x44
++
++/* General Status Table - MPI state */
++#define GST_MPI_STATE_UNINIT		0x00
++#define GST_MPI_STATE_INIT		0x01
++#define GST_MPI_STATE_TERMINATION	0x02
++#define GST_MPI_STATE_ERROR		0x03
++#define GST_MPI_STATE_MASK		0x07
++
++#define MBIC_NMI_ENABLE_VPE0_IOP	0x000418
++#define MBIC_NMI_ENABLE_VPE0_AAP1	0x000418
++/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */
++#define PCIE_EVENT_INTERRUPT_ENABLE	0x003040
++#define PCIE_EVENT_INTERRUPT		0x003044
++#define PCIE_ERROR_INTERRUPT_ENABLE	0x003048
++#define PCIE_ERROR_INTERRUPT		0x00304C
++/* signature definition for host scratch pad0 register */
++#define SPC_SOFT_RESET_SIGNATURE	0x252acbcd
++/* Signature for Soft Reset */
++
++/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */
++#define SPC_REG_RESET			0x000000/* reset register */
++
++/* bit definition for SPC_RESET register */
++#define   SPC_REG_RESET_OSSP		0x00000001
++#define   SPC_REG_RESET_RAAE		0x00000002
++#define   SPC_REG_RESET_PCS_SPBC	0x00000004
++#define   SPC_REG_RESET_PCS_IOP_SS	0x00000008
++#define   SPC_REG_RESET_PCS_AAP1_SS	0x00000010
++#define   SPC_REG_RESET_PCS_AAP2_SS	0x00000020
++#define   SPC_REG_RESET_PCS_LM		0x00000040
++#define   SPC_REG_RESET_PCS		0x00000080
++#define   SPC_REG_RESET_GSM		0x00000100
++#define   SPC_REG_RESET_DDR2		0x00010000
++#define   SPC_REG_RESET_BDMA_CORE	0x00020000
++#define   SPC_REG_RESET_BDMA_SXCBI	0x00040000
++#define   SPC_REG_RESET_PCIE_AL_SXCBI	0x00080000
++#define   SPC_REG_RESET_PCIE_PWR	0x00100000
++#define   SPC_REG_RESET_PCIE_SFT	0x00200000
++#define   SPC_REG_RESET_PCS_SXCBI	0x00400000
++#define   SPC_REG_RESET_LMS_SXCBI	0x00800000
++#define   SPC_REG_RESET_PMIC_SXCBI	0x01000000
++#define   SPC_REG_RESET_PMIC_CORE	0x02000000
++#define   SPC_REG_RESET_PCIE_PC_SXCBI	0x04000000
++#define   SPC_REG_RESET_DEVICE		0x80000000
++
++/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */
++#define SPC_IBW_AXI_TRANSLATION_LOW	0x003258
++
++#define MBIC_AAP1_ADDR_BASE		0x060000
++#define MBIC_IOP_ADDR_BASE		0x070000
++#define GSM_ADDR_BASE			0x0700000
++/* Dynamic map through Bar4 - 0x00700000 */
++#define GSM_CONFIG_RESET		0x00000000
++#define RAM_ECC_DB_ERR			0x00000018
++#define GSM_READ_ADDR_PARITY_INDIC	0x00000058
++#define GSM_WRITE_ADDR_PARITY_INDIC	0x00000060
++#define GSM_WRITE_DATA_PARITY_INDIC	0x00000068
++#define GSM_READ_ADDR_PARITY_CHECK	0x00000038
++#define GSM_WRITE_ADDR_PARITY_CHECK	0x00000040
++#define GSM_WRITE_DATA_PARITY_CHECK	0x00000048
++
++#define RB6_ACCESS_REG			0x6A0000
++#define HDAC_EXEC_CMD			0x0002
++#define HDA_C_PA			0xcb
++#define HDA_SEQ_ID_BITS			0x00ff0000
++#define HDA_GSM_OFFSET_BITS		0x00FFFFFF
++#define MBIC_AAP1_ADDR_BASE		0x060000
++#define MBIC_IOP_ADDR_BASE		0x070000
++#define GSM_ADDR_BASE			0x0700000
++#define SPC_TOP_LEVEL_ADDR_BASE		0x000000
++#define GSM_CONFIG_RESET_VALUE          0x00003b00
++#define GPIO_ADDR_BASE                  0x00090000
++#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET   0x0000010c
++
++/* RB6 offset */
++#define SPC_RB6_OFFSET			0x80C0
++/* Magic number of soft reset for RB6 */
++#define RB6_MAGIC_NUMBER_RST		0x1234
++
++/* Device Register status */
++#define DEVREG_SUCCESS					0x00
++#define DEVREG_FAILURE_OUT_OF_RESOURCE			0x01
++#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED	0x02
++#define DEVREG_FAILURE_INVALID_PHY_ID			0x03
++#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED	0x04
++#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE		0x05
++#define DEVREG_FAILURE_PORT_NOT_VALID_STATE		0x06
++#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID		0x07
++
++#endif
++
+diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
+new file mode 100644
+index 0000000..811b5d3
+--- /dev/null
++++ b/drivers/scsi/pm8001/pm8001_init.c
+@@ -0,0 +1,888 @@
++/*
++ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
++ *
++ * Copyright (c) 2008-2009 USI Co., Ltd.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions, and the following disclaimer,
++ *    without modification.
++ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
++ *    substantially similar to the "NO WARRANTY" disclaimer below
++ *    ("Disclaimer") and any redistribution must be conditioned upon
++ *    including a substantially similar Disclaimer requirement for further
++ *    binary redistribution.
++ * 3. Neither the names of the above-listed copyright holders nor the names
++ *    of any contributors may be used to endorse or promote products derived
++ *    from this software without specific prior written permission.
++ *
++ * Alternatively, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") version 2 as published by the Free
++ * Software Foundation.
++ *
++ * NO WARRANTY
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
++ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
++ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGES.
++ *
++ */
++
++#include "pm8001_sas.h"
++#include "pm8001_chips.h"
++
++static struct scsi_transport_template *pm8001_stt;
++
++static const struct pm8001_chip_info pm8001_chips[] = {
++	[chip_8001] = {  8, &pm8001_8001_dispatch,},
++};
++static int pm8001_id;
++
++LIST_HEAD(hba_list);
++
++/**
++ * The main structure which the LLDD must register with the scsi core.
++ */
++static struct scsi_host_template pm8001_sht = {
++	.module			= THIS_MODULE,
++	.name			= DRV_NAME,
++	.queuecommand		= sas_queuecommand,
++	.target_alloc		= sas_target_alloc,
++	.slave_configure	= pm8001_slave_configure,
++	.slave_destroy		= sas_slave_destroy,
++	.scan_finished		= pm8001_scan_finished,
++	.scan_start		= pm8001_scan_start,
++	.change_queue_depth	= sas_change_queue_depth,
++	.change_queue_type	= sas_change_queue_type,
++	.bios_param		= sas_bios_param,
++	.can_queue		= 1,
++	.cmd_per_lun		= 1,
++	.this_id		= -1,
++	.sg_tablesize		= SG_ALL,
++	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
++	.use_clustering		= ENABLE_CLUSTERING,
++	.eh_device_reset_handler = sas_eh_device_reset_handler,
++	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
++	.slave_alloc		= pm8001_slave_alloc,
++	.target_destroy		= sas_target_destroy,
++	.ioctl			= sas_ioctl,
++	.shost_attrs		= pm8001_host_attrs,
++};
++
++/**
++ * The SAS layer calls these functions to execute specific tasks.
++ */
++static struct sas_domain_function_template pm8001_transport_ops = {
++	.lldd_dev_found		= pm8001_dev_found,
++	.lldd_dev_gone		= pm8001_dev_gone,
++
++	.lldd_execute_task	= pm8001_queue_command,
++	.lldd_control_phy	= pm8001_phy_control,
++
++	.lldd_abort_task	= pm8001_abort_task,
++	.lldd_abort_task_set	= pm8001_abort_task_set,
++	.lldd_clear_aca		= pm8001_clear_aca,
++	.lldd_clear_task_set	= pm8001_clear_task_set,
++	.lldd_I_T_nexus_reset   = pm8001_I_T_nexus_reset,
++	.lldd_lu_reset		= pm8001_lu_reset,
++	.lldd_query_task	= pm8001_query_task,
++};
++
++/**
++ * pm8001_phy_init - initialize our adapter phys
++ * @pm8001_ha: our hba structure.
++ * @phy_id: phy id.
++ */
++static void __devinit pm8001_phy_init(struct pm8001_hba_info *pm8001_ha,
++	int phy_id)
++{
++	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
++	struct asd_sas_phy *sas_phy = &phy->sas_phy;
++	phy->phy_state = 0;
++	phy->pm8001_ha = pm8001_ha;
++	sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
++	sas_phy->class = SAS;
++	sas_phy->iproto = SAS_PROTOCOL_ALL;
++	sas_phy->tproto = 0;
++	sas_phy->type = PHY_TYPE_PHYSICAL;
++	sas_phy->role = PHY_ROLE_INITIATOR;
++	sas_phy->oob_mode = OOB_NOT_CONNECTED;
++	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
++	sas_phy->id = phy_id;
++	sas_phy->sas_addr = &pm8001_ha->sas_addr[0];
++	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
++	sas_phy->ha = (struct sas_ha_struct *)pm8001_ha->shost->hostdata;
++	sas_phy->lldd_phy = phy;
++}
++
++/**
++ * pm8001_free - free the hba structure
++ * @pm8001_ha:	our hba structure.
++ *
++ */
++static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
++{
++	int i;
++	struct pm8001_wq *wq;
++
++	if (!pm8001_ha)
++		return;
++
++	for (i = 0; i < USI_MAX_MEMCNT; i++) {
++		if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
++			pci_free_consistent(pm8001_ha->pdev,
++				pm8001_ha->memoryMap.region[i].element_size,
++				pm8001_ha->memoryMap.region[i].virt_ptr,
++				pm8001_ha->memoryMap.region[i].phys_addr);
++		}
++	}
++	PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
++	if (pm8001_ha->shost)
++		scsi_host_put(pm8001_ha->shost);
++	list_for_each_entry(wq, &pm8001_ha->wq_list, entry)
++		cancel_delayed_work(&wq->work_q);
++	kfree(pm8001_ha->tags);
++	kfree(pm8001_ha);
++}
++
++#ifdef PM8001_USE_TASKLET
++static void pm8001_tasklet(unsigned long opaque)
++{
++	struct pm8001_hba_info *pm8001_ha;
++	pm8001_ha = (struct pm8001_hba_info *)opaque;
++	BUG_ON(!pm8001_ha);
++	PM8001_CHIP_DISP->isr(pm8001_ha);
++}
++#endif
++
++
++ /**
++  * pm8001_interrupt - when the HBA raises an interrupt, invoke this
++  * dispatcher to handle each case.
++  * @irq: irq number.
++  * @opaque: the passed general host adapter struct
++  */
++static irqreturn_t pm8001_interrupt(int irq, void *opaque)
++{
++	struct pm8001_hba_info *pm8001_ha;
++	irqreturn_t ret = IRQ_HANDLED;
++	struct sas_ha_struct *sha = opaque;
++	pm8001_ha = sha->lldd_ha;
++	if (unlikely(!pm8001_ha))
++		return IRQ_NONE;
++	if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
++		return IRQ_NONE;
++#ifdef PM8001_USE_TASKLET
++	tasklet_schedule(&pm8001_ha->tasklet);
++#else
++	ret = PM8001_CHIP_DISP->isr(pm8001_ha);
++#endif
++	return ret;
++}
++
++/**
++ * pm8001_alloc - initialize our hba structure and its DMA memory regions.
++ * @pm8001_ha: our hba structure.
++ *
++ */
++static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
++{
++	int i;
++	spin_lock_init(&pm8001_ha->lock);
++	for (i = 0; i < pm8001_ha->chip->n_phy; i++)
++		pm8001_phy_init(pm8001_ha, i);
++
++	pm8001_ha->tags = kmalloc(sizeof(*pm8001_ha->tags)*PM8001_MAX_DEVICES,
++		GFP_KERNEL);
++	if (!pm8001_ha->tags)
++		goto err_out;
++
++	/* MPI Memory region 1 for AAP Event Log for fw */
++	pm8001_ha->memoryMap.region[AAP1].num_elements = 1;
++	pm8001_ha->memoryMap.region[AAP1].element_size = PM8001_EVENT_LOG_SIZE;
++	pm8001_ha->memoryMap.region[AAP1].total_len = PM8001_EVENT_LOG_SIZE;
++	pm8001_ha->memoryMap.region[AAP1].alignment = 32;
++
++	/* MPI Memory region 2 for IOP Event Log for fw */
++	pm8001_ha->memoryMap.region[IOP].num_elements = 1;
++	pm8001_ha->memoryMap.region[IOP].element_size = PM8001_EVENT_LOG_SIZE;
++	pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
++	pm8001_ha->memoryMap.region[IOP].alignment = 32;
++
++	/* MPI Memory region 3 for consumer Index of inbound queues */
++	pm8001_ha->memoryMap.region[CI].num_elements = 1;
++	pm8001_ha->memoryMap.region[CI].element_size = 4;
++	pm8001_ha->memoryMap.region[CI].total_len = 4;
++	pm8001_ha->memoryMap.region[CI].alignment = 4;
++
++	/* MPI Memory region 4 for producer Index of outbound queues */
++	pm8001_ha->memoryMap.region[PI].num_elements = 1;
++	pm8001_ha->memoryMap.region[PI].element_size = 4;
++	pm8001_ha->memoryMap.region[PI].total_len = 4;
++	pm8001_ha->memoryMap.region[PI].alignment = 4;
++
++	/* MPI Memory region 5 inbound queues */
++	pm8001_ha->memoryMap.region[IB].num_elements = 256;
++	pm8001_ha->memoryMap.region[IB].element_size = 64;
++	pm8001_ha->memoryMap.region[IB].total_len = 256 * 64;
++	pm8001_ha->memoryMap.region[IB].alignment = 64;
++
++	/* MPI Memory region 6 outbound queues */
++	pm8001_ha->memoryMap.region[OB].num_elements = 256;
++	pm8001_ha->memoryMap.region[OB].element_size = 64;
++	pm8001_ha->memoryMap.region[OB].total_len = 256 * 64;
++	pm8001_ha->memoryMap.region[OB].alignment = 64;
++
++	/* Memory region for NVMD data DMA */
++	pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
++	pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
++	pm8001_ha->memoryMap.region[NVMD].total_len = 4096;
++	/* Memory region for devices*/
++	pm8001_ha->memoryMap.region[DEV_MEM].num_elements = 1;
++	pm8001_ha->memoryMap.region[DEV_MEM].element_size = PM8001_MAX_DEVICES *
++		sizeof(struct pm8001_device);
++	pm8001_ha->memoryMap.region[DEV_MEM].total_len = PM8001_MAX_DEVICES *
++		sizeof(struct pm8001_device);
++
++	/* Memory region for ccb_info*/
++	pm8001_ha->memoryMap.region[CCB_MEM].num_elements = 1;
++	pm8001_ha->memoryMap.region[CCB_MEM].element_size = PM8001_MAX_CCB *
++		sizeof(struct pm8001_ccb_info);
++	pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB *
++		sizeof(struct pm8001_ccb_info);
++
++	for (i = 0; i < USI_MAX_MEMCNT; i++) {
++		if (pm8001_mem_alloc(pm8001_ha->pdev,
++			&pm8001_ha->memoryMap.region[i].virt_ptr,
++			&pm8001_ha->memoryMap.region[i].phys_addr,
++			&pm8001_ha->memoryMap.region[i].phys_addr_hi,
++			&pm8001_ha->memoryMap.region[i].phys_addr_lo,
++			pm8001_ha->memoryMap.region[i].total_len,
++			pm8001_ha->memoryMap.region[i].alignment) != 0) {
++				PM8001_FAIL_DBG(pm8001_ha,
++					pm8001_printk("Mem%d alloc failed\n",
++					i));
++				goto err_out;
++		}
++	}
++
++	pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr;
++	for (i = 0; i < PM8001_MAX_DEVICES; i++) {
++		pm8001_ha->devices[i].dev_type = NO_DEVICE;
++		pm8001_ha->devices[i].id = i;
++		pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
++		pm8001_ha->devices[i].running_req = 0;
++	}
++	pm8001_ha->ccb_info = pm8001_ha->memoryMap.region[CCB_MEM].virt_ptr;
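++	/*
++	 * Each CCB's DMA handle is the CCB_MEM region base plus its index
++	 * offset; tags_num ends up equal to PM8001_MAX_CCB, which sizes the
++	 * tag bitmap initialized below.
++	 */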
++	for (i = 0; i < PM8001_MAX_CCB; i++) {
++		pm8001_ha->ccb_info[i].ccb_dma_handle =
++			pm8001_ha->memoryMap.region[CCB_MEM].phys_addr +
++			i * sizeof(struct pm8001_ccb_info);
++		++pm8001_ha->tags_num;
++	}
++	pm8001_ha->flags = PM8001F_INIT_TIME;
++	/* Initialize tags */
++	pm8001_tag_init(pm8001_ha);
++	return 0;
++err_out:
++	return 1;
++}
++
++/**
++ * pm8001_ioremap - remap the PCI physical addresses to kernel virtual
++ * addresses so that we can access them.
++ * @pm8001_ha: our hba structure.
++ */
++static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
++{
++	u32 bar;
++	u32 logicalBar = 0;
++	struct pci_dev *pdev;
++
++	pdev = pm8001_ha->pdev;
++	/* map pci mem (PMC pci base 0-3)*/
++	for (bar = 0; bar < 6; bar++) {
++		/*
++		** logical BARs for SPC:
++		** bar 0 and 1 - logical BAR0
++		** bar 2 and 3 - logical BAR1
++		** bar4 - logical BAR2
++		** bar5 - logical BAR3
++		** Skip the appropriate assignments:
++		*/
++		if ((bar == 1) || (bar == 3))
++			continue;
++		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
++			pm8001_ha->io_mem[logicalBar].membase =
++				pci_resource_start(pdev, bar);
++			pm8001_ha->io_mem[logicalBar].membase &=
++				(u32)PCI_BASE_ADDRESS_MEM_MASK;
++			pm8001_ha->io_mem[logicalBar].memsize =
++				pci_resource_len(pdev, bar);
++			pm8001_ha->io_mem[logicalBar].memvirtaddr =
++				ioremap(pm8001_ha->io_mem[logicalBar].membase,
++				pm8001_ha->io_mem[logicalBar].memsize);
++			PM8001_INIT_DBG(pm8001_ha,
++				pm8001_printk("PCI: bar %d, logicalBar %d "
++				"virt_addr=%lx,len=%d\n", bar, logicalBar,
++				(unsigned long)
++				pm8001_ha->io_mem[logicalBar].memvirtaddr,
++				pm8001_ha->io_mem[logicalBar].memsize));
++		} else {
++			pm8001_ha->io_mem[logicalBar].membase	= 0;
++			pm8001_ha->io_mem[logicalBar].memsize	= 0;
++			pm8001_ha->io_mem[logicalBar].memvirtaddr = 0;
++		}
++		logicalBar++;
++	}
++	return 0;
++}
++
++/**
++ * pm8001_pci_alloc - initialize our ha card structure
++ * @pdev: pci device.
++ * @chip_id: chip id, index into pm8001_chips[].
++ * @shost: scsi host struct which has been initialized before.
++ */
++static struct pm8001_hba_info *__devinit
++pm8001_pci_alloc(struct pci_dev *pdev, u32 chip_id, struct Scsi_Host *shost)
++{
++	struct pm8001_hba_info *pm8001_ha;
++	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
++
++
++	pm8001_ha = sha->lldd_ha;
++	if (!pm8001_ha)
++		return NULL;
++
++	pm8001_ha->pdev = pdev;
++	pm8001_ha->dev = &pdev->dev;
++	pm8001_ha->chip_id = chip_id;
++	pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id];
++	pm8001_ha->irq = pdev->irq;
++	pm8001_ha->sas = sha;
++	pm8001_ha->shost = shost;
++	pm8001_ha->id = pm8001_id++;
++	INIT_LIST_HEAD(&pm8001_ha->wq_list);
++	pm8001_ha->logging_level = 0x01;
++	sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
++#ifdef PM8001_USE_TASKLET
++	tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
++		(unsigned long)pm8001_ha);
++#endif
++	pm8001_ioremap(pm8001_ha);
++	if (!pm8001_alloc(pm8001_ha))
++		return pm8001_ha;
++	pm8001_free(pm8001_ha);
++	return NULL;
++}
++
++/**
++ * pci_go_44 - pm8001-specific: its DMA addressing is 44-bit rather than 64-bit
++ * @pdev: pci device.
++ */
++static int pci_go_44(struct pci_dev *pdev)
++{
++	int rc;
++
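++	/*
++	 * Try a 44-bit streaming mask first (the SPC addresses at most 44
++	 * bits); if the matching coherent mask cannot be set, fall back to
++	 * a 32-bit coherent mask, and if even the 44-bit streaming mask
++	 * fails, run entirely in 32-bit mode.
++	 */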
++	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(44))) {
++		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(44));
++		if (rc) {
++			rc = pci_set_consistent_dma_mask(pdev,
++				DMA_BIT_MASK(32));
++			if (rc) {
++				dev_printk(KERN_ERR, &pdev->dev,
++					"44-bit DMA enable failed\n");
++				return rc;
++			}
++		}
++	} else {
++		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
++		if (rc) {
++			dev_printk(KERN_ERR, &pdev->dev,
++				"32-bit DMA enable failed\n");
++			return rc;
++		}
++		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
++		if (rc) {
++			dev_printk(KERN_ERR, &pdev->dev,
++				"32-bit consistent DMA enable failed\n");
++			return rc;
++		}
++	}
++	return rc;
++}
++
++/**
++ * pm8001_prep_sas_ha_init - allocate memory in the general hba struct and initialize it.
++ * @shost: scsi host which has been allocated outside.
++ * @chip_info: our ha struct.
++ */
++static int __devinit pm8001_prep_sas_ha_init(struct Scsi_Host *shost,
++	const struct pm8001_chip_info *chip_info)
++{
++	int phy_nr, port_nr;
++	struct asd_sas_phy **arr_phy;
++	struct asd_sas_port **arr_port;
++	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
++
++	phy_nr = chip_info->n_phy;
++	port_nr = phy_nr;
++	memset(sha, 0x00, sizeof(*sha));
++	arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
++	if (!arr_phy)
++		goto exit;
++	arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
++	if (!arr_port)
++		goto exit_free2;
++
++	sha->sas_phy = arr_phy;
++	sha->sas_port = arr_port;
++	sha->lldd_ha = kzalloc(sizeof(struct pm8001_hba_info), GFP_KERNEL);
++	if (!sha->lldd_ha)
++		goto exit_free1;
++
++	shost->transportt = pm8001_stt;
++	shost->max_id = PM8001_MAX_DEVICES;
++	shost->max_lun = 8;
++	shost->max_channel = 0;
++	shost->unique_id = pm8001_id;
++	shost->max_cmd_len = 16;
++	shost->can_queue = PM8001_CAN_QUEUE;
++	shost->cmd_per_lun = 32;
++	return 0;
++exit_free1:
++	kfree(arr_port);
++exit_free2:
++	kfree(arr_phy);
++exit:
++	return -1;
++}
++
++/**
++ * pm8001_post_sas_ha_init - initialize general hba struct defined in libsas
++ * @shost: scsi host which has been allocated outside
++ * @chip_info: our ha struct.
++ */
++static void  __devinit pm8001_post_sas_ha_init(struct Scsi_Host *shost,
++	const struct pm8001_chip_info *chip_info)
++{
++	int i = 0;
++	struct pm8001_hba_info *pm8001_ha;
++	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
++
++	pm8001_ha = sha->lldd_ha;
++	for (i = 0; i < chip_info->n_phy; i++) {
++		sha->sas_phy[i] = &pm8001_ha->phy[i].sas_phy;
++		sha->sas_port[i] = &pm8001_ha->port[i].sas_port;
++	}
++	sha->sas_ha_name = DRV_NAME;
++	sha->dev = pm8001_ha->dev;
++
++	sha->lldd_module = THIS_MODULE;
++	sha->sas_addr = &pm8001_ha->sas_addr[0];
++	sha->num_phys = chip_info->n_phy;
++	sha->lldd_max_execute_num = 1;
++	sha->lldd_queue_size = PM8001_CAN_QUEUE;
++	sha->core.shost = shost;
++}
++
++/**
++ * pm8001_init_sas_add - initialize the sas address
++ * @pm8001_ha: our hba struct.
++ *
++ * Currently we just assign a fixed SAS address to our HBA; in production
++ * it should be read from the EEPROM.
++ */
++static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
++{
++	u8 i;
++#ifdef PM8001_READ_VPD
++	DECLARE_COMPLETION_ONSTACK(completion);
++	pm8001_ha->nvmd_completion = &completion;
++	PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, 0, 0);
++	wait_for_completion(&completion);
++	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
++		memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
++			SAS_ADDR_SIZE);
++		PM8001_INIT_DBG(pm8001_ha,
++			pm8001_printk("phy %d sas_addr = %x \n", i,
++			(u64)pm8001_ha->phy[i].dev_sas_addr));
++	}
++#else
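++	/*
++	 * No VPD support: fall back to a fixed SAS address, stored
++	 * big-endian as the wire format expects.
++	 */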
++	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
++		pm8001_ha->phy[i].dev_sas_addr = 0x500e004010000004ULL;
++		pm8001_ha->phy[i].dev_sas_addr =
++			cpu_to_be64((u64)
++				(*(u64 *)&pm8001_ha->phy[i].dev_sas_addr));
++	}
++	memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr,
++		SAS_ADDR_SIZE);
++#endif
++}
++
++#ifdef PM8001_USE_MSIX
++/**
++ * pm8001_setup_msix - enable MSI-X interrupt
++ * @pm8001_ha: our ha struct.
++ * @irq_handler: irq_handler
++ */
++static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
++	irq_handler_t irq_handler)
++{
++	u32 i = 0, j = 0;
++	u32 number_of_intr = 1;
++	int flag = 0;
++	u32 max_entry;
++	int rc;
++	max_entry = ARRAY_SIZE(pm8001_ha->msix_entries);
++	flag |= IRQF_DISABLED;
++	for (i = 0; i < max_entry ; i++)
++		pm8001_ha->msix_entries[i].entry = i;
++	rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries,
++		number_of_intr);
++	pm8001_ha->number_of_intr = number_of_intr;
++	if (!rc) {
++		for (i = 0; i < number_of_intr; i++) {
++			if (request_irq(pm8001_ha->msix_entries[i].vector,
++				irq_handler, flag, DRV_NAME,
++				SHOST_TO_SAS_HA(pm8001_ha->shost))) {
++				for (j = 0; j < i; j++)
++					free_irq(
++					pm8001_ha->msix_entries[j].vector,
++					SHOST_TO_SAS_HA(pm8001_ha->shost));
++				pci_disable_msix(pm8001_ha->pdev);
++				break;
++			}
++		}
++	}
++	return rc;
++}
++#endif
++
++/**
++ * pm8001_request_irq - register interrupt
++ * @pm8001_ha: our ha struct.
++ */
++static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
++{
++	struct pci_dev *pdev;
++	irq_handler_t irq_handler = pm8001_interrupt;
++	u32 rc;
++
++	pdev = pm8001_ha->pdev;
++
++#ifdef PM8001_USE_MSIX
++	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
++		return pm8001_setup_msix(pm8001_ha, irq_handler);
++	else
++		goto intx;
++#endif
++
++intx:
++	/* initialize the INT-X interrupt */
++	rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME,
++		SHOST_TO_SAS_HA(pm8001_ha->shost));
++	return rc;
++}
++
++/**
++ * pm8001_pci_probe - probe supported device
++ * @pdev: pci device which kernel has been prepared for.
++ * @ent: pci device id
++ *
++ * This function is the main initialization function: it is invoked when a
++ * new pci device is probed. All structure and hardware initialization is
++ * done here, and the interrupt is registered as well.
++ */
++static int __devinit pm8001_pci_probe(struct pci_dev *pdev,
++	const struct pci_device_id *ent)
++{
++	unsigned int rc;
++	u32	pci_reg;
++	struct pm8001_hba_info *pm8001_ha;
++	struct Scsi_Host *shost = NULL;
++	const struct pm8001_chip_info *chip;
++
++	dev_printk(KERN_INFO, &pdev->dev,
++		"pm8001: driver version %s\n", DRV_VERSION);
++	rc = pci_enable_device(pdev);
++	if (rc)
++		goto err_out_enable;
++	pci_set_master(pdev);
++	/*
++	 * Enable pci slot busmaster by setting pci command register.
++	 * This is required by FW for Cyclone card.
++	 */
++
++	pci_read_config_dword(pdev, PCI_COMMAND, &pci_reg);
++	pci_reg |= 0x157;
++	pci_write_config_dword(pdev, PCI_COMMAND, pci_reg);
++	rc = pci_request_regions(pdev, DRV_NAME);
++	if (rc)
++		goto err_out_disable;
++	rc = pci_go_44(pdev);
++	if (rc)
++		goto err_out_regions;
++
++	shost = scsi_host_alloc(&pm8001_sht, sizeof(void *));
++	if (!shost) {
++		rc = -ENOMEM;
++		goto err_out_regions;
++	}
++	chip = &pm8001_chips[ent->driver_data];
++	SHOST_TO_SAS_HA(shost) =
++		kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
++	if (!SHOST_TO_SAS_HA(shost)) {
++		rc = -ENOMEM;
++		goto err_out_free_host;
++	}
++
++	rc = pm8001_prep_sas_ha_init(shost, chip);
++	if (rc) {
++		rc = -ENOMEM;
++		goto err_out_free;
++	}
++	pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
++	pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost);
++	if (!pm8001_ha) {
++		rc = -ENOMEM;
++		goto err_out_free;
++	}
++	list_add_tail(&pm8001_ha->list, &hba_list);
++	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
++	rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
++	if (rc)
++		goto err_out_ha_free;
++
++	rc = scsi_add_host(shost, &pdev->dev);
++	if (rc)
++		goto err_out_ha_free;
++	rc = pm8001_request_irq(pm8001_ha);
++	if (rc)
++		goto err_out_shost;
++
++	PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
++	pm8001_init_sas_add(pm8001_ha);
++	pm8001_post_sas_ha_init(shost, chip);
++	rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
++	if (rc)
++		goto err_out_shost;
++	scsi_scan_host(pm8001_ha->shost);
++	return 0;
++
++err_out_shost:
++	scsi_remove_host(pm8001_ha->shost);
++err_out_ha_free:
++	pm8001_free(pm8001_ha);
++err_out_free:
++	kfree(SHOST_TO_SAS_HA(shost));
++err_out_free_host:
++	kfree(shost);
++err_out_regions:
++	pci_release_regions(pdev);
++err_out_disable:
++	pci_disable_device(pdev);
++err_out_enable:
++	return rc;
++}
++
++static void __devexit pm8001_pci_remove(struct pci_dev *pdev)
++{
++	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
++	struct pm8001_hba_info *pm8001_ha;
++	int i;
++	pm8001_ha = sha->lldd_ha;
++	pci_set_drvdata(pdev, NULL);
++	sas_unregister_ha(sha);
++	sas_remove_host(pm8001_ha->shost);
++	list_del(&pm8001_ha->list);
++	scsi_remove_host(pm8001_ha->shost);
++	PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
++	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
++
++#ifdef PM8001_USE_MSIX
++	for (i = 0; i < pm8001_ha->number_of_intr; i++)
++		synchronize_irq(pm8001_ha->msix_entries[i].vector);
++	for (i = 0; i < pm8001_ha->number_of_intr; i++)
++		free_irq(pm8001_ha->msix_entries[i].vector, sha);
++	pci_disable_msix(pdev);
++#else
++	free_irq(pm8001_ha->irq, sha);
++#endif
++#ifdef PM8001_USE_TASKLET
++	tasklet_kill(&pm8001_ha->tasklet);
++#endif
++	pm8001_free(pm8001_ha);
++	kfree(sha->sas_phy);
++	kfree(sha->sas_port);
++	kfree(sha);
++	pci_release_regions(pdev);
++	pci_disable_device(pdev);
++}
++
++/**
++ * pm8001_pci_suspend - power management suspend main entry point
++ * @pdev: PCI device struct
++ * @state: PM state change to (usually PCI_D3)
++ *
++ * Returns 0 success, anything else error.
++ */
++static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
++	struct pm8001_hba_info *pm8001_ha;
++	int i , pos;
++	u32 device_state;
++	pm8001_ha = sha->lldd_ha;
++	flush_scheduled_work();
++	scsi_block_requests(pm8001_ha->shost);
++	pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
++	if (pos == 0) {
++		printk(KERN_ERR " PCI PM not supported\n");
++		return -ENODEV;
++	}
++	PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
++	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
++#ifdef PM8001_USE_MSIX
++	for (i = 0; i < pm8001_ha->number_of_intr; i++)
++		synchronize_irq(pm8001_ha->msix_entries[i].vector);
++	for (i = 0; i < pm8001_ha->number_of_intr; i++)
++		free_irq(pm8001_ha->msix_entries[i].vector, sha);
++	pci_disable_msix(pdev);
++#else
++	free_irq(pm8001_ha->irq, sha);
++#endif
++#ifdef PM8001_USE_TASKLET
++	tasklet_kill(&pm8001_ha->tasklet);
++#endif
++	device_state = pci_choose_state(pdev, state);
++	pm8001_printk("pdev=0x%p, slot=%s, entering "
++		      "operating state [D%d]\n", pdev,
++		      pm8001_ha->name, device_state);
++	pci_save_state(pdev);
++	pci_disable_device(pdev);
++	pci_set_power_state(pdev, device_state);
++	return 0;
++}
++
++/**
++ * pm8001_pci_resume - power management resume main entry point
++ * @pdev: PCI device struct
++ *
++ * Returns 0 success, anything else error.
++ */
++static int pm8001_pci_resume(struct pci_dev *pdev)
++{
++	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
++	struct pm8001_hba_info *pm8001_ha;
++	int rc;
++	u32 device_state;
++	pm8001_ha = sha->lldd_ha;
++	device_state = pdev->current_state;
++
++	pm8001_printk("pdev=0x%p, slot=%s, resuming from previous "
++		"operating state [D%d]\n", pdev, pm8001_ha->name, device_state);
++
++	pci_set_power_state(pdev, PCI_D0);
++	pci_enable_wake(pdev, PCI_D0, 0);
++	pci_restore_state(pdev);
++	rc = pci_enable_device(pdev);
++	if (rc) {
++		pm8001_printk("slot=%s Enable device failed during resume\n",
++			      pm8001_ha->name);
++		goto err_out_enable;
++	}
++
++	pci_set_master(pdev);
++	rc = pci_go_44(pdev);
++	if (rc)
++		goto err_out_disable;
++
++	PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
++	rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
++	if (rc)
++		goto err_out_disable;
++	PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
++	rc = pm8001_request_irq(pm8001_ha);
++	if (rc)
++		goto err_out_disable;
++	#ifdef PM8001_USE_TASKLET
++	tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
++		    (unsigned long)pm8001_ha);
++	#endif
++	PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
++	scsi_unblock_requests(pm8001_ha->shost);
++	return 0;
++
++err_out_disable:
++	scsi_remove_host(pm8001_ha->shost);
++	pci_disable_device(pdev);
++err_out_enable:
++	return rc;
++}
++
++static struct pci_device_id __devinitdata pm8001_pci_table[] = {
++	{
++		PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001
++	},
++	{
++		PCI_DEVICE(0x117c, 0x0042),
++		.driver_data = chip_8001
++	},
++	{} /* terminate list */
++};
++
++static struct pci_driver pm8001_pci_driver = {
++	.name		= DRV_NAME,
++	.id_table	= pm8001_pci_table,
++	.probe		= pm8001_pci_probe,
++	.remove		= __devexit_p(pm8001_pci_remove),
++	.suspend	= pm8001_pci_suspend,
++	.resume		= pm8001_pci_resume,
++};
++
++/**
++ *	pm8001_init - initialize scsi transport template
++ */
++static int __init pm8001_init(void)
++{
++	int rc;
++	pm8001_id = 0;
++	pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops);
++	if (!pm8001_stt)
++		return -ENOMEM;
++	rc = pci_register_driver(&pm8001_pci_driver);
++	if (rc)
++		goto err_out;
++	return 0;
++err_out:
++	sas_release_transport(pm8001_stt);
++	return rc;
++}
++
++static void __exit pm8001_exit(void)
++{
++	pci_unregister_driver(&pm8001_pci_driver);
++	sas_release_transport(pm8001_stt);
++}
++
++module_init(pm8001_init);
++module_exit(pm8001_exit);
++
++MODULE_AUTHOR("Jack Wang <jack_wang at usish.com>");
++MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver");
++MODULE_VERSION(DRV_VERSION);
++MODULE_LICENSE("GPL");
++MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
++
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+new file mode 100644
+index 0000000..7bf30fa
+--- /dev/null
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -0,0 +1,1104 @@
++/*
++ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
++ *
++ * Copyright (c) 2008-2009 USI Co., Ltd.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions, and the following disclaimer,
++ *    without modification.
++ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
++ *    substantially similar to the "NO WARRANTY" disclaimer below
++ *    ("Disclaimer") and any redistribution must be conditioned upon
++ *    including a substantially similar Disclaimer requirement for further
++ *    binary redistribution.
++ * 3. Neither the names of the above-listed copyright holders nor the names
++ *    of any contributors may be used to endorse or promote products derived
++ *    from this software without specific prior written permission.
++ *
++ * Alternatively, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") version 2 as published by the Free
++ * Software Foundation.
++ *
++ * NO WARRANTY
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
++ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
++ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGES.
++ *
++ */
++
++#include "pm8001_sas.h"
++
++/**
++ * pm8001_find_tag - find the tag that belongs to a given sas task
++ * @task: the task sent to the LLDD
++ * @tag: the found tag associated with the task
++ */
++static int pm8001_find_tag(struct sas_task *task, u32 *tag)
++{
++	if (task->lldd_task) {
++		struct pm8001_ccb_info *ccb;
++		ccb = task->lldd_task;
++		*tag = ccb->ccb_tag;
++		return 1;
++	}
++	return 0;
++}
++
++/**
++  * pm8001_tag_clear - clear a tag in the tags bitmap
++  * @pm8001_ha: our hba struct
++  * @tag: the tag to clear
++  */
++static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag)
++{
++	void *bitmap = pm8001_ha->tags;
++	clear_bit(tag, bitmap);
++}
++
++static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
++{
++	pm8001_tag_clear(pm8001_ha, tag);
++}
++
++static void pm8001_tag_set(struct pm8001_hba_info *pm8001_ha, u32 tag)
++{
++	void *bitmap = pm8001_ha->tags;
++	set_bit(tag, bitmap);
++}
++
++/**
++  * pm8001_tag_alloc - allocate a free tag for a task.
++  * @pm8001_ha: our hba struct
++  * @tag_out: the allocated tag.
++  */
++inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
++{
++	unsigned int index, tag;
++	void *bitmap = pm8001_ha->tags;
++
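++	/*
++	 * The tag pool is a plain bitmap of tags_num bits: take the first
++	 * clear bit, mark it busy and return it to the caller.
++	 */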
++	index = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
++	tag = index;
++	if (tag >= pm8001_ha->tags_num)
++		return -SAS_QUEUE_FULL;
++	pm8001_tag_set(pm8001_ha, tag);
++	*tag_out = tag;
++	return 0;
++}
++
++void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha)
++{
++	int i;
++	for (i = 0; i < pm8001_ha->tags_num; ++i)
++		pm8001_tag_clear(pm8001_ha, i);
++}
++
++ /**
++  * pm8001_mem_alloc - allocate memory for pm8001.
++  * @pdev: pci device.
++  * @virt_addr: the allocated virtual address
++  * @pphys_addr: the physical (DMA) address
++  * @pphys_addr_hi: the upper 32 bits of the physical address.
++  * @pphys_addr_lo: the lower 32 bits of the physical address.
++  * @mem_size: memory size.
++  * @align: required alignment in bytes.
++  */
++int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
++	dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
++	u32 *pphys_addr_lo, u32 mem_size, u32 align)
++{
++	caddr_t mem_virt_alloc;
++	dma_addr_t mem_dma_handle;
++	u64 phys_align;
++	u64 align_offset = 0;
++	if (align)
++		align_offset = (dma_addr_t)align - 1;
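++	/*
++	 * We over-allocate by 'align' bytes so the bus address can be
++	 * rounded up to the requested boundary below; the returned virtual
++	 * pointer is advanced by the same offset so both views match.
++	 */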
++	mem_virt_alloc =
++		pci_alloc_consistent(pdev, mem_size + align, &mem_dma_handle);
++	if (!mem_virt_alloc) {
++		pm8001_printk("memory allocation error\n");
++		return -1;
++	}
++	memset((void *)mem_virt_alloc, 0, mem_size+align);
++	*pphys_addr = mem_dma_handle;
++	phys_align = (*pphys_addr + align_offset) & ~align_offset;
++	*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
++	*pphys_addr_hi = upper_32_bits(phys_align);
++	*pphys_addr_lo = lower_32_bits(phys_align);
++	return 0;
++}
++/**
++  * pm8001_find_ha_by_dev - find our hba struct from a domain device that
++  * comes from the sas layer.
++  * @dev: the domain device from the sas layer.
++  */
++static
++struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
++{
++	struct sas_ha_struct *sha = dev->port->ha;
++	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
++	return pm8001_ha;
++}
++
++/**
++  * pm8001_phy_control - registered in sas_domain_function_template for
++  * libsas to use. Note: this only controls the HBA phys; to control an
++  * expander phy, use an SMP command.
++  * @sas_phy: which phy in HBA phys.
++  * @func: the operation.
++  * @funcdata: always NULL.
++  */
++int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
++	void *funcdata)
++{
++	int rc = 0, phy_id = sas_phy->id;
++	struct pm8001_hba_info *pm8001_ha = NULL;
++	struct sas_phy_linkrates *rates;
++	DECLARE_COMPLETION_ONSTACK(completion);
++	pm8001_ha = sas_phy->ha->lldd_ha;
++	pm8001_ha->phy[phy_id].enable_completion = &completion;
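++	/*
++	 * Requests below that need a started phy first issue a phy_start
++	 * and wait on this completion, so the subsequent control request
++	 * always sees an enabled phy.
++	 */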
++	switch (func) {
++	case PHY_FUNC_SET_LINK_RATE:
++		rates = funcdata;
++		if (rates->minimum_linkrate) {
++			pm8001_ha->phy[phy_id].minimum_linkrate =
++				rates->minimum_linkrate;
++		}
++		if (rates->maximum_linkrate) {
++			pm8001_ha->phy[phy_id].maximum_linkrate =
++				rates->maximum_linkrate;
++		}
++		if (pm8001_ha->phy[phy_id].phy_state == 0) {
++			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
++			wait_for_completion(&completion);
++		}
++		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
++					      PHY_LINK_RESET);
++		break;
++	case PHY_FUNC_HARD_RESET:
++		if (pm8001_ha->phy[phy_id].phy_state == 0) {
++			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
++			wait_for_completion(&completion);
++		}
++		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
++					      PHY_HARD_RESET);
++		break;
++	case PHY_FUNC_LINK_RESET:
++		if (pm8001_ha->phy[phy_id].phy_state == 0) {
++			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
++			wait_for_completion(&completion);
++		}
++		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
++					      PHY_LINK_RESET);
++		break;
++	case PHY_FUNC_RELEASE_SPINUP_HOLD:
++		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
++					      PHY_LINK_RESET);
++		break;
++	case PHY_FUNC_DISABLE:
++		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
++		break;
++	default:
++		rc = -EOPNOTSUPP;
++	}
++	msleep(300);
++	return rc;
++}
++
++int pm8001_slave_alloc(struct scsi_device *scsi_dev)
++{
++	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
++	if (dev_is_sata(dev)) {
++		/* We don't need to rescan targets
++		 * if the REPORT_LUNS request failed
++		 */
++		if (scsi_dev->lun > 0)
++			return -ENXIO;
++		scsi_dev->tagged_supported = 1;
++	}
++	return sas_slave_alloc(scsi_dev);
++}
++
++/**
++  * pm8001_scan_start - enable all HBA phys by sending the phy_start
++  * command to the HBA.
++  * @shost: the scsi host data.
++  */
++void pm8001_scan_start(struct Scsi_Host *shost)
++{
++	int i;
++	struct pm8001_hba_info *pm8001_ha;
++	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
++	pm8001_ha = sha->lldd_ha;
++	for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
++		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
++}
++
++int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
++{
++	/* give the phy enabling interrupt event time to come in (1s
++	 * is empirically about all it takes) */
++	if (time < HZ)
++		return 0;
++	/* Wait for discovery to finish */
++	scsi_flush_work(shost);
++	return 1;
++}
++
++/**
++  * pm8001_task_prep_smp - the dispatcher function; prepares data for an smp task
++  * @pm8001_ha: our hba card information
++  * @ccb: the ccb which attached to smp task
++  */
++static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
++	struct pm8001_ccb_info *ccb)
++{
++	return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
++}
++
++u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
++{
++	struct ata_queued_cmd *qc = task->uldd_task;
++	if (qc) {
++		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
++			qc->tf.command == ATA_CMD_FPDMA_READ) {
++			*tag = qc->tag;
++			return 1;
++		}
++	}
++	return 0;
++}
++
++/**
++  * pm8001_task_prep_ata - the dispatcher function; prepares data for a sata task
++  * @pm8001_ha: our hba card information
++  * @ccb: the ccb which attached to sata task
++  */
++static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
++	struct pm8001_ccb_info *ccb)
++{
++	return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
++}
++
++/**
++  * pm8001_task_prep_ssp_tm - the dispatcher function; prepares task management data
++  * @pm8001_ha: our hba card information
++  * @ccb: the ccb which attached to TM
++  * @tmf: the task management IU
++  */
++static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
++	struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
++{
++	return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
++}
++
++/**
++  * pm8001_task_prep_ssp - the dispatcher function; prepares ssp data for an ssp task
++  * @pm8001_ha: our hba card information
++  * @ccb: the ccb which attached to ssp task
++  */
++static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
++	struct pm8001_ccb_info *ccb)
++{
++	return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
++}
++
++int pm8001_slave_configure(struct scsi_device *sdev)
++{
++	struct domain_device *dev = sdev_to_domain_dev(sdev);
++	int ret = sas_slave_configure(sdev);
++	if (ret)
++		return ret;
++	if (dev_is_sata(dev)) {
++	#ifdef PM8001_DISABLE_NCQ
++		struct ata_port *ap = dev->sata_dev.ap;
++		struct ata_device *adev = ap->link.device;
++		adev->flags |= ATA_DFLAG_NCQ_OFF;
++		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
++	#endif
++	}
++	return 0;
++}
++
++/**
++  * pm8001_task_exec - execute a task that comes from the upper level; send
++  * the command or data to the DMA area and then increase CI. For
++  * queuecommand (ssp) it comes from the upper layer, for smp commands from
++  * libsas, and for ata commands from libata.
++  * @task: the task to execute.
++  * @num: if can_queue is greater than 1, tasks can be queued up; for SMP
++  * tasks we always execute one at a time.
++  * @gfp_flags: gfp_flags.
++  * @is_tmf: whether this is a task management task.
++  * @tmf: the task management IU
++  */
++#define DEV_IS_GONE(pm8001_dev)	\
++	((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE)))
++static int pm8001_task_exec(struct sas_task *task, const int num,
++	gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
++{
++	struct domain_device *dev = task->dev;
++	struct pm8001_hba_info *pm8001_ha;
++	struct pm8001_device *pm8001_dev;
++	struct sas_task *t = task;
++	struct pm8001_ccb_info *ccb;
++	u32 tag = 0xdeadbeef, rc, n_elem = 0;
++	u32 n = num;
++	unsigned long flags = 0;
++
++	if (!dev->port) {
++		struct task_status_struct *tsm = &t->task_status;
++		tsm->resp = SAS_TASK_UNDELIVERED;
++		tsm->stat = SAS_PHY_DOWN;
++		if (dev->dev_type != SATA_DEV)
++			t->task_done(t);
++		return 0;
++	}
++	pm8001_ha = pm8001_find_ha_by_dev(task->dev);
++	PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device\n"));
++	spin_lock_irqsave(&pm8001_ha->lock, flags);
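++	/*
++	 * Tag allocation, SG mapping and request construction for every
++	 * task happen under the HBA lock; when num > 1 the loop walks
++	 * t->list to submit each task in turn.
++	 */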
++	do {
++		dev = t->dev;
++		pm8001_dev = dev->lldd_dev;
++		if (DEV_IS_GONE(pm8001_dev)) {
++			if (pm8001_dev) {
++				PM8001_IO_DBG(pm8001_ha,
++					pm8001_printk("device %d not ready.\n",
++					pm8001_dev->device_id));
++			} else {
++				PM8001_IO_DBG(pm8001_ha,
++					pm8001_printk("device %016llx not "
++					"ready.\n", SAS_ADDR(dev->sas_addr)));
++			}
++			rc = SAS_PHY_DOWN;
++			goto out_done;
++		}
++		rc = pm8001_tag_alloc(pm8001_ha, &tag);
++		if (rc)
++			goto err_out;
++		ccb = &pm8001_ha->ccb_info[tag];
++
++		if (!sas_protocol_ata(t->task_proto)) {
++			if (t->num_scatter) {
++				n_elem = dma_map_sg(pm8001_ha->dev,
++					t->scatter,
++					t->num_scatter,
++					t->data_dir);
++				if (!n_elem) {
++					rc = -ENOMEM;
++					goto err_out;
++				}
++			}
++		} else {
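++			/* For SATA/STP, libata has already DMA-mapped the
++			 * scatterlist; just reuse its element count. */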
++			n_elem = t->num_scatter;
++		}
++
++		t->lldd_task = NULL;
++		ccb->n_elem = n_elem;
++		ccb->ccb_tag = tag;
++		ccb->task = t;
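++		/* Hand the prepared ccb to the chip-layer dispatcher based on
++		 * the task protocol. */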
++		switch (t->task_proto) {
++		case SAS_PROTOCOL_SMP:
++			rc = pm8001_task_prep_smp(pm8001_ha, ccb);
++			break;
++		case SAS_PROTOCOL_SSP:
++			if (is_tmf)
++				rc = pm8001_task_prep_ssp_tm(pm8001_ha,
++					ccb, tmf);
++			else
++				rc = pm8001_task_prep_ssp(pm8001_ha, ccb);
++			break;
++		case SAS_PROTOCOL_SATA:
++		case SAS_PROTOCOL_STP:
++		case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
++			rc = pm8001_task_prep_ata(pm8001_ha, ccb);
++			break;
++		default:
++			dev_printk(KERN_ERR, pm8001_ha->dev,
++				"unknown sas_task proto: 0x%x\n",
++				t->task_proto);
++			rc = -EINVAL;
++			break;
++		}
++
++		if (rc) {
++			PM8001_IO_DBG(pm8001_ha,
++				pm8001_printk("rc is %x\n", rc));
++			goto err_out_tag;
++		}
++		t->lldd_task = ccb;
++		/* TODO: select normal or high priority */
++		spin_lock(&t->task_state_lock);
++		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
++		spin_unlock(&t->task_state_lock);
++		pm8001_dev->running_req++;
++		if (n > 1)
++			t = list_entry(t->list.next, struct sas_task, list);
++	} while (--n);
++	rc = 0;
++	goto out_done;
++
++err_out_tag:
++	pm8001_tag_free(pm8001_ha, tag);
++err_out:
++	dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
++	if (!sas_protocol_ata(t->task_proto))
++		if (n_elem)
++			dma_unmap_sg(pm8001_ha->dev, t->scatter, n_elem,
++				t->data_dir);
++out_done:
++	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
++	return rc;
++}
++
++/**
++  * pm8001_queue_command - registered for upper-layer use; all IO commands
++  * sent to the HBA come through this interface.
++  * @task: the task to be executed.
++  * @num: if can_queue is greater than 1, tasks can be queued up; for SMP
++  * tasks we always execute one at a time
++  * @gfp_flags: gfp_flags
++  */
++int pm8001_queue_command(struct sas_task *task, const int num,
++		gfp_t gfp_flags)
++{
++	return pm8001_task_exec(task, num, gfp_flags, 0, NULL);
++}
++
++void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha, u32 ccb_idx)
++{
++	pm8001_tag_clear(pm8001_ha, ccb_idx);
++}
++
++/**
++  * pm8001_ccb_task_free - free the sg for SSP and SMP commands, then free
++  * the ccb.
++  * @pm8001_ha: our hba card information
++  * @task: the task to be freed.
++  * @ccb: the ccb attached to the task
++  * @ccb_idx: ccb index.
++  */
++void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
++	struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx)
++{
++	if (!ccb->task)
++		return;
++	if (!sas_protocol_ata(task->task_proto))
++		if (ccb->n_elem)
++			dma_unmap_sg(pm8001_ha->dev, task->scatter,
++				task->num_scatter, task->data_dir);
++
++	switch (task->task_proto) {
++	case SAS_PROTOCOL_SMP:
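++		/* The SMP request/response frames were mapped individually at
++		 * submit time, so unmap them one by one here. */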
++		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
++			PCI_DMA_FROMDEVICE);
++		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
++			PCI_DMA_TODEVICE);
++		break;
++
++	case SAS_PROTOCOL_SATA:
++	case SAS_PROTOCOL_STP:
++	case SAS_PROTOCOL_SSP:
++	default:
++		/* do nothing */
++		break;
++	}
++	task->lldd_task = NULL;
++	ccb->task = NULL;
++	ccb->ccb_tag = 0xFFFFFFFF;
++	pm8001_ccb_free(pm8001_ha, ccb_idx);
++}
++
++/**
++  * pm8001_alloc_dev - find an empty pm8001_device structure and return it
++  * for use.
++  * @pm8001_ha: our hba card information
++  */
++struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
++{
++	u32 dev;
++	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
++		if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) {
++			pm8001_ha->devices[dev].id = dev;
++			return &pm8001_ha->devices[dev];
++		}
++	}
++	if (dev == PM8001_MAX_DEVICES) {
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("max support %d devices, ignore ..\n",
++			PM8001_MAX_DEVICES));
++	}
++	return NULL;
++}
++
++static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
++{
++	u32 id = pm8001_dev->id;
++	memset(pm8001_dev, 0, sizeof(*pm8001_dev));
++	pm8001_dev->id = id;
++	pm8001_dev->dev_type = NO_DEVICE;
++	pm8001_dev->device_id = PM8001_MAX_DEVICES;
++	pm8001_dev->sas_device = NULL;
++}
++
++/**
++  * pm8001_dev_found_notify - when libsas finds a SAS domain device it tells
++  * the LLDD that the device was found; the LLDD then registers the device
++  * with the HBA FW using the "OPC_INB_REG_DEV" command. The HBA assigns a
++  * device ID (based on the device's SAS address) and returns it to the LLDD;
++  * from then on we talk to the HBA FW using that device ID rather than the
++  * SAS address. This step is required for our HBA but optional for other
++  * HBA drivers.
++  * @dev: the device structure which the sas layer uses.
++  */
++static int pm8001_dev_found_notify(struct domain_device *dev)
++{
++	unsigned long flags = 0;
++	int res = 0;
++	struct pm8001_hba_info *pm8001_ha = NULL;
++	struct domain_device *parent_dev = dev->parent;
++	struct pm8001_device *pm8001_device;
++	DECLARE_COMPLETION_ONSTACK(completion);
++	u32 flag = 0;
++	pm8001_ha = pm8001_find_ha_by_dev(dev);
++	spin_lock_irqsave(&pm8001_ha->lock, flags);
++
++	pm8001_device = pm8001_alloc_dev(pm8001_ha);
++	if (!pm8001_device) {
++		res = -1;
++		goto found_out;
++	}
++	pm8001_device->sas_device = dev;
++	dev->lldd_dev = pm8001_device;
++	pm8001_device->dev_type = dev->dev_type;
++	pm8001_device->dcompletion = &completion;
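++	/* Behind an expander: find the expander phy whose attached SAS
++	 * address matches the new device, and remember it. */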
++	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
++		int phy_id;
++		struct ex_phy *phy;
++		for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys;
++		phy_id++) {
++			phy = &parent_dev->ex_dev.ex_phy[phy_id];
++			if (SAS_ADDR(phy->attached_sas_addr)
++				== SAS_ADDR(dev->sas_addr)) {
++				pm8001_device->attached_phy = phy_id;
++				break;
++			}
++		}
++		if (phy_id == parent_dev->ex_dev.num_phys) {
++			PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("Error: no attached dev:%016llx"
++			" at ex:%016llx.\n", SAS_ADDR(dev->sas_addr),
++				SAS_ADDR(parent_dev->sas_addr)));
++			res = -1;
++		}
++	} else {
++		if (dev->dev_type == SATA_DEV) {
++			pm8001_device->attached_phy =
++				dev->rphy->identify.phy_identifier;
++			flag = 1; /* directly attached SATA */
++		}
++	} /* register this device with the HBA FW */
++	PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n"));
++	PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
++	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
++	wait_for_completion(&completion);
++	if (dev->dev_type == SAS_END_DEV)
++		msleep(50);
++	pm8001_ha->flags = PM8001F_RUN_TIME;
++	return 0;
++found_out:
++	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
++	return res;
++}
++
++int pm8001_dev_found(struct domain_device *dev)
++{
++	return pm8001_dev_found_notify(dev);
++}
++
++/**
++  * pm8001_alloc_task - allocate a task structure for TMF
++  */
++static struct sas_task *pm8001_alloc_task(void)
++{
++	struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
++	if (task) {
++		INIT_LIST_HEAD(&task->list);
++		spin_lock_init(&task->task_state_lock);
++		task->task_state_flags = SAS_TASK_STATE_PENDING;
++		init_timer(&task->timer);
++		init_completion(&task->completion);
++	}
++	return task;
++}
++
++static void pm8001_free_task(struct sas_task *task)
++{
++	if (task) {
++		BUG_ON(!list_empty(&task->list));
++		kfree(task);
++	}
++}
++
++static void pm8001_task_done(struct sas_task *task)
++{
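++	/* If del_timer() fails, pm8001_tmf_timedout() is already running (or
++	 * has run) and will do the completion itself; don't complete twice. */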
++	if (!del_timer(&task->timer))
++		return;
++	complete(&task->completion);
++}
++
++static void pm8001_tmf_timedout(unsigned long data)
++{
++	struct sas_task *task = (struct sas_task *)data;
++
++	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
++	complete(&task->completion);
++}
++
++#define PM8001_TASK_TIMEOUT 20
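++/* TMF timeout in seconds; multiplied by HZ when the timer is armed. */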
++/**
++  * pm8001_exec_internal_tmf_task - when an error or exception happens, we
++  * may want to react, for example abort the issued task which resulted in
++  * the exception; that is done by calling this function. Note it shares the
++  * task execution interface.
++  * @dev: the wanted device.
++  * @parameter: ssp task parameter.
++  * @para_len: length of the parameter.
++  * @tmf: which task management function is to be taken.
++  */
++static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
++	void *parameter, u32 para_len, struct pm8001_tmf_task *tmf)
++{
++	int res, retry;
++	struct sas_task *task = NULL;
++	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
++
++	for (retry = 0; retry < 3; retry++) {
++		task = pm8001_alloc_task();
++		if (!task)
++			return -ENOMEM;
++
++		task->dev = dev;
++		task->task_proto = dev->tproto;
++		memcpy(&task->ssp_task, parameter, para_len);
++		task->task_done = pm8001_task_done;
++		task->timer.data = (unsigned long)task;
++		task->timer.function = pm8001_tmf_timedout;
++		task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
++		add_timer(&task->timer);
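++		/* If the TMF never completes, pm8001_tmf_timedout() marks the
++		 * task aborted and completes it, bounding the wait below. */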
++
++		res = pm8001_task_exec(task, 1, GFP_KERNEL, 1, tmf);
++
++		if (res) {
++			del_timer(&task->timer);
++			PM8001_FAIL_DBG(pm8001_ha,
++				pm8001_printk("Executing internal task "
++				"failed\n"));
++			goto ex_err;
++		}
++		wait_for_completion(&task->completion);
++		res = -TMF_RESP_FUNC_FAILED;
++		/* Even if the TMF timed out, return directly. */
++		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
++			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
++				PM8001_FAIL_DBG(pm8001_ha,
++					pm8001_printk("TMF task[%x]timeout.\n",
++					tmf->tmf));
++				goto ex_err;
++			}
++		}
++
++		if (task->task_status.resp == SAS_TASK_COMPLETE &&
++			task->task_status.stat == SAM_GOOD) {
++			res = TMF_RESP_FUNC_COMPLETE;
++			break;
++		}
++
++		if (task->task_status.resp == SAS_TASK_COMPLETE &&
++		task->task_status.stat == SAS_DATA_UNDERRUN) {
++			/* no error, but return the number of bytes of
++			 * underrun */
++			res = task->task_status.residual;
++			break;
++		}
++
++		if (task->task_status.resp == SAS_TASK_COMPLETE &&
++			task->task_status.stat == SAS_DATA_OVERRUN) {
++			PM8001_FAIL_DBG(pm8001_ha,
++				pm8001_printk("Blocked task error.\n"));
++			res = -EMSGSIZE;
++			break;
++		} else {
++			PM8001_IO_DBG(pm8001_ha,
++			pm8001_printk("Task to dev %016llx response: 0x%x "
++				"status 0x%x\n",
++				SAS_ADDR(dev->sas_addr),
++				task->task_status.resp,
++				task->task_status.stat));
++			pm8001_free_task(task);
++			task = NULL;
++		}
++	}
++ex_err:
++	BUG_ON(retry == 3 && task != NULL);
++	if (task != NULL)
++		pm8001_free_task(task);
++	return res;
++}
++
++static int
++pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
++	struct pm8001_device *pm8001_dev, struct domain_device *dev, u32 flag,
++	u32 task_tag)
++{
++	int res, retry;
++	u32 rc, ccb_tag;
++	struct pm8001_ccb_info *ccb;
++	struct sas_task *task = NULL;
++
++	for (retry = 0; retry < 3; retry++) {
++		task = pm8001_alloc_task();
++		if (!task)
++			return -ENOMEM;
++
++		task->dev = dev;
++		task->task_proto = dev->tproto;
++		task->task_done = pm8001_task_done;
++		task->timer.data = (unsigned long)task;
++		task->timer.function = pm8001_tmf_timedout;
++		task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
++		add_timer(&task->timer);
++
++		rc = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
++		if (rc)
++			return rc;
++		ccb = &pm8001_ha->ccb_info[ccb_tag];
++		ccb->device = pm8001_dev;
++		ccb->ccb_tag = ccb_tag;
++		ccb->task = task;
++
++		res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
++			pm8001_dev, flag, task_tag, ccb_tag);
++
++		if (res) {
++			del_timer(&task->timer);
++			PM8001_FAIL_DBG(pm8001_ha,
++				pm8001_printk("Executing internal task "
++				"failed\n"));
++			goto ex_err;
++		}
++		wait_for_completion(&task->completion);
++		res = TMF_RESP_FUNC_FAILED;
++		/* Even if the TMF timed out, return directly. */
++		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
++			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
++				PM8001_FAIL_DBG(pm8001_ha,
++					pm8001_printk("TMF task timeout.\n"));
++				goto ex_err;
++			}
++		}
++
++		if (task->task_status.resp == SAS_TASK_COMPLETE &&
++			task->task_status.stat == SAM_GOOD) {
++			res = TMF_RESP_FUNC_COMPLETE;
++			break;
++
++		} else {
++			PM8001_IO_DBG(pm8001_ha,
++				pm8001_printk("Task to dev %016llx response: "
++					"0x%x status 0x%x\n",
++				SAS_ADDR(dev->sas_addr),
++				task->task_status.resp,
++				task->task_status.stat));
++			pm8001_free_task(task);
++			task = NULL;
++		}
++	}
++ex_err:
++	BUG_ON(retry == 3 && task != NULL);
++	if (task != NULL)
++		pm8001_free_task(task);
++	return res;
++}
++
++/**
++  * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
++  * @dev: the device structure which sas layer used.
++  */
++static void pm8001_dev_gone_notify(struct domain_device *dev)
++{
++	unsigned long flags = 0;
++	u32 tag;
++	struct pm8001_hba_info *pm8001_ha;
++	struct pm8001_device *pm8001_dev = dev->lldd_dev;
++	u32 device_id;
++	pm8001_ha = pm8001_find_ha_by_dev(dev);
++	spin_lock_irqsave(&pm8001_ha->lock, flags);
++	pm8001_tag_alloc(pm8001_ha, &tag);
++	if (pm8001_dev) {
++		device_id = pm8001_dev->device_id;
++		PM8001_DISC_DBG(pm8001_ha,
++			pm8001_printk("found dev[%d:%x] is gone.\n",
++			pm8001_dev->device_id, pm8001_dev->dev_type));
++		if (pm8001_dev->running_req) {
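++			/* There is outstanding I/O: the internal abort path
++			 * sleeps, so drop the HBA lock around it. */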
++			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
++			pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
++				dev, 1, 0);
++			spin_lock_irqsave(&pm8001_ha->lock, flags);
++		}
++		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
++		pm8001_free_dev(pm8001_dev);
++	} else {
++		PM8001_DISC_DBG(pm8001_ha,
++			pm8001_printk("Found dev has gone.\n"));
++	}
++	dev->lldd_dev = NULL;
++	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
++}
++
++void pm8001_dev_gone(struct domain_device *dev)
++{
++	pm8001_dev_gone_notify(dev);
++}
++
++static int pm8001_issue_ssp_tmf(struct domain_device *dev,
++	u8 *lun, struct pm8001_tmf_task *tmf)
++{
++	struct sas_ssp_task ssp_task;
++	if (!(dev->tproto & SAS_PROTOCOL_SSP))
++		return TMF_RESP_FUNC_ESUPP;
++
++	strncpy((u8 *)&ssp_task.LUN, lun, 8);
++	return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task),
++		tmf);
++}
++
++/**
++  * The standard mandates link reset for ATA (type 0) and hard reset for
++  * SSP (type 1), only for RECOVERY.
++  */
++int pm8001_I_T_nexus_reset(struct domain_device *dev)
++{
++	int rc = TMF_RESP_FUNC_FAILED;
++	struct pm8001_device *pm8001_dev;
++	struct pm8001_hba_info *pm8001_ha;
++	struct sas_phy *phy;
++	if (!dev || !dev->lldd_dev)
++		return -1;
++
++	pm8001_dev = dev->lldd_dev;
++	pm8001_ha = pm8001_find_ha_by_dev(dev);
++	phy = sas_find_local_phy(dev);
++
++	if (dev_is_sata(dev)) {
++		DECLARE_COMPLETION_ONSTACK(completion_setstate);
++		rc = sas_phy_reset(phy, 1);
++		msleep(2000);
++		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
++			dev, 1, 0);
++		pm8001_dev->setds_completion = &completion_setstate;
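++		/* 0x01 asks the FW to put the device back in the operational
++		 * state (presumably DS_OPERATIONAL) after the reset/abort. */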
++		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
++			pm8001_dev, 0x01);
++		wait_for_completion(&completion_setstate);
++	} else {
++		rc = sas_phy_reset(phy, 1);
++		msleep(2000);
++	}
++	PM8001_EH_DBG(pm8001_ha, pm8001_printk("for device[%x]: rc=%d\n",
++		pm8001_dev->device_id, rc));
++	return rc;
++}
++
++/* mandatory SAM-3, the task resets the specified LUN */
++int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
++{
++	int rc = TMF_RESP_FUNC_FAILED;
++	struct pm8001_tmf_task tmf_task;
++	struct pm8001_device *pm8001_dev = dev->lldd_dev;
++	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
++	if (dev_is_sata(dev)) {
++		struct sas_phy *phy = sas_find_local_phy(dev);
++		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
++			dev, 1, 0);
++		rc = sas_phy_reset(phy, 1);
++		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
++			pm8001_dev, 0x01);
++		msleep(2000);
++	} else {
++		tmf_task.tmf = TMF_LU_RESET;
++		rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
++	}
++	/* If this failed, fall through to an I_T nexus reset */
++	PM8001_EH_DBG(pm8001_ha, pm8001_printk("for device[%x]: rc=%d\n",
++		pm8001_dev->device_id, rc));
++	return rc;
++}
++
++/* optional SAM-3 */
++int pm8001_query_task(struct sas_task *task)
++{
++	u32 tag = 0xdeadbeef;
++	int i = 0;
++	struct scsi_lun lun;
++	struct pm8001_tmf_task tmf_task;
++	int rc = TMF_RESP_FUNC_FAILED;
++	if (unlikely(!task || !task->lldd_task || !task->dev))
++		return rc;
++
++	if (task->task_proto & SAS_PROTOCOL_SSP) {
++		struct scsi_cmnd *cmnd = task->uldd_task;
++		struct domain_device *dev = task->dev;
++		struct pm8001_hba_info *pm8001_ha =
++			pm8001_find_ha_by_dev(dev);
++
++		int_to_scsilun(cmnd->device->lun, &lun);
++		rc = pm8001_find_tag(task, &tag);
++		if (rc == 0) {
++			rc = TMF_RESP_FUNC_FAILED;
++			return rc;
++		}
++		PM8001_EH_DBG(pm8001_ha, pm8001_printk("Query:["));
++		for (i = 0; i < 16; i++)
++			printk(KERN_INFO "%02x ", cmnd->cmnd[i]);
++		printk(KERN_INFO "]\n");
++		tmf_task.tmf = TMF_QUERY_TASK;
++		tmf_task.tag_of_task_to_be_managed = tag;
++
++		rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
++		switch (rc) {
++		/* The task is still in Lun, release it then */
++		case TMF_RESP_FUNC_SUCC:
++			PM8001_EH_DBG(pm8001_ha,
++				pm8001_printk("The task is still in Lun\n"));
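++			/* fall through */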
++		/* The task is not in Lun or failed, reset the phy */
++		case TMF_RESP_FUNC_FAILED:
++		case TMF_RESP_FUNC_COMPLETE:
++			PM8001_EH_DBG(pm8001_ha,
++			pm8001_printk("The task is not in Lun or failed,"
++			" reset the phy\n"));
++			break;
++		}
++	}
++	pm8001_printk("rc = %d\n", rc);
++	return rc;
++}
++
++/* mandatory SAM-3, still needs to free task/ccb info; abort the specified task */
++int pm8001_abort_task(struct sas_task *task)
++{
++	unsigned long flags;
++	u32 tag = 0xdeadbeef;
++	u32 device_id;
++	struct domain_device *dev;
++	struct pm8001_hba_info *pm8001_ha = NULL;
++	struct pm8001_ccb_info *ccb;
++	struct scsi_lun lun;
++	struct pm8001_device *pm8001_dev;
++	struct pm8001_tmf_task tmf_task;
++	int rc = TMF_RESP_FUNC_FAILED;
++	if (unlikely(!task || !task->lldd_task || !task->dev))
++		return rc;
++	spin_lock_irqsave(&task->task_state_lock, flags);
++	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
++		spin_unlock_irqrestore(&task->task_state_lock, flags);
++		rc = TMF_RESP_FUNC_COMPLETE;
++		goto out;
++	}
++	spin_unlock_irqrestore(&task->task_state_lock, flags);
++	if (task->task_proto & SAS_PROTOCOL_SSP) {
++		struct scsi_cmnd *cmnd = task->uldd_task;
++		dev = task->dev;
++		ccb = task->lldd_task;
++		pm8001_dev = dev->lldd_dev;
++		pm8001_ha = pm8001_find_ha_by_dev(dev);
++		int_to_scsilun(cmnd->device->lun, &lun);
++		rc = pm8001_find_tag(task, &tag);
++		if (rc == 0) {
++			printk(KERN_INFO "No such tag in %s\n", __func__);
++			rc = TMF_RESP_FUNC_FAILED;
++			return rc;
++		}
++		device_id = pm8001_dev->device_id;
++		PM8001_EH_DBG(pm8001_ha,
++		pm8001_printk("abort io to device_id = %d\n", device_id));
++		tmf_task.tmf = TMF_ABORT_TASK;
++		tmf_task.tag_of_task_to_be_managed = tag;
++		rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
++		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
++			pm8001_dev->sas_device, 0, tag);
++	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
++		task->task_proto & SAS_PROTOCOL_STP) {
++		dev = task->dev;
++		pm8001_dev = dev->lldd_dev;
++		pm8001_ha = pm8001_find_ha_by_dev(dev);
++		rc = pm8001_find_tag(task, &tag);
++		if (rc == 0) {
++			printk(KERN_INFO "No such tag in %s\n", __func__);
++			rc = TMF_RESP_FUNC_FAILED;
++			return rc;
++		}
++		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
++			pm8001_dev->sas_device, 0, tag);
++	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
++		/* SMP */
++		dev = task->dev;
++		pm8001_dev = dev->lldd_dev;
++		pm8001_ha = pm8001_find_ha_by_dev(dev);
++		rc = pm8001_find_tag(task, &tag);
++		if (rc == 0) {
++			printk(KERN_INFO "No such tag in %s\n", __func__);
++			rc = TMF_RESP_FUNC_FAILED;
++			return rc;
++		}
++		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
++			pm8001_dev->sas_device, 0, tag);
++
++	}
++out:
++	if (rc != TMF_RESP_FUNC_COMPLETE)
++		pm8001_printk("rc = %d\n", rc);
++	return rc;
++}
++
++int pm8001_abort_task_set(struct domain_device *dev, u8 *lun)
++{
++	int rc = TMF_RESP_FUNC_FAILED;
++	struct pm8001_tmf_task tmf_task;
++
++	tmf_task.tmf = TMF_ABORT_TASK_SET;
++	rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
++	return rc;
++}
++
++int pm8001_clear_aca(struct domain_device *dev, u8 *lun)
++{
++	int rc = TMF_RESP_FUNC_FAILED;
++	struct pm8001_tmf_task tmf_task;
++
++	tmf_task.tmf = TMF_CLEAR_ACA;
++	rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
++
++	return rc;
++}
++
++int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
++{
++	int rc = TMF_RESP_FUNC_FAILED;
++	struct pm8001_tmf_task tmf_task;
++	struct pm8001_device *pm8001_dev = dev->lldd_dev;
++	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
++
++	PM8001_EH_DBG(pm8001_ha,
++		pm8001_printk("I_T_L_Q clear task set[%x]\n",
++		pm8001_dev->device_id));
++	tmf_task.tmf = TMF_CLEAR_TASK_SET;
++	rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
++	return rc;
++}
++
+diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
+new file mode 100644
+index 0000000..14c676b
+--- /dev/null
++++ b/drivers/scsi/pm8001/pm8001_sas.h
+@@ -0,0 +1,480 @@
++/*
++ * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
++ *
++ * Copyright (c) 2008-2009 USI Co., Ltd.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions, and the following disclaimer,
++ *    without modification.
++ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
++ *    substantially similar to the "NO WARRANTY" disclaimer below
++ *    ("Disclaimer") and any redistribution must be conditioned upon
++ *    including a substantially similar Disclaimer requirement for further
++ *    binary redistribution.
++ * 3. Neither the names of the above-listed copyright holders nor the names
++ *    of any contributors may be used to endorse or promote products derived
++ *    from this software without specific prior written permission.
++ *
++ * Alternatively, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") version 2 as published by the Free
++ * Software Foundation.
++ *
++ * NO WARRANTY
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
++ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
++ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGES.
++ *
++ */
++
++#ifndef _PM8001_SAS_H_
++#define _PM8001_SAS_H_
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/delay.h>
++#include <linux/types.h>
++#include <linux/ctype.h>
++#include <linux/dma-mapping.h>
++#include <linux/pci.h>
++#include <linux/interrupt.h>
++#include <linux/smp_lock.h>
++#include <scsi/libsas.h>
++#include <scsi/scsi_tcq.h>
++#include <scsi/sas_ata.h>
++#include <asm/atomic.h>
++#include "pm8001_defs.h"
++
++#define DRV_NAME		"pm8001"
++#define DRV_VERSION		"0.1.36"
++#define PM8001_FAIL_LOGGING	0x01 /* libsas EH function logging */
++#define PM8001_INIT_LOGGING	0x02 /* driver init logging */
++#define PM8001_DISC_LOGGING	0x04 /* discovery layer logging */
++#define PM8001_IO_LOGGING	0x08 /* I/O path logging */
++#define PM8001_EH_LOGGING	0x10 /* Error message logging */
++#define PM8001_IOCTL_LOGGING	0x20 /* IOCTL message logging */
++#define PM8001_MSG_LOGGING	0x40 /* misc message logging */
++#define pm8001_printk(format, arg...)	printk(KERN_INFO "%s %d:" format,\
++				__func__, __LINE__, ## arg)
++#define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD)	\
++do {						\
++	if (unlikely(HBA->logging_level & LEVEL))	\
++		do {					\
++			CMD;				\
++		} while (0);				\
++} while (0);
++
++#define PM8001_EH_DBG(HBA, CMD)			\
++	PM8001_CHECK_LOGGING(HBA, PM8001_EH_LOGGING, CMD)
++
++#define PM8001_INIT_DBG(HBA, CMD)		\
++	PM8001_CHECK_LOGGING(HBA, PM8001_INIT_LOGGING, CMD)
++
++#define PM8001_DISC_DBG(HBA, CMD)		\
++	PM8001_CHECK_LOGGING(HBA, PM8001_DISC_LOGGING, CMD)
++
++#define PM8001_IO_DBG(HBA, CMD)		\
++	PM8001_CHECK_LOGGING(HBA, PM8001_IO_LOGGING, CMD)
++
++#define PM8001_FAIL_DBG(HBA, CMD)		\
++	PM8001_CHECK_LOGGING(HBA, PM8001_FAIL_LOGGING, CMD)
++
++#define PM8001_IOCTL_DBG(HBA, CMD)		\
++	PM8001_CHECK_LOGGING(HBA, PM8001_IOCTL_LOGGING, CMD)
++
++#define PM8001_MSG_DBG(HBA, CMD)		\
++	PM8001_CHECK_LOGGING(HBA, PM8001_MSG_LOGGING, CMD)
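++
++/*
++ * Each wrapper expands CMD only when the matching bit is set in
++ * pm8001_ha->logging_level, e.g.:
++ *	PM8001_EH_DBG(pm8001_ha, pm8001_printk("rc=%d\n", rc));
++ */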
++
++
++#define PM8001_USE_TASKLET
++#define PM8001_USE_MSIX
++
++
++#define DEV_IS_EXPANDER(type)	((type == EDGE_DEV) || (type == FANOUT_DEV))
++
++#define PM8001_NAME_LENGTH		32 /* generic length of strings */
++extern struct list_head hba_list;
++extern const struct pm8001_dispatch pm8001_8001_dispatch;
++
++struct pm8001_hba_info;
++struct pm8001_ccb_info;
++struct pm8001_device;
++struct pm8001_tmf_task;
++struct pm8001_dispatch {
++	char *name;
++	int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
++	int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha, u32 signature);
++	void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
++	int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
++	void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha);
++	void (*isr)(struct pm8001_hba_info *pm8001_ha);
++	u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha);
++	int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha);
++	void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha);
++	void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha);
++	void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
++	int (*smp_req)(struct pm8001_hba_info *pm8001_ha,
++		struct pm8001_ccb_info *ccb);
++	int (*ssp_io_req)(struct pm8001_hba_info *pm8001_ha,
++		struct pm8001_ccb_info *ccb);
++	int (*sata_req)(struct pm8001_hba_info *pm8001_ha,
++		struct pm8001_ccb_info *ccb);
++	int (*phy_start_req)(struct pm8001_hba_info *pm8001_ha,	u8 phy_id);
++	int (*phy_stop_req)(struct pm8001_hba_info *pm8001_ha, u8 phy_id);
++	int (*reg_dev_req)(struct pm8001_hba_info *pm8001_ha,
++		struct pm8001_device *pm8001_dev, u32 flag);
++	int (*dereg_dev_req)(struct pm8001_hba_info *pm8001_ha, u32 device_id);
++	int (*phy_ctl_req)(struct pm8001_hba_info *pm8001_ha,
++		u32 phy_id, u32 phy_op);
++	int (*task_abort)(struct pm8001_hba_info *pm8001_ha,
++		struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag,
++		u32 cmd_tag);
++	int (*ssp_tm_req)(struct pm8001_hba_info *pm8001_ha,
++		struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf);
++	int (*get_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload);
++	int (*set_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload);
++	int (*fw_flash_update_req)(struct pm8001_hba_info *pm8001_ha,
++		void *payload);
++	int (*set_dev_state_req)(struct pm8001_hba_info *pm8001_ha,
++		struct pm8001_device *pm8001_dev, u32 state);
++	int (*sas_diag_start_end_req)(struct pm8001_hba_info *pm8001_ha,
++		u32 state);
++	int (*sas_diag_execute_req)(struct pm8001_hba_info *pm8001_ha,
++		u32 state);
++};
++
++struct pm8001_chip_info {
++	u32	n_phy;
++	const struct pm8001_dispatch	*dispatch;
++};
++#define PM8001_CHIP_DISP	(pm8001_ha->chip->dispatch)
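++/* NB: expands using a local variable named pm8001_ha, which the caller must
++ * have in scope. */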
++
++struct pm8001_port {
++	struct asd_sas_port	sas_port;
++};
++
++struct pm8001_phy {
++	struct pm8001_hba_info	*pm8001_ha;
++	struct pm8001_port	*port;
++	struct asd_sas_phy	sas_phy;
++	struct sas_identify	identify;
++	struct scsi_device	*sdev;
++	u64			dev_sas_addr;
++	u32			phy_type;
++	struct completion	*enable_completion;
++	u32			frame_rcvd_size;
++	u8			frame_rcvd[32];
++	u8			phy_attached;
++	u8			phy_state;
++	enum sas_linkrate	minimum_linkrate;
++	enum sas_linkrate	maximum_linkrate;
++};
++
++struct pm8001_device {
++	enum sas_dev_type	dev_type;
++	struct domain_device	*sas_device;
++	u32			attached_phy;
++	u32			id;
++	struct completion	*dcompletion;
++	struct completion	*setds_completion;
++	u32			device_id;
++	u32			running_req;
++};
++
++struct pm8001_prd_imt {
++	__le32			len;
++	__le32			e;
++};
++
++struct pm8001_prd {
++	__le64			addr;		/* 64-bit buffer address */
++	struct pm8001_prd_imt	im_len;		/* 64-bit length */
++} __attribute__ ((packed));
++/*
++ * CCB(Command Control Block)
++ */
++struct pm8001_ccb_info {
++	struct list_head	entry;
++	struct sas_task		*task;
++	u32			n_elem;
++	u32			ccb_tag;
++	dma_addr_t		ccb_dma_handle;
++	struct pm8001_device	*device;
++	struct pm8001_prd	buf_prd[PM8001_MAX_DMA_SG];
++	struct fw_control_ex	*fw_control_context;
++};
++
++struct mpi_mem {
++	void			*virt_ptr;
++	dma_addr_t		phys_addr;
++	u32			phys_addr_hi;
++	u32			phys_addr_lo;
++	u32			total_len;
++	u32			num_elements;
++	u32			element_size;
++	u32			alignment;
++};
++
++struct mpi_mem_req {
++	/* The number of elements in the mpiMemory array */
++	u32			count;
++	/* The array of structures that define memory regions */
++	struct mpi_mem		region[USI_MAX_MEMCNT];
++};
++
++struct main_cfg_table {
++	u32			signature;
++	u32			interface_rev;
++	u32			firmware_rev;
++	u32			max_out_io;
++	u32			max_sgl;
++	u32			ctrl_cap_flag;
++	u32			gst_offset;
++	u32			inbound_queue_offset;
++	u32			outbound_queue_offset;
++	u32			inbound_q_nppd_hppd;
++	u32			outbound_hw_event_pid0_3;
++	u32			outbound_hw_event_pid4_7;
++	u32			outbound_ncq_event_pid0_3;
++	u32			outbound_ncq_event_pid4_7;
++	u32			outbound_tgt_ITNexus_event_pid0_3;
++	u32			outbound_tgt_ITNexus_event_pid4_7;
++	u32			outbound_tgt_ssp_event_pid0_3;
++	u32			outbound_tgt_ssp_event_pid4_7;
++	u32			outbound_tgt_smp_event_pid0_3;
++	u32			outbound_tgt_smp_event_pid4_7;
++	u32			upper_event_log_addr;
++	u32			lower_event_log_addr;
++	u32			event_log_size;
++	u32			event_log_option;
++	u32			upper_iop_event_log_addr;
++	u32			lower_iop_event_log_addr;
++	u32			iop_event_log_size;
++	u32			iop_event_log_option;
++	u32			fatal_err_interrupt;
++	u32			fatal_err_dump_offset0;
++	u32			fatal_err_dump_length0;
++	u32			fatal_err_dump_offset1;
++	u32			fatal_err_dump_length1;
++	u32			hda_mode_flag;
++	u32			anolog_setup_table_offset;
++};
++struct general_status_table {
++	u32			gst_len_mpistate;
++	u32			iq_freeze_state0;
++	u32			iq_freeze_state1;
++	u32			msgu_tcnt;
++	u32			iop_tcnt;
++	u32			reserved;
++	u32			phy_state[8];
++	u32			reserved1;
++	u32			reserved2;
++	u32			reserved3;
++	u32			recover_err_info[8];
++};
++struct inbound_queue_table {
++	u32			element_pri_size_cnt;
++	u32			upper_base_addr;
++	u32			lower_base_addr;
++	u32			ci_upper_base_addr;
++	u32			ci_lower_base_addr;
++	u32			pi_pci_bar;
++	u32			pi_offset;
++	u32			total_length;
++	void			*base_virt;
++	void			*ci_virt;
++	u32			reserved;
++	__le32			consumer_index;
++	u32			producer_idx;
++};
++struct outbound_queue_table {
++	u32			element_size_cnt;
++	u32			upper_base_addr;
++	u32			lower_base_addr;
++	void			*base_virt;
++	u32			pi_upper_base_addr;
++	u32			pi_lower_base_addr;
++	u32			ci_pci_bar;
++	u32			ci_offset;
++	u32			total_length;
++	void			*pi_virt;
++	u32			interrup_vec_cnt_delay;
++	u32			dinterrup_to_pci_offset;
++	__le32			producer_index;
++	u32			consumer_idx;
++};
++struct pm8001_hba_memspace {
++	void __iomem  		*memvirtaddr;
++	u64			membase;
++	u32			memsize;
++};
++struct pm8001_hba_info {
++	char			name[PM8001_NAME_LENGTH];
++	struct list_head	list;
++	unsigned long		flags;
++	spinlock_t		lock;/* host-wide lock */
++	struct pci_dev		*pdev;/* our device */
++	struct device		*dev;
++	struct pm8001_hba_memspace io_mem[6];
++	struct mpi_mem_req	memoryMap;
++	void __iomem	*msg_unit_tbl_addr;/*Message Unit Table Addr*/
++	void __iomem	*main_cfg_tbl_addr;/*Main Config Table Addr*/
++	void __iomem	*general_stat_tbl_addr;/*General Status Table Addr*/
++	void __iomem	*inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/
++	void __iomem	*outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/
++	struct main_cfg_table	main_cfg_tbl;
++	struct general_status_table	gs_tbl;
++	struct inbound_queue_table	inbnd_q_tbl[PM8001_MAX_INB_NUM];
++	struct outbound_queue_table	outbnd_q_tbl[PM8001_MAX_OUTB_NUM];
++	u8			sas_addr[SAS_ADDR_SIZE];
++	struct sas_ha_struct	*sas;/* SCSI/SAS glue */
++	struct Scsi_Host	*shost;
++	u32			chip_id;
++	const struct pm8001_chip_info	*chip;
++	struct completion	*nvmd_completion;
++	int			tags_num;
++	unsigned long		*tags;
++	struct pm8001_phy	phy[PM8001_MAX_PHYS];
++	struct pm8001_port	port[PM8001_MAX_PHYS];
++	u32			id;
++	u32			irq;
++	struct pm8001_device	*devices;
++	struct pm8001_ccb_info	*ccb_info;
++#ifdef PM8001_USE_MSIX
++	struct msix_entry	msix_entries[16];/*for msi-x interrupt*/
++	int			number_of_intr;/*will be used in remove()*/
++#endif
++#ifdef PM8001_USE_TASKLET
++	struct tasklet_struct	tasklet;
++#endif
++	struct list_head 	wq_list;
++	u32			logging_level;
++	u32			fw_status;
++	const struct firmware 	*fw_image;
++};
++
++struct pm8001_wq {
++	struct delayed_work work_q;
++	struct pm8001_hba_info *pm8001_ha;
++	void *data;
++	int handler;
++	struct list_head entry;
++};
++
++struct pm8001_fw_image_header {
++	u8 vender_id[8];
++	u8 product_id;
++	u8 hardware_rev;
++	u8 dest_partition;
++	u8 reserved;
++	u8 fw_rev[4];
++	__be32  image_length;
++	__be32 image_crc;
++	__be32 startup_entry;
++} __attribute__((packed, aligned(4)));
++
++/* define task management IU */
++struct pm8001_tmf_task {
++	u8	tmf;
++	u32	tag_of_task_to_be_managed;
++};
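++/*
++ * Typical use (see pm8001_lu_reset() in pm8001_sas.c):
++ *	tmf_task.tmf = TMF_LU_RESET;
++ *	pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
++ */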
++/**
++ * FW Flash Update status values
++ */
++#define FLASH_UPDATE_COMPLETE_PENDING_REBOOT	0x00
++#define FLASH_UPDATE_IN_PROGRESS		0x01
++#define FLASH_UPDATE_HDR_ERR			0x02
++#define FLASH_UPDATE_OFFSET_ERR			0x03
++#define FLASH_UPDATE_CRC_ERR			0x04
++#define FLASH_UPDATE_LENGTH_ERR			0x05
++#define FLASH_UPDATE_HW_ERR			0x06
++#define FLASH_UPDATE_DNLD_NOT_SUPPORTED		0x10
++#define FLASH_UPDATE_DISABLED			0x11
++
++/**
++ * Parameter structure for firmware flash update.
++ */
++struct fw_flash_updata_info {
++	u32			cur_image_offset;
++	u32			cur_image_len;
++	u32			total_image_len;
++	struct pm8001_prd	sgl;
++};
++
++struct fw_control_info {
++	u32			retcode;/*ret code (status)*/
++	u32			phase;/*ret code phase*/
++	u32			phaseCmplt;/*percent complete for the current
++	update phase */
++	u32			version;/*Hex encoded firmware version number*/
++	u32			offset;/*Used for downloading firmware	*/
++	u32			len; /*len of buffer*/
++	u32			size;/* Used in OS VPD and Trace get size
++	operations.*/
++	u32			reserved;/* padding required for 64 bit
++	alignment */
++	u8			buffer[1];/* Start of buffer */
++};
++struct fw_control_ex {
++	struct fw_control_info *fw_control;
++	void			*buffer;/* keep buffer pointer to be
++	freed when the response comes */
++	void			*virtAddr;/* keep virtual address of the data */
++	void			*usrAddr;/* keep virtual address of the
++	user data */
++	dma_addr_t		phys_addr;
++	u32			len; /* len of buffer  */
++	void			*payload; /* pointer to IOCTL Payload */
++	u8			inProgress;/*if 1 - the IOCTL request is in
++	progress */
++	void			*param1;
++	void			*param2;
++	void			*param3;
++};
++
++/******************** function prototype *********************/
++int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out);
++void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha);
++u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag);
++void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha, u32 ccb_idx);
++void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
++	struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx);
++int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
++	void *funcdata);
++int pm8001_slave_alloc(struct scsi_device *scsi_dev);
++int pm8001_slave_configure(struct scsi_device *sdev);
++void pm8001_scan_start(struct Scsi_Host *shost);
++int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time);
++int pm8001_queue_command(struct sas_task *task, const int num,
++	gfp_t gfp_flags);
++int pm8001_abort_task(struct sas_task *task);
++int pm8001_abort_task_set(struct domain_device *dev, u8 *lun);
++int pm8001_clear_aca(struct domain_device *dev, u8 *lun);
++int pm8001_clear_task_set(struct domain_device *dev, u8 *lun);
++int pm8001_dev_found(struct domain_device *dev);
++void pm8001_dev_gone(struct domain_device *dev);
++int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
++int pm8001_I_T_nexus_reset(struct domain_device *dev);
++int pm8001_query_task(struct sas_task *task);
++int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
++	dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo,
++	u32 mem_size, u32 align);
++
++
++/* ctl shared API */
++extern struct device_attribute *pm8001_host_attrs[];
++
++#endif
++
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index b0f0f38..161fadb 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -1586,6 +1586,8 @@
+ #define PCI_VENDOR_ID_COMPEX		0x11f6
+ #define PCI_DEVICE_ID_COMPEX_ENET100VG4	0x0112
+ 
++#define PCI_VENDOR_ID_PMC_Sierra	0x11f8
++
+ #define PCI_VENDOR_ID_RP		0x11fe
+ #define PCI_DEVICE_ID_RP32INTF		0x0001
+ #define PCI_DEVICE_ID_RP8INTF		0x0002
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0002-SCSI-pm8001-add-reinitialize-SPC-parameters-before-p.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0002-SCSI-pm8001-add-reinitialize-SPC-parameters-before-p.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,217 @@
+From: jack_wang <jack_wang at usish.com>
+Date: Thu, 5 Nov 2009 22:32:31 +0800
+Subject: [PATCH 02/25] [SCSI] pm8001: add reinitialize SPC parameters before
+ phy start
+
+commit d0b68041bdd0e5ea6dae1210541bf124443d72ec upstream.
+
+Signed-off-by: Jack Wang <jack_wang at usish.com>
+Signed-off-by: Lindar Liu <lindar_liu at usish.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/pm8001/pm8001_hwi.c |   76 ++++++++++++++++++++++++++++++++------
+ drivers/scsi/pm8001/pm8001_hwi.h |   19 +++++++++
+ drivers/scsi/pm8001/pm8001_sas.c |    1 +
+ drivers/scsi/pm8001/pm8001_sas.h |    1 +
+ 4 files changed, 85 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index aa5756f..d18c263 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -57,9 +57,9 @@ static void __devinit read_main_config_table(struct pm8001_hba_info *pm8001_ha)
+ 	pm8001_ha->main_cfg_tbl.ctrl_cap_flag	= pm8001_mr32(address, 0x14);
+ 	pm8001_ha->main_cfg_tbl.gst_offset	= pm8001_mr32(address, 0x18);
+ 	pm8001_ha->main_cfg_tbl.inbound_queue_offset =
+-		pm8001_mr32(address, 0x1C);
++		pm8001_mr32(address, MAIN_IBQ_OFFSET);
+ 	pm8001_ha->main_cfg_tbl.outbound_queue_offset =
+-		pm8001_mr32(address, 0x20);
++		pm8001_mr32(address, MAIN_OBQ_OFFSET);
+ 	pm8001_ha->main_cfg_tbl.hda_mode_flag	=
+ 		pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET);
+ 
+@@ -124,7 +124,7 @@ read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+ 	int i;
+ 	void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
+ 	for (i = 0; i < inbQ_num; i++) {
+-		u32 offset = i * 0x24;
++		u32 offset = i * 0x20;
+ 		pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
+ 		      get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
+ 		pm8001_ha->inbnd_q_tbl[i].pi_offset =
+@@ -231,7 +231,7 @@ init_default_table_values(struct pm8001_hba_info *pm8001_ha)
+ 		pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr	=
+ 			pm8001_ha->memoryMap.region[PI].phys_addr_lo;
+ 		pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay	=
+-			0 | (0 << 16) | (0 << 24);
++			0 | (10 << 16) | (0 << 24);
+ 		pm8001_ha->outbnd_q_tbl[i].pi_virt		=
+ 			pm8001_ha->memoryMap.region[PI].virt_ptr;
+ 		offsetob = i * 0x24;
+@@ -375,13 +375,16 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
+ {
+ 	u32 offset;
+ 	u32 value;
+-	u32 i;
++	u32 i, j;
++	u32 bit_cnt;
+ 
+ #define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
+ #define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
+ #define SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET 0x1074
+ #define SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET 0x1074
+-#define PHY_SSC_BIT_SHIFT 13
++#define PHY_G3_WITHOUT_SSC_BIT_SHIFT 12
++#define PHY_G3_WITH_SSC_BIT_SHIFT 13
++#define SNW3_PHY_CAPABILITIES_PARITY 31
+ 
+    /*
+     * Using shifted destination address 0x3_0000:0x1074 + 0x4000*N (N=0:3)
+@@ -393,10 +396,22 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
+ 	for (i = 0; i < 4; i++) {
+ 		offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
+ 		value = pm8001_cr32(pm8001_ha, 2, offset);
+-		if (SSCbit)
+-			value = value | (0x00000001 << PHY_SSC_BIT_SHIFT);
++		if (SSCbit) {
++			value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
++			value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
++		} else {
++			value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
++			value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
++		}
++		bit_cnt = 0;
++		for (j = 0; j < 31; j++)
++			if ((value >> j) & 0x00000001)
++				bit_cnt++;
++		if (bit_cnt % 2)
++			value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
+ 		else
+-			value = value & (~(0x00000001<<PHY_SSC_BIT_SHIFT));
++			value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
++
+ 		pm8001_cw32(pm8001_ha, 2, offset, value);
+ 	}
+ 
+@@ -408,10 +423,22 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
+ 	for (i = 4; i < 8; i++) {
+ 		offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
+ 		value = pm8001_cr32(pm8001_ha, 2, offset);
+-		if (SSCbit)
+-			value = value | (0x00000001 << PHY_SSC_BIT_SHIFT);
++		if (SSCbit) {
++			value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
++			value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
++		} else {
++			value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
++			value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
++		}
++		bit_cnt = 0;
++		for (j = 0; j < 31; j++)
++			if ((value >> j) & 0x00000001)
++				bit_cnt++;
++		if (bit_cnt % 2)
++			value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
+ 		else
+-			value = value & (~(0x00000001<<PHY_SSC_BIT_SHIFT));
++			value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
++
+ 		pm8001_cw32(pm8001_ha, 2, offset, value);
+ 	}
+ 
+@@ -4338,6 +4365,30 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
+ 	payload.nds = cpu_to_le32(state);
+ 	mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+ 	return 0;
++}
++
++static int
++pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
++{
++	struct sas_re_initialization_req payload;
++	struct inbound_queue_table *circularQ;
++	struct pm8001_ccb_info *ccb;
++	int rc;
++	u32 tag;
++	u32 opc = OPC_INB_SAS_RE_INITIALIZE;
++	memset(&payload, 0, sizeof(payload));
++	rc = pm8001_tag_alloc(pm8001_ha, &tag);
++	if (rc)
++		return -1;
++	ccb = &pm8001_ha->ccb_info[tag];
++	ccb->ccb_tag = tag;
++	circularQ = &pm8001_ha->inbnd_q_tbl[0];
++	payload.tag = cpu_to_le32(tag);
++	payload.SSAHOLT = cpu_to_le32(0xd << 25);
++	payload.sata_hol_tmo = cpu_to_le32(80);
++	payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
++	rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
++	return rc;
+ 
+ }
+ 
+@@ -4367,5 +4418,6 @@ const struct pm8001_dispatch pm8001_8001_dispatch = {
+ 	.set_nvmd_req		= pm8001_chip_set_nvmd_req,
+ 	.fw_flash_update_req	= pm8001_chip_fw_flash_update_req,
+ 	.set_dev_state_req	= pm8001_chip_set_dev_state_req,
++	.sas_re_init_req	= pm8001_chip_sas_re_initialization,
+ };
+ 
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
+index 3690a2b..96e4daa 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.h
++++ b/drivers/scsi/pm8001/pm8001_hwi.h
+@@ -490,6 +490,25 @@ struct set_dev_state_req {
+ 	u32	reserved[12];
+ } __attribute__((packed, aligned(4)));
+ 
++/*
++ * brief the data structure of sas_re_initialization
++ */
++struct sas_re_initialization_req {
++
++	__le32	tag;
++	__le32	SSAHOLT;/* bit29-set max port;
++			** bit28-set open reject cmd retries.
++			** bit27-set open reject data retries.
++			** bit26-set open reject option, remap:1 or not:0.
++			** bit25-set sata head of line time out.
++			*/
++	__le32 reserved_maxPorts;
++	__le32 open_reject_cmdretries_data_retries;/* cmd retries: 31-bit16;
++						    * data retries: bit15-bit0.
++						    */
++	__le32	sata_hol_tmo;
++	u32	reserved1[10];
++} __attribute__((packed, aligned(4)));
+ 
+ /*
+  * brief the data structure of SATA Start Command
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index 7bf30fa..1e840fd 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -240,6 +240,7 @@ void pm8001_scan_start(struct Scsi_Host *shost)
+ 	struct pm8001_hba_info *pm8001_ha;
+ 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ 	pm8001_ha = sha->lldd_ha;
++	PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
+ 	for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
+ 		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
+ }
+diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
+index 14c676b..ed6dbd1 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.h
++++ b/drivers/scsi/pm8001/pm8001_sas.h
+@@ -153,6 +153,7 @@ struct pm8001_dispatch {
+ 		u32 state);
+ 	int (*sas_diag_execute_req)(struct pm8001_hba_info *pm8001_ha,
+ 		u32 state);
++	int (*sas_re_init_req)(struct pm8001_hba_info *pm8001_ha);
+ };
+ 
+ struct pm8001_chip_info {
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0003-SCSI-pm8001-enhance-IOMB-process-modules.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0003-SCSI-pm8001-enhance-IOMB-process-modules.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,818 @@
+From: jack_wang <jack_wang at usish.com>
+Date: Thu, 5 Nov 2009 22:33:35 +0800
+Subject: [PATCH 03/25] [SCSI] pm8001: enhance IOMB process modules
+
+commit 72d0baa089ebd058cdb8b87fde835e9157c4597a upstream.
+
+We set the interrupt cascading count of the outbound queue to get better
+performance, correct some unnecessary return values, and clean up some
+noisy print messages.  Patch attached.
+
+Signed-off-by: Jack Wang <jack_wang at usish.com>
+Signed-off-by: Lindar Liu <lindar_liu at usish.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/pm8001/pm8001_hwi.c |  245 ++++++++++++++++++++++----------------
+ drivers/scsi/pm8001/pm8001_sas.h |    2 +-
+ 2 files changed, 141 insertions(+), 106 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index d18c263..a3de306 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -341,7 +341,7 @@ update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number)
+  * @pm8001_ha : our hba card infomation
+  * @shiftValue : shifting value in memory bar.
+  */
+-static u32 bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
++static int bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
+ {
+ 	u32 regVal;
+ 	u32 max_wait_count;
+@@ -1217,7 +1217,7 @@ pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
+  * @messageSize: the message size of this transfer, normally it is 64 bytes
+  * @messagePtr: the pointer to message.
+  */
+-static u32 mpi_msg_free_get(struct inbound_queue_table *circularQ,
++static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
+ 			    u16 messageSize, void **messagePtr)
+ {
+ 	u32 offset, consumer_index;
+@@ -1257,7 +1257,7 @@ static u32 mpi_msg_free_get(struct inbound_queue_table *circularQ,
+  * @opCode: the operation code represents commands which LLDD and fw recognized.
+  * @payload: the command payload of each operation command.
+  */
+-static u32 mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
++static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
+ 			 struct inbound_queue_table *circularQ,
+ 			 u32 opCode, void *payload)
+ {
+@@ -1270,7 +1270,7 @@ static u32 mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
+ 			pm8001_printk("No free mpi buffer \n"));
+ 		return -1;
+ 	}
+-
++	BUG_ON(!payload);
+ 	/*Copy to the payload*/
+ 	memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr)));
+ 
+@@ -1289,10 +1289,30 @@ static u32 mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
+ 	return 0;
+ }
+ 
+-static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha,
++static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
+ 			    struct outbound_queue_table *circularQ, u8 bc)
+ {
+ 	u32 producer_index;
++	struct mpi_msg_hdr *msgHeader;
++	struct mpi_msg_hdr *pOutBoundMsgHeader;
++
++	msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr));
++	pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt +
++				circularQ->consumer_idx * 64);
++	if (pOutBoundMsgHeader != msgHeader) {
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("consumer_idx = %d msgHeader = %p\n",
++			circularQ->consumer_idx, msgHeader));
++
++		/* Update the producer index from SPC */
++		producer_index = pm8001_read_32(circularQ->pi_virt);
++		circularQ->producer_index = cpu_to_le32(producer_index);
++		PM8001_FAIL_DBG(pm8001_ha,
++			pm8001_printk("consumer_idx = %d producer_index = %d"
++			"msgHeader = %p\n", circularQ->consumer_idx,
++			circularQ->producer_index, msgHeader));
++		return 0;
++	}
+ 	/* free the circular queue buffer elements associated with the message*/
+ 	circularQ->consumer_idx = (circularQ->consumer_idx + bc) % 256;
+ 	/* update the CI of outbound queue */
+@@ -1324,8 +1344,6 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
+ 	do {
+ 		/* If there are not-yet-delivered messages ... */
+ 		if (circularQ->producer_index != circularQ->consumer_idx) {
+-			PM8001_IO_DBG(pm8001_ha,
+-				pm8001_printk("process an IOMB\n"));
+ 			/*Get the pointer to the circular queue buffer element*/
+ 			msgHeader = (struct mpi_msg_hdr *)
+ 				(circularQ->base_virt +
+@@ -1342,34 +1360,43 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
+ 					*pBC = (u8)((msgHeader_tmp >> 24) &
+ 						0x1f);
+ 					PM8001_IO_DBG(pm8001_ha,
+-						pm8001_printk("mpi_msg_consume"
+-						": CI=%d PI=%d msgHeader=%x\n",
++						pm8001_printk(": CI=%d PI=%d "
++						"msgHeader=%x\n",
+ 						circularQ->consumer_idx,
+ 						circularQ->producer_index,
+ 						msgHeader_tmp));
+ 					return MPI_IO_STATUS_SUCCESS;
+ 				} else {
+-					u32 producer_index;
+-					void *pi_virt = circularQ->pi_virt;
+-					/* free the circular queue buffer
+-					elements associated with the message*/
+ 					circularQ->consumer_idx =
+ 						(circularQ->consumer_idx +
+ 						((msgHeader_tmp >> 24) & 0x1f))
+ 						% 256;
++					msgHeader_tmp = 0;
++					pm8001_write_32(msgHeader, 0, 0);
+ 					/* update the CI of outbound queue */
+ 					pm8001_cw32(pm8001_ha,
+ 						circularQ->ci_pci_bar,
+ 						circularQ->ci_offset,
+ 						circularQ->consumer_idx);
+-					/* Update the producer index from SPC */
+-					producer_index =
+-						pm8001_read_32(pi_virt);
+-					circularQ->producer_index =
+-						cpu_to_le32(producer_index);
+ 				}
+-			} else
++			} else {
++				circularQ->consumer_idx =
++					(circularQ->consumer_idx +
++					((msgHeader_tmp >> 24) & 0x1f)) % 256;
++				msgHeader_tmp = 0;
++				pm8001_write_32(msgHeader, 0, 0);
++				/* update the CI of outbound queue */
++				pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar,
++					circularQ->ci_offset,
++					circularQ->consumer_idx);
+ 				return MPI_IO_STATUS_FAIL;
++			}
++		} else {
++			u32 producer_index;
++			void *pi_virt = circularQ->pi_virt;
++			/* Update the producer index from SPC */
++			producer_index = pm8001_read_32(pi_virt);
++			circularQ->producer_index = cpu_to_le32(producer_index);
+ 		}
+ 	} while (circularQ->producer_index != circularQ->consumer_idx);
+ 	/* while we don't have any more not-yet-delivered message */
+@@ -1441,7 +1468,7 @@ static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
+  * So we will tell the caller who maybe waiting the result to tell upper layer
+  * that the task has been finished.
+  */
+-static int
++static void
+ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
+ {
+ 	struct sas_task *t;
+@@ -1461,14 +1488,13 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
+ 	pm8001_dev = ccb->device;
+ 	param = le32_to_cpu(psspPayload->param);
+ 
+-	PM8001_IO_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SSP_COMP\n"));
+ 	t = ccb->task;
+ 
+-	if (status)
++	if (status && status != IO_UNDERFLOW)
+ 		PM8001_FAIL_DBG(pm8001_ha,
+ 			pm8001_printk("sas IO status 0x%x\n", status));
+ 	if (unlikely(!t || !t->lldd_task || !t->dev))
+-		return -1;
++		return;
+ 	ts = &t->task_status;
+ 	switch (status) {
+ 	case IO_SUCCESS:
+@@ -1541,7 +1567,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
+ 			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ 		ts->resp = SAS_TASK_COMPLETE;
+ 		ts->stat = SAS_OPEN_REJECT;
+-		ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
++		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ 		break;
+ 	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+ 		PM8001_IO_DBG(pm8001_ha,
+@@ -1581,6 +1607,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
+ 			pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ 		ts->resp = SAS_TASK_COMPLETE;
+ 		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ 		break;
+ 	case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+ 		PM8001_IO_DBG(pm8001_ha,
+@@ -1656,7 +1683,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
+ 		break;
+ 	}
+ 	PM8001_IO_DBG(pm8001_ha,
+-		pm8001_printk("scsi_satus = %x \n ",
++		pm8001_printk("scsi_status = %x \n ",
+ 		psspPayload->ssp_resp_iu.status));
+ 	spin_lock_irqsave(&t->task_state_lock, flags);
+ 	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+@@ -1675,11 +1702,10 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
+ 		mb();/* in order to force CPU ordering */
+ 		t->task_done(t);
+ 	}
+-	return 0;
+ }
+ 
+ /*See the comments for mpi_ssp_completion */
+-static int mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
++static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+ {
+ 	struct sas_task *t;
+ 	unsigned long flags;
+@@ -1700,7 +1726,7 @@ static int mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+ 		PM8001_FAIL_DBG(pm8001_ha,
+ 			pm8001_printk("sas IO status 0x%x\n", event));
+ 	if (unlikely(!t || !t->lldd_task || !t->dev))
+-		return -1;
++		return;
+ 	ts = &t->task_status;
+ 	PM8001_IO_DBG(pm8001_ha,
+ 		pm8001_printk("port_id = %x,device_id = %x\n",
+@@ -1747,7 +1773,7 @@ static int mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+ 			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+ 		ts->resp = SAS_TASK_COMPLETE;
+ 		ts->stat = SAS_OPEN_REJECT;
+-		ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
++		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ 		break;
+ 	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+ 		PM8001_IO_DBG(pm8001_ha,
+@@ -1787,6 +1813,7 @@ static int mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+ 			pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+ 		ts->resp = SAS_TASK_COMPLETE;
+ 		ts->stat = SAS_OPEN_REJECT;
++		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ 		break;
+ 	case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+ 		PM8001_IO_DBG(pm8001_ha,
+@@ -1840,7 +1867,7 @@ static int mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+ 	case IO_XFER_CMD_FRAME_ISSUED:
+ 		PM8001_IO_DBG(pm8001_ha,
+ 			pm8001_printk("  IO_XFER_CMD_FRAME_ISSUED\n"));
+-		return 0;
++		return;
+ 	default:
+ 		PM8001_IO_DBG(pm8001_ha,
+ 			pm8001_printk("Unknown status 0x%x\n", event));
+@@ -1866,11 +1893,10 @@ static int mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+ 		mb();/* in order to force CPU ordering */
+ 		t->task_done(t);
+ 	}
+-	return 0;
+ }
+ 
+ /*See the comments for mpi_ssp_completion */
+-static int
++static void
+ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ {
+ 	struct sas_task *t;
+@@ -1898,7 +1924,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 		PM8001_FAIL_DBG(pm8001_ha,
+ 			pm8001_printk("sata IO status 0x%x\n", status));
+ 	if (unlikely(!t || !t->lldd_task || !t->dev))
+-		return -1;
++		return;
+ 
+ 	switch (status) {
+ 	case IO_SUCCESS:
+@@ -2015,7 +2041,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ 			mb();/*in order to force CPU ordering*/
+ 			t->task_done(t);
+-			return 0;
++			return;
+ 		}
+ 		break;
+ 	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+@@ -2033,7 +2059,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ 			mb();/*ditto*/
+ 			t->task_done(t);
+-			return 0;
++			return;
+ 		}
+ 		break;
+ 	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+@@ -2059,7 +2085,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ 			mb();/* ditto*/
+ 			t->task_done(t);
+-			return 0;
++			return;
+ 		}
+ 		break;
+ 	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+@@ -2124,7 +2150,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ 			mb();/*ditto*/
+ 			t->task_done(t);
+-			return 0;
++			return;
+ 		}
+ 		break;
+ 	case IO_DS_IN_RECOVERY:
+@@ -2146,7 +2172,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ 			mb();/*ditto*/
+ 			t->task_done(t);
+-			return 0;
++			return;
+ 		}
+ 		break;
+ 	case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+@@ -2180,11 +2206,10 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 		mb();/* ditto */
+ 		t->task_done(t);
+ 	}
+-	return 0;
+ }
+ 
+ /*See the comments for mpi_ssp_completion */
+-static int mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
++static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+ {
+ 	struct sas_task *t;
+ 	unsigned long flags;
+@@ -2205,7 +2230,7 @@ static int mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+ 		PM8001_FAIL_DBG(pm8001_ha,
+ 			pm8001_printk("sata IO status 0x%x\n", event));
+ 	if (unlikely(!t || !t->lldd_task || !t->dev))
+-		return -1;
++		return;
+ 	ts = &t->task_status;
+ 	PM8001_IO_DBG(pm8001_ha,
+ 		pm8001_printk("port_id = %x,device_id = %x\n",
+@@ -2268,7 +2293,7 @@ static int mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+ 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ 			mb();/*ditto*/
+ 			t->task_done(t);
+-			return 0;
++			return;
+ 		}
+ 		break;
+ 	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+@@ -2382,11 +2407,10 @@ static int mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+ 		mb();/* in order to force CPU ordering */
+ 		t->task_done(t);
+ 	}
+-	return 0;
+ }
+ 
+ /*See the comments for mpi_ssp_completion */
+-static int
++static void
+ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ {
+ 	u32 param;
+@@ -2412,7 +2436,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 		PM8001_FAIL_DBG(pm8001_ha,
+ 			pm8001_printk("smp IO status 0x%x\n", status));
+ 	if (unlikely(!t || !t->lldd_task || !t->dev))
+-		return -1;
++		return;
+ 
+ 	switch (status) {
+ 	case IO_SUCCESS:
+@@ -2585,7 +2609,6 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 		mb();/* in order to force CPU ordering */
+ 		t->task_done(t);
+ 	}
+-	return 0;
+ }
+ 
+ static void
+@@ -2682,8 +2705,8 @@ mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 			pm8001_printk("Get NVMD success, IR=0, dataLen=%d\n",
+ 			(dlen_status & NVMD_LEN) >> 24));
+ 	}
+-	memcpy((void *)(fw_control_context->usrAddr),
+-		(void *)(pm8001_ha->memoryMap.region[NVMD].virt_ptr),
++	memcpy(fw_control_context->usrAddr,
++		pm8001_ha->memoryMap.region[NVMD].virt_ptr,
+ 		fw_control_context->len);
+ 	complete(pm8001_ha->nvmd_completion);
+ 	ccb->task = NULL;
+@@ -3184,28 +3207,28 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 		(struct task_abort_resp *)(piomb + 4);
+ 	ccb = &pm8001_ha->ccb_info[pPayload->tag];
+ 	t = ccb->task;
+-	ts = &t->task_status;
+ 
+-	if (t == NULL)
+-		return -1;
+ 
+ 	status = le32_to_cpu(pPayload->status);
+ 	tag = le32_to_cpu(pPayload->tag);
+ 	scp = le32_to_cpu(pPayload->scp);
+ 	PM8001_IO_DBG(pm8001_ha,
+ 		pm8001_printk(" status = 0x%x\n", status));
++	if (t == NULL)
++		return -1;
++	ts = &t->task_status;
+ 	if (status != 0)
+ 		PM8001_FAIL_DBG(pm8001_ha,
+-			pm8001_printk("task abort failed tag = 0x%x,"
+-			" scp= 0x%x\n", tag, scp));
++			pm8001_printk("task abort failed status 0x%x ,"
++			"tag = 0x%x, scp= 0x%x\n", status, tag, scp));
+ 	switch (status) {
+ 	case IO_SUCCESS:
+-		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
++		PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+ 		ts->resp = SAS_TASK_COMPLETE;
+ 		ts->stat = SAM_GOOD;
+ 		break;
+ 	case IO_NOT_VALID:
+-		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NOT_VALID\n"));
++		PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_NOT_VALID\n"));
+ 		ts->resp = TMF_RESP_FUNC_FAILED;
+ 		break;
+ 	}
+@@ -3443,7 +3466,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 	u32 pHeader = (u32)*(u32 *)piomb;
+ 	u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF);
+ 
+-	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:\n"));
++	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:"));
+ 
+ 	switch (opc) {
+ 	case OPC_OUB_ECHO:
+@@ -3609,17 +3632,16 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha)
+ 	struct outbound_queue_table *circularQ;
+ 	void *pMsg1 = NULL;
+ 	u8 bc = 0;
+-	u32 ret = MPI_IO_STATUS_FAIL, processedMsgCount = 0;
++	u32 ret = MPI_IO_STATUS_FAIL;
+ 
+ 	circularQ = &pm8001_ha->outbnd_q_tbl[0];
+ 	do {
+ 		ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
+ 		if (MPI_IO_STATUS_SUCCESS == ret) {
+ 			/* process the outbound message */
+-			process_one_iomb(pm8001_ha, (void *)((u8 *)pMsg1 - 4));
++			process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
+ 			/* free the message from the outbound circular buffer */
+-			mpi_msg_free_set(pm8001_ha, circularQ, bc);
+-			processedMsgCount++;
++			mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc);
+ 		}
+ 		if (MPI_IO_STATUS_BUSY == ret) {
+ 			u32 producer_idx;
+@@ -3631,8 +3653,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha)
+ 				/* OQ is empty */
+ 				break;
+ 		}
+-	} while (100 > processedMsgCount);/*end message processing if hit the
+-	count*/
++	} while (1);
+ 	return ret;
+ }
+ 
+@@ -3743,6 +3764,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
+ 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
+ 	struct ssp_ini_io_start_req ssp_cmd;
+ 	u32 tag = ccb->ccb_tag;
++	int ret;
+ 	__le64 phys_addr;
+ 	struct inbound_queue_table *circularQ;
+ 	u32 opc = OPC_INB_SSPINIIOSTART;
+@@ -3780,8 +3802,8 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
+ 		ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
+ 		ssp_cmd.esgl = 0;
+ 	}
+-	mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd);
+-	return 0;
++	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd);
++	return ret;
+ }
+ 
+ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
+@@ -3791,6 +3813,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
+ 	struct domain_device *dev = task->dev;
+ 	struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
+ 	u32 tag = ccb->ccb_tag;
++	int ret;
+ 	struct sata_start_req sata_cmd;
+ 	u32 hdr_tag, ncg_tag = 0;
+ 	__le64 phys_addr;
+@@ -3849,8 +3872,8 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
+ 		sata_cmd.len = cpu_to_le32(task->total_xfer_len);
+ 		sata_cmd.esgl = 0;
+ 	}
+-	mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd);
+-	return 0;
++	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd);
++	return ret;
+ }
+ 
+ /**
+@@ -3864,6 +3887,7 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
+ {
+ 	struct phy_start_req payload;
+ 	struct inbound_queue_table *circularQ;
++	int ret;
+ 	u32 tag = 0x01;
+ 	u32 opcode = OPC_INB_PHYSTART;
+ 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+@@ -3883,8 +3907,8 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
+ 	memcpy(payload.sas_identify.sas_addr,
+ 		pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+ 	payload.sas_identify.phy_id = phy_id;
+-	mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
+-	return 0;
++	ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
++	return ret;
+ }
+ 
+ /**
+@@ -3898,14 +3922,15 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
+ {
+ 	struct phy_stop_req payload;
+ 	struct inbound_queue_table *circularQ;
++	int ret;
+ 	u32 tag = 0x01;
+ 	u32 opcode = OPC_INB_PHYSTOP;
+ 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ 	memset(&payload, 0, sizeof(payload));
+ 	payload.tag = cpu_to_le32(tag);
+ 	payload.phy_id = cpu_to_le32(phy_id);
+-	mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
+-	return 0;
++	ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
++	return ret;
+ }
+ 
+ /**
+@@ -3919,7 +3944,7 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
+ 	u32 stp_sspsmp_sata = 0x4;
+ 	struct inbound_queue_table *circularQ;
+ 	u32 linkrate, phy_id;
+-	u32 rc, tag = 0xdeadbeef;
++	int rc, tag = 0xdeadbeef;
+ 	struct pm8001_ccb_info *ccb;
+ 	u8 retryFlag = 0x1;
+ 	u16 firstBurstSize = 0;
+@@ -3963,8 +3988,8 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
+ 		cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
+ 	memcpy(&payload.sas_addr_hi, pm8001_dev->sas_device->sas_addr,
+ 		SAS_ADDR_SIZE);
+-	mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+-	return 0;
++	rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
++	return rc;
+ }
+ 
+ /**
+@@ -3975,16 +4000,17 @@ static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
+ {
+ 	struct dereg_dev_req payload;
+ 	u32 opc = OPC_INB_DEREG_DEV_HANDLE;
++	int ret;
+ 	struct inbound_queue_table *circularQ;
+ 
+ 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+-	memset((u8 *)&payload, 0, sizeof(payload));
++	memset(&payload, 0, sizeof(payload));
+ 	payload.tag = 1;
+ 	payload.device_id = cpu_to_le32(device_id);
+ 	PM8001_MSG_DBG(pm8001_ha,
+ 		pm8001_printk("unregister device device_id = %d\n", device_id));
+-	mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+-	return 0;
++	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
++	return ret;
+ }
+ 
+ /**
+@@ -3999,14 +4025,15 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+ {
+ 	struct local_phy_ctl_req payload;
+ 	struct inbound_queue_table *circularQ;
++	int ret;
+ 	u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
+ 	memset((u8 *)&payload, 0, sizeof(payload));
+ 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ 	payload.tag = 1;
+ 	payload.phyop_phyid =
+ 		cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
+-	mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+-	return 0;
++	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
++	return ret;
+ }
+ 
+ static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
+@@ -4028,12 +4055,16 @@ static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
+  * @irq: irq number.
+  * @stat: stat.
+  */
+-static void
++static irqreturn_t
+ pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha)
+ {
++	unsigned long flags;
++	spin_lock_irqsave(&pm8001_ha->lock, flags);
+ 	pm8001_chip_interrupt_disable(pm8001_ha);
+ 	process_oq(pm8001_ha);
+ 	pm8001_chip_interrupt_enable(pm8001_ha);
++	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
++	return IRQ_HANDLED;
+ }
+ 
+ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
+@@ -4041,7 +4072,7 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
+ {
+ 	struct task_abort_req task_abort;
+ 	struct inbound_queue_table *circularQ;
+-
++	int ret;
+ 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ 	memset(&task_abort, 0, sizeof(task_abort));
+ 	if (ABORT_SINGLE == (flag & ABORT_MASK)) {
+@@ -4054,8 +4085,8 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
+ 		task_abort.device_id = cpu_to_le32(dev_id);
+ 		task_abort.tag = cpu_to_le32(cmd_tag);
+ 	}
+-	mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort);
+-	return 0;
++	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort);
++	return ret;
+ }
+ 
+ /**
+@@ -4068,7 +4099,8 @@ static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
+ {
+ 	u32 opc, device_id;
+ 	int rc = TMF_RESP_FUNC_FAILED;
+-	PM8001_IO_DBG(pm8001_ha, pm8001_printk("Abort tag[%x]", task_tag));
++	PM8001_EH_DBG(pm8001_ha, pm8001_printk("cmd_tag = %x, abort task tag"
++		" = %x", cmd_tag, task_tag));
+ 	if (pm8001_dev->dev_type == SAS_END_DEV)
+ 		opc = OPC_INB_SSP_ABORT;
+ 	else if (pm8001_dev->dev_type == SATA_DEV)
+@@ -4079,7 +4111,7 @@ static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
+ 	rc = send_task_abort(pm8001_ha, opc, device_id, flag,
+ 		task_tag, cmd_tag);
+ 	if (rc != TMF_RESP_FUNC_COMPLETE)
+-		PM8001_IO_DBG(pm8001_ha, pm8001_printk("rc= %d\n", rc));
++		PM8001_EH_DBG(pm8001_ha, pm8001_printk("rc= %d\n", rc));
+ 	return rc;
+ }
+ 
+@@ -4098,17 +4130,17 @@ static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
+ 	u32 opc = OPC_INB_SSPINITMSTART;
+ 	struct inbound_queue_table *circularQ;
+ 	struct ssp_ini_tm_start_req sspTMCmd;
++	int ret;
+ 
+ 	memset(&sspTMCmd, 0, sizeof(sspTMCmd));
+ 	sspTMCmd.device_id = cpu_to_le32(pm8001_dev->device_id);
+ 	sspTMCmd.relate_tag = cpu_to_le32(tmf->tag_of_task_to_be_managed);
+ 	sspTMCmd.tmf = cpu_to_le32(tmf->tmf);
+-	sspTMCmd.ds_ads_m = cpu_to_le32(1 << 2);
+ 	memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
+ 	sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
+ 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+-	mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd);
+-	return 0;
++	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd);
++	return ret;
+ }
+ 
+ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+@@ -4116,7 +4148,7 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+ {
+ 	u32 opc = OPC_INB_GET_NVMD_DATA;
+ 	u32 nvmd_type;
+-	u32 rc;
++	int rc;
+ 	u32 tag;
+ 	struct pm8001_ccb_info *ccb;
+ 	struct inbound_queue_table *circularQ;
+@@ -4183,8 +4215,8 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+ 	default:
+ 		break;
+ 	}
+-	mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
+-	return 0;
++	rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
++	return rc;
+ }
+ 
+ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+@@ -4192,7 +4224,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+ {
+ 	u32 opc = OPC_INB_SET_NVMD_DATA;
+ 	u32 nvmd_type;
+-	u32 rc;
++	int rc;
+ 	u32 tag;
+ 	struct pm8001_ccb_info *ccb;
+ 	struct inbound_queue_table *circularQ;
+@@ -4259,8 +4291,8 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+ 	default:
+ 		break;
+ 	}
+-	mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
+-	return 0;
++	rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
++	return rc;
+ }
+ 
+ /**
+@@ -4275,9 +4307,10 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
+ 	struct fw_flash_Update_req payload;
+ 	struct fw_flash_updata_info *info;
+ 	struct inbound_queue_table *circularQ;
++	int ret;
+ 	u32 opc = OPC_INB_FW_FLASH_UPDATE;
+ 
+-	memset((u8 *)&payload, 0, sizeof(struct fw_flash_Update_req));
++	memset(&payload, 0, sizeof(struct fw_flash_Update_req));
+ 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ 	info = fw_flash_updata_info;
+ 	payload.tag = cpu_to_le32(tag);
+@@ -4287,8 +4320,8 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
+ 	payload.len = info->sgl.im_len.len ;
+ 	payload.sgl_addr_lo = lower_32_bits(info->sgl.addr);
+ 	payload.sgl_addr_hi = upper_32_bits(info->sgl.addr);
+-	mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+-	return 0;
++	ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
++	return ret;
+ }
+ 
+ static int
+@@ -4298,7 +4331,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
+ 	struct fw_flash_updata_info flash_update_info;
+ 	struct fw_control_info *fw_control;
+ 	struct fw_control_ex *fw_control_context;
+-	u32 rc;
++	int rc;
+ 	u32 tag;
+ 	struct pm8001_ccb_info *ccb;
+ 	void *buffer = NULL;
+@@ -4321,8 +4354,8 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
+ 				return -ENOMEM;
+ 		}
+ 	}
+-	memset((void *)buffer, 0, fw_control->len);
+-	memcpy((void *)buffer, fw_control->buffer, fw_control->len);
++	memset(buffer, 0, fw_control->len);
++	memcpy(buffer, fw_control->buffer, fw_control->len);
+ 	flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
+ 	flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
+ 	flash_update_info.sgl.im_len.e = 0;
+@@ -4338,8 +4371,9 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
+ 	ccb = &pm8001_ha->ccb_info[tag];
+ 	ccb->fw_control_context = fw_control_context;
+ 	ccb->ccb_tag = tag;
+-	pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info, tag);
+-	return 0;
++	rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info,
++		tag);
++	return rc;
+ }
+ 
+ static int
+@@ -4349,10 +4383,10 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
+ 	struct set_dev_state_req payload;
+ 	struct inbound_queue_table *circularQ;
+ 	struct pm8001_ccb_info *ccb;
+-	u32 rc;
++	int rc;
+ 	u32 tag;
+ 	u32 opc = OPC_INB_SET_DEVICE_STATE;
+-	memset((u8 *)&payload, 0, sizeof(payload));
++	memset(&payload, 0, sizeof(payload));
+ 	rc = pm8001_tag_alloc(pm8001_ha, &tag);
+ 	if (rc)
+ 		return -1;
+@@ -4363,8 +4397,9 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
+ 	payload.tag = cpu_to_le32(tag);
+ 	payload.device_id = cpu_to_le32(pm8001_dev->device_id);
+ 	payload.nds = cpu_to_le32(state);
+-	mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+-	return 0;
++	rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
++	return rc;
++
+ }
+ 
+ static int
+diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
+index ed6dbd1..30f2ede 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.h
++++ b/drivers/scsi/pm8001/pm8001_sas.h
+@@ -119,7 +119,7 @@ struct pm8001_dispatch {
+ 	void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
+ 	int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
+ 	void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha);
+-	void (*isr)(struct pm8001_hba_info *pm8001_ha);
++	irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha);
+ 	u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha);
+ 	int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha);
+ 	void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha);
+-- 
+1.7.4.4
+

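The hunks above make two mechanical changes throughout the driver: outbound
completion handlers whose return value no caller checked become void, and every
request builder now propagates the status of mpi_build_cmd() instead of
returning a hard-coded 0. A minimal userspace sketch of the second change; the
types and the -1 failure code are illustrative stand-ins, and only the control
flow mirrors the patch:

#include <stdio.h>

/* Hypothetical stand-ins for the driver types. */
struct queue { int full; };

static int build_cmd(struct queue *q)
{
	return q->full ? -1 : 0;	/* -1: no free slot in the inbound queue */
}

/* Before the patch: the builder's status was dropped and 0 returned. */
static int send_req_old(struct queue *q)
{
	build_cmd(q);
	return 0;		/* caller never sees a queue-full failure */
}

/* After the patch: the status propagates to the caller. */
static int send_req_new(struct queue *q)
{
	int ret = build_cmd(q);
	return ret;
}

int main(void)
{
	struct queue q = { .full = 1 };
	printf("old: %d, new: %d\n", send_req_old(&q), send_req_new(&q));
	return 0;
}

The point of the change is that a queue-full or build failure is no longer
silently reported to the caller as success.
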
Added: dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0004-SCSI-pm8001-Fixes-for-tag-alloc-error-goto-and-code-.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0004-SCSI-pm8001-Fixes-for-tag-alloc-error-goto-and-code-.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,224 @@
+From: jack_wang <jack_wang at usish.com>
+Date: Thu, 5 Nov 2009 22:33:51 +0800
+Subject: [PATCH 04/25] [SCSI] pm8001: Fixes for tag alloc, error goto and
+ code cleanup
+
+commit 97ee20886cfd257a7818087c1638ca60b9ffd192 upstream.
+
+Allocate the right size for the tag bitmap, fix an error goto, and clean up
+print messages and unclear comments. Patch attached.
+
+Signed-off-by: Lindar Liu <lindar_liu at usish.com>
+Signed-off-by: Jack Wang <jack_wang at usish.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/pm8001/pm8001_init.c |   11 ++++--
+ drivers/scsi/pm8001/pm8001_sas.c  |   60 ++++++++++++++++++-------------------
+ 2 files changed, 36 insertions(+), 35 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
+index 811b5d3..42ebe72 100644
+--- a/drivers/scsi/pm8001/pm8001_init.c
++++ b/drivers/scsi/pm8001/pm8001_init.c
+@@ -203,9 +203,9 @@ static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
+ 	for (i = 0; i < pm8001_ha->chip->n_phy; i++)
+ 		pm8001_phy_init(pm8001_ha, i);
+ 
+-	pm8001_ha->tags = kmalloc(sizeof(*pm8001_ha->tags)*PM8001_MAX_DEVICES,
+-		GFP_KERNEL);
+-
++	pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL);
++	if (!pm8001_ha->tags)
++		goto err_out;
+ 	/* MPI Memory region 1 for AAP Event Log for fw */
+ 	pm8001_ha->memoryMap.region[AAP1].num_elements = 1;
+ 	pm8001_ha->memoryMap.region[AAP1].element_size = PM8001_EVENT_LOG_SIZE;
+@@ -287,6 +287,9 @@ static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
+ 		pm8001_ha->ccb_info[i].ccb_dma_handle =
+ 			pm8001_ha->memoryMap.region[CCB_MEM].phys_addr +
+ 			i * sizeof(struct pm8001_ccb_info);
++		pm8001_ha->ccb_info[i].task = NULL;
++		pm8001_ha->ccb_info[i].ccb_tag = 0xffffffff;
++		pm8001_ha->ccb_info[i].device = NULL;
+ 		++pm8001_ha->tags_num;
+ 	}
+ 	pm8001_ha->flags = PM8001F_INIT_TIME;
+@@ -578,7 +581,7 @@ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
+ {
+ 	struct pci_dev *pdev;
+ 	irq_handler_t irq_handler = pm8001_interrupt;
+-	u32 rc;
++	int rc;
+ 
+ 	pdev = pm8001_ha->pdev;
+ 
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index 1e840fd..1f767a0 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -330,15 +330,12 @@ int pm8001_slave_configure(struct scsi_device *sdev)
+ 	return 0;
+ }
+ /**
+-  * pm8001_task_exec -execute the task which come from upper level, send the
+-  * command or data to DMA area and then increase CI,for queuecommand(ssp),
+-  * it is from upper layer and for smp command,it is from libsas,
+-  * for ata command it is from libata.
++  * pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware.
+   * @task: the task to be execute.
+   * @num: if can_queue great than 1, the task can be queued up. for SMP task,
+   * we always execute one one time.
+   * @gfp_flags: gfp_flags.
+-  * @is tmf: if it is task management task.
++  * @is_tmf: if it is task management task.
+   * @tmf: the task management IU
+   */
+ #define DEV_IS_GONE(pm8001_dev)	\
+@@ -379,7 +376,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
+ 					pm8001_printk("device %016llx not "
+ 					"ready.\n", SAS_ADDR(dev->sas_addr)));
+ 			}
+-		rc = SAS_PHY_DOWN;
++			rc = SAS_PHY_DOWN;
+ 			goto out_done;
+ 		}
+ 		rc = pm8001_tag_alloc(pm8001_ha, &tag);
+@@ -395,14 +392,14 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
+ 					t->data_dir);
+ 				if (!n_elem) {
+ 					rc = -ENOMEM;
+-					goto err_out;
++					goto err_out_tag;
+ 				}
+ 			}
+ 		} else {
+ 			n_elem = t->num_scatter;
+ 		}
+ 
+-		t->lldd_task = NULL;
++		t->lldd_task = ccb;
+ 		ccb->n_elem = n_elem;
+ 		ccb->ccb_tag = tag;
+ 		ccb->task = t;
+@@ -435,7 +432,6 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
+ 				pm8001_printk("rc is %x\n", rc));
+ 			goto err_out_tag;
+ 		}
+-		t->lldd_task = ccb;
+ 		/* TODO: select normal or high priority */
+ 		spin_lock(&t->task_state_lock);
+ 		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
+@@ -518,8 +514,7 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
+ }
+ 
+  /**
+-  * pm8001_alloc_dev - find the empty pm8001_device structure, allocate and
+-  * return it for use.
++  * pm8001_alloc_dev - find a empty pm8001_device
+   * @pm8001_ha: our hba card information
+   */
+ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
+@@ -550,14 +545,16 @@ static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
+ }
+ 
+ /**
+-  * pm8001_dev_found_notify - when libsas find a sas domain device, it should
+-  * tell the LLDD that device is found, and then LLDD register this device to
+-  * HBA FW by the command "OPC_INB_REG_DEV", after that the HBA will assign
+-  * a device ID(according to device's sas address) and returned it to LLDD.from
++  * pm8001_dev_found_notify - libsas notify a device is found.
++  * @dev: the device structure which sas layer used.
++  *
++  * when libsas find a sas domain device, it should tell the LLDD that
++  * device is found, and then LLDD register this device to HBA firmware
++  * by the command "OPC_INB_REG_DEV", after that the HBA will assign a
++  * device ID(according to device's sas address) and returned it to LLDD. From
+   * now on, we communicate with HBA FW with the device ID which HBA assigned
+   * rather than sas address. it is the neccessary step for our HBA but it is
+   * the optional for other HBA driver.
+-  * @dev: the device structure which sas layer used.
+   */
+ static int pm8001_dev_found_notify(struct domain_device *dev)
+ {
+@@ -665,14 +662,15 @@ static void pm8001_tmf_timedout(unsigned long data)
+ 
+ #define PM8001_TASK_TIMEOUT 20
+ /**
+-  * pm8001_exec_internal_tmf_task - when errors or exception happened, we may
+-  * want to do something, for example abort issued task which result in this
+-  * execption, this is by calling this function, note it is also with the task
+-  * execute interface.
++  * pm8001_exec_internal_tmf_task - execute some task management commands.
+   * @dev: the wanted device.
+   * @tmf: which task management wanted to be take.
+   * @para_len: para_len.
+   * @parameter: ssp task parameter.
++  *
++  * when errors or exception happened, we may want to do something, for example
++  * abort the issued task which result in this execption, it is done by calling
++  * this function, note it is also with the task execute interface.
+   */
+ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
+ 	void *parameter, u32 para_len, struct pm8001_tmf_task *tmf)
+@@ -737,9 +735,9 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
+ 			res = -EMSGSIZE;
+ 			break;
+ 		} else {
+-			PM8001_IO_DBG(pm8001_ha,
+-			pm8001_printk(" Task to dev %016llx response: 0x%x"
+-				"status 0x%x\n",
++			PM8001_EH_DBG(pm8001_ha,
++				pm8001_printk(" Task to dev %016llx response:"
++				"0x%x status 0x%x\n",
+ 				SAS_ADDR(dev->sas_addr),
+ 				task->task_status.resp,
+ 				task->task_status.stat));
+@@ -760,7 +758,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
+ 	u32 task_tag)
+ {
+ 	int res, retry;
+-	u32 rc, ccb_tag;
++	u32 ccb_tag;
+ 	struct pm8001_ccb_info *ccb;
+ 	struct sas_task *task = NULL;
+ 
+@@ -777,9 +775,9 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
+ 		task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
+ 		add_timer(&task->timer);
+ 
+-		rc = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+-		if (rc)
+-			return rc;
++		res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
++		if (res)
++			return res;
+ 		ccb = &pm8001_ha->ccb_info[ccb_tag];
+ 		ccb->device = pm8001_dev;
+ 		ccb->ccb_tag = ccb_tag;
+@@ -812,7 +810,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
+ 			break;
+ 
+ 		} else {
+-			PM8001_IO_DBG(pm8001_ha,
++			PM8001_EH_DBG(pm8001_ha,
+ 				pm8001_printk(" Task to dev %016llx response: "
+ 					"0x%x status 0x%x\n",
+ 				SAS_ADDR(dev->sas_addr),
+@@ -1027,11 +1025,11 @@ int pm8001_abort_task(struct sas_task *task)
+ 		}
+ 		device_id = pm8001_dev->device_id;
+ 		PM8001_EH_DBG(pm8001_ha,
+-		pm8001_printk("abort io to device_id = %d\n", device_id));
+-		tmf_task.tmf = 	TMF_ABORT_TASK;
++			pm8001_printk("abort io to deviceid= %d\n", device_id));
++		tmf_task.tmf = TMF_ABORT_TASK;
+ 		tmf_task.tag_of_task_to_be_managed = tag;
+ 		rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+-		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
++		pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
+ 			pm8001_dev->sas_device, 0, tag);
+ 	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
+ 		task->task_proto & SAS_PROTOCOL_STP) {
+-- 
+1.7.4.4
+

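The kzalloc() change above sizes the tag bitmap in bytes and guarantees it
starts zeroed, so every CCB tag begins free. Below is a self-contained sketch
of the bit-per-tag allocator this implies; the pool size, the names, and the
byte-granular bitmap are illustrative, not the driver's exact layout:

#include <stdint.h>
#include <stdio.h>

#define MAX_CCB 256		/* hypothetical pool size */

/* One bit per CCB tag; the bitmap must be sized for MAX_CCB bits and
 * start zeroed so that every tag is initially free. */
static uint8_t tag_bitmap[MAX_CCB / 8];

static int tag_alloc(uint32_t *tag)
{
	uint32_t i;
	for (i = 0; i < MAX_CCB; i++) {
		if (!(tag_bitmap[i / 8] & (1u << (i % 8)))) {
			tag_bitmap[i / 8] |= 1u << (i % 8);
			*tag = i;
			return 0;
		}
	}
	return -1;		/* no free tag */
}

static void tag_free(uint32_t tag)
{
	tag_bitmap[tag / 8] &= ~(1u << (tag % 8));
}

int main(void)
{
	uint32_t t;
	if (tag_alloc(&t) == 0)
		printf("got tag %u\n", t);
	tag_free(t);
	return 0;
}
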
Added: dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0005-SCSI-pm8001-Fix-for-sata-io-circular-lock-dependency.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0005-SCSI-pm8001-Fix-for-sata-io-circular-lock-dependency.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,149 @@
+From: jack wang <jack_wang at usish.com>
+Date: Mon, 7 Dec 2009 17:22:36 +0800
+Subject: [PATCH 05/25] [SCSI] pm8001: Fix for sata io circular lock
+ dependency.
+
+commit 9e79e12554d651f586ff2364e69a8e9cd5e9dbcb upstream.
+
+This patch fixes a circular lock dependency for SATA IO. When we call
+task_done for SATA IO we already hold pm8001_ha->lock, and sas_ata_task_done
+then takes dev->sata_dev.ap->lock, which causes a circular lock dependency.
+So we should drop pm8001_ha->lock before calling task_done for a SATA task.
+
+Signed-off-by: Jack Wang <jack_wang at usish.com>
+Signed-off-by: Lindar Liu <lindar_liu at usish.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/pm8001/pm8001_hwi.c |   40 +++++++++++++++++++++++++++++++++----
+ 1 files changed, 35 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index a3de306..68695b7 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -1901,7 +1901,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ {
+ 	struct sas_task *t;
+ 	struct pm8001_ccb_info *ccb;
+-	unsigned long flags;
++	unsigned long flags = 0;
+ 	u32 param;
+ 	u32 status;
+ 	u32 tag;
+@@ -2040,7 +2040,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 			ts->stat = SAS_QUEUE_FULL;
+ 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ 			mb();/*in order to force CPU ordering*/
++			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ 			t->task_done(t);
++			spin_lock_irqsave(&pm8001_ha->lock, flags);
+ 			return;
+ 		}
+ 		break;
+@@ -2058,7 +2060,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 			ts->stat = SAS_QUEUE_FULL;
+ 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ 			mb();/*ditto*/
++			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ 			t->task_done(t);
++			spin_lock_irqsave(&pm8001_ha->lock, flags);
+ 			return;
+ 		}
+ 		break;
+@@ -2084,7 +2088,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 			ts->stat = SAS_QUEUE_FULL;
+ 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ 			mb();/* ditto*/
++			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ 			t->task_done(t);
++			spin_lock_irqsave(&pm8001_ha->lock, flags);
+ 			return;
+ 		}
+ 		break;
+@@ -2149,7 +2155,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 			ts->stat = SAS_QUEUE_FULL;
+ 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ 			mb();/*ditto*/
++			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ 			t->task_done(t);
++			spin_lock_irqsave(&pm8001_ha->lock, flags);
+ 			return;
+ 		}
+ 		break;
+@@ -2171,7 +2179,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 			ts->stat = SAS_QUEUE_FULL;
+ 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ 			mb();/*ditto*/
++			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ 			t->task_done(t);
++			spin_lock_irqsave(&pm8001_ha->lock, flags);
+ 			return;
+ 		}
+ 		break;
+@@ -2200,11 +2210,20 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ 			t, status, ts->resp, ts->stat));
+ 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+-	} else {
++	} else if (t->uldd_task) {
+ 		spin_unlock_irqrestore(&t->task_state_lock, flags);
+ 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ 		mb();/* ditto */
++		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
++		t->task_done(t);
++		spin_lock_irqsave(&pm8001_ha->lock, flags);
++	} else if (!t->uldd_task) {
++		spin_unlock_irqrestore(&t->task_state_lock, flags);
++		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++		mb();/*ditto*/
++		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ 		t->task_done(t);
++		spin_lock_irqsave(&pm8001_ha->lock, flags);
+ 	}
+ }
+ 
+@@ -2212,7 +2231,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+ {
+ 	struct sas_task *t;
+-	unsigned long flags;
++	unsigned long flags = 0;
+ 	struct task_status_struct *ts;
+ 	struct pm8001_ccb_info *ccb;
+ 	struct pm8001_device *pm8001_dev;
+@@ -2292,7 +2311,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+ 			ts->stat = SAS_QUEUE_FULL;
+ 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ 			mb();/*ditto*/
++			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ 			t->task_done(t);
++			spin_lock_irqsave(&pm8001_ha->lock, flags);
+ 			return;
+ 		}
+ 		break;
+@@ -2401,11 +2422,20 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+ 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
+ 			t, event, ts->resp, ts->stat));
+ 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+-	} else {
++	} else if (t->uldd_task) {
+ 		spin_unlock_irqrestore(&t->task_state_lock, flags);
+ 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+-		mb();/* in order to force CPU ordering */
++		mb();/* ditto */
++		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
++		t->task_done(t);
++		spin_lock_irqsave(&pm8001_ha->lock, flags);
++	} else if (!t->uldd_task) {
++		spin_unlock_irqrestore(&t->task_state_lock, flags);
++		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
++		mb();/*ditto*/
++		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ 		t->task_done(t);
++		spin_lock_irqsave(&pm8001_ha->lock, flags);
+ 	}
+ }
+ 
+-- 
+1.7.4.4
+

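The pattern repeated in every hunk above: pm8001_ha->lock is held while
completions are processed, but sas_ata_task_done() takes
dev->sata_dev.ap->lock, which the SATA submission path acquires in the
opposite order, so the HBA lock is dropped around the callback and reacquired
afterwards. A userspace sketch of that unlock/call/relock shape, with a
pthread mutex as a hypothetical stand-in for the driver's spinlock:

#include <pthread.h>

static pthread_mutex_t hba_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for sas_ata_task_done(), which takes the ata port lock
 * internally; holding hba_lock across this call would invert the lock
 * order used on the submission path. */
static void task_done(void *task)
{
	(void)task;
}

/* Called with hba_lock held, mirroring mpi_sata_completion(). */
static void complete_sata_task(void *task)
{
	/* ... free the ccb under the HBA lock ... */
	pthread_mutex_unlock(&hba_lock);	/* drop before the callback */
	task_done(task);			/* may take the ap lock */
	pthread_mutex_lock(&hba_lock);		/* reacquire for the caller */
}

int main(void)
{
	pthread_mutex_lock(&hba_lock);
	complete_sata_task(0);
	pthread_mutex_unlock(&hba_lock);
	return 0;
}
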
Added: dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0006-SCSI-pm8001-enhance-error-handle-for-IO-patch.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0006-SCSI-pm8001-enhance-error-handle-for-IO-patch.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,241 @@
+From: jack wang <jack_wang at usish.com>
+Date: Mon, 7 Dec 2009 17:22:42 +0800
+Subject: [PATCH 06/25] [SCSI] pm8001: enhance error handle for IO patch
+
+commit 1cc943ae5003e4612a73119cb6fb637a45c2714d upstream.
+
+Enhance error handling for IO: when the port is down, fail the task fast
+with a phy-down status instead of submitting it to the hardware.
+
+Signed-off-by: Jack Wang <jack_wang at usish.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/pm8001/pm8001_hwi.c  |   29 +++++++++++++++++++--
+ drivers/scsi/pm8001/pm8001_init.c |    7 ++++-
+ drivers/scsi/pm8001/pm8001_sas.c  |   49 ++++++++++++++++++++++++++++++++++++-
+ drivers/scsi/pm8001/pm8001_sas.h  |    4 +++
+ 4 files changed, 84 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index 68695b7..3a121fb 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -2906,13 +2906,17 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
+ 	u8 link_rate =
+ 		(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
++	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
+ 	u8 phy_id =
+ 		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
++	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
++	u8 portstate = (u8)(npip_portstate & 0x0000000F);
++	struct pm8001_port *port = &pm8001_ha->port[port_id];
+ 	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+ 	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ 	unsigned long flags;
+ 	u8 deviceType = pPayload->sas_identify.dev_type;
+-
++	port->port_state =  portstate;
+ 	PM8001_MSG_DBG(pm8001_ha,
+ 		pm8001_printk("HW_EVENT_SAS_PHY_UP \n"));
+ 
+@@ -2925,16 +2929,19 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 		PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
+ 		pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
+ 			PHY_NOTIFY_ENABLE_SPINUP);
++		port->port_attached = 1;
+ 		get_lrate_mode(phy, link_rate);
+ 		break;
+ 	case SAS_EDGE_EXPANDER_DEVICE:
+ 		PM8001_MSG_DBG(pm8001_ha,
+ 			pm8001_printk("expander device.\n"));
++		port->port_attached = 1;
+ 		get_lrate_mode(phy, link_rate);
+ 		break;
+ 	case SAS_FANOUT_EXPANDER_DEVICE:
+ 		PM8001_MSG_DBG(pm8001_ha,
+ 			pm8001_printk("fanout expander device.\n"));
++		port->port_attached = 1;
+ 		get_lrate_mode(phy, link_rate);
+ 		break;
+ 	default:
+@@ -2976,11 +2983,17 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
+ 	u8 link_rate =
+ 		(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
++	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
+ 	u8 phy_id =
+ 		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
++	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
++	u8 portstate = (u8)(npip_portstate & 0x0000000F);
++	struct pm8001_port *port = &pm8001_ha->port[port_id];
+ 	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+ 	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ 	unsigned long flags;
++	port->port_state =  portstate;
++	port->port_attached = 1;
+ 	get_lrate_mode(phy, link_rate);
+ 	phy->phy_type |= PORT_TYPE_SATA;
+ 	phy->phy_attached = 1;
+@@ -3014,7 +3027,13 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+ 	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+ 	u8 portstate = (u8)(npip_portstate & 0x0000000F);
+-
++	struct pm8001_port *port = &pm8001_ha->port[port_id];
++	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
++	port->port_state =  portstate;
++	phy->phy_type = 0;
++	phy->identify.device_type = 0;
++	phy->phy_attached = 0;
++	memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
+ 	switch (portstate) {
+ 	case PORT_VALID:
+ 		break;
+@@ -3023,26 +3042,30 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 			pm8001_printk(" PortInvalid portID %d \n", port_id));
+ 		PM8001_MSG_DBG(pm8001_ha,
+ 			pm8001_printk(" Last phy Down and port invalid\n"));
++		port->port_attached = 0;
+ 		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+ 			port_id, phy_id, 0, 0);
+ 		break;
+ 	case PORT_IN_RESET:
+ 		PM8001_MSG_DBG(pm8001_ha,
+-			pm8001_printk(" PortInReset portID %d \n", port_id));
++			pm8001_printk(" Port In Reset portID %d \n", port_id));
+ 		break;
+ 	case PORT_NOT_ESTABLISHED:
+ 		PM8001_MSG_DBG(pm8001_ha,
+ 			pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
++		port->port_attached = 0;
+ 		break;
+ 	case PORT_LOSTCOMM:
+ 		PM8001_MSG_DBG(pm8001_ha,
+ 			pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
+ 		PM8001_MSG_DBG(pm8001_ha,
+ 			pm8001_printk(" Last phy Down and port invalid\n"));
++		port->port_attached = 0;
+ 		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+ 			port_id, phy_id, 0, 0);
+ 		break;
+ 	default:
++		port->port_attached = 0;
+ 		PM8001_MSG_DBG(pm8001_ha,
+ 			pm8001_printk(" phy Down and(default) = %x\n",
+ 			portstate));
+diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
+index 42ebe72..fb6379a 100644
+--- a/drivers/scsi/pm8001/pm8001_init.c
++++ b/drivers/scsi/pm8001/pm8001_init.c
+@@ -200,8 +200,13 @@ static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
+ {
+ 	int i;
+ 	spin_lock_init(&pm8001_ha->lock);
+-	for (i = 0; i < pm8001_ha->chip->n_phy; i++)
++	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+ 		pm8001_phy_init(pm8001_ha, i);
++		pm8001_ha->port[i].wide_port_phymap = 0;
++		pm8001_ha->port[i].port_attached = 0;
++		pm8001_ha->port[i].port_state = 0;
++		INIT_LIST_HEAD(&pm8001_ha->port[i].list);
++	}
+ 
+ 	pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL);
+ 	if (!pm8001_ha->tags)
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index 1f767a0..49721c8 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -329,6 +329,23 @@ int pm8001_slave_configure(struct scsi_device *sdev)
+ 	}
+ 	return 0;
+ }
++ /* Find the local port id that's attached to this device */
++static int sas_find_local_port_id(struct domain_device *dev)
++{
++	struct domain_device *pdev = dev->parent;
++
++	/* Directly attached device */
++	if (!pdev)
++		return dev->port->id;
++	while (pdev) {
++		struct domain_device *pdev_p = pdev->parent;
++		if (!pdev_p)
++			return pdev->port->id;
++		pdev = pdev->parent;
++	}
++	return 0;
++}
++
+ /**
+   * pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware.
+   * @task: the task to be execute.
+@@ -346,11 +363,12 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
+ 	struct domain_device *dev = task->dev;
+ 	struct pm8001_hba_info *pm8001_ha;
+ 	struct pm8001_device *pm8001_dev;
++	struct pm8001_port *port = NULL;
+ 	struct sas_task *t = task;
+ 	struct pm8001_ccb_info *ccb;
+ 	u32 tag = 0xdeadbeef, rc, n_elem = 0;
+ 	u32 n = num;
+-	unsigned long flags = 0;
++	unsigned long flags = 0, flags_libsas = 0;
+ 
+ 	if (!dev->port) {
+ 		struct task_status_struct *tsm = &t->task_status;
+@@ -379,6 +397,35 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
+ 			rc = SAS_PHY_DOWN;
+ 			goto out_done;
+ 		}
++		port = &pm8001_ha->port[sas_find_local_port_id(dev)];
++		if (!port->port_attached) {
++			if (sas_protocol_ata(t->task_proto)) {
++				struct task_status_struct *ts = &t->task_status;
++				ts->resp = SAS_TASK_UNDELIVERED;
++				ts->stat = SAS_PHY_DOWN;
++
++				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
++				spin_unlock_irqrestore(dev->sata_dev.ap->lock,
++						flags_libsas);
++				t->task_done(t);
++				spin_lock_irqsave(dev->sata_dev.ap->lock,
++					flags_libsas);
++				spin_lock_irqsave(&pm8001_ha->lock, flags);
++				if (n > 1)
++					t = list_entry(t->list.next,
++							struct sas_task, list);
++				continue;
++			} else {
++				struct task_status_struct *ts = &t->task_status;
++				ts->resp = SAS_TASK_UNDELIVERED;
++				ts->stat = SAS_PHY_DOWN;
++				t->task_done(t);
++				if (n > 1)
++					t = list_entry(t->list.next,
++							struct sas_task, list);
++				continue;
++			}
++		}
+ 		rc = pm8001_tag_alloc(pm8001_ha, &tag);
+ 		if (rc)
+ 			goto err_out;
+diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
+index 30f2ede..c44a115 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.h
++++ b/drivers/scsi/pm8001/pm8001_sas.h
+@@ -164,6 +164,10 @@ struct pm8001_chip_info {
+ 
+ struct pm8001_port {
+ 	struct asd_sas_port	sas_port;
++	u8			port_attached;
++	u8			wide_port_phymap;
++	u8			port_state;
++	struct list_head	list;
+ };
+ 
+ struct pm8001_phy {
+-- 
+1.7.4.4
+

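sas_find_local_port_id() above climbs the domain_device parent chain to the
device directly attached to the HBA and reports that port's id, which lets an
unattached port fail the task before it is queued. A stand-alone sketch of
the same parent walk, with illustrative types in place of the libsas ones:

#include <stdio.h>

struct device {
	struct device *parent;
	int port_id;
};

static int find_local_port_id(struct device *dev)
{
	/* directly attached device */
	if (!dev->parent)
		return dev->port_id;
	/* otherwise climb to the topmost ancestor and report its port */
	while (dev->parent) {
		if (!dev->parent->parent)
			return dev->parent->port_id;
		dev = dev->parent;
	}
	return 0;
}

int main(void)
{
	struct device root = { .parent = 0,     .port_id = 2 };
	struct device exp  = { .parent = &root, .port_id = 2 };
	struct device disk = { .parent = &exp,  .port_id = 2 };
	printf("local port id = %d\n", find_local_port_id(&disk));
	return 0;
}
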
Added: dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0007-SCSI-pm8001-fix-endian-issues-with-SAS-address.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0007-SCSI-pm8001-fix-endian-issues-with-SAS-address.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,62 @@
+From: jack wang <jack_wang at usish.com>
+Date: Mon, 7 Dec 2009 17:22:47 +0800
+Subject: [PATCH 07/25] [SCSI] pm8001: fix endian issues with SAS address
+
+commit afc5ca9ddc6c223dbea8a2f8816a88b21a0883b5 upstream.
+
+Signed-off-by: Jack Wang <jack_wang at usish.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/pm8001/pm8001_hwi.c |    7 ++++---
+ drivers/scsi/pm8001/pm8001_hwi.h |    3 +--
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index 3a121fb..b4426b5 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -3823,7 +3823,8 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
+ 	u32 opc = OPC_INB_SSPINIIOSTART;
+ 	memset(&ssp_cmd, 0, sizeof(ssp_cmd));
+ 	memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
+-	ssp_cmd.dir_m_tlr = data_dir_flags[task->data_dir] << 8 | 0x0;/*0 for
++	ssp_cmd.dir_m_tlr =
++		cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);/*0 for
+ 	SAS 1.1 compatible TLR*/
+ 	ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
+ 	ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
+@@ -3894,7 +3895,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
+ 		}
+ 	}
+ 	if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag))
+-		ncg_tag = cpu_to_le32(hdr_tag);
++		ncg_tag = hdr_tag;
+ 	dir = data_dir_flags[task->data_dir] << 8;
+ 	sata_cmd.tag = cpu_to_le32(tag);
+ 	sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+@@ -4039,7 +4040,7 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
+ 		((stp_sspsmp_sata & 0x03) * 0x10000000));
+ 	payload.firstburstsize_ITNexustimeout =
+ 		cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
+-	memcpy(&payload.sas_addr_hi, pm8001_dev->sas_device->sas_addr,
++	memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
+ 		SAS_ADDR_SIZE);
+ 	rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+ 	return rc;
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
+index 96e4daa..833a520 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.h
++++ b/drivers/scsi/pm8001/pm8001_hwi.h
+@@ -242,8 +242,7 @@ struct reg_dev_req {
+ 	__le32	phyid_portid;
+ 	__le32	dtype_dlr_retry;
+ 	__le32	firstburstsize_ITNexustimeout;
+-	u32	sas_addr_hi;
+-	u32	sas_addr_low;
++	u8	sas_addr[SAS_ADDR_SIZE];
+ 	__le32	upper_device_id;
+ 	u32	reserved[8];
+ } __attribute__((packed, aligned(4)));
+-- 
+1.7.4.4
+

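The fix above has each multi-byte field pass through cpu_to_le32() exactly
once (hdr_tag is no longer converted prematurely) and stores the SAS address
as a byte array so it is never byte-swapped. A userspace sketch of the same
two rules, with glibc's htole32() standing in for cpu_to_le32() and
illustrative field values:

#include <endian.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* The wire format is little-endian, so integer fields are converted
 * once, and byte arrays such as the SAS address are copied as bytes. */
struct reg_dev_req_sketch {
	uint32_t device_id;		/* little-endian on the wire */
	uint8_t  sas_addr[8];		/* raw bytes, no swapping */
};

int main(void)
{
	const uint8_t sas_addr[8] =
		{ 0x50, 0x01, 0x0c, 0x60, 0x00, 0x47, 0xf9, 0xd0 };
	struct reg_dev_req_sketch req;

	req.device_id = htole32(0x1234);	/* convert once, here */
	memcpy(req.sas_addr, sas_addr, sizeof(req.sas_addr));
	printf("first wire byte of device_id: 0x%02x\n",
	       ((uint8_t *)&req.device_id)[0]);	/* 0x34 on any host */
	return 0;
}
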
Added: dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0008-SCSI-pm8001-set-SSC-down-spreading-only-to-get-less-.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0008-SCSI-pm8001-set-SSC-down-spreading-only-to-get-less-.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,108 @@
+From: jack wang <jack_wang at usish.com>
+Date: Mon, 7 Dec 2009 17:46:22 +0800
+Subject: [PATCH 08/25] [SCSI] pm8001: set SSC down-spreading only to get
+ fewer errors on some 6G devices.
+
+commit 0330dba36127768a2e2df2eabb902b5530102871 upstream.
+
+Signed-off-by: Jack Wang <jack_wang at usish.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/pm8001/pm8001_hwi.c |   65 ++++++++++++-------------------------
+ 1 files changed, 21 insertions(+), 44 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index b4426b5..6e1bdd8 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -373,10 +373,7 @@ static int bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
+ static void __devinit
+ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
+ {
+-	u32 offset;
+-	u32 value;
+-	u32 i, j;
+-	u32 bit_cnt;
++	u32 value, offset, i;
+ 
+ #define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
+ #define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
+@@ -392,55 +389,35 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
+     */
+ 	if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR))
+ 		return;
+-	/* set SSC bit of PHY 0 - 3 */
++
+ 	for (i = 0; i < 4; i++) {
+ 		offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
+-		value = pm8001_cr32(pm8001_ha, 2, offset);
+-		if (SSCbit) {
+-			value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
+-			value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
+-		} else {
+-			value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
+-			value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
+-		}
+-		bit_cnt = 0;
+-		for (j = 0; j < 31; j++)
+-			if ((value >> j) & 0x00000001)
+-				bit_cnt++;
+-		if (bit_cnt % 2)
+-			value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
+-		else
+-			value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
+-
+-		pm8001_cw32(pm8001_ha, 2, offset, value);
++		pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
+ 	}
+-
+ 	/* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */
+ 	if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR))
+ 		return;
+-
+-	/* set SSC bit of PHY 4 - 7 */
+ 	for (i = 4; i < 8; i++) {
+ 		offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
+-		value = pm8001_cr32(pm8001_ha, 2, offset);
+-		if (SSCbit) {
+-			value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
+-			value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
+-		} else {
+-			value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
+-			value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
+-		}
+-		bit_cnt = 0;
+-		for (j = 0; j < 31; j++)
+-			if ((value >> j) & 0x00000001)
+-				bit_cnt++;
+-		if (bit_cnt % 2)
+-			value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
+-		else
+-			value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
+-
+-		pm8001_cw32(pm8001_ha, 2, offset, value);
++		pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
+ 	}
++	/*************************************************************
++	Change the SSC upspreading value to 0x0 so that upspreading is disabled.
++	Device MABC SMOD0 Controls
++	Address: (via MEMBASE-III):
++	Using shifted destination address 0x0_0000: with Offset 0xD8
++
++	31:28 R/W Reserved Do not change
++	27:24 R/W SAS_SMOD_SPRDUP 0000
++	23:20 R/W SAS_SMOD_SPRDDN 0000
++	19:0  R/W  Reserved Do not change
++	Upon power-up this register will read as 0x8990c016,
++	and I would like you to change the SAS_SMOD_SPRDUP bits to 0b0000
++	so that the written value will be 0x8090c016.
++	This will ensure only down-spreading SSC is enabled on the SPC.
++	*************************************************************/
++	value = pm8001_cr32(pm8001_ha, 2, 0xd8);
++	pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016);
+ 
+ 	/*set the shifted destination address to 0x0 to avoid error operation */
+ 	bar4_shift(pm8001_ha, 0x0);
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0009-SCSI-pm8001-fix-potential-NULL-pointer-dereference.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0009-SCSI-pm8001-fix-potential-NULL-pointer-dereference.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,32 @@
+From: jack wang <jack_wang at usish.com>
+Date: Mon, 7 Dec 2009 17:22:55 +0800
+Subject: [PATCH 09/25] [SCSI] pm8001: fix potential NULL pointer dereference
+
+commit f01f4e6a1cb343fc75dc580ec9203d9719f78f95 upstream.
+
+Signed-off-by: Jack Wang <jack_wang at usish.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/pm8001/pm8001_sas.c |    2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index 49721c8..487f78f 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -616,11 +616,11 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
+ 	spin_lock_irqsave(&pm8001_ha->lock, flags);
+ 
+ 	pm8001_device = pm8001_alloc_dev(pm8001_ha);
+-	pm8001_device->sas_device = dev;
+ 	if (!pm8001_device) {
+ 		res = -1;
+ 		goto found_out;
+ 	}
++	pm8001_device->sas_device = dev;
+ 	dev->lldd_dev = pm8001_device;
+ 	pm8001_device->dev_type = dev->dev_type;
+ 	pm8001_device->dcompletion = &completion;
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0010-SCSI-pm8001-bit-set-pm8001_ha-flags.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0010-SCSI-pm8001-bit-set-pm8001_ha-flags.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,28 @@
+From: jack wang <jack_wang at usish.com>
+Date: Mon, 7 Dec 2009 17:22:59 +0800
+Subject: [PATCH 10/25] [SCSI] pm8001: bit set pm8001_ha->flags
+
+commit a61b8699c764cccf85ccbf489e1772b2950ba4c6 upstream.
+
+Signed-off-by: Jack Wang <jack_wang at usish.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/pm8001/pm8001_sas.c |    2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index 487f78f..c86f921 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -656,7 +656,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
+ 	wait_for_completion(&completion);
+ 	if (dev->dev_type == SAS_END_DEV)
+ 		msleep(50);
+-	pm8001_ha->flags = PM8001F_RUN_TIME ;
++	pm8001_ha->flags |= PM8001F_RUN_TIME ;
+ 	return 0;
+ found_out:
+ 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+-- 
+1.7.4.4
+

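For context on the one-liner above: pm8001_alloc() sets pm8001_ha->flags to
PM8001F_INIT_TIME, so a plain assignment of PM8001F_RUN_TIME would clobber
that bit, while |= preserves it. A trivial demonstration with hypothetical
bit values:

#include <stdio.h>

#define F_INIT_TIME (1u << 0)	/* illustrative bit values */
#define F_RUN_TIME  (1u << 1)

int main(void)
{
	unsigned flags = F_INIT_TIME;

	flags = F_RUN_TIME;		/* pre-patch: clobbers INIT_TIME */
	printf("assign: %#x\n", flags);

	flags = F_INIT_TIME;
	flags |= F_RUN_TIME;		/* post-patch: both bits survive */
	printf("or-in:  %#x\n", flags);
	return 0;
}
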
Added: dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0011-SCSI-pm8001-do-not-reset-local-sata-as-it-will-not-b.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0011-SCSI-pm8001-do-not-reset-local-sata-as-it-will-not-b.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,29 @@
+From: jack wang <jack_wang at usish.com>
+Date: Mon, 7 Dec 2009 17:23:05 +0800
+Subject: [PATCH 11/25] [SCSI] pm8001: do not reset local sata as it will not
+ be found if reset
+
+commit 8257ec80ba5b333dedf3395acf90055075aeba94 upstream.
+
+Signed-off-by: Jack Wang <jack_wang at usish.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/pm8001/pm8001_sas.c |    2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index c86f921..e3d4f38 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -944,6 +944,8 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
+ 
+ 	if (dev_is_sata(dev)) {
+ 		DECLARE_COMPLETION_ONSTACK(completion_setstate);
++		if (scsi_is_sas_phy_local(phy))
++			return 0;
+ 		rc = sas_phy_reset(phy, 1);
+ 		msleep(2000);
+ 		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
+-- 
+1.7.4.4
+

Added: dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0012-SCSI-pm8001-enable-read-HBA-SAS-address-from-VPD.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0012-SCSI-pm8001-enable-read-HBA-SAS-address-from-VPD.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,119 @@
+From: jack wang <jack_wang at usish.com>
+Date: Mon, 7 Dec 2009 17:23:08 +0800
+Subject: [PATCH 12/25] [SCSI] pm8001: enable read HBA SAS address from VPD
+
+commit 7c8356d969e203a8f2f740a9a80d4944eb8cf1d1 upstream.
+
+Signed-off-by: Jack Wang <jack_wang at usish.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/pm8001/pm8001_ctl.h  |   10 ----------
+ drivers/scsi/pm8001/pm8001_init.c |   12 ++++++++----
+ drivers/scsi/pm8001/pm8001_sas.h  |   24 ++++++++++++++++++------
+ 3 files changed, 26 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h
+index 22644de..63ad4aa 100644
+--- a/drivers/scsi/pm8001/pm8001_ctl.h
++++ b/drivers/scsi/pm8001/pm8001_ctl.h
+@@ -45,16 +45,6 @@
+ #define HEADER_LEN			28
+ #define SIZE_OFFSET			16
+ 
+-struct pm8001_ioctl_payload {
+-	u32	signature;
+-	u16	major_function;
+-	u16	minor_function;
+-	u16	length;
+-	u16	status;
+-	u16	offset;
+-	u16	id;
+-	u8	func_specific[1];
+-};
+ 
+ #define FLASH_OK                        0x000000
+ #define FAIL_OPEN_BIOS_FILE             0x000100
+diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
+index fb6379a..c2f1032 100644
+--- a/drivers/scsi/pm8001/pm8001_init.c
++++ b/drivers/scsi/pm8001/pm8001_init.c
+@@ -516,19 +516,23 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
+ 	u8 i;
+ #ifdef PM8001_READ_VPD
+ 	DECLARE_COMPLETION_ONSTACK(completion);
++	struct pm8001_ioctl_payload payload;
+ 	pm8001_ha->nvmd_completion = &completion;
+-	PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, 0, 0);
++	payload.minor_function = 0;
++	payload.length = 128;
++	payload.func_specific = kzalloc(128, GFP_KERNEL);
++	PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
+ 	wait_for_completion(&completion);
+ 	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+ 		memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
+ 			SAS_ADDR_SIZE);
+ 		PM8001_INIT_DBG(pm8001_ha,
+-			pm8001_printk("phy %d sas_addr = %x \n", i,
+-			(u64)pm8001_ha->phy[i].dev_sas_addr));
++			pm8001_printk("phy %d sas_addr = %016llx \n", i,
++			pm8001_ha->phy[i].dev_sas_addr));
+ 	}
+ #else
+ 	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+-		pm8001_ha->phy[i].dev_sas_addr = 0x500e004010000004ULL;
++		pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL;
+ 		pm8001_ha->phy[i].dev_sas_addr =
+ 			cpu_to_be64((u64)
+ 				(*(u64 *)&pm8001_ha->phy[i].dev_sas_addr));
+diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
+index c44a115..599601e 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.h
++++ b/drivers/scsi/pm8001/pm8001_sas.h
+@@ -100,6 +100,7 @@ do {						\
+ 
+ #define PM8001_USE_TASKLET
+ #define PM8001_USE_MSIX
++#define PM8001_READ_VPD
+ 
+ 
+ #define DEV_IS_EXPANDER(type)	((type == EDGE_DEV) || (type == FANOUT_DEV))
+@@ -111,7 +112,22 @@ extern const struct pm8001_dispatch pm8001_8001_dispatch;
+ struct pm8001_hba_info;
+ struct pm8001_ccb_info;
+ struct pm8001_device;
+-struct pm8001_tmf_task;
++/* define task management IU */
++struct pm8001_tmf_task {
++	u8	tmf;
++	u32	tag_of_task_to_be_managed;
++};
++struct pm8001_ioctl_payload {
++	u32	signature;
++	u16	major_function;
++	u16	minor_function;
++	u16	length;
++	u16	status;
++	u16	offset;
++	u16	id;
++	u8	*func_specific;
++};
++
+ struct pm8001_dispatch {
+ 	char *name;
+ 	int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
+@@ -390,11 +406,7 @@ struct pm8001_fw_image_header {
+ 	__be32 startup_entry;
+ } __attribute__((packed, aligned(4)));
+ 
+-/* define task management IU */
+-struct pm8001_tmf_task {
+-	u8	tmf;
+-	u32	tag_of_task_to_be_managed;
+-};
++
+ /**
+  * FW Flash Update status values
+  */
+-- 
+1.7.4.4
+
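
Besides moving the struct, the change above turns func_specific from a one-byte trailing array into a plain pointer, so callers such as pm8001_init_sas_add() can attach a buffer of whatever length the VPD read needs (128 bytes here). A minimal sketch contrasting the two layouts, with hypothetical names:

    #include <stdlib.h>
    #include <string.h>

    struct payload_inline {
            unsigned short length;
            unsigned char  func_specific[1];   /* old: inline trailing array */
    };

    struct payload_ptr {
            unsigned short length;
            unsigned char *func_specific;      /* new: caller-supplied buffer */
    };

    int main(void)
    {
            struct payload_inline *a = malloc(sizeof(*a) + 127); /* one oversized block */
            struct payload_ptr b = { .length = 128 };

            b.func_specific = calloc(1, 128);  /* separate zeroed buffer, any length */
            if (!a || !b.func_specific) {
                    free(a);
                    free(b.func_specific);
                    return 1;
            }
            memset(a->func_specific, 0, 128);  /* 1 inline byte plus 127 extra */
            free(b.func_specific);
            free(a);
            return 0;
    }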

Added: dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0013-SCSI-pm8001-misc-code-cleanup.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0013-SCSI-pm8001-misc-code-cleanup.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,84 @@
+From: jack wang <jack_wang at usish.com>
+Date: Mon, 7 Dec 2009 17:23:11 +0800
+Subject: [PATCH 13/25] [SCSI] pm8001: misc code cleanup
+
+commit 83e7332941e3e2621502aadb0e5c8a3b11fd1197 upstream.
+
+Add more data to printk's, add some spaces around arithmetic ops and
+improve comments.
+
+Signed-off-by: Jack Wang <jack_wang at usish.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/pm8001/pm8001_hwi.c |    8 ++++++--
+ drivers/scsi/pm8001/pm8001_sas.c |    2 +-
+ drivers/scsi/pm8001/pm8001_sas.h |    4 ++--
+ 3 files changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index 6e1bdd8..9b44c6f 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -2895,7 +2895,8 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 	u8 deviceType = pPayload->sas_identify.dev_type;
+ 	port->port_state =  portstate;
+ 	PM8001_MSG_DBG(pm8001_ha,
+-		pm8001_printk("HW_EVENT_SAS_PHY_UP \n"));
++		pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
++		port_id, phy_id));
+ 
+ 	switch (deviceType) {
+ 	case SAS_PHY_UNUSED:
+@@ -2969,6 +2970,9 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ 	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+ 	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ 	unsigned long flags;
++	PM8001_MSG_DBG(pm8001_ha,
++		pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
++		" phy id = %d\n", port_id, phy_id));
+ 	port->port_state =  portstate;
+ 	port->port_attached = 1;
+ 	get_lrate_mode(phy, link_rate);
+@@ -4058,7 +4062,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+ 	struct inbound_queue_table *circularQ;
+ 	int ret;
+ 	u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
+-	memset((u8 *)&payload, 0, sizeof(payload));
++	memset(&payload, 0, sizeof(payload));
+ 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ 	payload.tag = 1;
+ 	payload.phyop_phyid =
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index e3d4f38..7f9c83a 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -819,7 +819,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
+ 		task->task_done = pm8001_task_done;
+ 		task->timer.data = (unsigned long)task;
+ 		task->timer.function = pm8001_tmf_timedout;
+-		task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
++		task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
+ 		add_timer(&task->timer);
+ 
+ 		res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
+index 599601e..8e38ca8 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.h
++++ b/drivers/scsi/pm8001/pm8001_sas.h
+@@ -59,11 +59,11 @@
+ 
+ #define DRV_NAME		"pm8001"
+ #define DRV_VERSION		"0.1.36"
+-#define PM8001_FAIL_LOGGING	0x01 /* libsas EH function logging */
++#define PM8001_FAIL_LOGGING	0x01 /* Error message logging */
+ #define PM8001_INIT_LOGGING	0x02 /* driver init logging */
+ #define PM8001_DISC_LOGGING	0x04 /* discovery layer logging */
+ #define PM8001_IO_LOGGING	0x08 /* I/O path logging */
+-#define PM8001_EH_LOGGING	0x10 /* Error message logging */
++#define PM8001_EH_LOGGING	0x10 /* libsas EH function logging*/
+ #define PM8001_IOCTL_LOGGING	0x20 /* IOCTL message logging */
+ #define PM8001_MSG_LOGGING	0x40 /* misc message logging */
+ #define pm8001_printk(format, arg...)	printk(KERN_INFO "%s %d:" format,\
+-- 
+1.7.4.4
+
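
One of the smaller cleanups above drops a (u8 *) cast in front of memset(); since memset() takes a void *, the cast carried no information. In miniature (the struct name is a hypothetical stand-in):

    #include <stdio.h>
    #include <string.h>

    struct payload_demo { int tag; int phyop_phyid; };

    int main(void)
    {
            struct payload_demo p;

            memset((unsigned char *)&p, 0, sizeof(p));   /* old form, with cast */
            memset(&p, 0, sizeof(p));                    /* new form, cast dropped */
            printf("tag=%d\n", p.tag);
            return 0;
    }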

Added: dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0014-SCSI-pm8001-Use-kzalloc-for-allocating-only-one-thin.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0014-SCSI-pm8001-Use-kzalloc-for-allocating-only-one-thin.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,44 @@
+From: Julia Lawall <julia at diku.dk>
+Date: Sat, 19 Dec 2009 08:17:27 +0100
+Subject: [PATCH 14/25] [SCSI] pm8001: Use kzalloc for allocating only one
+ thing
+
+commit 3dbf6c0012d12473461b7485006db373e8192fa5 upstream.
+
+Use kzalloc rather than kcalloc(1,...)
+
+The semantic patch that makes this change is as follows:
+(http://coccinelle.lip6.fr/)
+
+// <smpl>
+@@
+@@
+
+- kcalloc(1,
++ kzalloc(
+          ...)
+// </smpl>
+
+Signed-off-by: Julia Lawall <julia at diku.dk>
+Acked-by:Jack Wang <jack_wang at usish.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/pm8001/pm8001_init.c |    2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
+index c2f1032..f80c1da8 100644
+--- a/drivers/scsi/pm8001/pm8001_init.c
++++ b/drivers/scsi/pm8001/pm8001_init.c
+@@ -654,7 +654,7 @@ static int __devinit pm8001_pci_probe(struct pci_dev *pdev,
+ 	}
+ 	chip = &pm8001_chips[ent->driver_data];
+ 	SHOST_TO_SAS_HA(shost) =
+-		kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
++		kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
+ 	if (!SHOST_TO_SAS_HA(shost)) {
+ 		rc = -ENOMEM;
+ 		goto err_out_free_host;
+-- 
+1.7.4.4
+
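
The equivalence behind the Coccinelle rule: kcalloc(1, size, flags) and kzalloc(size, flags) both return size zeroed bytes, so the single-object spelling reads better and skips kcalloc's overflow-checked multiply. A userspace sketch using calloc() as a stand-in for both kernel helpers:

    #include <stdlib.h>

    /* Demo stand-ins; in the kernel these are kzalloc()/kcalloc(). */
    static void *kzalloc_demo(size_t size)           { return calloc(1, size); }
    static void *kcalloc_demo(size_t n, size_t size) { return calloc(n, size); }

    struct sas_ha_demo { int num_phys; };   /* hypothetical stand-in */

    int main(void)
    {
            struct sas_ha_demo *a = kcalloc_demo(1, sizeof(*a)); /* old call shape */
            struct sas_ha_demo *b = kzalloc_demo(sizeof(*b));    /* new call shape */

            free(a);
            free(b);
            return 0;
    }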

Added: dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0017-SCSI-pm8001-drop-redundant-memset.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0017-SCSI-pm8001-drop-redundant-memset.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,43 @@
+From: Julia Lawall <julia at diku.dk>
+Date: Tue, 9 Mar 2010 22:14:34 +0100
+Subject: [PATCH 17/25] [SCSI] pm8001: drop redundant memset
+
+commit e05a9e7b18dfcce6911d0b901d7f04387cc1d93c upstream.
+
+The region set by the call to memset is immediately overwritten by the
+subsequent call to memcpy.
+
+The semantic patch that makes this change is as follows:
+(http://coccinelle.lip6.fr/)
+
+// <smpl>
+@@
+expression e1,e2,e3,e4;
+@@
+
+- memset(e1,e2,e3);
+  memcpy(e1,e4,e3);
+// </smpl>
+
+Signed-off-by: Julia Lawall <julia at diku.dk>
+Acked-by: Jack Wang <jack_wang at usish.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/pm8001/pm8001_hwi.c |    1 -
+ 1 files changed, 0 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index 909c00e..5ff8261 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -4390,7 +4390,6 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
+ 				return -ENOMEM;
+ 		}
+ 	}
+-	memset(buffer, 0, fw_control->len);
+ 	memcpy(buffer, fw_control->buffer, fw_control->len);
+ 	flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
+ 	flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
+-- 
+1.7.4.4
+
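
The pattern being removed, in miniature: every byte the memset() clears is immediately rewritten by a memcpy() of the same length, so the memset() is a dead store. (If the copy were shorter than the buffer, the zeroing would still be needed for the tail.) A small sketch:

    #include <stdio.h>
    #include <string.h>

    static void fill_buffer(char *dst, const char *src, size_t len)
    {
            memset(dst, 0, len);    /* removed by the patch */
            memcpy(dst, src, len);  /* overwrites all len bytes anyway */
    }

    int main(void)
    {
            char out[8];

            fill_buffer(out, "abcdefg", sizeof(out));   /* copies the NUL too */
            printf("%s\n", out);
            return 0;
    }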

Added: dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0018-SCSI-pm8001-potential-null-dereference-in-pm8001_dev.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0018-SCSI-pm8001-potential-null-dereference-in-pm8001_dev.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,43 @@
+From: Dan Carpenter <error27 at gmail.com>
+Date: Fri, 23 Apr 2010 14:01:04 +0200
+Subject: [PATCH 18/25] [SCSI] pm8001: potential null dereference in
+ pm8001_dev_gone_notify()
+
+commit 2471b894068ec59ab3012e788401b345ef459e49 upstream.
+
+In the original code we dereferenced "pm8001_dev" before checking if it
+was null.  This patch moves the dereference inside the condition.
+
+This was found by a static checker (smatch).  I looked, but I couldn't
+tell if "pm8001_dev" dev was ever actually null.  The approach in this
+patch seemed like the safest response.
+
+Signed-off-by: Dan Carpenter <error27 at gmail.com>
+Acked-by: Jack Wang <jack_wang at usish.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/pm8001/pm8001_sas.c |    4 +++-
+ 1 files changed, 3 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index bff4f51..cd02cea 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -885,11 +885,13 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
+ 	u32 tag;
+ 	struct pm8001_hba_info *pm8001_ha;
+ 	struct pm8001_device *pm8001_dev = dev->lldd_dev;
+-	u32 device_id = pm8001_dev->device_id;
++
+ 	pm8001_ha = pm8001_find_ha_by_dev(dev);
+ 	spin_lock_irqsave(&pm8001_ha->lock, flags);
+ 	pm8001_tag_alloc(pm8001_ha, &tag);
+ 	if (pm8001_dev) {
++		u32 device_id = pm8001_dev->device_id;
++
+ 		PM8001_DISC_DBG(pm8001_ha,
+ 			pm8001_printk("found dev[%d:%x] is gone.\n",
+ 			pm8001_dev->device_id, pm8001_dev->dev_type));
+-- 
+1.7.4.4
+
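
The bug shape, in a standalone sketch (names hypothetical): the old code read pm8001_dev->device_id one line before the if (pm8001_dev) test, so a NULL argument would crash before the check could help. Moving the read inside the check, as the patch does, makes the NULL case harmless:

    #include <stdio.h>

    struct dev_demo { int device_id; };   /* hypothetical stand-in */

    static void dev_gone_notify(struct dev_demo *dev)
    {
            if (dev) {
                    int device_id = dev->device_id;   /* safe: read only when non-NULL */
                    printf("found dev[%d] is gone.\n", device_id);
            }
    }

    int main(void)
    {
            struct dev_demo d = { .device_id = 7 };

            dev_gone_notify(&d);
            dev_gone_notify(NULL);   /* harmless with the fixed ordering */
            return 0;
    }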

Added: dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0022-SCSI-pm8001-introduce-missing-kfree.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0022-SCSI-pm8001-introduce-missing-kfree.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,106 @@
+From: Julia Lawall <julia at diku.dk>
+Date: Sun, 1 Aug 2010 19:23:35 +0200
+Subject: [PATCH 22/25] [SCSI] pm8001: introduce missing kfree
+
+commit 823d219f23b958292279cfdc8583dc4f1f91c2d5 upstream.
+
+Error handling code following a kmalloc should free the allocated data.
+
+The semantic match that finds the problem is as follows:
+(http://www.emn.fr/x-info/coccinelle/)
+
+// <smpl>
+ at r exists@
+local idexpression x;
+expression E;
+identifier f,f1;
+position p1,p2;
+@@
+
+x at p1 = \(kmalloc\|kzalloc\|kcalloc\)(...);
+<... when != x
+     when != if (...) { <+...x...+> }
+     when != (x) == NULL
+     when != (x) != NULL
+     when != (x) == 0
+     when != (x) != 0
+(
+x->f1 = E
+|
+ (x->f1 == NULL || ...)
+|
+ f(...,x->f1,...)
+)
+...>
+(
+ return <+...x...+>;
+|
+ return at p2 ...;
+)
+
+ at script:python@
+p1 << r.p1;
+p2 << r.p2;
+@@
+
+print "* file: %s kmalloc %s return %s" % (p1[0].file,p1[0].line,p2[0].line)
+// </smpl>
+
+Signed-off-by: Julia Lawall <julia at diku.dk>
+Acked-by: jack wang <jack_wang at usish.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/pm8001/pm8001_hwi.c |   13 ++++++++++---
+ 1 files changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index 58d1134..9793aa6 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -4199,8 +4199,10 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+ 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ 	memset(&nvmd_req, 0, sizeof(nvmd_req));
+ 	rc = pm8001_tag_alloc(pm8001_ha, &tag);
+-	if (rc)
++	if (rc) {
++		kfree(fw_control_context);
+ 		return rc;
++	}
+ 	ccb = &pm8001_ha->ccb_info[tag];
+ 	ccb->ccb_tag = tag;
+ 	ccb->fw_control_context = fw_control_context;
+@@ -4276,8 +4278,10 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+ 		ioctl_payload->length);
+ 	memset(&nvmd_req, 0, sizeof(nvmd_req));
+ 	rc = pm8001_tag_alloc(pm8001_ha, &tag);
+-	if (rc)
++	if (rc) {
++		kfree(fw_control_context);
+ 		return rc;
++	}
+ 	ccb = &pm8001_ha->ccb_info[tag];
+ 	ccb->fw_control_context = fw_control_context;
+ 	ccb->ccb_tag = tag;
+@@ -4387,6 +4391,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
+ 			fw_control->len, 0) != 0) {
+ 				PM8001_FAIL_DBG(pm8001_ha,
+ 					pm8001_printk("Mem alloc failure\n"));
++				kfree(fw_control_context);
+ 				return -ENOMEM;
+ 		}
+ 	}
+@@ -4401,8 +4406,10 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
+ 	fw_control_context->virtAddr = buffer;
+ 	fw_control_context->len = fw_control->len;
+ 	rc = pm8001_tag_alloc(pm8001_ha, &tag);
+-	if (rc)
++	if (rc) {
++		kfree(fw_control_context);
+ 		return rc;
++	}
+ 	ccb = &pm8001_ha->ccb_info[tag];
+ 	ccb->fw_control_context = fw_control_context;
+ 	ccb->ccb_tag = tag;
+-- 
+1.7.4.4
+
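
The leak shape, in a userspace sketch (tag_alloc_demo is a made-up stand-in for pm8001_tag_alloc): an early error return after a successful allocation drops the only reference to it, so each error path must free it, or funnel through a single cleanup label as kernel code often does:

    #include <stdlib.h>

    static int tag_alloc_demo(unsigned int *tag)
    {
            (void)tag;
            return -1;   /* always fails, to exercise the error path */
    }

    static int nvmd_req_demo(void)
    {
            unsigned int tag;
            void *ctx = calloc(1, 64);   /* stands in for fw_control_context */
            int rc;

            if (!ctx)
                    return -1;
            rc = tag_alloc_demo(&tag);
            if (rc) {
                    free(ctx);   /* the fix: release it on every error path */
                    return rc;
            }
            free(ctx);
            return 0;
    }

    int main(void)
    {
            nvmd_req_demo();
            return 0;
    }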

Added: dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0023-SCSI-pm8001-handle-allocation-failures.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/pm8001/0023-SCSI-pm8001-handle-allocation-failures.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,49 @@
+From: Dan Carpenter <error27 at gmail.com>
+Date: Tue, 17 Aug 2010 13:54:57 +0200
+Subject: [PATCH 23/25] [SCSI] pm8001: handle allocation failures
+
+commit 0caeb91c8d9ae6398bfe46ce70892e965353f613 upstream.
+
+Return -ENOMEM if the allocations fail.
+
+Signed-off-by: Dan Carpenter <error27 at gmail.com>
+Acked-by: Jack Wang <jack_wang at usish.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/pm8001/pm8001_hwi.c |    6 ++++++
+ 1 files changed, 6 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index 9793aa6..d8db013 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -4194,6 +4194,8 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+ 
+ 	nvmd_type = ioctl_payload->minor_function;
+ 	fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
++	if (!fw_control_context)
++		return -ENOMEM;
+ 	fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0];
+ 	fw_control_context->len = ioctl_payload->length;
+ 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+@@ -4272,6 +4274,8 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+ 
+ 	nvmd_type = ioctl_payload->minor_function;
+ 	fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
++	if (!fw_control_context)
++		return -ENOMEM;
+ 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ 	memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
+ 		ioctl_payload->func_specific,
+@@ -4381,6 +4385,8 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
+ 	struct pm8001_ioctl_payload *ioctl_payload = payload;
+ 
+ 	fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
++	if (!fw_control_context)
++		return -ENOMEM;
+ 	fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0];
+ 	if (fw_control->len != 0) {
+ 		if (pm8001_mem_alloc(pm8001_ha->pdev,
+-- 
+1.7.4.4
+
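
Companion to the previous patch: the kzalloc() results themselves are now tested before first use, so an allocation failure returns -ENOMEM instead of oopsing on a NULL pointer later. Minimal shape, with calloc() standing in for kzalloc():

    #include <errno.h>
    #include <stdlib.h>

    static int get_nvmd_req_demo(void)
    {
            void *fw_control_context = calloc(1, 64);   /* stands in for kzalloc */

            if (!fw_control_context)
                    return -ENOMEM;   /* the check this patch adds */
            /* ... build and post the request ... */
            free(fw_control_context);
            return 0;
    }

    int main(void)
    {
            return get_nvmd_req_demo() ? 1 : 0;
    }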

Added: dists/squeeze/linux-2.6/debian/patches/features/all/wl1251-add-support-for-PG11-chips.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/wl1251-add-support-for-PG11-chips.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,32 @@
+From: David-John Willis <John.Willis at Distant-earth.com>
+Date: Thu, 15 Oct 2009 14:38:16 +0100
+Subject: [PATCH] wl1251: add support for PG11 chips.
+
+commit 2c759e03b3b7639fff23ec3b7bab64a35ca0914f upstream.
+
+This simple patch adds support for the PG11 variant of the WL1251 chip as
+used on the OpenPandora OMAP3 device.
+
+Signed-off-by: David-John Willis <John.Willis at Distant-earth.com>
+Signed-off-by: John W. Linville <linville at tuxdriver.com>
+---
+ drivers/net/wireless/wl12xx/wl1251_main.c |    3 +++
+ 1 files changed, 3 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
+index 23ac503..ec01e9d 100644
+--- a/drivers/net/wireless/wl12xx/wl1251_main.c
++++ b/drivers/net/wireless/wl12xx/wl1251_main.c
+@@ -185,6 +185,9 @@ static int wl1251_chip_wakeup(struct wl1251 *wl)
+ 		break;
+ 	case CHIP_ID_1251_PG10:
+ 	case CHIP_ID_1251_PG11:
++		wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG11)",
++			     wl->chip_id);
++		break;
+ 	default:
+ 		wl1251_error("unsupported chip id: 0x%x", wl->chip_id);
+ 		ret = -ENODEV;
+-- 
+1.7.4.4
+
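
Before this patch the PG10/PG11 case labels had no body of their own and fell through to the default arm, so the chip was rejected with -ENODEV. Adding the debug print and a break accepts it. The switch shape in a standalone sketch (the chip-ID values here are made up):

    #include <stdio.h>

    enum { CHIP_PG10_DEMO = 1, CHIP_PG11_DEMO = 2 };   /* made-up IDs */

    static int chip_wakeup_demo(unsigned int chip_id)
    {
            switch (chip_id) {
            case CHIP_PG10_DEMO:
            case CHIP_PG11_DEMO:
                    printf("chip id 0x%x (1251 PG11)\n", chip_id);
                    break;   /* the added break stops the fall-through */
            default:
                    printf("unsupported chip id: 0x%x\n", chip_id);
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            return chip_wakeup_demo(CHIP_PG11_DEMO);
    }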

Added: dists/squeeze/linux-2.6/debian/patches/features/x86/ata-Intel-IDE-R-support.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/x86/ata-Intel-IDE-R-support.patch	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,100 @@
+From: Alan Cox <alan at linux.intel.com>
+Date: Tue, 28 Sep 2010 13:19:38 +0100
+Subject: [PATCH] ata: Intel IDE-R support
+
+commit 60039a5295b3d82a48fe132c699987d2e1408675 upstream.
+
+Intel IDE-R devices are part of the Intel AMT management setup. They don't
+have any special configuration registers or settings so the ata_generic
+driver will support them fully.
+
+Rather than add a huge table of IDs for each chipset and keep sending in
+new ones this patch autodetects them.
+
+Signed-off-by: Alan Cox <alan at linux.intel.com>
+Acked-by: Tejun Heo <tj at kernel.org>
+Signed-off-by: Jeff Garzik <jgarzik at redhat.com>
+[bwh: Adjust context for 2.6.32]
+---
+--- a/drivers/ata/ata_generic.c
++++ b/drivers/ata/ata_generic.c
+@@ -35,6 +35,7 @@
+ enum {
+ 	ATA_GEN_CLASS_MATCH		= (1 << 0),
+ 	ATA_GEN_FORCE_DMA		= (1 << 1),
++	ATA_GEN_INTEL_IDER		= (1 << 2),
+ };
+ 
+ /**
+@@ -113,6 +114,49 @@ static struct ata_port_operations generic_port_ops = {
+ static int all_generic_ide;		/* Set to claim all devices */
+ 
+ /**
++ *	is_intel_ider		-	identify intel IDE-R devices
++ *	@dev: PCI device
++ *
++ *	Distinguish Intel IDE-R controller devices from other Intel IDE
++ *	devices. IDE-R devices have no timing registers and are in
++ *	most respects virtual. They should be driven by the ata_generic
++ *	driver.
++ *
++ *	IDE-R devices have PCI offset 0xF8.L as zero, later Intel ATA has
++ *	it non zero. All Intel ATA has 0x40 writable (timing), but it is
++ *	not writable on IDE-R devices (this is guaranteed).
++ */
++
++static int is_intel_ider(struct pci_dev *dev)
++{
++	/* For Intel IDE the value at 0xF8 is only zero on IDE-R
++	   interfaces */
++	u32 r;
++	u16 t;
++
++	/* Check the manufacturing ID, it will be zero for IDE-R */
++	pci_read_config_dword(dev, 0xF8, &r);
++	/* Not IDE-R: punt so that ata_(old)piix gets it */
++	if (r != 0)
++		return 0;
++	/* 0xF8 will also be zero on some early Intel IDE devices
++	   but they will have a sane timing register */
++	pci_read_config_word(dev, 0x40, &t);
++	if (t != 0)
++		return 0;
++	/* Finally check if the timing register is writable so that
++	   we eliminate any early devices hot-docked in a docking
++	   station */
++	pci_write_config_word(dev, 0x40, 1);
++	pci_read_config_word(dev, 0x40, &t);
++	if (t) {
++		pci_write_config_word(dev, 0x40, 0);
++		return 0;
++	}
++	return 1;
++}
++
++/**
+  *	ata_generic_init		-	attach generic IDE
+  *	@dev: PCI device found
+  *	@id: match entry
+@@ -138,6 +182,10 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
+ 	if ((id->driver_data & ATA_GEN_CLASS_MATCH) && all_generic_ide == 0)
+ 		return -ENODEV;
+ 
++	if (id->driver_data & ATA_GEN_INTEL_IDER)
++		if (!is_intel_ider(dev))
++			return -ENODEV;
++
+ 	/* Devices that need care */
+ 	if (dev->vendor == PCI_VENDOR_ID_UMC &&
+ 	    dev->device == PCI_DEVICE_ID_UMC_UM8886A &&
+@@ -188,6 +236,10 @@ static struct pci_device_id ata_generic[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO), },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2),  },
++	/* Intel, IDE class device */
++	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
++	  PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 
++	  .driver_data = ATA_GEN_INTEL_IDER },
+ 	/* Must come last. If you add entries adjust this table appropriately */
+ 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL),
+ 	  .driver_data = ATA_GEN_CLASS_MATCH },
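
The detection logic of is_intel_ider() reduced to its decision tree (illustration only; looks_like_ider and its arguments are hypothetical): claim the device as IDE-R only when the manufacturing ID at config offset 0xF8 reads zero, the timing register at 0x40 reads zero, and a write to 0x40 does not stick:

    #include <stdio.h>

    static int looks_like_ider(unsigned int reg_f8, unsigned short reg_40,
                               unsigned short reg_40_after_write)
    {
            if (reg_f8 != 0)
                    return 0;   /* later Intel ATA: let ata_piix claim it */
            if (reg_40 != 0)
                    return 0;   /* early Intel IDE with a sane timing register */
            if (reg_40_after_write != 0)
                    return 0;   /* timing register is writable, so not IDE-R */
            return 1;
    }

    int main(void)
    {
            printf("%d\n", looks_like_ider(0, 0, 0));   /* 1: IDE-R */
            printf("%d\n", looks_like_ider(0, 0, 1));   /* 0: writable timing reg */
            return 0;
    }

The new PCI table entry then restricts this probe to Intel devices of IDE class via ATA_GEN_INTEL_IDER, so everything else still falls to the class-match catch-all at the end of the table.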

Added: dists/squeeze/linux-2.6/debian/patches/series/35
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/series/35	Wed May  4 01:10:25 2011	(r17295)
@@ -0,0 +1,131 @@
+# Storage
++ features/all/hpsa/0001-SCSI-hpsa-add-driver-for-HP-Smart-Array-controllers.patch
++ features/all/hpsa/0002-SCSI-hpsa-fix-typo-in-comments.patch
++ features/all/hpsa/0003-SCSI-hpsa-Use-kernel-integer-types-not-userland-ones.patch
++ features/all/hpsa/0004-SCSI-hpsa-avoid-unwanted-promotion-from-unsigned-to-.patch
++ features/all/hpsa/0005-SCSI-hpsa-Use-BUG_ON-instead-of-an-if-statement.patch
++ features/all/hpsa/0006-SCSI-hpsa-make-adjust_hpsa_scsi_table-return-void.patch
++ features/all/hpsa/0007-SCSI-hpsa-remove-superfluous-returns-from-void-funct.patch
++ features/all/hpsa/0008-SCSI-hpsa-return-proper-error-codes-not-minus-one.patch
++ features/all/hpsa/0009-SCSI-hpsa-use-sizeof-not-an-inline-constant-in-memse.patch
++ features/all/hpsa/0010-SCSI-hpsa-use-kzalloc-not-kmalloc-plus-memset.patch
++ features/all/hpsa/0011-SCSI-hpsa-remove-unwanted-debug-code.patch
++ features/all/hpsa/0012-SCSI-hpsa-eliminate-unnecessary-memcpys.patch
++ features/all/hpsa/0013-SCSI-hpsa-make-tag-macros-into-functions.patch
++ features/all/hpsa/0014-SCSI-hpsa-fix-some-debug-printks-to-use-dev_dbg-inst.patch
++ features/all/hpsa/0015-SCSI-hpsa-interrupt-pending-function-should-return-b.patch
++ features/all/hpsa/0016-SCSI-hpsa-Allow-multiple-command-completions-per-int.patch
++ features/all/hpsa/0017-SCSI-hpsa-add-pci-ids-for-storageworks-1210m-remove-.patch
++ features/all/hpsa/0018-SCSI-hpsa-Fix-p1210m-LUN-assignment.patch
++ features/all/hpsa/0019-SCSI-hpsa-Return-DID_RESET-for-commands-which-comple.patch
++ features/all/hpsa/0020-SCSI-hpsa-Retry-commands-completing-with-a-sense-key.patch
++ features/all/hpsa/0021-SCSI-hpsa-Don-t-return-DID_NO_CONNECT-when-a-device-.patch
++ features/all/hpsa/0022-SCSI-hpsa-Add-an-shost_to_hba-helper-function.patch
++ features/all/hpsa/0023-SCSI-hpsa-use-scan_start-and-scan_finished-entry-poi.patch
++ features/all/hpsa/0024-SCSI-hpsa-when-resetting-devices-print-out-which-dev.patch
++ features/all/hpsa/0025-SCSI-hpsa-print-all-the-bytes-of-the-CDB-not-just-th.patch
++ features/all/hpsa/0026-SCSI-hpsa-clarify-obscure-comment-in-adjust_hpsa_scs.patch
++ features/all/hpsa/0027-SCSI-hpsa-Fix-hpsa_find_scsi_entry-so-that-it-doesn-.patch
++ features/all/hpsa/0028-SCSI-hpsa-fix-bug-in-adjust_hpsa_scsi_table.patch
++ features/all/hpsa/0029-SCSI-hpsa-eliminate-lock_kernel-in-compat_ioctl.patch
++ features/all/hpsa/0030-SCSI-hpsa-Reorder-compat-ioctl-functions-to-eliminat.patch
++ features/all/hpsa/0031-SCSI-hpsa-update-driver-version-to-2.0.1-3.patch
++ features/all/hpsa/0033-SCSI-hpsa-fix-firmwart-typo.patch
++ features/all/hpsa/0034-SCSI-hpsa-fix-scsi-status-mis-shift.patch
++ features/all/hpsa/0035-SCSI-hpsa-return-ENOMEM-not-1.patch
++ features/all/hpsa/0036-SCSI-hpsa-remove-scan-thread.patch
++ features/all/hpsa/0037-SCSI-hpsa-mark-hpsa_pci_init-as-__devinit.patch
++ features/all/hpsa/0038-SCSI-hpsa-Clarify-calculation-of-padding-for-command.patch
++ features/all/hpsa/0039-SCSI-hpsa-Increase-the-number-of-scatter-gather-elem.patch
++ features/all/hpsa/0040-SCSI-hpsa-remove-unused-members-next-prev-and-retry_.patch
++ features/all/hpsa/0041-SCSI-hpsa-remove-unneeded-defines.patch
++ features/all/hpsa/0042-SCSI-hpsa-save-pdev-pointer-in-per-hba-structure-ear.patch
++ features/all/hpsa/0043-SCSI-hpsa-factor-out-hpsa_lookup_board_id.patch
++ features/all/hpsa/0044-SCSI-hpsa-factor-out-hpsa_board_disabled.patch
++ features/all/hpsa/0045-SCSI-hpsa-remove-redundant-board_id-parameter-from-h.patch
++ features/all/hpsa/0046-SCSI-hpsa-factor-out-hpsa_find_memory_BAR.patch
++ features/all/hpsa/0047-SCSI-hpsa-factor-out-hpsa_wait_for_board_ready.patch
++ features/all/hpsa/0048-SCSI-hpsa-factor-out-hpsa_find_cfgtables.patch
++ features/all/hpsa/0049-SCSI-hpsa-fix-leak-of-ioremapped-memory-in-hpsa_pci_.patch
++ features/all/hpsa/0050-SCSI-hpsa-hpsa-factor-out-hpsa_find_board_params.patch
++ features/all/hpsa/0051-SCSI-hpsa-factor-out-hpsa-CISS-signature-present.patch
++ features/all/hpsa/0052-SCSI-hpsa-factor-out-hpsa_enable_scsi_prefetch.patch
++ features/all/hpsa/0053-SCSI-hpsa-factor-out-hpsa_p600_dma_prefetch_quirk.patch
++ features/all/hpsa/0054-SCSI-hpsa-factor-out-hpsa_enter_simple_mode.patch
++ features/all/hpsa/0055-SCSI-hpsa-check-that-simple-mode-is-supported.patch
++ features/all/hpsa/0056-SCSI-hpsa-clean-up-debug-ifdefs.patch
++ features/all/hpsa/0057-SCSI-hpsa-mark-hpsa_mark_hpsa_put_ctlr_into_performa.patch
++ features/all/hpsa/0058-SCSI-hpsa-factor-out-hpsa_wait_for_mode_change_ack.patch
++ features/all/hpsa/0059-SCSI-hpsa-remove-unused-variable-trans_offset.patch
++ features/all/hpsa/0060-SCSI-hpsa-factor-out-hpsa_enter_performant_mode.patch
++ features/all/hpsa/0061-SCSI-hpsa-remove-unused-firm_ver-member-of-the-per-h.patch
++ features/all/hpsa/0062-SCSI-hpsa-Add-hpsa.txt-to-Documentation-scsi.patch
++ features/all/hpsa/0063-SCSI-hpsa-expose-controller-firmware-revision-via-sy.patch
++ features/all/hpsa/0064-SCSI-hpsa-fix-block-fetch-table-problem.patch
++ features/all/hpsa/0065-SCSI-hpsa-add-new-controllers.patch
++ features/all/hpsa/0066-SCSI-hpsa-Make-hpsa_allow_any-1-boot-param-enable-Co.patch
++ features/all/hpsa/0067-SCSI-hpsa-make-hpsa_find_memory_BAR-not-require-the-.patch
++ features/all/hpsa/0068-SCSI-hpsa-factor-out-hpsa_find_cfg_addrs.patch
++ features/all/hpsa/0069-SCSI-hpsa-factor-out-the-code-to-reset-controllers-o.patch
++ features/all/hpsa/0070-SCSI-hpsa-Fix-hard-reset-code.patch
++ features/all/hpsa/0071-SCSI-hpsa-forbid-hard-reset-of-640x-boards.patch
++ features/all/hpsa/0072-SCSI-hpsa-separate-intx-and-msi-msix-interrupt-handl.patch
++ features/all/hpsa/0073-SCSI-hpsa-sanitize-max-commands.patch
++ features/all/hpsa/0074-SCSI-hpsa-disable-doorbell-reset-on-reset_devices.patch
++ features/all/hpsa/0077-SCSI-hpsa-fix-redefinition-of-PCI_DEVICE_ID_CISSF.patch
++ features/all/hpsa/0078-SCSI-hpsa-do-not-consider-firmware-revision-when-loo.patch
++ features/all/hpsa/0079-SCSI-hpsa-do-not-consider-RAID-level-to-be-part-of-d.patch
++ debian/hpsa-Remove-device-IDs-currently-handled-by-cciss.patch
++ features/all/pm8001/0001-SCSI-pm8001-add-SAS-SATA-HBA-driver.patch
++ features/all/pm8001/0002-SCSI-pm8001-add-reinitialize-SPC-parameters-before-p.patch
++ features/all/pm8001/0003-SCSI-pm8001-enhance-IOMB-process-modules.patch
++ features/all/pm8001/0004-SCSI-pm8001-Fixes-for-tag-alloc-error-goto-and-code-.patch
++ features/all/pm8001/0005-SCSI-pm8001-Fix-for-sata-io-circular-lock-dependency.patch
++ features/all/pm8001/0006-SCSI-pm8001-enhance-error-handle-for-IO-patch.patch
++ features/all/pm8001/0007-SCSI-pm8001-fix-endian-issues-with-SAS-address.patch
++ features/all/pm8001/0008-SCSI-pm8001-set-SSC-down-spreading-only-to-get-less-.patch
++ features/all/pm8001/0009-SCSI-pm8001-fix-potential-NULL-pointer-dereference.patch
++ features/all/pm8001/0010-SCSI-pm8001-bit-set-pm8001_ha-flags.patch
++ features/all/pm8001/0011-SCSI-pm8001-do-not-reset-local-sata-as-it-will-not-b.patch
++ features/all/pm8001/0012-SCSI-pm8001-enable-read-HBA-SAS-address-from-VPD.patch
++ features/all/pm8001/0013-SCSI-pm8001-misc-code-cleanup.patch
++ features/all/pm8001/0014-SCSI-pm8001-Use-kzalloc-for-allocating-only-one-thin.patch
++ features/all/pm8001/0017-SCSI-pm8001-drop-redundant-memset.patch
++ features/all/pm8001/0018-SCSI-pm8001-potential-null-dereference-in-pm8001_dev.patch
++ features/all/pm8001/0022-SCSI-pm8001-introduce-missing-kfree.patch
++ features/all/pm8001/0023-SCSI-pm8001-handle-allocation-failures.patch
++ features/all/SCSI-bnx2i-Add-5771E-device-support-to-bnx2i-driver.patch
++ features/x86/ata-Intel-IDE-R-support.patch
+
+# Networking
++ features/all/kernel.h-add-pr_warn-for-symmetry-to-dev_warn-netdev.patch
++ features/all/netdevice.h-Add-netdev_printk-helpers-like-dev_printk.patch
++ features/all/netdevice.h-Add-netif_printk-helpers.patch
++ features/all/net-use-helpers-to-access-mc-list-V2.patch
++ features/all/wl1251-add-support-for-PG11-chips.patch
++ features/all/bnx2x-Add-support-for-BCM84823.patch
++ features/all/bnx2x-Fix-10G-mode-in-BCM8481-BCM84823.patch
++ features/all/ar9170-add-support-for-NEC-WL300NU-G-USB-dongle.patch
++ features/all/ar9170usb-add-Sphairon-Homelink-1202-USB-ID.patch
++ features/all/ar9170usb-add-vendor-and-device-ID-for-Qwest-Actiont.patch
++ features/all/bna/0001-bna-Brocade-10Gb-Ethernet-device-driver.patch
++ features/all/bna/0002-bna-Delete-get_flags-and-set_flags-ethtool-methods.patch
++ features/all/bna/0003-bna-Fixed-build-break-for-allyesconfig.patch
++ features/all/bna/0004-bna-fix-stats-handling.patch
++ features/all/bna/0006-NET-bna-fix-lock-imbalance.patch
++ features/all/bna/0007-bna-Check-for-NULL-before-deref-in-bnad_cb_tx_cleanu.patch
++ features/all/bna/0008-bna-off-by-one.patch
++ features/all/bna/0009-drivers-net-return-operator-cleanup.patch
++ features/all/bna/0010-bna-fix-interrupt-handling.patch
++ features/all/bna/0011-bna-scope-and-dead-code-cleanup.patch
++ features/all/bna/0013-bna-TxRx-and-datapath-fix.patch
++ features/all/bna/0014-bna-Port-enable-disable-sync-and-txq-priority-fix.patch
++ features/all/bna/0015-bna-Fix-ethtool-register-dump-and-reordered-an-API.patch
++ features/all/bna/0016-bna-Enable-pure-priority-tagged-packet-reception-and.patch
++ features/all/bna/0017-bna-Fix-for-TX-queue.patch
++ features/all/bna/0018-bna-IOC-uninit-check-and-misc-cleanup.patch
++ features/all/bna/0019-bna-Removed-unused-code.patch
++ features/all/bna/0020-bna-Restore-VLAN-filter-table.patch
++ features/all/bna/0021-bna-IOC-failure-auto-recovery-fix.patch
++ features/all/bna/0022-bna-Update-the-driver-version-to-2.3.2.3.patch
++ features/all/bna/0023-bna-Remove-unnecessary-memset-0.patch


