[kernel] r18403 - in dists/squeeze/linux-2.6/debian: . config/kernelarch-x86 patches/debian patches/features/all patches/features/x86 patches/series
Ben Hutchings
benh at alioth.debian.org
Wed Dec 21 06:29:18 UTC 2011
Author: benh
Date: Wed Dec 21 06:29:16 2011
New Revision: 18403
Log:
[x86] Add isci driver from Linux 3.1 (Closes: #652857)
Added:
dists/squeeze/linux-2.6/debian/patches/debian/libsas-Avoid-ABI-change-from-addition-of-sas_ha_stru.patch
dists/squeeze/linux-2.6/debian/patches/features/all/kernel.h-add-BUILD_BUG_ON_NOT_POWER_OF_2.patch
dists/squeeze/linux-2.6/debian/patches/features/all/libsas-fix-definition-of-wideport-include-local-sas-.patch
dists/squeeze/linux-2.6/debian/patches/features/x86/isci-Import-from-Linux-3.1.patch
dists/squeeze/linux-2.6/debian/patches/features/x86/x86-Introduce-pci_map_biosrom.patch
Modified:
dists/squeeze/linux-2.6/debian/changelog
dists/squeeze/linux-2.6/debian/config/kernelarch-x86/config
dists/squeeze/linux-2.6/debian/patches/series/40
Modified: dists/squeeze/linux-2.6/debian/changelog
==============================================================================
--- dists/squeeze/linux-2.6/debian/changelog Wed Dec 21 03:37:24 2011 (r18402)
+++ dists/squeeze/linux-2.6/debian/changelog Wed Dec 21 06:29:16 2011 (r18403)
@@ -56,6 +56,7 @@
and the bug report which this closes: #651367.
* [vserver] Update patch to 2.6.32.48-vs2.3.0.36.29.8
- nfs: Fix client uid/gid caching (Closes: #633526)
+ * [x86] Add isci driver from Linux 3.1 (Closes: #652857)
[ Ian Campbell ]
* xen: backport upstream (xen.git#xen/stable-2.6.32.y) fixes to event
Modified: dists/squeeze/linux-2.6/debian/config/kernelarch-x86/config
==============================================================================
--- dists/squeeze/linux-2.6/debian/config/kernelarch-x86/config Wed Dec 21 03:37:24 2011 (r18402)
+++ dists/squeeze/linux-2.6/debian/config/kernelarch-x86/config Wed Dec 21 06:29:16 2011 (r18403)
@@ -1049,6 +1049,7 @@
CONFIG_SCSI_EATA_MAX_TAGS=16
CONFIG_SCSI_FUTURE_DOMAIN=m
CONFIG_SCSI_GDTH=m
+CONFIG_SCSI_ISCI=m
CONFIG_SCSI_IPS=m
CONFIG_SCSI_INITIO=m
CONFIG_SCSI_SYM53C8XX_2=m
Added: dists/squeeze/linux-2.6/debian/patches/debian/libsas-Avoid-ABI-change-from-addition-of-sas_ha_stru.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/debian/libsas-Avoid-ABI-change-from-addition-of-sas_ha_stru.patch Wed Dec 21 06:29:16 2011 (r18403)
@@ -0,0 +1,45 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Wed, 21 Dec 2011 06:01:51 +0000
+Subject: [PATCH 3/5] libsas: Avoid ABI change from addition of
+ sas_ha_struct::strict_wide_ports
+
+This structure is allocated by drivers, so we cannot extend it.
+However, the private 'state' field takes up at least 4 bytes but only
+requires 1 bit, and the 'strict_wide_ports' field also only requires 1
+bit. Change each of them to u8 and fit the 'strict_wide_ports' field
+into the spare bytes.
+---
+ include/scsi/libsas.h | 9 +++++++--
+ 1 files changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
+index c464e93..bee9229 100644
+--- a/include/scsi/libsas.h
++++ b/include/scsi/libsas.h
+@@ -338,7 +338,14 @@ struct sas_ha_struct {
+ struct sas_ha_event ha_events[HA_NUM_EVENTS];
+ unsigned long pending;
+
++#ifdef __GENKSYMS__
+ enum sas_ha_state state;
++#else
++ u8 state;
++ u8 strict_wide_ports; /* both sas_addr and attached_sas_addr must match
++ * their siblings when forming wide ports */
++ u8 pad[2];
++#endif
+ spinlock_t state_lock;
+
+ struct scsi_core core;
+@@ -359,8 +366,6 @@ struct sas_ha_struct {
+ /* The class calls this to send a task for execution. */
+ int lldd_max_execute_num;
+ int lldd_queue_size;
+- int strict_wide_ports; /* both sas_addr and attached_sas_addr must match
+- * their siblings when forming wide ports */
+
+ /* LLDD calls these to notify the class of an event. */
+ void (*notify_ha_event)(struct sas_ha_struct *, enum ha_event);
+--
+1.7.7.3
+
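For reference, a minimal sketch (all names hypothetical) of the ABI-preservation
idiom used in the patch above: genksyms computes symbol version CRCs from the
declarations it parses with __GENKSYMS__ defined, so keeping the old declaration
in the #ifdef branch preserves the exported CRC while the compiler builds the
new, same-size layout.

    enum example_state { EX_STOPPED, EX_RUNNING };

    struct example {
    #ifdef __GENKSYMS__
            enum example_state state;  /* what genksyms hashes: 4 bytes */
    #else
            u8 state;                  /* only 1 bit is really needed */
            u8 new_flag;               /* new field fits in former padding */
            u8 pad[2];                 /* keep the original 4-byte size */
    #endif
            spinlock_t lock;           /* later fields keep their offsets */
    };
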
Added: dists/squeeze/linux-2.6/debian/patches/features/all/kernel.h-add-BUILD_BUG_ON_NOT_POWER_OF_2.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/kernel.h-add-BUILD_BUG_ON_NOT_POWER_OF_2.patch Wed Dec 21 06:29:16 2011 (r18403)
@@ -0,0 +1,54 @@
+From: Roland Dreier <rdreier at cisco.com>
+Date: Fri, 15 Jan 2010 17:01:22 -0800
+Subject: [PATCH 1/5] kernel.h: add BUILD_BUG_ON_NOT_POWER_OF_2()
+
+commit cc8ef6eb21e964b1c5eb97b2d0e8ac9893e1bf86 upstream.
+
+Add BUILD_BUG_ON_NOT_POWER_OF_2()
+
+When code relies on a constant being a power of 2:
+
+ #define FOO 512 /* must be a power of 2 */
+
+it would be nice to be able to do:
+
+ BUILD_BUG_ON(!is_power_of_2(FOO));
+
+However, applying an inline function does not result in a compile-time
+constant that can be used with BUILD_BUG_ON(), so trying that
+results in:
+
+ error: bit-field '<anonymous>' width not an integer constant
+
+As suggested by akpm, rather than monkeying around with is_power_of_2()
+and risking gcc warts about constant expressions, just create a macro
+BUILD_BUG_ON_NOT_POWER_OF_2() to encapsulate this common requirement.
+
+Signed-off-by: Roland Dreier <rolandd at cisco.com>
+Cc: Bart Van Assche <bvanassche at acm.org>
+Cc: David Dillow <dave at thedillows.org>
+Cc: "Robert P. J. Day" <rpjday at crashcourse.ca>
+Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+---
+ include/linux/kernel.h | 4 ++++
+ 1 files changed, 4 insertions(+), 0 deletions(-)
+
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index d211e20..798bee3 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -690,6 +690,10 @@ struct sysinfo {
+ /* Force a compilation error if condition is constant and true */
+ #define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
+
++/* Force a compilation error if a constant expression is not a power of 2 */
++#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
++ BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
++
+ /* Force a compilation error if condition is true, but also produce a
+ result (of value 0 and type size_t), so the expression can be used
+ e.g. in a structure initializer (or where-ever else comma expressions
+--
+1.7.7.3
+
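A brief usage sketch for the macro added above (RING_SIZE and ring_init are
hypothetical): because the argument must be a compile-time constant
expression, a non-power-of-2 size is rejected at build time rather than
discovered at run time.

    #define RING_SIZE 512   /* must be a power of 2 */

    static int ring_init(void)
    {
            BUILD_BUG_ON_NOT_POWER_OF_2(RING_SIZE);  /* compiles: 512 = 2^9 */
            /* BUILD_BUG_ON_NOT_POWER_OF_2(RING_SIZE + 1); would break the build */
            return 0;
    }
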
Added: dists/squeeze/linux-2.6/debian/patches/features/all/libsas-fix-definition-of-wideport-include-local-sas-.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/all/libsas-fix-definition-of-wideport-include-local-sas-.patch Wed Dec 21 06:29:16 2011 (r18403)
@@ -0,0 +1,84 @@
+From: Dan Williams <dan.j.williams at intel.com>
+Date: Fri, 1 Oct 2010 13:55:52 -0700
+Subject: [PATCH 2/5] libsas: fix definition of wideport, include local sas
+ address
+
+commit 00f0254ed9b19164d416dc2e3c2e81eda55a6faf upstream.
+
+To date libsas has only looked at the attached sas address when
+determining the formation of wide ports. The specification and some
+hardware expect that phys with different addresses will not form a wide
+port unless the local peer phys also match each other. Introduce a flag
+to select stricter behavior at sas_register_ha() time. The flag can be
+dropped once it is known that all libsas users expect the same behavior.
+
+Current drivers just initialize this field to zero and get the
+traditional behavior.
+
+Reported-by: Patrick Thomson <patrick.s.thomson at intel.com>
+Signed-off-by: Dan Williams <dan.j.williams at intel.com>
+Signed-off-by: James Bottomley <James.Bottomley at suse.de>
+---
+ drivers/scsi/libsas/sas_port.c | 18 +++++++++++++-----
+ include/scsi/libsas.h | 2 ++
+ 2 files changed, 15 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
+index fe8b74c..5257fdf 100644
+--- a/drivers/scsi/libsas/sas_port.c
++++ b/drivers/scsi/libsas/sas_port.c
+@@ -28,6 +28,17 @@
+ #include <scsi/scsi_transport_sas.h>
+ #include "../scsi_sas_internal.h"
+
++static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy *phy)
++{
++ struct sas_ha_struct *sas_ha = phy->ha;
++
++ if (memcmp(port->attached_sas_addr, phy->attached_sas_addr,
++ SAS_ADDR_SIZE) != 0 || (sas_ha->strict_wide_ports &&
++ memcmp(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE) != 0))
++ return false;
++ return true;
++}
++
+ /**
+ * sas_form_port -- add this phy to a port
+ * @phy: the phy of interest
+@@ -45,8 +56,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
+ unsigned long flags;
+
+ if (port) {
+- if (memcmp(port->attached_sas_addr, phy->attached_sas_addr,
+- SAS_ADDR_SIZE) != 0)
++ if (!phy_is_wideport_member(port, phy))
+ sas_deform_port(phy);
+ else {
+ SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
+@@ -62,9 +72,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
+ port = sas_ha->sas_port[i];
+ spin_lock(&port->phy_list_lock);
+ if (*(u64 *) port->sas_addr &&
+- memcmp(port->attached_sas_addr,
+- phy->attached_sas_addr, SAS_ADDR_SIZE) == 0 &&
+- port->num_phys > 0) {
++ phy_is_wideport_member(port, phy) && port->num_phys > 0) {
+ /* wide port */
+ SAS_DPRINTK("phy%d matched wide port%d\n", phy->id,
+ port->id);
+diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
+index e78d3b6..c464e93 100644
+--- a/include/scsi/libsas.h
++++ b/include/scsi/libsas.h
+@@ -359,6 +359,8 @@ struct sas_ha_struct {
+ /* The class calls this to send a task for execution. */
+ int lldd_max_execute_num;
+ int lldd_queue_size;
++ int strict_wide_ports; /* both sas_addr and attached_sas_addr must match
++ * their siblings when forming wide ports */
+
+ /* LLDD calls these to notify the class of an event. */
+ void (*notify_ha_event)(struct sas_ha_struct *, enum ha_event);
+--
+1.7.7.3
+
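A sketch of the driver-side opt-in (assumed usage; the function name is
hypothetical): an LLDD that wants the stricter matching sets the flag before
calling sas_register_ha(), and phy_is_wideport_member() above then also
compares the local sas_addr of the phys.

    static int example_lldd_register(struct sas_ha_struct *sas_ha)
    {
            /* require local sas_addr, not just attached_sas_addr, to match */
            sas_ha->strict_wide_ports = 1;
            return sas_register_ha(sas_ha);
    }
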
Added: dists/squeeze/linux-2.6/debian/patches/features/x86/isci-Import-from-Linux-3.1.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/x86/isci-Import-from-Linux-3.1.patch Wed Dec 21 06:29:16 2011 (r18403)
@@ -0,0 +1,23949 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Wed, 21 Dec 2011 04:20:03 +0000
+Subject: [PATCH 5/5] isci: Import from Linux 3.1
+
+---
+ drivers/scsi/Kconfig | 14 +
+ drivers/scsi/Makefile | 1 +
+ drivers/scsi/isci/Makefile | 8 +
+ drivers/scsi/isci/firmware/Makefile | 19 +
+ drivers/scsi/isci/firmware/README | 36 +
+ drivers/scsi/isci/firmware/create_fw.c | 99 +
+ drivers/scsi/isci/firmware/create_fw.h | 77 +
+ drivers/scsi/isci/host.c | 2762 ++++++++++++++++++++
+ drivers/scsi/isci/host.h | 545 ++++
+ drivers/scsi/isci/init.c | 574 +++++
+ drivers/scsi/isci/isci.h | 538 ++++
+ drivers/scsi/isci/phy.c | 1325 ++++++++++
+ drivers/scsi/isci/phy.h | 504 ++++
+ drivers/scsi/isci/port.c | 1757 +++++++++++++
+ drivers/scsi/isci/port.h | 306 +++
+ drivers/scsi/isci/port_config.c | 754 ++++++
+ drivers/scsi/isci/probe_roms.c | 243 ++
+ drivers/scsi/isci/probe_roms.h | 249 ++
+ drivers/scsi/isci/registers.h | 1946 ++++++++++++++
+ drivers/scsi/isci/remote_device.c | 1501 +++++++++++
+ drivers/scsi/isci/remote_device.h | 352 +++
+ drivers/scsi/isci/remote_node_context.c | 627 +++++
+ drivers/scsi/isci/remote_node_context.h | 224 ++
+ drivers/scsi/isci/remote_node_table.c | 598 +++++
+ drivers/scsi/isci/remote_node_table.h | 188 ++
+ drivers/scsi/isci/request.c | 3393 +++++++++++++++++++++++++
+ drivers/scsi/isci/request.h | 448 ++++
+ drivers/scsi/isci/sas.h | 219 ++
+ drivers/scsi/isci/scu_completion_codes.h | 283 ++
+ drivers/scsi/isci/scu_event_codes.h | 336 +++
+ drivers/scsi/isci/scu_remote_node_context.h | 229 ++
+ drivers/scsi/isci/scu_task_context.h | 942 +++++++
+ drivers/scsi/isci/task.c | 1676 ++++++++++++
+ drivers/scsi/isci/task.h | 367 +++
+ drivers/scsi/isci/unsolicited_frame_control.c | 225 ++
+ drivers/scsi/isci/unsolicited_frame_control.h | 278 ++
+ 36 files changed, 23643 insertions(+), 0 deletions(-)
+ create mode 100644 drivers/scsi/isci/Makefile
+ create mode 100644 drivers/scsi/isci/firmware/Makefile
+ create mode 100644 drivers/scsi/isci/firmware/README
+ create mode 100644 drivers/scsi/isci/firmware/create_fw.c
+ create mode 100644 drivers/scsi/isci/firmware/create_fw.h
+ create mode 100644 drivers/scsi/isci/host.c
+ create mode 100644 drivers/scsi/isci/host.h
+ create mode 100644 drivers/scsi/isci/init.c
+ create mode 100644 drivers/scsi/isci/isci.h
+ create mode 100644 drivers/scsi/isci/phy.c
+ create mode 100644 drivers/scsi/isci/phy.h
+ create mode 100644 drivers/scsi/isci/port.c
+ create mode 100644 drivers/scsi/isci/port.h
+ create mode 100644 drivers/scsi/isci/port_config.c
+ create mode 100644 drivers/scsi/isci/probe_roms.c
+ create mode 100644 drivers/scsi/isci/probe_roms.h
+ create mode 100644 drivers/scsi/isci/registers.h
+ create mode 100644 drivers/scsi/isci/remote_device.c
+ create mode 100644 drivers/scsi/isci/remote_device.h
+ create mode 100644 drivers/scsi/isci/remote_node_context.c
+ create mode 100644 drivers/scsi/isci/remote_node_context.h
+ create mode 100644 drivers/scsi/isci/remote_node_table.c
+ create mode 100644 drivers/scsi/isci/remote_node_table.h
+ create mode 100644 drivers/scsi/isci/request.c
+ create mode 100644 drivers/scsi/isci/request.h
+ create mode 100644 drivers/scsi/isci/sas.h
+ create mode 100644 drivers/scsi/isci/scu_completion_codes.h
+ create mode 100644 drivers/scsi/isci/scu_event_codes.h
+ create mode 100644 drivers/scsi/isci/scu_remote_node_context.h
+ create mode 100644 drivers/scsi/isci/scu_task_context.h
+ create mode 100644 drivers/scsi/isci/task.c
+ create mode 100644 drivers/scsi/isci/task.h
+ create mode 100644 drivers/scsi/isci/unsolicited_frame_control.c
+ create mode 100644 drivers/scsi/isci/unsolicited_frame_control.h
+
+diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
+index ded0288..6ca0e29 100644
+--- a/drivers/scsi/Kconfig
++++ b/drivers/scsi/Kconfig
+@@ -815,6 +815,20 @@ config SCSI_GDTH
+ To compile this driver as a module, choose M here: the
+ module will be called gdth.
+
++config SCSI_ISCI
++ tristate "Intel(R) C600 Series Chipset SAS Controller"
++ depends on PCI && SCSI
++ depends on X86
++ # (temporary): known alpha quality driver
++ depends on EXPERIMENTAL
++ select SCSI_SAS_LIBSAS
++ select SCSI_SAS_HOST_SMP
++ ---help---
++ This driver supports the 6Gb/s SAS capabilities of the storage
++ control unit found in the Intel(R) C600 series chipset.
++
++	  The experimental tag will be removed after the driver exits alpha.
++
+ config SCSI_GENERIC_NCR5380
+ tristate "Generic NCR5380/53c400 SCSI PIO support"
+ depends on ISA && SCSI
+diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
+index 92a8c50..f126973 100644
+--- a/drivers/scsi/Makefile
++++ b/drivers/scsi/Makefile
+@@ -71,6 +71,7 @@ obj-$(CONFIG_SCSI_AACRAID) += aacraid/
+ obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o
+ obj-$(CONFIG_SCSI_AIC94XX) += aic94xx/
+ obj-$(CONFIG_SCSI_PM8001) += pm8001/
++obj-$(CONFIG_SCSI_ISCI) += isci/
+ obj-$(CONFIG_SCSI_IPS) += ips.o
+ obj-$(CONFIG_SCSI_FD_MCS) += fd_mcs.o
+ obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
+diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile
+new file mode 100644
+index 0000000..3359e10
+--- /dev/null
++++ b/drivers/scsi/isci/Makefile
+@@ -0,0 +1,8 @@
++obj-$(CONFIG_SCSI_ISCI) += isci.o
++isci-objs := init.o phy.o request.o \
++ remote_device.o port.o \
++ host.o task.o probe_roms.o \
++ remote_node_context.o \
++ remote_node_table.o \
++ unsolicited_frame_control.o \
++ port_config.o \
+diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile
+new file mode 100644
+index 0000000..5f54461
+--- /dev/null
++++ b/drivers/scsi/isci/firmware/Makefile
+@@ -0,0 +1,19 @@
++# Makefile for create_fw
++#
++CC=gcc
++CFLAGS=-c -Wall -O2 -g
++LDFLAGS=
++SOURCES=create_fw.c
++OBJECTS=$(SOURCES:.c=.o)
++EXECUTABLE=create_fw
++
++all: $(SOURCES) $(EXECUTABLE)
++
++$(EXECUTABLE): $(OBJECTS)
++ $(CC) $(LDFLAGS) $(OBJECTS) -o $@
++
++.c.o:
++	$(CC) $(CFLAGS) $< -o $@
++
++clean:
++ rm -f *.o $(EXECUTABLE)
+diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README
+new file mode 100644
+index 0000000..8056d2b
+--- /dev/null
++++ b/drivers/scsi/isci/firmware/README
+@@ -0,0 +1,36 @@
++This defines the temporary binary blob we are to pass to the SCU
++driver to emulate the binary firmware that we will eventually be
++able to access via NVRAM on the SCU controller.
++
++The current size of the binary blob is expected to be 149 bytes or larger
++
++Header Types:
++0x1: Phy Masks
++0x2: Phy Gens
++0x3: SAS Addrs
++0xff: End of Data
++
++ID string - u8[12]: "#SCU MAGIC#\0"
++Version - u8: 1
++SubVersion - u8: 0
++
++Header Type - u8: 0x1
++Size - u8: 8
++Phy Mask - u32[8]
++
++Header Type - u8: 0x2
++Size - u8: 8
++Phy Gen - u32[8]
++
++Header Type - u8: 0x3
++Size - u8: 8
++Sas Addr - u64[8]
++
++Header Type - u8: 0xff
++
++
++==============================================================================
++
++Place isci_firmware.bin in /lib/firmware
++Be sure to recreate the initramfs image to include the firmware.
++
+diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c
+new file mode 100644
+index 0000000..c7a2887
+--- /dev/null
++++ b/drivers/scsi/isci/firmware/create_fw.c
+@@ -0,0 +1,99 @@
++#include <stdio.h>
++#include <stdlib.h>
++#include <unistd.h>
++#include <sys/types.h>
++#include <sys/stat.h>
++#include <fcntl.h>
++#include <string.h>
++#include <errno.h>
++#include <asm/types.h>
++#include <strings.h>
++#include <stdint.h>
++
++#include "create_fw.h"
++#include "../probe_roms.h"
++
++int write_blob(struct isci_orom *isci_orom)
++{
++ FILE *fd;
++ int err;
++ size_t count;
++
++ fd = fopen(blob_name, "w+");
++ if (!fd) {
++ perror("Open file for write failed");
++		/* fd is NULL here, so there is nothing to fclose() */
++ return -EIO;
++ }
++
++ count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd);
++ if (count != 1) {
++ perror("Write data failed");
++ fclose(fd);
++ return -EIO;
++ }
++
++ fclose(fd);
++
++ return 0;
++}
++
++void set_binary_values(struct isci_orom *isci_orom)
++{
++ int ctrl_idx, phy_idx, port_idx;
++
++ /* setting OROM signature */
++ strncpy(isci_orom->hdr.signature, sig, strlen(sig));
++ isci_orom->hdr.version = version;
++ isci_orom->hdr.total_block_length = sizeof(struct isci_orom);
++ isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr);
++ isci_orom->hdr.num_elements = num_elements;
++
++ for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) {
++ isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type;
++ isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up =
++ max_num_concurrent_dev_spin_up;
++ isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc =
++ enable_ssc;
++
++ for (port_idx = 0; port_idx < 4; port_idx++)
++ isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask =
++ phy_mask[ctrl_idx][port_idx];
++
++ for (phy_idx = 0; phy_idx < 4; phy_idx++) {
++ isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high =
++ (__u32)(sas_addr[ctrl_idx][phy_idx] >> 32);
++ isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low =
++ (__u32)(sas_addr[ctrl_idx][phy_idx]);
++
++ isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 =
++ afe_tx_amp_control0;
++ isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 =
++ afe_tx_amp_control1;
++ isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 =
++ afe_tx_amp_control2;
++ isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 =
++ afe_tx_amp_control3;
++ }
++ }
++}
++
++int main(void)
++{
++ int err;
++ struct isci_orom *isci_orom;
++
++	isci_orom = calloc(1, sizeof(struct isci_orom));	/* zeroed */
++	if (!isci_orom) return -ENOMEM;	/* avoid crashing if allocation fails */
++
++ set_binary_values(isci_orom);
++
++ err = write_blob(isci_orom);
++ if (err < 0) {
++ free(isci_orom);
++ return err;
++ }
++
++ free(isci_orom);
++ return 0;
++}
+diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h
+new file mode 100644
+index 0000000..5f29882
+--- /dev/null
++++ b/drivers/scsi/isci/firmware/create_fw.h
+@@ -0,0 +1,77 @@
++#ifndef _CREATE_FW_H_
++#define _CREATE_FW_H_
++#include "../probe_roms.h"
++
++
++/* we are configuring for 2 SCUs */
++static const int num_elements = 2;
++
++/*
++ * For all defined arrays:
++ * elements 0-3 are for SCU0, ports 0-3
++ * elements 4-7 are for SCU1, ports 0-3
++ *
++ * valid configurations for one SCU are:
++ * P0 P1 P2 P3
++ * ----------------
++ * 0xF,0x0,0x0,0x0 # 1 x4 port
++ * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1
++ * # ports
++ * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2
++ * # port
++ * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port
++ * 0x1,0x2,0x4,0x8 # Each phy is a x1 port (this is the default configuration)
++ *
++ * if there is a port/phy on which you do not wish to override the default
++ * values, use the value assigned to UNINIT_PARAM (255).
++ */
++
++/* discovery mode type (port auto config mode by default) */
++
++/*
++ * if there is a port/phy on which you do not wish to override the default
++ * values, use the value "0000000000000000". SAS address of zero's is
++ * considered invalid and will not be used.
++ */
++#ifdef MPC
++static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
++static const __u8 phy_mask[2][4] = { {1, 2, 4, 8},
++ {1, 2, 4, 8} };
++static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL,
++ 0x5FCFFFFFF0000002ULL,
++ 0x5FCFFFFFF0000003ULL,
++ 0x5FCFFFFFF0000004ULL },
++ { 0x5FCFFFFFF0000005ULL,
++ 0x5FCFFFFFF0000006ULL,
++ 0x5FCFFFFFF0000007ULL,
++ 0x5FCFFFFFF0000008ULL } };
++#else /* APC (default) */
++static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
++static const __u8 phy_mask[2][4];
++static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL,
++ 0x5FCFFFFF00000001ULL,
++ 0x5FCFFFFF00000001ULL,
++ 0x5FCFFFFF00000001ULL },
++ { 0x5FCFFFFF00000002ULL,
++ 0x5FCFFFFF00000002ULL,
++ 0x5FCFFFFF00000002ULL,
++ 0x5FCFFFFF00000002ULL } };
++#endif
++
++/* Maximum number of concurrent device spin up */
++static const int max_num_concurrent_dev_spin_up = 1;
++
++/* enable SSC (spread spectrum clocking) operation */
++static const int enable_ssc;
++
++/* AFE_TX_AMP_CONTROL */
++static const unsigned int afe_tx_amp_control0 = 0x000bdd08;
++static const unsigned int afe_tx_amp_control1 = 0x000ffc00;
++static const unsigned int afe_tx_amp_control2 = 0x000b7c09;
++static const unsigned int afe_tx_amp_control3 = 0x000afc6e;
++
++static const char blob_name[] = "isci_firmware.bin";
++static const char sig[] = "ISCUOEMB";
++static const unsigned char version = 0x10;
++
++#endif
+diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
+new file mode 100644
+index 0000000..6981b77
+--- /dev/null
++++ b/drivers/scsi/isci/host.c
+@@ -0,0 +1,2762 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <linux/circ_buf.h>
++#include <linux/device.h>
++#include <scsi/sas.h>
++#include "host.h"
++#include "isci.h"
++#include "port.h"
++#include "probe_roms.h"
++#include "remote_device.h"
++#include "request.h"
++#include "scu_completion_codes.h"
++#include "scu_event_codes.h"
++#include "registers.h"
++#include "scu_remote_node_context.h"
++#include "scu_task_context.h"
++
++#define SCU_CONTEXT_RAM_INIT_STALL_TIME 200
++
++#define smu_max_ports(dcc_value) \
++ (\
++ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
++ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
++ )
++
++#define smu_max_task_contexts(dcc_value) \
++ (\
++ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
++ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
++ )
++
++#define smu_max_rncs(dcc_value) \
++ (\
++ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
++ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
++ )
++
++#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100
++
++/**
++ * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL -
++ *
++ * The number of milliseconds to wait while a given phy is consuming power
++ * before allowing another set of phys to consume power. Ultimately, this will
++ * be specified by OEM parameter.
++ */
++#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
++
++/**
++ * NORMALIZE_PUT_POINTER() -
++ *
++ * This macro will normalize the completion queue put pointer so its value can
++ * be used as an array index.
++ */
++#define NORMALIZE_PUT_POINTER(x) \
++ ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
++
++
++/**
++ * NORMALIZE_EVENT_POINTER() -
++ *
++ * This macro will normalize the completion queue event entry so its value can
++ * be used as an index.
++ */
++#define NORMALIZE_EVENT_POINTER(x) \
++ (\
++ ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
++ >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
++ )
++
++/**
++ * NORMALIZE_GET_POINTER() -
++ *
++ * This macro will normalize the completion queue get pointer so its value can
++ * be used as an index into an array
++ */
++#define NORMALIZE_GET_POINTER(x) \
++ ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
++
++/**
++ * NORMALIZE_GET_POINTER_CYCLE_BIT() -
++ *
++ * This macro will normalize the completion queue cycle pointer so it matches
++ * the completion queue cycle bit
++ */
++#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
++ ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
++
++/**
++ * COMPLETION_QUEUE_CYCLE_BIT() -
++ *
++ * This macro will return the cycle bit of the completion queue entry
++ */
++#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
++
++/* Init the state machine and call the state entry function (if any) */
++void sci_init_sm(struct sci_base_state_machine *sm,
++ const struct sci_base_state *state_table, u32 initial_state)
++{
++ sci_state_transition_t handler;
++
++ sm->initial_state_id = initial_state;
++ sm->previous_state_id = initial_state;
++ sm->current_state_id = initial_state;
++ sm->state_table = state_table;
++
++ handler = sm->state_table[initial_state].enter_state;
++ if (handler)
++ handler(sm);
++}
++
++/* Call the state exit fn, update the current state, call the state entry fn */
++void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
++{
++ sci_state_transition_t handler;
++
++ handler = sm->state_table[sm->current_state_id].exit_state;
++ if (handler)
++ handler(sm);
++
++ sm->previous_state_id = sm->current_state_id;
++ sm->current_state_id = next_state;
++
++ handler = sm->state_table[sm->current_state_id].enter_state;
++ if (handler)
++ handler(sm);
++}
++
++static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
++{
++ u32 get_value = ihost->completion_queue_get;
++ u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
++
++ if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
++ COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
++ return true;
++
++ return false;
++}
++
++static bool sci_controller_isr(struct isci_host *ihost)
++{
++ if (sci_controller_completion_queue_has_entries(ihost)) {
++ return true;
++ } else {
++ /*
++		 * We have a spurious interrupt; it could be that we have already
++		 * emptied the completion queue from a previous interrupt. */
++ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
++
++ /*
++ * There is a race in the hardware that could cause us not to be notified
++ * of an interrupt completion if we do not take this step. We will mask
++		 * then unmask the interrupts so that if another interrupt is pending
++		 * after the clearing of the interrupt source, we get the next interrupt message. */
++ writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
++ writel(0, &ihost->smu_registers->interrupt_mask);
++ }
++
++ return false;
++}
++
++irqreturn_t isci_msix_isr(int vec, void *data)
++{
++ struct isci_host *ihost = data;
++
++ if (sci_controller_isr(ihost))
++ tasklet_schedule(&ihost->completion_tasklet);
++
++ return IRQ_HANDLED;
++}
++
++static bool sci_controller_error_isr(struct isci_host *ihost)
++{
++ u32 interrupt_status;
++
++ interrupt_status =
++ readl(&ihost->smu_registers->interrupt_status);
++ interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
++
++ if (interrupt_status != 0) {
++ /*
++ * There is an error interrupt pending so let it through and handle
++ * in the callback */
++ return true;
++ }
++
++ /*
++ * There is a race in the hardware that could cause us not to be notified
++ * of an interrupt completion if we do not take this step. We will mask
++ * then unmask the error interrupts so if there was another interrupt
++ * pending we will be notified.
++ * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
++ writel(0xff, &ihost->smu_registers->interrupt_mask);
++ writel(0, &ihost->smu_registers->interrupt_mask);
++
++ return false;
++}
++
++static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
++{
++ u32 index = SCU_GET_COMPLETION_INDEX(ent);
++ struct isci_request *ireq = ihost->reqs[index];
++
++ /* Make sure that we really want to process this IO request */
++ if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
++ ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
++ ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
++ /* Yep this is a valid io request pass it along to the
++ * io request handler
++ */
++ sci_io_request_tc_completion(ireq, ent);
++}
++
++static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
++{
++ u32 index;
++ struct isci_request *ireq;
++ struct isci_remote_device *idev;
++
++ index = SCU_GET_COMPLETION_INDEX(ent);
++
++ switch (scu_get_command_request_type(ent)) {
++ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
++ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
++ ireq = ihost->reqs[index];
++ dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
++ __func__, ent, ireq);
++ /* @todo For a post TC operation we need to fail the IO
++ * request
++ */
++ break;
++ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
++ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
++ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
++ idev = ihost->device_table[index];
++ dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
++ __func__, ent, idev);
++ /* @todo For a port RNC operation we need to fail the
++ * device
++ */
++ break;
++ default:
++ dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
++ __func__, ent);
++ break;
++ }
++}
++
++static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
++{
++ u32 index;
++ u32 frame_index;
++
++ struct scu_unsolicited_frame_header *frame_header;
++ struct isci_phy *iphy;
++ struct isci_remote_device *idev;
++
++ enum sci_status result = SCI_FAILURE;
++
++ frame_index = SCU_GET_FRAME_INDEX(ent);
++
++ frame_header = ihost->uf_control.buffers.array[frame_index].header;
++ ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
++
++ if (SCU_GET_FRAME_ERROR(ent)) {
++ /*
++		 * @todo If the IAF frame or SIGNATURE FIS frame has an error, will
++		 * this cause a problem? We expect the phy initialization will
++		 * fail if there is an error in the frame. */
++ sci_controller_release_frame(ihost, frame_index);
++ return;
++ }
++
++ if (frame_header->is_address_frame) {
++ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
++ iphy = &ihost->phys[index];
++ result = sci_phy_frame_handler(iphy, frame_index);
++ } else {
++
++ index = SCU_GET_COMPLETION_INDEX(ent);
++
++ if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
++ /*
++ * This is a signature fis or a frame from a direct attached SATA
++			 * device that has not yet been created. In either case forward
++ * the frame to the PE and let it take care of the frame data. */
++ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
++ iphy = &ihost->phys[index];
++ result = sci_phy_frame_handler(iphy, frame_index);
++ } else {
++ if (index < ihost->remote_node_entries)
++ idev = ihost->device_table[index];
++ else
++ idev = NULL;
++
++ if (idev != NULL)
++ result = sci_remote_device_frame_handler(idev, frame_index);
++ else
++ sci_controller_release_frame(ihost, frame_index);
++ }
++ }
++
++ if (result != SCI_SUCCESS) {
++ /*
++		 * @todo Is there any reason to report some additional error message
++		 * when we get this failure notification? */
++ }
++}
++
++static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
++{
++ struct isci_remote_device *idev;
++ struct isci_request *ireq;
++ struct isci_phy *iphy;
++ u32 index;
++
++ index = SCU_GET_COMPLETION_INDEX(ent);
++
++ switch (scu_get_event_type(ent)) {
++ case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
++		/* @todo The driver did something wrong and we need to fix the condition. */
++ dev_err(&ihost->pdev->dev,
++ "%s: SCIC Controller 0x%p received SMU command error "
++ "0x%x\n",
++ __func__,
++ ihost,
++ ent);
++ break;
++
++ case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
++ case SCU_EVENT_TYPE_SMU_ERROR:
++ case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
++ /*
++		 * @todo This is a hardware failure and it's likely that we want to
++		 * reset the controller. */
++ dev_err(&ihost->pdev->dev,
++ "%s: SCIC Controller 0x%p received fatal controller "
++ "event 0x%x\n",
++ __func__,
++ ihost,
++ ent);
++ break;
++
++ case SCU_EVENT_TYPE_TRANSPORT_ERROR:
++ ireq = ihost->reqs[index];
++ sci_io_request_event_handler(ireq, ent);
++ break;
++
++ case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
++ switch (scu_get_event_specifier(ent)) {
++ case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
++ case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
++ ireq = ihost->reqs[index];
++ if (ireq != NULL)
++ sci_io_request_event_handler(ireq, ent);
++ else
++ dev_warn(&ihost->pdev->dev,
++ "%s: SCIC Controller 0x%p received "
++ "event 0x%x for io request object "
++					 "that doesn't exist.\n",
++ __func__,
++ ihost,
++ ent);
++
++ break;
++
++ case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
++ idev = ihost->device_table[index];
++ if (idev != NULL)
++ sci_remote_device_event_handler(idev, ent);
++ else
++ dev_warn(&ihost->pdev->dev,
++ "%s: SCIC Controller 0x%p received "
++ "event 0x%x for remote device object "
++					 "that doesn't exist.\n",
++ __func__,
++ ihost,
++ ent);
++
++ break;
++ }
++ break;
++
++ case SCU_EVENT_TYPE_BROADCAST_CHANGE:
++ /*
++ * direct the broadcast change event to the phy first and then let
++ * the phy redirect the broadcast change to the port object */
++ case SCU_EVENT_TYPE_ERR_CNT_EVENT:
++ /*
++ * direct error counter event to the phy object since that is where
++ * we get the event notification. This is a type 4 event. */
++ case SCU_EVENT_TYPE_OSSP_EVENT:
++ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
++ iphy = &ihost->phys[index];
++ sci_phy_event_handler(iphy, ent);
++ break;
++
++ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
++ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
++ case SCU_EVENT_TYPE_RNC_OPS_MISC:
++ if (index < ihost->remote_node_entries) {
++ idev = ihost->device_table[index];
++
++ if (idev != NULL)
++ sci_remote_device_event_handler(idev, ent);
++ } else
++ dev_err(&ihost->pdev->dev,
++ "%s: SCIC Controller 0x%p received event 0x%x "
++			"for remote device object 0x%0x that doesn't "
++ "exist.\n",
++ __func__,
++ ihost,
++ ent,
++ index);
++
++ break;
++
++ default:
++ dev_warn(&ihost->pdev->dev,
++ "%s: SCIC Controller received unknown event code %x\n",
++ __func__,
++ ent);
++ break;
++ }
++}
++
++static void sci_controller_process_completions(struct isci_host *ihost)
++{
++ u32 completion_count = 0;
++ u32 ent;
++ u32 get_index;
++ u32 get_cycle;
++ u32 event_get;
++ u32 event_cycle;
++
++ dev_dbg(&ihost->pdev->dev,
++		"%s: completion queue beginning get:0x%08x\n",
++ __func__,
++ ihost->completion_queue_get);
++
++ /* Get the component parts of the completion queue */
++ get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
++ get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;
++
++ event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
++ event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;
++
++ while (
++ NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
++ == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
++ ) {
++ completion_count++;
++
++ ent = ihost->completion_queue[get_index];
++
++ /* increment the get pointer and check for rollover to toggle the cycle bit */
++ get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
++ (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
++ get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: completion queue entry:0x%08x\n",
++ __func__,
++ ent);
++
++ switch (SCU_GET_COMPLETION_TYPE(ent)) {
++ case SCU_COMPLETION_TYPE_TASK:
++ sci_controller_task_completion(ihost, ent);
++ break;
++
++ case SCU_COMPLETION_TYPE_SDMA:
++ sci_controller_sdma_completion(ihost, ent);
++ break;
++
++ case SCU_COMPLETION_TYPE_UFI:
++ sci_controller_unsolicited_frame(ihost, ent);
++ break;
++
++ case SCU_COMPLETION_TYPE_EVENT:
++ sci_controller_event_completion(ihost, ent);
++ break;
++
++ case SCU_COMPLETION_TYPE_NOTIFY: {
++ event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
++ (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
++ event_get = (event_get+1) & (SCU_MAX_EVENTS-1);
++
++ sci_controller_event_completion(ihost, ent);
++ break;
++ }
++ default:
++ dev_warn(&ihost->pdev->dev,
++ "%s: SCIC Controller received unknown "
++ "completion type %x\n",
++ __func__,
++ ent);
++ break;
++ }
++ }
++
++ /* Update the get register if we completed one or more entries */
++ if (completion_count > 0) {
++ ihost->completion_queue_get =
++ SMU_CQGR_GEN_BIT(ENABLE) |
++ SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
++ event_cycle |
++ SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
++ get_cycle |
++ SMU_CQGR_GEN_VAL(POINTER, get_index);
++
++ writel(ihost->completion_queue_get,
++ &ihost->smu_registers->completion_queue_get);
++
++ }
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: completion queue ending get:0x%08x\n",
++ __func__,
++ ihost->completion_queue_get);
++
++}
++
++static void sci_controller_error_handler(struct isci_host *ihost)
++{
++ u32 interrupt_status;
++
++ interrupt_status =
++ readl(&ihost->smu_registers->interrupt_status);
++
++ if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
++ sci_controller_completion_queue_has_entries(ihost)) {
++
++ sci_controller_process_completions(ihost);
++ writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
++ } else {
++ dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
++ interrupt_status);
++
++ sci_change_state(&ihost->sm, SCIC_FAILED);
++
++ return;
++ }
++
++	/* If we don't process any completions, I am not sure that we want to do this.
++ * We are in the middle of a hardware fault and should probably be reset.
++ */
++ writel(0, &ihost->smu_registers->interrupt_mask);
++}
++
++irqreturn_t isci_intx_isr(int vec, void *data)
++{
++ irqreturn_t ret = IRQ_NONE;
++ struct isci_host *ihost = data;
++
++ if (sci_controller_isr(ihost)) {
++ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
++ tasklet_schedule(&ihost->completion_tasklet);
++ ret = IRQ_HANDLED;
++ } else if (sci_controller_error_isr(ihost)) {
++ spin_lock(&ihost->scic_lock);
++ sci_controller_error_handler(ihost);
++ spin_unlock(&ihost->scic_lock);
++ ret = IRQ_HANDLED;
++ }
++
++ return ret;
++}
++
++irqreturn_t isci_error_isr(int vec, void *data)
++{
++ struct isci_host *ihost = data;
++
++ if (sci_controller_error_isr(ihost))
++ sci_controller_error_handler(ihost);
++
++ return IRQ_HANDLED;
++}
++
++/**
++ * isci_host_start_complete() - This function is called by the core library,
++ * through the ISCI Module, to indicate controller start status.
++ * @ihost: This parameter specifies the ISCI host object
++ * @completion_status: This parameter specifies the completion status from the
++ * core library.
++ *
++ */
++static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
++{
++ if (completion_status != SCI_SUCCESS)
++ dev_info(&ihost->pdev->dev,
++ "controller start timed out, continuing...\n");
++ isci_host_change_state(ihost, isci_ready);
++ clear_bit(IHOST_START_PENDING, &ihost->flags);
++ wake_up(&ihost->eventq);
++}
++
++int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
++{
++ struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
++
++ if (test_bit(IHOST_START_PENDING, &ihost->flags))
++ return 0;
++
++ /* todo: use sas_flush_discovery once it is upstream */
++ scsi_flush_work(shost);
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: ihost->status = %d, time = %ld\n",
++ __func__, isci_host_get_state(ihost), time);
++
++ return 1;
++
++}
++
++/**
++ * sci_controller_get_suggested_start_timeout() - This method returns the
++ * suggested sci_controller_start() timeout amount. The user is free to
++ * use any timeout value, but this method provides the suggested minimum
++ * start timeout value. The returned value is based upon empirical
++ * information determined as a result of interoperability testing.
++ * @controller: the handle to the controller object for which to return the
++ * suggested start timeout.
++ *
++ * This method returns the number of milliseconds for the suggested start
++ * operation timeout.
++ */
++static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
++{
++ /* Validate the user supplied parameters. */
++ if (!ihost)
++ return 0;
++
++ /*
++ * The suggested minimum timeout value for a controller start operation:
++ *
++ * Signature FIS Timeout
++ * + Phy Start Timeout
++ * + Number of Phy Spin Up Intervals
++ * ---------------------------------
++ * Number of milliseconds for the controller start operation.
++ *
++ * NOTE: The number of phy spin up intervals will be equivalent
++ * to the number of phys divided by the number phys allowed
++ * per interval - 1 (once OEM parameters are supported).
++ * Currently we assume only 1 phy per interval. */
++
++ return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
++ + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
++ + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
++}
++
++static void sci_controller_enable_interrupts(struct isci_host *ihost)
++{
++ BUG_ON(ihost->smu_registers == NULL);
++ writel(0, &ihost->smu_registers->interrupt_mask);
++}
++
++void sci_controller_disable_interrupts(struct isci_host *ihost)
++{
++ BUG_ON(ihost->smu_registers == NULL);
++ writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
++}
++
++static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
++{
++ u32 port_task_scheduler_value;
++
++ port_task_scheduler_value =
++ readl(&ihost->scu_registers->peg0.ptsg.control);
++ port_task_scheduler_value |=
++ (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
++ SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
++ writel(port_task_scheduler_value,
++ &ihost->scu_registers->peg0.ptsg.control);
++}
++
++static void sci_controller_assign_task_entries(struct isci_host *ihost)
++{
++ u32 task_assignment;
++
++ /*
++ * Assign all the TCs to function 0
++ * TODO: Do we actually need to read this register to write it back?
++ */
++
++ task_assignment =
++ readl(&ihost->smu_registers->task_context_assignment[0]);
++
++ task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
++ (SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) |
++ (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));
++
++ writel(task_assignment,
++ &ihost->smu_registers->task_context_assignment[0]);
++
++}
++
++static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
++{
++ u32 index;
++ u32 completion_queue_control_value;
++ u32 completion_queue_get_value;
++ u32 completion_queue_put_value;
++
++ ihost->completion_queue_get = 0;
++
++ completion_queue_control_value =
++ (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
++ SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));
++
++ writel(completion_queue_control_value,
++ &ihost->smu_registers->completion_queue_control);
++
++
++ /* Set the completion queue get pointer and enable the queue */
++ completion_queue_get_value = (
++ (SMU_CQGR_GEN_VAL(POINTER, 0))
++ | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
++ | (SMU_CQGR_GEN_BIT(ENABLE))
++ | (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
++ );
++
++ writel(completion_queue_get_value,
++ &ihost->smu_registers->completion_queue_get);
++
++ /* Set the completion queue put pointer */
++ completion_queue_put_value = (
++ (SMU_CQPR_GEN_VAL(POINTER, 0))
++ | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
++ );
++
++ writel(completion_queue_put_value,
++ &ihost->smu_registers->completion_queue_put);
++
++ /* Initialize the cycle bit of the completion queue entries */
++ for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
++ /*
++ * If get.cycle_bit != completion_queue.cycle_bit
++		 * it's not a valid completion queue entry
++ * so at system start all entries are invalid */
++ ihost->completion_queue[index] = 0x80000000;
++ }
++}
++
++static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
++{
++ u32 frame_queue_control_value;
++ u32 frame_queue_get_value;
++ u32 frame_queue_put_value;
++
++ /* Write the queue size */
++ frame_queue_control_value =
++ SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);
++
++ writel(frame_queue_control_value,
++ &ihost->scu_registers->sdma.unsolicited_frame_queue_control);
++
++ /* Setup the get pointer for the unsolicited frame queue */
++ frame_queue_get_value = (
++ SCU_UFQGP_GEN_VAL(POINTER, 0)
++ | SCU_UFQGP_GEN_BIT(ENABLE_BIT)
++ );
++
++ writel(frame_queue_get_value,
++ &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
++ /* Setup the put pointer for the unsolicited frame queue */
++ frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
++ writel(frame_queue_put_value,
++ &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
++}
++
++static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
++{
++ if (ihost->sm.current_state_id == SCIC_STARTING) {
++ /*
++ * We move into the ready state, because some of the phys/ports
++ * may be up and operational.
++ */
++ sci_change_state(&ihost->sm, SCIC_READY);
++
++ isci_host_start_complete(ihost, status);
++ }
++}
++
++static bool is_phy_starting(struct isci_phy *iphy)
++{
++ enum sci_phy_states state;
++
++ state = iphy->sm.current_state_id;
++ switch (state) {
++ case SCI_PHY_STARTING:
++ case SCI_PHY_SUB_INITIAL:
++ case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
++ case SCI_PHY_SUB_AWAIT_IAF_UF:
++ case SCI_PHY_SUB_AWAIT_SAS_POWER:
++ case SCI_PHY_SUB_AWAIT_SATA_POWER:
++ case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
++ case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
++ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
++ case SCI_PHY_SUB_FINAL:
++ return true;
++ default:
++ return false;
++ }
++}
++
++/**
++ * sci_controller_start_next_phy - start phy
++ * @ihost: controller
++ *
++ * If all the phys have been started, then attempt to transition the
++ * controller to the READY state and inform the user
++ * (sci_cb_controller_start_complete()).
++ */
++static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
++{
++ struct sci_oem_params *oem = &ihost->oem_parameters;
++ struct isci_phy *iphy;
++ enum sci_status status;
++
++ status = SCI_SUCCESS;
++
++ if (ihost->phy_startup_timer_pending)
++ return status;
++
++ if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
++ bool is_controller_start_complete = true;
++ u32 state;
++ u8 index;
++
++ for (index = 0; index < SCI_MAX_PHYS; index++) {
++ iphy = &ihost->phys[index];
++ state = iphy->sm.current_state_id;
++
++ if (!phy_get_non_dummy_port(iphy))
++ continue;
++
++ /* The controller start operation is complete iff:
++ * - all links have been given an opportunity to start
++ * - have no indication of a connected device
++ * - have an indication of a connected device and it has
++ * finished the link training process.
++ */
++ if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
++ (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
++ (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
++ is_controller_start_complete = false;
++ break;
++ }
++ }
++
++ /*
++ * The controller has successfully finished the start process.
++ * Inform the SCI Core user and transition to the READY state. */
++ if (is_controller_start_complete == true) {
++ sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
++ sci_del_timer(&ihost->phy_timer);
++ ihost->phy_startup_timer_pending = false;
++ }
++ } else {
++ iphy = &ihost->phys[ihost->next_phy_to_start];
++
++ if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
++ if (phy_get_non_dummy_port(iphy) == NULL) {
++ ihost->next_phy_to_start++;
++
++				/* Caution: recursion ahead. Be forewarned.
++				 *
++				 * The PHY was never added to a PORT in MPC mode,
++				 * so start the next phy in sequence. This phy
++				 * will never go link up and will not draw power;
++				 * the OEM parameters either configured the phy
++				 * incorrectly for the PORT or it was never
++				 * assigned to a PORT.
++ */
++ return sci_controller_start_next_phy(ihost);
++ }
++ }
++
++ status = sci_phy_start(iphy);
++
++ if (status == SCI_SUCCESS) {
++ sci_mod_timer(&ihost->phy_timer,
++ SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
++ ihost->phy_startup_timer_pending = true;
++ } else {
++ dev_warn(&ihost->pdev->dev,
++				 "%s: Controller start operation failed "
++				 "to start phy %d because of status "
++ "%d.\n",
++ __func__,
++ ihost->phys[ihost->next_phy_to_start].phy_index,
++ status);
++ }
++
++ ihost->next_phy_to_start++;
++ }
++
++ return status;
++}
++
++static void phy_startup_timeout(unsigned long data)
++{
++ struct sci_timer *tmr = (struct sci_timer *)data;
++ struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
++ unsigned long flags;
++ enum sci_status status;
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++
++ if (tmr->cancel)
++ goto done;
++
++ ihost->phy_startup_timer_pending = false;
++
++ do {
++ status = sci_controller_start_next_phy(ihost);
++ } while (status != SCI_SUCCESS);
++
++done:
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++}
++
++static u16 isci_tci_active(struct isci_host *ihost)
++{
++ return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
++}
++
++static enum sci_status sci_controller_start(struct isci_host *ihost,
++ u32 timeout)
++{
++ enum sci_status result;
++ u16 index;
++
++ if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
++ dev_warn(&ihost->pdev->dev,
++ "SCIC Controller start operation requested in "
++ "invalid state\n");
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++ /* Build the TCi free pool */
++ BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
++ ihost->tci_head = 0;
++ ihost->tci_tail = 0;
++ for (index = 0; index < ihost->task_context_entries; index++)
++ isci_tci_free(ihost, index);
++
++ /* Build the RNi free pool */
++ sci_remote_node_table_initialize(&ihost->available_remote_nodes,
++ ihost->remote_node_entries);
++
++ /*
++ * Before anything else lets make sure we will not be
++ * interrupted by the hardware.
++ */
++ sci_controller_disable_interrupts(ihost);
++
++ /* Enable the port task scheduler */
++ sci_controller_enable_port_task_scheduler(ihost);
++
++ /* Assign all the task entries to ihost physical function */
++ sci_controller_assign_task_entries(ihost);
++
++ /* Now initialize the completion queue */
++ sci_controller_initialize_completion_queue(ihost);
++
++ /* Initialize the unsolicited frame queue for use */
++ sci_controller_initialize_unsolicited_frame_queue(ihost);
++
++ /* Start all of the ports on this controller */
++ for (index = 0; index < ihost->logical_port_entries; index++) {
++ struct isci_port *iport = &ihost->ports[index];
++
++ result = sci_port_start(iport);
++ if (result)
++ return result;
++ }
++
++ sci_controller_start_next_phy(ihost);
++
++ sci_mod_timer(&ihost->timer, timeout);
++
++ sci_change_state(&ihost->sm, SCIC_STARTING);
++
++ return SCI_SUCCESS;
++}
++
++void isci_host_scan_start(struct Scsi_Host *shost)
++{
++ struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
++ unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
++
++ set_bit(IHOST_START_PENDING, &ihost->flags);
++
++ spin_lock_irq(&ihost->scic_lock);
++ sci_controller_start(ihost, tmo);
++ sci_controller_enable_interrupts(ihost);
++ spin_unlock_irq(&ihost->scic_lock);
++}
++
++static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
++{
++ isci_host_change_state(ihost, isci_stopped);
++ sci_controller_disable_interrupts(ihost);
++ clear_bit(IHOST_STOP_PENDING, &ihost->flags);
++ wake_up(&ihost->eventq);
++}
++
++static void sci_controller_completion_handler(struct isci_host *ihost)
++{
++ /* Empty out the completion queue */
++ if (sci_controller_completion_queue_has_entries(ihost))
++ sci_controller_process_completions(ihost);
++
++ /* Clear the interrupt and enable all interrupts again */
++ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
++ /* Could we write the value of SMU_ISR_COMPLETION? */
++ writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
++ writel(0, &ihost->smu_registers->interrupt_mask);
++}
++
++/**
++ * isci_host_completion_routine() - This function is the delayed service
++ * routine that calls the sci core library's completion handler. It's
++ * scheduled as a tasklet from the interrupt service routine when interrupts
++ * are in use, or set as the timeout function in polled mode.
++ * @data: This parameter specifies the ISCI host object
++ *
++ */
++static void isci_host_completion_routine(unsigned long data)
++{
++ struct isci_host *ihost = (struct isci_host *)data;
++ struct list_head completed_request_list;
++ struct list_head errored_request_list;
++ struct list_head *current_position;
++ struct list_head *next_position;
++ struct isci_request *request;
++ struct isci_request *next_request;
++ struct sas_task *task;
++ u16 active;
++
++ INIT_LIST_HEAD(&completed_request_list);
++ INIT_LIST_HEAD(&errored_request_list);
++
++ spin_lock_irq(&ihost->scic_lock);
++
++ sci_controller_completion_handler(ihost);
++
++ /* Take the list of completed I/Os from the host. */
++
++ list_splice_init(&ihost->requests_to_complete,
++ &completed_request_list);
++
++ /* Take the list of errored I/Os from the host. */
++ list_splice_init(&ihost->requests_to_errorback,
++ &errored_request_list);
++
++ spin_unlock_irq(&ihost->scic_lock);
++
++ /* Process any completions in the lists. */
++ list_for_each_safe(current_position, next_position,
++ &completed_request_list) {
++
++ request = list_entry(current_position, struct isci_request,
++ completed_node);
++ task = isci_request_access_task(request);
++
++ /* Normal notification (task_done) */
++ dev_dbg(&ihost->pdev->dev,
++ "%s: Normal - request/task = %p/%p\n",
++ __func__,
++ request,
++ task);
++
++ /* Return the task to libsas */
++ if (task != NULL) {
++
++ task->lldd_task = NULL;
++ if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
++
++ /* If the task is already in the abort path,
++ * the task_done callback cannot be called.
++ */
++ task->task_done(task);
++ }
++ }
++
++ spin_lock_irq(&ihost->scic_lock);
++ isci_free_tag(ihost, request->io_tag);
++ spin_unlock_irq(&ihost->scic_lock);
++ }
++ list_for_each_entry_safe(request, next_request, &errored_request_list,
++ completed_node) {
++
++ task = isci_request_access_task(request);
++
++ /* Use sas_task_abort */
++ dev_warn(&ihost->pdev->dev,
++ "%s: Error - request/task = %p/%p\n",
++ __func__,
++ request,
++ task);
++
++ if (task != NULL) {
++
++ /* Put the task into the abort path if it's not there
++ * already.
++ */
++ if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
++ sas_task_abort(task);
++
++ } else {
++ /* This is a case where the request has completed with a
++ * status such that it needed further target servicing,
++ * but the sas_task reference has already been removed
++ * from the request. Since it was errored, it was not
++ * being aborted, so there is nothing to do except free
++ * it.
++ */
++
++ spin_lock_irq(&ihost->scic_lock);
++ /* Remove the request from the remote device's list
++ * of pending requests.
++ */
++ list_del_init(&request->dev_node);
++ isci_free_tag(ihost, request->io_tag);
++ spin_unlock_irq(&ihost->scic_lock);
++ }
++ }
++
++ /* the coalescence timeout doubles at each encoding step, so
++ * update it based on the ilog2 value of the outstanding requests
++ */
++ active = isci_tci_active(ihost);
++ writel(SMU_ICC_GEN_VAL(NUMBER, active) |
++ SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
++ &ihost->smu_registers->interrupt_coalesce_control);
++}
++
++/**
++ * sci_controller_stop() - This method will stop an individual controller
++ * object. This method will invoke the associated user callback upon
++ * completion. The completion callback is called when the following
++ * conditions are met: the method return status is SCI_SUCCESS and the
++ * controller has been quiesced. This method will ensure that all IO
++ * requests are quiesced, phys are stopped, and all additional operation by
++ * the hardware is halted.
++ * @ihost: the handle to the controller object to stop.
++ * @timeout: This parameter specifies the number of milliseconds in which the
++ * stop operation should complete.
++ *
++ * The controller must be in the READY state. Indicate if the controller
++ * stop method succeeded or failed in some way. SCI_SUCCESS if the stop
++ * operation successfully began. SCI_FAILURE_INVALID_STATE if the controller
++ * is not in the READY state.
++ */
++static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
++{
++ if (ihost->sm.current_state_id != SCIC_READY) {
++ dev_warn(&ihost->pdev->dev,
++ "SCIC Controller stop operation requested in "
++ "invalid state\n");
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++ sci_mod_timer(&ihost->timer, timeout);
++ sci_change_state(&ihost->sm, SCIC_STOPPING);
++ return SCI_SUCCESS;
++}
++
++/**
++ * sci_controller_reset() - This method will reset the supplied core
++ * controller, provided the controller is in a state that permits reset.
++ * This operation is considered destructive. In other words, all current
++ * operations are wiped out. No IO completions for outstanding devices
++ * occur. Outstanding IO requests are not aborted or completed at the
++ * actual remote device.
++ * @ihost: the handle to the controller object to reset.
++ *
++ * Indicate if the controller reset method succeeded or failed in some way.
++ * SCI_SUCCESS if the reset operation successfully started.
++ * SCI_FAILURE_INVALID_STATE if the controller is in a state from which a
++ * reset may not be started.
++ */
++static enum sci_status sci_controller_reset(struct isci_host *ihost)
++{
++ switch (ihost->sm.current_state_id) {
++ case SCIC_RESET:
++ case SCIC_READY:
++ case SCIC_STOPPED:
++ case SCIC_FAILED:
++ /*
++ * The reset operation is not a graceful cleanup, just
++ * perform the state transition.
++ */
++ sci_change_state(&ihost->sm, SCIC_RESETTING);
++ return SCI_SUCCESS;
++ default:
++ dev_warn(&ihost->pdev->dev,
++ "SCIC Controller reset operation requested in "
++ "invalid state\n");
++ return SCI_FAILURE_INVALID_STATE;
++ }
++}
++
++void isci_host_deinit(struct isci_host *ihost)
++{
++ int i;
++
++ isci_host_change_state(ihost, isci_stopping);
++ for (i = 0; i < SCI_MAX_PORTS; i++) {
++ struct isci_port *iport = &ihost->ports[i];
++ struct isci_remote_device *idev, *d;
++
++ list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
++ if (test_bit(IDEV_ALLOCATED, &idev->flags))
++ isci_remote_device_stop(ihost, idev);
++ }
++ }
++
++ set_bit(IHOST_STOP_PENDING, &ihost->flags);
++
++ spin_lock_irq(&ihost->scic_lock);
++ sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
++ spin_unlock_irq(&ihost->scic_lock);
++
++ wait_for_stop(ihost);
++ sci_controller_reset(ihost);
++
++ /* Cancel any/all outstanding port timers */
++ for (i = 0; i < ihost->logical_port_entries; i++) {
++ struct isci_port *iport = &ihost->ports[i];
++ del_timer_sync(&iport->timer.timer);
++ }
++
++ /* Cancel any/all outstanding phy timers */
++ for (i = 0; i < SCI_MAX_PHYS; i++) {
++ struct isci_phy *iphy = &ihost->phys[i];
++ del_timer_sync(&iphy->sata_timer.timer);
++ }
++
++ del_timer_sync(&ihost->port_agent.timer.timer);
++
++ del_timer_sync(&ihost->power_control.timer.timer);
++
++ del_timer_sync(&ihost->timer.timer);
++
++ del_timer_sync(&ihost->phy_timer.timer);
++}
++
++static void __iomem *scu_base(struct isci_host *isci_host)
++{
++ struct pci_dev *pdev = isci_host->pdev;
++ int id = isci_host->id;
++
++ return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
++}
++
++static void __iomem *smu_base(struct isci_host *isci_host)
++{
++ struct pci_dev *pdev = isci_host->pdev;
++ int id = isci_host->id;
++
++ return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
++}
++
++static void isci_user_parameters_get(struct sci_user_parameters *u)
++{
++ int i;
++
++ for (i = 0; i < SCI_MAX_PHYS; i++) {
++ struct sci_phy_user_params *u_phy = &u->phys[i];
++
++ u_phy->max_speed_generation = phy_gen;
++
++ /* we are not exporting these for now */
++ u_phy->align_insertion_frequency = 0x7f;
++ u_phy->in_connection_align_insertion_frequency = 0xff;
++ u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
++ }
++
++ u->stp_inactivity_timeout = stp_inactive_to;
++ u->ssp_inactivity_timeout = ssp_inactive_to;
++ u->stp_max_occupancy_timeout = stp_max_occ_to;
++ u->ssp_max_occupancy_timeout = ssp_max_occ_to;
++ u->no_outbound_task_timeout = no_outbound_task_to;
++ u->max_number_concurrent_device_spin_up = max_concurr_spinup;
++}
++
++static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
++
++ sci_change_state(&ihost->sm, SCIC_RESET);
++}
++
++static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
++{
++ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
++
++ sci_del_timer(&ihost->timer);
++}
++
++#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
++#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
++#define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000
++#define INTERRUPT_COALESCE_NUMBER_MAX 256
++#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7
++#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28
++
++/**
++ * sci_controller_set_interrupt_coalescence() - This method allows the user to
++ * configure the interrupt coalescence.
++ * @ihost: This parameter represents the handle to the controller object
++ * for which its interrupt coalesce register is overridden.
++ * @coalesce_number: Used to control the number of entries in the Completion
++ * Queue before an interrupt is generated. If the number of entries exceeds
++ * this number, an interrupt will be generated. The valid range of the input
++ * is [0, 256]. A setting of 0 results in coalescing being disabled.
++ * @coalesce_timeout: Timeout value in microseconds. The valid range of the
++ * input is [0, 2700000]. A setting of 0 is allowed and results in no
++ * interrupt coalescing timeout.
++ *
++ * Indicate if the user successfully set the interrupt coalesce parameters.
++ * SCI_SUCCESS The user successfully updated the interrupt coalescence.
++ * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
++ */
++static enum sci_status
++sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
++ u32 coalesce_number,
++ u32 coalesce_timeout)
++{
++ u8 timeout_encode = 0;
++ u32 min = 0;
++ u32 max = 0;
++
++ /* Check if the input parameters fall in the range. */
++ if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
++ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
++
++ /*
++ * Defined encoding for interrupt coalescing timeout:
++ * Value Min Max Units
++ * ----- --- --- -----
++ * 0 - - Disabled
++ * 1 13.3 20.0 ns
++ * 2 26.7 40.0
++ * 3 53.3 80.0
++ * 4 106.7 160.0
++ * 5 213.3 320.0
++ * 6 426.7 640.0
++ * 7 853.3 1280.0
++ * 8 1.7 2.6 us
++ * 9 3.4 5.1
++ * 10 6.8 10.2
++ * 11 13.7 20.5
++ * 12 27.3 41.0
++ * 13 54.6 81.9
++ * 14 109.2 163.8
++ * 15 218.5 327.7
++ * 16 436.9 655.4
++ * 17 873.8 1310.7
++ * 18 1.7 2.6 ms
++ * 19 3.5 5.2
++ * 20 7.0 10.5
++ * 21 14.0 21.0
++ * 22 28.0 41.9
++ * 23 55.9 83.9
++ * 24 111.8 167.8
++ * 25 223.7 335.5
++ * 26 447.4 671.1
++ * 27 894.8 1342.2
++ * 28 1.8 2.7 s
++ * Others Undefined */
++
++ /*
++ * Use the table above to decide the encoding of the interrupt coalescing
++ * timeout value for register writing. */
++ if (coalesce_timeout == 0)
++ timeout_encode = 0;
++ else {
++ /* convert the timeout value to units of 10 ns. */
++ coalesce_timeout = coalesce_timeout * 100;
++ min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
++ max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
++
++ /* get the encoding of the timeout for register writing. */
++ for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
++ timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
++ timeout_encode++) {
++ if (min <= coalesce_timeout && max > coalesce_timeout)
++ break;
++ else if (coalesce_timeout >= max && coalesce_timeout < min * 2
++ && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
++ if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
++ break;
++ else {
++ timeout_encode++;
++ break;
++ }
++ } else {
++ max = max * 2;
++ min = min * 2;
++ }
++ }
++
++ if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
++ /* the value is out of range. */
++ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
++ }
++
++ writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
++ SMU_ICC_GEN_VAL(TIMER, timeout_encode),
++ &ihost->smu_registers->interrupt_coalesce_control);
++
++ ihost->interrupt_coalesce_number = (u16)coalesce_number;
++ ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;
++
++ return SCI_SUCCESS;
++}
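++
++/* Worked example (illustrative, not from the hardware spec): a
++ * coalesce_timeout of 5 us scales to 500 ten-nanosecond units.
++ * Starting at encode 7 with min = 85 and max = 128, both bounds double
++ * on each pass until 340 <= 500 < 512 holds at encode 9, which matches
++ * the 3.4 us to 5.1 us row of the table above.
++ */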
++
++static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
++
++ /* set the default interrupt coalescence number and timeout value. */
++ sci_controller_set_interrupt_coalescence(ihost, 0, 0);
++}
++
++static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
++{
++ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
++
++ /* disable interrupt coalescence. */
++ sci_controller_set_interrupt_coalescence(ihost, 0, 0);
++}
++
++static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
++{
++ u32 index;
++ enum sci_status status;
++ enum sci_status phy_status;
++
++ status = SCI_SUCCESS;
++
++ for (index = 0; index < SCI_MAX_PHYS; index++) {
++ phy_status = sci_phy_stop(&ihost->phys[index]);
++
++ if (phy_status != SCI_SUCCESS &&
++ phy_status != SCI_FAILURE_INVALID_STATE) {
++ status = SCI_FAILURE;
++
++ dev_warn(&ihost->pdev->dev,
++ "%s: Controller stop operation failed to stop "
++ "phy %d because of status %d.\n",
++ __func__,
++ ihost->phys[index].phy_index, phy_status);
++ }
++ }
++
++ return status;
++}
++
++static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
++{
++ u32 index;
++ enum sci_status port_status;
++ enum sci_status status = SCI_SUCCESS;
++
++ for (index = 0; index < ihost->logical_port_entries; index++) {
++ struct isci_port *iport = &ihost->ports[index];
++
++ port_status = sci_port_stop(iport);
++
++ if ((port_status != SCI_SUCCESS) &&
++ (port_status != SCI_FAILURE_INVALID_STATE)) {
++ status = SCI_FAILURE;
++
++ dev_warn(&ihost->pdev->dev,
++ "%s: Controller stop operation failed to "
++ "stop port %d because of status %d.\n",
++ __func__,
++ iport->logical_port_index,
++ port_status);
++ }
++ }
++
++ return status;
++}
++
++static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
++{
++ u32 index;
++ enum sci_status status;
++ enum sci_status device_status;
++
++ status = SCI_SUCCESS;
++
++ for (index = 0; index < ihost->remote_node_entries; index++) {
++ if (ihost->device_table[index] != NULL) {
++ /* / @todo What timeout value do we want to provide to this request? */
++ device_status = sci_remote_device_stop(ihost->device_table[index], 0);
++
++ if ((device_status != SCI_SUCCESS) &&
++ (device_status != SCI_FAILURE_INVALID_STATE)) {
++ dev_warn(&ihost->pdev->dev,
++ "%s: Controller stop operation failed "
++ "to stop device 0x%p because of "
++ "status %d.\n",
++ __func__,
++ ihost->device_table[index], device_status);
++ }
++ }
++ }
++
++ return status;
++}
++
++static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
++
++ /* Stop all of the components for this controller */
++ sci_controller_stop_phys(ihost);
++ sci_controller_stop_ports(ihost);
++ sci_controller_stop_devices(ihost);
++}
++
++static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
++{
++ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
++
++ sci_del_timer(&ihost->timer);
++}
++
++static void sci_controller_reset_hardware(struct isci_host *ihost)
++{
++ /* Disable interrupts so we don't take any spurious interrupts */
++ sci_controller_disable_interrupts(ihost);
++
++ /* Reset the SCU */
++ writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);
++
++ /* Delay for 1ms before clearing the CQP and UFQPR. */
++ udelay(1000);
++
++ /* The write to the CQGR clears the CQP */
++ writel(0x00000000, &ihost->smu_registers->completion_queue_get);
++
++ /* The write to the UFQGP clears the UFQPR */
++ writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
++}
++
++static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
++
++ sci_controller_reset_hardware(ihost);
++ sci_change_state(&ihost->sm, SCIC_RESET);
++}
++
++static const struct sci_base_state sci_controller_state_table[] = {
++ [SCIC_INITIAL] = {
++ .enter_state = sci_controller_initial_state_enter,
++ },
++ [SCIC_RESET] = {},
++ [SCIC_INITIALIZING] = {},
++ [SCIC_INITIALIZED] = {},
++ [SCIC_STARTING] = {
++ .exit_state = sci_controller_starting_state_exit,
++ },
++ [SCIC_READY] = {
++ .enter_state = sci_controller_ready_state_enter,
++ .exit_state = sci_controller_ready_state_exit,
++ },
++ [SCIC_RESETTING] = {
++ .enter_state = sci_controller_resetting_state_enter,
++ },
++ [SCIC_STOPPING] = {
++ .enter_state = sci_controller_stopping_state_enter,
++ .exit_state = sci_controller_stopping_state_exit,
++ },
++ [SCIC_STOPPED] = {},
++ [SCIC_FAILED] = {}
++};
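++
++/* States given an empty initializer above have no enter/exit actions;
++ * the state machine core is expected to skip NULL handlers and simply
++ * record those transitions.
++ */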
++
++static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
++{
++ /* these defaults are overridden by the platform / firmware */
++ u16 index;
++
++ /* Default to APC mode. */
++ ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
++
++ /* Default to at most one device spinning up at a time. */
++ ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1;
++
++ /* Default to no SSC operation. */
++ ihost->oem_parameters.controller.do_enable_ssc = false;
++
++ /* Initialize all of the port parameter information to narrow ports. */
++ for (index = 0; index < SCI_MAX_PORTS; index++) {
++ ihost->oem_parameters.ports[index].phy_mask = 0;
++ }
++
++ /* Initialize all of the phy parameter information. */
++ for (index = 0; index < SCI_MAX_PHYS; index++) {
++ /* Default to 6G (i.e. Gen 3) for now. */
++ ihost->user_parameters.phys[index].max_speed_generation = 3;
++
++ /* the frequencies cannot be 0 */
++ ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
++ ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
++ ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
++
++ /*
++ * Previous Vitesse-based expanders had an arbitration issue that
++ * is worked around by setting the upper 32 bits of the SAS address
++ * to a value greater than the Vitesse company identifier.
++ * Hence, usage of 0x5FCFFFFF. */
++ ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
++ ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
++ }
++
++ ihost->user_parameters.stp_inactivity_timeout = 5;
++ ihost->user_parameters.ssp_inactivity_timeout = 5;
++ ihost->user_parameters.stp_max_occupancy_timeout = 5;
++ ihost->user_parameters.ssp_max_occupancy_timeout = 20;
++ ihost->user_parameters.no_outbound_task_timeout = 20;
++}
++
++static void controller_timeout(unsigned long data)
++{
++ struct sci_timer *tmr = (struct sci_timer *)data;
++ struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
++ struct sci_base_state_machine *sm = &ihost->sm;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++
++ if (tmr->cancel)
++ goto done;
++
++ if (sm->current_state_id == SCIC_STARTING)
++ sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
++ else if (sm->current_state_id == SCIC_STOPPING) {
++ sci_change_state(sm, SCIC_FAILED);
++ isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
++ } else /* / @todo Now what do we want to do in this case? */
++ dev_err(&ihost->pdev->dev,
++ "%s: Controller timer fired when controller was not "
++ "in a state being timed.\n",
++ __func__);
++
++done:
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++}
++
++static enum sci_status sci_controller_construct(struct isci_host *ihost,
++ void __iomem *scu_base,
++ void __iomem *smu_base)
++{
++ u8 i;
++
++ sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
++
++ ihost->scu_registers = scu_base;
++ ihost->smu_registers = smu_base;
++
++ sci_port_configuration_agent_construct(&ihost->port_agent);
++
++ /* Construct the ports for this controller */
++ for (i = 0; i < SCI_MAX_PORTS; i++)
++ sci_port_construct(&ihost->ports[i], i, ihost);
++ sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
++
++ /* Construct the phys for this controller */
++ for (i = 0; i < SCI_MAX_PHYS; i++) {
++ /* Add all the PHYs to the dummy port */
++ sci_phy_construct(&ihost->phys[i],
++ &ihost->ports[SCI_MAX_PORTS], i);
++ }
++
++ ihost->invalid_phy_mask = 0;
++
++ sci_init_timer(&ihost->timer, controller_timeout);
++
++ /* Initialize the User and OEM parameters to default values. */
++ sci_controller_set_default_config_parameters(ihost);
++
++ return sci_controller_reset(ihost);
++}
++
++int sci_oem_parameters_validate(struct sci_oem_params *oem)
++{
++ int i;
++
++ for (i = 0; i < SCI_MAX_PORTS; i++)
++ if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
++ return -EINVAL;
++
++ for (i = 0; i < SCI_MAX_PHYS; i++)
++ if (oem->phys[i].sas_address.high == 0 &&
++ oem->phys[i].sas_address.low == 0)
++ return -EINVAL;
++
++ if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
++ for (i = 0; i < SCI_MAX_PHYS; i++)
++ if (oem->ports[i].phy_mask != 0)
++ return -EINVAL;
++ } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
++ u8 phy_mask = 0;
++
++ for (i = 0; i < SCI_MAX_PHYS; i++)
++ phy_mask |= oem->ports[i].phy_mask;
++
++ if (phy_mask == 0)
++ return -EINVAL;
++ } else
++ return -EINVAL;
++
++ if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
++ return -EINVAL;
++
++ return 0;
++}
++
++static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
++{
++ u32 state = ihost->sm.current_state_id;
++
++ if (state == SCIC_RESET ||
++ state == SCIC_INITIALIZING ||
++ state == SCIC_INITIALIZED) {
++
++ if (sci_oem_parameters_validate(&ihost->oem_parameters))
++ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
++
++ return SCI_SUCCESS;
++ }
++
++ return SCI_FAILURE_INVALID_STATE;
++}
++
++static void power_control_timeout(unsigned long data)
++{
++ struct sci_timer *tmr = (struct sci_timer *)data;
++ struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
++ struct isci_phy *iphy;
++ unsigned long flags;
++ u8 i;
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++
++ if (tmr->cancel)
++ goto done;
++
++ ihost->power_control.phys_granted_power = 0;
++
++ if (ihost->power_control.phys_waiting == 0) {
++ ihost->power_control.timer_started = false;
++ goto done;
++ }
++
++ for (i = 0; i < SCI_MAX_PHYS; i++) {
++
++ if (ihost->power_control.phys_waiting == 0)
++ break;
++
++ iphy = ihost->power_control.requesters[i];
++ if (iphy == NULL)
++ continue;
++
++ if (ihost->power_control.phys_granted_power >=
++ ihost->oem_parameters.controller.max_concurrent_dev_spin_up)
++ break;
++
++ ihost->power_control.requesters[i] = NULL;
++ ihost->power_control.phys_waiting--;
++ ihost->power_control.phys_granted_power++;
++ sci_phy_consume_power_handler(iphy);
++ }
++
++ /*
++ * It doesn't matter if the power list is empty, we need to start the
++ * timer in case another phy becomes ready.
++ */
++ sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
++ ihost->power_control.timer_started = true;
++
++done:
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++}
++
++void sci_controller_power_control_queue_insert(struct isci_host *ihost,
++ struct isci_phy *iphy)
++{
++ BUG_ON(iphy == NULL);
++
++ if (ihost->power_control.phys_granted_power <
++ ihost->oem_parameters.controller.max_concurrent_dev_spin_up) {
++ ihost->power_control.phys_granted_power++;
++ sci_phy_consume_power_handler(iphy);
++
++ /*
++ * stop and start the power_control timer. When the timer fires,
++ * phys_granted_power will be reset to 0
++ */
++ if (ihost->power_control.timer_started)
++ sci_del_timer(&ihost->power_control.timer);
++
++ sci_mod_timer(&ihost->power_control.timer,
++ SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
++ ihost->power_control.timer_started = true;
++
++ } else {
++ /* Add the phy in the waiting list */
++ ihost->power_control.requesters[iphy->phy_index] = iphy;
++ ihost->power_control.phys_waiting++;
++ }
++}
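++
++/* Illustration of the throttle: with the default
++ * max_concurrent_dev_spin_up of 1, the first phy to request power is
++ * granted it immediately; any phy that asks while the interval timer
++ * runs is parked in requesters[] and released one per timer expiration
++ * by power_control_timeout().
++ */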
++
++void sci_controller_power_control_queue_remove(struct isci_host *ihost,
++ struct isci_phy *iphy)
++{
++ BUG_ON(iphy == NULL);
++
++ if (ihost->power_control.requesters[iphy->phy_index])
++ ihost->power_control.phys_waiting--;
++
++ ihost->power_control.requesters[iphy->phy_index] = NULL;
++}
++
++#define AFE_REGISTER_WRITE_DELAY 10
++
++/* Initialize the AFE for this phy index. We need to read the AFE setup from
++ * the OEM parameters
++ */
++static void sci_controller_afe_initialization(struct isci_host *ihost)
++{
++ const struct sci_oem_params *oem = &ihost->oem_parameters;
++ struct pci_dev *pdev = ihost->pdev;
++ u32 afe_status;
++ u32 phy_id;
++
++ /* Clear DFX Status registers */
++ writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++
++ if (is_b0(pdev)) {
++ /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
++ * Timer, PM Stagger Timer */
++ writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++ }
++
++ /* Configure bias currents to normal */
++ if (is_a2(pdev))
++ writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control);
++ else if (is_b0(pdev) || is_c0(pdev))
++ writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control);
++
++ udelay(AFE_REGISTER_WRITE_DELAY);
++
++ /* Enable PLL */
++ if (is_b0(pdev) || is_c0(pdev))
++ writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0);
++ else
++ writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0);
++
++ udelay(AFE_REGISTER_WRITE_DELAY);
++
++ /* Wait for the PLL to lock */
++ do {
++ afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++ } while ((afe_status & 0x00001000) == 0);
++
++ if (is_a2(pdev)) {
++ /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
++ writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++ }
++
++ for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
++ const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
++
++ if (is_b0(pdev)) {
++ /* Configure transmitter SSC parameters */
++ writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++ } else if (is_c0(pdev)) {
++ /* Configure transmitter SSC parameters */
++ writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++
++ /*
++ * All defaults, except the Receive Word Alignment/Comma Detect
++ * Enable....(0xe800) */
++ writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++ } else {
++ /*
++ * All defaults, except the Receive Word Alignment/Comma Detect
++ * Enable....(0xe800) */
++ writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++
++ writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++ }
++
++ /*
++ * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
++ * & increase TX int & ext bias 20%....(0xe85c) */
++ if (is_a2(pdev))
++ writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
++ else if (is_b0(pdev)) {
++ /* Power down TX and RX (PWRDNTX and PWRDNRX) */
++ writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++
++ /*
++ * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
++ * & increase TX int & ext bias 20%....(0xe85c) */
++ writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
++ } else {
++ writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++
++ /*
++ * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
++ * & increase TX int & ext bias 20%....(0xe85c) */
++ writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
++ }
++ udelay(AFE_REGISTER_WRITE_DELAY);
++
++ if (is_a2(pdev)) {
++ /* Enable TX equalization (0xe824) */
++ writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++ }
++
++ /*
++ * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
++ * RDD=0x0(RX Detect Enabled) ....(0xe800) */
++ writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++
++ /* Leave DFE/FFE on */
++ if (is_a2(pdev))
++ writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
++ else if (is_b0(pdev)) {
++ writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++ /* Enable TX equalization (0xe824) */
++ writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
++ } else {
++ writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++
++ writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++
++ /* Enable TX equalization (0xe824) */
++ writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
++ }
++
++ udelay(AFE_REGISTER_WRITE_DELAY);
++
++ writel(oem_phy->afe_tx_amp_control0,
++ &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++
++ writel(oem_phy->afe_tx_amp_control1,
++ &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++
++ writel(oem_phy->afe_tx_amp_control2,
++ &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++
++ writel(oem_phy->afe_tx_amp_control3,
++ &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++ }
++
++ /* Transfer control to the PEs */
++ writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0);
++ udelay(AFE_REGISTER_WRITE_DELAY);
++}
++
++static void sci_controller_initialize_power_control(struct isci_host *ihost)
++{
++ sci_init_timer(&ihost->power_control.timer, power_control_timeout);
++
++ memset(ihost->power_control.requesters, 0,
++ sizeof(ihost->power_control.requesters));
++
++ ihost->power_control.phys_waiting = 0;
++ ihost->power_control.phys_granted_power = 0;
++}
++
++static enum sci_status sci_controller_initialize(struct isci_host *ihost)
++{
++ struct sci_base_state_machine *sm = &ihost->sm;
++ enum sci_status result = SCI_FAILURE;
++ unsigned long i, state, val;
++
++ if (ihost->sm.current_state_id != SCIC_RESET) {
++ dev_warn(&ihost->pdev->dev,
++ "SCIC Controller initialize operation requested "
++ "in invalid state\n");
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++ sci_change_state(sm, SCIC_INITIALIZING);
++
++ sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
++
++ ihost->next_phy_to_start = 0;
++ ihost->phy_startup_timer_pending = false;
++
++ sci_controller_initialize_power_control(ihost);
++
++ /*
++ * There is nothing to do here for B0 since we do not have to
++ * program the AFE registers.
++ * / @todo The AFE settings are supposed to be correct for the B0 but
++ * / presently they seem to be wrong. */
++ sci_controller_afe_initialization(ihost);
++
++
++ /* Take the hardware out of reset */
++ writel(0, &ihost->smu_registers->soft_reset_control);
++
++ /*
++ * / @todo Provide meaningful error code for hardware failure
++ * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
++ for (i = 100; i >= 1; i--) {
++ u32 status;
++
++ /* Loop until the hardware reports success */
++ udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
++ status = readl(&ihost->smu_registers->control_status);
++
++ if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
++ break;
++ }
++ if (i == 0)
++ goto out;
++
++ /*
++ * Determine the actual device capacities that the
++ * hardware will support */
++ val = readl(&ihost->smu_registers->device_context_capacity);
++
++ /* Record the smaller of the two capacity values */
++ ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
++ ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
++ ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
++
++ /*
++ * Make all PEs that are unassigned match up with the
++ * logical ports
++ */
++ for (i = 0; i < ihost->logical_port_entries; i++) {
++ struct scu_port_task_scheduler_group_registers __iomem
++ *ptsg = &ihost->scu_registers->peg0.ptsg;
++
++ writel(i, &ptsg->protocol_engine[i]);
++ }
++
++ /* Initialize hardware PCI Relaxed ordering in DMA engines */
++ val = readl(&ihost->scu_registers->sdma.pdma_configuration);
++ val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
++ writel(val, &ihost->scu_registers->sdma.pdma_configuration);
++
++ val = readl(&ihost->scu_registers->sdma.cdma_configuration);
++ val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
++ writel(val, &ihost->scu_registers->sdma.cdma_configuration);
++
++ /*
++ * Initialize the PHYs before the PORTs because the PHY registers
++ * are accessed during the port initialization.
++ */
++ for (i = 0; i < SCI_MAX_PHYS; i++) {
++ result = sci_phy_initialize(&ihost->phys[i],
++ &ihost->scu_registers->peg0.pe[i].tl,
++ &ihost->scu_registers->peg0.pe[i].ll);
++ if (result != SCI_SUCCESS)
++ goto out;
++ }
++
++ for (i = 0; i < ihost->logical_port_entries; i++) {
++ struct isci_port *iport = &ihost->ports[i];
++
++ iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
++ iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
++ iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
++ }
++
++ result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
++
++ out:
++ /* Advance the controller state machine */
++ if (result == SCI_SUCCESS)
++ state = SCIC_INITIALIZED;
++ else
++ state = SCIC_FAILED;
++ sci_change_state(sm, state);
++
++ return result;
++}
++
++static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
++ struct sci_user_parameters *sci_parms)
++{
++ u32 state = ihost->sm.current_state_id;
++
++ if (state == SCIC_RESET ||
++ state == SCIC_INITIALIZING ||
++ state == SCIC_INITIALIZED) {
++ u16 index;
++
++ /*
++ * Validate the user parameters. If they are not legal, then
++ * return a failure.
++ */
++ for (index = 0; index < SCI_MAX_PHYS; index++) {
++ struct sci_phy_user_params *user_phy;
++
++ user_phy = &sci_parms->phys[index];
++
++ if (!((user_phy->max_speed_generation <=
++ SCIC_SDS_PARM_MAX_SPEED) &&
++ (user_phy->max_speed_generation >
++ SCIC_SDS_PARM_NO_SPEED)))
++ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
++
++ if ((user_phy->in_connection_align_insertion_frequency < 3) ||
++ (user_phy->align_insertion_frequency == 0) ||
++ (user_phy->notify_enable_spin_up_insertion_frequency == 0))
++ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
++ }
++
++ if ((sci_parms->stp_inactivity_timeout == 0) ||
++ (sci_parms->ssp_inactivity_timeout == 0) ||
++ (sci_parms->stp_max_occupancy_timeout == 0) ||
++ (sci_parms->ssp_max_occupancy_timeout == 0) ||
++ (sci_parms->no_outbound_task_timeout == 0))
++ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
++
++ memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
++
++ return SCI_SUCCESS;
++ }
++
++ return SCI_FAILURE_INVALID_STATE;
++}
++
++static int sci_controller_mem_init(struct isci_host *ihost)
++{
++ struct device *dev = &ihost->pdev->dev;
++ dma_addr_t dma;
++ size_t size;
++ int err;
++
++ size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
++ ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
++ if (!ihost->completion_queue)
++ return -ENOMEM;
++
++ writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower);
++ writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper);
++
++ size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
++ ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
++ GFP_KERNEL);
++ if (!ihost->remote_node_context_table)
++ return -ENOMEM;
++
++ writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower);
++ writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper);
++
++ size = ihost->task_context_entries * sizeof(struct scu_task_context);
++ ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
++ if (!ihost->task_context_table)
++ return -ENOMEM;
++
++ ihost->task_context_dma = dma;
++ writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower);
++ writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper);
++
++ err = sci_unsolicited_frame_control_construct(ihost);
++ if (err)
++ return err;
++
++ /*
++ * Inform the silicon as to the location of the UF headers and
++ * address table.
++ */
++ writel(lower_32_bits(ihost->uf_control.headers.physical_address),
++ &ihost->scu_registers->sdma.uf_header_base_address_lower);
++ writel(upper_32_bits(ihost->uf_control.headers.physical_address),
++ &ihost->scu_registers->sdma.uf_header_base_address_upper);
++
++ writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
++ &ihost->scu_registers->sdma.uf_address_table_lower);
++ writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
++ &ihost->scu_registers->sdma.uf_address_table_upper);
++
++ return 0;
++}
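++
++/* Note that all three tables above come from dmam_alloc_coherent(), so
++ * their lifetime is tied to the PCI device via devres; the early
++ * -ENOMEM returns need no unwind and teardown frees them automatically.
++ */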
++
++int isci_host_init(struct isci_host *ihost)
++{
++ int err = 0, i;
++ enum sci_status status;
++ struct sci_user_parameters sci_user_params;
++ struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
++
++ spin_lock_init(&ihost->state_lock);
++ spin_lock_init(&ihost->scic_lock);
++ init_waitqueue_head(&ihost->eventq);
++
++ isci_host_change_state(ihost, isci_starting);
++
++ status = sci_controller_construct(ihost, scu_base(ihost),
++ smu_base(ihost));
++
++ if (status != SCI_SUCCESS) {
++ dev_err(&ihost->pdev->dev,
++ "%s: sci_controller_construct failed - status = %x\n",
++ __func__,
++ status);
++ return -ENODEV;
++ }
++
++ ihost->sas_ha.dev = &ihost->pdev->dev;
++ ihost->sas_ha.lldd_ha = ihost;
++
++ /*
++ * grab initial values stored in the controller object for OEM and USER
++ * parameters
++ */
++ isci_user_parameters_get(&sci_user_params);
++ status = sci_user_parameters_set(ihost, &sci_user_params);
++ if (status != SCI_SUCCESS) {
++ dev_warn(&ihost->pdev->dev,
++ "%s: sci_user_parameters_set failed\n",
++ __func__);
++ return -ENODEV;
++ }
++
++ /* grab any OEM parameters specified in orom */
++ if (pci_info->orom) {
++ status = isci_parse_oem_parameters(&ihost->oem_parameters,
++ pci_info->orom,
++ ihost->id);
++ if (status != SCI_SUCCESS) {
++ dev_warn(&ihost->pdev->dev,
++ "parsing firmware oem parameters failed\n");
++ return -EINVAL;
++ }
++ }
++
++ status = sci_oem_parameters_set(ihost);
++ if (status != SCI_SUCCESS) {
++ dev_warn(&ihost->pdev->dev,
++ "%s: sci_oem_parameters_set failed\n",
++ __func__);
++ return -ENODEV;
++ }
++
++ tasklet_init(&ihost->completion_tasklet,
++ isci_host_completion_routine, (unsigned long)ihost);
++
++ INIT_LIST_HEAD(&ihost->requests_to_complete);
++ INIT_LIST_HEAD(&ihost->requests_to_errorback);
++
++ spin_lock_irq(&ihost->scic_lock);
++ status = sci_controller_initialize(ihost);
++ spin_unlock_irq(&ihost->scic_lock);
++ if (status != SCI_SUCCESS) {
++ dev_warn(&ihost->pdev->dev,
++ "%s: sci_controller_initialize failed -"
++ " status = 0x%x\n",
++ __func__, status);
++ return -ENODEV;
++ }
++
++ err = sci_controller_mem_init(ihost);
++ if (err)
++ return err;
++
++ for (i = 0; i < SCI_MAX_PORTS; i++)
++ isci_port_init(&ihost->ports[i], ihost, i);
++
++ for (i = 0; i < SCI_MAX_PHYS; i++)
++ isci_phy_init(&ihost->phys[i], ihost, i);
++
++ for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
++ struct isci_remote_device *idev = &ihost->devices[i];
++
++ INIT_LIST_HEAD(&idev->reqs_in_process);
++ INIT_LIST_HEAD(&idev->node);
++ }
++
++ for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
++ struct isci_request *ireq;
++ dma_addr_t dma;
++
++ ireq = dmam_alloc_coherent(&ihost->pdev->dev,
++ sizeof(struct isci_request), &dma,
++ GFP_KERNEL);
++ if (!ireq)
++ return -ENOMEM;
++
++ ireq->tc = &ihost->task_context_table[i];
++ ireq->owning_controller = ihost;
++ spin_lock_init(&ireq->state_lock);
++ ireq->request_daddr = dma;
++ ireq->isci_host = ihost;
++ ihost->reqs[i] = ireq;
++ }
++
++ return 0;
++}
++
++void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
++ struct isci_phy *iphy)
++{
++ switch (ihost->sm.current_state_id) {
++ case SCIC_STARTING:
++ sci_del_timer(&ihost->phy_timer);
++ ihost->phy_startup_timer_pending = false;
++ ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
++ iport, iphy);
++ sci_controller_start_next_phy(ihost);
++ break;
++ case SCIC_READY:
++ ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
++ iport, iphy);
++ break;
++ default:
++ dev_dbg(&ihost->pdev->dev,
++ "%s: SCIC Controller linkup event from phy %d in "
++ "unexpected state %d\n", __func__, iphy->phy_index,
++ ihost->sm.current_state_id);
++ }
++}
++
++void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
++ struct isci_phy *iphy)
++{
++ switch (ihost->sm.current_state_id) {
++ case SCIC_STARTING:
++ case SCIC_READY:
++ ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
++ iport, iphy);
++ break;
++ default:
++ dev_dbg(&ihost->pdev->dev,
++ "%s: SCIC Controller linkdown event from phy %d in "
++ "unexpected state %d\n",
++ __func__,
++ iphy->phy_index,
++ ihost->sm.current_state_id);
++ }
++}
++
++static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
++{
++ u32 index;
++
++ for (index = 0; index < ihost->remote_node_entries; index++) {
++ if ((ihost->device_table[index] != NULL) &&
++ (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
++ return true;
++ }
++
++ return false;
++}
++
++void sci_controller_remote_device_stopped(struct isci_host *ihost,
++ struct isci_remote_device *idev)
++{
++ if (ihost->sm.current_state_id != SCIC_STOPPING) {
++ dev_dbg(&ihost->pdev->dev,
++ "SCIC Controller 0x%p remote device stopped event "
++ "from device 0x%p in unexpected state %d\n",
++ ihost, idev,
++ ihost->sm.current_state_id);
++ return;
++ }
++
++ if (!sci_controller_has_remote_devices_stopping(ihost))
++ sci_change_state(&ihost->sm, SCIC_STOPPED);
++}
++
++void sci_controller_post_request(struct isci_host *ihost, u32 request)
++{
++ dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
++ __func__, ihost->id, request);
++
++ writel(request, &ihost->smu_registers->post_context_port);
++}
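++
++/* A request is handed to the silicon with this single register write;
++ * the encoded 'request' value is assumed to carry the task context
++ * index and the post command type (see the POST_TC_ABORT usage in
++ * sci_controller_terminate_request() below).
++ */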
++
++struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
++{
++ u16 task_index;
++ u16 task_sequence;
++
++ task_index = ISCI_TAG_TCI(io_tag);
++
++ if (task_index < ihost->task_context_entries) {
++ struct isci_request *ireq = ihost->reqs[task_index];
++
++ if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
++ task_sequence = ISCI_TAG_SEQ(io_tag);
++
++ if (task_sequence == ihost->io_request_sequence[task_index])
++ return ireq;
++ }
++ }
++
++ return NULL;
++}
++
++/**
++ * This method allocates a remote node index and reserves the remote node
++ * context space for use. This method can fail if there are no more remote
++ * node indexes available.
++ * @ihost: This is the controller object which contains the set of
++ * free remote node ids
++ * @idev: This is the device object which is requesting a remote node
++ * id
++ * @node_id: This is the remote node id that is assigned to the device if one
++ * is available
++ *
++ * enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there are no
++ * remote node indexes available.
++ */
++enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ u16 *node_id)
++{
++ u16 node_index;
++ u32 remote_node_count = sci_remote_device_node_count(idev);
++
++ node_index = sci_remote_node_table_allocate_remote_node(
++ &ihost->available_remote_nodes, remote_node_count
++ );
++
++ if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
++ ihost->device_table[node_index] = idev;
++
++ *node_id = node_index;
++
++ return SCI_SUCCESS;
++ }
++
++ return SCI_FAILURE_INSUFFICIENT_RESOURCES;
++}
++
++void sci_controller_free_remote_node_context(struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ u16 node_id)
++{
++ u32 remote_node_count = sci_remote_device_node_count(idev);
++
++ if (ihost->device_table[node_id] == idev) {
++ ihost->device_table[node_id] = NULL;
++
++ sci_remote_node_table_release_remote_node_index(
++ &ihost->available_remote_nodes, remote_node_count, node_id
++ );
++ }
++}
++
++void sci_controller_copy_sata_response(void *response_buffer,
++ void *frame_header,
++ void *frame_buffer)
++{
++ /* XXX type safety? */
++ memcpy(response_buffer, frame_header, sizeof(u32));
++
++ memcpy(response_buffer + sizeof(u32),
++ frame_buffer,
++ sizeof(struct dev_to_host_fis) - sizeof(u32));
++}
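++
++/* The split copy above reflects how the unsolicited frame appears to
++ * arrive: the first dword of the D2H FIS sits in the frame header and
++ * the remaining dwords in the frame buffer, so both pieces are stitched
++ * back into a single struct dev_to_host_fis.
++ */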
++
++void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
++{
++ if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
++ writel(ihost->uf_control.get,
++ &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
++}
++
++void isci_tci_free(struct isci_host *ihost, u16 tci)
++{
++ u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
++
++ ihost->tci_pool[tail] = tci;
++ ihost->tci_tail = tail + 1;
++}
++
++static u16 isci_tci_alloc(struct isci_host *ihost)
++{
++ u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
++ u16 tci = ihost->tci_pool[head];
++
++ ihost->tci_head = head + 1;
++ return tci;
++}
++
++static u16 isci_tci_space(struct isci_host *ihost)
++{
++ return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
++}
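++
++/* tci_head and tci_tail are free-running counters, masked with
++ * (SCI_MAX_IO_REQUESTS - 1) only on array access. This is the standard
++ * power-of-two circular buffer idiom and lets CIRC_SPACE() tell a full
++ * pool apart from an empty one.
++ */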
++
++u16 isci_alloc_tag(struct isci_host *ihost)
++{
++ if (isci_tci_space(ihost)) {
++ u16 tci = isci_tci_alloc(ihost);
++ u8 seq = ihost->io_request_sequence[tci];
++
++ return ISCI_TAG(seq, tci);
++ }
++
++ return SCI_CONTROLLER_INVALID_IO_TAG;
++}
++
++enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
++{
++ u16 tci = ISCI_TAG_TCI(io_tag);
++ u16 seq = ISCI_TAG_SEQ(io_tag);
++
++ /* prevent tail from passing head */
++ if (isci_tci_active(ihost) == 0)
++ return SCI_FAILURE_INVALID_IO_TAG;
++
++ if (seq == ihost->io_request_sequence[tci]) {
++ ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
++
++ isci_tci_free(ihost, tci);
++
++ return SCI_SUCCESS;
++ }
++ return SCI_FAILURE_INVALID_IO_TAG;
++}
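++
++/* Tag lifecycle example: ISCI_TAG(seq, tci) packs a sequence number
++ * and a task context index into one io_tag. Freeing a tag increments
++ * io_request_sequence[tci], so a stale copy of the old tag presented
++ * later fails the seq comparison above instead of double-freeing the
++ * tci.
++ */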
++
++enum sci_status sci_controller_start_io(struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq)
++{
++ enum sci_status status;
++
++ if (ihost->sm.current_state_id != SCIC_READY) {
++ dev_warn(&ihost->pdev->dev, "invalid state to start I/O");
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++ status = sci_remote_device_start_io(ihost, idev, ireq);
++ if (status != SCI_SUCCESS)
++ return status;
++
++ set_bit(IREQ_ACTIVE, &ireq->flags);
++ sci_controller_post_request(ihost, ireq->post_context);
++ return SCI_SUCCESS;
++}
++
++enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq)
++{
++ /* terminate an ongoing (i.e. started) core IO request. This does not
++ * abort the IO request at the target, but rather removes the IO
++ * request from the host controller.
++ */
++ enum sci_status status;
++
++ if (ihost->sm.current_state_id != SCIC_READY) {
++ dev_warn(&ihost->pdev->dev,
++ "invalid state to terminate request\n");
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++ status = sci_io_request_terminate(ireq);
++ if (status != SCI_SUCCESS)
++ return status;
++
++ /*
++ * Utilize the original post context command and OR in the POST_TC_ABORT
++ * request sub-type.
++ */
++ sci_controller_post_request(ihost,
++ ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
++ return SCI_SUCCESS;
++}
++
++/**
++ * sci_controller_complete_io() - This method will perform core specific
++ * completion operations for an IO request. After this method is invoked,
++ * the user should consider the IO request as invalid until it is properly
++ * reused (i.e. re-constructed).
++ * @ihost: The handle to the controller object for which to complete the
++ * IO request.
++ * @idev: The handle to the remote device object for which to complete
++ * the IO request.
++ * @ireq: the handle to the io request object to complete.
++ */
++enum sci_status sci_controller_complete_io(struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq)
++{
++ enum sci_status status;
++ u16 index;
++
++ switch (ihost->sm.current_state_id) {
++ case SCIC_STOPPING:
++ /* XXX: Implement this function */
++ return SCI_FAILURE;
++ case SCIC_READY:
++ status = sci_remote_device_complete_io(ihost, idev, ireq);
++ if (status != SCI_SUCCESS)
++ return status;
++
++ index = ISCI_TAG_TCI(ireq->io_tag);
++ clear_bit(IREQ_ACTIVE, &ireq->flags);
++ return SCI_SUCCESS;
++ default:
++ dev_warn(&ihost->pdev->dev, "invalid state to complete I/O");
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++}
++
++enum sci_status sci_controller_continue_io(struct isci_request *ireq)
++{
++ struct isci_host *ihost = ireq->owning_controller;
++
++ if (ihost->sm.current_state_id != SCIC_READY) {
++ dev_warn(&ihost->pdev->dev, "invalid state to continue I/O");
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++ set_bit(IREQ_ACTIVE, &ireq->flags);
++ sci_controller_post_request(ihost, ireq->post_context);
++ return SCI_SUCCESS;
++}
++
++/**
++ * sci_controller_start_task() - This method is called by the SCIC user to
++ * send/start a framework task management request.
++ * @ihost: the handle to the controller object for which to start the task
++ * management request.
++ * @idev: the handle to the remote device object for which to start
++ * the task management request.
++ * @ireq: the handle to the task request object to start.
++ */
++enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq)
++{
++ enum sci_status status;
++
++ if (ihost->sm.current_state_id != SCIC_READY) {
++ dev_warn(&ihost->pdev->dev,
++ "%s: SCIC Controller starting task from invalid "
++ "state\n",
++ __func__);
++ return SCI_TASK_FAILURE_INVALID_STATE;
++ }
++
++ status = sci_remote_device_start_task(ihost, idev, ireq);
++ switch (status) {
++ case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
++ set_bit(IREQ_ACTIVE, &ireq->flags);
++
++ /*
++ * We will let the framework know this task request started successfully,
++ * although the core is still working on starting the request (it will
++ * post the TC when the RNC is resumed).
++ */
++ return SCI_SUCCESS;
++ case SCI_SUCCESS:
++ set_bit(IREQ_ACTIVE, &ireq->flags);
++ sci_controller_post_request(ihost, ireq->post_context);
++ break;
++ default:
++ break;
++ }
++
++ return status;
++}
+diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
+new file mode 100644
+index 0000000..9f33831
+--- /dev/null
++++ b/drivers/scsi/isci/host.h
+@@ -0,0 +1,545 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _SCI_HOST_H_
++#define _SCI_HOST_H_
++
++#include "remote_device.h"
++#include "phy.h"
++#include "isci.h"
++#include "remote_node_table.h"
++#include "registers.h"
++#include "unsolicited_frame_control.h"
++#include "probe_roms.h"
++
++struct isci_request;
++struct scu_task_context;
++
++
++/**
++ * struct sci_power_control -
++ *
++ * This structure defines the fields for managing power control for direct
++ * attached disk devices.
++ */
++struct sci_power_control {
++ /**
++ * This field is set when the power control timer is running and cleared when
++ * it is not.
++ */
++ bool timer_started;
++
++ /**
++ * Timer to control when the directed attached disks can consume power.
++ */
++ struct sci_timer timer;
++
++ /**
++ * This field is used to keep track of how many phys are put into the
++ * requesters field.
++ */
++ u8 phys_waiting;
++
++ /**
++ * This field is used to keep track of how many phys have been granted to consume power
++ */
++ u8 phys_granted_power;
++
++ /**
++ * This field is an array of phys that we are waiting on. The phys are direct
++ * mapped into requesters via struct sci_phy.phy_index
++ */
++ struct isci_phy *requesters[SCI_MAX_PHYS];
++
++};
++
++struct sci_port_configuration_agent;
++typedef void (*port_config_fn)(struct isci_host *,
++ struct sci_port_configuration_agent *,
++ struct isci_port *, struct isci_phy *);
++
++struct sci_port_configuration_agent {
++ u16 phy_configured_mask;
++ u16 phy_ready_mask;
++ struct {
++ u8 min_index;
++ u8 max_index;
++ } phy_valid_port_range[SCI_MAX_PHYS];
++ bool timer_pending;
++ port_config_fn link_up_handler;
++ port_config_fn link_down_handler;
++ struct sci_timer timer;
++};
++
++/**
++ * isci_host - primary host/controller object
++ * @timer: timeout start/stop operations
++ * @device_table: rni (hw remote node index) to remote device lookup table
++ * @available_remote_nodes: rni allocator
++ * @power_control: manage device spin up
++ * @io_request_sequence: generation number for tci's (task contexts)
++ * @task_context_table: hw task context table
++ * @remote_node_context_table: hw remote node context table
++ * @completion_queue: hw-producer driver-consumer communication ring
++ * @completion_queue_get: tracks the driver 'head' of the ring to notify hw
++ * @logical_port_entries: min({driver|silicon}-supported-port-count)
++ * @remote_node_entries: min({driver|silicon}-supported-node-count)
++ * @task_context_entries: min({driver|silicon}-supported-task-count)
++ * @phy_timer: phy startup timer
++ * @invalid_phy_mask: if an invalid_link_up notification is reported, a bit for
++ * the phy index is set so further notifications are not
++ * made. Once the phy reports link up and is made part of a
++ * port then this bit is cleared.
++ *
++ */
++struct isci_host {
++ struct sci_base_state_machine sm;
++ /* XXX can we time this externally */
++ struct sci_timer timer;
++ /* XXX drop reference module params directly */
++ struct sci_user_parameters user_parameters;
++ /* XXX no need to be a union */
++ struct sci_oem_params oem_parameters;
++ struct sci_port_configuration_agent port_agent;
++ struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES];
++ struct sci_remote_node_table available_remote_nodes;
++ struct sci_power_control power_control;
++ u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
++ struct scu_task_context *task_context_table;
++ dma_addr_t task_context_dma;
++ union scu_remote_node_context *remote_node_context_table;
++ u32 *completion_queue;
++ u32 completion_queue_get;
++ u32 logical_port_entries;
++ u32 remote_node_entries;
++ u32 task_context_entries;
++ struct sci_unsolicited_frame_control uf_control;
++
++ /* phy startup */
++ struct sci_timer phy_timer;
++ /* XXX kill */
++ bool phy_startup_timer_pending;
++ u32 next_phy_to_start;
++ /* XXX convert to unsigned long and use bitops */
++ u8 invalid_phy_mask;
++
++ /* TODO attempt dynamic interrupt coalescing scheme */
++ u16 interrupt_coalesce_number;
++ u32 interrupt_coalesce_timeout;
++ struct smu_registers __iomem *smu_registers;
++ struct scu_registers __iomem *scu_registers;
++
++ u16 tci_head;
++ u16 tci_tail;
++ u16 tci_pool[SCI_MAX_IO_REQUESTS];
++
++ int id; /* unique within a given pci device */
++ struct isci_phy phys[SCI_MAX_PHYS];
++ struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
++ struct sas_ha_struct sas_ha;
++
++ spinlock_t state_lock;
++ struct pci_dev *pdev;
++ enum isci_status status;
++ #define IHOST_START_PENDING 0
++ #define IHOST_STOP_PENDING 1
++ unsigned long flags;
++ wait_queue_head_t eventq;
++ struct Scsi_Host *shost;
++ struct tasklet_struct completion_tasklet;
++ struct list_head requests_to_complete;
++ struct list_head requests_to_errorback;
++ spinlock_t scic_lock;
++ struct isci_request *reqs[SCI_MAX_IO_REQUESTS];
++ struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
++};
++
++/**
++ * enum sci_controller_states - This enumeration depicts all the states
++ * for the common controller state machine.
++ */
++enum sci_controller_states {
++ /**
++ * Simply the initial state for the base controller state machine.
++ */
++ SCIC_INITIAL = 0,
++
++ /**
++ * This state indicates that the controller is reset. The memory for
++	 * the controller is in its initial state, but the controller requires
++ * initialization.
++ * This state is entered from the INITIAL state.
++ * This state is entered from the RESETTING state.
++ */
++ SCIC_RESET,
++
++ /**
++ * This state is typically an action state that indicates the controller
++ * is in the process of initialization. In this state no new IO operations
++ * are permitted.
++ * This state is entered from the RESET state.
++ */
++ SCIC_INITIALIZING,
++
++ /**
++ * This state indicates that the controller has been successfully
++ * initialized. In this state no new IO operations are permitted.
++ * This state is entered from the INITIALIZING state.
++ */
++ SCIC_INITIALIZED,
++
++ /**
++	 * This state indicates that the controller is in the process of becoming
++ * ready (i.e. starting). In this state no new IO operations are permitted.
++ * This state is entered from the INITIALIZED state.
++ */
++ SCIC_STARTING,
++
++ /**
++ * This state indicates the controller is now ready. Thus, the user
++ * is able to perform IO operations on the controller.
++ * This state is entered from the STARTING state.
++ */
++ SCIC_READY,
++
++ /**
++ * This state is typically an action state that indicates the controller
++ * is in the process of resetting. Thus, the user is unable to perform
++ * IO operations on the controller. A reset is considered destructive in
++ * most cases.
++ * This state is entered from the READY state.
++ * This state is entered from the FAILED state.
++ * This state is entered from the STOPPED state.
++ */
++ SCIC_RESETTING,
++
++ /**
++ * This state indicates that the controller is in the process of stopping.
++ * In this state no new IO operations are permitted, but existing IO
++ * operations are allowed to complete.
++ * This state is entered from the READY state.
++ */
++ SCIC_STOPPING,
++
++ /**
++ * This state indicates that the controller has successfully been stopped.
++ * In this state no new IO operations are permitted.
++ * This state is entered from the STOPPING state.
++ */
++ SCIC_STOPPED,
++
++ /**
++ * This state indicates that the controller could not successfully be
++ * initialized. In this state no new IO operations are permitted.
++ * This state is entered from the INITIALIZING state.
++ * This state is entered from the STARTING state.
++ * This state is entered from the STOPPING state.
++ * This state is entered from the RESETTING state.
++ */
++ SCIC_FAILED,
++};
++
++/**
++ * struct isci_pci_info - This structure represents the PCI function
++ * containing the controllers. Depending on the PCI SKU, there could be up
++ * to 2 controllers in the PCI function.
++ */
++#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)
++
++struct isci_pci_info {
++ struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
++ struct isci_host *hosts[SCI_MAX_CONTROLLERS];
++ struct isci_orom *orom;
++};
++
++static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
++{
++ return pci_get_drvdata(pdev);
++}
++
++#define for_each_isci_host(id, ihost, pdev) \
++ for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
++ id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
++ ihost = to_pci_info(pdev)->hosts[++id])
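++/* Note: iteration stops at the first NULL slot, so hosts[] is assumed to
++ * be populated densely from index 0 (see isci_pci_probe).
++ */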
++
++static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
++{
++ return isci_host->status;
++}
++
++static inline void isci_host_change_state(struct isci_host *isci_host,
++ enum isci_status status)
++{
++ unsigned long flags;
++
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: isci_host = %p, state = 0x%x",
++ __func__,
++ isci_host,
++ status);
++ spin_lock_irqsave(&isci_host->state_lock, flags);
++ isci_host->status = status;
++ spin_unlock_irqrestore(&isci_host->state_lock, flags);
++}
++
++static inline void wait_for_start(struct isci_host *ihost)
++{
++ wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
++}
++
++static inline void wait_for_stop(struct isci_host *ihost)
++{
++ wait_event(ihost->eventq, !test_bit(IHOST_STOP_PENDING, &ihost->flags));
++}
++
++static inline void wait_for_device_start(struct isci_host *ihost, struct isci_remote_device *idev)
++{
++ wait_event(ihost->eventq, !test_bit(IDEV_START_PENDING, &idev->flags));
++}
++
++static inline void wait_for_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
++{
++ wait_event(ihost->eventq, !test_bit(IDEV_STOP_PENDING, &idev->flags));
++}
++
++static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
++{
++ return dev->port->ha->lldd_ha;
++}
++
++/* we always use protocol engine group zero */
++#define ISCI_PEG 0
++
++/* see sci_controller_io_tag_allocate|free for how seq and tci are built */
++#define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci)
++
++/* these are returned by the hardware, so sanitize them */
++#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
++#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
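++/* e.g. ISCI_TAG(2, 5) == 0x2005; conversely ISCI_TAG_SEQ(0x2005) == 2 and
++ * ISCI_TAG_TCI(0x2005) == 5, given SCI_MAX_SEQ == 16 and
++ * SCI_MAX_IO_REQUESTS == 256.
++ */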
++
++/* interrupt coalescing baseline: 9 == 3 to 5us interrupt delay per command */
++#define ISCI_COALESCE_BASE 9
++
++/* expander attached sata devices require 3 rnc slots */
++static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
++{
++ struct domain_device *dev = idev->domain_dev;
++
++ if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
++ !idev->is_direct_attached)
++ return SCU_STP_REMOTE_NODE_COUNT;
++ return SCU_SSP_REMOTE_NODE_COUNT;
++}
++
++/**
++ * sci_controller_clear_invalid_phy() -
++ *
++ * This macro will clear the bit in the invalid phy mask for this controller
++ * object. This is used to control messages reported for invalid link up
++ * notifications.
++ */
++#define sci_controller_clear_invalid_phy(controller, phy) \
++ ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
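++/* e.g. for a phy with phy_index == 2 this clears bit 2, i.e.
++ * invalid_phy_mask &= ~0x04.
++ */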
++
++static inline struct device *sciphy_to_dev(struct isci_phy *iphy)
++{
++ if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host)
++ return NULL;
++
++ return &iphy->isci_port->isci_host->pdev->dev;
++}
++
++static inline struct device *sciport_to_dev(struct isci_port *iport)
++{
++ if (!iport || !iport->isci_host)
++ return NULL;
++
++ return &iport->isci_host->pdev->dev;
++}
++
++static inline struct device *scirdev_to_dev(struct isci_remote_device *idev)
++{
++ if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
++ return NULL;
++
++ return &idev->isci_port->isci_host->pdev->dev;
++}
++
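++/* silicon revision ids, as decoded below: < 4 => A2, == 4 => B0, >= 5 => C0 */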
++static inline bool is_a2(struct pci_dev *pdev)
++{
++	return pdev->revision < 4;
++}
++
++static inline bool is_b0(struct pci_dev *pdev)
++{
++	return pdev->revision == 4;
++}
++
++static inline bool is_c0(struct pci_dev *pdev)
++{
++	return pdev->revision >= 5;
++}
++
++void sci_controller_post_request(struct isci_host *ihost,
++ u32 request);
++void sci_controller_release_frame(struct isci_host *ihost,
++ u32 frame_index);
++void sci_controller_copy_sata_response(void *response_buffer,
++ void *frame_header,
++ void *frame_buffer);
++enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ u16 *node_id);
++void sci_controller_free_remote_node_context(
++ struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ u16 node_id);
++
++struct isci_request *sci_request_by_tag(struct isci_host *ihost,
++ u16 io_tag);
++
++void sci_controller_power_control_queue_insert(
++ struct isci_host *ihost,
++ struct isci_phy *iphy);
++
++void sci_controller_power_control_queue_remove(
++ struct isci_host *ihost,
++ struct isci_phy *iphy);
++
++void sci_controller_link_up(
++ struct isci_host *ihost,
++ struct isci_port *iport,
++ struct isci_phy *iphy);
++
++void sci_controller_link_down(
++ struct isci_host *ihost,
++ struct isci_port *iport,
++ struct isci_phy *iphy);
++
++void sci_controller_remote_device_stopped(
++ struct isci_host *ihost,
++ struct isci_remote_device *idev);
++
++void sci_controller_copy_task_context(
++ struct isci_host *ihost,
++ struct isci_request *ireq);
++
++void sci_controller_register_setup(struct isci_host *ihost);
++
++enum sci_status sci_controller_continue_io(struct isci_request *ireq);
++int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
++void isci_host_scan_start(struct Scsi_Host *);
++u16 isci_alloc_tag(struct isci_host *ihost);
++enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
++void isci_tci_free(struct isci_host *ihost, u16 tci);
++
++int isci_host_init(struct isci_host *);
++
++void isci_host_init_controller_names(
++ struct isci_host *isci_host,
++ unsigned int controller_idx);
++
++void isci_host_deinit(
++ struct isci_host *);
++
++void isci_host_port_link_up(
++ struct isci_host *,
++ struct isci_port *,
++ struct isci_phy *);
++int isci_host_dev_found(struct domain_device *);
++
++void isci_host_remote_device_start_complete(
++ struct isci_host *,
++ struct isci_remote_device *,
++ enum sci_status);
++
++void sci_controller_disable_interrupts(
++ struct isci_host *ihost);
++
++enum sci_status sci_controller_start_io(
++ struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq);
++
++enum sci_task_status sci_controller_start_task(
++ struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq);
++
++enum sci_status sci_controller_terminate_request(
++ struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq);
++
++enum sci_status sci_controller_complete_io(
++ struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq);
++
++void sci_port_configuration_agent_construct(
++ struct sci_port_configuration_agent *port_agent);
++
++enum sci_status sci_port_configuration_agent_initialize(
++ struct isci_host *ihost,
++ struct sci_port_configuration_agent *port_agent);
++#endif
+diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
+new file mode 100644
+index 0000000..29aa34e
+--- /dev/null
++++ b/drivers/scsi/isci/init.c
+@@ -0,0 +1,574 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/firmware.h>
++#include <linux/efi.h>
++#include <asm/string.h>
++#include <scsi/scsi_host.h>
++#include "isci.h"
++#include "task.h"
++#include "probe_roms.h"
++
++#define MAJ 1
++#define MIN 0
++#define BUILD 0
++#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
++ __stringify(BUILD)
++
++MODULE_VERSION(DRV_VERSION);
++
++static struct scsi_transport_template *isci_transport_template;
++
++static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = {
++ { PCI_VDEVICE(INTEL, 0x1D61),},
++ { PCI_VDEVICE(INTEL, 0x1D63),},
++ { PCI_VDEVICE(INTEL, 0x1D65),},
++ { PCI_VDEVICE(INTEL, 0x1D67),},
++ { PCI_VDEVICE(INTEL, 0x1D69),},
++ { PCI_VDEVICE(INTEL, 0x1D6B),},
++ { PCI_VDEVICE(INTEL, 0x1D60),},
++ { PCI_VDEVICE(INTEL, 0x1D62),},
++ { PCI_VDEVICE(INTEL, 0x1D64),},
++ { PCI_VDEVICE(INTEL, 0x1D66),},
++ { PCI_VDEVICE(INTEL, 0x1D68),},
++ { PCI_VDEVICE(INTEL, 0x1D6A),},
++ {}
++};
++
++MODULE_DEVICE_TABLE(pci, isci_id_table);
++
++/* linux isci specific settings */
++
++unsigned char no_outbound_task_to = 20;
++module_param(no_outbound_task_to, byte, 0);
++MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
++
++u16 ssp_max_occ_to = 20;
++module_param(ssp_max_occ_to, ushort, 0);
++MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)");
++
++u16 stp_max_occ_to = 5;
++module_param(stp_max_occ_to, ushort, 0);
++MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)");
++
++u16 ssp_inactive_to = 5;
++module_param(ssp_inactive_to, ushort, 0);
++MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)");
++
++u16 stp_inactive_to = 5;
++module_param(stp_inactive_to, ushort, 0);
++MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
++
++unsigned char phy_gen = 3;
++module_param(phy_gen, byte, 0);
++MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
++
++unsigned char max_concurr_spinup = 1;
++module_param(max_concurr_spinup, byte, 0);
++MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
++
++static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
++ struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
++ struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
++
++ return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
++}
++
++static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
++
++struct device_attribute *isci_host_attrs[] = {
++ &dev_attr_isci_id,
++ NULL
++};
++
++static struct scsi_host_template isci_sht = {
++
++ .module = THIS_MODULE,
++ .name = DRV_NAME,
++ .proc_name = DRV_NAME,
++ .queuecommand = sas_queuecommand,
++ .target_alloc = sas_target_alloc,
++ .slave_configure = sas_slave_configure,
++ .slave_destroy = sas_slave_destroy,
++ .scan_finished = isci_host_scan_finished,
++ .scan_start = isci_host_scan_start,
++ .change_queue_depth = sas_change_queue_depth,
++ .change_queue_type = sas_change_queue_type,
++ .bios_param = sas_bios_param,
++ .can_queue = ISCI_CAN_QUEUE_VAL,
++ .cmd_per_lun = 1,
++ .this_id = -1,
++ .sg_tablesize = SG_ALL,
++ .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
++ .use_clustering = ENABLE_CLUSTERING,
++ .eh_device_reset_handler = sas_eh_device_reset_handler,
++ .eh_bus_reset_handler = isci_bus_reset_handler,
++ .slave_alloc = sas_slave_alloc,
++ .target_destroy = sas_target_destroy,
++ .ioctl = sas_ioctl,
++ .shost_attrs = isci_host_attrs,
++};
++
++static struct sas_domain_function_template isci_transport_ops = {
++
++ /* The class calls these to notify the LLDD of an event. */
++ .lldd_port_formed = isci_port_formed,
++ .lldd_port_deformed = isci_port_deformed,
++
++ /* The class calls these when a device is found or gone. */
++ .lldd_dev_found = isci_remote_device_found,
++ .lldd_dev_gone = isci_remote_device_gone,
++
++ .lldd_execute_task = isci_task_execute_task,
++ /* Task Management Functions. Must be called from process context. */
++ .lldd_abort_task = isci_task_abort_task,
++ .lldd_abort_task_set = isci_task_abort_task_set,
++ .lldd_clear_aca = isci_task_clear_aca,
++ .lldd_clear_task_set = isci_task_clear_task_set,
++ .lldd_I_T_nexus_reset = isci_task_I_T_nexus_reset,
++ .lldd_lu_reset = isci_task_lu_reset,
++ .lldd_query_task = isci_task_query_task,
++
++ /* Port and Adapter management */
++ .lldd_clear_nexus_port = isci_task_clear_nexus_port,
++ .lldd_clear_nexus_ha = isci_task_clear_nexus_ha,
++
++ /* Phy management */
++ .lldd_control_phy = isci_phy_control,
++};
++
++
++/******************************************************************************
++* P R O T E C T E D M E T H O D S
++******************************************************************************/
++
++
++
++/**
++ * isci_register_sas_ha() - This method initializes various lldd
++ * specific members of the sas_ha struct and calls the libsas
++ * sas_register_ha() function.
++ * @isci_host: This parameter specifies the lldd specific wrapper for the
++ * libsas sas_ha struct.
++ *
++ * This method returns an error code indicating success or failure. A
++ * negative value indicates a memory allocation failure; zero indicates
++ * success.
++ */
++static int isci_register_sas_ha(struct isci_host *isci_host)
++{
++ int i;
++ struct sas_ha_struct *sas_ha = &(isci_host->sas_ha);
++ struct asd_sas_phy **sas_phys;
++ struct asd_sas_port **sas_ports;
++
++ sas_phys = devm_kzalloc(&isci_host->pdev->dev,
++ SCI_MAX_PHYS * sizeof(void *),
++ GFP_KERNEL);
++ if (!sas_phys)
++ return -ENOMEM;
++
++ sas_ports = devm_kzalloc(&isci_host->pdev->dev,
++ SCI_MAX_PORTS * sizeof(void *),
++ GFP_KERNEL);
++ if (!sas_ports)
++ return -ENOMEM;
++
++ /*----------------- Libsas Initialization Stuff----------------------
++ * Set various fields in the sas_ha struct:
++ */
++
++ sas_ha->sas_ha_name = DRV_NAME;
++ sas_ha->lldd_module = THIS_MODULE;
++ sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0];
++
++ /* set the array of phy and port structs. */
++ for (i = 0; i < SCI_MAX_PHYS; i++) {
++ sas_phys[i] = &isci_host->phys[i].sas_phy;
++ sas_ports[i] = &isci_host->ports[i].sas_port;
++ }
++
++ sas_ha->sas_phy = sas_phys;
++ sas_ha->sas_port = sas_ports;
++ sas_ha->num_phys = SCI_MAX_PHYS;
++
++ sas_ha->lldd_queue_size = ISCI_CAN_QUEUE_VAL;
++ sas_ha->lldd_max_execute_num = 1;
++ sas_ha->strict_wide_ports = 1;
++
++ sas_register_ha(sas_ha);
++
++ return 0;
++}
++
++static void isci_unregister(struct isci_host *isci_host)
++{
++ struct Scsi_Host *shost;
++
++ if (!isci_host)
++ return;
++
++ shost = isci_host->shost;
++
++ sas_unregister_ha(&isci_host->sas_ha);
++
++ sas_remove_host(isci_host->shost);
++ scsi_remove_host(isci_host->shost);
++ scsi_host_put(isci_host->shost);
++}
++
++static int __devinit isci_pci_init(struct pci_dev *pdev)
++{
++ int err, bar_num, bar_mask = 0;
++ void __iomem * const *iomap;
++
++ err = pcim_enable_device(pdev);
++ if (err) {
++ dev_err(&pdev->dev,
++ "failed enable PCI device %s!\n",
++ pci_name(pdev));
++ return err;
++ }
++
++ for (bar_num = 0; bar_num < SCI_PCI_BAR_COUNT; bar_num++)
++ bar_mask |= 1 << (bar_num * 2);
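++	/* with SCI_PCI_BAR_COUNT == 2 this yields bar_mask == 0x5, i.e. BARs 0
++	 * and 2, since each 64-bit BAR occupies two BAR slots
++	 */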
++
++ err = pcim_iomap_regions(pdev, bar_mask, DRV_NAME);
++ if (err)
++ return err;
++
++ iomap = pcim_iomap_table(pdev);
++ if (!iomap)
++ return -ENOMEM;
++
++ pci_set_master(pdev);
++
++ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
++ if (err) {
++ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
++ if (err)
++ return err;
++ }
++
++ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
++ if (err) {
++ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
++static int num_controllers(struct pci_dev *pdev)
++{
++	/* bar size alone can tell us if we are running with a dual-controller
++	 * part; there is no need to trust revision ids that might be under
++	 * broken firmware control
++ */
++ resource_size_t scu_bar_size = pci_resource_len(pdev, SCI_SCU_BAR*2);
++ resource_size_t smu_bar_size = pci_resource_len(pdev, SCI_SMU_BAR*2);
++
++ if (scu_bar_size >= SCI_SCU_BAR_SIZE*SCI_MAX_CONTROLLERS &&
++ smu_bar_size >= SCI_SMU_BAR_SIZE*SCI_MAX_CONTROLLERS)
++ return SCI_MAX_CONTROLLERS;
++ else
++ return 1;
++}
++
++static int isci_setup_interrupts(struct pci_dev *pdev)
++{
++ int err, i, num_msix;
++ struct isci_host *ihost;
++ struct isci_pci_info *pci_info = to_pci_info(pdev);
++
++ /*
++ * Determine the number of vectors associated with this
++ * PCI function.
++ */
++ num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;
++
++ for (i = 0; i < num_msix; i++)
++ pci_info->msix_entries[i].entry = i;
++
++ err = pci_enable_msix(pdev, pci_info->msix_entries, num_msix);
++ if (err)
++ goto intx;
++
++ for (i = 0; i < num_msix; i++) {
++ int id = i / SCI_NUM_MSI_X_INT;
++ struct msix_entry *msix = &pci_info->msix_entries[i];
++ irq_handler_t isr;
++
++ ihost = pci_info->hosts[id];
++ /* odd numbered vectors are error interrupts */
++ if (i & 1)
++ isr = isci_error_isr;
++ else
++ isr = isci_msix_isr;
++
++ err = devm_request_irq(&pdev->dev, msix->vector, isr, 0,
++ DRV_NAME"-msix", ihost);
++ if (!err)
++ continue;
++
++ dev_info(&pdev->dev, "msix setup failed falling back to intx\n");
++ while (i--) {
++ id = i / SCI_NUM_MSI_X_INT;
++ ihost = pci_info->hosts[id];
++ msix = &pci_info->msix_entries[i];
++ devm_free_irq(&pdev->dev, msix->vector, ihost);
++ }
++ pci_disable_msix(pdev);
++ goto intx;
++ }
++ return 0;
++
++ intx:
++ for_each_isci_host(i, ihost, pdev) {
++ err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr,
++ IRQF_SHARED, DRV_NAME"-intx", ihost);
++ if (err)
++ break;
++ }
++ return err;
++}
++
++static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
++{
++ struct isci_host *isci_host;
++ struct Scsi_Host *shost;
++ int err;
++
++ isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL);
++ if (!isci_host)
++ return NULL;
++
++ isci_host->pdev = pdev;
++ isci_host->id = id;
++
++ shost = scsi_host_alloc(&isci_sht, sizeof(void *));
++ if (!shost)
++ return NULL;
++ isci_host->shost = shost;
++
++ err = isci_host_init(isci_host);
++ if (err)
++ goto err_shost;
++
++ SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha;
++ isci_host->sas_ha.core.shost = shost;
++ shost->transportt = isci_transport_template;
++
++ shost->max_id = ~0;
++ shost->max_lun = ~0;
++ shost->max_cmd_len = MAX_COMMAND_SIZE;
++
++ err = scsi_add_host(shost, &pdev->dev);
++ if (err)
++ goto err_shost;
++
++ err = isci_register_sas_ha(isci_host);
++ if (err)
++ goto err_shost_remove;
++
++ return isci_host;
++
++ err_shost_remove:
++ scsi_remove_host(shost);
++ err_shost:
++ scsi_host_put(shost);
++
++ return NULL;
++}
++
++static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
++{
++ struct isci_pci_info *pci_info;
++ int err, i;
++ struct isci_host *isci_host;
++ const struct firmware *fw = NULL;
++ struct isci_orom *orom = NULL;
++ char *source = "(platform)";
++
++ dev_info(&pdev->dev, "driver configured for rev: %d silicon\n",
++ pdev->revision);
++
++ pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL);
++ if (!pci_info)
++ return -ENOMEM;
++ pci_set_drvdata(pdev, pci_info);
++
++ if (efi_enabled)
++ orom = isci_get_efi_var(pdev);
++
++ if (!orom)
++ orom = isci_request_oprom(pdev);
++
++ for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
++ if (sci_oem_parameters_validate(&orom->ctrl[i])) {
++ dev_warn(&pdev->dev,
++ "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
++ devm_kfree(&pdev->dev, orom);
++ orom = NULL;
++ break;
++ }
++ }
++
++ if (!orom) {
++ source = "(firmware)";
++ orom = isci_request_firmware(pdev, fw);
++ if (!orom) {
++ /* TODO convert this to WARN_TAINT_ONCE once the
++ * orom/efi parameter support is widely available
++ */
++ dev_warn(&pdev->dev,
++ "Loading user firmware failed, using default "
++ "values\n");
++ dev_warn(&pdev->dev,
++ "Default OEM configuration being used: 4 "
++ "narrow ports, and default SAS Addresses\n");
++ }
++ }
++
++ if (orom)
++ dev_info(&pdev->dev,
++ "OEM SAS parameters (version: %u.%u) loaded %s\n",
++ (orom->hdr.version & 0xf0) >> 4,
++ (orom->hdr.version & 0xf), source);
++
++ pci_info->orom = orom;
++
++ err = isci_pci_init(pdev);
++ if (err)
++ return err;
++
++ for (i = 0; i < num_controllers(pdev); i++) {
++ struct isci_host *h = isci_host_alloc(pdev, i);
++
++ if (!h) {
++ err = -ENOMEM;
++ goto err_host_alloc;
++ }
++ pci_info->hosts[i] = h;
++ }
++
++ err = isci_setup_interrupts(pdev);
++ if (err)
++ goto err_host_alloc;
++
++ for_each_isci_host(i, isci_host, pdev)
++ scsi_scan_host(isci_host->shost);
++
++ return 0;
++
++ err_host_alloc:
++ for_each_isci_host(i, isci_host, pdev)
++ isci_unregister(isci_host);
++ return err;
++}
++
++static void __devexit isci_pci_remove(struct pci_dev *pdev)
++{
++ struct isci_host *ihost;
++ int i;
++
++ for_each_isci_host(i, ihost, pdev) {
++ isci_unregister(ihost);
++ isci_host_deinit(ihost);
++ sci_controller_disable_interrupts(ihost);
++ }
++}
++
++static struct pci_driver isci_pci_driver = {
++ .name = DRV_NAME,
++ .id_table = isci_id_table,
++ .probe = isci_pci_probe,
++ .remove = __devexit_p(isci_pci_remove),
++};
++
++static __init int isci_init(void)
++{
++ int err;
++
++ pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n",
++ DRV_NAME, DRV_VERSION);
++
++ isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);
++ if (!isci_transport_template)
++ return -ENOMEM;
++
++ err = pci_register_driver(&isci_pci_driver);
++ if (err)
++ sas_release_transport(isci_transport_template);
++
++ return err;
++}
++
++static __exit void isci_exit(void)
++{
++ pci_unregister_driver(&isci_pci_driver);
++ sas_release_transport(isci_transport_template);
++}
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_FIRMWARE(ISCI_FW_NAME);
++module_init(isci_init);
++module_exit(isci_exit);
+diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
+new file mode 100644
+index 0000000..d1de633
+--- /dev/null
++++ b/drivers/scsi/isci/isci.h
+@@ -0,0 +1,538 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __ISCI_H__
++#define __ISCI_H__
++
++#include <linux/interrupt.h>
++#include <linux/types.h>
++
++#define DRV_NAME "isci"
++#define SCI_PCI_BAR_COUNT 2
++#define SCI_NUM_MSI_X_INT 2
++#define SCI_SMU_BAR 0
++#define SCI_SMU_BAR_SIZE (16*1024)
++#define SCI_SCU_BAR 1
++#define SCI_SCU_BAR_SIZE (4*1024*1024)
++#define SCI_IO_SPACE_BAR0 2
++#define SCI_IO_SPACE_BAR1 3
++#define ISCI_CAN_QUEUE_VAL 250 /* < SCI_MAX_IO_REQUESTS ? */
++#define SCIC_CONTROLLER_STOP_TIMEOUT 5000
++
++#define SCI_CONTROLLER_INVALID_IO_TAG 0xFFFF
++
++#define SCI_MAX_PHYS (4UL)
++#define SCI_MAX_PORTS SCI_MAX_PHYS
++#define SCI_MAX_SMP_PHYS (384) /* not silicon constrained */
++#define SCI_MAX_REMOTE_DEVICES (256UL)
++#define SCI_MAX_IO_REQUESTS (256UL)
++#define SCI_MAX_SEQ (16)
++#define SCI_MAX_MSIX_MESSAGES (2)
++#define SCI_MAX_SCATTER_GATHER_ELEMENTS 130 /* not silicon constrained */
++#define SCI_MAX_CONTROLLERS 2
++#define SCI_MAX_DOMAINS SCI_MAX_PORTS
++
++#define SCU_MAX_CRITICAL_NOTIFICATIONS (384)
++#define SCU_MAX_EVENTS_SHIFT (7)
++#define SCU_MAX_EVENTS (1 << SCU_MAX_EVENTS_SHIFT)
++#define SCU_MAX_UNSOLICITED_FRAMES (128)
++#define SCU_MAX_COMPLETION_QUEUE_SCRATCH (128)
++#define SCU_MAX_COMPLETION_QUEUE_ENTRIES (SCU_MAX_CRITICAL_NOTIFICATIONS \
++ + SCU_MAX_EVENTS \
++ + SCU_MAX_UNSOLICITED_FRAMES \
++ + SCI_MAX_IO_REQUESTS \
++ + SCU_MAX_COMPLETION_QUEUE_SCRATCH)
++#define SCU_MAX_COMPLETION_QUEUE_SHIFT (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES))
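++/* with the values above: 384 + 128 + 128 + 256 + 128 = 1024 entries, so
++ * SCU_MAX_COMPLETION_QUEUE_SHIFT == 10 and the power-of-two checks in
++ * check_sizes() hold
++ */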
++
++#define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096)
++#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE (1024)
++#define SCU_INVALID_FRAME_INDEX (0xFFFF)
++
++#define SCU_IO_REQUEST_MAX_SGE_SIZE (0x00FFFFFF)
++#define SCU_IO_REQUEST_MAX_TRANSFER_LENGTH (0x00FFFFFF)
++
++static inline void check_sizes(void)
++{
++ BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_EVENTS);
++ BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES <= 8);
++ BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_UNSOLICITED_FRAMES);
++ BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_COMPLETION_QUEUE_ENTRIES);
++ BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES > SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES);
++ BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_IO_REQUESTS);
++ BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_SEQ);
++}
++
++/**
++ * enum sci_status - This is the general return status enumeration for non-IO,
++ * non-task management related SCI interface methods.
++ *
++ *
++ */
++enum sci_status {
++ /**
++ * This member indicates successful completion.
++ */
++ SCI_SUCCESS = 0,
++
++ /**
++ * This value indicates that the calling method completed successfully,
++	 * but that the IO may have completed before having its start method
++ * invoked. This occurs during SAT translation for requests that do
++ * not require an IO to the target or for any other requests that may
++ * be completed without having to submit IO.
++ */
++ SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
++
++ /**
++	 * This value indicates that the SCU hardware returned an early response
++ * because the io request specified more data than is returned by the
++ * target device (mode pages, inquiry data, etc.). The completion routine
++ * will handle this case to get the actual number of bytes transferred.
++ */
++ SCI_SUCCESS_IO_DONE_EARLY,
++
++ /**
++ * This member indicates that the object for which a state change is
++ * being requested is already in said state.
++ */
++ SCI_WARNING_ALREADY_IN_STATE,
++
++ /**
++ * This member indicates interrupt coalescence timer may cause SAS
++ * specification compliance issues (i.e. SMP target mode response
++ * frames must be returned within 1.9 milliseconds).
++ */
++ SCI_WARNING_TIMER_CONFLICT,
++
++ /**
++	 * This field indicates that a sequence of actions is not yet complete.
++	 * Mostly, this status is used when multiple ATA commands are needed in a
++	 * SATI translation.
++ */
++ SCI_WARNING_SEQUENCE_INCOMPLETE,
++
++ /**
++ * This member indicates that there was a general failure.
++ */
++ SCI_FAILURE,
++
++ /**
++ * This member indicates that the SCI implementation is unable to complete
++	 * an operation due to a critical flaw that prevents any further operation
++ * (i.e. an invalid pointer).
++ */
++ SCI_FATAL_ERROR,
++
++ /**
++ * This member indicates the calling function failed, because the state
++ * of the controller is in a state that prevents successful completion.
++ */
++ SCI_FAILURE_INVALID_STATE,
++
++ /**
++ * This member indicates the calling function failed, because there is
++ * insufficient resources/memory to complete the request.
++ */
++ SCI_FAILURE_INSUFFICIENT_RESOURCES,
++
++ /**
++ * This member indicates the calling function failed, because the
++ * controller object required for the operation can't be located.
++ */
++ SCI_FAILURE_CONTROLLER_NOT_FOUND,
++
++ /**
++ * This member indicates the calling function failed, because the
++ * discovered controller type is not supported by the library.
++ */
++ SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE,
++
++ /**
++ * This member indicates the calling function failed, because the
++ * requested initialization data version isn't supported.
++ */
++ SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION,
++
++ /**
++ * This member indicates the calling function failed, because the
++ * requested configuration of SAS Phys into SAS Ports is not supported.
++ */
++ SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION,
++
++ /**
++ * This member indicates the calling function failed, because the
++ * requested protocol is not supported by the remote device, port,
++ * or controller.
++ */
++ SCI_FAILURE_UNSUPPORTED_PROTOCOL,
++
++ /**
++ * This member indicates the calling function failed, because the
++ * requested information type is not supported by the SCI implementation.
++ */
++ SCI_FAILURE_UNSUPPORTED_INFORMATION_TYPE,
++
++ /**
++ * This member indicates the calling function failed, because the
++ * device already exists.
++ */
++ SCI_FAILURE_DEVICE_EXISTS,
++
++ /**
++ * This member indicates the calling function failed, because adding
++ * a phy to the object is not possible.
++ */
++ SCI_FAILURE_ADDING_PHY_UNSUPPORTED,
++
++ /**
++ * This member indicates the calling function failed, because the
++ * requested information type is not supported by the SCI implementation.
++ */
++ SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD,
++
++ /**
++ * This member indicates the calling function failed, because the SCI
++ * implementation does not support the supplied time limit.
++ */
++ SCI_FAILURE_UNSUPPORTED_TIME_LIMIT,
++
++ /**
++ * This member indicates the calling method failed, because the SCI
++ * implementation does not contain the specified Phy.
++ */
++ SCI_FAILURE_INVALID_PHY,
++
++ /**
++ * This member indicates the calling method failed, because the SCI
++ * implementation does not contain the specified Port.
++ */
++ SCI_FAILURE_INVALID_PORT,
++
++ /**
++	 * This member indicates the calling method was partly successful:
++	 * the port was reset, but not all phys in the port are operational.
++ */
++ SCI_FAILURE_RESET_PORT_PARTIAL_SUCCESS,
++
++ /**
++	 * This member indicates that the calling method failed:
++	 * the port reset did not complete because none of the phys are operational.
++ */
++ SCI_FAILURE_RESET_PORT_FAILURE,
++
++ /**
++ * This member indicates the calling method failed, because the SCI
++ * implementation does not contain the specified remote device.
++ */
++ SCI_FAILURE_INVALID_REMOTE_DEVICE,
++
++ /**
++ * This member indicates the calling method failed, because the remote
++ * device is in a bad state and requires a reset.
++ */
++ SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
++
++ /**
++ * This member indicates the calling method failed, because the SCI
++ * implementation does not contain or support the specified IO tag.
++ */
++ SCI_FAILURE_INVALID_IO_TAG,
++
++ /**
++ * This member indicates that the operation failed and the user should
++ * check the response data associated with the IO.
++ */
++ SCI_FAILURE_IO_RESPONSE_VALID,
++
++ /**
++ * This member indicates that the operation failed, the failure is
++ * controller implementation specific, and the response data associated
++ * with the request is not valid. You can query for the controller
++ * specific error information via sci_controller_get_request_status()
++ */
++ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
++
++ /**
++	 * This member indicates that the operation failed because the
++ * user requested this IO to be terminated.
++ */
++ SCI_FAILURE_IO_TERMINATED,
++
++ /**
++ * This member indicates that the operation failed and the associated
++ * request requires a SCSI abort task to be sent to the target.
++ */
++ SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
++
++ /**
++ * This member indicates that the operation failed because the supplied
++ * device could not be located.
++ */
++ SCI_FAILURE_DEVICE_NOT_FOUND,
++
++ /**
++ * This member indicates that the operation failed because the
++	 * object's association is required and is not correctly set.
++ */
++ SCI_FAILURE_INVALID_ASSOCIATION,
++
++ /**
++ * This member indicates that the operation failed, because a timeout
++ * occurred.
++ */
++ SCI_FAILURE_TIMEOUT,
++
++ /**
++ * This member indicates that the operation failed, because the user
++ * specified a value that is either invalid or not supported.
++ */
++ SCI_FAILURE_INVALID_PARAMETER_VALUE,
++
++ /**
++ * This value indicates that the operation failed, because the number
++ * of messages (MSI-X) is not supported.
++ */
++ SCI_FAILURE_UNSUPPORTED_MESSAGE_COUNT,
++
++ /**
++ * This value indicates that the method failed due to a lack of
++ * available NCQ tags.
++ */
++ SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
++
++ /**
++ * This value indicates that a protocol violation has occurred on the
++ * link.
++ */
++ SCI_FAILURE_PROTOCOL_VIOLATION,
++
++ /**
++ * This value indicates a failure condition that retry may help to clear.
++ */
++ SCI_FAILURE_RETRY_REQUIRED,
++
++ /**
++	 * This field indicates that the retry limit was reached when a retry
++	 * was attempted.
++ */
++ SCI_FAILURE_RETRY_LIMIT_REACHED,
++
++ /**
++ * This member indicates the calling method was partly successful.
++ * Mostly, this status is used when a LUN_RESET issued to an expander attached
++ * STP device in READY NCQ substate needs to have RNC suspended/resumed
++ * before posting TC.
++ */
++ SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS,
++
++ /**
++	 * This field indicates an illegal phy connection based on the routing
++	 * attributes of the two expander phys attached to each other.
++ */
++ SCI_FAILURE_ILLEGAL_ROUTING_ATTRIBUTE_CONFIGURATION,
++
++ /**
++	 * This field indicates a CONFIG ROUTE INFO command has a response with
++	 * function result INDEX DOES NOT EXIST, which usually means the maximum
++	 * route index was exceeded.
++ */
++ SCI_FAILURE_EXCEED_MAX_ROUTE_INDEX,
++
++ /**
++ * This value indicates that an unsupported PCI device ID has been
++ * specified. This indicates that attempts to invoke
++ * sci_library_allocate_controller() will fail.
++ */
++ SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID
++
++};
++
++/**
++ * enum sci_io_status - This enumeration depicts all of the possible IO
++ * completion status values. Each value in this enumeration maps directly
++ * to a value in the enum sci_status enumeration. Please refer to that
++ * enumeration for detailed comments concerning what the status represents.
++ *
++ * Add the API to retrieve the SCU status from the core. Check to see that the
++ * following statuses are properly handled: - SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL
++ * - SCI_IO_FAILURE_INVALID_IO_TAG
++ */
++enum sci_io_status {
++ SCI_IO_SUCCESS = SCI_SUCCESS,
++ SCI_IO_FAILURE = SCI_FAILURE,
++ SCI_IO_SUCCESS_COMPLETE_BEFORE_START = SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
++ SCI_IO_SUCCESS_IO_DONE_EARLY = SCI_SUCCESS_IO_DONE_EARLY,
++ SCI_IO_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE,
++ SCI_IO_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES,
++ SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
++ SCI_IO_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID,
++ SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
++ SCI_IO_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED,
++ SCI_IO_FAILURE_REQUIRES_SCSI_ABORT = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
++ SCI_IO_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
++ SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE = SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
++ SCI_IO_FAILURE_PROTOCOL_VIOLATION = SCI_FAILURE_PROTOCOL_VIOLATION,
++
++ SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
++
++ SCI_IO_FAILURE_RETRY_REQUIRED = SCI_FAILURE_RETRY_REQUIRED,
++ SCI_IO_FAILURE_RETRY_LIMIT_REACHED = SCI_FAILURE_RETRY_LIMIT_REACHED,
++ SCI_IO_FAILURE_INVALID_REMOTE_DEVICE = SCI_FAILURE_INVALID_REMOTE_DEVICE
++};
++
++/**
++ * enum sci_task_status - This enumeration depicts all of the possible task
++ * completion status values. Each value in this enumeration maps directly
++ * to a value in the enum sci_status enumeration. Please refer to that
++ * enumeration for detailed comments concerning what the status represents.
++ *
++ * Check to see that the following statuses are properly handled:
++ */
++enum sci_task_status {
++ SCI_TASK_SUCCESS = SCI_SUCCESS,
++ SCI_TASK_FAILURE = SCI_FAILURE,
++ SCI_TASK_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE,
++ SCI_TASK_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES,
++ SCI_TASK_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
++ SCI_TASK_FAILURE_INVALID_TAG = SCI_FAILURE_INVALID_IO_TAG,
++ SCI_TASK_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID,
++ SCI_TASK_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
++ SCI_TASK_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED,
++ SCI_TASK_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
++
++ SCI_TASK_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
++ SCI_TASK_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS = SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS
++
++};
++
++/**
++ * sci_swab32_cpy - convert between scsi and scu-hardware byte format
++ * @dest: receive the 4-byte endian swapped version of src
++ * @src: word aligned source buffer
++ *
++ * scu hardware handles SSP/SMP control, response, and unidentified
++ * frames in "big endian dword" order. Regardless of host endian this
++ * is always a swab32()-per-dword conversion of the standard definition,
++ * i.e. single byte fields swapped and multi-byte fields in little-
++ * endian
++ */
++static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt)
++{
++ u32 *dest = _dest, *src = _src;
++
++ while (--word_cnt >= 0)
++ dest[word_cnt] = swab32(src[word_cnt]);
++}
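++/* e.g. with word_cnt == 2, source bytes 01 02 03 04 05 06 07 08 are copied
++ * as 04 03 02 01 08 07 06 05, regardless of host endianness
++ */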
++
++extern unsigned char no_outbound_task_to;
++extern u16 ssp_max_occ_to;
++extern u16 stp_max_occ_to;
++extern u16 ssp_inactive_to;
++extern u16 stp_inactive_to;
++extern unsigned char phy_gen;
++extern unsigned char max_concurr_spinup;
++
++irqreturn_t isci_msix_isr(int vec, void *data);
++irqreturn_t isci_intx_isr(int vec, void *data);
++irqreturn_t isci_error_isr(int vec, void *data);
++
++/*
++ * Each timer is associated with a cancellation flag that is set when
++ * del_timer() is called and checked in the timer callback function. This
++ * is needed since del_timer_sync() cannot be called with sci_lock held.
++ * For deinit however, del_timer_sync() is used without holding the lock.
++ */
++struct sci_timer {
++ struct timer_list timer;
++ bool cancel;
++};
++
++static inline
++void sci_init_timer(struct sci_timer *tmr, void (*fn)(unsigned long))
++{
++ tmr->timer.function = fn;
++ tmr->timer.data = (unsigned long) tmr;
++ tmr->cancel = 0;
++ init_timer(&tmr->timer);
++}
++
++static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec)
++{
++ tmr->cancel = 0;
++ mod_timer(&tmr->timer, jiffies + msecs_to_jiffies(msec));
++}
++
++static inline void sci_del_timer(struct sci_timer *tmr)
++{
++ tmr->cancel = 1;
++ del_timer(&tmr->timer);
++}
++
++struct sci_base_state_machine {
++ const struct sci_base_state *state_table;
++ u32 initial_state_id;
++ u32 current_state_id;
++ u32 previous_state_id;
++};
++
++typedef void (*sci_state_transition_t)(struct sci_base_state_machine *sm);
++
++struct sci_base_state {
++ sci_state_transition_t enter_state; /* Called on state entry */
++ sci_state_transition_t exit_state; /* Called on state exit */
++};
++
++extern void sci_init_sm(struct sci_base_state_machine *sm,
++ const struct sci_base_state *state_table,
++ u32 initial_state);
++extern void sci_change_state(struct sci_base_state_machine *sm, u32 next_state);
++#endif /* __ISCI_H__ */
+diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
+new file mode 100644
+index 0000000..430fc8f
+--- /dev/null
++++ b/drivers/scsi/isci/phy.c
+@@ -0,0 +1,1325 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "isci.h"
++#include "host.h"
++#include "phy.h"
++#include "scu_event_codes.h"
++#include "probe_roms.h"
++
++/* Maximum arbitration wait time in micro-seconds */
++#define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME (700)
++
++enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy)
++{
++ return iphy->max_negotiated_speed;
++}
++
++static enum sci_status
++sci_phy_transport_layer_initialization(struct isci_phy *iphy,
++ struct scu_transport_layer_registers __iomem *reg)
++{
++ u32 tl_control;
++
++ iphy->transport_layer_registers = reg;
++
++ writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX,
++ &iphy->transport_layer_registers->stp_rni);
++
++ /*
++ * Hardware team recommends that we enable the STP prefetch for all
++ * transports
++ */
++ tl_control = readl(&iphy->transport_layer_registers->control);
++ tl_control |= SCU_TLCR_GEN_BIT(STP_WRITE_DATA_PREFETCH);
++ writel(tl_control, &iphy->transport_layer_registers->control);
++
++ return SCI_SUCCESS;
++}
++
++static enum sci_status
++sci_phy_link_layer_initialization(struct isci_phy *iphy,
++ struct scu_link_layer_registers __iomem *reg)
++{
++ struct isci_host *ihost = iphy->owning_port->owning_controller;
++ int phy_idx = iphy->phy_index;
++ struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
++ struct sci_phy_oem_params *phy_oem =
++ &ihost->oem_parameters.phys[phy_idx];
++ u32 phy_configuration;
++ struct sci_phy_cap phy_cap;
++ u32 parity_check = 0;
++ u32 parity_count = 0;
++ u32 llctl, link_rate;
++ u32 clksm_value = 0;
++ u32 sp_timeouts = 0;
++
++ iphy->link_layer_registers = reg;
++
++ /* Set our IDENTIFY frame data */
++ #define SCI_END_DEVICE 0x01
++
++ writel(SCU_SAS_TIID_GEN_BIT(SMP_INITIATOR) |
++ SCU_SAS_TIID_GEN_BIT(SSP_INITIATOR) |
++ SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
++ SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
++ SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
++ &iphy->link_layer_registers->transmit_identification);
++
++ /* Write the device SAS Address */
++ writel(0xFEDCBA98,
++ &iphy->link_layer_registers->sas_device_name_high);
++ writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low);
++
++ /* Write the source SAS Address */
++ writel(phy_oem->sas_address.high,
++ &iphy->link_layer_registers->source_sas_address_high);
++ writel(phy_oem->sas_address.low,
++ &iphy->link_layer_registers->source_sas_address_low);
++
++ /* Clear and Set the PHY Identifier */
++ writel(0, &iphy->link_layer_registers->identify_frame_phy_id);
++ writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx),
++ &iphy->link_layer_registers->identify_frame_phy_id);
++
++ /* Change the initial state of the phy configuration register */
++ phy_configuration =
++ readl(&iphy->link_layer_registers->phy_configuration);
++
++ /* Hold OOB state machine in reset */
++ phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
++ writel(phy_configuration,
++ &iphy->link_layer_registers->phy_configuration);
++
++ /* Configure the SNW capabilities */
++ phy_cap.all = 0;
++ phy_cap.start = 1;
++ phy_cap.gen3_no_ssc = 1;
++ phy_cap.gen2_no_ssc = 1;
++ phy_cap.gen1_no_ssc = 1;
++	if (ihost->oem_parameters.controller.do_enable_ssc) {
++ phy_cap.gen3_ssc = 1;
++ phy_cap.gen2_ssc = 1;
++ phy_cap.gen1_ssc = 1;
++ }
++
++ /*
++ * The SAS specification indicates that the phy_capabilities that
++ * are transmitted shall have an even parity. Calculate the parity. */
++ parity_check = phy_cap.all;
++ while (parity_check != 0) {
++ if (parity_check & 0x1)
++ parity_count++;
++ parity_check >>= 1;
++ }
++
++ /*
++ * If parity indicates there are an odd number of bits set, then
++ * set the parity bit to 1 in the phy capabilities. */
++ if ((parity_count % 2) != 0)
++ phy_cap.parity = 1;
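++	/* (equivalent to: phy_cap.parity = hweight32(phy_cap.all) & 1) */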
++
++ writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities);
++
++ /* Set the enable spinup period but disable the ability to send
++ * notify enable spinup
++ */
++ writel(SCU_ENSPINUP_GEN_VAL(COUNT,
++ phy_user->notify_enable_spin_up_insertion_frequency),
++ &iphy->link_layer_registers->notify_enable_spinup_control);
++
++	/* Write the ALIGN insertion frequency for the connected phy,
++	 * independent of the connected state
++ */
++ clksm_value = SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(CONNECTED,
++ phy_user->in_connection_align_insertion_frequency);
++
++ clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
++ phy_user->align_insertion_frequency);
++
++ writel(clksm_value, &iphy->link_layer_registers->clock_skew_management);
++
++ /* @todo Provide a way to write this register correctly */
++ writel(0x02108421,
++ &iphy->link_layer_registers->afe_lookup_table_control);
++
++ llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
++ (u8)ihost->user_parameters.no_outbound_task_timeout);
++
++ switch (phy_user->max_speed_generation) {
++ case SCIC_SDS_PARM_GEN3_SPEED:
++ link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3;
++ break;
++ case SCIC_SDS_PARM_GEN2_SPEED:
++ link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2;
++ break;
++ default:
++ link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1;
++ break;
++ }
++ llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
++ writel(llctl, &iphy->link_layer_registers->link_layer_control);
++
++ sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts);
++
++ /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
++ sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
++
++ /* Set RATE_CHANGE timeout value to 0x3B (59us). This ensures SCU can
++ * lock with 3Gb drive when SCU max rate is set to 1.5Gb.
++ */
++ sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
++
++ writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts);
++
++ if (is_a2(ihost->pdev)) {
++ /* Program the max ARB time for the PHY to 700us so we inter-operate with
++ * the PMC expander which shuts down PHYs if the expander PHY generates too
++ * many breaks. This time value will guarantee that the initiator PHY will
++ * generate the break.
++ */
++ writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
++ &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout);
++ }
++
++ /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */
++ writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout);
++
++ /* We can exit the initial state to the stopped state */
++ sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
++
++ return SCI_SUCCESS;
++}
++
++static void phy_sata_timeout(unsigned long data)
++{
++ struct sci_timer *tmr = (struct sci_timer *)data;
++ struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer);
++ struct isci_host *ihost = iphy->owning_port->owning_controller;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++
++ if (tmr->cancel)
++ goto done;
++
++ dev_dbg(sciphy_to_dev(iphy),
++ "%s: SCIC SDS Phy 0x%p did not receive signature fis before "
++ "timeout.\n",
++ __func__,
++ iphy);
++
++ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
++done:
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++}
++
++/**
++ * phy_get_non_dummy_port() - This method returns the port currently
++ *    containing this phy. If the phy is currently contained by the dummy
++ *    port, then the phy is considered to not be part of a port.
++ * @iphy: This parameter specifies the phy for which to retrieve the
++ *    containing port.
++ *
++ * Returns a handle to the port that contains the supplied phy, or NULL if
++ * the phy is not part of a real port (i.e. it is contained in the dummy
++ * port).
++ */
++struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy)
++{
++ struct isci_port *iport = iphy->owning_port;
++
++ if (iport->physical_port_index == SCIC_SDS_DUMMY_PORT)
++ return NULL;
++
++	return iport;
++}
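++
++/* A hedged usage sketch, mirroring the broadcast-change handling later in
++ * this file: callers use the non-dummy port to decide whether the phy has a
++ * real port association, e.g.:
++ *
++ *	if (phy_get_non_dummy_port(iphy))
++ *		sci_port_broadcast_change_received(iphy->owning_port, iphy);
++ *	else
++ *		iphy->bcn_received_while_port_unassigned = true;
++ */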
++
++/**
++ * sci_phy_set_port() - This method will assign a port to the phy object.
++ * @iphy: This parameter specifies the phy for which to assign a port.
++ * @iport: This parameter specifies the port object to assign to the phy.
++ */
++void sci_phy_set_port(
++ struct isci_phy *iphy,
++ struct isci_port *iport)
++{
++ iphy->owning_port = iport;
++
++ if (iphy->bcn_received_while_port_unassigned) {
++ iphy->bcn_received_while_port_unassigned = false;
++ sci_port_broadcast_change_received(iphy->owning_port, iphy);
++ }
++}
++
++enum sci_status sci_phy_initialize(struct isci_phy *iphy,
++ struct scu_transport_layer_registers __iomem *tl,
++ struct scu_link_layer_registers __iomem *ll)
++{
++	/* Perform the initialization of the TL hardware */
++ sci_phy_transport_layer_initialization(iphy, tl);
++
++	/* Perform the initialization of the PE hardware */
++ sci_phy_link_layer_initialization(iphy, ll);
++
++	/* There is nothing that needs to be done in this state; just
++ * transition to the stopped state
++ */
++ sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
++
++ return SCI_SUCCESS;
++}
++
++/**
++ * sci_phy_setup_transport() - This method assigns the direct attached
++ *    device ID for this phy.
++ * @iphy: The phy for which the direct attached device id is to
++ *    be assigned.
++ * @device_id: The direct attached device ID to assign to the phy.
++ *    This will either be the RNi for the device or an invalid RNi if there
++ *    is no current device assigned to the phy.
++ */
++void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id)
++{
++ u32 tl_control;
++
++ writel(device_id, &iphy->transport_layer_registers->stp_rni);
++
++ /*
++ * The read should guarantee that the first write gets posted
++ * before the next write
++ */
++ tl_control = readl(&iphy->transport_layer_registers->control);
++ tl_control |= SCU_TLCR_GEN_BIT(CLEAR_TCI_NCQ_MAPPING_TABLE);
++ writel(tl_control, &iphy->transport_layer_registers->control);
++}
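++
++/* Usage note (a sketch based on sci_phy_suspend() just below): a device is
++ * detached from the transport layer by writing the invalid RNi, e.g.:
++ *
++ *	sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
++ */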
++
++static void sci_phy_suspend(struct isci_phy *iphy)
++{
++ u32 scu_sas_pcfg_value;
++
++ scu_sas_pcfg_value =
++ readl(&iphy->link_layer_registers->phy_configuration);
++ scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
++ writel(scu_sas_pcfg_value,
++ &iphy->link_layer_registers->phy_configuration);
++
++ sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
++}
++
++void sci_phy_resume(struct isci_phy *iphy)
++{
++ u32 scu_sas_pcfg_value;
++
++ scu_sas_pcfg_value =
++ readl(&iphy->link_layer_registers->phy_configuration);
++ scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
++ writel(scu_sas_pcfg_value,
++ &iphy->link_layer_registers->phy_configuration);
++}
++
++void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
++{
++ sas->high = readl(&iphy->link_layer_registers->source_sas_address_high);
++ sas->low = readl(&iphy->link_layer_registers->source_sas_address_low);
++}
++
++void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
++{
++ struct sas_identify_frame *iaf;
++
++ iaf = &iphy->frame_rcvd.iaf;
++ memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE);
++}
++
++void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto)
++{
++ proto->all = readl(&iphy->link_layer_registers->transmit_identification);
++}
++
++enum sci_status sci_phy_start(struct isci_phy *iphy)
++{
++ enum sci_phy_states state = iphy->sm.current_state_id;
++
++ if (state != SCI_PHY_STOPPED) {
++ dev_dbg(sciphy_to_dev(iphy),
++ "%s: in wrong state: %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
++ return SCI_SUCCESS;
++}
++
++enum sci_status sci_phy_stop(struct isci_phy *iphy)
++{
++ enum sci_phy_states state = iphy->sm.current_state_id;
++
++ switch (state) {
++ case SCI_PHY_SUB_INITIAL:
++ case SCI_PHY_SUB_AWAIT_OSSP_EN:
++ case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
++ case SCI_PHY_SUB_AWAIT_SAS_POWER:
++ case SCI_PHY_SUB_AWAIT_SATA_POWER:
++ case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
++ case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
++ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
++ case SCI_PHY_SUB_FINAL:
++ case SCI_PHY_READY:
++ break;
++ default:
++ dev_dbg(sciphy_to_dev(iphy),
++ "%s: in wrong state: %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++ sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
++ return SCI_SUCCESS;
++}
++
++enum sci_status sci_phy_reset(struct isci_phy *iphy)
++{
++ enum sci_phy_states state = iphy->sm.current_state_id;
++
++ if (state != SCI_PHY_READY) {
++ dev_dbg(sciphy_to_dev(iphy),
++ "%s: in wrong state: %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++ sci_change_state(&iphy->sm, SCI_PHY_RESETTING);
++ return SCI_SUCCESS;
++}
++
++enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy)
++{
++ enum sci_phy_states state = iphy->sm.current_state_id;
++
++ switch (state) {
++ case SCI_PHY_SUB_AWAIT_SAS_POWER: {
++ u32 enable_spinup;
++
++ enable_spinup = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
++ enable_spinup |= SCU_ENSPINUP_GEN_BIT(ENABLE);
++ writel(enable_spinup, &iphy->link_layer_registers->notify_enable_spinup_control);
++
++		/* Change state to the final state; this substate machine has run to completion */
++ sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
++
++ return SCI_SUCCESS;
++ }
++ case SCI_PHY_SUB_AWAIT_SATA_POWER: {
++ u32 scu_sas_pcfg_value;
++
++ /* Release the spinup hold state and reset the OOB state machine */
++ scu_sas_pcfg_value =
++ readl(&iphy->link_layer_registers->phy_configuration);
++ scu_sas_pcfg_value &=
++ ~(SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD) | SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE));
++ scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
++ writel(scu_sas_pcfg_value,
++ &iphy->link_layer_registers->phy_configuration);
++
++ /* Now restart the OOB operation */
++ scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
++ scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
++ writel(scu_sas_pcfg_value,
++ &iphy->link_layer_registers->phy_configuration);
++
++		/* Change state to the final state; this substate machine has run to completion */
++ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_PHY_EN);
++
++ return SCI_SUCCESS;
++ }
++ default:
++ dev_dbg(sciphy_to_dev(iphy),
++ "%s: in wrong state: %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++}
++
++static void sci_phy_start_sas_link_training(struct isci_phy *iphy)
++{
++ /* continue the link training for the phy as if it were a SAS PHY
++ * instead of a SATA PHY. This is done because the completion queue had a SAS
++ * PHY DETECTED event when the state machine was expecting a SATA PHY event.
++ */
++ u32 phy_control;
++
++ phy_control = readl(&iphy->link_layer_registers->phy_configuration);
++ phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD);
++ writel(phy_control,
++ &iphy->link_layer_registers->phy_configuration);
++
++ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);
++
++ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS;
++}
++
++static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
++{
++ /* This method continues the link training for the phy as if it were a SATA PHY
++ * instead of a SAS PHY. This is done because the completion queue had a SATA
++	 * SPINUP HOLD event when the state machine was expecting a SAS PHY event.
++ */
++ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);
++
++ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
++}
++
++/**
++ * sci_phy_complete_link_training - perform processing common to
++ * all protocols upon completion of link training.
++ * @iphy: This parameter specifies the phy object for which link training
++ *    has completed.
++ * @max_link_rate: This parameter specifies the maximum link rate to be
++ * associated with this phy.
++ * @next_state: This parameter specifies the next state for the phy's starting
++ * sub-state machine.
++ *
++ */
++static void sci_phy_complete_link_training(struct isci_phy *iphy,
++ enum sas_linkrate max_link_rate,
++ u32 next_state)
++{
++ iphy->max_negotiated_speed = max_link_rate;
++
++ sci_change_state(&iphy->sm, next_state);
++}
++
++enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
++{
++ enum sci_phy_states state = iphy->sm.current_state_id;
++
++ switch (state) {
++ case SCI_PHY_SUB_AWAIT_OSSP_EN:
++ switch (scu_get_event_code(event_code)) {
++ case SCU_EVENT_SAS_PHY_DETECTED:
++ sci_phy_start_sas_link_training(iphy);
++ iphy->is_in_link_training = true;
++ break;
++ case SCU_EVENT_SATA_SPINUP_HOLD:
++ sci_phy_start_sata_link_training(iphy);
++ iphy->is_in_link_training = true;
++ break;
++ default:
++ dev_dbg(sciphy_to_dev(iphy),
++ "%s: PHY starting substate machine received "
++ "unexpected event_code %x\n",
++ __func__,
++ event_code);
++ return SCI_FAILURE;
++ }
++ return SCI_SUCCESS;
++ case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
++ switch (scu_get_event_code(event_code)) {
++ case SCU_EVENT_SAS_PHY_DETECTED:
++ /*
++ * Why is this being reported again by the controller?
++ * We would re-enter this state so just stay here */
++ break;
++ case SCU_EVENT_SAS_15:
++ case SCU_EVENT_SAS_15_SSC:
++ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
++ SCI_PHY_SUB_AWAIT_IAF_UF);
++ break;
++ case SCU_EVENT_SAS_30:
++ case SCU_EVENT_SAS_30_SSC:
++ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
++ SCI_PHY_SUB_AWAIT_IAF_UF);
++ break;
++ case SCU_EVENT_SAS_60:
++ case SCU_EVENT_SAS_60_SSC:
++ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
++ SCI_PHY_SUB_AWAIT_IAF_UF);
++ break;
++ case SCU_EVENT_SATA_SPINUP_HOLD:
++ /*
++ * We were doing SAS PHY link training and received a SATA PHY event
++ * continue OOB/SN as if this were a SATA PHY */
++ sci_phy_start_sata_link_training(iphy);
++ break;
++ case SCU_EVENT_LINK_FAILURE:
++ /* Link failure change state back to the starting state */
++ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
++ break;
++ default:
++ dev_warn(sciphy_to_dev(iphy),
++ "%s: PHY starting substate machine received "
++ "unexpected event_code %x\n",
++ __func__, event_code);
++
++ return SCI_FAILURE;
++ }
++ return SCI_SUCCESS;
++ case SCI_PHY_SUB_AWAIT_IAF_UF:
++ switch (scu_get_event_code(event_code)) {
++ case SCU_EVENT_SAS_PHY_DETECTED:
++			/* Back up the state machine */
++ sci_phy_start_sas_link_training(iphy);
++ break;
++ case SCU_EVENT_SATA_SPINUP_HOLD:
++ /* We were doing SAS PHY link training and received a
++ * SATA PHY event continue OOB/SN as if this were a
++ * SATA PHY
++ */
++ sci_phy_start_sata_link_training(iphy);
++ break;
++ case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
++ case SCU_EVENT_LINK_FAILURE:
++ case SCU_EVENT_HARD_RESET_RECEIVED:
++ /* Start the oob/sn state machine over again */
++ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
++ break;
++ default:
++ dev_warn(sciphy_to_dev(iphy),
++ "%s: PHY starting substate machine received "
++ "unexpected event_code %x\n",
++ __func__, event_code);
++ return SCI_FAILURE;
++ }
++ return SCI_SUCCESS;
++ case SCI_PHY_SUB_AWAIT_SAS_POWER:
++ switch (scu_get_event_code(event_code)) {
++ case SCU_EVENT_LINK_FAILURE:
++ /* Link failure change state back to the starting state */
++ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
++ break;
++ default:
++ dev_warn(sciphy_to_dev(iphy),
++ "%s: PHY starting substate machine received unexpected "
++ "event_code %x\n",
++ __func__,
++ event_code);
++ return SCI_FAILURE;
++ }
++ return SCI_SUCCESS;
++ case SCI_PHY_SUB_AWAIT_SATA_POWER:
++ switch (scu_get_event_code(event_code)) {
++ case SCU_EVENT_LINK_FAILURE:
++ /* Link failure change state back to the starting state */
++ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
++ break;
++ case SCU_EVENT_SATA_SPINUP_HOLD:
++ /* These events are received every 10ms and are
++ * expected while in this state
++ */
++ break;
++
++ case SCU_EVENT_SAS_PHY_DETECTED:
++ /* There has been a change in the phy type before OOB/SN for the
++			 * SATA finished; start down the SAS link training path.
++ */
++ sci_phy_start_sas_link_training(iphy);
++ break;
++
++ default:
++ dev_warn(sciphy_to_dev(iphy),
++ "%s: PHY starting substate machine received "
++ "unexpected event_code %x\n",
++ __func__, event_code);
++
++ return SCI_FAILURE;
++ }
++ return SCI_SUCCESS;
++ case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
++ switch (scu_get_event_code(event_code)) {
++ case SCU_EVENT_LINK_FAILURE:
++ /* Link failure change state back to the starting state */
++ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
++ break;
++ case SCU_EVENT_SATA_SPINUP_HOLD:
++			/* These events might be received since we don't know how many may be in
++ * the completion queue while waiting for power
++ */
++ break;
++ case SCU_EVENT_SATA_PHY_DETECTED:
++ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
++
++ /* We have received the SATA PHY notification change state */
++ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
++ break;
++ case SCU_EVENT_SAS_PHY_DETECTED:
++ /* There has been a change in the phy type before OOB/SN for the
++			 * SATA finished; start down the SAS link training path.
++ */
++ sci_phy_start_sas_link_training(iphy);
++ break;
++ default:
++ dev_warn(sciphy_to_dev(iphy),
++ "%s: PHY starting substate machine received "
++ "unexpected event_code %x\n",
++ __func__,
++ event_code);
++
++			return SCI_FAILURE;
++ }
++ return SCI_SUCCESS;
++ case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
++ switch (scu_get_event_code(event_code)) {
++ case SCU_EVENT_SATA_PHY_DETECTED:
++ /*
++			 * The hardware reports multiple SATA PHY detected events;
++ * ignore the extras */
++ break;
++ case SCU_EVENT_SATA_15:
++ case SCU_EVENT_SATA_15_SSC:
++ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
++ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
++ break;
++ case SCU_EVENT_SATA_30:
++ case SCU_EVENT_SATA_30_SSC:
++ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
++ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
++ break;
++ case SCU_EVENT_SATA_60:
++ case SCU_EVENT_SATA_60_SSC:
++ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
++ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
++ break;
++ case SCU_EVENT_LINK_FAILURE:
++ /* Link failure change state back to the starting state */
++ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
++ break;
++ case SCU_EVENT_SAS_PHY_DETECTED:
++ /*
++ * There has been a change in the phy type before OOB/SN for the
++			 * SATA finished; start down the SAS link training path. */
++ sci_phy_start_sas_link_training(iphy);
++ break;
++ default:
++ dev_warn(sciphy_to_dev(iphy),
++ "%s: PHY starting substate machine received "
++ "unexpected event_code %x\n",
++ __func__, event_code);
++
++ return SCI_FAILURE;
++ }
++
++ return SCI_SUCCESS;
++ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
++ switch (scu_get_event_code(event_code)) {
++ case SCU_EVENT_SATA_PHY_DETECTED:
++			/* Back up the state machine */
++ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
++ break;
++
++ case SCU_EVENT_LINK_FAILURE:
++ /* Link failure change state back to the starting state */
++ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
++ break;
++
++ default:
++ dev_warn(sciphy_to_dev(iphy),
++ "%s: PHY starting substate machine received "
++ "unexpected event_code %x\n",
++ __func__,
++ event_code);
++
++ return SCI_FAILURE;
++ }
++ return SCI_SUCCESS;
++ case SCI_PHY_READY:
++ switch (scu_get_event_code(event_code)) {
++ case SCU_EVENT_LINK_FAILURE:
++ /* Link failure change state back to the starting state */
++ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
++ break;
++ case SCU_EVENT_BROADCAST_CHANGE:
++ /* Broadcast change received. Notify the port. */
++ if (phy_get_non_dummy_port(iphy) != NULL)
++ sci_port_broadcast_change_received(iphy->owning_port, iphy);
++ else
++ iphy->bcn_received_while_port_unassigned = true;
++ break;
++ default:
++ dev_warn(sciphy_to_dev(iphy),
++ "%sP SCIC PHY 0x%p ready state machine received "
++ "unexpected event_code %x\n",
++ __func__, iphy, event_code);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++ return SCI_SUCCESS;
++ case SCI_PHY_RESETTING:
++ switch (scu_get_event_code(event_code)) {
++ case SCU_EVENT_HARD_RESET_TRANSMITTED:
++ /* Link failure change state back to the starting state */
++ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
++ break;
++ default:
++ dev_warn(sciphy_to_dev(iphy),
++ "%s: SCIC PHY 0x%p resetting state machine received "
++ "unexpected event_code %x\n",
++ __func__, iphy, event_code);
++
++ return SCI_FAILURE_INVALID_STATE;
++ }
++ return SCI_SUCCESS;
++ default:
++ dev_dbg(sciphy_to_dev(iphy),
++ "%s: in wrong state: %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++}
++
++enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index)
++{
++ enum sci_phy_states state = iphy->sm.current_state_id;
++ struct isci_host *ihost = iphy->owning_port->owning_controller;
++ enum sci_status result;
++ unsigned long flags;
++
++ switch (state) {
++ case SCI_PHY_SUB_AWAIT_IAF_UF: {
++ u32 *frame_words;
++ struct sas_identify_frame iaf;
++
++ result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
++ frame_index,
++ (void **)&frame_words);
++
++ if (result != SCI_SUCCESS)
++ return result;
++
++ sci_swab32_cpy(&iaf, frame_words, sizeof(iaf) / sizeof(u32));
++ if (iaf.frame_type == 0) {
++ u32 state;
++
++ spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
++ memcpy(&iphy->frame_rcvd.iaf, &iaf, sizeof(iaf));
++ spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
++ if (iaf.smp_tport) {
++			if (iaf.smp_tport) {
++				/* We got the IAF for an expander phy; go to the final
++ * state since there are no power requirements for
++ * expander phys.
++ */
++ state = SCI_PHY_SUB_FINAL;
++ } else {
++				/* We got the IAF; we can now go to the await spinup
++ * semaphore state
++ */
++ state = SCI_PHY_SUB_AWAIT_SAS_POWER;
++ }
++ sci_change_state(&iphy->sm, state);
++ result = SCI_SUCCESS;
++ } else
++ dev_warn(sciphy_to_dev(iphy),
++ "%s: PHY starting substate machine received "
++ "unexpected frame id %x\n",
++ __func__, frame_index);
++
++ sci_controller_release_frame(ihost, frame_index);
++ return result;
++ }
++ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: {
++ struct dev_to_host_fis *frame_header;
++ u32 *fis_frame_data;
++
++ result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
++ frame_index,
++ (void **)&frame_header);
++
++ if (result != SCI_SUCCESS)
++ return result;
++
++ if ((frame_header->fis_type == FIS_REGD2H) &&
++ !(frame_header->status & ATA_BUSY)) {
++ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
++ frame_index,
++ (void **)&fis_frame_data);
++
++ spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
++ sci_controller_copy_sata_response(&iphy->frame_rcvd.fis,
++ frame_header,
++ fis_frame_data);
++ spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
++
++			/* got the signature FIS; we can now go to the final state */
++ sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
++
++ result = SCI_SUCCESS;
++ } else
++ dev_warn(sciphy_to_dev(iphy),
++ "%s: PHY starting substate machine received "
++ "unexpected frame id %x\n",
++ __func__, frame_index);
++
++		/* Regardless of the result, we are done with this frame */
++ sci_controller_release_frame(ihost, frame_index);
++
++ return result;
++ }
++ default:
++ dev_dbg(sciphy_to_dev(iphy),
++ "%s: in wrong state: %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++}
++
++static void sci_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
++
++	/* This is just a temporary state; go off to the starting state */
++ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN);
++}
++
++static void sci_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
++ struct isci_host *ihost = iphy->owning_port->owning_controller;
++
++ sci_controller_power_control_queue_insert(ihost, iphy);
++}
++
++static void sci_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm)
++{
++ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
++ struct isci_host *ihost = iphy->owning_port->owning_controller;
++
++ sci_controller_power_control_queue_remove(ihost, iphy);
++}
++
++static void sci_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
++ struct isci_host *ihost = iphy->owning_port->owning_controller;
++
++ sci_controller_power_control_queue_insert(ihost, iphy);
++}
++
++static void sci_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm)
++{
++ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
++ struct isci_host *ihost = iphy->owning_port->owning_controller;
++
++ sci_controller_power_control_queue_remove(ihost, iphy);
++}
++
++static void sci_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
++
++ sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
++}
++
++static void sci_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm)
++{
++ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
++
++ sci_del_timer(&iphy->sata_timer);
++}
++
++static void sci_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
++
++ sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
++}
++
++static void sci_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm)
++{
++ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
++
++ sci_del_timer(&iphy->sata_timer);
++}
++
++static void sci_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
++
++ if (sci_port_link_detected(iphy->owning_port, iphy)) {
++
++ /*
++ * Clear the PE suspend condition so we can actually
++		 * receive the SIG FIS.
++ * The hardware will not respond to the XRDY until the PE
++ * suspend condition is cleared.
++ */
++ sci_phy_resume(iphy);
++
++ sci_mod_timer(&iphy->sata_timer,
++ SCIC_SDS_SIGNATURE_FIS_TIMEOUT);
++ } else
++ iphy->is_in_link_training = false;
++}
++
++static void sci_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm)
++{
++ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
++
++ sci_del_timer(&iphy->sata_timer);
++}
++
++static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
++
++ /* State machine has run to completion so exit out and change
++ * the base state machine to the ready state
++ */
++ sci_change_state(&iphy->sm, SCI_PHY_READY);
++}
++
++/**
++ * scu_link_layer_stop_protocol_engine() - stop the struct isci_phy object
++ * @iphy: This is the struct isci_phy object to stop.
++ *
++ * This method will stop the struct isci_phy object. This does not reset the
++ * protocol engine; it just suspends it and places it in a state where it
++ * will not cause the end device to power up.
++ */
++static void scu_link_layer_stop_protocol_engine(
++ struct isci_phy *iphy)
++{
++ u32 scu_sas_pcfg_value;
++ u32 enable_spinup_value;
++
++ /* Suspend the protocol engine and place it in a sata spinup hold state */
++ scu_sas_pcfg_value =
++ readl(&iphy->link_layer_registers->phy_configuration);
++ scu_sas_pcfg_value |=
++ (SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
++ SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE) |
++ SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD));
++ writel(scu_sas_pcfg_value,
++ &iphy->link_layer_registers->phy_configuration);
++
++ /* Disable the notify enable spinup primitives */
++ enable_spinup_value = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
++ enable_spinup_value &= ~SCU_ENSPINUP_GEN_BIT(ENABLE);
++ writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
++}
++
++/**
++ * scu_link_layer_start_oob() - start the OOB/SN state machine
++ * @iphy: the struct isci_phy object for which to start the OOB/SN state
++ *    machine
++ */
++static void scu_link_layer_start_oob(
++ struct isci_phy *iphy)
++{
++ u32 scu_sas_pcfg_value;
++
++ scu_sas_pcfg_value =
++ readl(&iphy->link_layer_registers->phy_configuration);
++ scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
++ scu_sas_pcfg_value &=
++ ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
++ SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
++ writel(scu_sas_pcfg_value,
++ &iphy->link_layer_registers->phy_configuration);
++}
++
++/**
++ * scu_link_layer_tx_hard_reset() - transmit a hard reset request on the
++ *    specified phy
++ * @iphy: the phy on which to transmit the hard reset request
++ *
++ * The SCU hardware requires that we reset the OOB state machine and set the
++ * hard reset bit in the phy configuration register. We then must start OOB
++ * over with the hard reset bit set.
++ */
++static void scu_link_layer_tx_hard_reset(
++ struct isci_phy *iphy)
++{
++ u32 phy_configuration_value;
++
++ /*
++ * SAS Phys must wait for the HARD_RESET_TX event notification to transition
++ * to the starting state. */
++ phy_configuration_value =
++ readl(&iphy->link_layer_registers->phy_configuration);
++ phy_configuration_value |=
++ (SCU_SAS_PCFG_GEN_BIT(HARD_RESET) |
++ SCU_SAS_PCFG_GEN_BIT(OOB_RESET));
++ writel(phy_configuration_value,
++ &iphy->link_layer_registers->phy_configuration);
++
++ /* Now take the OOB state machine out of reset */
++ phy_configuration_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
++ phy_configuration_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
++ writel(phy_configuration_value,
++ &iphy->link_layer_registers->phy_configuration);
++}
++
++static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
++ struct isci_port *iport = iphy->owning_port;
++ struct isci_host *ihost = iport->owning_controller;
++
++ /*
++ * @todo We need to get to the controller to place this PE in a
++ * reset state
++ */
++ sci_del_timer(&iphy->sata_timer);
++
++ scu_link_layer_stop_protocol_engine(iphy);
++
++ if (iphy->sm.previous_state_id != SCI_PHY_INITIAL)
++ sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
++}
++
++static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
++ struct isci_port *iport = iphy->owning_port;
++ struct isci_host *ihost = iport->owning_controller;
++
++ scu_link_layer_stop_protocol_engine(iphy);
++ scu_link_layer_start_oob(iphy);
++
++ /* We don't know what kind of phy we are going to be just yet */
++ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
++ iphy->bcn_received_while_port_unassigned = false;
++
++ if (iphy->sm.previous_state_id == SCI_PHY_READY)
++ sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
++
++ sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL);
++}
++
++static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
++ struct isci_port *iport = iphy->owning_port;
++ struct isci_host *ihost = iport->owning_controller;
++
++ sci_controller_link_up(ihost, phy_get_non_dummy_port(iphy), iphy);
++}
++
++static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm)
++{
++ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
++
++ sci_phy_suspend(iphy);
++}
++
++static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
++
++ /* The phy is being reset, therefore deactivate it from the port. In
++ * the resetting state we don't notify the user regarding link up and
++ * link down notifications
++ */
++ sci_port_deactivate_phy(iphy->owning_port, iphy, false);
++
++ if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
++ scu_link_layer_tx_hard_reset(iphy);
++ } else {
++ /* The SCU does not need to have a discrete reset state so
++ * just go back to the starting state.
++ */
++ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
++ }
++}
++
++static const struct sci_base_state sci_phy_state_table[] = {
++ [SCI_PHY_INITIAL] = { },
++ [SCI_PHY_STOPPED] = {
++ .enter_state = sci_phy_stopped_state_enter,
++ },
++ [SCI_PHY_STARTING] = {
++ .enter_state = sci_phy_starting_state_enter,
++ },
++ [SCI_PHY_SUB_INITIAL] = {
++ .enter_state = sci_phy_starting_initial_substate_enter,
++ },
++ [SCI_PHY_SUB_AWAIT_OSSP_EN] = { },
++ [SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { },
++ [SCI_PHY_SUB_AWAIT_IAF_UF] = { },
++ [SCI_PHY_SUB_AWAIT_SAS_POWER] = {
++ .enter_state = sci_phy_starting_await_sas_power_substate_enter,
++ .exit_state = sci_phy_starting_await_sas_power_substate_exit,
++ },
++ [SCI_PHY_SUB_AWAIT_SATA_POWER] = {
++ .enter_state = sci_phy_starting_await_sata_power_substate_enter,
++ .exit_state = sci_phy_starting_await_sata_power_substate_exit
++ },
++ [SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = {
++ .enter_state = sci_phy_starting_await_sata_phy_substate_enter,
++ .exit_state = sci_phy_starting_await_sata_phy_substate_exit
++ },
++ [SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = {
++ .enter_state = sci_phy_starting_await_sata_speed_substate_enter,
++ .exit_state = sci_phy_starting_await_sata_speed_substate_exit
++ },
++ [SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = {
++ .enter_state = sci_phy_starting_await_sig_fis_uf_substate_enter,
++ .exit_state = sci_phy_starting_await_sig_fis_uf_substate_exit
++ },
++ [SCI_PHY_SUB_FINAL] = {
++ .enter_state = sci_phy_starting_final_substate_enter,
++ },
++ [SCI_PHY_READY] = {
++ .enter_state = sci_phy_ready_state_enter,
++ .exit_state = sci_phy_ready_state_exit,
++ },
++ [SCI_PHY_RESETTING] = {
++ .enter_state = sci_phy_resetting_state_enter,
++ },
++ [SCI_PHY_FINAL] = { },
++};
++
++void sci_phy_construct(struct isci_phy *iphy,
++ struct isci_port *iport, u8 phy_index)
++{
++ sci_init_sm(&iphy->sm, sci_phy_state_table, SCI_PHY_INITIAL);
++
++ /* Copy the rest of the input data to our locals */
++ iphy->owning_port = iport;
++ iphy->phy_index = phy_index;
++ iphy->bcn_received_while_port_unassigned = false;
++ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
++ iphy->link_layer_registers = NULL;
++ iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
++
++ /* Create the SIGNATURE FIS Timeout timer for this phy */
++ sci_init_timer(&iphy->sata_timer, phy_sata_timeout);
++}
++
++void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
++{
++ struct sci_oem_params *oem = &ihost->oem_parameters;
++ u64 sci_sas_addr;
++ __be64 sas_addr;
++
++ sci_sas_addr = oem->phys[index].sas_address.high;
++ sci_sas_addr <<= 32;
++ sci_sas_addr |= oem->phys[index].sas_address.low;
++ sas_addr = cpu_to_be64(sci_sas_addr);
++ memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr));
++
++ iphy->isci_port = NULL;
++ iphy->sas_phy.enabled = 0;
++ iphy->sas_phy.id = index;
++ iphy->sas_phy.sas_addr = &iphy->sas_addr[0];
++ iphy->sas_phy.frame_rcvd = (u8 *)&iphy->frame_rcvd;
++ iphy->sas_phy.ha = &ihost->sas_ha;
++ iphy->sas_phy.lldd_phy = iphy;
++ iphy->sas_phy.enabled = 1;
++ iphy->sas_phy.class = SAS;
++ iphy->sas_phy.iproto = SAS_PROTOCOL_ALL;
++ iphy->sas_phy.tproto = 0;
++ iphy->sas_phy.type = PHY_TYPE_PHYSICAL;
++ iphy->sas_phy.role = PHY_ROLE_INITIATOR;
++ iphy->sas_phy.oob_mode = OOB_NOT_CONNECTED;
++ iphy->sas_phy.linkrate = SAS_LINK_RATE_UNKNOWN;
++ memset(&iphy->frame_rcvd, 0, sizeof(iphy->frame_rcvd));
++}
++
++
++/**
++ * isci_phy_control() - This function is one of the SAS Domain Template
++ * functions. This is a phy management function.
++ * @sas_phy: This parameter specifies the sas phy being controlled.
++ * @func: This parameter specifies the phy control function being invoked.
++ * @buf: This parameter is specific to the phy function being invoked.
++ *
++ * Returns a status; zero indicates success.
++ */
++int isci_phy_control(struct asd_sas_phy *sas_phy,
++ enum phy_func func,
++ void *buf)
++{
++ int ret = 0;
++ struct isci_phy *iphy = sas_phy->lldd_phy;
++ struct isci_port *iport = iphy->isci_port;
++ struct isci_host *ihost = sas_phy->ha->lldd_ha;
++ unsigned long flags;
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: phy %p; func %d; buf %p; isci phy %p, port %p\n",
++ __func__, sas_phy, func, buf, iphy, iport);
++
++ switch (func) {
++ case PHY_FUNC_DISABLE:
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++ sci_phy_stop(iphy);
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++ break;
++
++ case PHY_FUNC_LINK_RESET:
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++ sci_phy_stop(iphy);
++ sci_phy_start(iphy);
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++ break;
++
++ case PHY_FUNC_HARD_RESET:
++ if (!iport)
++ return -ENODEV;
++
++ /* Perform the port reset. */
++ ret = isci_port_perform_hard_reset(ihost, iport, iphy);
++
++ break;
++
++ default:
++ dev_dbg(&ihost->pdev->dev,
++ "%s: phy %p; func %d NOT IMPLEMENTED!\n",
++ __func__, sas_phy, func);
++ ret = -ENOSYS;
++ break;
++ }
++ return ret;
++}
+diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
+new file mode 100644
+index 0000000..67699c8
+--- /dev/null
++++ b/drivers/scsi/isci/phy.h
+@@ -0,0 +1,504 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _ISCI_PHY_H_
++#define _ISCI_PHY_H_
++
++#include <scsi/sas.h>
++#include <scsi/libsas.h>
++#include "isci.h"
++#include "sas.h"
++
++/* This is the timeout value for the SATA phy to wait for a SIGNATURE FIS
++ * before restarting the starting state machine. Technically, the old parallel
++ * ATA specification required up to 30 seconds for a device to issue its
++ * signature FIS as a result of a soft reset. Now we see that devices respond
++ * generally within 15 seconds, but we'll use 25 seconds for now (the value
++ * below is in milliseconds).
++ */
++#define SCIC_SDS_SIGNATURE_FIS_TIMEOUT 25000
++
++/* This is the timeout for the SATA OOB/SN because the hardware does not
++ * recognize a hot plug after the OOB signal but before the SN signals. After
++ * a hotplug timeout, if we have not received the speed event notification
++ * from the hardware, we need to restart the hardware OOB state machine.
++ */
++#define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250
++
++enum sci_phy_protocol {
++ SCIC_SDS_PHY_PROTOCOL_UNKNOWN,
++ SCIC_SDS_PHY_PROTOCOL_SAS,
++ SCIC_SDS_PHY_PROTOCOL_SATA,
++ SCIC_SDS_MAX_PHY_PROTOCOLS
++};
++
++/**
++ * isci_phy - hba local phy infrastructure
++ * @sm: phy state machine
++ * @protocol: attached device protocol
++ * @phy_index: physical index relative to the controller (0-3)
++ * @bcn_received_while_port_unassigned: bcn to report after port association
++ * @sata_timer: timeout SATA signature FIS arrival
++ */
++struct isci_phy {
++ struct sci_base_state_machine sm;
++ struct isci_port *owning_port;
++ enum sas_linkrate max_negotiated_speed;
++ enum sci_phy_protocol protocol;
++ u8 phy_index;
++ bool bcn_received_while_port_unassigned;
++ bool is_in_link_training;
++ struct sci_timer sata_timer;
++ struct scu_transport_layer_registers __iomem *transport_layer_registers;
++ struct scu_link_layer_registers __iomem *link_layer_registers;
++ struct asd_sas_phy sas_phy;
++ struct isci_port *isci_port;
++ u8 sas_addr[SAS_ADDR_SIZE];
++ union {
++ struct sas_identify_frame iaf;
++ struct dev_to_host_fis fis;
++ } frame_rcvd;
++};
++
++static inline struct isci_phy *to_iphy(struct asd_sas_phy *sas_phy)
++{
++ struct isci_phy *iphy = container_of(sas_phy, typeof(*iphy), sas_phy);
++
++ return iphy;
++}
++
++struct sci_phy_cap {
++ union {
++ struct {
++ /*
++ * The SAS specification indicates the start bit shall
++			 * always be set to 1. This implementation will have
++			 * the start bit set to 0 if the PHY CAPABILITIES were
++			 * either not received or speed negotiation failed.
++ */
++ u8 start:1;
++ u8 tx_ssc_type:1;
++ u8 res1:2;
++ u8 req_logical_linkrate:4;
++
++ u32 gen1_no_ssc:1;
++ u32 gen1_ssc:1;
++ u32 gen2_no_ssc:1;
++ u32 gen2_ssc:1;
++ u32 gen3_no_ssc:1;
++ u32 gen3_ssc:1;
++ u32 res2:17;
++ u32 parity:1;
++ };
++ u32 all;
++ };
++} __packed;
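++
++/* A minimal decode sketch (not part of the driver): a received PHY
++ * CAPABILITIES dword can be unpacked through the union; received_dword is a
++ * hypothetical u32 holding the peer's value:
++ *
++ *	struct sci_phy_cap cap = { .all = received_dword };
++ *	bool peer_gen3 = cap.gen3_no_ssc || cap.gen3_ssc;
++ */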
++
++/* this data structure reflects the link layer transmit identification reg */
++struct sci_phy_proto {
++ union {
++ struct {
++ u16 _r_a:1;
++ u16 smp_iport:1;
++ u16 stp_iport:1;
++ u16 ssp_iport:1;
++ u16 _r_b:4;
++ u16 _r_c:1;
++ u16 smp_tport:1;
++ u16 stp_tport:1;
++ u16 ssp_tport:1;
++ u16 _r_d:4;
++ };
++ u16 all;
++ };
++} __packed;
++
++
++/**
++ * struct sci_phy_properties - This structure defines the properties common to
++ * all phys that can be retrieved.
++ *
++ *
++ */
++struct sci_phy_properties {
++ /**
++ * This field specifies the port that currently contains the
++ * supplied phy. This field may be set to NULL
++ * if the phy is not currently contained in a port.
++ */
++ struct isci_port *iport;
++
++ /**
++ * This field specifies the link rate at which the phy is
++ * currently operating.
++ */
++ enum sas_linkrate negotiated_link_rate;
++
++ /**
++ * This field specifies the index of the phy in relation to other
++ * phys within the controller. This index is zero relative.
++ */
++ u8 index;
++};
++
++/**
++ * struct sci_sas_phy_properties - This structure defines the properties,
++ * specific to a SAS phy, that can be retrieved.
++ *
++ *
++ */
++struct sci_sas_phy_properties {
++ /**
++ * This field delineates the Identify Address Frame received
++ * from the remote end point.
++ */
++ struct sas_identify_frame rcvd_iaf;
++
++ /**
++ * This field delineates the Phy capabilities structure received
++ * from the remote end point.
++ */
++ struct sci_phy_cap rcvd_cap;
++
++};
++
++/**
++ * struct sci_sata_phy_properties - This structure defines the properties,
++ * specific to a SATA phy, that can be retrieved.
++ *
++ *
++ */
++struct sci_sata_phy_properties {
++ /**
++ * This field delineates the signature FIS received from the
++ * attached target.
++ */
++ struct dev_to_host_fis signature_fis;
++
++ /**
++ * This field specifies to the user if a port selector is connected
++ * on the specified phy.
++ */
++ bool is_port_selector_present;
++
++};
++
++/**
++ * enum sci_phy_counter_id - This enumeration depicts the various pieces of
++ * optional information that can be retrieved for a specific phy.
++ *
++ *
++ */
++enum sci_phy_counter_id {
++ /**
++ * This PHY information field tracks the number of frames received.
++ */
++ SCIC_PHY_COUNTER_RECEIVED_FRAME,
++
++ /**
++ * This PHY information field tracks the number of frames transmitted.
++ */
++ SCIC_PHY_COUNTER_TRANSMITTED_FRAME,
++
++ /**
++ * This PHY information field tracks the number of DWORDs received.
++ */
++ SCIC_PHY_COUNTER_RECEIVED_FRAME_WORD,
++
++ /**
++ * This PHY information field tracks the number of DWORDs transmitted.
++ */
++ SCIC_PHY_COUNTER_TRANSMITTED_FRAME_DWORD,
++
++ /**
++ * This PHY information field tracks the number of times DWORD
++ * synchronization was lost.
++ */
++ SCIC_PHY_COUNTER_LOSS_OF_SYNC_ERROR,
++
++ /**
++ * This PHY information field tracks the number of received DWORDs with
++ * running disparity errors.
++ */
++ SCIC_PHY_COUNTER_RECEIVED_DISPARITY_ERROR,
++
++ /**
++ * This PHY information field tracks the number of received frames with a
++ * CRC error (not including short or truncated frames).
++ */
++ SCIC_PHY_COUNTER_RECEIVED_FRAME_CRC_ERROR,
++
++ /**
++ * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
++ * primitives received.
++ */
++ SCIC_PHY_COUNTER_RECEIVED_DONE_ACK_NAK_TIMEOUT,
++
++ /**
++ * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
++ * primitives transmitted.
++ */
++ SCIC_PHY_COUNTER_TRANSMITTED_DONE_ACK_NAK_TIMEOUT,
++
++ /**
++ * This PHY information field tracks the number of times the inactivity
++ * timer for connections on the phy has been utilized.
++ */
++ SCIC_PHY_COUNTER_INACTIVITY_TIMER_EXPIRED,
++
++ /**
++ * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
++ * primitives received.
++ */
++ SCIC_PHY_COUNTER_RECEIVED_DONE_CREDIT_TIMEOUT,
++
++ /**
++ * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
++ * primitives transmitted.
++ */
++ SCIC_PHY_COUNTER_TRANSMITTED_DONE_CREDIT_TIMEOUT,
++
++ /**
++ * This PHY information field tracks the number of CREDIT BLOCKED
++ * primitives received.
++ * @note Depending on remote device implementation, credit blocks
++ * may occur regularly.
++ */
++ SCIC_PHY_COUNTER_RECEIVED_CREDIT_BLOCKED,
++
++ /**
++ * This PHY information field contains the number of short frames
++	 * received. A short frame is simply a frame smaller than what is
++ * allowed by either the SAS or SATA specification.
++ */
++ SCIC_PHY_COUNTER_RECEIVED_SHORT_FRAME,
++
++ /**
++ * This PHY information field contains the number of frames received after
++ * credit has been exhausted.
++ */
++ SCIC_PHY_COUNTER_RECEIVED_FRAME_WITHOUT_CREDIT,
++
++ /**
++ * This PHY information field contains the number of frames received after
++ * a DONE has been received.
++ */
++ SCIC_PHY_COUNTER_RECEIVED_FRAME_AFTER_DONE,
++
++ /**
++ * This PHY information field contains the number of times the phy
++ * failed to achieve DWORD synchronization during speed negotiation.
++ */
++ SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR
++};
++
++enum sci_phy_states {
++ /**
++ * Simply the initial state for the base domain state machine.
++ */
++ SCI_PHY_INITIAL,
++
++ /**
++ * This state indicates that the phy has successfully been stopped.
++ * In this state no new IO operations are permitted on this phy.
++ * This state is entered from the INITIAL state.
++ * This state is entered from the STARTING state.
++ * This state is entered from the READY state.
++ * This state is entered from the RESETTING state.
++ */
++ SCI_PHY_STOPPED,
++
++ /**
++	 * This state indicates that the phy is in the process of becoming
++ * ready. In this state no new IO operations are permitted on this phy.
++ * This state is entered from the STOPPED state.
++ * This state is entered from the READY state.
++ * This state is entered from the RESETTING state.
++ */
++ SCI_PHY_STARTING,
++
++ /**
++ * Initial state
++ */
++ SCI_PHY_SUB_INITIAL,
++
++ /**
++ * Wait state for the hardware OSSP event type notification
++ */
++ SCI_PHY_SUB_AWAIT_OSSP_EN,
++
++ /**
++ * Wait state for the PHY speed notification
++ */
++ SCI_PHY_SUB_AWAIT_SAS_SPEED_EN,
++
++ /**
++ * Wait state for the IAF Unsolicited frame notification
++ */
++ SCI_PHY_SUB_AWAIT_IAF_UF,
++
++ /**
++ * Wait state for the request to consume power
++ */
++ SCI_PHY_SUB_AWAIT_SAS_POWER,
++
++ /**
++ * Wait state for request to consume power
++ */
++ SCI_PHY_SUB_AWAIT_SATA_POWER,
++
++ /**
++ * Wait state for the SATA PHY notification
++ */
++ SCI_PHY_SUB_AWAIT_SATA_PHY_EN,
++
++ /**
++ * Wait for the SATA PHY speed notification
++ */
++ SCI_PHY_SUB_AWAIT_SATA_SPEED_EN,
++
++ /**
++ * Wait state for the SIGNATURE FIS unsolicited frame notification
++ */
++ SCI_PHY_SUB_AWAIT_SIG_FIS_UF,
++
++ /**
++ * Exit state for this state machine
++ */
++ SCI_PHY_SUB_FINAL,
++
++ /**
++	 * This state indicates that the phy is now ready. Thus, the user
++ * is able to perform IO operations utilizing this phy as long as it
++ * is currently part of a valid port.
++ * This state is entered from the STARTING state.
++ */
++ SCI_PHY_READY,
++
++ /**
++ * This state indicates that the phy is in the process of being reset.
++ * In this state no new IO operations are permitted on this phy.
++ * This state is entered from the READY state.
++ */
++ SCI_PHY_RESETTING,
++
++ /**
++ * Simply the final state for the base phy state machine.
++ */
++ SCI_PHY_FINAL,
++};
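++
++/* A sketch of the intended flow through these states, per the comments
++ * above and the event handlers in phy.c (expander phys skip the
++ * SUB_AWAIT_SAS_POWER wait and go straight to SUB_FINAL):
++ *
++ *	INITIAL -> STOPPED -> STARTING -> READY -> RESETTING -> STARTING ...
++ *	STARTING: SUB_INITIAL -> SUB_AWAIT_OSSP_EN
++ *	  SAS:  -> SUB_AWAIT_SAS_SPEED_EN -> SUB_AWAIT_IAF_UF
++ *	        -> SUB_AWAIT_SAS_POWER -> SUB_FINAL
++ *	  SATA: -> SUB_AWAIT_SATA_POWER -> SUB_AWAIT_SATA_PHY_EN
++ *	        -> SUB_AWAIT_SATA_SPEED_EN -> SUB_AWAIT_SIG_FIS_UF -> SUB_FINAL
++ */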
++
++void sci_phy_construct(
++ struct isci_phy *iphy,
++ struct isci_port *iport,
++ u8 phy_index);
++
++struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy);
++
++void sci_phy_set_port(
++ struct isci_phy *iphy,
++ struct isci_port *iport);
++
++enum sci_status sci_phy_initialize(
++ struct isci_phy *iphy,
++ struct scu_transport_layer_registers __iomem *transport_layer_registers,
++ struct scu_link_layer_registers __iomem *link_layer_registers);
++
++enum sci_status sci_phy_start(
++ struct isci_phy *iphy);
++
++enum sci_status sci_phy_stop(
++ struct isci_phy *iphy);
++
++enum sci_status sci_phy_reset(
++ struct isci_phy *iphy);
++
++void sci_phy_resume(
++ struct isci_phy *iphy);
++
++void sci_phy_setup_transport(
++ struct isci_phy *iphy,
++ u32 device_id);
++
++enum sci_status sci_phy_event_handler(
++ struct isci_phy *iphy,
++ u32 event_code);
++
++enum sci_status sci_phy_frame_handler(
++ struct isci_phy *iphy,
++ u32 frame_index);
++
++enum sci_status sci_phy_consume_power_handler(
++ struct isci_phy *iphy);
++
++void sci_phy_get_sas_address(
++ struct isci_phy *iphy,
++ struct sci_sas_address *sas_address);
++
++void sci_phy_get_attached_sas_address(
++ struct isci_phy *iphy,
++ struct sci_sas_address *sas_address);
++
++struct sci_phy_proto;
++void sci_phy_get_protocols(
++ struct isci_phy *iphy,
++ struct sci_phy_proto *protocols);
++enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy);
++
++struct isci_host;
++void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index);
++int isci_phy_control(struct asd_sas_phy *phy, enum phy_func func, void *buf);
++
++#endif /* !defined(_ISCI_PHY_H_) */
+diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
+new file mode 100644
+index 0000000..8f6f9b7
+--- /dev/null
++++ b/drivers/scsi/isci/port.c
+@@ -0,0 +1,1757 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "isci.h"
++#include "port.h"
++#include "request.h"
++
++#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000)
++#define SCU_DUMMY_INDEX (0xFFFF)
++
++static void isci_port_change_state(struct isci_port *iport, enum isci_status status)
++{
++ unsigned long flags;
++
++ dev_dbg(&iport->isci_host->pdev->dev,
++ "%s: iport = %p, state = 0x%x\n",
++ __func__, iport, status);
++
++ /* XXX pointless lock */
++ spin_lock_irqsave(&iport->state_lock, flags);
++ iport->status = status;
++ spin_unlock_irqrestore(&iport->state_lock, flags);
++}
++
++static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
++{
++ u8 index;
++
++ proto->all = 0;
++ for (index = 0; index < SCI_MAX_PHYS; index++) {
++ struct isci_phy *iphy = iport->phy_table[index];
++
++ if (!iphy)
++ continue;
++ sci_phy_get_protocols(iphy, proto);
++ }
++}
++
++static u32 sci_port_get_phys(struct isci_port *iport)
++{
++ u32 index;
++ u32 mask;
++
++ mask = 0;
++ for (index = 0; index < SCI_MAX_PHYS; index++)
++ if (iport->phy_table[index])
++ mask |= (1 << index);
++
++ return mask;
++}
++
++/**
++ * sci_port_get_properties() - This method simply returns the properties
++ * regarding the port, such as: physical index, protocols, sas address, etc.
++ * @iport: this parameter specifies the port for which to retrieve the
++ *    properties.
++ * @prop: This parameter specifies the properties structure into which to
++ *    copy the requested information.
++ *
++ * Indicates whether the user specified a valid port. SCI_SUCCESS is returned
++ * if the specified port was valid. SCI_FAILURE_INVALID_PORT is returned if
++ * the specified port is not valid; in that case no data is copied to the
++ * properties output parameter.
++ */
++static enum sci_status sci_port_get_properties(struct isci_port *iport,
++ struct sci_port_properties *prop)
++{
++ if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
++ return SCI_FAILURE_INVALID_PORT;
++
++ prop->index = iport->logical_port_index;
++ prop->phy_mask = sci_port_get_phys(iport);
++ sci_port_get_sas_address(iport, &prop->local.sas_address);
++ sci_port_get_protocols(iport, &prop->local.protocols);
++ sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);
++
++ return SCI_SUCCESS;
++}
++
++static void sci_port_bcn_enable(struct isci_port *iport)
++{
++ struct isci_phy *iphy;
++ u32 val;
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
++ iphy = iport->phy_table[i];
++ if (!iphy)
++ continue;
++ val = readl(&iphy->link_layer_registers->link_layer_control);
++ /* clear the bit by writing 1. */
++ writel(val, &iphy->link_layer_registers->link_layer_control);
++ }
++}
++
++/* called under sci_lock to stabilize phy:port associations */
++void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport)
++{
++ int i;
++
++ clear_bit(IPORT_BCN_BLOCKED, &iport->flags);
++ wake_up(&ihost->eventq);
++
++ if (!test_and_clear_bit(IPORT_BCN_PENDING, &iport->flags))
++ return;
++
++ for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
++ struct isci_phy *iphy = iport->phy_table[i];
++
++ if (!iphy)
++ continue;
++
++ ihost->sas_ha.notify_port_event(&iphy->sas_phy,
++ PORTE_BROADCAST_RCVD);
++ break;
++ }
++}
++
++static void isci_port_bc_change_received(struct isci_host *ihost,
++ struct isci_port *iport,
++ struct isci_phy *iphy)
++{
++ if (iport && test_bit(IPORT_BCN_BLOCKED, &iport->flags)) {
++ dev_dbg(&ihost->pdev->dev,
++ "%s: disabled BCN; isci_phy = %p, sas_phy = %p\n",
++ __func__, iphy, &iphy->sas_phy);
++ set_bit(IPORT_BCN_PENDING, &iport->flags);
++ atomic_inc(&iport->event);
++ wake_up(&ihost->eventq);
++ } else {
++ dev_dbg(&ihost->pdev->dev,
++ "%s: isci_phy = %p, sas_phy = %p\n",
++ __func__, iphy, &iphy->sas_phy);
++
++ ihost->sas_ha.notify_port_event(&iphy->sas_phy,
++ PORTE_BROADCAST_RCVD);
++ }
++ sci_port_bcn_enable(iport);
++}
++
++static void isci_port_link_up(struct isci_host *isci_host,
++ struct isci_port *iport,
++ struct isci_phy *iphy)
++{
++ unsigned long flags;
++ struct sci_port_properties properties;
++ unsigned long success = true;
++
++ BUG_ON(iphy->isci_port != NULL);
++
++ iphy->isci_port = iport;
++
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: isci_port = %p\n",
++ __func__, iport);
++
++ spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
++
++ isci_port_change_state(iphy->isci_port, isci_starting);
++
++ sci_port_get_properties(iport, &properties);
++
++ if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
++ u64 attached_sas_address;
++
++ iphy->sas_phy.oob_mode = SATA_OOB_MODE;
++ iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis);
++
++ /*
++ * For direct-attached SATA devices, the SCI core will
++ * automagically assign a SAS address to the end device
++ * for the purpose of creating a port. This SAS address
++ * will not be the same as assigned to the PHY and needs
++ * to be obtained from struct sci_port_properties properties.
++ */
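++		/* Assemble the 64-bit SAS address from the core's
++		 * {high, low} pair and byte-swap it into the byte-array
++		 * form that libsas keeps in attached_sas_addr.
++		 */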
++ attached_sas_address = properties.remote.sas_address.high;
++ attached_sas_address <<= 32;
++ attached_sas_address |= properties.remote.sas_address.low;
++ swab64s(&attached_sas_address);
++
++ memcpy(&iphy->sas_phy.attached_sas_addr,
++ &attached_sas_address, sizeof(attached_sas_address));
++ } else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
++ iphy->sas_phy.oob_mode = SAS_OOB_MODE;
++ iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);
++
++ /* Copy the attached SAS address from the IAF */
++ memcpy(iphy->sas_phy.attached_sas_addr,
++ iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE);
++ } else {
++		dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__);
++ success = false;
++ }
++
++ iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy);
++
++ spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
++
++ /* Notify libsas that we have an address frame, if indeed
++ * we've found an SSP, SMP, or STP target */
++ if (success)
++ isci_host->sas_ha.notify_port_event(&iphy->sas_phy,
++ PORTE_BYTES_DMAED);
++}
++
++
++/**
++ * isci_port_link_down() - This function is called by the sci core when a link
++ * becomes inactive.
++ * @isci_host: This parameter specifies the isci host object.
++ * @isci_phy: This parameter specifies the isci phy whose link went down.
++ * @isci_port: This parameter specifies the isci port with the inactive link.
++ *
++ */
++static void isci_port_link_down(struct isci_host *isci_host,
++ struct isci_phy *isci_phy,
++ struct isci_port *isci_port)
++{
++ struct isci_remote_device *isci_device;
++
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: isci_port = %p\n", __func__, isci_port);
++
++ if (isci_port) {
++
++ /* check to see if this is the last phy on this port. */
++ if (isci_phy->sas_phy.port &&
++ isci_phy->sas_phy.port->num_phys == 1) {
++ atomic_inc(&isci_port->event);
++ isci_port_bcn_enable(isci_host, isci_port);
++
++ /* change the state for all devices on this port. The
++ * next task sent to this device will be returned as
++ * SAS_TASK_UNDELIVERED, and the scsi mid layer will
++ * remove the target
++ */
++ list_for_each_entry(isci_device,
++ &isci_port->remote_dev_list,
++ node) {
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: isci_device = %p\n",
++ __func__, isci_device);
++ set_bit(IDEV_GONE, &isci_device->flags);
++ }
++ }
++ isci_port_change_state(isci_port, isci_stopping);
++ }
++
++	/* Notify libsas of the broken link; this will trigger calls to our
++ * isci_port_deformed and isci_dev_gone functions.
++ */
++ sas_phy_disconnected(&isci_phy->sas_phy);
++ isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
++ PHYE_LOSS_OF_SIGNAL);
++
++ isci_phy->isci_port = NULL;
++
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: isci_port = %p - Done\n", __func__, isci_port);
++}
++
++
++/**
++ * isci_port_ready() - This function is called by the sci core when a link
++ * becomes ready.
++ * @isci_host: This parameter specifies the isci host object.
++ * @isci_port: This parameter specifies the sci port with the active link.
++ *
++ */
++static void isci_port_ready(struct isci_host *isci_host, struct isci_port *isci_port)
++{
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: isci_port = %p\n", __func__, isci_port);
++
++ complete_all(&isci_port->start_complete);
++ isci_port_change_state(isci_port, isci_ready);
++ return;
++}
++
++/**
++ * isci_port_not_ready() - This function is called by the sci core when a link
++ * is not ready. All remote devices on this link will be removed if they are
++ * in the stopping state.
++ * @isci_host: This parameter specifies the isci host object.
++ * @isci_port: This parameter specifies the sci port whose link is not ready.
++ *
++ */
++static void isci_port_not_ready(struct isci_host *isci_host, struct isci_port *isci_port)
++{
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: isci_port = %p\n", __func__, isci_port);
++}
++
++static void isci_port_stop_complete(struct isci_host *ihost,
++ struct isci_port *iport,
++ enum sci_status completion_status)
++{
++ dev_dbg(&ihost->pdev->dev, "Port stop complete\n");
++}
++
++/**
++ * isci_port_hard_reset_complete() - This function is called by the sci core
++ * when the hard reset complete notification has been received.
++ * @isci_port: This parameter specifies the sci port that was reset.
++ * @completion_status: This parameter specifies the core status for the reset
++ * process.
++ *
++ */
++static void isci_port_hard_reset_complete(struct isci_port *isci_port,
++ enum sci_status completion_status)
++{
++ dev_dbg(&isci_port->isci_host->pdev->dev,
++ "%s: isci_port = %p, completion_status=%x\n",
++ __func__, isci_port, completion_status);
++
++ /* Save the status of the hard reset from the port. */
++ isci_port->hard_reset_status = completion_status;
++
++ complete_all(&isci_port->hard_reset_complete);
++}
++
++/*
++ * This method will return a true value if the specified phy can be assigned
++ * to this port. The following is the list of phys allowed on each port:
++ *   Port 0 - 3, 2, 1, 0
++ *   Port 1 - 1
++ *   Port 2 - 3, 2
++ *   Port 3 - 3
++ * This method doesn't preclude all configurations. It merely ensures that a
++ * phy is part of the allowable set of phy identifiers for that port. For
++ * example, one could assign phy 3 to port 0 and no other phys. Please refer
++ * to sci_port_is_phy_mask_valid() for information regarding whether the
++ * phy_mask for a port can be supported.
++ *
++ * Return: true if this is a valid phy assignment for the port; false
++ * otherwise.
++ */
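++/* For example, with portN denoting a port whose physical_port_index is N:
++ *   sci_port_is_valid_phy_assignment(port0, 3) -> true
++ *   sci_port_is_valid_phy_assignment(port1, 0) -> false (port 1 allows only phy 1)
++ *   sci_port_is_valid_phy_assignment(port2, 1) -> false (port 2 allows only phys 2/3)
++ * subject to the additional max-link-rate check in the function body.
++ */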
++bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
++{
++ struct isci_host *ihost = iport->owning_controller;
++ struct sci_user_parameters *user = &ihost->user_parameters;
++
++ /* Initialize to invalid value. */
++ u32 existing_phy_index = SCI_MAX_PHYS;
++ u32 index;
++
++ if ((iport->physical_port_index == 1) && (phy_index != 1))
++ return false;
++
++ if (iport->physical_port_index == 3 && phy_index != 3)
++ return false;
++
++ if (iport->physical_port_index == 2 &&
++ (phy_index == 0 || phy_index == 1))
++ return false;
++
++ for (index = 0; index < SCI_MAX_PHYS; index++)
++ if (iport->phy_table[index] && index != phy_index)
++ existing_phy_index = index;
++
++ /* Ensure that all of the phys in the port are capable of
++ * operating at the same maximum link rate.
++ */
++ if (existing_phy_index < SCI_MAX_PHYS &&
++ user->phys[phy_index].max_speed_generation !=
++ user->phys[existing_phy_index].max_speed_generation)
++ return false;
++
++ return true;
++}
++
++/**
++ * sci_port_is_phy_mask_valid() - determine if the port's phy mask can be
++ *    supported by the SCU.
++ * @iport: This is the port object for which to determine if the phy mask
++ *    can be supported.
++ * @phy_mask: the mask of phys proposed for membership in this port.
++ *
++ * The following is the list of valid PHY mask configurations for each port:
++ *   Port 0 - [[3 2] 1] 0
++ *   Port 1 - [1]
++ *   Port 2 - [[3] 2]
++ *   Port 3 - [3]
++ *
++ * Return: true if the phy mask can be supported by the port; false if it
++ * cannot.
++ */
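++/* For example, phy_mask 0x04 (phy 2 alone) or 0x0C (phys 2 and 3) is valid
++ * for port 2, while 0x08 (phy 3 alone) is not: phy 3 by itself may only
++ * form port 3.
++ */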
++static bool sci_port_is_phy_mask_valid(
++ struct isci_port *iport,
++ u32 phy_mask)
++{
++ if (iport->physical_port_index == 0) {
++ if (((phy_mask & 0x0F) == 0x0F)
++ || ((phy_mask & 0x03) == 0x03)
++ || ((phy_mask & 0x01) == 0x01)
++ || (phy_mask == 0))
++ return true;
++ } else if (iport->physical_port_index == 1) {
++ if (((phy_mask & 0x02) == 0x02)
++ || (phy_mask == 0))
++ return true;
++ } else if (iport->physical_port_index == 2) {
++ if (((phy_mask & 0x0C) == 0x0C)
++ || ((phy_mask & 0x04) == 0x04)
++ || (phy_mask == 0))
++ return true;
++ } else if (iport->physical_port_index == 3) {
++ if (((phy_mask & 0x08) == 0x08)
++ || (phy_mask == 0))
++ return true;
++ }
++
++ return false;
++}
++
++/*
++ * This method retrieves a currently active (i.e. connected) phy contained in
++ * the port. Currently, the lowest order phy that is connected is returned.
++ * Returns NULL if there are no currently active (i.e. connected to a remote
++ * end point) phys contained in the port; otherwise, a pointer to the
++ * struct isci_phy that is active in the port.
++ */
++static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
++{
++ u32 index;
++ struct isci_phy *iphy;
++
++ for (index = 0; index < SCI_MAX_PHYS; index++) {
++ /* Ensure that the phy is both part of the port and currently
++ * connected to the remote end-point.
++ */
++ iphy = iport->phy_table[index];
++ if (iphy && sci_port_active_phy(iport, iphy))
++ return iphy;
++ }
++
++ return NULL;
++}
++
++static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
++{
++	/* Check to see if we can add this phy to the port. That means
++	 * that the phy is not already part of a port and that the port does
++	 * not already have a phy assigned to this phy index.
++ */
++ if (!iport->phy_table[iphy->phy_index] &&
++ !phy_get_non_dummy_port(iphy) &&
++ sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
++		/* Phy is being added in the stopped state, so we are in MPC mode;
++		 * make logical port index = physical port index.
++ */
++ iport->logical_port_index = iport->physical_port_index;
++ iport->phy_table[iphy->phy_index] = iphy;
++ sci_phy_set_port(iphy, iport);
++
++ return SCI_SUCCESS;
++ }
++
++ return SCI_FAILURE;
++}
++
++static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
++{
++ /* Make sure that this phy is part of this port */
++ if (iport->phy_table[iphy->phy_index] == iphy &&
++ phy_get_non_dummy_port(iphy) == iport) {
++ struct isci_host *ihost = iport->owning_controller;
++
++ /* Yep it is assigned to this port so remove it */
++ sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
++ iport->phy_table[iphy->phy_index] = NULL;
++ return SCI_SUCCESS;
++ }
++
++ return SCI_FAILURE;
++}
++
++void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
++{
++ u32 index;
++
++ sas->high = 0;
++ sas->low = 0;
++ for (index = 0; index < SCI_MAX_PHYS; index++)
++ if (iport->phy_table[index])
++ sci_phy_get_sas_address(iport->phy_table[index], sas);
++}
++
++void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
++{
++ struct isci_phy *iphy;
++
++ /*
++ * Ensure that the phy is both part of the port and currently
++ * connected to the remote end-point.
++ */
++ iphy = sci_port_get_a_connected_phy(iport);
++ if (iphy) {
++ if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) {
++ sci_phy_get_attached_sas_address(iphy, sas);
++ } else {
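++			/* Direct-attached SATA: reconstruct the SAS address
++			 * that the core fabricated for the end device (the
++			 * phy's own address plus the phy index; see the
++			 * comment in isci_port_link_up()).
++			 */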
++ sci_phy_get_sas_address(iphy, sas);
++ sas->low += iphy->phy_index;
++ }
++ } else {
++ sas->high = 0;
++ sas->low = 0;
++ }
++}
++
++/**
++ * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
++ *
++ * @iport: logical port on which we need to create the remote node context
++ * @rni: remote node index for this remote node context.
++ *
++ * This routine will construct a dummy remote node context data structure.
++ * This structure will be posted to the hardware to work around a scheduler
++ * error in the hardware.
++ */
++static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
++{
++ union scu_remote_node_context *rnc;
++
++ rnc = &iport->owning_controller->remote_node_context_table[rni];
++
++ memset(rnc, 0, sizeof(union scu_remote_node_context));
++
++ rnc->ssp.remote_sas_address_hi = 0;
++ rnc->ssp.remote_sas_address_lo = 0;
++
++ rnc->ssp.remote_node_index = rni;
++ rnc->ssp.remote_node_port_width = 1;
++ rnc->ssp.logical_port_index = iport->physical_port_index;
++
++ rnc->ssp.nexus_loss_timer_enable = false;
++ rnc->ssp.check_bit = false;
++ rnc->ssp.is_valid = true;
++ rnc->ssp.is_remote_node_context = true;
++ rnc->ssp.function_number = 0;
++ rnc->ssp.arbitration_wait_time = 0;
++}
++
++/*
++ * construct a dummy task context data structure. This
++ * structure will be posted to the hardware to work around a scheduler error
++ * in the hardware.
++ */
++static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
++{
++ struct isci_host *ihost = iport->owning_controller;
++ struct scu_task_context *task_context;
++
++ task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
++ memset(task_context, 0, sizeof(struct scu_task_context));
++
++ task_context->initiator_request = 1;
++ task_context->connection_rate = 1;
++ task_context->logical_port_index = iport->physical_port_index;
++ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
++ task_context->task_index = ISCI_TAG_TCI(tag);
++ task_context->valid = SCU_TASK_CONTEXT_VALID;
++ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
++ task_context->remote_node_index = iport->reserved_rni;
++ task_context->do_not_dma_ssp_good_response = 1;
++ task_context->task_phase = 0x01;
++}
++
++static void sci_port_destroy_dummy_resources(struct isci_port *iport)
++{
++ struct isci_host *ihost = iport->owning_controller;
++
++ if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
++ isci_free_tag(ihost, iport->reserved_tag);
++
++ if (iport->reserved_rni != SCU_DUMMY_INDEX)
++ sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
++ 1, iport->reserved_rni);
++
++ iport->reserved_rni = SCU_DUMMY_INDEX;
++ iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
++}
++
++void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
++{
++ u8 index;
++
++ for (index = 0; index < SCI_MAX_PHYS; index++) {
++ if (iport->active_phy_mask & (1 << index))
++ sci_phy_setup_transport(iport->phy_table[index], device_id);
++ }
++}
++
++static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy,
++ bool do_notify_user)
++{
++ struct isci_host *ihost = iport->owning_controller;
++
++ if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA)
++ sci_phy_resume(iphy);
++
++ iport->active_phy_mask |= 1 << iphy->phy_index;
++
++ sci_controller_clear_invalid_phy(ihost, iphy);
++
++ if (do_notify_user == true)
++ isci_port_link_up(ihost, iport, iphy);
++}
++
++void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
++ bool do_notify_user)
++{
++ struct isci_host *ihost = iport->owning_controller;
++
++ iport->active_phy_mask &= ~(1 << iphy->phy_index);
++
++ iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
++
++ /* Re-assign the phy back to the LP as if it were a narrow port */
++ writel(iphy->phy_index,
++ &iport->port_pe_configuration_register[iphy->phy_index]);
++
++ if (do_notify_user == true)
++ isci_port_link_down(ihost, iphy, iport);
++}
++
++static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
++{
++ struct isci_host *ihost = iport->owning_controller;
++
++ /*
++	 * Check to see if we have already reported this link as bad and if
++ * not go ahead and tell the SCI_USER that we have discovered an
++ * invalid link.
++ */
++ if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
++ ihost->invalid_phy_mask |= 1 << iphy->phy_index;
++ dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
++ }
++}
++
++static bool is_port_ready_state(enum sci_port_states state)
++{
++ switch (state) {
++ case SCI_PORT_READY:
++ case SCI_PORT_SUB_WAITING:
++ case SCI_PORT_SUB_OPERATIONAL:
++ case SCI_PORT_SUB_CONFIGURING:
++ return true;
++ default:
++ return false;
++ }
++}
++
++/* flag dummy rnc handling when exiting a ready state */
++static void port_state_machine_change(struct isci_port *iport,
++ enum sci_port_states state)
++{
++ struct sci_base_state_machine *sm = &iport->sm;
++ enum sci_port_states old_state = sm->current_state_id;
++
++ if (is_port_ready_state(old_state) && !is_port_ready_state(state))
++ iport->ready_exit = true;
++
++ sci_change_state(sm, state);
++ iport->ready_exit = false;
++}
++
++/**
++ * sci_port_general_link_up_handler - phy can be assigned to port?
++ * @iport: the port which has a phy that has gone link up.
++ * @iphy: This is the struct isci_phy object that has gone link up.
++ * @do_notify_user: This parameter specifies whether to inform the user (via
++ *    isci_port_link_up()) of the fact that a new phy has become ready.
++ *
++ * Determine if this phy can be assigned to this port. If the phy is not a
++ * valid PHY for this port then the function will notify the user. A PHY can
++ * only be part of a port if its attached SAS address is the same as that of
++ * all other PHYs in the same port.
++ */
++static void sci_port_general_link_up_handler(struct isci_port *iport,
++ struct isci_phy *iphy,
++ bool do_notify_user)
++{
++ struct sci_sas_address port_sas_address;
++ struct sci_sas_address phy_sas_address;
++
++ sci_port_get_attached_sas_address(iport, &port_sas_address);
++ sci_phy_get_attached_sas_address(iphy, &phy_sas_address);
++
++ /* If the SAS address of the new phy matches the SAS address of
++ * other phys in the port OR this is the first phy in the port,
++ * then activate the phy and allow it to be used for operations
++ * in this port.
++ */
++ if ((phy_sas_address.high == port_sas_address.high &&
++ phy_sas_address.low == port_sas_address.low) ||
++ iport->active_phy_mask == 0) {
++ struct sci_base_state_machine *sm = &iport->sm;
++
++ sci_port_activate_phy(iport, iphy, do_notify_user);
++ if (sm->current_state_id == SCI_PORT_RESETTING)
++ port_state_machine_change(iport, SCI_PORT_READY);
++ } else
++ sci_port_invalid_link_up(iport, iphy);
++}
++
++
++
++/**
++ * sci_port_is_wide() - determine whether the port is a wide port.
++ * @iport: The port for which the wide port condition is to be checked.
++ *
++ * Return: false if the port has exactly one phy object assigned; true if
++ * there are no phys or more than one phy assigned to the port.
++ */
++static bool sci_port_is_wide(struct isci_port *iport)
++{
++ u32 index;
++ u32 phy_count = 0;
++
++ for (index = 0; index < SCI_MAX_PHYS; index++) {
++ if (iport->phy_table[index] != NULL) {
++ phy_count++;
++ }
++ }
++
++ return phy_count != 1;
++}
++
++/**
++ * sci_port_link_detected() - This method is called by the PHY object when the
++ *    link is detected.
++ * @iport: The port associated with the phy object.
++ * @iphy: The phy object that is trying to go link up.
++ *
++ * If the port wants the PHY to continue on to the link up state then the port
++ * layer must return true. If the port object returns false the phy object
++ * must halt its attempt to go link up.
++ *
++ * Return: true if the phy can continue to the link up (ready) state; false
++ * if it cannot. This notification is in place for wide ports and direct
++ * attached phys. Since there are no wide ported SATA devices, a SATA phy
++ * going link up into a wide port would be an invalid port configuration.
++ */
++bool sci_port_link_detected(
++ struct isci_port *iport,
++ struct isci_phy *iphy)
++{
++ if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
++ (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) &&
++ sci_port_is_wide(iport)) {
++ sci_port_invalid_link_up(iport, iphy);
++
++ return false;
++ }
++
++ return true;
++}
++
++static void port_timeout(unsigned long data)
++{
++ struct sci_timer *tmr = (struct sci_timer *)data;
++ struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
++ struct isci_host *ihost = iport->owning_controller;
++ unsigned long flags;
++ u32 current_state;
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++
++ if (tmr->cancel)
++ goto done;
++
++ current_state = iport->sm.current_state_id;
++
++ if (current_state == SCI_PORT_RESETTING) {
++ /* if the port is still in the resetting state then the timeout
++ * fired before the reset completed.
++ */
++ port_state_machine_change(iport, SCI_PORT_FAILED);
++ } else if (current_state == SCI_PORT_STOPPED) {
++		/* if the port is stopped then the start request failed. In this
++ * case stay in the stopped state.
++ */
++ dev_err(sciport_to_dev(iport),
++			"%s: SCIC Port 0x%p failed to start before timeout.\n",
++ __func__,
++ iport);
++ } else if (current_state == SCI_PORT_STOPPING) {
++ /* if the port is still stopping then the stop has not completed */
++ isci_port_stop_complete(iport->owning_controller,
++ iport,
++ SCI_FAILURE_TIMEOUT);
++ } else {
++ /* The port is in the ready state and we have a timer
++		 * reporting a timeout; this should not happen.
++ */
++ dev_err(sciport_to_dev(iport),
++ "%s: SCIC Port 0x%p is processing a timeout operation "
++ "in state %d.\n", __func__, iport, current_state);
++ }
++
++done:
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++}
++
++/* --------------------------------------------------------------------------- */
++
++/**
++ * sci_port_update_viit_entry() - This function updates the hardware's VIIT
++ *    entry for this port.
++ * @iport: the port whose VIIT entry is to be updated.
++ */
++static void sci_port_update_viit_entry(struct isci_port *iport)
++{
++ struct sci_sas_address sas_address;
++
++ sci_port_get_sas_address(iport, &sas_address);
++
++ writel(sas_address.high,
++ &iport->viit_registers->initiator_sas_address_hi);
++ writel(sas_address.low,
++ &iport->viit_registers->initiator_sas_address_lo);
++
++	/* This value gets cleared just in case it's not already cleared */
++ writel(0, &iport->viit_registers->reserved);
++
++ /* We are required to update the status register last */
++ writel(SCU_VIIT_ENTRY_ID_VIIT |
++ SCU_VIIT_IPPT_INITIATOR |
++ ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
++ SCU_VIIT_STATUS_ALL_VALID,
++ &iport->viit_registers->status);
++}
++
++enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
++{
++ u16 index;
++ struct isci_phy *iphy;
++ enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS;
++
++ /*
++ * Loop through all of the phys in this port and find the phy with the
++	 * lowest maximum link rate.
++	 */
++ for (index = 0; index < SCI_MAX_PHYS; index++) {
++ iphy = iport->phy_table[index];
++ if (iphy && sci_port_active_phy(iport, iphy) &&
++ iphy->max_negotiated_speed < max_allowed_speed)
++ max_allowed_speed = iphy->max_negotiated_speed;
++ }
++
++ return max_allowed_speed;
++}
++
++static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
++{
++ u32 pts_control_value;
++
++ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
++ pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND);
++ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
++}
++
++/**
++ * sci_port_post_dummy_request() - post dummy/workaround request
++ * @iport: the port on which to post the dummy task
++ *
++ * Prevent the hardware scheduler from posting new requests to the front
++ * of the scheduler queue causing a starvation problem for currently
++ * ongoing requests.
++ *
++ */
++static void sci_port_post_dummy_request(struct isci_port *iport)
++{
++ struct isci_host *ihost = iport->owning_controller;
++ u16 tag = iport->reserved_tag;
++ struct scu_task_context *tc;
++ u32 command;
++
++ tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
++ tc->abort = 0;
++
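++	/* Pack the request type, the destination physical port, and the
++	 * reserved task-context index into the single command word handed
++	 * to sci_controller_post_request().
++	 */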
++ command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
++ iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
++ ISCI_TAG_TCI(tag);
++
++ sci_controller_post_request(ihost, command);
++}
++
++/**
++ * sci_port_abort_dummy_request() - This routine will abort the dummy request
++ *    for this port. This will allow the hardware to power down parts of the
++ *    silicon to save power.
++ * @iport: The port on which the task must be aborted.
++ *
++ */
++static void sci_port_abort_dummy_request(struct isci_port *iport)
++{
++ struct isci_host *ihost = iport->owning_controller;
++ u16 tag = iport->reserved_tag;
++ struct scu_task_context *tc;
++ u32 command;
++
++ tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
++ tc->abort = 1;
++
++ command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
++ iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
++ ISCI_TAG_TCI(tag);
++
++ sci_controller_post_request(ihost, command);
++}
++
++/**
++ * sci_port_resume_port_task_scheduler() - This method will resume the port
++ *    task scheduler for this port object.
++ * @iport: This is the struct isci_port object to resume.
++ */
++static void
++sci_port_resume_port_task_scheduler(struct isci_port *iport)
++{
++ u32 pts_control_value;
++
++ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
++ pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND);
++ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
++}
++
++static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
++
++ sci_port_suspend_port_task_scheduler(iport);
++
++ iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;
++
++ if (iport->active_phy_mask != 0) {
++ /* At least one of the phys on the port is ready */
++ port_state_machine_change(iport,
++ SCI_PORT_SUB_OPERATIONAL);
++ }
++}
++
++static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
++{
++ u32 index;
++ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
++ struct isci_host *ihost = iport->owning_controller;
++
++ isci_port_ready(ihost, iport);
++
++ for (index = 0; index < SCI_MAX_PHYS; index++) {
++ if (iport->phy_table[index]) {
++ writel(iport->physical_port_index,
++ &iport->port_pe_configuration_register[
++ iport->phy_table[index]->phy_index]);
++ }
++ }
++
++ sci_port_update_viit_entry(iport);
++
++ sci_port_resume_port_task_scheduler(iport);
++
++ /*
++ * Post the dummy task for the port so the hardware can schedule
++ * io correctly
++ */
++ sci_port_post_dummy_request(iport);
++}
++
++static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
++{
++ struct isci_host *ihost = iport->owning_controller;
++ u8 phys_index = iport->physical_port_index;
++ union scu_remote_node_context *rnc;
++ u16 rni = iport->reserved_rni;
++ u32 command;
++
++ rnc = &ihost->remote_node_context_table[rni];
++
++ rnc->ssp.is_valid = false;
++
++ /* ensure the preceding tc abort request has reached the
++ * controller and give it ample time to act before posting the rnc
++ * invalidate
++ */
++ readl(&ihost->smu_registers->interrupt_status); /* flush */
++ udelay(10);
++
++ command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
++ phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
++
++ sci_controller_post_request(ihost, command);
++}
++
++/**
++ * sci_port_ready_substate_operational_exit() - This method will perform the
++ *    actions required by the struct isci_port on exiting the
++ *    SCI_PORT_SUB_OPERATIONAL state.
++ * @sm: This is the state machine contained in a struct isci_port object.
++ *
++ * This function reports the port not ready and suspends the port task
++ * scheduler.
++ */
++static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
++{
++ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
++ struct isci_host *ihost = iport->owning_controller;
++
++ /*
++	 * Kill the dummy task for this port. If it has not yet posted,
++	 * the hardware will treat this as a NOP and just return abort
++ * complete.
++ */
++ sci_port_abort_dummy_request(iport);
++
++ isci_port_not_ready(ihost, iport);
++
++ if (iport->ready_exit)
++ sci_port_invalidate_dummy_remote_node(iport);
++}
++
++static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
++ struct isci_host *ihost = iport->owning_controller;
++
++ if (iport->active_phy_mask == 0) {
++ isci_port_not_ready(ihost, iport);
++
++ port_state_machine_change(iport,
++ SCI_PORT_SUB_WAITING);
++ } else if (iport->started_request_count == 0)
++ port_state_machine_change(iport,
++ SCI_PORT_SUB_OPERATIONAL);
++}
++
++static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
++{
++ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
++
++ sci_port_suspend_port_task_scheduler(iport);
++ if (iport->ready_exit)
++ sci_port_invalidate_dummy_remote_node(iport);
++}
++
++enum sci_status sci_port_start(struct isci_port *iport)
++{
++ struct isci_host *ihost = iport->owning_controller;
++ enum sci_status status = SCI_SUCCESS;
++ enum sci_port_states state;
++ u32 phy_mask;
++
++ state = iport->sm.current_state_id;
++ if (state != SCI_PORT_STOPPED) {
++ dev_warn(sciport_to_dev(iport),
++ "%s: in wrong state: %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++ if (iport->assigned_device_count > 0) {
++ /* TODO This is a start failure operation because
++ * there are still devices assigned to this port.
++ * There must be no devices assigned to a port on a
++ * start operation.
++ */
++ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
++ }
++
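++	/* Reserve a remote node index and an IO tag for the dummy
++	 * RNC/task-context pair that is posted to work around the hardware
++	 * scheduler erratum (see sci_port_construct_dummy_rnc() and
++	 * sci_port_construct_dummy_task()).
++	 */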
++ if (iport->reserved_rni == SCU_DUMMY_INDEX) {
++ u16 rni = sci_remote_node_table_allocate_remote_node(
++ &ihost->available_remote_nodes, 1);
++
++ if (rni != SCU_DUMMY_INDEX)
++ sci_port_construct_dummy_rnc(iport, rni);
++ else
++ status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
++ iport->reserved_rni = rni;
++ }
++
++ if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
++ u16 tag;
++
++ tag = isci_alloc_tag(ihost);
++ if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
++ status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
++ else
++ sci_port_construct_dummy_task(iport, tag);
++ iport->reserved_tag = tag;
++ }
++
++ if (status == SCI_SUCCESS) {
++ phy_mask = sci_port_get_phys(iport);
++
++ /*
++ * There are one or more phys assigned to this port. Make sure
++ * the port's phy mask is in fact legal and supported by the
++ * silicon.
++ */
++ if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) {
++ port_state_machine_change(iport,
++ SCI_PORT_READY);
++
++ return SCI_SUCCESS;
++ }
++ status = SCI_FAILURE;
++ }
++
++ if (status != SCI_SUCCESS)
++ sci_port_destroy_dummy_resources(iport);
++
++ return status;
++}
++
++enum sci_status sci_port_stop(struct isci_port *iport)
++{
++ enum sci_port_states state;
++
++ state = iport->sm.current_state_id;
++ switch (state) {
++ case SCI_PORT_STOPPED:
++ return SCI_SUCCESS;
++ case SCI_PORT_SUB_WAITING:
++ case SCI_PORT_SUB_OPERATIONAL:
++ case SCI_PORT_SUB_CONFIGURING:
++ case SCI_PORT_RESETTING:
++ port_state_machine_change(iport,
++ SCI_PORT_STOPPING);
++ return SCI_SUCCESS;
++ default:
++ dev_warn(sciport_to_dev(iport),
++ "%s: in wrong state: %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++}
++
++static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
++{
++ enum sci_status status = SCI_FAILURE_INVALID_PHY;
++ struct isci_phy *iphy = NULL;
++ enum sci_port_states state;
++ u32 phy_index;
++
++ state = iport->sm.current_state_id;
++ if (state != SCI_PORT_SUB_OPERATIONAL) {
++ dev_warn(sciport_to_dev(iport),
++ "%s: in wrong state: %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++ /* Select a phy on which we can send the hard reset request. */
++ for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) {
++ iphy = iport->phy_table[phy_index];
++ if (iphy && !sci_port_active_phy(iport, iphy)) {
++ /*
++			 * We found a phy but it is not ready; select a
++			 * different phy.
++ */
++ iphy = NULL;
++ }
++ }
++
++ /* If we have a phy then go ahead and start the reset procedure */
++ if (!iphy)
++ return status;
++ status = sci_phy_reset(iphy);
++
++ if (status != SCI_SUCCESS)
++ return status;
++
++ sci_mod_timer(&iport->timer, timeout);
++ iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;
++
++ port_state_machine_change(iport, SCI_PORT_RESETTING);
++ return SCI_SUCCESS;
++}
++
++/**
++ * sci_port_add_phy() - add a PHY to the selected port.
++ * @iport: This parameter specifies the port in which the phy will be added.
++ * @iphy: This parameter is the phy which is to be added to the port.
++ *
++ * This method returns an enum sci_status: SCI_SUCCESS when the phy has been
++ * added to the port; any other status is a failure to add the phy to the
++ * port.
++ */
++enum sci_status sci_port_add_phy(struct isci_port *iport,
++ struct isci_phy *iphy)
++{
++ enum sci_status status;
++ enum sci_port_states state;
++
++ state = iport->sm.current_state_id;
++ switch (state) {
++ case SCI_PORT_STOPPED: {
++ struct sci_sas_address port_sas_address;
++
++ /* Read the port assigned SAS Address if there is one */
++ sci_port_get_sas_address(iport, &port_sas_address);
++
++ if (port_sas_address.high != 0 && port_sas_address.low != 0) {
++ struct sci_sas_address phy_sas_address;
++
++ /* Make sure that the PHY SAS Address matches the SAS Address
++ * for this port
++ */
++ sci_phy_get_sas_address(iphy, &phy_sas_address);
++
++ if (port_sas_address.high != phy_sas_address.high ||
++ port_sas_address.low != phy_sas_address.low)
++ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
++ }
++ return sci_port_set_phy(iport, iphy);
++ }
++ case SCI_PORT_SUB_WAITING:
++ case SCI_PORT_SUB_OPERATIONAL:
++ status = sci_port_set_phy(iport, iphy);
++
++ if (status != SCI_SUCCESS)
++ return status;
++
++ sci_port_general_link_up_handler(iport, iphy, true);
++ iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
++ port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
++
++ return status;
++ case SCI_PORT_SUB_CONFIGURING:
++ status = sci_port_set_phy(iport, iphy);
++
++ if (status != SCI_SUCCESS)
++ return status;
++ sci_port_general_link_up_handler(iport, iphy, true);
++
++ /* Re-enter the configuring state since this may be the last phy in
++ * the port.
++ */
++ port_state_machine_change(iport,
++ SCI_PORT_SUB_CONFIGURING);
++ return SCI_SUCCESS;
++ default:
++ dev_warn(sciport_to_dev(iport),
++ "%s: in wrong state: %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++}
++
++/**
++ * sci_port_remove_phy() - remove a PHY from the selected port.
++ * @iport: This parameter specifies the port from which the phy will be
++ *    removed.
++ * @iphy: This parameter is the phy which is to be removed from the port.
++ *
++ * This method returns an enum sci_status: SCI_SUCCESS when the phy has been
++ * removed from the port; any other status is a failure to remove the phy
++ * from the port.
++ */
++enum sci_status sci_port_remove_phy(struct isci_port *iport,
++ struct isci_phy *iphy)
++{
++ enum sci_status status;
++ enum sci_port_states state;
++
++ state = iport->sm.current_state_id;
++
++ switch (state) {
++ case SCI_PORT_STOPPED:
++ return sci_port_clear_phy(iport, iphy);
++ case SCI_PORT_SUB_OPERATIONAL:
++ status = sci_port_clear_phy(iport, iphy);
++ if (status != SCI_SUCCESS)
++ return status;
++
++ sci_port_deactivate_phy(iport, iphy, true);
++ iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
++ port_state_machine_change(iport,
++ SCI_PORT_SUB_CONFIGURING);
++ return SCI_SUCCESS;
++ case SCI_PORT_SUB_CONFIGURING:
++ status = sci_port_clear_phy(iport, iphy);
++
++ if (status != SCI_SUCCESS)
++ return status;
++ sci_port_deactivate_phy(iport, iphy, true);
++
++ /* Re-enter the configuring state since this may be the last phy in
++ * the port
++ */
++ port_state_machine_change(iport,
++ SCI_PORT_SUB_CONFIGURING);
++ return SCI_SUCCESS;
++ default:
++ dev_warn(sciport_to_dev(iport),
++ "%s: in wrong state: %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++}
++
++enum sci_status sci_port_link_up(struct isci_port *iport,
++ struct isci_phy *iphy)
++{
++ enum sci_port_states state;
++
++ state = iport->sm.current_state_id;
++ switch (state) {
++ case SCI_PORT_SUB_WAITING:
++ /* Since this is the first phy going link up for the port we
++ * can just enable it and continue
++ */
++ sci_port_activate_phy(iport, iphy, true);
++
++ port_state_machine_change(iport,
++ SCI_PORT_SUB_OPERATIONAL);
++ return SCI_SUCCESS;
++ case SCI_PORT_SUB_OPERATIONAL:
++ sci_port_general_link_up_handler(iport, iphy, true);
++ return SCI_SUCCESS;
++ case SCI_PORT_RESETTING:
++ /* TODO We should make sure that the phy that has gone
++ * link up is the same one on which we sent the reset. It is
++ * possible that the phy on which we sent the reset is not the
++ * one that has gone link up and we want to make sure that
++		 * the phy being reset comes back. Consider the case where a
++		 * reset is sent but before the hardware processes the reset it
++		 * gets a link up on the port because of a hot plug event.
++		 * Because of the reset request this phy will go link down
++ * almost immediately.
++ */
++
++ /* In the resetting state we don't notify the user regarding
++ * link up and link down notifications.
++ */
++ sci_port_general_link_up_handler(iport, iphy, false);
++ return SCI_SUCCESS;
++ default:
++ dev_warn(sciport_to_dev(iport),
++ "%s: in wrong state: %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++}
++
++enum sci_status sci_port_link_down(struct isci_port *iport,
++ struct isci_phy *iphy)
++{
++ enum sci_port_states state;
++
++ state = iport->sm.current_state_id;
++ switch (state) {
++ case SCI_PORT_SUB_OPERATIONAL:
++ sci_port_deactivate_phy(iport, iphy, true);
++
++ /* If there are no active phys left in the port, then
++ * transition the port to the WAITING state until such time
++ * as a phy goes link up
++ */
++ if (iport->active_phy_mask == 0)
++ port_state_machine_change(iport,
++ SCI_PORT_SUB_WAITING);
++ return SCI_SUCCESS;
++ case SCI_PORT_RESETTING:
++ /* In the resetting state we don't notify the user regarding
++ * link up and link down notifications. */
++ sci_port_deactivate_phy(iport, iphy, false);
++ return SCI_SUCCESS;
++ default:
++ dev_warn(sciport_to_dev(iport),
++ "%s: in wrong state: %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++}
++
++enum sci_status sci_port_start_io(struct isci_port *iport,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq)
++{
++ enum sci_port_states state;
++
++ state = iport->sm.current_state_id;
++ switch (state) {
++ case SCI_PORT_SUB_WAITING:
++ return SCI_FAILURE_INVALID_STATE;
++ case SCI_PORT_SUB_OPERATIONAL:
++ iport->started_request_count++;
++ return SCI_SUCCESS;
++ default:
++ dev_warn(sciport_to_dev(iport),
++ "%s: in wrong state: %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++}
++
++enum sci_status sci_port_complete_io(struct isci_port *iport,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq)
++{
++ enum sci_port_states state;
++
++ state = iport->sm.current_state_id;
++ switch (state) {
++ case SCI_PORT_STOPPED:
++ dev_warn(sciport_to_dev(iport),
++ "%s: in wrong state: %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ case SCI_PORT_STOPPING:
++ sci_port_decrement_request_count(iport);
++
++ if (iport->started_request_count == 0)
++ port_state_machine_change(iport,
++ SCI_PORT_STOPPED);
++ break;
++ case SCI_PORT_READY:
++ case SCI_PORT_RESETTING:
++ case SCI_PORT_FAILED:
++ case SCI_PORT_SUB_WAITING:
++ case SCI_PORT_SUB_OPERATIONAL:
++ sci_port_decrement_request_count(iport);
++ break;
++ case SCI_PORT_SUB_CONFIGURING:
++ sci_port_decrement_request_count(iport);
++ if (iport->started_request_count == 0) {
++ port_state_machine_change(iport,
++ SCI_PORT_SUB_OPERATIONAL);
++ }
++ break;
++ }
++ return SCI_SUCCESS;
++}
++
++static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
++{
++ u32 pts_control_value;
++
++ /* enable the port task scheduler in a suspended state */
++ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
++ pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
++ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
++}
++
++static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
++{
++ u32 pts_control_value;
++
++ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
++ pts_control_value &=
++ ~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
++ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
++}
++
++static void sci_port_post_dummy_remote_node(struct isci_port *iport)
++{
++ struct isci_host *ihost = iport->owning_controller;
++ u8 phys_index = iport->physical_port_index;
++ union scu_remote_node_context *rnc;
++ u16 rni = iport->reserved_rni;
++ u32 command;
++
++ rnc = &ihost->remote_node_context_table[rni];
++ rnc->ssp.is_valid = true;
++
++ command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
++ phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
++
++ sci_controller_post_request(ihost, command);
++
++ /* ensure hardware has seen the post rnc command and give it
++ * ample time to act before sending the suspend
++ */
++ readl(&ihost->smu_registers->interrupt_status); /* flush */
++ udelay(10);
++
++ command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
++ phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
++
++ sci_controller_post_request(ihost, command);
++}
++
++static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
++
++ if (iport->sm.previous_state_id == SCI_PORT_STOPPING) {
++ /*
++		 * If we enter this state because of a request to stop
++		 * the port then we want to disable the hardware's port
++		 * task scheduler.
++		 */
++ sci_port_disable_port_task_scheduler(iport);
++ }
++}
++
++static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
++{
++ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
++
++ /* Enable and suspend the port task scheduler */
++ sci_port_enable_port_task_scheduler(iport);
++}
++
++static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
++ struct isci_host *ihost = iport->owning_controller;
++ u32 prev_state;
++
++ prev_state = iport->sm.previous_state_id;
++ if (prev_state == SCI_PORT_RESETTING)
++ isci_port_hard_reset_complete(iport, SCI_SUCCESS);
++ else
++ isci_port_not_ready(ihost, iport);
++
++ /* Post and suspend the dummy remote node context for this port. */
++ sci_port_post_dummy_remote_node(iport);
++
++ /* Start the ready substate machine */
++ port_state_machine_change(iport,
++ SCI_PORT_SUB_WAITING);
++}
++
++static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
++{
++ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
++
++ sci_del_timer(&iport->timer);
++}
++
++static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
++{
++ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
++
++ sci_del_timer(&iport->timer);
++
++ sci_port_destroy_dummy_resources(iport);
++}
++
++static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
++
++ isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
++}
++
++/* --------------------------------------------------------------------------- */
++
++static const struct sci_base_state sci_port_state_table[] = {
++ [SCI_PORT_STOPPED] = {
++ .enter_state = sci_port_stopped_state_enter,
++ .exit_state = sci_port_stopped_state_exit
++ },
++ [SCI_PORT_STOPPING] = {
++ .exit_state = sci_port_stopping_state_exit
++ },
++ [SCI_PORT_READY] = {
++ .enter_state = sci_port_ready_state_enter,
++ },
++ [SCI_PORT_SUB_WAITING] = {
++ .enter_state = sci_port_ready_substate_waiting_enter,
++ },
++ [SCI_PORT_SUB_OPERATIONAL] = {
++ .enter_state = sci_port_ready_substate_operational_enter,
++ .exit_state = sci_port_ready_substate_operational_exit
++ },
++ [SCI_PORT_SUB_CONFIGURING] = {
++ .enter_state = sci_port_ready_substate_configuring_enter,
++ .exit_state = sci_port_ready_substate_configuring_exit
++ },
++ [SCI_PORT_RESETTING] = {
++ .exit_state = sci_port_resetting_state_exit
++ },
++ [SCI_PORT_FAILED] = {
++ .enter_state = sci_port_failed_state_enter,
++ }
++};
++
++void sci_port_construct(struct isci_port *iport, u8 index,
++ struct isci_host *ihost)
++{
++ sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);
++
++ iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
++ iport->physical_port_index = index;
++ iport->active_phy_mask = 0;
++ iport->ready_exit = false;
++
++ iport->owning_controller = ihost;
++
++ iport->started_request_count = 0;
++ iport->assigned_device_count = 0;
++
++ iport->reserved_rni = SCU_DUMMY_INDEX;
++ iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
++
++ sci_init_timer(&iport->timer, port_timeout);
++
++ iport->port_task_scheduler_registers = NULL;
++
++ for (index = 0; index < SCI_MAX_PHYS; index++)
++ iport->phy_table[index] = NULL;
++}
++
++void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
++{
++ INIT_LIST_HEAD(&iport->remote_dev_list);
++ INIT_LIST_HEAD(&iport->domain_dev_list);
++ spin_lock_init(&iport->state_lock);
++ init_completion(&iport->start_complete);
++ iport->isci_host = ihost;
++ isci_port_change_state(iport, isci_freed);
++ atomic_set(&iport->event, 0);
++}
++
++/**
++ * isci_port_get_state() - This function gets the status of the port object.
++ * @isci_port: This parameter points to the isci_port object
++ *
++ * Return: the status of the object as an isci_status enum.
++ */
++enum isci_status isci_port_get_state(
++ struct isci_port *isci_port)
++{
++ return isci_port->status;
++}
++
++void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
++{
++ struct isci_host *ihost = iport->owning_controller;
++
++ /* notify the user. */
++ isci_port_bc_change_received(ihost, iport, iphy);
++}
++
++int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
++ struct isci_phy *iphy)
++{
++ unsigned long flags;
++ enum sci_status status;
++ int idx, ret = TMF_RESP_FUNC_COMPLETE;
++
++ dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
++ __func__, iport);
++
++ init_completion(&iport->hard_reset_complete);
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++
++ #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
++ status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
++
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++
++ if (status == SCI_SUCCESS) {
++ wait_for_completion(&iport->hard_reset_complete);
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: iport = %p; hard reset completion\n",
++ __func__, iport);
++
++ if (iport->hard_reset_status != SCI_SUCCESS)
++ ret = TMF_RESP_FUNC_FAILED;
++ } else {
++ ret = TMF_RESP_FUNC_FAILED;
++
++ dev_err(&ihost->pdev->dev,
++ "%s: iport = %p; sci_port_hard_reset call"
++ " failed 0x%x\n",
++ __func__, iport, status);
++
++ }
++
++ /* If the hard reset for the port has failed, consider this
++ * the same as link failures on all phys in the port.
++ */
++ if (ret != TMF_RESP_FUNC_COMPLETE) {
++
++ dev_err(&ihost->pdev->dev,
++ "%s: iport = %p; hard reset failed "
++ "(0x%x) - driving explicit link fail for all phys\n",
++ __func__, iport, iport->hard_reset_status);
++
++ /* Down all phys in the port. */
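++		/* Stopping and then restarting each phy forces a
++		 * link-down/link-up cycle, so libsas sees an explicit link
++		 * failure and re-discovers whatever is still attached.
++		 */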
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++ for (idx = 0; idx < SCI_MAX_PHYS; ++idx) {
++ struct isci_phy *iphy = iport->phy_table[idx];
++
++ if (!iphy)
++ continue;
++ sci_phy_stop(iphy);
++ sci_phy_start(iphy);
++ }
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++ }
++ return ret;
++}
++
++/**
++ * isci_port_deformed() - This function is called by libsas when a port becomes
++ * inactive.
++ * @phy: This parameter specifies the libsas phy with the inactive port.
++ *
++ */
++void isci_port_deformed(struct asd_sas_phy *phy)
++{
++ pr_debug("%s: sas_phy = %p\n", __func__, phy);
++}
++
++/**
++ * isci_port_formed() - This function is called by libsas when a port becomes
++ * active.
++ * @phy: This parameter specifies the libsas phy with the active port.
++ *
++ */
++void isci_port_formed(struct asd_sas_phy *phy)
++{
++ pr_debug("%s: sas_phy = %p, sas_port = %p\n", __func__, phy, phy->port);
++}
+diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
+new file mode 100644
+index 0000000..b50ecd4
+--- /dev/null
++++ b/drivers/scsi/isci/port.h
+@@ -0,0 +1,306 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _ISCI_PORT_H_
++#define _ISCI_PORT_H_
++
++#include <scsi/libsas.h>
++#include "isci.h"
++#include "sas.h"
++#include "phy.h"
++
++#define SCIC_SDS_DUMMY_PORT 0xFF
++
++struct isci_phy;
++struct isci_host;
++
++enum isci_status {
++ isci_freed = 0x00,
++ isci_starting = 0x01,
++ isci_ready = 0x02,
++ isci_ready_for_io = 0x03,
++ isci_stopping = 0x04,
++ isci_stopped = 0x05,
++};
++
++/**
++ * struct isci_port - isci direct attached sas port object
++ * @event: counts bcns and port stop events (for bcn filtering)
++ * @ready_exit: several states constitute 'ready'. When exiting ready we
++ * need to take extra port-teardown actions that are
++ * skipped when exiting to another 'ready' state.
++ * @logical_port_index: software port index
++ * @physical_port_index: hardware port index
++ * @active_phy_mask: identifies phy members
++ * @reserved_tag: reserved IO tag for the dummy task-context workaround
++ * @reserved_rni: reserved remote node index for the port task scheduler
++ *                workaround
++ * @started_request_count: reference count for outstanding commands
++ * @not_ready_reason: set during state transitions and notified
++ * @timer: timeout start/stop operations
++ */
++struct isci_port {
++ enum isci_status status;
++ #define IPORT_BCN_BLOCKED 0
++ #define IPORT_BCN_PENDING 1
++ unsigned long flags;
++ atomic_t event;
++ struct isci_host *isci_host;
++ struct asd_sas_port sas_port;
++ struct list_head remote_dev_list;
++ spinlock_t state_lock;
++ struct list_head domain_dev_list;
++ struct completion start_complete;
++ struct completion hard_reset_complete;
++ enum sci_status hard_reset_status;
++ struct sci_base_state_machine sm;
++ bool ready_exit;
++ u8 logical_port_index;
++ u8 physical_port_index;
++ u8 active_phy_mask;
++ u16 reserved_rni;
++ u16 reserved_tag;
++ u32 started_request_count;
++ u32 assigned_device_count;
++ u32 not_ready_reason;
++ struct isci_phy *phy_table[SCI_MAX_PHYS];
++ struct isci_host *owning_controller;
++ struct sci_timer timer;
++ struct scu_port_task_scheduler_registers __iomem *port_task_scheduler_registers;
++ /* XXX rework: only one register, no need to replicate per-port */
++ u32 __iomem *port_pe_configuration_register;
++ struct scu_viit_entry __iomem *viit_registers;
++};
++
++enum sci_port_not_ready_reason_code {
++ SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS,
++ SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED,
++ SCIC_PORT_NOT_READY_INVALID_PORT_CONFIGURATION,
++ SCIC_PORT_NOT_READY_RECONFIGURING,
++
++ SCIC_PORT_NOT_READY_REASON_CODE_MAX
++};
++
++struct sci_port_end_point_properties {
++ struct sci_sas_address sas_address;
++ struct sci_phy_proto protocols;
++};
++
++struct sci_port_properties {
++ u32 index;
++ struct sci_port_end_point_properties local;
++ struct sci_port_end_point_properties remote;
++ u32 phy_mask;
++};
++
++/**
++ * enum sci_port_states - This enumeration depicts all the states for the
++ * common port state machine.
++ *
++ *
++ */
++enum sci_port_states {
++ /**
++ * This state indicates that the port has successfully been stopped.
++ * In this state no new IO operations are permitted.
++ * This state is entered from the STOPPING state.
++ */
++ SCI_PORT_STOPPED,
++
++ /**
++ * This state indicates that the port is in the process of stopping.
++ * In this state no new IO operations are permitted, but existing IO
++ * operations are allowed to complete.
++ * This state is entered from the READY state.
++ */
++ SCI_PORT_STOPPING,
++
++ /**
++ * This state indicates the port is now ready. Thus, the user is
++ * able to perform IO operations on this port.
++ * This state is entered from the STARTING state.
++ */
++ SCI_PORT_READY,
++
++ /**
++ * The substate where the port is started and ready but has no
++ * active phys.
++ */
++ SCI_PORT_SUB_WAITING,
++
++ /**
++ * The substate where the port is started and ready and there is
++ * at least one phy operational.
++ */
++ SCI_PORT_SUB_OPERATIONAL,
++
++ /**
++ * The substate where the port is started and there was an
++ * add/remove phy event. This state is only used in Automatic
++ * Port Configuration Mode (APC)
++ */
++ SCI_PORT_SUB_CONFIGURING,
++
++ /**
++ * This state indicates the port is in the process of performing a hard
++ * reset. Thus, the user is unable to perform IO operations on this
++ * port.
++ * This state is entered from the READY state.
++ */
++ SCI_PORT_RESETTING,
++
++ /**
++ * This state indicates the port has failed a reset request. This state
++ * is entered when a port reset request times out.
++ * This state is entered from the RESETTING state.
++ */
++ SCI_PORT_FAILED,
++
++
++};
++
++static inline void sci_port_decrement_request_count(struct isci_port *iport)
++{
++ if (WARN_ONCE(iport->started_request_count == 0,
++ "%s: tried to decrement started_request_count past 0!?",
++ __func__))
++ /* pass */;
++ else
++ iport->started_request_count--;
++}
++
++#define sci_port_active_phy(port, phy) \
++ (((port)->active_phy_mask & (1 << (phy)->phy_index)) != 0)
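++/* Usage sketch (illustrative, not from the driver): test whether a phy
++ * is active within its port before acting on it; the dev_dbg() call and
++ * its arguments here are assumptions for illustration only.
++ *
++ *	if (sci_port_active_phy(iport, iphy))
++ *		dev_dbg(&iport->isci_host->pdev->dev,
++ *			"phy %d active\n", iphy->phy_index);
++ */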
++
++void sci_port_construct(
++ struct isci_port *iport,
++ u8 port_index,
++ struct isci_host *ihost);
++
++enum sci_status sci_port_start(struct isci_port *iport);
++enum sci_status sci_port_stop(struct isci_port *iport);
++
++enum sci_status sci_port_add_phy(
++ struct isci_port *iport,
++ struct isci_phy *iphy);
++
++enum sci_status sci_port_remove_phy(
++ struct isci_port *iport,
++ struct isci_phy *iphy);
++
++void sci_port_setup_transports(
++ struct isci_port *iport,
++ u32 device_id);
++
++void isci_port_bcn_enable(struct isci_host *, struct isci_port *);
++
++void sci_port_deactivate_phy(
++ struct isci_port *iport,
++ struct isci_phy *iphy,
++ bool do_notify_user);
++
++bool sci_port_link_detected(
++ struct isci_port *iport,
++ struct isci_phy *iphy);
++
++enum sci_status sci_port_link_up(struct isci_port *iport,
++ struct isci_phy *iphy);
++enum sci_status sci_port_link_down(struct isci_port *iport,
++ struct isci_phy *iphy);
++
++struct isci_request;
++struct isci_remote_device;
++enum sci_status sci_port_start_io(
++ struct isci_port *iport,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq);
++
++enum sci_status sci_port_complete_io(
++ struct isci_port *iport,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq);
++
++enum sas_linkrate sci_port_get_max_allowed_speed(
++ struct isci_port *iport);
++
++void sci_port_broadcast_change_received(
++ struct isci_port *iport,
++ struct isci_phy *iphy);
++
++bool sci_port_is_valid_phy_assignment(
++ struct isci_port *iport,
++ u32 phy_index);
++
++void sci_port_get_sas_address(
++ struct isci_port *iport,
++ struct sci_sas_address *sas_address);
++
++void sci_port_get_attached_sas_address(
++ struct isci_port *iport,
++ struct sci_sas_address *sas_address);
++
++enum isci_status isci_port_get_state(
++ struct isci_port *isci_port);
++
++void isci_port_formed(struct asd_sas_phy *);
++void isci_port_deformed(struct asd_sas_phy *);
++
++void isci_port_init(
++ struct isci_port *port,
++ struct isci_host *host,
++ int index);
++
++int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
++ struct isci_phy *iphy);
++#endif /* !defined(_ISCI_PORT_H_) */
+diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
+new file mode 100644
+index 0000000..486b113
+--- /dev/null
++++ b/drivers/scsi/isci/port_config.c
+@@ -0,0 +1,754 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "host.h"
++
++#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10)
++#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10)
++#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (100)
++
++enum SCIC_SDS_APC_ACTIVITY {
++ SCIC_SDS_APC_SKIP_PHY,
++ SCIC_SDS_APC_ADD_PHY,
++ SCIC_SDS_APC_START_TIMER,
++
++ SCIC_SDS_APC_ACTIVITY_MAX
++};
++
++/*
++ * ******************************************************************************
++ * General port configuration agent routines
++ * ****************************************************************************** */
++
++/**
++ *
++ * @address_one: A SAS Address to be compared.
++ * @address_two: A SAS Address to be compared.
++ *
++ * Compare the two SAS addresses: returns a value > 0 if address one is
++ * greater than address two, a value < 0 if address one is less than
++ * address two, and 0 if the two addresses are identical.
++ */
++static s32 sci_sas_address_compare(
++ struct sci_sas_address address_one,
++ struct sci_sas_address address_two)
++{
++ if (address_one.high > address_two.high) {
++ return 1;
++ } else if (address_one.high < address_two.high) {
++ return -1;
++ } else if (address_one.low > address_two.low) {
++ return 1;
++ } else if (address_one.low < address_two.low) {
++ return -1;
++ }
++
++ /* The two SAS Address must be identical */
++ return 0;
++}
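++/* Editorial sketch (not part of the driver): the word-wise comparison
++ * above is equivalent to comparing the addresses as single 64-bit
++ * values, assuming only <stdint.h>:
++ *
++ *	static int cmp64(struct sci_sas_address a, struct sci_sas_address b)
++ *	{
++ *		uint64_t x = ((uint64_t)a.high << 32) | a.low;
++ *		uint64_t y = ((uint64_t)b.high << 32) | b.low;
++ *		return (x > y) - (x < y);
++ *	}
++ */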
++
++/**
++ *
++ * @ihost: The controller object used for the port search.
++ * @iphy: The phy object to match.
++ *
++ * This routine will find a matching port for the phy. This means that the
++ * port and phy both have the same broadcast sas address and same received
++ * sas address. Returns the address of the matching port, or NULL if there
++ * is no matching port for the phy.
++ */
++static struct isci_port *sci_port_configuration_agent_find_port(
++ struct isci_host *ihost,
++ struct isci_phy *iphy)
++{
++ u8 i;
++ struct sci_sas_address port_sas_address;
++ struct sci_sas_address port_attached_device_address;
++ struct sci_sas_address phy_sas_address;
++ struct sci_sas_address phy_attached_device_address;
++
++ /*
++ * Since this phy can be a member of a wide port, check to see if one or
++ * more phys match the sent and received SAS addresses of this phy, in
++ * which case it should participate in the same port.
++ */
++ sci_phy_get_sas_address(iphy, &phy_sas_address);
++ sci_phy_get_attached_sas_address(iphy, &phy_attached_device_address);
++
++ for (i = 0; i < ihost->logical_port_entries; i++) {
++ struct isci_port *iport = &ihost->ports[i];
++
++ sci_port_get_sas_address(iport, &port_sas_address);
++ sci_port_get_attached_sas_address(iport, &port_attached_device_address);
++
++ if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 &&
++ sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0)
++ return iport;
++ }
++
++ return NULL;
++}
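++/* Worked example (illustrative addresses): if phys 0 and 1 both report
++ * local address {.high = 0x5000C500, .low = 0x0001ABCD} and attached
++ * address {.high = 0x5000C500, .low = 0x0002DCBA}, the loop above
++ * returns the same iport for both, so they aggregate into one wide
++ * port; a phy whose attached address differs gets NULL and is later
++ * placed in a port of its own. */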
++
++/**
++ *
++ * @ihost: This is the controller object that contains the port agent
++ * @port_agent: This is the port configuration agent for the controller.
++ *
++ * This routine will validate that the port configuration is correct for the
++ * SCU hardware. The SCU hardware allows the following port configurations:
++ *   LP0 -> (PE0), (PE0, PE1), (PE0, PE1, PE2, PE3)
++ *   LP1 -> (PE1)
++ *   LP2 -> (PE2), (PE2, PE3)
++ *   LP3 -> (PE3)
++ * Returns SCI_SUCCESS if the port configuration is valid for this port
++ * configuration agent, or SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION if
++ * it is not.
++static enum sci_status sci_port_configuration_agent_validate_ports(
++ struct isci_host *ihost,
++ struct sci_port_configuration_agent *port_agent)
++{
++ struct sci_sas_address first_address;
++ struct sci_sas_address second_address;
++
++ /*
++ * Sanity check the max ranges for all the phys: the max index
++ * is always equal to the port range index */
++ if (port_agent->phy_valid_port_range[0].max_index != 0 ||
++ port_agent->phy_valid_port_range[1].max_index != 1 ||
++ port_agent->phy_valid_port_range[2].max_index != 2 ||
++ port_agent->phy_valid_port_range[3].max_index != 3)
++ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
++
++ /*
++ * This is a request to configure a single x4 port or at least attempt
++ * to make all the phys into a single port */
++ if (port_agent->phy_valid_port_range[0].min_index == 0 &&
++ port_agent->phy_valid_port_range[1].min_index == 0 &&
++ port_agent->phy_valid_port_range[2].min_index == 0 &&
++ port_agent->phy_valid_port_range[3].min_index == 0)
++ return SCI_SUCCESS;
++
++ /*
++ * This is a degenerate case where phy 1 and phy 2 are assigned
++ * to the same port; this is explicitly disallowed by the hardware
++ * unless they are part of the same x4 port and this condition was
++ * already checked above. */
++ if (port_agent->phy_valid_port_range[2].min_index == 1) {
++ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
++ }
++
++ /*
++ * PE0 and PE3 can never have the same SAS Address unless they
++ * are part of the same x4 wide port and we have already checked
++ * for this condition. */
++ sci_phy_get_sas_address(&ihost->phys[0], &first_address);
++ sci_phy_get_sas_address(&ihost->phys[3], &second_address);
++
++ if (sci_sas_address_compare(first_address, second_address) == 0) {
++ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
++ }
++
++ /*
++ * If PE0 and PE1 are configured into two x1 ports, make sure that the
++ * SAS addresses for PE0 and PE2 are different since they cannot be
++ * part of the same port. */
++ if (port_agent->phy_valid_port_range[0].min_index == 0 &&
++ port_agent->phy_valid_port_range[1].min_index == 1) {
++ sci_phy_get_sas_address(&ihost->phys[0], &first_address);
++ sci_phy_get_sas_address(&ihost->phys[2], &second_address);
++
++ if (sci_sas_address_compare(first_address, second_address) == 0) {
++ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
++ }
++ }
++
++ /*
++ * If PE2 and PE3 are configured into two x1 ports, make sure that the
++ * SAS addresses for PE1 and PE3 are different since they cannot be
++ * part of the same port. */
++ if (port_agent->phy_valid_port_range[2].min_index == 2 &&
++ port_agent->phy_valid_port_range[3].min_index == 3) {
++ sci_phy_get_sas_address(&ihost->phys[1], &first_address);
++ sci_phy_get_sas_address(&ihost->phys[3], &second_address);
++
++ if (sci_sas_address_compare(first_address, second_address) == 0) {
++ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
++ }
++ }
++
++ return SCI_SUCCESS;
++}
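++/* Example (illustrative): for a 2x2 configuration (PE0+PE1 on LP0,
++ * PE2+PE3 on LP2) the ranges built by the agents below would be
++ *
++ *	phy_valid_port_range[] = { { .min_index = 0, .max_index = 0 },
++ *				   { .min_index = 0, .max_index = 1 },
++ *				   { .min_index = 2, .max_index = 2 },
++ *				   { .min_index = 2, .max_index = 3 } };
++ *
++ * which the validation above accepts (the max_index sanity check holds
++ * and no degenerate pairing is present). */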
++
++/*
++ * ******************************************************************************
++ * Manual port configuration agent routines
++ * ****************************************************************************** */
++
++/* verify all of the phys in the same port are using the same SAS address */
++static enum sci_status
++sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
++ struct sci_port_configuration_agent *port_agent)
++{
++ u32 phy_mask;
++ u32 assigned_phy_mask;
++ struct sci_sas_address sas_address;
++ struct sci_sas_address phy_assigned_address;
++ u8 port_index;
++ u8 phy_index;
++
++ assigned_phy_mask = 0;
++ sas_address.high = 0;
++ sas_address.low = 0;
++
++ for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) {
++ phy_mask = ihost->oem_parameters.ports[port_index].phy_mask;
++
++ if (!phy_mask)
++ continue;
++ /*
++ * Make sure that one or more of the phys were not already assigned to
++ * a different port. */
++ if ((phy_mask & ~assigned_phy_mask) == 0) {
++ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
++ }
++
++ /* Find the starting phy index for this round through the loop */
++ for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
++ if ((phy_mask & (1 << phy_index)) == 0)
++ continue;
++ sci_phy_get_sas_address(&ihost->phys[phy_index],
++ &sas_address);
++
++ /*
++ * The phy_index can be used as the starting point for the
++ * port range since the hardware starts all logical ports
++ * the same as the PE index. */
++ port_agent->phy_valid_port_range[phy_index].min_index = port_index;
++ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
++
++ if (phy_index != port_index) {
++ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
++ }
++
++ break;
++ }
++
++ /*
++ * See how many additional phys are being added to this logical port.
++ * Note: We have not moved the current phy_index so we will actually
++ * compare the starting phy with itself.
++ * This is expected and required to add the phy to the port. */
++ while (phy_index < SCI_MAX_PHYS) {
++ if ((phy_mask & (1 << phy_index)) == 0) {
++ phy_index++;
++ continue;
++ }
++ sci_phy_get_sas_address(&ihost->phys[phy_index],
++ &phy_assigned_address);
++
++ if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) {
++ /*
++ * The phy mask specified that this phy is part of the same port
++ * as the starting phy and it is not so fail this configuration */
++ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
++ }
++
++ port_agent->phy_valid_port_range[phy_index].min_index = port_index;
++ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
++
++ sci_port_add_phy(&ihost->ports[port_index],
++ &ihost->phys[phy_index]);
++
++ assigned_phy_mask |= (1 << phy_index);
++ /* advance so the scan of this port's mask terminates */
++ phy_index++;
++ }
++ }
++
++ return sci_port_configuration_agent_validate_ports(ihost, port_agent);
++}
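++/* Example OEM phy_mask settings against the validation above
++ * (illustrative, assuming matching SAS addresses within each mask):
++ * { 0xF, 0x0, 0x0, 0x0 } builds one x4 port on LP0 and
++ * { 0x3, 0x0, 0xC, 0x0 } builds two x2 ports on LP0 and LP2, while
++ * { 0x6, 0x0, 0x0, 0x0 } fails because the starting phy (1) does not
++ * equal its port index (0). */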
++
++static void mpc_agent_timeout(unsigned long data)
++{
++ u8 index;
++ struct sci_timer *tmr = (struct sci_timer *)data;
++ struct sci_port_configuration_agent *port_agent;
++ struct isci_host *ihost;
++ unsigned long flags;
++ u16 configure_phy_mask;
++
++ port_agent = container_of(tmr, typeof(*port_agent), timer);
++ ihost = container_of(port_agent, typeof(*ihost), port_agent);
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++
++ if (tmr->cancel)
++ goto done;
++
++ port_agent->timer_pending = false;
++
++ /* Find the mask of phys that are reported ready but not yet configured into a port */
++ configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
++
++ for (index = 0; index < SCI_MAX_PHYS; index++) {
++ struct isci_phy *iphy = &ihost->phys[index];
++
++ if (configure_phy_mask & (1 << index)) {
++ port_agent->link_up_handler(ihost, port_agent,
++ phy_get_non_dummy_port(iphy),
++ iphy);
++ }
++ }
++
++done:
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++}
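++/* The timeout handler above recovers its owning objects purely from the
++ * embedded sci_timer; a minimal sketch of the same container_of pattern
++ * (illustrative, struct owner is hypothetical):
++ *
++ *	struct owner { int x; struct sci_timer timer; };
++ *
++ *	static void cb(unsigned long data)
++ *	{
++ *		struct sci_timer *t = (struct sci_timer *)data;
++ *		struct owner *o = container_of(t, struct owner, timer);
++ *		o->x++;		(the full object is now reachable)
++ *	}
++ */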
++
++static void sci_mpc_agent_link_up(struct isci_host *ihost,
++ struct sci_port_configuration_agent *port_agent,
++ struct isci_port *iport,
++ struct isci_phy *iphy)
++{
++ /* If the port is NULL then the phy was not assigned to a port.
++ * This is because the phy was not given the same SAS Address as
++ * the other PHYs in the port.
++ */
++ if (!iport)
++ return;
++
++ port_agent->phy_ready_mask |= (1 << iphy->phy_index);
++ sci_port_link_up(iport, iphy);
++ if ((iport->active_phy_mask & (1 << iphy->phy_index)))
++ port_agent->phy_configured_mask |= (1 << iphy->phy_index);
++}
++
++/**
++ *
++ * @ihost: This is the controller object that receives the link down
++ * notification.
++ * @port_agent: This is the port configuration agent for the controller.
++ * @iport: This is the port object associated with the phy. If there is no
++ * associated port this is NULL. The port is an invalid handle only if the
++ * phy was never part of this port, which happens when the phy is not
++ * broadcasting the same SAS address as the other phys in the assigned port.
++ * @iphy: This is the phy object which has gone link down.
++ *
++ * This function handles the manual port configuration link down notifications.
++ * Since all ports and phys are associated at initialization time we just turn
++ * around and notify the port object of the link down event. If this PHY is
++ * not associated with a port no action is taken. (Is it possible to get a
++ * link down notification from a phy that has no associated port?)
++ */
++static void sci_mpc_agent_link_down(
++ struct isci_host *ihost,
++ struct sci_port_configuration_agent *port_agent,
++ struct isci_port *iport,
++ struct isci_phy *iphy)
++{
++ if (iport != NULL) {
++ /*
++ * If we can form a new port from the remainder of the phys
++ * then we want to start the timer to allow the SCI User to
++ * cleanup old devices and rediscover the port before
++ * rebuilding the port with the phys that remain in the ready
++ * state.
++ */
++ port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
++ port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
++
++ /*
++ * Check to see if there are more phys waiting to be
++ * configured into a port. If there are, allow the SCI User
++ * to tear down this port, if necessary, and then reconstruct
++ * the port after the timeout.
++ */
++ if ((port_agent->phy_configured_mask == 0x0000) &&
++ (port_agent->phy_ready_mask != 0x0000) &&
++ !port_agent->timer_pending) {
++ port_agent->timer_pending = true;
++
++ sci_mod_timer(&port_agent->timer,
++ SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT);
++ }
++
++ sci_port_link_down(iport, iphy);
++ }
++}
++
++/* verify phys are assigned a valid SAS address for automatic port
++ * configuration mode.
++ */
++static enum sci_status
++sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
++ struct sci_port_configuration_agent *port_agent)
++{
++ u8 phy_index;
++ u8 port_index;
++ struct sci_sas_address sas_address;
++ struct sci_sas_address phy_assigned_address;
++
++ phy_index = 0;
++
++ while (phy_index < SCI_MAX_PHYS) {
++ port_index = phy_index;
++
++ /* Get the assigned SAS Address for the first PHY on the controller. */
++ sci_phy_get_sas_address(&ihost->phys[phy_index],
++ &sas_address);
++
++ while (++phy_index < SCI_MAX_PHYS) {
++ sci_phy_get_sas_address(&ihost->phys[phy_index],
++ &phy_assigned_address);
++
++ /* Verify that the SAS addresses are the same for every PHY */
++ if (sci_sas_address_compare(sas_address, phy_assigned_address) == 0) {
++ port_agent->phy_valid_port_range[phy_index].min_index = port_index;
++ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
++ } else {
++ port_agent->phy_valid_port_range[phy_index].min_index = phy_index;
++ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
++ break;
++ }
++ }
++ }
++
++ return sci_port_configuration_agent_validate_ports(ihost, port_agent);
++}
++
++static void sci_apc_agent_configure_ports(struct isci_host *ihost,
++ struct sci_port_configuration_agent *port_agent,
++ struct isci_phy *iphy,
++ bool start_timer)
++{
++ u8 port_index;
++ enum sci_status status;
++ struct isci_port *iport;
++ enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY;
++
++ iport = sci_port_configuration_agent_find_port(ihost, iphy);
++
++ if (iport) {
++ if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index))
++ apc_activity = SCIC_SDS_APC_ADD_PHY;
++ else
++ apc_activity = SCIC_SDS_APC_SKIP_PHY;
++ } else {
++ /*
++ * There is no matching port for this PHY so let's search through the
++ * ports and see if we can add the PHY to its own port or maybe start
++ * the timer and wait to see if a wider port can be made.
++ *
++ * Note the break when we reach the condition where the port id == phy id */
++ for (port_index = port_agent->phy_valid_port_range[iphy->phy_index].min_index;
++ port_index <= port_agent->phy_valid_port_range[iphy->phy_index].max_index;
++ port_index++) {
++
++ iport = &ihost->ports[port_index];
++
++ /* First we must make sure that this PHY can be added to this Port. */
++ if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
++ /*
++ * Port contains a PHY with a greater PHY ID than the current
++ * PHY that has gone link up. This phy can not be part of any
++ * port so skip it and move on. */
++ if (iport->active_phy_mask > (1 << iphy->phy_index)) {
++ apc_activity = SCIC_SDS_APC_SKIP_PHY;
++ break;
++ }
++
++ /*
++ * We have reached the end of our Port list and have not found
++ * any reason why we should not either add the PHY to the port
++ * or wait for more phys to become active. */
++ if (iport->physical_port_index == iphy->phy_index) {
++ /*
++ * The port either has no active PHYs, or only active
++ * PHYs with a lower PHY id than this PHY; in either
++ * case this PHY may be added to the port. */
++ if (apc_activity != SCIC_SDS_APC_START_TIMER) {
++ apc_activity = SCIC_SDS_APC_ADD_PHY;
++ }
++
++ break;
++ }
++
++ /*
++ * The current Port has no active PHYs and this PHY could be part
++ * of this port. Since we don't know yet, set up to start the
++ * timer and see if there is a better configuration. */
++ if (iport->active_phy_mask == 0) {
++ apc_activity = SCIC_SDS_APC_START_TIMER;
++ }
++ } else if (iport->active_phy_mask != 0) {
++ /*
++ * The Port has an active phy and the current Phy can not
++ * participate in this port so skip the PHY and see if
++ * there is a better configuration. */
++ apc_activity = SCIC_SDS_APC_SKIP_PHY;
++ }
++ }
++ }
++
++ /*
++ * Check to see if the start timer operation should instead map to an
++ * add phy operation. This happens when we have been waiting to
++ * add a phy to a port but could not because the automatic port
++ * configuration engine had a choice of possible ports for the phy.
++ * Since we have gone through a timeout we are going to restrict the
++ * choice to the smallest possible port. */
++ if (!start_timer && apc_activity == SCIC_SDS_APC_START_TIMER)
++ apc_activity = SCIC_SDS_APC_ADD_PHY;
++
++ switch (apc_activity) {
++ case SCIC_SDS_APC_ADD_PHY:
++ status = sci_port_add_phy(iport, iphy);
++
++ if (status == SCI_SUCCESS) {
++ port_agent->phy_configured_mask |= (1 << iphy->phy_index);
++ }
++ break;
++
++ case SCIC_SDS_APC_START_TIMER:
++ /*
++ * This can occur for either a link down event, or a link
++ * up event where we cannot yet tell the port to which a
++ * phy belongs.
++ */
++ if (port_agent->timer_pending)
++ sci_del_timer(&port_agent->timer);
++
++ port_agent->timer_pending = true;
++ sci_mod_timer(&port_agent->timer,
++ SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
++ break;
++
++ case SCIC_SDS_APC_SKIP_PHY:
++ default:
++ /* do nothing; the PHY cannot be made part of a port at this time. */
++ break;
++ }
++}
++
++/**
++ * sci_apc_agent_link_up - handle apc link up events
++ * @ihost: This is the controller object that receives the link up
++ * notification.
++ * @port_agent: This is the port configuration agent for the controller.
++ * @iport: This is the port object associated with the phy. If there is no
++ * associated port this is NULL.
++ * @iphy: This is the phy object which has gone link up.
++ *
++ * This method handles the automatic port configuration for link up
++ * notifications. (Is it possible to get a link up notification from a phy
++ * that has no associated port?)
++ */
++static void sci_apc_agent_link_up(struct isci_host *ihost,
++ struct sci_port_configuration_agent *port_agent,
++ struct isci_port *iport,
++ struct isci_phy *iphy)
++{
++ u8 phy_index = iphy->phy_index;
++
++ if (!iport) {
++ /* the phy is not yet part of a port */
++ port_agent->phy_ready_mask |= 1 << phy_index;
++ sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
++ } else {
++ /* the phy is already part of the port */
++ u32 port_state = iport->sm.current_state_id;
++
++ /* if the port's state is resetting, then the link up is from
++ * a port hard reset; in this case we need to tell the port
++ * that link up was received
++ */
++ BUG_ON(port_state != SCI_PORT_RESETTING);
++ port_agent->phy_ready_mask |= 1 << phy_index;
++ sci_port_link_up(iport, iphy);
++ }
++}
++
++/**
++ *
++ * @ihost: This is the controller object that receives the link down
++ * notification.
++ * @port_agent: This is the port configuration agent for the controller.
++ * @iport: This is the port object associated with the phy. If there is no
++ * associated port this is NULL.
++ * @iphy: This is the phy object which has gone link down.
++ *
++ * This method handles the automatic port configuration link down
++ * notifications. If the phy is not associated with a port no action is
++ * taken. (Is it possible to get a link down notification from a phy that
++ * has no associated port?)
++ */
++static void sci_apc_agent_link_down(
++ struct isci_host *ihost,
++ struct sci_port_configuration_agent *port_agent,
++ struct isci_port *iport,
++ struct isci_phy *iphy)
++{
++ port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
++
++ if (!iport)
++ return;
++ if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) {
++ enum sci_status status;
++
++ status = sci_port_remove_phy(iport, iphy);
++
++ if (status == SCI_SUCCESS)
++ port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
++ }
++}
++
++/* configure the phys into ports when the timer fires */
++static void apc_agent_timeout(unsigned long data)
++{
++ u32 index;
++ struct sci_timer *tmr = (struct sci_timer *)data;
++ struct sci_port_configuration_agent *port_agent;
++ struct isci_host *ihost;
++ unsigned long flags;
++ u16 configure_phy_mask;
++
++ port_agent = container_of(tmr, typeof(*port_agent), timer);
++ ihost = container_of(port_agent, typeof(*ihost), port_agent);
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++
++ if (tmr->cancel)
++ goto done;
++
++ port_agent->timer_pending = false;
++
++ configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
++
++ if (!configure_phy_mask)
++ goto done; /* must drop scic_lock; a bare return would leak it */
++
++ for (index = 0; index < SCI_MAX_PHYS; index++) {
++ if ((configure_phy_mask & (1 << index)) == 0)
++ continue;
++
++ sci_apc_agent_configure_ports(ihost, port_agent,
++ &ihost->phys[index], false);
++ }
++
++done:
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++}
++
++/*
++ * ******************************************************************************
++ * Public port configuration agent routines
++ * ****************************************************************************** */
++
++/**
++ *
++ *
++ * This method will construct the port configuration agent for operation. This
++ * call is universal for both manual port configuration and automatic port
++ * configuration modes.
++ */
++void sci_port_configuration_agent_construct(
++ struct sci_port_configuration_agent *port_agent)
++{
++ u32 index;
++
++ port_agent->phy_configured_mask = 0x00;
++ port_agent->phy_ready_mask = 0x00;
++
++ port_agent->link_up_handler = NULL;
++ port_agent->link_down_handler = NULL;
++
++ port_agent->timer_pending = false;
++
++ for (index = 0; index < SCI_MAX_PORTS; index++) {
++ port_agent->phy_valid_port_range[index].min_index = 0;
++ port_agent->phy_valid_port_range[index].max_index = 0;
++ }
++}
++
++enum sci_status sci_port_configuration_agent_initialize(
++ struct isci_host *ihost,
++ struct sci_port_configuration_agent *port_agent)
++{
++ enum sci_status status;
++ enum sci_port_configuration_mode mode;
++
++ mode = ihost->oem_parameters.controller.mode_type;
++
++ if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
++ status = sci_mpc_agent_validate_phy_configuration(
++ ihost, port_agent);
++
++ port_agent->link_up_handler = sci_mpc_agent_link_up;
++ port_agent->link_down_handler = sci_mpc_agent_link_down;
++
++ sci_init_timer(&port_agent->timer, mpc_agent_timeout);
++ } else {
++ status = sci_apc_agent_validate_phy_configuration(
++ ihost, port_agent);
++
++ port_agent->link_up_handler = sci_apc_agent_link_up;
++ port_agent->link_down_handler = sci_apc_agent_link_down;
++
++ sci_init_timer(&port_agent->timer, apc_agent_timeout);
++ }
++
++ return status;
++}
+diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
+new file mode 100644
+index 0000000..b5f4341
+--- /dev/null
++++ b/drivers/scsi/isci/probe_roms.c
+@@ -0,0 +1,243 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ */
++
++/* probe_roms - scan for oem parameters */
++
++#include <linux/kernel.h>
++#include <linux/firmware.h>
++#include <linux/uaccess.h>
++#include <linux/efi.h>
++#include <asm/probe_roms.h>
++
++#include "isci.h"
++#include "task.h"
++#include "probe_roms.h"
++
++static efi_char16_t isci_efivar_name[] = {
++ 'R', 's', 't', 'S', 'c', 'u', 'O'
++};
++
++struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
++{
++ void __iomem *oprom = pci_map_biosrom(pdev);
++ struct isci_orom *rom = NULL;
++ size_t len, i;
++ int j;
++ char oem_sig[4];
++ struct isci_oem_hdr oem_hdr;
++ u8 *tmp, sum;
++
++ if (!oprom)
++ return NULL;
++
++ len = pci_biosrom_size(pdev);
++ rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL);
++ if (!rom) {
++ dev_warn(&pdev->dev,
++ "Unable to allocate memory for orom\n");
++ return NULL;
++ }
++
++ for (i = 0; i < len && rom; i += ISCI_OEM_SIG_SIZE) {
++ memcpy_fromio(oem_sig, oprom + i, ISCI_OEM_SIG_SIZE);
++
++ /* we think we found the OEM table */
++ if (memcmp(oem_sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) == 0) {
++ size_t copy_len;
++
++ memcpy_fromio(&oem_hdr, oprom + i, sizeof(oem_hdr));
++
++ copy_len = min(oem_hdr.len - sizeof(oem_hdr),
++ sizeof(*rom));
++
++ memcpy_fromio(rom,
++ oprom + i + sizeof(oem_hdr),
++ copy_len);
++
++ /* calculate checksum */
++ tmp = (u8 *)&oem_hdr;
++ for (j = 0, sum = 0; j < sizeof(oem_hdr); j++, tmp++)
++ sum += *tmp;
++
++ tmp = (u8 *)rom;
++ for (j = 0; j < sizeof(*rom); j++, tmp++)
++ sum += *tmp;
++
++ if (sum != 0) {
++ dev_warn(&pdev->dev,
++ "OEM table checksum failed\n");
++ continue;
++ }
++
++ /* keep going if that's not the oem param table */
++ if (memcmp(rom->hdr.signature,
++ ISCI_ROM_SIG,
++ ISCI_ROM_SIG_SIZE) != 0)
++ continue;
++
++ dev_info(&pdev->dev,
++ "OEM parameter table found in OROM\n");
++ break;
++ }
++ }
++
++ if (i >= len) {
++ dev_err(&pdev->dev, "oprom parse error\n");
++ devm_kfree(&pdev->dev, rom);
++ rom = NULL;
++ }
++ pci_unmap_biosrom(oprom);
++
++ return rom;
++}
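++/* The OEM table above is validated with a simple additive checksum: the
++ * byte sum of the header and the parameter block must be zero mod 256.
++ * A user-space sketch of the same check (illustrative, assuming
++ * <stdint.h> and <stddef.h>):
++ *
++ *	static int oem_csum_ok(const uint8_t *buf, size_t len)
++ *	{
++ *		uint8_t sum = 0;
++ *
++ *		while (len--)
++ *			sum += *buf++;
++ *		return sum == 0;
++ *	}
++ */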
++
++enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
++ struct isci_orom *orom, int scu_index)
++{
++ /* check for valid inputs */
++ if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS ||
++ scu_index > orom->hdr.num_elements || !oem)
++ return -EINVAL;
++
++ *oem = orom->ctrl[scu_index];
++ return 0;
++}
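++/* Typical call sequence (illustrative): a probe path first obtains an
++ * orom via isci_request_oprom(), isci_request_firmware() or
++ * isci_get_efi_var(), then extracts the per-controller block;
++ * fall_back_to_defaults() is a hypothetical helper:
++ *
++ *	struct sci_oem_params oem;
++ *
++ *	if (isci_parse_oem_parameters(&oem, orom, scu_index) != 0)
++ *		fall_back_to_defaults();
++ */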
++
++struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw)
++{
++ struct isci_orom *orom = NULL, *data;
++ int i, j;
++
++ if (request_firmware(&fw, ISCI_FW_NAME, &pdev->dev) != 0)
++ return NULL;
++
++ if (fw->size < sizeof(*orom))
++ goto out;
++
++ data = (struct isci_orom *)fw->data;
++
++ if (strncmp(ISCI_ROM_SIG, data->hdr.signature,
++ strlen(ISCI_ROM_SIG)) != 0)
++ goto out;
++
++ orom = devm_kzalloc(&pdev->dev, fw->size, GFP_KERNEL);
++ if (!orom)
++ goto out;
++
++ memcpy(orom, fw->data, fw->size);
++
++ if (is_c0(pdev))
++ goto out;
++
++ /*
++ * deprecated: override default amp_control for pre-preproduction
++ * silicon revisions
++ */
++ for (i = 0; i < ARRAY_SIZE(orom->ctrl); i++)
++ for (j = 0; j < ARRAY_SIZE(orom->ctrl[i].phys); j++) {
++ orom->ctrl[i].phys[j].afe_tx_amp_control0 = 0xe7c03;
++ orom->ctrl[i].phys[j].afe_tx_amp_control1 = 0xe7c03;
++ orom->ctrl[i].phys[j].afe_tx_amp_control2 = 0xe7c03;
++ orom->ctrl[i].phys[j].afe_tx_amp_control3 = 0xe7c03;
++ }
++ out:
++ release_firmware(fw);
++
++ return orom;
++}
++
++static struct efi *get_efi(void)
++{
++#ifdef CONFIG_EFI
++ return &efi;
++#else
++ return NULL;
++#endif
++}
++
++struct isci_orom *isci_get_efi_var(struct pci_dev *pdev)
++{
++ efi_status_t status;
++ struct isci_orom *rom;
++ struct isci_oem_hdr *oem_hdr;
++ u8 *tmp, sum;
++ int j;
++ unsigned long data_len;
++ u8 *efi_data;
++ u32 efi_attrib = 0;
++
++ data_len = 1024;
++ efi_data = devm_kzalloc(&pdev->dev, data_len, GFP_KERNEL);
++ if (!efi_data) {
++ dev_warn(&pdev->dev,
++ "Unable to allocate memory for EFI data\n");
++ return NULL;
++ }
++
++ rom = (struct isci_orom *)(efi_data + sizeof(struct isci_oem_hdr));
++
++ if (get_efi())
++ status = get_efi()->get_variable(isci_efivar_name,
++ &ISCI_EFI_VENDOR_GUID,
++ &efi_attrib,
++ &data_len,
++ efi_data);
++ else
++ status = EFI_NOT_FOUND;
++
++ if (status != EFI_SUCCESS) {
++ dev_warn(&pdev->dev,
++ "Unable to obtain EFI var data for OEM parms\n");
++ return NULL;
++ }
++
++ oem_hdr = (struct isci_oem_hdr *)efi_data;
++
++ if (memcmp(oem_hdr->sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) != 0) {
++ dev_warn(&pdev->dev,
++ "Invalid OEM header signature\n");
++ return NULL;
++ }
++
++ /* calculate checksum */
++ tmp = (u8 *)efi_data;
++ for (j = 0, sum = 0; j < (sizeof(*oem_hdr) + sizeof(*rom)); j++, tmp++)
++ sum += *tmp;
++
++ if (sum != 0) {
++ dev_warn(&pdev->dev,
++ "OEM table checksum failed\n");
++ return NULL;
++ }
++
++ if (memcmp(rom->hdr.signature,
++ ISCI_ROM_SIG,
++ ISCI_ROM_SIG_SIZE) != 0) {
++ dev_warn(&pdev->dev,
++ "Invalid OEM table signature\n");
++ return NULL;
++ }
++
++ return rom;
++}
+diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
+new file mode 100644
+index 0000000..dc007e6
+--- /dev/null
++++ b/drivers/scsi/isci/probe_roms.h
+@@ -0,0 +1,249 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _ISCI_PROBE_ROMS_H_
++#define _ISCI_PROBE_ROMS_H_
++
++#ifdef __KERNEL__
++#include <linux/firmware.h>
++#include <linux/pci.h>
++#include <linux/efi.h>
++#include "isci.h"
++
++#define SCIC_SDS_PARM_NO_SPEED 0
++
++/* generation 1 (i.e. 1.5 Gb/s) */
++#define SCIC_SDS_PARM_GEN1_SPEED 1
++
++/* generation 2 (i.e. 3.0 Gb/s) */
++#define SCIC_SDS_PARM_GEN2_SPEED 2
++
++/* generation 3 (i.e. 6.0 Gb/s) */
++#define SCIC_SDS_PARM_GEN3_SPEED 3
++#define SCIC_SDS_PARM_MAX_SPEED SCIC_SDS_PARM_GEN3_SPEED
++
++/* parameters that can be set by module parameters */
++struct sci_user_parameters {
++ struct sci_phy_user_params {
++ /**
++ * This field specifies the NOTIFY (ENABLE SPIN UP) primitive
++ * insertion frequency for this phy index.
++ */
++ u32 notify_enable_spin_up_insertion_frequency;
++
++ /**
++ * This field specifies the number of transmitted DWORDs within which
++ * to transmit a single ALIGN primitive. This value applies regardless
++ * of what type of device is attached or connection state. A value of
++ * 0 indicates that no ALIGN primitives will be inserted.
++ */
++ u16 align_insertion_frequency;
++
++ /**
++ * This field specifies the number of transmitted DWORDs within which
++ * to transmit 2 ALIGN primitives. This applies for SAS connections
++ * only. A minimum value of 3 is required for this field.
++ */
++ u16 in_connection_align_insertion_frequency;
++
++ /**
++ * This field indicates the maximum speed generation to be utilized
++ * by phys in the supplied port.
++ * - A value of 1 indicates generation 1 (i.e. 1.5 Gb/s).
++ * - A value of 2 indicates generation 2 (i.e. 3.0 Gb/s).
++ * - A value of 3 indicates generation 3 (i.e. 6.0 Gb/s).
++ */
++ u8 max_speed_generation;
++
++ } phys[SCI_MAX_PHYS];
++
++ /**
++ * This field specifies the maximum number of direct attached devices
++ * that can have power supplied to them simultaneously.
++ */
++ u8 max_number_concurrent_device_spin_up;
++
++ /**
++ * This field specifies the number of seconds to allow a phy to consume
++ * power before yielding to another phy.
++ *
++ */
++ u8 phy_spin_up_delay_interval;
++
++ /**
++ * These timer values specify how long a link will remain open with no
++ * activity, in increments of a microsecond; they can be in increments of
++ * 100 microseconds if the uppermost bit is set.
++ *
++ */
++ u16 stp_inactivity_timeout;
++ u16 ssp_inactivity_timeout;
++
++ /**
++ * These timer values specify how long a link will remain open, in increments
++ * of 100 microseconds.
++ *
++ */
++ u16 stp_max_occupancy_timeout;
++ u16 ssp_max_occupancy_timeout;
++
++ /**
++ * This timer value specifies how long a link will remain open with no
++ * outbound traffic in increments of a microsecond.
++ *
++ */
++ u8 no_outbound_task_timeout;
++
++};
++
++#define SCIC_SDS_PARM_PHY_MASK_MIN 0x0
++#define SCIC_SDS_PARM_PHY_MASK_MAX 0xF
++#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
++
++struct sci_oem_params;
++int sci_oem_parameters_validate(struct sci_oem_params *oem);
++
++struct isci_orom;
++struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
++enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
++ struct isci_orom *orom, int scu_index);
++struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw);
++struct isci_orom *isci_get_efi_var(struct pci_dev *pdev);
++
++struct isci_oem_hdr {
++ u8 sig[4];
++ u8 rev_major;
++ u8 rev_minor;
++ u16 len;
++ u8 checksum;
++ u8 reserved1;
++ u16 reserved2;
++} __attribute__ ((packed));
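++/* Layout note (editorial): the packed header above occupies 12 bytes,
++ * and isci_request_oprom() copies min(oem_hdr.len - sizeof(oem_hdr),
++ * sizeof(struct isci_orom)) bytes of payload that follow it, so 'len'
++ * covers the header plus the parameter payload. */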
++
++#else
++#define SCI_MAX_PORTS 4
++#define SCI_MAX_PHYS 4
++#define SCI_MAX_CONTROLLERS 2
++#endif
++
++#define ISCI_FW_NAME "isci/isci_firmware.bin"
++
++#define ROMSIGNATURE 0xaa55
++
++#define ISCI_OEM_SIG "$OEM"
++#define ISCI_OEM_SIG_SIZE 4
++#define ISCI_ROM_SIG "ISCUOEMB"
++#define ISCI_ROM_SIG_SIZE 8
++
++#define ISCI_EFI_VENDOR_GUID \
++ EFI_GUID(0x193dfefa, 0xa445, 0x4302, 0x99, 0xd8, 0xef, 0x3a, 0xad, \
++ 0x1a, 0x04, 0xc6)
++#define ISCI_EFI_VAR_NAME "RstScuO"
++
++/* Allowed PORT configuration modes:
++ *
++ * APC (Automatic PORT configuration) mode is defined by the OEM
++ * configuration parameters providing no PHY_MASK parameters for any PORT,
++ * i.e. there are no phys assigned to any of the ports at start.
++ *
++ * MPC (Manual PORT configuration) mode is defined by the OEM configuration
++ * parameters providing a PHY_MASK value for any PORT. It is assumed that
++ * any PORT with no PHY_MASK is an invalid port; not all PHYs must be
++ * assigned. A PHY_MASK that assigns just a single PHY to a port, with no
++ * other PHYs assigned, is sufficient to declare manual PORT configuration.
++ */
++enum sci_port_configuration_mode {
++ SCIC_PORT_MANUAL_CONFIGURATION_MODE = 0,
++ SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE = 1
++};
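++/* Example (illustrative, per the comment above): an OEM block whose
++ * ports[].phy_mask values are all zero selects
++ * SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE, while phy_mask values of
++ * { 0x1, 0x0, 0x0, 0x0 } (a single phy pinned to port 0) are already
++ * sufficient to declare SCIC_PORT_MANUAL_CONFIGURATION_MODE. */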
++
++struct sci_bios_oem_param_block_hdr {
++ uint8_t signature[ISCI_ROM_SIG_SIZE];
++ uint16_t total_block_length;
++ uint8_t hdr_length;
++ uint8_t version;
++ uint8_t preboot_source;
++ uint8_t num_elements;
++ uint16_t element_length;
++ uint8_t reserved[8];
++} __attribute__ ((packed));
++
++struct sci_oem_params {
++ struct {
++ uint8_t mode_type;
++ uint8_t max_concurrent_dev_spin_up;
++ uint8_t do_enable_ssc;
++ uint8_t reserved;
++ } controller;
++
++ struct {
++ uint8_t phy_mask;
++ } ports[SCI_MAX_PORTS];
++
++ struct sci_phy_oem_params {
++ struct {
++ uint32_t high;
++ uint32_t low;
++ } sas_address;
++
++ uint32_t afe_tx_amp_control0;
++ uint32_t afe_tx_amp_control1;
++ uint32_t afe_tx_amp_control2;
++ uint32_t afe_tx_amp_control3;
++ } phys[SCI_MAX_PHYS];
++} __attribute__ ((packed));
++
++struct isci_orom {
++ struct sci_bios_oem_param_block_hdr hdr;
++ struct sci_oem_params ctrl[SCI_MAX_CONTROLLERS];
++} __attribute__ ((packed));
++
++#endif
+diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
+new file mode 100644
+index 0000000..00afc73
+--- /dev/null
++++ b/drivers/scsi/isci/registers.h
+@@ -0,0 +1,1946 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _SCU_REGISTERS_H_
++#define _SCU_REGISTERS_H_
++
++/**
++ * This file contains the constants and structures for the SCU memory mapped
++ * registers.
++ *
++ *
++ */
++
++#define SCU_VIIT_ENTRY_ID_MASK (0xC0000000)
++#define SCU_VIIT_ENTRY_ID_SHIFT (30)
++
++#define SCU_VIIT_ENTRY_FUNCTION_MASK (0x0FF00000)
++#define SCU_VIIT_ENTRY_FUNCTION_SHIFT (20)
++
++#define SCU_VIIT_ENTRY_IPPTMODE_MASK (0x0001F800)
++#define SCU_VIIT_ENTRY_IPPTMODE_SHIFT (12)
++
++#define SCU_VIIT_ENTRY_LPVIE_MASK (0x00000F00)
++#define SCU_VIIT_ENTRY_LPVIE_SHIFT (8)
++
++#define SCU_VIIT_ENTRY_STATUS_MASK (0x000000FF)
++#define SCU_VIIT_ENTRY_STATUS_SHIFT (0)
++
++#define SCU_VIIT_ENTRY_ID_INVALID (0 << SCU_VIIT_ENTRY_ID_SHIFT)
++#define SCU_VIIT_ENTRY_ID_VIIT (1 << SCU_VIIT_ENTRY_ID_SHIFT)
++#define SCU_VIIT_ENTRY_ID_IIT (2 << SCU_VIIT_ENTRY_ID_SHIFT)
++#define SCU_VIIT_ENTRY_ID_VIRT_EXP (3 << SCU_VIIT_ENTRY_ID_SHIFT)
++
++#define SCU_VIIT_IPPT_SSP_INITIATOR (0x01 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
++#define SCU_VIIT_IPPT_SMP_INITIATOR (0x02 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
++#define SCU_VIIT_IPPT_STP_INITIATOR (0x04 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
++#define SCU_VIIT_IPPT_INITIATOR \
++ (\
++ SCU_VIIT_IPPT_SSP_INITIATOR \
++ | SCU_VIIT_IPPT_SMP_INITIATOR \
++ | SCU_VIIT_IPPT_STP_INITIATOR \
++ )
++
++#define SCU_VIIT_STATUS_RNC_VALID (0x01 << SCU_VIIT_ENTRY_STATUS_SHIFT)
++#define SCU_VIIT_STATUS_ADDRESS_VALID (0x02 << SCU_VIIT_ENTRY_STATUS_SHIFT)
++#define SCU_VIIT_STATUS_RNI_VALID (0x04 << SCU_VIIT_ENTRY_STATUS_SHIFT)
++#define SCU_VIIT_STATUS_ALL_VALID \
++ (\
++ SCU_VIIT_STATUS_RNC_VALID \
++ | SCU_VIIT_STATUS_ADDRESS_VALID \
++ | SCU_VIIT_STATUS_RNI_VALID \
++ )
++
++#define SCU_VIIT_IPPT_SMP_TARGET (0x10 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
++
++/**
++ * struct scu_viit_entry - This is the SCU Virtual Initiator Table Entry
++ *
++ *
++ */
++struct scu_viit_entry {
++ /**
++ * This field encodes the type of initiator that is being constructed
++ * for this port.
++ */
++ u32 status;
++
++ /**
++ * Virtual initiator high SAS Address
++ */
++ u32 initiator_sas_address_hi;
++
++ /**
++ * Virtual initiator low SAS Address
++ */
++ u32 initiator_sas_address_lo;
++
++ /**
++ * This must be 0
++ */
++ u32 reserved;
++
++};
++
++
++/* IIT Status Defines */
++#define SCU_IIT_ENTRY_ID_MASK (0xC0000000)
++#define SCU_IIT_ENTRY_ID_SHIFT (30)
++
++#define SCU_IIT_ENTRY_STATUS_UPDATE_MASK (0x20000000)
++#define SCU_IIT_ENTRY_STATUS_UPDATE_SHIFT (29)
++
++#define SCU_IIT_ENTRY_LPI_MASK (0x00000F00)
++#define SCU_IIT_ENTRY_LPI_SHIFT (8)
++
++#define SCU_IIT_ENTRY_STATUS_MASK (0x000000FF)
++#define SCU_IIT_ENTRY_STATUS_SHIFT (0)
++
++/* IIT Remote Initiator Defines */
++#define SCU_IIT_ENTRY_REMOTE_TAG_MASK (0x0000FFFF)
++#define SCU_IIT_ENTRY_REMOTE_TAG_SHIFT (0)
++
++#define SCU_IIT_ENTRY_REMOTE_RNC_MASK (0x0FFF0000)
++#define SCU_IIT_ENTRY_REMOTE_RNC_SHIFT (16)
++
++#define SCU_IIT_ENTRY_ID_INVALID (0 << SCU_IIT_ENTRY_ID_SHIFT)
++#define SCU_IIT_ENTRY_ID_VIIT (1 << SCU_IIT_ENTRY_ID_SHIFT)
++#define SCU_IIT_ENTRY_ID_IIT (2 << SCU_IIT_ENTRY_ID_SHIFT)
++#define SCU_IIT_ENTRY_ID_VIRT_EXP (3 << SCU_IIT_ENTRY_ID_SHIFT)
++
++/**
++ * struct scu_iit_entry - This will be implemented later when we support
++ * virtual functions
++ *
++ *
++ */
++struct scu_iit_entry {
++ u32 status;
++ u32 remote_initiator_sas_address_hi;
++ u32 remote_initiator_sas_address_lo;
++ u32 remote_initiator;
++
++};
++
++/* Generate a value for an SCU register */
++#define SCU_GEN_VALUE(name, value) \
++ (((value) << name ## _SHIFT) & (name ## _MASK))
++
++/*
++ * Generate a bit value for an SCU register
++ * Make sure that the register MASK is just a single bit */
++#define SCU_GEN_BIT(name) \
++ SCU_GEN_VALUE(name, ((u32)1))
++
++#define SCU_SET_BIT(name, reg_value) \
++ ((reg_value) | SCU_GEN_BIT(name))
++
++#define SCU_CLEAR_BIT(name, reg_value) \
++ ((reg_value) & ~(SCU_GEN_BIT(name)))
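++/* Expansion example (editorial): with the SMU post context definitions
++ * below, SMU_PCP_GEN_VAL(LOGICAL_PORT_INDEX, 2) evaluates to
++ * ((2 << 12) & 0x0000F000) == 0x00002000, and SCU_SET_BIT() /
++ * SCU_CLEAR_BIT() set or clear exactly the single bit named by the
++ * corresponding _SHIFT/_MASK pair. */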
++
++/*
++ * *****************************************************************************
++ * Unions for bitfield definitions of SCU Registers
++ * SMU Post Context Port
++ * ***************************************************************************** */
++#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_SHIFT (0)
++#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_MASK (0x00000FFF)
++#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_SHIFT (12)
++#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_MASK (0x0000F000)
++#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_SHIFT (16)
++#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_MASK (0x00030000)
++#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_SHIFT (18)
++#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_MASK (0x00FC0000)
++#define SMU_POST_CONTEXT_PORT_RESERVED_MASK (0xFF000000)
++
++#define SMU_PCP_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SMU_POST_CONTEXT_PORT_ ## name, value)
++
++/* ***************************************************************************** */
++#define SMU_INTERRUPT_STATUS_COMPLETION_SHIFT (31)
++#define SMU_INTERRUPT_STATUS_COMPLETION_MASK (0x80000000)
++#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_SHIFT (1)
++#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_MASK (0x00000002)
++#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_SHIFT (0)
++#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_MASK (0x00000001)
++#define SMU_INTERRUPT_STATUS_RESERVED_MASK (0x7FFFFFFC)
++
++#define SMU_ISR_GEN_BIT(name) \
++ SCU_GEN_BIT(SMU_INTERRUPT_STATUS_ ## name)
++
++#define SMU_ISR_QUEUE_ERROR SMU_ISR_GEN_BIT(QUEUE_ERROR)
++#define SMU_ISR_QUEUE_SUSPEND SMU_ISR_GEN_BIT(QUEUE_SUSPEND)
++#define SMU_ISR_COMPLETION SMU_ISR_GEN_BIT(COMPLETION)
++
++/* ***************************************************************************** */
++#define SMU_INTERRUPT_MASK_COMPLETION_SHIFT (31)
++#define SMU_INTERRUPT_MASK_COMPLETION_MASK (0x80000000)
++#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_SHIFT (1)
++#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_MASK (0x00000002)
++#define SMU_INTERRUPT_MASK_QUEUE_ERROR_SHIFT (0)
++#define SMU_INTERRUPT_MASK_QUEUE_ERROR_MASK (0x00000001)
++#define SMU_INTERRUPT_MASK_RESERVED_MASK (0x7FFFFFFC)
++
++#define SMU_IMR_GEN_BIT(name) \
++ SCU_GEN_BIT(SMU_INTERRUPT_MASK_ ## name)
++
++#define SMU_IMR_QUEUE_ERROR SMU_IMR_GEN_BIT(QUEUE_ERROR)
++#define SMU_IMR_QUEUE_SUSPEND SMU_IMR_GEN_BIT(QUEUE_SUSPEND)
++#define SMU_IMR_COMPLETION SMU_IMR_GEN_BIT(COMPLETION)
++
++/* ***************************************************************************** */
++#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_SHIFT (0)
++#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_MASK (0x0000001F)
++#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_SHIFT (8)
++#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_MASK (0x0000FF00)
++#define SMU_INTERRUPT_COALESCING_CONTROL_RESERVED_MASK (0xFFFF00E0)
++
++#define SMU_ICC_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SMU_INTERRUPT_COALESCING_CONTROL_ ## name, value)
++
++/* ***************************************************************************** */
++#define SMU_TASK_CONTEXT_RANGE_START_SHIFT (0)
++#define SMU_TASK_CONTEXT_RANGE_START_MASK (0x00000FFF)
++#define SMU_TASK_CONTEXT_RANGE_ENDING_SHIFT (16)
++#define SMU_TASK_CONTEXT_RANGE_ENDING_MASK (0x0FFF0000)
++#define SMU_TASK_CONTEXT_RANGE_ENABLE_SHIFT (31)
++#define SMU_TASK_CONTEXT_RANGE_ENABLE_MASK (0x80000000)
++#define SMU_TASK_CONTEXT_RANGE_RESERVED_MASK (0x7000F000)
++
++#define SMU_TCR_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SMU_TASK_CONTEXT_RANGE_ ## name, value)
++
++#define SMU_TCR_GEN_BIT(name, value) \
++ SCU_GEN_BIT(SMU_TASK_CONTEXT_RANGE_ ## name)
++
++/* ***************************************************************************** */
++
++#define SMU_COMPLETION_QUEUE_PUT_POINTER_SHIFT (0)
++#define SMU_COMPLETION_QUEUE_PUT_POINTER_MASK (0x00003FFF)
++#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_SHIFT (15)
++#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_MASK (0x00008000)
++#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_SHIFT (16)
++#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_MASK (0x03FF0000)
++#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_SHIFT (26)
++#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_MASK (0x04000000)
++#define SMU_COMPLETION_QUEUE_PUT_RESERVED_MASK (0xF8004000)
++
++#define SMU_CQPR_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_PUT_ ## name, value)
++
++#define SMU_CQPR_GEN_BIT(name) \
++ SCU_GEN_BIT(SMU_COMPLETION_QUEUE_PUT_ ## name)
++
++/* ***************************************************************************** */
++
++#define SMU_COMPLETION_QUEUE_GET_POINTER_SHIFT (0)
++#define SMU_COMPLETION_QUEUE_GET_POINTER_MASK (0x00003FFF)
++#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT (15)
++#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_MASK (0x00008000)
++#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT (16)
++#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK (0x03FF0000)
++#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT (26)
++#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_MASK (0x04000000)
++#define SMU_COMPLETION_QUEUE_GET_ENABLE_SHIFT (30)
++#define SMU_COMPLETION_QUEUE_GET_ENABLE_MASK (0x40000000)
++#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_SHIFT (31)
++#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_MASK (0x80000000)
++#define SMU_COMPLETION_QUEUE_GET_RESERVED_MASK (0x38004000)
++
++#define SMU_CQGR_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_GET_ ## name, value)
++
++#define SMU_CQGR_GEN_BIT(name) \
++ SCU_GEN_BIT(SMU_COMPLETION_QUEUE_GET_ ## name)
++
++#define SMU_CQGR_CYCLE_BIT \
++ SMU_CQGR_GEN_BIT(CYCLE_BIT)
++
++#define SMU_CQGR_EVENT_CYCLE_BIT \
++ SMU_CQGR_GEN_BIT(EVENT_CYCLE_BIT)
++
++#define SMU_CQGR_GET_POINTER_SET(value) \
++ SMU_CQGR_GEN_VAL(POINTER, value)
++
++
++/* ***************************************************************************** */
++#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_SHIFT (0)
++#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_MASK (0x00003FFF)
++#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_SHIFT (16)
++#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_MASK (0x03FF0000)
++#define SMU_COMPLETION_QUEUE_CONTROL_RESERVED_MASK (0xFC00C000)
++
++#define SMU_CQC_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_CONTROL_ ## name, value)
++
++#define SMU_CQC_QUEUE_LIMIT_SET(value) \
++ SMU_CQC_GEN_VAL(QUEUE_LIMIT, value)
++
++#define SMU_CQC_EVENT_LIMIT_SET(value) \
++ SMU_CQC_GEN_VAL(EVENT_LIMIT, value)
++
++
++/* ***************************************************************************** */
++#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT (0)
++#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK (0x00000FFF)
++#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT (12)
++#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK (0x00007000)
++#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT (15)
++#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK (0x07FF8000)
++#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT (27)
++#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK (0x08000000)
++#define SMU_DEVICE_CONTEXT_CAPACITY_RESERVED_MASK (0xF0000000)
++
++#define SMU_DCC_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SMU_DEVICE_CONTEXT_CAPACITY_ ## name, value)
++
++#define SMU_DCC_GET_MAX_PEG(value) \
++ (\
++ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK) \
++ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT \
++ )
++
++#define SMU_DCC_GET_MAX_LP(value) \
++ (\
++ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
++ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT \
++ )
++
++#define SMU_DCC_GET_MAX_TC(value) \
++ (\
++ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
++ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT \
++ )
++
++#define SMU_DCC_GET_MAX_RNC(value) \
++ (\
++ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
++ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \
++ )
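++/*
++ * Example (illustrative only) of decoding the device context capacity
++ * register with the getters above; 'smu' is a hypothetical pointer to
++ * the struct smu_registers defined later in this file:
++ *
++ *   u32 dcc = readl(&smu->device_context_capacity);
++ *   u32 max_tc = SMU_DCC_GET_MAX_TC(dcc);
++ *   u32 max_rnc = SMU_DCC_GET_MAX_RNC(dcc);
++ */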
++
++/* -------------------------------------------------------------------------- */
++
++#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT (0)
++#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_MASK (0x00000001)
++#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_SHIFT (1)
++#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_MASK (0x00000002)
++#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_SHIFT (16)
++#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_MASK (0x00010000)
++#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_SHIFT (17)
++#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_MASK (0x00020000)
++#define SMU_CONTROL_STATUS_RESERVED_MASK (0xFFFCFFFC)
++
++#define SMU_SMUCSR_GEN_BIT(name) \
++ SCU_GEN_BIT(SMU_CONTROL_STATUS_ ## name)
++
++#define SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \
++ (SMU_SMUCSR_GEN_BIT(SCHEDULER_RAM_INIT_COMPLETED))
++
++#define SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \
++ (SMU_SMUCSR_GEN_BIT(CONTEXT_RAM_INIT_COMPLETED))
++
++#define SCU_RAM_INIT_COMPLETED \
++ (\
++ SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \
++ | SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \
++ )
++
++/* -------------------------------------------------------------------------- */
++
++#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_SHIFT (0)
++#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_MASK (0x00000001)
++#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_SHIFT (1)
++#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_MASK (0x00000002)
++#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_SHIFT (2)
++#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_MASK (0x00000004)
++#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_SHIFT (3)
++#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_MASK (0x00000008)
++#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_SHIFT (8)
++#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_MASK (0x00000100)
++#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_SHIFT (9)
++#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_MASK (0x00000200)
++#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_SHIFT (10)
++#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_MASK (0x00000400)
++#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_SHIFT (11)
++#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_MASK (0x00000800)
++
++#define SMU_RESET_PROTOCOL_ENGINE(peg, pe) \
++ ((1 << (pe)) << ((peg) * 8))
++
++#define SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
++ (\
++ SMU_RESET_PROTOCOL_ENGINE(peg, 0) \
++ | SMU_RESET_PROTOCOL_ENGINE(peg, 1) \
++ | SMU_RESET_PROTOCOL_ENGINE(peg, 2) \
++ | SMU_RESET_PROTOCOL_ENGINE(peg, 3) \
++ )
++
++#define SMU_RESET_ALL_PROTOCOL_ENGINES() \
++ (\
++ SMU_RESET_PEG_PROTOCOL_ENGINES(0) \
++ | SMU_RESET_PEG_PROTOCOL_ENGINES(1) \
++ )
++
++#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_SHIFT (16)
++#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_MASK (0x00010000)
++#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_SHIFT (17)
++#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_MASK (0x00020000)
++#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_SHIFT (18)
++#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_MASK (0x00040000)
++#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_SHIFT (19)
++#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_MASK (0x00080000)
++
++#define SMU_RESET_WIDE_PORT_QUEUE(peg, wide_port) \
++ ((1 << ((wide_port) / 2)) << ((peg) * 2) << 16)
++
++#define SMU_SOFTRESET_CONTROL_RESET_PEG0_SHIFT (20)
++#define SMU_SOFTRESET_CONTROL_RESET_PEG0_MASK (0x00100000)
++#define SMU_SOFTRESET_CONTROL_RESET_PEG1_SHIFT (21)
++#define SMU_SOFTRESET_CONTROL_RESET_PEG1_MASK (0x00200000)
++#define SMU_SOFTRESET_CONTROL_RESET_SCU_SHIFT (22)
++#define SMU_SOFTRESET_CONTROL_RESET_SCU_MASK (0x00400000)
++
++/*
++ * It seems to make sense that if you are going to reset the protocol
++ * engine group, you would also reset all of its protocol engines. */
++#define SMU_RESET_PROTOCOL_ENGINE_GROUP(peg) \
++ (\
++ (1 << ((peg) + 20)) \
++ | SMU_RESET_WIDE_PORT_QUEUE(peg, 0) \
++ | SMU_RESET_WIDE_PORT_QUEUE(peg, 1) \
++ | SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
++ )
++
++#define SMU_RESET_ALL_PROTOCOL_ENGINE_GROUPS() \
++ (\
++ SMU_RESET_PROTOCOL_ENGINE_GROUP(0) \
++ | SMU_RESET_PROTOCOL_ENGINE_GROUP(1) \
++ )
++
++#define SMU_RESET_SCU() (0xFFFFFFFF)
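++/*
++ * Worked examples for the soft reset helpers above; the values follow
++ * directly from the macro arithmetic and match the masks defined in
++ * this section:
++ *
++ *   SMU_RESET_PROTOCOL_ENGINE(0, 0)   == 0x00000001   (PEG0 PE0)
++ *   SMU_RESET_PROTOCOL_ENGINE(1, 2)   == 0x00000400   (PEG1 PE2)
++ *   SMU_RESET_PEG_PROTOCOL_ENGINES(1) == 0x00000F00   (PEG1 PE0-PE3)
++ *   SMU_RESET_WIDE_PORT_QUEUE(1, 2)   == 0x00080000   (PEG1 LP2)
++ */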
++
++
++
++/* ***************************************************************************** */
++#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_SHIFT (0)
++#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_MASK (0x00000FFF)
++#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_SHIFT (16)
++#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_MASK (0x0FFF0000)
++#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_SHIFT (31)
++#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_MASK (0x80000000)
++#define SMU_TASK_CONTEXT_ASSIGNMENT_RESERVED_MASK (0x7000F000)
++
++#define SMU_TCA_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name, value)
++
++#define SMU_TCA_GEN_BIT(name) \
++ SCU_GEN_BIT(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name)
++
++/* ***************************************************************************** */
++#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_SHIFT (0)
++#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_MASK (0x00000FFF)
++#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_RESERVED_MASK (0xFFFFF000)
++
++#define SCU_UFQC_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_ ## name, value)
++
++#define SCU_UFQC_QUEUE_SIZE_SET(value) \
++ SCU_UFQC_GEN_VAL(QUEUE_SIZE, value)
++
++/* ***************************************************************************** */
++#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_SHIFT (0)
++#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_MASK (0x00000FFF)
++#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_SHIFT (12)
++#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_MASK (0x00001000)
++#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_RESERVED_MASK (0xFFFFE000)
++
++#define SCU_UFQPP_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name, value)
++
++#define SCU_UFQPP_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name)
++
++/*
++ * *****************************************************************************
++ * * SDMA Registers
++ * ***************************************************************************** */
++#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_SHIFT (0)
++#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_MASK (0x00000FFF)
++#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_SHIFT (12)
++#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_MASK (0x00001000)
++#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_SHIFT (31)
++#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_MASK (0x80000000)
++#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_RESERVED_MASK (0x7FFFE000)
++
++#define SCU_UFQGP_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name, value)
++
++#define SCU_UFQGP_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name)
++
++#define SCU_UFQGP_CYCLE_BIT(value) \
++ ((value) ? SCU_UFQGP_GEN_BIT(CYCLE_BIT) : 0)
++
++#define SCU_UFQGP_GET_POINTER(value) \
++ SCU_UFQGP_GEN_VAL(POINTER, value)
++
++#define SCU_UFQGP_ENABLE(value) \
++ (SCU_UFQGP_GEN_BIT(ENABLE_BIT) | (value))
++
++#define SCU_UFQGP_DISABLE(value) \
++ (~SCU_UFQGP_GEN_BIT(ENABLE_BIT) & (value))
++
++#define SCU_UFQGP_VALUE(bit, value) \
++ (SCU_UFQGP_CYCLE_BIT(bit) | SCU_UFQGP_GET_POINTER(value))
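++/*
++ * Example (illustrative only): composing an unsolicited frame queue
++ * get pointer update; 'cycle' and 'index' are hypothetical locals:
++ *
++ *   u32 get = SCU_UFQGP_VALUE(cycle, index);
++ *
++ * sets the cycle bit (bit 12) when 'cycle' is non-zero and places
++ * 'index' in the pointer field (bits 0-11).
++ */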
++
++/* ***************************************************************************** */
++#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SHIFT (0)
++#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_MASK (0x0000FFFF)
++#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT (16)
++#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK (0x00010000)
++#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_SHIFT (17)
++#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_MASK (0x00020000)
++#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_SHIFT (18)
++#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_MASK (0x00040000)
++#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_SHIFT (19)
++#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_MASK (0x00080000)
++#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_SHIFT (20)
++#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_MASK (0x00100000)
++#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_SHIFT (21)
++#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_MASK (0x00200000)
++#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_SHIFT (22)
++#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_MASK (0x00400000)
++#define SCU_PDMA_CONFIGURATION_RESERVED_MASK (0xFF800000)
++
++#define SCU_PDMACR_GEN_VALUE(name, value) \
++ SCU_GEN_VALUE(SCU_PDMA_CONFIGURATION_ ## name, value)
++
++#define SCU_PDMACR_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_PDMA_CONFIGURATION_ ## name)
++
++#define SCU_PDMACR_BE_GEN_BIT(name) \
++ SCU_PDMACR_GEN_BIT(BIG_ENDIAN_CONTROL_ ## name)
++
++/* ***************************************************************************** */
++#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT (8)
++#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK (0x00000100)
++
++#define SCU_CDMACR_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_CDMA_CONFIGURATION_ ## name)
++
++/*
++ * *****************************************************************************
++ * * SCU Link Layer Registers
++ * ***************************************************************************** */
++#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_SHIFT (0)
++#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_MASK (0x000000FF)
++#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_SHIFT (8)
++#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_MASK (0x0000FF00)
++#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_SHIFT (16)
++#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_MASK (0x00FF0000)
++#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_SHIFT (24)
++#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_MASK (0xFF000000)
++#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_REQUIRED_MASK (0x00000000)
++#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DEFAULT_MASK (0x7D00676F)
++#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RESERVED_MASK (0x00FF0000)
++
++#define SCU_SAS_SPDTOV_GEN_VALUE(name, value) \
++ SCU_GEN_VALUE(SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_ ## name, value)
++
++
++#define SCU_LINK_STATUS_DWORD_SYNC_ACQUIRED_SHIFT (2)
++#define SCU_LINK_STATUS_DWORD_SYNC_ACQUIRED_MASK (0x00000004)
++#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_SHIFT (4)
++#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_MASK (0x00000010)
++#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_SHIFT (5)
++#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_MASK (0x00000020)
++#define SCU_LINK_STATUS_RESERVED_MASK (0xFFFFFFCD)
++
++#define SCU_SAS_LLSTA_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_LINK_STATUS_ ## name)
++
++
++/* TODO: Where is the SATA_PSELTOV register? */
++
++/*
++ * *****************************************************************************
++ * * SCU SAS Maximum Arbitration Wait Time Timeout Register
++ * ***************************************************************************** */
++#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_SHIFT (0)
++#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_MASK (0x00007FFF)
++#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_SHIFT (15)
++#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_MASK (0x00008000)
++
++#define SCU_SAS_MAWTTOV_GEN_VALUE(name, value) \
++ SCU_GEN_VALUE(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name, value)
++
++#define SCU_SAS_MAWTTOV_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name)
++
++
++/*
++ * TODO: Where is the SAS_LNKTOV register?
++ * TODO: Where is the SAS_PHYTOV register? */
++
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_SHIFT (1)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_MASK (0x00000002)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_SHIFT (2)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_MASK (0x00000004)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_SHIFT (3)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_MASK (0x00000008)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_SHIFT (8)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_MASK (0x00000100)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_SHIFT (9)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_MASK (0x00000200)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_SHIFT (10)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_MASK (0x00000400)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_SHIFT (11)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_MASK (0x00000800)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_SHIFT (16)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_MASK (0x000F0000)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_SHIFT (24)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_MASK (0x0F000000)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_SHIFT (28)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_MASK (0x70000000)
++#define SCU_SAS_TRANSMIT_IDENTIFICATION_RESERVED_MASK (0x80F0F1F1)
++
++#define SCU_SAS_TIID_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name, value)
++
++#define SCU_SAS_TIID_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name)
++
++/* SAS Identify Frame PHY Identifier Register */
++#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_SHIFT (16)
++#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_MASK (0x00010000)
++#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_SHIFT (17)
++#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_MASK (0x00020000)
++#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_SHIFT (18)
++#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_MASK (0x00040000)
++#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_SHIFT (24)
++#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_MASK (0xFF000000)
++#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_RESERVED_MASK (0x00F800FF)
++
++#define SCU_SAS_TIPID_GEN_VALUE(name, value) \
++ SCU_GEN_VALUE(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name, value)
++
++#define SCU_SAS_TIPID_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name)
++
++
++#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_SHIFT (4)
++#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_MASK (0x00000010)
++#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_SHIFT (6)
++#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_MASK (0x00000040)
++#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_SHIFT (7)
++#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_MASK (0x00000080)
++#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_SHIFT (8)
++#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_MASK (0x00000100)
++#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_SHIFT (9)
++#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_MASK (0x00000200)
++#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_SHIFT (11)
++#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_MASK (0x00000800)
++#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_SHIFT (12)
++#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_MASK (0x00001000)
++#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_SHIFT (13)
++#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_MASK (0x00002000)
++#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_SHIFT (14)
++#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_MASK (0x00004000)
++#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_SHIFT (15)
++#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_MASK (0x00008000)
++#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_SHIFT (23)
++#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_MASK (0x00800000)
++#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_SHIFT (27)
++#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_MASK (0x08000000)
++#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_SHIFT (28)
++#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_MASK (0x10000000)
++#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_SHIFT (29)
++#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_MASK (0x20000000)
++#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_SHIFT (30)
++#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_MASK (0x40000000)
++#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_SHIFT (31)
++#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_MASK (0x80000000)
++#define SCU_SAS_PHY_CONFIGURATION_REQUIRED_MASK (0x0100000F)
++#define SCU_SAS_PHY_CONFIGURATION_DEFAULT_MASK (0x4180100F)
++#define SCU_SAS_PHY_CONFIGURATION_RESERVED_MASK (0x00000000)
++
++#define SCU_SAS_PCFG_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_SAS_PHY_CONFIGURATION_ ## name)
++
++#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_SHIFT (0)
++#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_MASK (0x000007FF)
++#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_SHIFT (16)
++#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_MASK (0x00ff0000)
++
++#define SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_##name, value)
++
++#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_SHIFT (0)
++#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_MASK (0x0003FFFF)
++#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_SHIFT (31)
++#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_MASK (0x80000000)
++#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_RESERVED_MASK (0x7FFC0000)
++
++#define SCU_ENSPINUP_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name, value)
++
++#define SCU_ENSPINUP_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name)
++
++
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_SHIFT (1)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_MASK (0x00000002)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_SHIFT (4)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_MASK (0x000000F0)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_SHIFT (8)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_MASK (0x00000100)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_SHIFT (9)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_MASK (0x00000201)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_SHIFT (10)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_MASK (0x00000401)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_SHIFT (11)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_MASK (0x00000801)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_SHIFT (12)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_MASK (0x00001001)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_SHIFT (13)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_MASK (0x00002001)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_SHIFT (31)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_MASK (0x80000000)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_DEFAULT_MASK (0x00003F01)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_REQUIRED_MASK (0x00000001)
++#define SCU_LINK_LAYER_PHY_CAPABILITIES_RESERVED_MASK (0x7FFFC00D)
++
++#define SCU_SAS_PHYCAP_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name, value)
++
++#define SCU_SAS_PHYCAP_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name)
++
++
++#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_SHIFT (0)
++#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_MASK (0x000000FF)
++#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_SHIFT (31)
++#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_MASK (0x80000000)
++#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_RESERVED_MASK (0x7FFFFF00)
++
++#define SCU_PSZGCR_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name, value)
++
++#define SCU_PSZGCR_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name)
++
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_SHIFT (1)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_MASK (0x00000002)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_SHIFT (2)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_MASK (0x00000004)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_SHIFT (4)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_MASK (0x00000010)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_SHIFT (5)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_MASK (0x00000020)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_SHIFT (16)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_MASK (0x00030000)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_SHIFT (19)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_MASK (0x00080000)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_SHIFT (20)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_MASK (0x00300000)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_SHIFT (23)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_MASK (0x00800000)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_SHIFT (24)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_MASK (0x03000000)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_SHIFT (27)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_MASK (0x08000000)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_SHIFT (28)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_MASK (0x30000000)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_SHIFT (31)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_MASK (0x80000000)
++#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_RESERVED_MASK (0x4444FFC9)
++
++#define SCU_PEG_SCUVZECR_GEN_VAL(name, val) \
++ SCU_GEN_VALUE(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name, val)
++
++#define SCU_PEG_SCUVZECR_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name)
++
++
++/*
++ * *****************************************************************************
++ * * Port Task Scheduler registers shift and mask values
++ * ***************************************************************************** */
++#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_SHIFT (0)
++#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_MASK (0x0000FFFF)
++#define SCU_PTSG_CONTROL_TASK_TIMEOUT_SHIFT (16)
++#define SCU_PTSG_CONTROL_TASK_TIMEOUT_MASK (0x00FF0000)
++#define SCU_PTSG_CONTROL_PTSG_ENABLE_SHIFT (24)
++#define SCU_PTSG_CONTROL_PTSG_ENABLE_MASK (0x01000000)
++#define SCU_PTSG_CONTROL_ETM_ENABLE_SHIFT (25)
++#define SCU_PTSG_CONTROL_ETM_ENABLE_MASK (0x02000000)
++#define SCU_PTSG_CONTROL_DEFAULT_MASK (0x00020002)
++#define SCU_PTSG_CONTROL_REQUIRED_MASK (0x00000000)
++#define SCU_PTSG_CONTROL_RESERVED_MASK (0xFC000000)
++
++#define SCU_PTSGCR_GEN_VAL(name, val) \
++ SCU_GEN_VALUE(SCU_PTSG_CONTROL_ ## name, val)
++
++#define SCU_PTSGCR_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_PTSG_CONTROL_ ## name)
++
++
++/* ***************************************************************************** */
++#define SCU_PTSG_REAL_TIME_CLOCK_SHIFT (0)
++#define SCU_PTSG_REAL_TIME_CLOCK_MASK (0x0000FFFF)
++#define SCU_PTSG_REAL_TIME_CLOCK_RESERVED_MASK (0xFFFF0000)
++
++#define SCU_RTCR_GEN_VAL(name, val) \
++ SCU_GEN_VALUE(SCU_PTSG_ ## name, val)
++
++
++#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_SHIFT (0)
++#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_MASK (0x00FFFFFF)
++#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_RESERVED_MASK (0xFF000000)
++
++#define SCU_RTCCR_GEN_VAL(name, val) \
++ SCU_GEN_VALUE(SCU_PTSG_REAL_TIME_CLOCK_CONTROL_ ## name, val)
++
++
++#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_SHIFT (0)
++#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_MASK (0x00000001)
++#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_SHIFT (1)
++#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_MASK (0x00000002)
++#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_RESERVED_MASK (0xFFFFFFFC)
++
++#define SCU_PTSxCR_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ ## name)
++
++
++#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_SHIFT (0)
++#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_MASK (0x00000001)
++#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_SHIFT (1)
++#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_MASK (0x00000002)
++#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_SHIFT (2)
++#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_MASK (0x00000004)
++#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_RESERVED_MASK (0xFFFFFFF8)
++
++#define SCU_PTSxSR_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ ## name)
++
++
++/*
++ * *****************************************************************************
++ * * SGPIO Register shift and mask values
++ * ***************************************************************************** */
++#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_SHIFT (0)
++#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_MASK (0x00000001)
++#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_SHIFT (1)
++#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_MASK (0x00000002)
++#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_SHIFT (2)
++#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_MASK (0x00000004)
++#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_SHIFT (15)
++#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_MASK (0x00008000)
++#define SCU_SGPIO_CONTROL_SGPIO_RESERVED_MASK (0xFFFF7FF8)
++
++#define SCU_SGICRx_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_SGPIO_CONTROL_SGPIO_ ## name)
++
++#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_SHIFT (0)
++#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_MASK (0x0000000F)
++#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_SHIFT (4)
++#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_MASK (0x000000F0)
++#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_SHIFT (8)
++#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_MASK (0x00000F00)
++#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_SHIFT (12)
++#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_MASK (0x0000F000)
++#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_RESERVED_MASK (0xFFFF0000)
++
++#define SCU_SGPBRx_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_ ## name, value)
++
++#define SCU_SGPIO_START_DRIVE_LOWER_R0_SHIFT (0)
++#define SCU_SGPIO_START_DRIVE_LOWER_R0_MASK (0x00000003)
++#define SCU_SGPIO_START_DRIVE_LOWER_R1_SHIFT (4)
++#define SCU_SGPIO_START_DRIVE_LOWER_R1_MASK (0x00000030)
++#define SCU_SGPIO_START_DRIVE_LOWER_R2_SHIFT (8)
++#define SCU_SGPIO_START_DRIVE_LOWER_R2_MASK (0x00000300)
++#define SCU_SGPIO_START_DRIVE_LOWER_R3_SHIFT (12)
++#define SCU_SGPIO_START_DRIVE_LOWER_R3_MASK (0x00003000)
++#define SCU_SGPIO_START_DRIVE_LOWER_RESERVED_MASK (0xFFFF8888)
++
++#define SCU_SGSDLRx_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value)
++
++#define SCU_SGPIO_START_DRIVE_UPPER_R0_SHIFT (0)
++#define SCU_SGPIO_START_DRIVE_UPPER_R0_MASK (0x00000003)
++#define SCU_SGPIO_START_DRIVE_UPPER_R1_SHIFT (4)
++#define SCU_SGPIO_START_DRIVE_UPPER_R1_MASK (0x00000030)
++#define SCU_SGPIO_START_DRIVE_UPPER_R2_SHIFT (8)
++#define SCU_SGPIO_START_DRIVE_UPPER_R2_MASK (0x00000300)
++#define SCU_SGPIO_START_DRIVE_UPPER_R3_SHIFT (12)
++#define SCU_SGPIO_START_DRIVE_UPPER_R3_MASK (0x00003000)
++#define SCU_SGPIO_START_DRIVE_UPPER_RESERVED_MASK (0xFFFF8888)
++
++#define SCU_SGSDURx_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_UPPER_ ## name, value)
++
++#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_SHIFT (0)
++#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_MASK (0x00000003)
++#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_SHIFT (4)
++#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_MASK (0x00000030)
++#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_SHIFT (8)
++#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_MASK (0x00000300)
++#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_SHIFT (12)
++#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_MASK (0x00003000)
++#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_RESERVED_MASK (0xFFFF8888)
++
++#define SCU_SGSIDLRx_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_ ## name, value)
++
++#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_SHIFT (0)
++#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_MASK (0x00000003)
++#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_SHIFT (4)
++#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_MASK (0x00000030)
++#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_SHIFT (8)
++#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_MASK (0x00000300)
++#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_SHIFT (12)
++#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_MASK (0x00003000)
++#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_RESERVED_MASK (0xFFFF8888)
++
++#define SCU_SGSIDURx_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_ ## name, value)
++
++#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_SHIFT (0)
++#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_MASK (0x0000000F)
++#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_RESERVED_MASK (0xFFFFFFF0)
++
++#define SCU_SGVSCR_GEN_VAL(value) \
++ SCU_GEN_VALUE(SCU_SGPIO_VENDOR_SPECIFIC_CODE, value)
++
++#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA0_SHIFT (0)
++#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA0_MASK (0x00000003)
++#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA0_SHIFT (2)
++#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA0_MASK (0x00000004)
++#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA0_SHIFT (3)
++#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA0_MASK (0x00000008)
++#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA1_SHIFT (4)
++#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA1_MASK (0x00000030)
++#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA1_SHIFT (6)
++#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA1_MASK (0x00000040)
++#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA1_SHIFT (7)
++#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA1_MASK (0x00000080)
++#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA2_SHIFT (8)
++#define SCU_SGPIO_OUTPUT_DATA_SELECT_INPUT_DATA2_MASK (0x00000300)
++#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA2_SHIFT (10)
++#define SCU_SGPIO_OUTPUT_DATA_SELECT_INVERT_INPUT_DATA2_MASK (0x00000400)
++#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA2_SHIFT (11)
++#define SCU_SGPIO_OUTPUT_DATA_SELECT_JOG_ENABLE_DATA2_MASK (0x00000800)
++#define SCU_SGPIO_OUTPUT_DATA_SELECT_RESERVED_MASK (0xFFFFF000)
++
++#define SCU_SGODSR_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SCU_SGPIO_OUTPUT_DATA_SELECT_ ## name, value)
++
++#define SCU_SGODSR_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_SGPIO_OUTPUT_DATA_SELECT_ ## name)
++
++/*
++ * *****************************************************************************
++ * * SMU Registers
++ * ***************************************************************************** */
++
++/*
++ * ----------------------------------------------------------------------------
++ * SMU Registers
++ * These registers are based off of BAR0
++ *
++ * To calculate the offset for other functions use
++ * BAR0 + FN# * SystemPageSize * 2
++ *
++ * The TCA is only accessible from FN#0 (the Physical Function) and each
++ * entry is programmed at (BAR0 + SCU_SMU_TCA_OFFSET + (FN# * 0x04)), i.e.
++ * TCA0 for FN#0 is at BAR0 + 0x0400
++ * TCA1 for FN#1 is at BAR0 + 0x0404
++ * etc.
++ * ----------------------------------------------------------------------------
++ * Accessible to all FN#s */
++#define SCU_SMU_PCP_OFFSET 0x0000
++#define SCU_SMU_AMR_OFFSET 0x0004
++#define SCU_SMU_ISR_OFFSET 0x0010
++#define SCU_SMU_IMR_OFFSET 0x0014
++#define SCU_SMU_ICC_OFFSET 0x0018
++#define SCU_SMU_HTTLBAR_OFFSET 0x0020
++#define SCU_SMU_HTTUBAR_OFFSET 0x0024
++#define SCU_SMU_TCR_OFFSET 0x0028
++#define SCU_SMU_CQLBAR_OFFSET 0x0030
++#define SCU_SMU_CQUBAR_OFFSET 0x0034
++#define SCU_SMU_CQPR_OFFSET 0x0040
++#define SCU_SMU_CQGR_OFFSET 0x0044
++#define SCU_SMU_CQC_OFFSET 0x0048
++/* Accessible to FN#0 only */
++#define SCU_SMU_RNCLBAR_OFFSET 0x0080
++#define SCU_SMU_RNCUBAR_OFFSET 0x0084
++#define SCU_SMU_DCC_OFFSET 0x0090
++#define SCU_SMU_DFC_OFFSET 0x0094
++#define SCU_SMU_SMUCSR_OFFSET 0x0098
++#define SCU_SMU_SCUSRCR_OFFSET 0x009C
++#define SCU_SMU_SMAW_OFFSET 0x00A0
++#define SCU_SMU_SMDW_OFFSET 0x00A4
++/* Accessible to FN#0 only */
++#define SCU_SMU_TCA_OFFSET 0x0400
++/* Accessible to all FN#s */
++#define SCU_SMU_MT_MLAR0_OFFSET 0x2000
++#define SCU_SMU_MT_MUAR0_OFFSET 0x2004
++#define SCU_SMU_MT_MDR0_OFFSET 0x2008
++#define SCU_SMU_MT_VCR0_OFFSET 0x200C
++#define SCU_SMU_MT_MLAR1_OFFSET 0x2010
++#define SCU_SMU_MT_MUAR1_OFFSET 0x2014
++#define SCU_SMU_MT_MDR1_OFFSET 0x2018
++#define SCU_SMU_MT_VCR1_OFFSET 0x201C
++#define SCU_SMU_MPBA_OFFSET 0x3000
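++/*
++ * Sketch of the per-function addressing described above; 'bar0', 'fn'
++ * and 'page_size' are hypothetical variables, the arithmetic comes
++ * straight from the comment (BAR0 + FN# * SystemPageSize * 2):
++ *
++ *   void __iomem *fn_base = bar0 + fn * page_size * 2;
++ *   void __iomem *tca_n = bar0 + SCU_SMU_TCA_OFFSET + fn * 0x04;
++ */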
++
++/**
++ * struct smu_registers - These are the SMU registers
++ *
++ *
++ */
++struct smu_registers {
++/* 0x0000 PCP */
++ u32 post_context_port;
++/* 0x0004 AMR */
++ u32 address_modifier;
++ u32 reserved_08;
++ u32 reserved_0C;
++/* 0x0010 ISR */
++ u32 interrupt_status;
++/* 0x0014 IMR */
++ u32 interrupt_mask;
++/* 0x0018 ICC */
++ u32 interrupt_coalesce_control;
++ u32 reserved_1C;
++/* 0x0020 HTTLBAR */
++ u32 host_task_table_lower;
++/* 0x0024 HTTUBAR */
++ u32 host_task_table_upper;
++/* 0x0028 TCR */
++ u32 task_context_range;
++ u32 reserved_2C;
++/* 0x0030 CQLBAR */
++ u32 completion_queue_lower;
++/* 0x0034 CQUBAR */
++ u32 completion_queue_upper;
++ u32 reserved_38;
++ u32 reserved_3C;
++/* 0x0040 CQPR */
++ u32 completion_queue_put;
++/* 0x0044 CQGR */
++ u32 completion_queue_get;
++/* 0x0048 CQC */
++ u32 completion_queue_control;
++ u32 reserved_4C;
++ u32 reserved_5x[4];
++ u32 reserved_6x[4];
++ u32 reserved_7x[4];
++/*
++ * Accessible to FN#0 only
++ * 0x0080 RNCLBAR */
++ u32 remote_node_context_lower;
++/* 0x0084 RNCUBAR */
++ u32 remote_node_context_upper;
++ u32 reserved_88;
++ u32 reserved_8C;
++/* 0x0090 DCC */
++ u32 device_context_capacity;
++/* 0x0094 DFC */
++ u32 device_function_capacity;
++/* 0x0098 SMUCSR */
++ u32 control_status;
++/* 0x009C SCUSRCR */
++ u32 soft_reset_control;
++/* 0x00A0 SMAW */
++ u32 mmr_address_window;
++/* 0x00A4 SMDW */
++ u32 mmr_data_window;
++ u32 reserved_A8;
++ u32 reserved_AC;
++/* A whole bunch of reserved space */
++ u32 reserved_Bx[4];
++ u32 reserved_Cx[4];
++ u32 reserved_Dx[4];
++ u32 reserved_Ex[4];
++ u32 reserved_Fx[4];
++ u32 reserved_1xx[64];
++ u32 reserved_2xx[64];
++ u32 reserved_3xx[64];
++/*
++ * Accessible to FN#0 only
++ * 0x0400 TCA */
++ u32 task_context_assignment[256];
++/* MSI-X registers not included */
++};
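++/*
++ * The SMU block is memory mapped, so these registers are used through
++ * the normal MMIO accessors. A minimal sketch; 'scu_base' is a
++ * hypothetical ioremapped base address, and masking every interrupt by
++ * writing all ones assumes a set bit masks its source:
++ *
++ *   struct smu_registers __iomem *smu = scu_base;
++ *   u32 isr = readl(&smu->interrupt_status);
++ *   writel(0xffffffff, &smu->interrupt_mask);
++ */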
++
++/*
++ * *****************************************************************************
++ * SDMA Registers
++ * ***************************************************************************** */
++#define SCU_SDMA_BASE 0x6000
++#define SCU_SDMA_PUFATLHAR_OFFSET 0x0000
++#define SCU_SDMA_PUFATUHAR_OFFSET 0x0004
++#define SCU_SDMA_UFLHBAR_OFFSET 0x0008
++#define SCU_SDMA_UFUHBAR_OFFSET 0x000C
++#define SCU_SDMA_UFQC_OFFSET 0x0010
++#define SCU_SDMA_UFQPP_OFFSET 0x0014
++#define SCU_SDMA_UFQGP_OFFSET 0x0018
++#define SCU_SDMA_PDMACR_OFFSET 0x001C
++#define SCU_SDMA_CDMACR_OFFSET 0x0080
++
++/**
++ * struct scu_sdma_registers - These are the SCU SDMA Registers
++ *
++ *
++ */
++struct scu_sdma_registers {
++/* 0x0000 PUFATLHAR */
++ u32 uf_address_table_lower;
++/* 0x0004 PUFATUHAR */
++ u32 uf_address_table_upper;
++/* 0x0008 UFLHBAR */
++ u32 uf_header_base_address_lower;
++/* 0x000C UFUHBAR */
++ u32 uf_header_base_address_upper;
++/* 0x0010 UFQC */
++ u32 unsolicited_frame_queue_control;
++/* 0x0014 UFQPP */
++ u32 unsolicited_frame_put_pointer;
++/* 0x0018 UFQGP */
++ u32 unsolicited_frame_get_pointer;
++/* 0x001C PDMACR */
++ u32 pdma_configuration;
++/* Reserved until offset 0x80 */
++ u32 reserved_0020_007C[0x18];
++/* 0x0080 CDMACR */
++ u32 cdma_configuration;
++/* Remainder of the SDMA register space */
++ u32 reserved_0084_0400[0xDF];
++
++};
++
++/*
++ * *****************************************************************************
++ * * SCU Link Registers
++ * ***************************************************************************** */
++#define SCU_PEG0_OFFSET 0x0000
++#define SCU_PEG1_OFFSET 0x8000
++
++#define SCU_TL0_OFFSET 0x0000
++#define SCU_TL1_OFFSET 0x0400
++#define SCU_TL2_OFFSET 0x0800
++#define SCU_TL3_OFFSET 0x0C00
++
++#define SCU_LL_OFFSET 0x0080
++#define SCU_LL0_OFFSET (SCU_TL0_OFFSET + SCU_LL_OFFSET)
++#define SCU_LL1_OFFSET (SCU_TL1_OFFSET + SCU_LL_OFFSET)
++#define SCU_LL2_OFFSET (SCU_TL2_OFFSET + SCU_LL_OFFSET)
++#define SCU_LL3_OFFSET (SCU_TL3_OFFSET + SCU_LL_OFFSET)
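++/*
++ * The absolute position of a link layer block is the sum of its
++ * protocol engine group and transport layer offsets, e.g. (the values
++ * follow from the defines above):
++ *
++ *   SCU_PEG1_OFFSET + SCU_LL2_OFFSET == 0x8000 + 0x0880 == 0x8880
++ */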
++
++/* Transport Layer Offsets (PEG + TL) */
++#define SCU_TLCR_OFFSET 0x0000
++#define SCU_TLADTR_OFFSET 0x0004
++#define SCU_TLTTMR_OFFSET 0x0008
++#define SCU_TLEECR0_OFFSET 0x000C
++#define SCU_STPTLDARNI_OFFSET 0x0010
++
++
++#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_SHIFT (0)
++#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_MASK (0x00000001)
++#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_SHIFT (1)
++#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_MASK (0x00000002)
++#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_SHIFT (3)
++#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_MASK (0x00000008)
++#define SCU_TLCR_CMD_NAK_STATUS_CODE_SHIFT (4)
++#define SCU_TLCR_CMD_NAK_STATUS_CODE_MASK (0x00000010)
++#define SCU_TLCR_RESERVED_MASK (0xFFFFFFEB)
++
++#define SCU_TLCR_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_TLCR_ ## name)
++
++/**
++ * struct scu_transport_layer_registers - These are the SCU Transport Layer
++ * registers
++ *
++ *
++ */
++struct scu_transport_layer_registers {
++ /* 0x0000 TLCR */
++ u32 control;
++ /* 0x0004 TLADTR */
++ u32 arbitration_delay_timer;
++ /* 0x0008 TLTTMR */
++ u32 timer_test_mode;
++ /* 0x000C reserved */
++ u32 reserved_0C;
++ /* 0x0010 STPTLDARNI */
++ u32 stp_rni;
++ /* 0x0014 TLFEWPORCTRL */
++ u32 tlfe_wpo_read_control;
++ /* 0x0018 TLFEWPORDATA */
++ u32 tlfe_wpo_read_data;
++ /* 0x001C RXTLSSCSR1 */
++ u32 rxtl_single_step_control_status_1;
++ /* 0x0020 RXTLSSCSR2 */
++ u32 rxtl_single_step_control_status_2;
++ /* 0x0024 AWTRDDCR */
++ u32 tlfe_awt_retry_delay_debug_control;
++ /* Remainder of TL memory space */
++ u32 reserved_0028_007F[0x16];
++
++};
++
++/* Protocol Engine Group Registers */
++#define SCU_SCUVZECRx_OFFSET 0x1080
++
++/* Link Layer Offsets (PEG + TL + LL) */
++#define SCU_SAS_SPDTOV_OFFSET 0x0000
++#define SCU_SAS_LLSTA_OFFSET 0x0004
++#define SCU_SATA_PSELTOV_OFFSET 0x0008
++#define SCU_SAS_TIMETOV_OFFSET 0x0010
++#define SCU_SAS_LOSTOT_OFFSET 0x0014
++#define SCU_SAS_LNKTOV_OFFSET 0x0018
++#define SCU_SAS_PHYTOV_OFFSET 0x001C
++#define SCU_SAS_AFERCNT_OFFSET 0x0020
++#define SCU_SAS_WERCNT_OFFSET 0x0024
++#define SCU_SAS_TIID_OFFSET 0x0028
++#define SCU_SAS_TIDNH_OFFSET 0x002C
++#define SCU_SAS_TIDNL_OFFSET 0x0030
++#define SCU_SAS_TISSAH_OFFSET 0x0034
++#define SCU_SAS_TISSAL_OFFSET 0x0038
++#define SCU_SAS_TIPID_OFFSET 0x003C
++#define SCU_SAS_TIRES2_OFFSET 0x0040
++#define SCU_SAS_ADRSTA_OFFSET 0x0044
++#define SCU_SAS_MAWTTOV_OFFSET 0x0048
++#define SCU_SAS_FRPLDFIL_OFFSET 0x0054
++#define SCU_SAS_RFCNT_OFFSET 0x0060
++#define SCU_SAS_TFCNT_OFFSET 0x0064
++#define SCU_SAS_RFDCNT_OFFSET 0x0068
++#define SCU_SAS_TFDCNT_OFFSET 0x006C
++#define SCU_SAS_LERCNT_OFFSET 0x0070
++#define SCU_SAS_RDISERRCNT_OFFSET 0x0074
++#define SCU_SAS_CRERCNT_OFFSET 0x0078
++#define SCU_STPCTL_OFFSET 0x007C
++#define SCU_SAS_PCFG_OFFSET 0x0080
++#define SCU_SAS_CLKSM_OFFSET 0x0084
++#define SCU_SAS_TXCOMWAKE_OFFSET 0x0088
++#define SCU_SAS_TXCOMINIT_OFFSET 0x008C
++#define SCU_SAS_TXCOMSAS_OFFSET 0x0090
++#define SCU_SAS_COMINIT_OFFSET 0x0094
++#define SCU_SAS_COMWAKE_OFFSET 0x0098
++#define SCU_SAS_COMSAS_OFFSET 0x009C
++#define SCU_SAS_SFERCNT_OFFSET 0x00A0
++#define SCU_SAS_CDFERCNT_OFFSET 0x00A4
++#define SCU_SAS_DNFERCNT_OFFSET 0x00A8
++#define SCU_SAS_PRSTERCNT_OFFSET 0x00AC
++#define SCU_SAS_CNTCTL_OFFSET 0x00B0
++#define SCU_SAS_SSPTOV_OFFSET 0x00B4
++#define SCU_FTCTL_OFFSET 0x00B8
++#define SCU_FRCTL_OFFSET 0x00BC
++#define SCU_FTWMRK_OFFSET 0x00C0
++#define SCU_ENSPINUP_OFFSET 0x00C4
++#define SCU_SAS_TRNTOV_OFFSET 0x00C8
++#define SCU_SAS_PHYCAP_OFFSET 0x00CC
++#define SCU_SAS_PHYCTL_OFFSET 0x00D0
++#define SCU_SAS_LLCTL_OFFSET 0x00D8
++#define SCU_AFE_XCVRCR_OFFSET 0x00DC
++#define SCU_AFE_LUTCR_OFFSET 0x00E0
++
++#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_SHIFT (0UL)
++#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_MASK (0x000000FFUL)
++#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_SHIFT (8UL)
++#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_MASK (0x0000FF00UL)
++#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_SHIFT (16UL)
++#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_MASK (0x00FF0000UL)
++#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_SHIFT (24UL)
++#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_MASK (0xFF000000UL)
++
++#define SCU_SAS_PHYTOV_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_##name, value)
++
++#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0)
++#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003)
++#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0)
++#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2 (1)
++#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3 (2)
++#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_SHIFT (2)
++#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_MASK (0x000003FC)
++#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_SHIFT (16)
++#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_MASK (0x00010000)
++#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_SHIFT (17)
++#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_MASK (0x00020000)
++#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_SHIFT (24)
++#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_MASK (0xFF000000)
++#define SCU_SAS_LINK_LAYER_CONTROL_RESERVED (0x00FCFC00)
++
++#define SCU_SAS_LLCTL_GEN_VAL(name, value) \
++ SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_CONTROL_ ## name, value)
++
++#define SCU_SAS_LLCTL_GEN_BIT(name) \
++ SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name)
++
++
++/* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */
++#define SCU_PSZGCR_OFFSET 0x00E4
++#define SCU_SAS_RECPHYCAP_OFFSET 0x00E8
++/* #define SCU_TX_LUTSEL_OFFSET 0x00B8 */
++
++#define SCU_SAS_PTxC_OFFSET 0x00D4 /* Same offset as SAS_TCTSTM */
++
++/**
++ * struct scu_link_layer_registers - SCU Link Layer Registers
++ *
++ *
++ */
++struct scu_link_layer_registers {
++/* 0x0000 SAS_SPDTOV */
++ u32 speed_negotiation_timers;
++/* 0x0004 SAS_LLSTA */
++ u32 link_layer_status;
++/* 0x0008 SATA_PSELTOV */
++ u32 port_selector_timeout;
++ u32 reserved_0C;
++/* 0x0010 SAS_TIMETOV */
++ u32 timeout_unit_value;
++/* 0x0014 SAS_RCDTOV */
++ u32 rcd_timeout;
++/* 0x0018 SAS_LNKTOV */
++ u32 link_timer_timeouts;
++/* 0x001C SAS_PHYTOV */
++ u32 sas_phy_timeouts;
++/* 0x0020 SAS_AFERCNT */
++ u32 received_address_frame_error_counter;
++/* 0x0024 SAS_WERCNT */
++ u32 invalid_dword_counter;
++/* 0x0028 SAS_TIID */
++ u32 transmit_identification;
++/* 0x002C SAS_TIDNH */
++ u32 sas_device_name_high;
++/* 0x0030 SAS_TIDNL */
++ u32 sas_device_name_low;
++/* 0x0034 SAS_TISSAH */
++ u32 source_sas_address_high;
++/* 0x0038 SAS_TISSAL */
++ u32 source_sas_address_low;
++/* 0x003C SAS_TIPID */
++ u32 identify_frame_phy_id;
++/* 0x0040 SAS_TIRES2 */
++ u32 identify_frame_reserved;
++/* 0x0044 SAS_ADRSTA */
++ u32 received_address_frame;
++/* 0x0048 SAS_MAWTTOV */
++ u32 maximum_arbitration_wait_timer_timeout;
++/* 0x004C SAS_PTxC */
++ u32 transmit_primitive;
++/* 0x0050 SAS_RORES */
++ u32 error_counter_event_notification_control;
++/* 0x0054 SAS_FRPLDFIL */
++ u32 frxq_payload_fill_threshold;
++/* 0x0058 SAS_LLHANG_TOT */
++ u32 link_layer_hang_detection_timeout;
++ u32 reserved_5C;
++/* 0x0060 SAS_RFCNT */
++ u32 received_frame_count;
++/* 0x0064 SAS_TFCNT */
++ u32 transmit_frame_count;
++/* 0x0068 SAS_RFDCNT */
++ u32 received_dword_count;
++/* 0x006C SAS_TFDCNT */
++ u32 transmit_dword_count;
++/* 0x0070 SAS_LERCNT */
++ u32 loss_of_sync_error_count;
++/* 0x0074 SAS_RDISERRCNT */
++ u32 running_disparity_error_count;
++/* 0x0078 SAS_CRERCNT */
++ u32 received_frame_crc_error_count;
++/* 0x007C STPCTL */
++ u32 stp_control;
++/* 0x0080 SAS_PCFG */
++ u32 phy_configuration;
++/* 0x0084 SAS_CLKSM */
++ u32 clock_skew_management;
++/* 0x0088 SAS_TXCOMWAKE */
++ u32 transmit_comwake_signal;
++/* 0x008C SAS_TXCOMINIT */
++ u32 transmit_cominit_signal;
++/* 0x0090 SAS_TXCOMSAS */
++ u32 transmit_comsas_signal;
++/* 0x0094 SAS_COMINIT */
++ u32 cominit_control;
++/* 0x0098 SAS_COMWAKE */
++ u32 comwake_control;
++/* 0x009C SAS_COMSAS */
++ u32 comsas_control;
++/* 0x00A0 SAS_SFERCNT */
++ u32 received_short_frame_count;
++/* 0x00A4 SAS_CDFERCNT */
++ u32 received_frame_without_credit_count;
++/* 0x00A8 SAS_DNFERCNT */
++ u32 received_frame_after_done_count;
++/* 0x00AC SAS_PRSTERCNT */
++ u32 phy_reset_problem_count;
++/* 0x00B0 SAS_CNTCTL */
++ u32 counter_control;
++/* 0x00B4 SAS_SSPTOV */
++ u32 ssp_timer_timeout_values;
++/* 0x00B8 FTCTL */
++ u32 ftx_control;
++/* 0x00BC FRCTL */
++ u32 frx_control;
++/* 0x00C0 FTWMRK */
++ u32 ftx_watermark;
++/* 0x00C4 ENSPINUP */
++ u32 notify_enable_spinup_control;
++/* 0x00C8 SAS_TRNTOV */
++ u32 sas_training_sequence_timer_values;
++/* 0x00CC SAS_PHYCAP */
++ u32 phy_capabilities;
++/* 0x00D0 SAS_PHYCTL */
++ u32 phy_control;
++ u32 reserved_d4;
++/* 0x00D8 LLCTL */
++ u32 link_layer_control;
++/* 0x00DC AFE_XCVRCR */
++ u32 afe_xcvr_control;
++/* 0x00E0 AFE_LUTCR */
++ u32 afe_lookup_table_control;
++/* 0x00E4 PSZGCR */
++ u32 phy_source_zone_group_control;
++/* 0x00E8 SAS_RECPHYCAP */
++ u32 receive_phycap;
++ u32 reserved_ec;
++/* 0x00F0 SNAFERXRSTCTL */
++ u32 speed_negotiation_afe_rx_reset_control;
++/* 0x00F4 SAS_SSIPMCTL */
++ u32 power_management_control;
++/* 0x00F8 SAS_PSPREQ_PRIM */
++ u32 sas_pm_partial_request_primitive;
++/* 0x00FC SAS_PSSREQ_PRIM */
++ u32 sas_pm_slumber_request_primitive;
++/* 0x0100 SAS_PPSACK_PRIM */
++ u32 sas_pm_ack_primitive_register;
++/* 0x0104 SAS_PSNAK_PRIM */
++ u32 sas_pm_nak_primitive_register;
++/* 0x0108 SAS_SSIPMTOV */
++ u32 sas_primitive_timeout;
++ u32 reserved_10c;
++/* 0x0110 - 0x011C PLAPRDCTRLxREG */
++ u32 pla_product_control[4];
++/* 0x0120 PLAPRDSUMREG */
++ u32 pla_product_sum;
++/* 0x0124 PLACONTROLREG */
++ u32 pla_control;
++/* Remainder of the 896 byte memory space */
++ u32 reserved_0128_037f[0x96];
++
++};
++
++/*
++ * 0x00D4 SAS_PTxC (same offset as SAS_TCTSTM)
++ * u32 primitive_transmit_control; */
++
++/*
++ * ----------------------------------------------------------------------------
++ * SGPIO
++ * ---------------------------------------------------------------------------- */
++#define SCU_SGPIO_OFFSET 0x1400
++
++/* #define SCU_SGPIO_OFFSET 0x6000 // later moves to 0x1400 see HSD 652625 */
++#define SCU_SGPIO_SGICR_OFFSET 0x0000
++#define SCU_SGPIO_SGPBR_OFFSET 0x0004
++#define SCU_SGPIO_SGSDLR_OFFSET 0x0008
++#define SCU_SGPIO_SGSDUR_OFFSET 0x000C
++#define SCU_SGPIO_SGSIDLR_OFFSET 0x0010
++#define SCU_SGPIO_SGSIDUR_OFFSET 0x0014
++#define SCU_SGPIO_SGVSCR_OFFSET 0x0018
++/* Entries from offset 0x0020 to 0x003C */
++#define SCU_SGPIO_SGODSR_OFFSET 0x0020
++
++/**
++ * struct scu_sgpio_registers - SCU SGPIO Registers
++ *
++ *
++ */
++struct scu_sgpio_registers {
++/* 0x0000 SGPIO_SGICR */
++ u32 interface_control;
++/* 0x0004 SGPIO_SGPBR */
++ u32 blink_rate;
++/* 0x0008 SGPIO_SGSDLR */
++ u32 start_drive_lower;
++/* 0x000C SGPIO_SGSDUR */
++ u32 start_drive_upper;
++/* 0x0010 SGPIO_SGSIDLR */
++ u32 serial_input_lower;
++/* 0x0014 SGPIO_SGSIDUR */
++ u32 serial_input_upper;
++/* 0x0018 SGPIO_SGVSCR */
++ u32 vendor_specific_code;
++/* 0x0020 SGPIO_SGODSR */
++ u32 output_data_select[8];
++/* Remainder of the 256 byte memory space */
++ u32 reserved_1444_14ff[0x31];
++
++};
++
++/*
++ * *****************************************************************************
++ * * Defines for VIIT entry offsets
++ * * Access additional entries by SCU_VIIT_BASE + index * 0x10
++ * ***************************************************************************** */
++#define SCU_VIIT_BASE 0x1c00
++
++struct scu_viit_registers {
++ u32 registers[256];
++};
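++/*
++ * Per the note above, each VIIT entry is four dwords, so entry 'index'
++ * ('index' being a hypothetical variable) is found at:
++ *
++ *   u32 entry_offset = SCU_VIIT_BASE + index * 0x10;
++ */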
++
++/*
++ * *****************************************************************************
++ * * SCU PORT TASK SCHEDULER REGISTERS
++ * ***************************************************************************** */
++
++#define SCU_PTSG_BASE 0x1000
++
++#define SCU_PTSG_PTSGCR_OFFSET 0x0000
++#define SCU_PTSG_RTCR_OFFSET 0x0004
++#define SCU_PTSG_RTCCR_OFFSET 0x0008
++#define SCU_PTSG_PTS0CR_OFFSET 0x0010
++#define SCU_PTSG_PTS0SR_OFFSET 0x0014
++#define SCU_PTSG_PTS1CR_OFFSET 0x0018
++#define SCU_PTSG_PTS1SR_OFFSET 0x001C
++#define SCU_PTSG_PTS2CR_OFFSET 0x0020
++#define SCU_PTSG_PTS2SR_OFFSET 0x0024
++#define SCU_PTSG_PTS3CR_OFFSET 0x0028
++#define SCU_PTSG_PTS3SR_OFFSET 0x002C
++#define SCU_PTSG_PCSPE0CR_OFFSET 0x0030
++#define SCU_PTSG_PCSPE1CR_OFFSET 0x0034
++#define SCU_PTSG_PCSPE2CR_OFFSET 0x0038
++#define SCU_PTSG_PCSPE3CR_OFFSET 0x003C
++#define SCU_PTSG_ETMTSCCR_OFFSET 0x0040
++#define SCU_PTSG_ETMRNSCCR_OFFSET 0x0044
++
++/**
++ * struct scu_port_task_scheduler_registers - These are the control/stats pairs
++ * for each Port Task Scheduler.
++ *
++ *
++ */
++struct scu_port_task_scheduler_registers {
++ u32 control;
++ u32 status;
++};
++
++/**
++ * struct scu_port_task_scheduler_group_registers - These are the PORT Task
++ * Scheduler registers
++ *
++ *
++ */
++struct scu_port_task_scheduler_group_registers {
++/* 0x0000 PTSGCR */
++ u32 control;
++/* 0x0004 RTCR */
++ u32 real_time_clock;
++/* 0x0008 RTCCR */
++ u32 real_time_clock_control;
++/* 0x000C */
++ u32 reserved_0C;
++/*
++ * 0x0010 PTS0CR
++ * 0x0014 PTS0SR
++ * 0x0018 PTS1CR
++ * 0x001C PTS1SR
++ * 0x0020 PTS2CR
++ * 0x0024 PTS2SR
++ * 0x0028 PTS3CR
++ * 0x002C PTS3SR */
++ struct scu_port_task_scheduler_registers port[4];
++/*
++ * 0x0030 PCSPE0CR
++ * 0x0034 PCSPE1CR
++ * 0x0038 PCSPE2CR
++ * 0x003C PCSPE3CR */
++ u32 protocol_engine[4];
++/* 0x0040 ETMTSCCR */
++ u32 tc_scanning_interval_control;
++/* 0x0044 ETMRNSCCR */
++ u32 rnc_scanning_interval_control;
++/* Remainder of the 128 byte memory space */
++ u32 reserved_1048_107f[0x0E];
++
++};
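++/*
++ * Example (illustrative only): each port task scheduler is a
++ * control/status pair, so the PTS2 control register is reached as
++ * follows ('ptsg' being a hypothetical mapped pointer):
++ *
++ *   u32 ctl = readl(&ptsg->port[2].control);
++ */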
++
++#define SCU_PTSG_SCUVZECR_OFFSET 0x003C
++
++/*
++ * *****************************************************************************
++ * * AFE REGISTERS
++ * ***************************************************************************** */
++#define SCU_AFE_MMR_BASE 0xE000
++
++/*
++ * AFE 0 is at offset 0x0800
++ * AFE 1 is at offset 0x0900
++ * AFE 2 is at offset 0x0a00
++ * AFE 3 is at offset 0x0b00 */
++struct scu_afe_transceiver {
++ /* 0x0000 AFE_XCVR_CTRL0 */
++ u32 afe_xcvr_control0;
++ /* 0x0004 AFE_XCVR_CTRL1 */
++ u32 afe_xcvr_control1;
++ /* 0x0008 */
++ u32 reserved_0008;
++ /* 0x000c afe_dfx_rx_control0 */
++ u32 afe_dfx_rx_control0;
++ /* 0x0010 AFE_DFX_RX_CTRL1 */
++ u32 afe_dfx_rx_control1;
++ /* 0x0014 */
++ u32 reserved_0014;
++ /* 0x0018 AFE_DFX_RX_STS0 */
++ u32 afe_dfx_rx_status0;
++ /* 0x001c AFE_DFX_RX_STS1 */
++ u32 afe_dfx_rx_status1;
++ /* 0x0020 */
++ u32 reserved_0020;
++ /* 0x0024 AFE_TX_CTRL */
++ u32 afe_tx_control;
++ /* 0x0028 AFE_TX_AMP_CTRL0 */
++ u32 afe_tx_amp_control0;
++ /* 0x002c AFE_TX_AMP_CTRL1 */
++ u32 afe_tx_amp_control1;
++ /* 0x0030 AFE_TX_AMP_CTRL2 */
++ u32 afe_tx_amp_control2;
++ /* 0x0034 AFE_TX_AMP_CTRL3 */
++ u32 afe_tx_amp_control3;
++ /* 0x0038 afe_tx_ssc_control */
++ u32 afe_tx_ssc_control;
++ /* 0x003c */
++ u32 reserved_003c;
++ /* 0x0040 AFE_RX_SSC_CTRL0 */
++ u32 afe_rx_ssc_control0;
++ /* 0x0044 AFE_RX_SSC_CTRL1 */
++ u32 afe_rx_ssc_control1;
++ /* 0x0048 AFE_RX_SSC_CTRL2 */
++ u32 afe_rx_ssc_control2;
++ /* 0x004c AFE_RX_EQ_STS0 */
++ u32 afe_rx_eq_status0;
++ /* 0x0050 AFE_RX_EQ_STS1 */
++ u32 afe_rx_eq_status1;
++ /* 0x0054 AFE_RX_CDR_STS */
++ u32 afe_rx_cdr_status;
++ /* 0x0058 */
++ u32 reserved_0058;
++ /* 0x005c AFE_CHAN_CTRL */
++ u32 afe_channel_control;
++ /* 0x0060-0x006c */
++ u32 reserved_0060_006c[0x04];
++ /* 0x0070 AFE_XCVR_EC_STS0 */
++ u32 afe_xcvr_error_capture_status0;
++ /* 0x0074 AFE_XCVR_EC_STS1 */
++ u32 afe_xcvr_error_capture_status1;
++ /* 0x0078 AFE_XCVR_EC_STS2 */
++ u32 afe_xcvr_error_capture_status2;
++ /* 0x007c afe_xcvr_ec_status3 */
++ u32 afe_xcvr_error_capture_status3;
++ /* 0x0080 AFE_XCVR_EC_STS4 */
++ u32 afe_xcvr_error_capture_status4;
++ /* 0x0084 AFE_XCVR_EC_STS5 */
++ u32 afe_xcvr_error_capture_status5;
++ /* 0x0088-0x00fc */
++ u32 reserved_0088_00fc[0x1e];
++};
++
++/**
++ * struct scu_afe_registers - AFE Registers
++ *
++ *
++ */
++struct scu_afe_registers {
++ /* 0x0000 AFE_BIAS_CTRL */
++ u32 afe_bias_control;
++ u32 reserved_0004;
++ /* 0x0008 AFE_PLL_CTRL0 */
++ u32 afe_pll_control0;
++ /* 0x000c AFE_PLL_CTRL1 */
++ u32 afe_pll_control1;
++ /* 0x0010 AFE_PLL_CTRL2 */
++ u32 afe_pll_control2;
++ /* 0x0014 AFE_CB_STS */
++ u32 afe_common_block_status;
++ /* 0x0018-0x007c */
++ u32 reserved_18_7c[0x1a];
++ /* 0x0080 AFE_PMSN_MCTRL0 */
++ u32 afe_pmsn_master_control0;
++ /* 0x0084 AFE_PMSN_MCTRL1 */
++ u32 afe_pmsn_master_control1;
++ /* 0x0088 AFE_PMSN_MCTRL2 */
++ u32 afe_pmsn_master_control2;
++ /* 0x008C-0x00fc */
++ u32 reserved_008c_00fc[0x1D];
++ /* 0x0100 AFE_DFX_MST_CTRL0 */
++ u32 afe_dfx_master_control0;
++ /* 0x0104 AFE_DFX_MST_CTRL1 */
++ u32 afe_dfx_master_control1;
++ /* 0x0108 AFE_DFX_DCL_CTRL */
++ u32 afe_dfx_dcl_control;
++ /* 0x010c AFE_DFX_DMON_CTRL */
++ u32 afe_dfx_digital_monitor_control;
++ /* 0x0110 AFE_DFX_AMONP_CTRL */
++ u32 afe_dfx_analog_p_monitor_control;
++ /* 0x0114 AFE_DFX_AMONN_CTRL */
++ u32 afe_dfx_analog_n_monitor_control;
++ /* 0x0118 AFE_DFX_NTL_STS */
++ u32 afe_dfx_ntl_status;
++ /* 0x011c AFE_DFX_FIFO_STS0 */
++ u32 afe_dfx_fifo_status0;
++ /* 0x0120 AFE_DFX_FIFO_STS1 */
++ u32 afe_dfx_fifo_status1;
++ /* 0x0124 AFE_DFX_MPAT_CTRL */
++ u32 afe_dfx_master_pattern_control;
++ /* 0x0128 AFE_DFX_P0_CTRL */
++ u32 afe_dfx_p0_control;
++ /* 0x012c-0x01a8 AFE_DFX_P0_DRx */
++ u32 afe_dfx_p0_data[32];
++ /* 0x01ac */
++ u32 reserved_01ac;
++ /* 0x01b0-0x020c AFE_DFX_P0_IRx */
++ u32 afe_dfx_p0_instruction[24];
++ /* 0x0210 */
++ u32 reserved_0210;
++ /* 0x0214 AFE_DFX_P1_CTRL */
++ u32 afe_dfx_p1_control;
++ /* 0x0218-0x0254 AFE_DFX_P1_DRx */
++ u32 afe_dfx_p1_data[16];
++ /* 0x0258-0x029c */
++ u32 reserved_0258_029c[0x12];
++ /* 0x02a0-0x02bc AFE_DFX_P1_IRx */
++ u32 afe_dfx_p1_instruction[8];
++ /* 0x02c0-0x2fc */
++ u32 reserved_02c0_02fc[0x10];
++ /* 0x0300 AFE_DFX_TX_PMSN_CTRL */
++ u32 afe_dfx_tx_pmsn_control;
++ /* 0x0304 AFE_DFX_RX_PMSN_CTRL */
++ u32 afe_dfx_rx_pmsn_control;
++ u32 reserved_0308;
++ /* 0x030c AFE_DFX_NOA_CTRL0 */
++ u32 afe_dfx_noa_control0;
++ /* 0x0310 AFE_DFX_NOA_CTRL1 */
++ u32 afe_dfx_noa_control1;
++ /* 0x0314 AFE_DFX_NOA_CTRL2 */
++ u32 afe_dfx_noa_control2;
++ /* 0x0318 AFE_DFX_NOA_CTRL3 */
++ u32 afe_dfx_noa_control3;
++ /* 0x031c AFE_DFX_NOA_CTRL4 */
++ u32 afe_dfx_noa_control4;
++ /* 0x0320 AFE_DFX_NOA_CTRL5 */
++ u32 afe_dfx_noa_control5;
++ /* 0x0324 AFE_DFX_NOA_CTRL6 */
++ u32 afe_dfx_noa_control6;
++ /* 0x0328 AFE_DFX_NOA_CTRL7 */
++ u32 afe_dfx_noa_control7;
++ /* 0x032c-0x07fc */
++ u32 reserved_032c_07fc[0x135];
++
++ /* 0x0800-0x0bfc */
++ struct scu_afe_transceiver scu_afe_xcvr[4];
++
++ /* 0x0c00-0x0ffc */
++ u32 reserved_0c00_0ffc[0x0100];
++};
++
++struct scu_protocol_engine_group_registers {
++ u32 table[0xE0];
++};
++
++
++struct scu_viit_iit {
++ u32 table[256];
++};
++
++/**
++ * Placeholder for the ZONE partition table information. Zoning will not be
++ * included in the 1.1 release.
++ *
++ *
++ */
++struct scu_zone_partition_table {
++ u32 table[2048];
++};
++
++/**
++ * Placeholder for the CRAM register since I am not sure if we need to
++ * read/write to these registers as yet.
++ *
++ *
++ */
++struct scu_completion_ram {
++ u32 ram[128];
++};
++
++/**
++ * Placeholder for the FBRAM registers since I am not sure if we need to
++ * read/write to these registers as yet.
++ *
++ *
++ */
++struct scu_frame_buffer_ram {
++ u32 ram[128];
++};
++
++#define scu_scratch_ram_SIZE_IN_DWORDS 256
++
++/**
++ * Placeholder for the scratch RAM registers.
++ *
++ *
++ */
++struct scu_scratch_ram {
++ u32 ram[scu_scratch_ram_SIZE_IN_DWORDS];
++};
++
++/**
++ * Placeholder since I am not yet sure what these registers are here for.
++ *
++ *
++ */
++struct noa_protocol_engine_partition {
++ u32 reserved[64];
++};
++
++/**
++ * Placeholder since I am not yet sure what these registers are here for.
++ *
++ *
++ */
++struct noa_hub_partition {
++ u32 reserved[64];
++};
++
++/**
++ * Placeholder since I am not yet sure what these registers are here for.
++ *
++ *
++ */
++struct noa_host_interface_partition {
++ u32 reserved[64];
++};
++
++/**
++ * struct transport_link_layer_pair - The SCU hardware pairs up the TL
++ * registers with the LL registers, so we must place them adjacent to form
++ * the array of registers in the PEG.
++ *
++ *
++ */
++struct transport_link_layer_pair {
++ struct scu_transport_layer_registers tl;
++ struct scu_link_layer_registers ll;
++};
++
++/**
++ * struct scu_peg_registers - SCU Protocol Engine Memory mapped register space.
++ * These registers are unique to each protocol engine group. There can be
++ * at most two PEGs for a single SCU part.
++ *
++ *
++ */
++struct scu_peg_registers {
++ struct transport_link_layer_pair pe[4];
++ struct scu_port_task_scheduler_group_registers ptsg;
++ struct scu_protocol_engine_group_registers peg;
++ struct scu_sgpio_registers sgpio;
++ u32 reserved_01500_1BFF[0x1C0];
++ struct scu_viit_entry viit[64];
++ struct scu_zone_partition_table zpt0;
++ struct scu_zone_partition_table zpt1;
++};
++
++/**
++ * struct scu_registers - SCU registers including both PEG registers if we turn
++ * on that compile option. All of these registers are in the memory mapped
++ * space returned from BAR1.
++ *
++ *
++ */
++struct scu_registers {
++ /* 0x0000 - PEG 0 */
++ struct scu_peg_registers peg0;
++
++ /* 0x6000 - SDMA and Miscellaneous */
++ struct scu_sdma_registers sdma;
++ struct scu_completion_ram cram;
++ struct scu_frame_buffer_ram fbram;
++ u32 reserved_6800_69FF[0x80];
++ struct noa_protocol_engine_partition noa_pe;
++ struct noa_hub_partition noa_hub;
++ struct noa_host_interface_partition noa_if;
++ u32 reserved_6d00_7fff[0x4c0];
++
++ /* 0x8000 - PEG 1 */
++ struct scu_peg_registers peg1;
++
++ /* 0xE000 - AFE Registers */
++ struct scu_afe_registers afe;
++
++ /* 0xF000 - reserved */
++ u32 reserved_f000_211fff[0x80c00];
++
++ /* 0x212000 - scratch RAM */
++ struct scu_scratch_ram scratch_ram;
++};
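++
++/*
++ * A hypothetical compile-time check (not part of this driver) could pin
++ * down the BAR1 block offsets noted in the comments above, for example:
++ *
++ *	BUILD_BUG_ON(offsetof(struct scu_registers, sdma) != 0x6000);
++ *	BUILD_BUG_ON(offsetof(struct scu_registers, peg1) != 0x8000);
++ *	BUILD_BUG_ON(offsetof(struct scu_registers, afe) != 0xe000);
++ *	BUILD_BUG_ON(offsetof(struct scu_registers, scratch_ram) != 0x212000);
++ */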
++
++#endif /* _SCU_REGISTERS_HEADER_ */
+diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
+new file mode 100644
+index 0000000..b6e6368
+--- /dev/null
++++ b/drivers/scsi/isci/remote_device.c
+@@ -0,0 +1,1501 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#include <scsi/sas.h>
++#include "isci.h"
++#include "port.h"
++#include "remote_device.h"
++#include "request.h"
++#include "remote_node_context.h"
++#include "scu_event_codes.h"
++#include "task.h"
++
++/**
++ * isci_remote_device_not_ready() - This function is called by the ihost when
++ * the remote device is not ready. We mark the isci device as not ready for
++ * IO (clearing "ready_for_io") and signal the waiting process.
++ * @ihost: This parameter specifies the isci host object.
++ * @idev: This parameter specifies the remote device.
++ * @reason: This parameter specifies the reason the device is not ready.
++ *
++ * sci_lock is held on entrance to this function.
++ */
++static void isci_remote_device_not_ready(struct isci_host *ihost,
++ struct isci_remote_device *idev, u32 reason)
++{
++ struct isci_request *ireq;
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: isci_device = %p\n", __func__, idev);
++
++ switch (reason) {
++ case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED:
++ set_bit(IDEV_GONE, &idev->flags);
++ break;
++ case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
++ set_bit(IDEV_IO_NCQERROR, &idev->flags);
++
++ /* Kill all outstanding requests for the device. */
++ list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) {
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: isci_device = %p request = %p\n",
++ __func__, idev, ireq);
++
++ sci_controller_terminate_request(ihost,
++ idev,
++ ireq);
++ }
++ /* Fall through into the default case... */
++ default:
++ clear_bit(IDEV_IO_READY, &idev->flags);
++ break;
++ }
++}
++
++/**
++ * isci_remote_device_ready() - This function is called by the ihost when the
++ * remote device is ready. We mark the isci device as ready and signal the
++ * waiting process.
++ * @ihost: our valid isci_host
++ * @idev: remote device
++ *
++ */
++static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
++{
++ dev_dbg(&ihost->pdev->dev,
++ "%s: idev = %p\n", __func__, idev);
++
++ clear_bit(IDEV_IO_NCQERROR, &idev->flags);
++ set_bit(IDEV_IO_READY, &idev->flags);
++ if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
++ wake_up(&ihost->eventq);
++}
++
++/* called once the remote node context is ready to be freed.
++ * The remote device can now report that its stop operation is complete.
++ */
++static void rnc_destruct_done(void *_dev)
++{
++ struct isci_remote_device *idev = _dev;
++
++ BUG_ON(idev->started_request_count != 0);
++ sci_change_state(&idev->sm, SCI_DEV_STOPPED);
++}
++
++static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev)
++{
++ struct isci_host *ihost = idev->owning_port->owning_controller;
++ enum sci_status status = SCI_SUCCESS;
++ u32 i;
++
++ for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
++ struct isci_request *ireq = ihost->reqs[i];
++ enum sci_status s;
++
++ if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
++ ireq->target_device != idev)
++ continue;
++
++ s = sci_controller_terminate_request(ihost, idev, ireq);
++ if (s != SCI_SUCCESS)
++ status = s;
++ }
++
++ return status;
++}
++
++enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
++ u32 timeout)
++{
++ struct sci_base_state_machine *sm = &idev->sm;
++ enum sci_remote_device_states state = sm->current_state_id;
++
++ switch (state) {
++ case SCI_DEV_INITIAL:
++ case SCI_DEV_FAILED:
++ case SCI_DEV_FINAL:
++ default:
++ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
++ __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ case SCI_DEV_STOPPED:
++ return SCI_SUCCESS;
++ case SCI_DEV_STARTING:
++ /* device not started so there had better be no requests */
++ BUG_ON(idev->started_request_count != 0);
++ sci_remote_node_context_destruct(&idev->rnc,
++ rnc_destruct_done, idev);
++ /* Transition to the stopping state and wait for the
++ * remote node to complete being posted and invalidated.
++ */
++ sci_change_state(sm, SCI_DEV_STOPPING);
++ return SCI_SUCCESS;
++ case SCI_DEV_READY:
++ case SCI_STP_DEV_IDLE:
++ case SCI_STP_DEV_CMD:
++ case SCI_STP_DEV_NCQ:
++ case SCI_STP_DEV_NCQ_ERROR:
++ case SCI_STP_DEV_AWAIT_RESET:
++ case SCI_SMP_DEV_IDLE:
++ case SCI_SMP_DEV_CMD:
++ sci_change_state(sm, SCI_DEV_STOPPING);
++ if (idev->started_request_count == 0) {
++ sci_remote_node_context_destruct(&idev->rnc,
++ rnc_destruct_done, idev);
++ return SCI_SUCCESS;
++ } else
++ return sci_remote_device_terminate_requests(idev);
++ break;
++ case SCI_DEV_STOPPING:
++ /* All requests should have been terminated, but if there is an
++ * attempt to stop a device already in the stopping state, then
++ * try again to terminate.
++ */
++ return sci_remote_device_terminate_requests(idev);
++ case SCI_DEV_RESETTING:
++ sci_change_state(sm, SCI_DEV_STOPPING);
++ return SCI_SUCCESS;
++ }
++}
++
++enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
++{
++ struct sci_base_state_machine *sm = &idev->sm;
++ enum sci_remote_device_states state = sm->current_state_id;
++
++ switch (state) {
++ case SCI_DEV_INITIAL:
++ case SCI_DEV_STOPPED:
++ case SCI_DEV_STARTING:
++ case SCI_SMP_DEV_IDLE:
++ case SCI_SMP_DEV_CMD:
++ case SCI_DEV_STOPPING:
++ case SCI_DEV_FAILED:
++ case SCI_DEV_RESETTING:
++ case SCI_DEV_FINAL:
++ default:
++ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
++ __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ case SCI_DEV_READY:
++ case SCI_STP_DEV_IDLE:
++ case SCI_STP_DEV_CMD:
++ case SCI_STP_DEV_NCQ:
++ case SCI_STP_DEV_NCQ_ERROR:
++ case SCI_STP_DEV_AWAIT_RESET:
++ sci_change_state(sm, SCI_DEV_RESETTING);
++ return SCI_SUCCESS;
++ }
++}
++
++enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
++{
++ struct sci_base_state_machine *sm = &idev->sm;
++ enum sci_remote_device_states state = sm->current_state_id;
++
++ if (state != SCI_DEV_RESETTING) {
++ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
++ __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++ sci_change_state(sm, SCI_DEV_READY);
++ return SCI_SUCCESS;
++}
++
++enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
++ u32 suspend_type)
++{
++ struct sci_base_state_machine *sm = &idev->sm;
++ enum sci_remote_device_states state = sm->current_state_id;
++
++ if (state != SCI_STP_DEV_CMD) {
++ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
++ __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++ return sci_remote_node_context_suspend(&idev->rnc,
++ suspend_type, NULL, NULL);
++}
++
++enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
++ u32 frame_index)
++{
++ struct sci_base_state_machine *sm = &idev->sm;
++ enum sci_remote_device_states state = sm->current_state_id;
++ struct isci_host *ihost = idev->owning_port->owning_controller;
++ enum sci_status status;
++
++ switch (state) {
++ case SCI_DEV_INITIAL:
++ case SCI_DEV_STOPPED:
++ case SCI_DEV_STARTING:
++ case SCI_STP_DEV_IDLE:
++ case SCI_SMP_DEV_IDLE:
++ case SCI_DEV_FINAL:
++ default:
++ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
++ __func__, state);
++ /* Return the frame back to the controller */
++ sci_controller_release_frame(ihost, frame_index);
++ return SCI_FAILURE_INVALID_STATE;
++ case SCI_DEV_READY:
++ case SCI_STP_DEV_NCQ_ERROR:
++ case SCI_STP_DEV_AWAIT_RESET:
++ case SCI_DEV_STOPPING:
++ case SCI_DEV_FAILED:
++ case SCI_DEV_RESETTING: {
++ struct isci_request *ireq;
++ struct ssp_frame_hdr hdr;
++ void *frame_header;
++ ssize_t word_cnt;
++
++ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
++ frame_index,
++ &frame_header);
++ if (status != SCI_SUCCESS)
++ return status;
++
++ word_cnt = sizeof(hdr) / sizeof(u32);
++ sci_swab32_cpy(&hdr, frame_header, word_cnt);
++
++ ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
++ if (ireq && ireq->target_device == idev) {
++ /* The IO request is now in charge of releasing the frame */
++ status = sci_io_request_frame_handler(ireq, frame_index);
++ } else {
++ /* We could not map this tag to a valid IO
++ * request. Just toss the frame and continue.
++ */
++ sci_controller_release_frame(ihost, frame_index);
++ }
++ break;
++ }
++ case SCI_STP_DEV_NCQ: {
++ struct dev_to_host_fis *hdr;
++
++ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
++ frame_index,
++ (void **)&hdr);
++ if (status != SCI_SUCCESS)
++ return status;
++
++ if (hdr->fis_type == FIS_SETDEVBITS &&
++ (hdr->status & ATA_ERR)) {
++ idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
++
++ /* TODO Check sactive and complete associated IO if any. */
++ sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
++ } else if (hdr->fis_type == FIS_REGD2H &&
++ (hdr->status & ATA_ERR)) {
++ /*
++ * Some devices return D2H FIS when an NCQ error is detected.
++ * Treat this like an SDB error FIS and use the same not-ready reason.
++ */
++ idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
++ sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
++ } else
++ status = SCI_FAILURE;
++
++ sci_controller_release_frame(ihost, frame_index);
++ break;
++ }
++ case SCI_STP_DEV_CMD:
++ case SCI_SMP_DEV_CMD:
++ /* The device does not process any UF received from the hardware while
++ * in this state. All unsolicited frames are forwarded to the io request
++ * object.
++ */
++ status = sci_io_request_frame_handler(idev->working_request, frame_index);
++ break;
++ }
++
++ return status;
++}
++
++static bool is_remote_device_ready(struct isci_remote_device *idev)
++{
++
++ struct sci_base_state_machine *sm = &idev->sm;
++ enum sci_remote_device_states state = sm->current_state_id;
++
++ switch (state) {
++ case SCI_DEV_READY:
++ case SCI_STP_DEV_IDLE:
++ case SCI_STP_DEV_CMD:
++ case SCI_STP_DEV_NCQ:
++ case SCI_STP_DEV_NCQ_ERROR:
++ case SCI_STP_DEV_AWAIT_RESET:
++ case SCI_SMP_DEV_IDLE:
++ case SCI_SMP_DEV_CMD:
++ return true;
++ default:
++ return false;
++ }
++}
++
++enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
++ u32 event_code)
++{
++ struct sci_base_state_machine *sm = &idev->sm;
++ enum sci_remote_device_states state = sm->current_state_id;
++ enum sci_status status;
++
++ switch (scu_get_event_type(event_code)) {
++ case SCU_EVENT_TYPE_RNC_OPS_MISC:
++ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
++ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
++ status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
++ break;
++ case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
++ if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
++ status = SCI_SUCCESS;
++
++ /* Suspend the associated RNC */
++ sci_remote_node_context_suspend(&idev->rnc,
++ SCI_SOFTWARE_SUSPENSION,
++ NULL, NULL);
++
++ dev_dbg(scirdev_to_dev(idev),
++ "%s: device: %p event code: %x: %s\n",
++ __func__, idev, event_code,
++ is_remote_device_ready(idev)
++ ? "I_T_Nexus_Timeout event"
++ : "I_T_Nexus_Timeout event in wrong state");
++
++ break;
++ }
++ /* Else, fall through and treat as unhandled... */
++ default:
++ dev_dbg(scirdev_to_dev(idev),
++ "%s: device: %p event code: %x: %s\n",
++ __func__, idev, event_code,
++ is_remote_device_ready(idev)
++ ? "unexpected event"
++ : "unexpected event in wrong state");
++ status = SCI_FAILURE_INVALID_STATE;
++ break;
++ }
++
++ if (status != SCI_SUCCESS)
++ return status;
++
++ if (state == SCI_STP_DEV_IDLE) {
++
++ /* We pick up suspension events to handle specifically in this
++ * state. We resume the RNC right away.
++ */
++ if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
++ scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
++ status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
++ }
++
++ return status;
++}
++
++static void sci_remote_device_start_request(struct isci_remote_device *idev,
++ struct isci_request *ireq,
++ enum sci_status status)
++{
++ struct isci_port *iport = idev->owning_port;
++
++ /* cleanup requests that failed after starting on the port */
++ if (status != SCI_SUCCESS)
++ sci_port_complete_io(iport, idev, ireq);
++ else {
++ kref_get(&idev->kref);
++ idev->started_request_count++;
++ }
++}
++
++enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq)
++{
++ struct sci_base_state_machine *sm = &idev->sm;
++ enum sci_remote_device_states state = sm->current_state_id;
++ struct isci_port *iport = idev->owning_port;
++ enum sci_status status;
++
++ switch (state) {
++ case SCI_DEV_INITIAL:
++ case SCI_DEV_STOPPED:
++ case SCI_DEV_STARTING:
++ case SCI_STP_DEV_NCQ_ERROR:
++ case SCI_DEV_STOPPING:
++ case SCI_DEV_FAILED:
++ case SCI_DEV_RESETTING:
++ case SCI_DEV_FINAL:
++ default:
++ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
++ __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ case SCI_DEV_READY:
++ /* attempt to start an io request for this device object. The remote
++ * device object will issue the start request for the io and if
++ * successful it will start the request for the port object then
++ * increment its own request count.
++ */
++ status = sci_port_start_io(iport, idev, ireq);
++ if (status != SCI_SUCCESS)
++ return status;
++
++ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
++ if (status != SCI_SUCCESS)
++ break;
++
++ status = sci_request_start(ireq);
++ break;
++ case SCI_STP_DEV_IDLE: {
++ /* handle the start io operation for a sata device that is in
++ * the command idle state.
++ * - Evaluate the type of IO request to be started.
++ * - If it is an NCQ request, change to the NCQ substate.
++ * - If it is any other command, change to the CMD substate.
++ *
++ * If this is a softreset we may want to have a different
++ * substate.
++ */
++ enum sci_remote_device_states new_state;
++ struct sas_task *task = isci_request_access_task(ireq);
++
++ status = sci_port_start_io(iport, idev, ireq);
++ if (status != SCI_SUCCESS)
++ return status;
++
++ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
++ if (status != SCI_SUCCESS)
++ break;
++
++ status = sci_request_start(ireq);
++ if (status != SCI_SUCCESS)
++ break;
++
++ if (task->ata_task.use_ncq)
++ new_state = SCI_STP_DEV_NCQ;
++ else {
++ idev->working_request = ireq;
++ new_state = SCI_STP_DEV_CMD;
++ }
++ sci_change_state(sm, new_state);
++ break;
++ }
++ case SCI_STP_DEV_NCQ: {
++ struct sas_task *task = isci_request_access_task(ireq);
++
++ if (task->ata_task.use_ncq) {
++ status = sci_port_start_io(iport, idev, ireq);
++ if (status != SCI_SUCCESS)
++ return status;
++
++ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
++ if (status != SCI_SUCCESS)
++ break;
++
++ status = sci_request_start(ireq);
++ } else
++ return SCI_FAILURE_INVALID_STATE;
++ break;
++ }
++ case SCI_STP_DEV_AWAIT_RESET:
++ return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
++ case SCI_SMP_DEV_IDLE:
++ status = sci_port_start_io(iport, idev, ireq);
++ if (status != SCI_SUCCESS)
++ return status;
++
++ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
++ if (status != SCI_SUCCESS)
++ break;
++
++ status = sci_request_start(ireq);
++ if (status != SCI_SUCCESS)
++ break;
++
++ idev->working_request = ireq;
++ sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
++ break;
++ case SCI_STP_DEV_CMD:
++ case SCI_SMP_DEV_CMD:
++ /* The device is already handling a command; it cannot accept new
++ * commands until this one is complete.
++ */
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++ sci_remote_device_start_request(idev, ireq, status);
++ return status;
++}
++
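++/* Completion mirrors sci_remote_device_start_request(): the request is
++ * completed against the port and the started request count taken at start
++ * time is dropped again.
++ */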
++static enum sci_status common_complete_io(struct isci_port *iport,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq)
++{
++ enum sci_status status;
++
++ status = sci_request_complete(ireq);
++ if (status != SCI_SUCCESS)
++ return status;
++
++ status = sci_port_complete_io(iport, idev, ireq);
++ if (status != SCI_SUCCESS)
++ return status;
++
++ sci_remote_device_decrement_request_count(idev);
++ return status;
++}
++
++enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq)
++{
++ struct sci_base_state_machine *sm = &idev->sm;
++ enum sci_remote_device_states state = sm->current_state_id;
++ struct isci_port *iport = idev->owning_port;
++ enum sci_status status;
++
++ switch (state) {
++ case SCI_DEV_INITIAL:
++ case SCI_DEV_STOPPED:
++ case SCI_DEV_STARTING:
++ case SCI_STP_DEV_IDLE:
++ case SCI_SMP_DEV_IDLE:
++ case SCI_DEV_FAILED:
++ case SCI_DEV_FINAL:
++ default:
++ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
++ __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ case SCI_DEV_READY:
++ case SCI_STP_DEV_AWAIT_RESET:
++ case SCI_DEV_RESETTING:
++ status = common_complete_io(iport, idev, ireq);
++ break;
++ case SCI_STP_DEV_CMD:
++ case SCI_STP_DEV_NCQ:
++ case SCI_STP_DEV_NCQ_ERROR:
++ status = common_complete_io(iport, idev, ireq);
++ if (status != SCI_SUCCESS)
++ break;
++
++ if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
++ /* This request caused a hardware error; the device needs a LUN
++ * reset. Force the state machine into the AWAIT_RESET state so
++ * the remaining IOs can reach the RNC state handler, where they
++ * will be completed by the RNC with a status of
++ * "DEVICE_RESET_REQUIRED" instead of "INVALID STATE".
++ */
++ sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
++ } else if (idev->started_request_count == 0)
++ sci_change_state(sm, SCI_STP_DEV_IDLE);
++ break;
++ case SCI_SMP_DEV_CMD:
++ status = common_complete_io(iport, idev, ireq);
++ if (status != SCI_SUCCESS)
++ break;
++ sci_change_state(sm, SCI_SMP_DEV_IDLE);
++ break;
++ case SCI_DEV_STOPPING:
++ status = common_complete_io(iport, idev, ireq);
++ if (status != SCI_SUCCESS)
++ break;
++
++ if (idev->started_request_count == 0)
++ sci_remote_node_context_destruct(&idev->rnc,
++ rnc_destruct_done,
++ idev);
++ break;
++ }
++
++ if (status != SCI_SUCCESS)
++ dev_err(scirdev_to_dev(idev),
++ "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
++ "could not complete\n", __func__, iport,
++ idev, ireq, status);
++ else
++ isci_put_device(idev);
++
++ return status;
++}
++
++static void sci_remote_device_continue_request(void *dev)
++{
++ struct isci_remote_device *idev = dev;
++
++ /* we need to check if this request is still valid to continue. */
++ if (idev->working_request)
++ sci_controller_continue_io(idev->working_request);
++}
++
++enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq)
++{
++ struct sci_base_state_machine *sm = &idev->sm;
++ enum sci_remote_device_states state = sm->current_state_id;
++ struct isci_port *iport = idev->owning_port;
++ enum sci_status status;
++
++ switch (state) {
++ case SCI_DEV_INITIAL:
++ case SCI_DEV_STOPPED:
++ case SCI_DEV_STARTING:
++ case SCI_SMP_DEV_IDLE:
++ case SCI_SMP_DEV_CMD:
++ case SCI_DEV_STOPPING:
++ case SCI_DEV_FAILED:
++ case SCI_DEV_RESETTING:
++ case SCI_DEV_FINAL:
++ default:
++ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
++ __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ case SCI_STP_DEV_IDLE:
++ case SCI_STP_DEV_CMD:
++ case SCI_STP_DEV_NCQ:
++ case SCI_STP_DEV_NCQ_ERROR:
++ case SCI_STP_DEV_AWAIT_RESET:
++ status = sci_port_start_io(iport, idev, ireq);
++ if (status != SCI_SUCCESS)
++ return status;
++
++ status = sci_remote_node_context_start_task(&idev->rnc, ireq);
++ if (status != SCI_SUCCESS)
++ goto out;
++
++ status = sci_request_start(ireq);
++ if (status != SCI_SUCCESS)
++ goto out;
++
++ /* Note: If the remote device state is not IDLE this will
++ * replace the request that probably resulted in the task
++ * management request.
++ */
++ idev->working_request = ireq;
++ sci_change_state(sm, SCI_STP_DEV_CMD);
++
++ /* The remote node context must cleanup the TCi to NCQ mapping
++ * table. The only way to do this correctly is to either write
++ * to the TLCR register or to invalidate and repost the RNC. In
++ * either case the remote node context state machine will take
++ * the correct action when the remote node context is suspended
++ * and later resumed.
++ */
++ sci_remote_node_context_suspend(&idev->rnc,
++ SCI_SOFTWARE_SUSPENSION, NULL, NULL);
++ sci_remote_node_context_resume(&idev->rnc,
++ sci_remote_device_continue_request,
++ idev);
++
++ out:
++ sci_remote_device_start_request(idev, ireq, status);
++ /* We need to let the controller start request handler know that
++ * it can't post TC yet. We will provide a callback function to
++ * post TC when RNC gets resumed.
++ */
++ return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
++ case SCI_DEV_READY:
++ status = sci_port_start_io(iport, idev, ireq);
++ if (status != SCI_SUCCESS)
++ return status;
++
++ status = sci_remote_node_context_start_task(&idev->rnc, ireq);
++ if (status != SCI_SUCCESS)
++ break;
++
++ status = sci_request_start(ireq);
++ break;
++ }
++ sci_remote_device_start_request(idev, ireq, status);
++
++ return status;
++}
++
++void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
++{
++ struct isci_port *iport = idev->owning_port;
++ u32 context;
++
++ context = request |
++ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
++ (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
++ idev->rnc.remote_node_index;
++
++ sci_controller_post_request(iport->owning_controller, context);
++}
++
++/* called once the remote node context has transitioned to a
++ * ready state. This is the indication that the remote device object can also
++ * transition to ready.
++ */
++static void remote_device_resume_done(void *_dev)
++{
++ struct isci_remote_device *idev = _dev;
++
++ if (is_remote_device_ready(idev))
++ return;
++
++ /* go 'ready' if we are not already in a ready state */
++ sci_change_state(&idev->sm, SCI_DEV_READY);
++}
++
++static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
++{
++ struct isci_remote_device *idev = _dev;
++ struct isci_host *ihost = idev->owning_port->owning_controller;
++
++ /* For NCQ operation we do not issue an isci_remote_device_not_ready().
++ * As a result, avoid sending the ready notification.
++ */
++ if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
++ isci_remote_device_ready(ihost, idev);
++}
++
++static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
++
++ /* Initial state is a transitional state to the stopped state */
++ sci_change_state(&idev->sm, SCI_DEV_STOPPED);
++}
++
++/**
++ * sci_remote_device_destruct() - free remote node context and destruct
++ * @idev: This parameter specifies the remote device to be destructed.
++ *
++ * Remote device objects are a limited resource. As such, they must be
++ * protected. Thus calls to construct and destruct are mutually exclusive and
++ * non-reentrant. The return value shall indicate if the device was
++ * successfully destructed or if some failure occurred. SCI_SUCCESS is
++ * returned if the device is successfully destructed.
++ * SCI_FAILURE_INVALID_REMOTE_DEVICE This value is returned if the supplied
++ * device isn't valid (e.g. it's already been destroyed, the handle isn't
++ * valid, etc.).
++ */
++static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
++{
++ struct sci_base_state_machine *sm = &idev->sm;
++ enum sci_remote_device_states state = sm->current_state_id;
++ struct isci_host *ihost;
++
++ if (state != SCI_DEV_STOPPED) {
++ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
++ __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++ ihost = idev->owning_port->owning_controller;
++ sci_controller_free_remote_node_context(ihost, idev,
++ idev->rnc.remote_node_index);
++ idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
++ sci_change_state(sm, SCI_DEV_FINAL);
++
++ return SCI_SUCCESS;
++}
++
++/**
++ * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
++ * @ihost: This parameter specifies the isci host object.
++ * @idev: This parameter specifies the remote device to be freed.
++ *
++ */
++static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
++{
++ dev_dbg(&ihost->pdev->dev,
++ "%s: isci_device = %p\n", __func__, idev);
++
++ /* There should not be any outstanding IOs. All paths to
++ * here should go through isci_remote_device_nuke_requests.
++ * If we hit this condition, we will need a way to complete
++ * io requests in process */
++ BUG_ON(!list_empty(&idev->reqs_in_process));
++
++ sci_remote_device_destruct(idev);
++ list_del_init(&idev->node);
++ isci_put_device(idev);
++}
++
++static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
++ struct isci_host *ihost = idev->owning_port->owning_controller;
++ u32 prev_state;
++
++ /* If we are entering from the stopping state let the SCI User know that
++ * the stop operation has completed.
++ */
++ prev_state = idev->sm.previous_state_id;
++ if (prev_state == SCI_DEV_STOPPING)
++ isci_remote_device_deconstruct(ihost, idev);
++
++ sci_controller_remote_device_stopped(ihost, idev);
++}
++
++static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
++ struct isci_host *ihost = idev->owning_port->owning_controller;
++
++ isci_remote_device_not_ready(ihost, idev,
++ SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
++}
++
++static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
++ struct isci_host *ihost = idev->owning_port->owning_controller;
++ struct domain_device *dev = idev->domain_dev;
++
++ if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
++ sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
++ } else if (dev_is_expander(dev)) {
++ sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
++ } else
++ isci_remote_device_ready(ihost, idev);
++}
++
++static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
++{
++ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
++ struct domain_device *dev = idev->domain_dev;
++
++ if (dev->dev_type == SAS_END_DEV) {
++ struct isci_host *ihost = idev->owning_port->owning_controller;
++
++ isci_remote_device_not_ready(ihost, idev,
++ SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
++ }
++}
++
++static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
++
++ sci_remote_node_context_suspend(
++ &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);
++}
++
++static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
++{
++ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
++
++ sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
++}
++
++static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
++
++ idev->working_request = NULL;
++ if (sci_remote_node_context_is_ready(&idev->rnc)) {
++ /*
++ * Since the RNC is ready, it's alright to finish completion
++ * processing (e.g. signal the remote device is ready). */
++ sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
++ } else {
++ sci_remote_node_context_resume(&idev->rnc,
++ sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
++ idev);
++ }
++}
++
++static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
++ struct isci_host *ihost = idev->owning_port->owning_controller;
++
++ BUG_ON(idev->working_request == NULL);
++
++ isci_remote_device_not_ready(ihost, idev,
++ SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
++}
++
++static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
++ struct isci_host *ihost = idev->owning_port->owning_controller;
++
++ if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
++ isci_remote_device_not_ready(ihost, idev,
++ idev->not_ready_reason);
++}
++
++static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
++ struct isci_host *ihost = idev->owning_port->owning_controller;
++
++ isci_remote_device_ready(ihost, idev);
++}
++
++static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
++ struct isci_host *ihost = idev->owning_port->owning_controller;
++
++ BUG_ON(idev->working_request == NULL);
++
++ isci_remote_device_not_ready(ihost, idev,
++ SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
++}
++
++static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
++{
++ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
++
++ idev->working_request = NULL;
++}
++
++static const struct sci_base_state sci_remote_device_state_table[] = {
++ [SCI_DEV_INITIAL] = {
++ .enter_state = sci_remote_device_initial_state_enter,
++ },
++ [SCI_DEV_STOPPED] = {
++ .enter_state = sci_remote_device_stopped_state_enter,
++ },
++ [SCI_DEV_STARTING] = {
++ .enter_state = sci_remote_device_starting_state_enter,
++ },
++ [SCI_DEV_READY] = {
++ .enter_state = sci_remote_device_ready_state_enter,
++ .exit_state = sci_remote_device_ready_state_exit
++ },
++ [SCI_STP_DEV_IDLE] = {
++ .enter_state = sci_stp_remote_device_ready_idle_substate_enter,
++ },
++ [SCI_STP_DEV_CMD] = {
++ .enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
++ },
++ [SCI_STP_DEV_NCQ] = { },
++ [SCI_STP_DEV_NCQ_ERROR] = {
++ .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
++ },
++ [SCI_STP_DEV_AWAIT_RESET] = { },
++ [SCI_SMP_DEV_IDLE] = {
++ .enter_state = sci_smp_remote_device_ready_idle_substate_enter,
++ },
++ [SCI_SMP_DEV_CMD] = {
++ .enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
++ .exit_state = sci_smp_remote_device_ready_cmd_substate_exit,
++ },
++ [SCI_DEV_STOPPING] = { },
++ [SCI_DEV_FAILED] = { },
++ [SCI_DEV_RESETTING] = {
++ .enter_state = sci_remote_device_resetting_state_enter,
++ .exit_state = sci_remote_device_resetting_state_exit
++ },
++ [SCI_DEV_FINAL] = { },
++};
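++
++/*
++ * Typical lifecycle implied by the handlers above: INITIAL -> STOPPED at
++ * construction, STOPPED -> STARTING -> READY as the RNC resumes, and
++ * READY -> STOPPING -> STOPPED -> FINAL on the way to destruction.
++ */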
++
++/**
++ * sci_remote_device_construct() - common construction
++ * @iport: SAS/SATA port through which this device is accessed.
++ * @idev: remote device to construct
++ *
++ * This routine just performs benign initialization and does not
++ * allocate the remote_node_context which is left to
++ * sci_remote_device_[de]a_construct(). sci_remote_device_destruct()
++ * frees the remote_node_context(s) for the device.
++ */
++static void sci_remote_device_construct(struct isci_port *iport,
++ struct isci_remote_device *idev)
++{
++ idev->owning_port = iport;
++ idev->started_request_count = 0;
++
++ sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
++
++ sci_remote_node_context_construct(&idev->rnc,
++ SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
++}
++
++/**
++ * sci_remote_device_da_construct() - construct direct attached device.
++ *
++ * The information (e.g. IAF, Signature FIS, etc.) necessary to build
++ * the device is known to the SCI Core since it is contained in the
++ * sci_phy object. Remote node context(s) is/are a global resource
++ * allocated by this routine, freed by sci_remote_device_destruct().
++ *
++ * Returns:
++ * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
++ * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
++ * sata-only controller instance.
++ * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
++ */
++static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
++ struct isci_remote_device *idev)
++{
++ enum sci_status status;
++ struct domain_device *dev = idev->domain_dev;
++
++ sci_remote_device_construct(iport, idev);
++
++ /*
++ * This information is required to determine how many remote node context
++ * entries will be needed to store the remote node.
++ */
++ idev->is_direct_attached = true;
++ status = sci_controller_allocate_remote_node_context(iport->owning_controller,
++ idev,
++ &idev->rnc.remote_node_index);
++
++ if (status != SCI_SUCCESS)
++ return status;
++
++ if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
++ (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
++ /* pass */;
++ else
++ return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
++
++ idev->connection_rate = sci_port_get_max_allowed_speed(iport);
++
++ /* @todo Should I assign the port width by reading all of the phys on the port? */
++ idev->device_port_width = 1;
++
++ return SCI_SUCCESS;
++}
++
++/**
++ * sci_remote_device_ea_construct() - construct expander attached device
++ *
++ * Remote node context(s) is/are a global resource allocated by this
++ * routine, freed by sci_remote_device_destruct().
++ *
++ * Returns:
++ * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
++ * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
++ * sata-only controller instance.
++ * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
++ */
++static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
++ struct isci_remote_device *idev)
++{
++ struct domain_device *dev = idev->domain_dev;
++ enum sci_status status;
++
++ sci_remote_device_construct(iport, idev);
++
++ status = sci_controller_allocate_remote_node_context(iport->owning_controller,
++ idev,
++ &idev->rnc.remote_node_index);
++ if (status != SCI_SUCCESS)
++ return status;
++
++ if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
++ (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
++ /* pass */;
++ else
++ return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
++
++ /*
++ * For SAS-2 the physical link rate is actually a logical link
++ * rate that incorporates multiplexing. The SCU doesn't
++ * incorporate multiplexing and for the purposes of the
++ * connection the logical link rate is that same as the
++ * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
++ * one another, so this code works for both situations. */
++ idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
++ dev->linkrate);
++
++ /* @todo Should I assign the port width by reading all of the phys on the port? */
++ idev->device_port_width = 1;
++
++ return SCI_SUCCESS;
++}
++
++/**
++ * sci_remote_device_start() - This method will start the supplied remote
++ * device. This method enables normal IO requests to flow through to the
++ * remote device.
++ * @idev: This parameter specifies the device to be started.
++ * @timeout: This parameter specifies the number of milliseconds in which the
++ * start operation should complete.
++ *
++ * An indication of whether the device was successfully started. SCI_SUCCESS
++ * This value is returned if the device was successfully started.
++ * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start
++ * the device when there have been no phys added to it.
++ */
++static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
++ u32 timeout)
++{
++ struct sci_base_state_machine *sm = &idev->sm;
++ enum sci_remote_device_states state = sm->current_state_id;
++ enum sci_status status;
++
++ if (state != SCI_DEV_STOPPED) {
++ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
++ __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++ status = sci_remote_node_context_resume(&idev->rnc,
++ remote_device_resume_done,
++ idev);
++ if (status != SCI_SUCCESS)
++ return status;
++
++ sci_change_state(sm, SCI_DEV_STARTING);
++
++ return SCI_SUCCESS;
++}
++
++static enum sci_status isci_remote_device_construct(struct isci_port *iport,
++ struct isci_remote_device *idev)
++{
++ struct isci_host *ihost = iport->isci_host;
++ struct domain_device *dev = idev->domain_dev;
++ enum sci_status status;
++
++ if (dev->parent && dev_is_expander(dev->parent))
++ status = sci_remote_device_ea_construct(iport, idev);
++ else
++ status = sci_remote_device_da_construct(iport, idev);
++
++ if (status != SCI_SUCCESS) {
++ dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
++ __func__, status);
++
++ return status;
++ }
++
++ /* start the device. */
++ status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
++
++ if (status != SCI_SUCCESS)
++ dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
++ status);
++
++ return status;
++}
++
++void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev)
++{
++ DECLARE_COMPLETION_ONSTACK(aborted_task_completion);
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: idev = %p\n", __func__, idev);
++
++ /* Cleanup all requests pending for this device. */
++ isci_terminate_pending_requests(ihost, idev);
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: idev = %p, done\n", __func__, idev);
++}
++
++/**
++ * isci_remote_device_alloc() - This function builds the isci_remote_device
++ * when a libsas dev_found message is received.
++ * @ihost: This parameter specifies the isci host object.
++ * @iport: This parameter specifies the isci_port connected to this device.
++ *
++ * pointer to new isci_remote_device.
++ */
++static struct isci_remote_device *
++isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
++{
++ struct isci_remote_device *idev;
++ int i;
++
++ for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
++ idev = &ihost->devices[i];
++ if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
++ break;
++ }
++
++ if (i >= SCI_MAX_REMOTE_DEVICES) {
++ dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
++ return NULL;
++ }
++
++ if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n"))
++ return NULL;
++
++ if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
++ return NULL;
++
++ return idev;
++}
++
++void isci_remote_device_release(struct kref *kref)
++{
++ struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
++ struct isci_host *ihost = idev->isci_port->isci_host;
++
++ idev->domain_dev = NULL;
++ idev->isci_port = NULL;
++ clear_bit(IDEV_START_PENDING, &idev->flags);
++ clear_bit(IDEV_STOP_PENDING, &idev->flags);
++ clear_bit(IDEV_IO_READY, &idev->flags);
++ clear_bit(IDEV_GONE, &idev->flags);
++ clear_bit(IDEV_EH, &idev->flags);
++ smp_mb__before_clear_bit();
++ clear_bit(IDEV_ALLOCATED, &idev->flags);
++ wake_up(&ihost->eventq);
++}
++
++/**
++ * isci_remote_device_stop() - This function is called internally to stop the
++ * remote device.
++ * @ihost: This parameter specifies the isci host object.
++ * @idev: This parameter specifies the remote device.
++ *
++ * The status of the ihost request to stop.
++ */
++enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
++{
++ enum sci_status status;
++ unsigned long flags;
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: isci_device = %p\n", __func__, idev);
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++ idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
++ set_bit(IDEV_GONE, &idev->flags);
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++
++ /* Kill all outstanding requests. */
++ isci_remote_device_nuke_requests(ihost, idev);
++
++ set_bit(IDEV_STOP_PENDING, &idev->flags);
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++ status = sci_remote_device_stop(idev, 50);
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++
++ /* Wait for the stop complete callback. */
++ if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
++ /* nothing to wait for */;
++ else
++ wait_for_device_stop(ihost, idev);
++
++ return status;
++}
++
++/**
++ * isci_remote_device_gone() - This function is called by libsas when a domain
++ * device is removed.
++ * @dev: This parameter specifies the libsas domain device.
++ *
++ */
++void isci_remote_device_gone(struct domain_device *dev)
++{
++ struct isci_host *ihost = dev_to_ihost(dev);
++ struct isci_remote_device *idev = dev->lldd_dev;
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
++ __func__, dev, idev, idev->isci_port);
++
++ isci_remote_device_stop(ihost, idev);
++}
++
++/**
++ * isci_remote_device_found() - This function is called by libsas when a remote
++ * device is discovered. A remote device object is created and started. The
++ * function then sleeps until the sci core device started message is
++ * received.
++ * @domain_dev: This parameter specifies the libsas domain device.
++ *
++ * status, zero indicates success.
++ */
++int isci_remote_device_found(struct domain_device *domain_dev)
++{
++ struct isci_host *isci_host = dev_to_ihost(domain_dev);
++ struct isci_port *isci_port;
++ struct isci_phy *isci_phy;
++ struct asd_sas_port *sas_port;
++ struct asd_sas_phy *sas_phy;
++ struct isci_remote_device *isci_device;
++ enum sci_status status;
++
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: domain_device = %p\n", __func__, domain_dev);
++
++ wait_for_start(isci_host);
++
++ sas_port = domain_dev->port;
++ sas_phy = list_first_entry(&sas_port->phy_list, struct asd_sas_phy,
++ port_phy_el);
++ isci_phy = to_iphy(sas_phy);
++ isci_port = isci_phy->isci_port;
++
++ /* we are being called for a device on this port,
++ * so it has to come up eventually
++ */
++ wait_for_completion(&isci_port->start_complete);
++
++ if ((isci_stopping == isci_port_get_state(isci_port)) ||
++ (isci_stopped == isci_port_get_state(isci_port)))
++ return -ENODEV;
++
++ isci_device = isci_remote_device_alloc(isci_host, isci_port);
++ if (!isci_device)
++ return -ENODEV;
++
++ kref_init(&isci_device->kref);
++ INIT_LIST_HEAD(&isci_device->node);
++
++ spin_lock_irq(&isci_host->scic_lock);
++ isci_device->domain_dev = domain_dev;
++ isci_device->isci_port = isci_port;
++ list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
++
++ set_bit(IDEV_START_PENDING, &isci_device->flags);
++ status = isci_remote_device_construct(isci_port, isci_device);
++
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: isci_device = %p\n",
++ __func__, isci_device);
++
++ if (status == SCI_SUCCESS) {
++ /* device came up, advertise it to the world */
++ domain_dev->lldd_dev = isci_device;
++ } else
++ isci_put_device(isci_device);
++ spin_unlock_irq(&isci_host->scic_lock);
++
++ /* wait for the device ready callback. */
++ wait_for_device_start(isci_host, isci_device);
++
++ return status == SCI_SUCCESS ? 0 : -ENODEV;
++}
++
++/**
++ * isci_device_is_reset_pending() - This function will check if there is any
++ * pending reset condition on the device.
++ * @isci_host: This parameter specifies the isci host object.
++ * @isci_device: This parameter is the isci_device object.
++ *
++ * true if there is a reset pending for the device.
++ */
++bool isci_device_is_reset_pending(
++ struct isci_host *isci_host,
++ struct isci_remote_device *isci_device)
++{
++ struct isci_request *isci_request;
++ struct isci_request *tmp_req;
++ bool reset_is_pending = false;
++ unsigned long flags;
++
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: isci_device = %p\n", __func__, isci_device);
++
++ spin_lock_irqsave(&isci_host->scic_lock, flags);
++
++ /* Check for reset on all pending requests. */
++ list_for_each_entry_safe(isci_request, tmp_req,
++ &isci_device->reqs_in_process, dev_node) {
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: isci_device = %p request = %p\n",
++ __func__, isci_device, isci_request);
++
++ if (isci_request->ttype == io_task) {
++ struct sas_task *task = isci_request_access_task(
++ isci_request);
++
++ spin_lock(&task->task_state_lock);
++ if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
++ reset_is_pending = true;
++ spin_unlock(&task->task_state_lock);
++ }
++ }
++
++ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
++
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: isci_device = %p reset_is_pending = %d\n",
++ __func__, isci_device, reset_is_pending);
++
++ return reset_is_pending;
++}
++
++/**
++ * isci_device_clear_reset_pending() - This function will clear any pending
++ * reset condition flags on the device.
++ * @ihost: This parameter specifies the isci host object.
++ * @idev: This parameter is the isci_device object.
++ */
++void isci_device_clear_reset_pending(struct isci_host *ihost, struct isci_remote_device *idev)
++{
++ struct isci_request *isci_request;
++ struct isci_request *tmp_req;
++ unsigned long flags = 0;
++
++ dev_dbg(&ihost->pdev->dev, "%s: idev=%p, ihost=%p\n",
++ __func__, idev, ihost);
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++
++ /* Clear reset pending on all pending requests. */
++ list_for_each_entry_safe(isci_request, tmp_req,
++ &idev->reqs_in_process, dev_node) {
++ dev_dbg(&ihost->pdev->dev, "%s: idev = %p request = %p\n",
++ __func__, idev, isci_request);
++
++ if (isci_request->ttype == io_task) {
++
++ unsigned long flags2;
++ struct sas_task *task = isci_request_access_task(
++ isci_request);
++
++ spin_lock_irqsave(&task->task_state_lock, flags2);
++ task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
++ spin_unlock_irqrestore(&task->task_state_lock, flags2);
++ }
++ }
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++}
+diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
+new file mode 100644
+index 0000000..57ccfc3
+--- /dev/null
++++ b/drivers/scsi/isci/remote_device.h
+@@ -0,0 +1,352 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _ISCI_REMOTE_DEVICE_H_
++#define _ISCI_REMOTE_DEVICE_H_
++#include <scsi/libsas.h>
++#include <linux/kref.h>
++#include "scu_remote_node_context.h"
++#include "remote_node_context.h"
++#include "port.h"
++
++enum sci_remote_device_not_ready_reason_code {
++ SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED,
++ SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED,
++ SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED,
++ SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED,
++ SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED,
++ SCIC_REMOTE_DEVICE_NOT_READY_REASON_CODE_MAX
++};
++
++/**
++ * isci_remote_device - isci representation of a sas expander / end point
++ * @device_port_width: hw setting for number of simultaneous connections
++ * @connection_rate: per-taskcontext connection rate for this device
++ * @working_request: SATA requests have no tag, so for unaccelerated
++ *                   protocols we need a method to associate unsolicited
++ *                   frames with a pending request
++ */
++struct isci_remote_device {
++ #define IDEV_START_PENDING 0
++ #define IDEV_STOP_PENDING 1
++ #define IDEV_ALLOCATED 2
++ #define IDEV_EH 3
++ #define IDEV_GONE 4
++ #define IDEV_IO_READY 5
++ #define IDEV_IO_NCQERROR 6
++ unsigned long flags;
++ struct kref kref;
++ struct isci_port *isci_port;
++ struct domain_device *domain_dev;
++ struct list_head node;
++ struct list_head reqs_in_process;
++ struct sci_base_state_machine sm;
++ u32 device_port_width;
++ enum sas_linkrate connection_rate;
++ bool is_direct_attached;
++ struct isci_port *owning_port;
++ struct sci_remote_node_context rnc;
++ /* XXX unify with device reference counting and delete */
++ u32 started_request_count;
++ struct isci_request *working_request;
++ u32 not_ready_reason;
++};
++
++#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
++
++/* device reference routines must be called under sci_lock */
++static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
++{
++ struct isci_remote_device *idev = dev->lldd_dev;
++
++ if (idev && !test_bit(IDEV_GONE, &idev->flags)) {
++ kref_get(&idev->kref);
++ return idev;
++ }
++
++ return NULL;
++}
++
++void isci_remote_device_release(struct kref *kref);
++static inline void isci_put_device(struct isci_remote_device *idev)
++{
++ if (idev)
++ kref_put(&idev->kref, isci_remote_device_release);
++}
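++
++/* Illustrative usage sketch (editorial, not part of the imported driver):
++ * callers pair these helpers in a get/use/put pattern, taking the lookup
++ * under ihost->scic_lock as required above (dev is a struct domain_device *):
++ *
++ *	spin_lock_irqsave(&ihost->scic_lock, flags);
++ *	idev = isci_lookup_device(dev);
++ *	spin_unlock_irqrestore(&ihost->scic_lock, flags);
++ *	if (idev) {
++ *		...use idev; IDEV_GONE devices are never returned...
++ *		isci_put_device(idev);
++ *	}
++ */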
++
++enum sci_status isci_remote_device_stop(struct isci_host *ihost,
++ struct isci_remote_device *idev);
++void isci_remote_device_nuke_requests(struct isci_host *ihost,
++ struct isci_remote_device *idev);
++void isci_remote_device_gone(struct domain_device *domain_dev);
++int isci_remote_device_found(struct domain_device *domain_dev);
++bool isci_device_is_reset_pending(struct isci_host *ihost,
++ struct isci_remote_device *idev);
++void isci_device_clear_reset_pending(struct isci_host *ihost,
++ struct isci_remote_device *idev);
++/**
++ * sci_remote_device_stop() - This method will stop both transmission and
++ * reception of link activity for the supplied remote device. This method
++ * disables normal IO requests from flowing through to the remote device.
++ * @remote_device: This parameter specifies the device to be stopped.
++ * @timeout: This parameter specifies the number of milliseconds in which the
++ * stop operation should complete.
++ *
++ * Return: an indication of whether the device was successfully stopped.
++ * SCI_SUCCESS is returned if transmission and reception for the device were
++ * successfully stopped.
++ */
++enum sci_status sci_remote_device_stop(
++ struct isci_remote_device *idev,
++ u32 timeout);
++
++/**
++ * sci_remote_device_reset() - This method will reset the device making it
++ * ready for operation. This method must be called anytime the device is
++ * reset either through a SMP phy control or a port hard reset request.
++ * @remote_device: This parameter specifies the device to be reset.
++ *
++ * This method does not actually cause the device hardware to be reset.  It
++ * resets the software object so that it will be operational after a device
++ * hardware reset completes.  Return: an indication of whether the device
++ * reset was accepted; SCI_SUCCESS is returned if the device reset is started.
++ */
++enum sci_status sci_remote_device_reset(
++ struct isci_remote_device *idev);
++
++/**
++ * sci_remote_device_reset_complete() - This method informs the device object
++ * that the reset operation is complete and the device can resume operation
++ * again.
++ * @remote_device: This parameter specifies the device which is to be informed
++ * of the reset complete operation.
++ *
++ * Return: SCI_SUCCESS, an indication that the device is resuming operation.
++ */
++enum sci_status sci_remote_device_reset_complete(
++ struct isci_remote_device *idev);
++
++/**
++ * enum sci_remote_device_states - This enumeration depicts all the states
++ * for the common remote device state machine.
++ *
++ *
++ */
++enum sci_remote_device_states {
++ /**
++ * Simply the initial state for the base remote device state machine.
++ */
++ SCI_DEV_INITIAL,
++
++ /**
++ * This state indicates that the remote device has successfully been
++ * stopped. In this state no new IO operations are permitted.
++ * This state is entered from the INITIAL state.
++ * This state is entered from the STOPPING state.
++ */
++ SCI_DEV_STOPPED,
++
++ /**
++	 * This state indicates that the remote device is in the process of
++ * becoming ready (i.e. starting). In this state no new IO operations
++ * are permitted.
++ * This state is entered from the STOPPED state.
++ */
++ SCI_DEV_STARTING,
++
++ /**
++ * This state indicates the remote device is now ready. Thus, the user
++ * is able to perform IO operations on the remote device.
++ * This state is entered from the STARTING state.
++ */
++ SCI_DEV_READY,
++
++ /**
++	 * This is the idle substate for the STP remote device.  When there are no
++	 * active IO requests for the device it is in this state.
++ */
++ SCI_STP_DEV_IDLE,
++
++ /**
++	 * This is the command state for the STP remote device.  This state is
++ * entered when the device is processing a non-NCQ command. The device object
++ * will fail any new start IO requests until this command is complete.
++ */
++ SCI_STP_DEV_CMD,
++
++ /**
++ * This is the NCQ state for the STP remote device. This state is entered
++	 * when the device is processing an NCQ request.  It will remain in this state
++	 * as long as one or more NCQ requests are being processed.
++ */
++ SCI_STP_DEV_NCQ,
++
++ /**
++ * This is the NCQ error state for the STP remote device. This state is
++ * entered when an SDB error FIS is received by the device object while in the
++ * NCQ state. The device object will only accept a READ LOG command while in
++ * this state.
++ */
++ SCI_STP_DEV_NCQ_ERROR,
++
++ /**
++	 * This READY substate indicates the device is waiting for a RESET task
++	 * in order to recover from certain hardware-specific errors.
++ */
++ SCI_STP_DEV_AWAIT_RESET,
++
++ /**
++	 * This is the idle substate for the SMP remote device.  This is the
++	 * normal operational state for an SMP device with no request in flight.
++ */
++ SCI_SMP_DEV_IDLE,
++
++ /**
++	 * This is the command substate for the SMP remote device.  This is the
++	 * state the device is placed in while a single SMP request is outstanding.
++ */
++ SCI_SMP_DEV_CMD,
++
++ /**
++ * This state indicates that the remote device is in the process of
++ * stopping. In this state no new IO operations are permitted, but
++ * existing IO operations are allowed to complete.
++ * This state is entered from the READY state.
++ * This state is entered from the FAILED state.
++ */
++ SCI_DEV_STOPPING,
++
++ /**
++ * This state indicates that the remote device has failed.
++ * In this state no new IO operations are permitted.
++ * This state is entered from the INITIALIZING state.
++ * This state is entered from the READY state.
++ */
++ SCI_DEV_FAILED,
++
++ /**
++ * This state indicates the device is being reset.
++ * In this state no new IO operations are permitted.
++ * This state is entered from the READY state.
++ */
++ SCI_DEV_RESETTING,
++
++ /**
++ * Simply the final state for the base remote device state machine.
++ */
++ SCI_DEV_FINAL,
++};
++
++static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc)
++{
++ struct isci_remote_device *idev;
++
++ idev = container_of(rnc, typeof(*idev), rnc);
++
++ return idev;
++}
++
++static inline bool dev_is_expander(struct domain_device *dev)
++{
++ return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV;
++}
++
++static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
++{
++ /* XXX delete this voodoo when converting to the top-level device
++ * reference count
++ */
++	if (!WARN_ONCE(idev->started_request_count == 0,
++		       "%s: tried to decrement started_request_count past 0!?",
++		       __func__))
++		idev->started_request_count--;
++}
++
++enum sci_status sci_remote_device_frame_handler(
++ struct isci_remote_device *idev,
++ u32 frame_index);
++
++enum sci_status sci_remote_device_event_handler(
++ struct isci_remote_device *idev,
++ u32 event_code);
++
++enum sci_status sci_remote_device_start_io(
++ struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq);
++
++enum sci_status sci_remote_device_start_task(
++ struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq);
++
++enum sci_status sci_remote_device_complete_io(
++ struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq);
++
++enum sci_status sci_remote_device_suspend(
++ struct isci_remote_device *idev,
++ u32 suspend_type);
++
++void sci_remote_device_post_request(
++ struct isci_remote_device *idev,
++ u32 request);
++
++#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */
+diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
+new file mode 100644
+index 0000000..748e833
+--- /dev/null
++++ b/drivers/scsi/isci/remote_node_context.c
+@@ -0,0 +1,627 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "host.h"
++#include "isci.h"
++#include "remote_device.h"
++#include "remote_node_context.h"
++#include "scu_event_codes.h"
++#include "scu_task_context.h"
++
++
++/**
++ *
++ * @sci_rnc: The remote node context object to check.
++ *
++ * This method will return true if the remote node context is in the READY
++ * state and false otherwise.
++ */
++bool sci_remote_node_context_is_ready(
++ struct sci_remote_node_context *sci_rnc)
++{
++	return sci_rnc->sm.current_state_id == SCI_RNC_READY;
++}
++
++static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
++{
++ if (id < ihost->remote_node_entries &&
++ ihost->device_table[id])
++ return &ihost->remote_node_context_table[id];
++
++ return NULL;
++}
++
++static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
++{
++ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
++ struct domain_device *dev = idev->domain_dev;
++ int rni = sci_rnc->remote_node_index;
++ union scu_remote_node_context *rnc;
++ struct isci_host *ihost;
++ __le64 sas_addr;
++
++ ihost = idev->owning_port->owning_controller;
++ rnc = sci_rnc_by_id(ihost, rni);
++
++ memset(rnc, 0, sizeof(union scu_remote_node_context)
++ * sci_remote_device_node_count(idev));
++
++ rnc->ssp.remote_node_index = rni;
++ rnc->ssp.remote_node_port_width = idev->device_port_width;
++ rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;
++
++ /* sas address is __be64, context ram format is __le64 */
++ sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
++ rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
++ rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);
++
++ rnc->ssp.nexus_loss_timer_enable = true;
++ rnc->ssp.check_bit = false;
++ rnc->ssp.is_valid = false;
++ rnc->ssp.is_remote_node_context = true;
++ rnc->ssp.function_number = 0;
++
++ rnc->ssp.arbitration_wait_time = 0;
++
++ if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
++ rnc->ssp.connection_occupancy_timeout =
++ ihost->user_parameters.stp_max_occupancy_timeout;
++ rnc->ssp.connection_inactivity_timeout =
++ ihost->user_parameters.stp_inactivity_timeout;
++ } else {
++ rnc->ssp.connection_occupancy_timeout =
++ ihost->user_parameters.ssp_max_occupancy_timeout;
++ rnc->ssp.connection_inactivity_timeout =
++ ihost->user_parameters.ssp_inactivity_timeout;
++ }
++
++ rnc->ssp.initial_arbitration_wait_time = 0;
++
++ /* Open Address Frame Parameters */
++ rnc->ssp.oaf_connection_rate = idev->connection_rate;
++ rnc->ssp.oaf_features = 0;
++ rnc->ssp.oaf_source_zone_group = 0;
++ rnc->ssp.oaf_more_compatibility_features = 0;
++}
++
++/**
++ *
++ * @sci_rnc: The remote node context to set up.
++ * @callback: The user callback to invoke once the RNC reaches the ready state.
++ * @callback_parameter: The cookie to pass to @callback.
++ *
++ * This method will set up the remote node context object so it will
++ * transition to its ready state.  If the remote node context is already set
++ * up to transition to its final state then this function does nothing.
++ */
++static void sci_remote_node_context_setup_to_resume(
++ struct sci_remote_node_context *sci_rnc,
++ scics_sds_remote_node_context_callback callback,
++ void *callback_parameter)
++{
++ if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) {
++ sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY;
++ sci_rnc->user_callback = callback;
++ sci_rnc->user_cookie = callback_parameter;
++ }
++}
++
++static void sci_remote_node_context_setup_to_destroy(
++ struct sci_remote_node_context *sci_rnc,
++ scics_sds_remote_node_context_callback callback,
++ void *callback_parameter)
++{
++ sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL;
++ sci_rnc->user_callback = callback;
++ sci_rnc->user_cookie = callback_parameter;
++}
++
++/**
++ *
++ *
++ * This method just calls the user callback function and then resets the
++ * callback.
++ */
++static void sci_remote_node_context_notify_user(
++ struct sci_remote_node_context *rnc)
++{
++ if (rnc->user_callback != NULL) {
++ (*rnc->user_callback)(rnc->user_cookie);
++
++ rnc->user_callback = NULL;
++ rnc->user_cookie = NULL;
++ }
++}
++
++static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
++{
++ if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
++ sci_remote_node_context_resume(rnc, rnc->user_callback,
++ rnc->user_cookie);
++}
++
++static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
++{
++ union scu_remote_node_context *rnc_buffer;
++ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
++ struct domain_device *dev = idev->domain_dev;
++ struct isci_host *ihost = idev->owning_port->owning_controller;
++
++ rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
++
++ rnc_buffer->ssp.is_valid = true;
++
++ if (!idev->is_direct_attached &&
++ (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) {
++ sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
++ } else {
++ sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
++
++ if (idev->is_direct_attached)
++ sci_port_setup_transports(idev->owning_port,
++ sci_rnc->remote_node_index);
++ }
++}
++
++static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
++{
++ union scu_remote_node_context *rnc_buffer;
++ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
++ struct isci_host *ihost = idev->owning_port->owning_controller;
++
++ rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
++
++ rnc_buffer->ssp.is_valid = false;
++
++ sci_remote_device_post_request(rnc_to_dev(sci_rnc),
++ SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
++}
++
++static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
++{
++ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
++
++ /* Check to see if we have gotten back to the initial state because
++ * someone requested to destroy the remote node context object.
++ */
++ if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
++ rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
++ sci_remote_node_context_notify_user(rnc);
++ }
++}
++
++static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
++{
++ struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);
++
++ sci_remote_node_context_validate_context_buffer(sci_rnc);
++}
++
++static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
++{
++ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
++
++ sci_remote_node_context_invalidate_context_buffer(rnc);
++}
++
++static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
++{
++ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
++ struct isci_remote_device *idev;
++ struct domain_device *dev;
++
++ idev = rnc_to_dev(rnc);
++ dev = idev->domain_dev;
++
++ /*
++ * For direct attached SATA devices we need to clear the TLCR
++ * NCQ to TCi tag mapping on the phy and in cases where we
++ * resume because of a target reset we also need to update
++ * the STPTLDARNI register with the RNi of the device
++ */
++ if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
++ idev->is_direct_attached)
++ sci_port_setup_transports(idev->owning_port,
++ rnc->remote_node_index);
++
++ sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
++}
++
++static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
++{
++ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
++
++ rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
++
++ if (rnc->user_callback)
++ sci_remote_node_context_notify_user(rnc);
++}
++
++static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
++{
++ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
++
++ sci_remote_node_context_continue_state_transitions(rnc);
++}
++
++static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
++{
++ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
++
++ sci_remote_node_context_continue_state_transitions(rnc);
++}
++
++static const struct sci_base_state sci_remote_node_context_state_table[] = {
++ [SCI_RNC_INITIAL] = {
++ .enter_state = sci_remote_node_context_initial_state_enter,
++ },
++ [SCI_RNC_POSTING] = {
++ .enter_state = sci_remote_node_context_posting_state_enter,
++ },
++ [SCI_RNC_INVALIDATING] = {
++ .enter_state = sci_remote_node_context_invalidating_state_enter,
++ },
++ [SCI_RNC_RESUMING] = {
++ .enter_state = sci_remote_node_context_resuming_state_enter,
++ },
++ [SCI_RNC_READY] = {
++ .enter_state = sci_remote_node_context_ready_state_enter,
++ },
++ [SCI_RNC_TX_SUSPENDED] = {
++ .enter_state = sci_remote_node_context_tx_suspended_state_enter,
++ },
++ [SCI_RNC_TX_RX_SUSPENDED] = {
++ .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
++ },
++ [SCI_RNC_AWAIT_SUSPENSION] = { },
++};
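++
++/* Editorial summary (not from the driver source): the handlers in this file
++ * drive the state table above roughly as follows:
++ *
++ *	INITIAL --resume--> POSTING --post complete--> READY
++ *	READY --suspend event--> TX_SUSPENDED or TX_RX_SUSPENDED
++ *	suspended --resume--> RESUMING (or INVALIDATING first) --> READY
++ *	any active state --destruct--> INVALIDATING --> INITIAL
++ */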
++
++void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
++ u16 remote_node_index)
++{
++ memset(rnc, 0, sizeof(struct sci_remote_node_context));
++
++ rnc->remote_node_index = remote_node_index;
++ rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
++
++ sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
++}
++
++enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
++ u32 event_code)
++{
++ enum scis_sds_remote_node_context_states state;
++
++ state = sci_rnc->sm.current_state_id;
++ switch (state) {
++ case SCI_RNC_POSTING:
++ switch (scu_get_event_code(event_code)) {
++ case SCU_EVENT_POST_RNC_COMPLETE:
++ sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
++ break;
++ default:
++ goto out;
++ }
++ break;
++ case SCI_RNC_INVALIDATING:
++ if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
++ if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL)
++ state = SCI_RNC_INITIAL;
++ else
++ state = SCI_RNC_POSTING;
++ sci_change_state(&sci_rnc->sm, state);
++ } else {
++ switch (scu_get_event_type(event_code)) {
++ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
++ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
++			/* We really don't care if the hardware is going to suspend
++			 * the device since it's being invalidated anyway */
++			dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
++				"%s: SCIC Remote Node Context 0x%p was "
++				"suspended by hardware while being "
++ "invalidated.\n", __func__, sci_rnc);
++ break;
++ default:
++ goto out;
++ }
++ }
++ break;
++ case SCI_RNC_RESUMING:
++ if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
++ sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
++ } else {
++ switch (scu_get_event_type(event_code)) {
++ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
++ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
++			/* We really don't care if the hardware is going to suspend
++			 * the device since it's being resumed anyway */
++			dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
++				"%s: SCIC Remote Node Context 0x%p was "
++				"suspended by hardware while being resumed.\n",
++ __func__, sci_rnc);
++ break;
++ default:
++ goto out;
++ }
++ }
++ break;
++ case SCI_RNC_READY:
++ switch (scu_get_event_type(event_code)) {
++ case SCU_EVENT_TL_RNC_SUSPEND_TX:
++ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
++ sci_rnc->suspension_code = scu_get_event_specifier(event_code);
++ break;
++ case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
++ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
++ sci_rnc->suspension_code = scu_get_event_specifier(event_code);
++ break;
++ default:
++ goto out;
++ }
++ break;
++ case SCI_RNC_AWAIT_SUSPENSION:
++ switch (scu_get_event_type(event_code)) {
++ case SCU_EVENT_TL_RNC_SUSPEND_TX:
++ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
++ sci_rnc->suspension_code = scu_get_event_specifier(event_code);
++ break;
++ case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
++ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
++ sci_rnc->suspension_code = scu_get_event_specifier(event_code);
++ break;
++ default:
++ goto out;
++ }
++ break;
++ default:
++ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
++ "%s: invalid state %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++ return SCI_SUCCESS;
++
++ out:
++ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
++ "%s: code: %#x state: %d\n", __func__, event_code, state);
++ return SCI_FAILURE;
++
++}
++
++enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
++ scics_sds_remote_node_context_callback cb_fn,
++ void *cb_p)
++{
++ enum scis_sds_remote_node_context_states state;
++
++ state = sci_rnc->sm.current_state_id;
++ switch (state) {
++ case SCI_RNC_INVALIDATING:
++		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
++ return SCI_SUCCESS;
++ case SCI_RNC_POSTING:
++ case SCI_RNC_RESUMING:
++ case SCI_RNC_READY:
++ case SCI_RNC_TX_SUSPENDED:
++ case SCI_RNC_TX_RX_SUSPENDED:
++ case SCI_RNC_AWAIT_SUSPENSION:
++		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
++ sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
++ return SCI_SUCCESS;
++ case SCI_RNC_INITIAL:
++ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
++ "%s: invalid state %d\n", __func__, state);
++ /* We have decided that the destruct request on the remote node context
++		 * cannot fail, since it is either in the initial/destroyed state or it
++		 * can be destroyed.
++ */
++ return SCI_SUCCESS;
++ default:
++ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
++ "%s: invalid state %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++}
++
++enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
++ u32 suspend_type,
++ scics_sds_remote_node_context_callback cb_fn,
++ void *cb_p)
++{
++ enum scis_sds_remote_node_context_states state;
++
++ state = sci_rnc->sm.current_state_id;
++ if (state != SCI_RNC_READY) {
++ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
++ "%s: invalid state %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++ sci_rnc->user_callback = cb_fn;
++ sci_rnc->user_cookie = cb_p;
++ sci_rnc->suspension_code = suspend_type;
++
++ if (suspend_type == SCI_SOFTWARE_SUSPENSION) {
++ sci_remote_device_post_request(rnc_to_dev(sci_rnc),
++ SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX);
++ }
++
++ sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
++ return SCI_SUCCESS;
++}
++
++enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
++ scics_sds_remote_node_context_callback cb_fn,
++ void *cb_p)
++{
++ enum scis_sds_remote_node_context_states state;
++
++ state = sci_rnc->sm.current_state_id;
++ switch (state) {
++ case SCI_RNC_INITIAL:
++ if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
++ return SCI_FAILURE_INVALID_STATE;
++
++ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
++ sci_remote_node_context_construct_buffer(sci_rnc);
++ sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
++ return SCI_SUCCESS;
++ case SCI_RNC_POSTING:
++ case SCI_RNC_INVALIDATING:
++ case SCI_RNC_RESUMING:
++ if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
++ return SCI_FAILURE_INVALID_STATE;
++
++ sci_rnc->user_callback = cb_fn;
++ sci_rnc->user_cookie = cb_p;
++ return SCI_SUCCESS;
++ case SCI_RNC_TX_SUSPENDED: {
++ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
++ struct domain_device *dev = idev->domain_dev;
++
++ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
++
++ /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */
++ if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev))
++ sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
++ else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
++ if (idev->is_direct_attached) {
++ /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. */
++ sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
++ } else {
++ sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
++ }
++ } else
++ return SCI_FAILURE;
++ return SCI_SUCCESS;
++ }
++ case SCI_RNC_TX_RX_SUSPENDED:
++ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
++ sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
++ return SCI_FAILURE_INVALID_STATE;
++ case SCI_RNC_AWAIT_SUSPENSION:
++ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
++ return SCI_SUCCESS;
++ default:
++ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
++ "%s: invalid state %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++}
++
++enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
++ struct isci_request *ireq)
++{
++ enum scis_sds_remote_node_context_states state;
++
++ state = sci_rnc->sm.current_state_id;
++
++ switch (state) {
++ case SCI_RNC_READY:
++ return SCI_SUCCESS;
++ case SCI_RNC_TX_SUSPENDED:
++ case SCI_RNC_TX_RX_SUSPENDED:
++ case SCI_RNC_AWAIT_SUSPENSION:
++ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
++ "%s: invalid state %d\n", __func__, state);
++ return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
++ default:
++ break;
++ }
++ dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
++ "%s: requested to start IO while still resuming, %d\n",
++ __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++}
++
++enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
++ struct isci_request *ireq)
++{
++ enum scis_sds_remote_node_context_states state;
++
++ state = sci_rnc->sm.current_state_id;
++ switch (state) {
++ case SCI_RNC_RESUMING:
++ case SCI_RNC_READY:
++ case SCI_RNC_AWAIT_SUSPENSION:
++ return SCI_SUCCESS;
++ case SCI_RNC_TX_SUSPENDED:
++ case SCI_RNC_TX_RX_SUSPENDED:
++ sci_remote_node_context_resume(sci_rnc, NULL, NULL);
++ return SCI_SUCCESS;
++ default:
++ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
++ "%s: invalid state %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++}
+diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
+new file mode 100644
+index 0000000..41580ad
+--- /dev/null
++++ b/drivers/scsi/isci/remote_node_context.h
+@@ -0,0 +1,224 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
++#define _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
++
++/**
++ * This file contains the structures, constants, and prototypes associated with
++ * the remote node context in the silicon.  It exists to model and manage
++ * that context on behalf of the driver.
++ *
++ *
++ */
++
++#include "isci.h"
++
++/**
++ *
++ *
++ * This constant represents an invalid remote device id; it is used to program
++ * the STPDARNI register so the driver knows when it has received a SIGNATURE
++ * FIS from the SCU.
++ */
++#define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF
++
++#define SCU_HARDWARE_SUSPENSION (0)
++#define SCI_SOFTWARE_SUSPENSION (1)
++
++struct isci_request;
++struct isci_remote_device;
++struct sci_remote_node_context;
++
++typedef void (*scics_sds_remote_node_context_callback)(void *);
++
++/**
++ * This is the enumeration of the remote node context states.
++ */
++enum scis_sds_remote_node_context_states {
++ /**
++ * This state is the initial state for a remote node context. On a resume
++ * request the remote node context will transition to the posting state.
++ */
++ SCI_RNC_INITIAL,
++
++ /**
++ * This is a transition state that posts the RNi to the hardware. Once the RNC
++ * is posted the remote node context will be made ready.
++ */
++ SCI_RNC_POSTING,
++
++ /**
++ * This is a transition state that will post an RNC invalidate to the
++ * hardware. Once the invalidate is complete the remote node context will
++ * transition to the posting state.
++ */
++ SCI_RNC_INVALIDATING,
++
++ /**
++	 * This is a transition state that will post an RNC resume to the hardware.
++ * Once the event notification of resume complete is received the remote node
++ * context will transition to the ready state.
++ */
++ SCI_RNC_RESUMING,
++
++ /**
++	 * This is the state that the remote node context must be in to accept IO
++ * request operations.
++ */
++ SCI_RNC_READY,
++
++ /**
++ * This is the state that the remote node context transitions to when it gets
++ * a TX suspend notification from the hardware.
++ */
++ SCI_RNC_TX_SUSPENDED,
++
++ /**
++ * This is the state that the remote node context transitions to when it gets
++ * a TX RX suspend notification from the hardware.
++ */
++ SCI_RNC_TX_RX_SUSPENDED,
++
++ /**
++ * This state is a wait state for the remote node context that waits for a
++ * suspend notification from the hardware. This state is entered when either
++	 * there is a request to suspend the remote node context or when there is a TC
++ * completion where the remote node will be suspended by the hardware.
++ */
++ SCI_RNC_AWAIT_SUSPENSION
++};
++
++/**
++ *
++ *
++ * This enumeration is used to define the end destination state for the remote
++ * node context.
++ */
++enum sci_remote_node_context_destination_state {
++ SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED,
++ SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY,
++ SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL
++};
++
++/**
++ * struct sci_remote_node_context - This structure contains the data
++ * associated with the remote node context object. The remote node context
++ *    (RNC) object models the remote device information necessary to manage
++ * the silicon RNC.
++ */
++struct sci_remote_node_context {
++ /**
++ * This field indicates the remote node index (RNI) associated with
++ * this RNC.
++ */
++ u16 remote_node_index;
++
++ /**
++	 * This field is the recorded suspension code or the reason for the remote node
++ * context suspension.
++ */
++ u32 suspension_code;
++
++ /**
++	 * This field records the destination state the remote node context should
++	 * reach.  It can cause an automatic resume on receiving a suspension
++	 * notification.
++ */
++ enum sci_remote_node_context_destination_state destination_state;
++
++ /**
++ * This field contains the callback function that the user requested to be
++ * called when the requested state transition is complete.
++ */
++ scics_sds_remote_node_context_callback user_callback;
++
++ /**
++ * This field contains the parameter that is called when the user requested
++ * state transition is completed.
++ */
++ void *user_cookie;
++
++ /**
++ * This field contains the data for the object's state machine.
++ */
++ struct sci_base_state_machine sm;
++};
++
++void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
++ u16 remote_node_index);
++
++
++bool sci_remote_node_context_is_ready(
++ struct sci_remote_node_context *sci_rnc);
++
++enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
++ u32 event_code);
++enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
++ scics_sds_remote_node_context_callback callback,
++ void *callback_parameter);
++enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
++ u32 suspend_type,
++ scics_sds_remote_node_context_callback cb_fn,
++ void *cb_p);
++enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
++ scics_sds_remote_node_context_callback cb_fn,
++ void *cb_p);
++enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
++ struct isci_request *ireq);
++enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
++ struct isci_request *ireq);
++
++#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */
+diff --git a/drivers/scsi/isci/remote_node_table.c b/drivers/scsi/isci/remote_node_table.c
+new file mode 100644
+index 0000000..301b314
+--- /dev/null
++++ b/drivers/scsi/isci/remote_node_table.c
+@@ -0,0 +1,598 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/**
++ * This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE
++ * public, protected, and private methods.
++ *
++ *
++ */
++#include "remote_node_table.h"
++#include "remote_node_context.h"
++
++/**
++ *
++ * @remote_node_table: This is the remote node index table from which the
++ * selection will be made.
++ * @group_table_index: This is the index to the group table from which to
++ * search for an available selection.
++ *
++ * This routine will find the first available group in the given group table
++ * and return its position in absolute bit terms (dword index * 32 + bit
++ * index).  Return: the absolute bit position of an available group, or
++ * SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX if none is available.
++ */
++static u32 sci_remote_node_table_get_group_index(
++ struct sci_remote_node_table *remote_node_table,
++ u32 group_table_index)
++{
++ u32 dword_index;
++ u32 *group_table;
++ u32 bit_index;
++
++ group_table = remote_node_table->remote_node_groups[group_table_index];
++
++ for (dword_index = 0; dword_index < remote_node_table->group_array_size; dword_index++) {
++ if (group_table[dword_index] != 0) {
++ for (bit_index = 0; bit_index < 32; bit_index++) {
++ if ((group_table[dword_index] & (1 << bit_index)) != 0) {
++ return (dword_index * 32) + bit_index;
++ }
++ }
++ }
++ }
++
++ return SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX;
++}
++
++/**
++ *
++ * @remote_node_table: [out] This is the remote node table in which to clear
++ *    the selector.
++ * @group_table_index: This is the remote node selector in which the change
++ *    will be made.
++ * @group_index: This is the bit index in the table to be modified.
++ *
++ * This method will clear the group index entry in the specified group index
++ * table.
++ */
++static void sci_remote_node_table_clear_group_index(
++ struct sci_remote_node_table *remote_node_table,
++ u32 group_table_index,
++ u32 group_index)
++{
++ u32 dword_index;
++ u32 bit_index;
++ u32 *group_table;
++
++ BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
++ BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
++
++ dword_index = group_index / 32;
++ bit_index = group_index % 32;
++ group_table = remote_node_table->remote_node_groups[group_table_index];
++
++ group_table[dword_index] = group_table[dword_index] & ~(1 << bit_index);
++}
++
++/**
++ *
++ * @remote_node_table: [out] This is the remote node table in which to set
++ *    the selector.
++ * @group_table_index: This is the remote node selector in which the change
++ *    will be made.
++ * @group_index: This is the bit position in the table to be modified.
++ *
++ * This method will set the group index bit entry in the specified group index
++ * table.
++ */
++static void sci_remote_node_table_set_group_index(
++ struct sci_remote_node_table *remote_node_table,
++ u32 group_table_index,
++ u32 group_index)
++{
++ u32 dword_index;
++ u32 bit_index;
++ u32 *group_table;
++
++ BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
++ BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
++
++ dword_index = group_index / 32;
++ bit_index = group_index % 32;
++ group_table = remote_node_table->remote_node_groups[group_table_index];
++
++ group_table[dword_index] = group_table[dword_index] | (1 << bit_index);
++}
++
++/**
++ *
++ * @remote_node_table: [out] This is the remote node table in which to modify
++ *    the remote node availability.
++ * @remote_node_index: This is the remote node index that is being returned to
++ *    the table.
++ *
++ * This method will set the remote node as available in the remote node
++ * allocation table.
++ */
++static void sci_remote_node_table_set_node_index(
++ struct sci_remote_node_table *remote_node_table,
++ u32 remote_node_index)
++{
++ u32 dword_location;
++ u32 dword_remainder;
++ u32 slot_normalized;
++ u32 slot_position;
++
++ BUG_ON(
++ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
++ <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
++ );
++
++ dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
++ dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
++ slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
++ slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
++
++ remote_node_table->available_remote_nodes[dword_location] |=
++ 1 << (slot_normalized + slot_position);
++}
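++
++/* Worked example (editorial, assuming SCIC_SDS_REMOTE_NODES_PER_DWORD is 24,
++ * i.e. eight 4-bit slots of SCU_STP_REMOTE_NODE_COUNT(3) nodes per u32, as
++ * the arithmetic above implies): remote_node_index = 10 gives
++ * dword_location = 10 / 24 = 0, dword_remainder = 10, slot_normalized =
++ * (10 / 3) * 4 = 12 and slot_position = 10 % 3 = 1, so bit 13 of dword 0
++ * is set.
++ */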
++
++/**
++ *
++ * @remote_node_table: [out] This is the remote node table from which to clear
++ *    the available remote node bit.
++ * @remote_node_index: This is the remote node index which is to be cleared
++ *    from the table.
++ *
++ * This method clears the remote node index from the table of available remote
++ * nodes.
++ */
++static void sci_remote_node_table_clear_node_index(
++ struct sci_remote_node_table *remote_node_table,
++ u32 remote_node_index)
++{
++ u32 dword_location;
++ u32 dword_remainder;
++ u32 slot_position;
++ u32 slot_normalized;
++
++ BUG_ON(
++ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
++ <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
++ );
++
++ dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
++ dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
++ slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
++ slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
++
++ remote_node_table->available_remote_nodes[dword_location] &=
++ ~(1 << (slot_normalized + slot_position));
++}
++
++/**
++ *
++ * @remote_node_table: [out] The remote node table from which the slot will be
++ *    cleared.
++ * @group_index: The index for the slot that is to be cleared.
++ *
++ * This method clears the entire table slot at the specified slot index.
++ */
++static void sci_remote_node_table_clear_group(
++ struct sci_remote_node_table *remote_node_table,
++ u32 group_index)
++{
++ u32 dword_location;
++ u32 dword_remainder;
++ u32 dword_value;
++
++ BUG_ON(
++ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
++ <= (group_index / SCU_STP_REMOTE_NODE_COUNT)
++ );
++
++ dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
++ dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
++
++ dword_value = remote_node_table->available_remote_nodes[dword_location];
++ dword_value &= ~(SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
++ remote_node_table->available_remote_nodes[dword_location] = dword_value;
++}
++
++/**
++ *
++ * @remote_node_table: The remote node table to be modified.
++ * @group_index: The index of the group to be set.
++ *
++ * This method sets an entire remote node group in the remote node table.
++ */
++static void sci_remote_node_table_set_group(
++ struct sci_remote_node_table *remote_node_table,
++ u32 group_index)
++{
++ u32 dword_location;
++ u32 dword_remainder;
++ u32 dword_value;
++
++ BUG_ON(
++ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
++ <= (group_index / SCU_STP_REMOTE_NODE_COUNT)
++ );
++
++ dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
++ dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
++
++ dword_value = remote_node_table->available_remote_nodes[dword_location];
++ dword_value |= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
++ remote_node_table->available_remote_nodes[dword_location] = dword_value;
++}
++
++/**
++ *
++ * @remote_node_table: This is the remote node table for which the group
++ *    value is to be returned.
++ * @group_index: This is the group index to use to find the group value.
++ *
++ * This method will return the group value for the specified group index, i.e.
++ * the bit values at the specified remote node group index.
++ */
++static u8 sci_remote_node_table_get_group_value(
++ struct sci_remote_node_table *remote_node_table,
++ u32 group_index)
++{
++ u32 dword_location;
++ u32 dword_remainder;
++ u32 dword_value;
++
++ dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
++ dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
++
++ dword_value = remote_node_table->available_remote_nodes[dword_location];
++ dword_value &= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
++ dword_value = dword_value >> (dword_remainder * 4);
++
++ return (u8)dword_value;
++}
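++
++/* Illustrative example (editorial, assuming SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD
++ * is 8, one 4-bit slot per group): group_index = 5 reads dword 0, masks with
++ * SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << 20 and shifts the result down
++ * by 20 bits, yielding that group's 4-bit availability value.
++ */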
++
++/**
++ *
++ * @remote_node_table: [out] The remote node table which is to be initialized.
++ * @remote_node_entries: The number of entries to put in the table.
++ *
++ * This method will initialize the remote node table for use.
++ */
++void sci_remote_node_table_initialize(
++ struct sci_remote_node_table *remote_node_table,
++ u32 remote_node_entries)
++{
++ u32 index;
++
++ /*
++	 * Initialize the raw data.  We could improve the speed by only
++	 * initializing those entries that are actually going to be used */
++ memset(
++ remote_node_table->available_remote_nodes,
++ 0x00,
++ sizeof(remote_node_table->available_remote_nodes)
++ );
++
++ memset(
++ remote_node_table->remote_node_groups,
++ 0x00,
++ sizeof(remote_node_table->remote_node_groups)
++ );
++
++ /* Initialize the available remote node sets */
++ remote_node_table->available_nodes_array_size = (u16)
++ (remote_node_entries / SCIC_SDS_REMOTE_NODES_PER_DWORD)
++ + ((remote_node_entries % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0);
++
++
++ /* Initialize each full DWORD to a FULL SET of remote nodes */
++ for (index = 0; index < remote_node_entries; index++) {
++ sci_remote_node_table_set_node_index(remote_node_table, index);
++ }
++
++ remote_node_table->group_array_size = (u16)
++ (remote_node_entries / (SCU_STP_REMOTE_NODE_COUNT * 32))
++ + ((remote_node_entries % (SCU_STP_REMOTE_NODE_COUNT * 32)) != 0);
++
++ for (index = 0; index < (remote_node_entries / SCU_STP_REMOTE_NODE_COUNT); index++) {
++ /*
++ * These are all guaranteed to be full slot values so fill them in the
++ * available sets of 3 remote nodes */
++ sci_remote_node_table_set_group_index(remote_node_table, 2, index);
++ }
++
++ /* Now fill in any remainders that we may find */
++ if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) {
++ sci_remote_node_table_set_group_index(remote_node_table, 1, index);
++ } else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) {
++ sci_remote_node_table_set_group_index(remote_node_table, 0, index);
++ }
++}
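++
++/* Worked example (editorial): for remote_node_entries = 8, all eight nodes
++ * are first marked available, groups 0 and 1 (node indices 0-5) are marked in
++ * the triple group table (table 2), and the remaining 8 % 3 = 2 nodes leave
++ * group 2 eligible only for the dual group table (table 1).
++ */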
++
++/**
++ *
++ * @remote_node_table: [out] The remote node table from which to allocate a
++ *    remote node.
++ * @group_table_index: The group index that is to be used for the search.
++ *
++ * This method will allocate a single RNi from the remote node table.  The
++ * table index will determine from which remote node group table to search.
++ * This search may fail and another group node table can be specified.  The
++ * function is designed to allow a search of the available single remote node
++ * group up to the triple remote node group.  If an entry is found in the
++ * specified table the remote node is removed and the remote node groups are
++ * updated.  Return: the RNi value, or an invalid remote node context if an
++ * RNi cannot be found.
++ */
++static u16 sci_remote_node_table_allocate_single_remote_node(
++ struct sci_remote_node_table *remote_node_table,
++ u32 group_table_index)
++{
++ u8 index;
++ u8 group_value;
++ u32 group_index;
++ u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
++
++ group_index = sci_remote_node_table_get_group_index(
++ remote_node_table, group_table_index);
++
++ /* We could not find an available slot in the table selector 0 */
++ if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
++ group_value = sci_remote_node_table_get_group_value(
++ remote_node_table, group_index);
++
++ for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) {
++ if (((1 << index) & group_value) != 0) {
++ /* We have selected a bit now clear it */
++ remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT
++ + index);
++
++ sci_remote_node_table_clear_group_index(
++ remote_node_table, group_table_index, group_index
++ );
++
++ sci_remote_node_table_clear_node_index(
++ remote_node_table, remote_node_index
++ );
++
++ if (group_table_index > 0) {
++ sci_remote_node_table_set_group_index(
++ remote_node_table, group_table_index - 1, group_index
++ );
++ }
++
++ break;
++ }
++ }
++ }
++
++ return remote_node_index;
++}
++
++/**
++ *
++ * @remote_node_table: This is the remote node table from which to allocate the
++ * remote node entries.
++ * @group_table_index: This is the group table index which must equal two (2)
++ * for this operation.
++ *
++ * This method will allocate three consecutive remote node context entries. If
++ * there are no remaining triple entries the function will return a failure.
++ * Return: the remote node index that represents three consecutive remote node
++ * entries, or an invalid remote node context if none can be found.
++ */
++static u16 sci_remote_node_table_allocate_triple_remote_node(
++ struct sci_remote_node_table *remote_node_table,
++ u32 group_table_index)
++{
++ u32 group_index;
++ u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
++
++ group_index = sci_remote_node_table_get_group_index(
++ remote_node_table, group_table_index);
++
++ if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
++ remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT;
++
++ sci_remote_node_table_clear_group_index(
++ remote_node_table, group_table_index, group_index
++ );
++
++ sci_remote_node_table_clear_group(
++ remote_node_table, group_index
++ );
++ }
++
++ return remote_node_index;
++}
++
++/**
++ *
++ * @remote_node_table: This is the remote node table from which the remote node
++ * allocation is to take place.
++ * @remote_node_count: This is the remote node count which is one of
++ *    SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3).
++ *
++ * This method will allocate a remote node that matches the remote node count
++ * specified by the caller.  Valid values for the remote node count are
++ * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3).  Return: the
++ * remote node index that is allocated, or an invalid remote node context.
++ */
++u16 sci_remote_node_table_allocate_remote_node(
++ struct sci_remote_node_table *remote_node_table,
++ u32 remote_node_count)
++{
++ u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
++
++ if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
++ remote_node_index =
++ sci_remote_node_table_allocate_single_remote_node(
++ remote_node_table, 0);
++
++ if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
++ remote_node_index =
++ sci_remote_node_table_allocate_single_remote_node(
++ remote_node_table, 1);
++ }
++
++ if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
++ remote_node_index =
++ sci_remote_node_table_allocate_single_remote_node(
++ remote_node_table, 2);
++ }
++ } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
++ remote_node_index =
++ sci_remote_node_table_allocate_triple_remote_node(
++ remote_node_table, 2);
++ }
++
++ return remote_node_index;
++}
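++
++/* Usage sketch (illustrative only; 'rnt' stands in for a controller-owned
++ * table): callers request the node count matching the device type and must
++ * check for the invalid index before using the result.
++ *
++ *     u16 rni = sci_remote_node_table_allocate_remote_node(
++ *                     rnt, SCU_STP_REMOTE_NODE_COUNT);
++ *     if (rni != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
++ *             sci_remote_node_table_release_remote_node_index(
++ *                     rnt, SCU_STP_REMOTE_NODE_COUNT, rni);
++ */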
++
++/**
++ *
++ * @remote_node_table: The remote node table to which the remote node index
++ * is to be freed.
++ * @remote_node_index: The remote node index that is to be freed.
++ *
++ * This method will free a single remote node index back to the remote node
++ * table. This routine will update the remote node groups.
++ */
++static void sci_remote_node_table_release_single_remote_node(
++ struct sci_remote_node_table *remote_node_table,
++ u16 remote_node_index)
++{
++ u32 group_index;
++ u8 group_value;
++
++ group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
++
++ group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index);
++
++ /*
++ * Assert that we are not trying to add an entry to a slot that is already
++ * full. */
++ BUG_ON(group_value == SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE);
++
++ if (group_value == 0x00) {
++ /*
++ * There are no entries in this slot so it must be added to the single
++ * slot table. */
++ sci_remote_node_table_set_group_index(remote_node_table, 0, group_index);
++ } else if ((group_value & (group_value - 1)) == 0) {
++ /*
++ * There is only one entry in this slot so it must be moved from the
++ * single slot table to the dual slot table */
++ sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
++ sci_remote_node_table_set_group_index(remote_node_table, 1, group_index);
++ } else {
++ /*
++ * There are two entries in the slot so it must be moved from the dual
++ * slot table to the triple slot table. */
++ sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
++ sci_remote_node_table_set_group_index(remote_node_table, 2, group_index);
++ }
++
++ sci_remote_node_table_set_node_index(remote_node_table, remote_node_index);
++}
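++
++/* The selector bookkeeping performed above can be summarized by the nibble
++ * population before the release:
++ *
++ *     0 bits set  -> group added to selector 0
++ *     1 bit set   -> group moved from selector 0 to selector 1
++ *     2 bits set  -> group moved from selector 1 to selector 2
++ */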
++
++/**
++ *
++ * @remote_node_table: This is the remote node table to which the remote node
++ * index is to be freed.
++ * @remote_node_index: The first of the three consecutive remote node indices
++ * that are to be freed.
++ *
++ * This method will release a group of three consecutive remote nodes back to
++ * the pool of free remote nodes.
++ */
++static void sci_remote_node_table_release_triple_remote_node(
++ struct sci_remote_node_table *remote_node_table,
++ u16 remote_node_index)
++{
++ u32 group_index;
++
++ group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
++
++ sci_remote_node_table_set_group_index(
++ remote_node_table, 2, group_index
++ );
++
++ sci_remote_node_table_set_group(remote_node_table, group_index);
++}
++
++/**
++ *
++ * @remote_node_table: The remote node table to which the remote node index is
++ * to be freed.
++ * @remote_node_count: This is the count of consecutive remote nodes that are
++ * to be freed.
++ * @remote_node_index: The remote node index that is to be freed.
++ *
++ * This method will release the remote node index back into the remote node
++ * table free pool.
++ */
++void sci_remote_node_table_release_remote_node_index(
++ struct sci_remote_node_table *remote_node_table,
++ u32 remote_node_count,
++ u16 remote_node_index)
++{
++ if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
++ sci_remote_node_table_release_single_remote_node(
++ remote_node_table, remote_node_index);
++ } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
++ sci_remote_node_table_release_triple_remote_node(
++ remote_node_table, remote_node_index);
++ }
++}
++
+diff --git a/drivers/scsi/isci/remote_node_table.h b/drivers/scsi/isci/remote_node_table.h
+new file mode 100644
+index 0000000..721ab98
+--- /dev/null
++++ b/drivers/scsi/isci/remote_node_table.h
+@@ -0,0 +1,188 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _SCIC_SDS_REMOTE_NODE_TABLE_H_
++#define _SCIC_SDS_REMOTE_NODE_TABLE_H_
++
++#include "isci.h"
++
++/**
++ *
++ *
++ * Remote node sets are sets of remote node indices in the remote node table.
++ * The SCU hardware requires that STP remote node entries take three
++ * consecutive remote node indices, so the table is arranged in sets of three.
++ * The bits are used as 0111 0111 to make a byte, and the bits define the set
++ * of three remote nodes to use as a sequence.
++ */
++#define SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE 2
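++
++/*
++ * Worked example (illustrative): remote node index (RNi) 13 belongs to group
++ * 13 / 3 = 4 and is bit 13 % 3 = 1 within that group's nibble. Group 4
++ * occupies bits 16-19 of dword 0 of available_remote_nodes, so RNi 13 is
++ * tracked by bit 17 of that dword. A fully free byte therefore reads
++ * 0111 0111: two sets of three valid bits, with bits 3 and 7 unused.
++ */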
++
++/**
++ *
++ *
++ * Since the remote node table is organized as DWORDs, take the remote node
++ * sets in bytes and represent them in DWORDs. The lowest-ordered bits are the
++ * ones used in case the full DWORD is not being used, i.e. 0000 0000 0000
++ * 0000 0111 0111 0111 0111 if only a single WORD is in use in the DWORD.
++ */
++#define SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD \
++ (sizeof(u32) * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
++/**
++ *
++ *
++ * This is a count of the number of remote nodes that can be represented in a
++ * byte
++ */
++#define SCIC_SDS_REMOTE_NODES_PER_BYTE \
++ (SCU_STP_REMOTE_NODE_COUNT * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
++
++/**
++ *
++ *
++ * This is a count of the number of remote nodes that can be represented in a
++ * DWORD
++ */
++#define SCIC_SDS_REMOTE_NODES_PER_DWORD \
++ (sizeof(u32) * SCIC_SDS_REMOTE_NODES_PER_BYTE)
++
++/**
++ *
++ *
++ * This is the number of bits in a remote node group
++ */
++#define SCIC_SDS_REMOTE_NODES_BITS_PER_GROUP 4
++
++#define SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX (0xFFFFFFFF)
++#define SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE (0x07)
++#define SCIC_SDS_REMOTE_NODE_TABLE_EMPTY_SLOT_VALUE (0x00)
++
++/**
++ *
++ *
++ * Expander attached SATA remote node count
++ */
++#define SCU_STP_REMOTE_NODE_COUNT 3
++
++/**
++ *
++ *
++ * Expander or direct attached SSP remote node count
++ */
++#define SCU_SSP_REMOTE_NODE_COUNT 1
++
++/**
++ *
++ *
++ * Direct attached STP remote node count
++ */
++#define SCU_SATA_REMOTE_NODE_COUNT 1
++
++/**
++ * struct sci_remote_node_table - Tracks which remote node context entries
++ * are free and groups them by free-entry count for fast allocation.
++ */
++struct sci_remote_node_table {
++ /**
++ * This field contains the array size in dwords
++ */
++ u16 available_nodes_array_size;
++
++ /**
++ * This field contains the array size, in dwords, of each remote node
++ * group table
++ */
++ u16 group_array_size;
++
++ /**
++ * This field is the array of available remote node entries in bits.
++ * Because of the way STP remote node data is allocated on the SCU hardware
++ * the remote nodes must occupy three consecutive remote node context
++ * entries. For ease of allocation and de-allocation we have broken the
++ * sets of three into a single nibble. When the STP RNi is allocated all
++ * of the bits in the nibble are cleared. This math results in a table size
++ * of MAX_REMOTE_NODES / CONSECUTIVE RNi ENTRIES for STP / 2 entries per byte.
++ */
++ u32 available_remote_nodes[
++ (SCI_MAX_REMOTE_DEVICES / SCIC_SDS_REMOTE_NODES_PER_DWORD)
++ + ((SCI_MAX_REMOTE_DEVICES % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0)];
++
++ /**
++ * This field is the nibble selector for the above table. There are three
++ * possible selectors, each for fast lookup when trying to find one, two or
++ * three remote node entries.
++ */
++ u32 remote_node_groups[
++ SCU_STP_REMOTE_NODE_COUNT][
++ (SCI_MAX_REMOTE_DEVICES / (32 * SCU_STP_REMOTE_NODE_COUNT))
++ + ((SCI_MAX_REMOTE_DEVICES % (32 * SCU_STP_REMOTE_NODE_COUNT)) != 0)];
++
++};
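++
++/*
++ * Sizing sketch (illustrative; assuming SCI_MAX_REMOTE_DEVICES is, say,
++ * 256): available_remote_nodes would need 256 / 24 = 10 dwords plus one for
++ * the 16-node remainder, i.e. 11 dwords, while each of the three
++ * remote_node_groups selectors would need 256 / 96 = 2 dwords plus one for
++ * the remainder, i.e. 3 dwords.
++ */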
++
++/* --------------------------------------------------------------------------- */
++
++void sci_remote_node_table_initialize(
++ struct sci_remote_node_table *remote_node_table,
++ u32 remote_node_entries);
++
++u16 sci_remote_node_table_allocate_remote_node(
++ struct sci_remote_node_table *remote_node_table,
++ u32 remote_node_count);
++
++void sci_remote_node_table_release_remote_node_index(
++ struct sci_remote_node_table *remote_node_table,
++ u32 remote_node_count,
++ u16 remote_node_index);
++
++#endif /* _SCIC_SDS_REMOTE_NODE_TABLE_H_ */
+diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
+new file mode 100644
+index 0000000..b5d3a8c
+--- /dev/null
++++ b/drivers/scsi/isci/request.c
+@@ -0,0 +1,3393 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "isci.h"
++#include "task.h"
++#include "request.h"
++#include "scu_completion_codes.h"
++#include "scu_event_codes.h"
++#include "sas.h"
++
++static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
++ int idx)
++{
++ if (idx == 0)
++ return &ireq->tc->sgl_pair_ab;
++ else if (idx == 1)
++ return &ireq->tc->sgl_pair_cd;
++ else if (idx < 0)
++ return NULL;
++ else
++ return &ireq->sg_table[idx - 2];
++}
++
++static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
++ struct isci_request *ireq, u32 idx)
++{
++ u32 offset;
++
++ if (idx == 0) {
++ offset = (void *) &ireq->tc->sgl_pair_ab -
++ (void *) &ihost->task_context_table[0];
++ return ihost->task_context_dma + offset;
++ } else if (idx == 1) {
++ offset = (void *) &ireq->tc->sgl_pair_cd -
++ (void *) &ihost->task_context_table[0];
++ return ihost->task_context_dma + offset;
++ }
++
++ return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
++}
++
++static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
++{
++ e->length = sg_dma_len(sg);
++ e->address_upper = upper_32_bits(sg_dma_address(sg));
++ e->address_lower = lower_32_bits(sg_dma_address(sg));
++ e->address_modifier = 0;
++}
++
++static void sci_request_build_sgl(struct isci_request *ireq)
++{
++ struct isci_host *ihost = ireq->isci_host;
++ struct sas_task *task = isci_request_access_task(ireq);
++ struct scatterlist *sg = NULL;
++ dma_addr_t dma_addr;
++ u32 sg_idx = 0;
++ struct scu_sgl_element_pair *scu_sg = NULL;
++ struct scu_sgl_element_pair *prev_sg = NULL;
++
++ if (task->num_scatter > 0) {
++ sg = task->scatter;
++
++ while (sg) {
++ scu_sg = to_sgl_element_pair(ireq, sg_idx);
++ init_sgl_element(&scu_sg->A, sg);
++ sg = sg_next(sg);
++ if (sg) {
++ init_sgl_element(&scu_sg->B, sg);
++ sg = sg_next(sg);
++ } else
++ memset(&scu_sg->B, 0, sizeof(scu_sg->B));
++
++ if (prev_sg) {
++ dma_addr = to_sgl_element_pair_dma(ihost,
++ ireq,
++ sg_idx);
++
++ prev_sg->next_pair_upper =
++ upper_32_bits(dma_addr);
++ prev_sg->next_pair_lower =
++ lower_32_bits(dma_addr);
++ }
++
++ prev_sg = scu_sg;
++ sg_idx++;
++ }
++ } else { /* handle when no sg */
++ scu_sg = to_sgl_element_pair(ireq, sg_idx);
++
++ dma_addr = dma_map_single(&ihost->pdev->dev,
++ task->scatter,
++ task->total_xfer_len,
++ task->data_dir);
++
++ ireq->zero_scatter_daddr = dma_addr;
++
++ scu_sg->A.length = task->total_xfer_len;
++ scu_sg->A.address_upper = upper_32_bits(dma_addr);
++ scu_sg->A.address_lower = lower_32_bits(dma_addr);
++ }
++
++ if (scu_sg) {
++ scu_sg->next_pair_upper = 0;
++ scu_sg->next_pair_lower = 0;
++ }
++}
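++
++/* Resulting chain: pair 0 lives in the task context as sgl_pair_ab, pair 1
++ * as sgl_pair_cd, and pairs 2..n in ireq->sg_table. Each pair's
++ * next_pair_{upper,lower} holds the DMA address of the next pair; the final
++ * pair's next pointer is zeroed above.
++ */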
++
++static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
++{
++ struct ssp_cmd_iu *cmd_iu;
++ struct sas_task *task = isci_request_access_task(ireq);
++
++ cmd_iu = &ireq->ssp.cmd;
++
++ memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
++ cmd_iu->add_cdb_len = 0;
++ cmd_iu->_r_a = 0;
++ cmd_iu->_r_b = 0;
++ cmd_iu->en_fburst = 0; /* unsupported */
++ cmd_iu->task_prio = task->ssp_task.task_prio;
++ cmd_iu->task_attr = task->ssp_task.task_attr;
++ cmd_iu->_r_c = 0;
++
++ sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
++ sizeof(task->ssp_task.cdb) / sizeof(u32));
++}
++
++static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
++{
++ struct ssp_task_iu *task_iu;
++ struct sas_task *task = isci_request_access_task(ireq);
++ struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
++
++ task_iu = &ireq->ssp.tmf;
++
++ memset(task_iu, 0, sizeof(struct ssp_task_iu));
++
++ memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
++
++ task_iu->task_func = isci_tmf->tmf_code;
++ task_iu->task_tag =
++ (ireq->ttype == tmf_task) ?
++ isci_tmf->io_tag :
++ SCI_CONTROLLER_INVALID_IO_TAG;
++}
++
++/**
++ * This method will fill in the SCU Task Context for any type of SSP request.
++ * @ireq: The IO request for which the task context is being constructed.
++ * @task_context: The buffer pointer for the SCU task context being constructed.
++ *
++ */
++static void scu_ssp_reqeust_construct_task_context(
++ struct isci_request *ireq,
++ struct scu_task_context *task_context)
++{
++ dma_addr_t dma_addr;
++ struct isci_remote_device *idev;
++ struct isci_port *iport;
++
++ idev = ireq->target_device;
++ iport = idev->owning_port;
++
++ /* Fill in the TC with its required data */
++ task_context->abort = 0;
++ task_context->priority = 0;
++ task_context->initiator_request = 1;
++ task_context->connection_rate = idev->connection_rate;
++ task_context->protocol_engine_index = ISCI_PEG;
++ task_context->logical_port_index = iport->physical_port_index;
++ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
++ task_context->valid = SCU_TASK_CONTEXT_VALID;
++ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
++
++ task_context->remote_node_index = idev->rnc.remote_node_index;
++ task_context->command_code = 0;
++
++ task_context->link_layer_control = 0;
++ task_context->do_not_dma_ssp_good_response = 1;
++ task_context->strict_ordering = 0;
++ task_context->control_frame = 0;
++ task_context->timeout_enable = 0;
++ task_context->block_guard_enable = 0;
++
++ task_context->address_modifier = 0;
++
++ /* task_context->type.ssp.tag = ireq->io_tag; */
++ task_context->task_phase = 0x01;
++
++ ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
++ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
++ (iport->physical_port_index <<
++ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
++ ISCI_TAG_TCI(ireq->io_tag));
++
++ /*
++ * Copy the physical address for the command buffer to the
++ * SCU Task Context
++ */
++ dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
++
++ task_context->command_iu_upper = upper_32_bits(dma_addr);
++ task_context->command_iu_lower = lower_32_bits(dma_addr);
++
++ /*
++ * Copy the physical address for the response buffer to the
++ * SCU Task Context
++ */
++ dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
++
++ task_context->response_iu_upper = upper_32_bits(dma_addr);
++ task_context->response_iu_lower = lower_32_bits(dma_addr);
++}
++
++/**
++ * This method will fill in the SCU Task Context for an SSP IO request.
++ * @ireq: The IO request for which the task context is being constructed.
++ *
++ */
++static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
++ enum dma_data_direction dir,
++ u32 len)
++{
++ struct scu_task_context *task_context = ireq->tc;
++
++ scu_ssp_reqeust_construct_task_context(ireq, task_context);
++
++ task_context->ssp_command_iu_length =
++ sizeof(struct ssp_cmd_iu) / sizeof(u32);
++ task_context->type.ssp.frame_type = SSP_COMMAND;
++
++ switch (dir) {
++ case DMA_FROM_DEVICE:
++ case DMA_NONE:
++ default:
++ task_context->task_type = SCU_TASK_TYPE_IOREAD;
++ break;
++ case DMA_TO_DEVICE:
++ task_context->task_type = SCU_TASK_TYPE_IOWRITE;
++ break;
++ }
++
++ task_context->transfer_length_bytes = len;
++
++ if (task_context->transfer_length_bytes > 0)
++ sci_request_build_sgl(ireq);
++}
++
++/**
++ * This method will fill in the SCU Task Context for an SSP Task request. The
++ * following important settings are utilized: -# priority ==
++ * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued
++ * ahead of other tasks destined for the same Remote Node. -# task_type ==
++ * SCU_TASK_TYPE_RAW_FRAME. This indicates that a raw task frame is being
++ * utilized to perform task management. -#
++ * control_frame == 1. This ensures that the proper endianness is set so
++ * that the bytes are transmitted in the right order for a task frame.
++ * @ireq: This parameter specifies the task request object being
++ * constructed.
++ *
++ */
++static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
++{
++ struct scu_task_context *task_context = ireq->tc;
++
++ scu_ssp_reqeust_construct_task_context(ireq, task_context);
++
++ task_context->control_frame = 1;
++ task_context->priority = SCU_TASK_PRIORITY_HIGH;
++ task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
++ task_context->transfer_length_bytes = 0;
++ task_context->type.ssp.frame_type = SSP_TASK;
++ task_context->ssp_command_iu_length =
++ sizeof(struct ssp_task_iu) / sizeof(u32);
++}
++
++/**
++ * This method will fill in the SCU Task Context for any type of SATA
++ * request. This is called from the various SATA constructors.
++ * @ireq: The general IO request object which is to be used in
++ * constructing the SCU task context.
++ * @task_context: The buffer pointer for the SCU task context which is being
++ * constructed.
++ *
++ * The general IO request construction is complete. The buffer assignment for
++ * the command buffer is complete. TODO: Revisit task context construction to
++ * determine what is common for SSP/SMP/STP task context structures.
++ */
++static void scu_sata_reqeust_construct_task_context(
++ struct isci_request *ireq,
++ struct scu_task_context *task_context)
++{
++ dma_addr_t dma_addr;
++ struct isci_remote_device *idev;
++ struct isci_port *iport;
++
++ idev = ireq->target_device;
++ iport = idev->owning_port;
++
++ /* Fill in the TC with its required data */
++ task_context->abort = 0;
++ task_context->priority = SCU_TASK_PRIORITY_NORMAL;
++ task_context->initiator_request = 1;
++ task_context->connection_rate = idev->connection_rate;
++ task_context->protocol_engine_index = ISCI_PEG;
++ task_context->logical_port_index = iport->physical_port_index;
++ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
++ task_context->valid = SCU_TASK_CONTEXT_VALID;
++ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
++
++ task_context->remote_node_index = idev->rnc.remote_node_index;
++ task_context->command_code = 0;
++
++ task_context->link_layer_control = 0;
++ task_context->do_not_dma_ssp_good_response = 1;
++ task_context->strict_ordering = 0;
++ task_context->control_frame = 0;
++ task_context->timeout_enable = 0;
++ task_context->block_guard_enable = 0;
++
++ task_context->address_modifier = 0;
++ task_context->task_phase = 0x01;
++
++ task_context->ssp_command_iu_length =
++ (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
++
++ /* Set the first word of the H2D REG FIS */
++ task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
++
++ ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
++ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
++ (iport->physical_port_index <<
++ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
++ ISCI_TAG_TCI(ireq->io_tag));
++ /*
++ * Copy the physical address for the command buffer to the SCU Task
++ * Context. We must offset the command buffer by 4 bytes because the
++ * first 4 bytes are transferred in the body of the TC.
++ */
++ dma_addr = sci_io_request_get_dma_addr(ireq,
++ ((char *) &ireq->stp.cmd) +
++ sizeof(u32));
++
++ task_context->command_iu_upper = upper_32_bits(dma_addr);
++ task_context->command_iu_lower = lower_32_bits(dma_addr);
++
++ /* SATA Requests do not have a response buffer */
++ task_context->response_iu_upper = 0;
++ task_context->response_iu_lower = 0;
++}
++
++static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
++{
++ struct scu_task_context *task_context = ireq->tc;
++
++ scu_sata_reqeust_construct_task_context(ireq, task_context);
++
++ task_context->control_frame = 0;
++ task_context->priority = SCU_TASK_PRIORITY_NORMAL;
++ task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
++ task_context->type.stp.fis_type = FIS_REGH2D;
++ task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
++}
++
++static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
++ bool copy_rx_frame)
++{
++ struct isci_stp_request *stp_req = &ireq->stp.req;
++
++ scu_stp_raw_request_construct_task_context(ireq);
++
++ stp_req->status = 0;
++ stp_req->sgl.offset = 0;
++ stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
++
++ if (copy_rx_frame) {
++ sci_request_build_sgl(ireq);
++ stp_req->sgl.index = 0;
++ } else {
++ /* The user does not want the data copied to the SGL buffer location */
++ stp_req->sgl.index = -1;
++ }
++
++ return SCI_SUCCESS;
++}
++
++/**
++ *
++ * @ireq: This parameter specifies the request to be constructed as an
++ * optimized request.
++ * @optimized_task_type: This parameter specifies whether the request is to be
++ * a UDMA request or an NCQ request. - A value of 0 indicates UDMA. - A
++ * value of 1 indicates NCQ.
++ *
++ * This method will perform request construction common to all types of STP
++ * requests that are optimized by the silicon (i.e. UDMA, NCQ).
++ */
++static void sci_stp_optimized_request_construct(struct isci_request *ireq,
++ u8 optimized_task_type,
++ u32 len,
++ enum dma_data_direction dir)
++{
++ struct scu_task_context *task_context = ireq->tc;
++
++ /* Build the STP task context structure */
++ scu_sata_reqeust_construct_task_context(ireq, task_context);
++
++ /* Copy over the SGL elements */
++ sci_request_build_sgl(ireq);
++
++ /* Copy over the number of bytes to be transferred */
++ task_context->transfer_length_bytes = len;
++
++ if (dir == DMA_TO_DEVICE) {
++ /*
++ * The difference between the DMA IN and DMA OUT request task type
++ * values is consistent with the difference between FPDMA READ
++ * and FPDMA WRITE values. Add the supplied task type parameter
++ * to this difference to set the task type properly for this
++ * DATA OUT (WRITE) case. */
++ task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
++ - SCU_TASK_TYPE_DMA_IN);
++ } else {
++ /*
++ * For the DATA IN (READ) case, simply save the supplied
++ * optimized task type. */
++ task_context->task_type = optimized_task_type;
++ }
++}
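++
++/* Worked example (illustrative, relying on the value-spacing noted in the
++ * comment above): for an NCQ write, optimized_task_type is
++ * SCU_TASK_TYPE_FPDMAQ_READ, and adding (SCU_TASK_TYPE_DMA_OUT -
++ * SCU_TASK_TYPE_DMA_IN) yields the corresponding FPDMA write task type.
++ */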
++
++
++
++static enum sci_status
++sci_io_request_construct_sata(struct isci_request *ireq,
++ u32 len,
++ enum dma_data_direction dir,
++ bool copy)
++{
++ enum sci_status status = SCI_SUCCESS;
++ struct sas_task *task = isci_request_access_task(ireq);
++
++ /* check for management protocols */
++ if (ireq->ttype == tmf_task) {
++ struct isci_tmf *tmf = isci_request_access_tmf(ireq);
++
++ if (tmf->tmf_code == isci_tmf_sata_srst_high ||
++ tmf->tmf_code == isci_tmf_sata_srst_low) {
++ scu_stp_raw_request_construct_task_context(ireq);
++ return SCI_SUCCESS;
++ } else {
++ dev_err(&ireq->owning_controller->pdev->dev,
++ "%s: Request 0x%p received un-handled SAT "
++ "management protocol 0x%x.\n",
++ __func__, ireq, tmf->tmf_code);
++
++ return SCI_FAILURE;
++ }
++ }
++
++ if (!sas_protocol_ata(task->task_proto)) {
++ dev_err(&ireq->owning_controller->pdev->dev,
++ "%s: Non-ATA protocol in SATA path: 0x%x\n",
++ __func__,
++ task->task_proto);
++ return SCI_FAILURE;
++
++ }
++
++ /* non data */
++ if (task->data_dir == DMA_NONE) {
++ scu_stp_raw_request_construct_task_context(ireq);
++ return SCI_SUCCESS;
++ }
++
++ /* NCQ */
++ if (task->ata_task.use_ncq) {
++ sci_stp_optimized_request_construct(ireq,
++ SCU_TASK_TYPE_FPDMAQ_READ,
++ len, dir);
++ return SCI_SUCCESS;
++ }
++
++ /* DMA */
++ if (task->ata_task.dma_xfer) {
++ sci_stp_optimized_request_construct(ireq,
++ SCU_TASK_TYPE_DMA_IN,
++ len, dir);
++ return SCI_SUCCESS;
++ } else /* PIO */
++ return sci_stp_pio_request_construct(ireq, copy);
++
++ return status;
++}
++
++static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
++{
++ struct sas_task *task = isci_request_access_task(ireq);
++
++ ireq->protocol = SCIC_SSP_PROTOCOL;
++
++ scu_ssp_io_request_construct_task_context(ireq,
++ task->data_dir,
++ task->total_xfer_len);
++
++ sci_io_request_build_ssp_command_iu(ireq);
++
++ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
++
++ return SCI_SUCCESS;
++}
++
++enum sci_status sci_task_request_construct_ssp(
++ struct isci_request *ireq)
++{
++ /* Construct the SSP Task SCU Task Context */
++ scu_ssp_task_request_construct_task_context(ireq);
++
++ /* Fill in the SSP Task IU */
++ sci_task_request_build_ssp_task_iu(ireq);
++
++ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
++
++ return SCI_SUCCESS;
++}
++
++static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
++{
++ enum sci_status status;
++ bool copy = false;
++ struct sas_task *task = isci_request_access_task(ireq);
++
++ ireq->protocol = SCIC_STP_PROTOCOL;
++
++ copy = (task->data_dir == DMA_NONE) ? false : true;
++
++ status = sci_io_request_construct_sata(ireq,
++ task->total_xfer_len,
++ task->data_dir,
++ copy);
++
++ if (status == SCI_SUCCESS)
++ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
++
++ return status;
++}
++
++enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
++{
++ enum sci_status status = SCI_SUCCESS;
++
++ /* check for management protocols */
++ if (ireq->ttype == tmf_task) {
++ struct isci_tmf *tmf = isci_request_access_tmf(ireq);
++
++ if (tmf->tmf_code == isci_tmf_sata_srst_high ||
++ tmf->tmf_code == isci_tmf_sata_srst_low) {
++ scu_stp_raw_request_construct_task_context(ireq);
++ } else {
++ dev_err(&ireq->owning_controller->pdev->dev,
++ "%s: Request 0x%p received un-handled SAT "
++ "Protocol 0x%x.\n",
++ __func__, ireq, tmf->tmf_code);
++
++ return SCI_FAILURE;
++ }
++ }
++
++ if (status != SCI_SUCCESS)
++ return status;
++ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
++
++ return status;
++}
++
++/**
++ * sci_req_tx_bytes - bytes transferred when reply underruns request
++ * @ireq: request that was terminated early
++ */
++#define SCU_TASK_CONTEXT_SRAM 0x200000
++static u32 sci_req_tx_bytes(struct isci_request *ireq)
++{
++ struct isci_host *ihost = ireq->owning_controller;
++ u32 ret_val = 0;
++
++ if (readl(&ihost->smu_registers->address_modifier) == 0) {
++ void __iomem *scu_reg_base = ihost->scu_registers;
++
++ /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
++ * BAR1 is the scu_registers
++ * 0x20002C = 0x200000 + 0x2c
++ * = start of task context SRAM + offset of (type.ssp.data_offset)
++ * TCi is the io_tag of struct sci_request
++ */
++ ret_val = readl(scu_reg_base +
++ (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
++ ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
++ }
++
++ return ret_val;
++}
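++
++/* Worked example (illustrative, taking the 256-byte task context size from
++ * the comment above): for ISCI_TAG_TCI(ireq->io_tag) == 5 the read hits
++ * scu_reg_base + 0x200000 + 0x2C + 5 * 0x100 = scu_reg_base + 0x20052C.
++ */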
++
++enum sci_status sci_request_start(struct isci_request *ireq)
++{
++ enum sci_base_request_states state;
++ struct scu_task_context *tc = ireq->tc;
++ struct isci_host *ihost = ireq->owning_controller;
++
++ state = ireq->sm.current_state_id;
++ if (state != SCI_REQ_CONSTRUCTED) {
++ dev_warn(&ihost->pdev->dev,
++ "%s: SCIC IO Request requested to start while in wrong "
++ "state %d\n", __func__, state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++ tc->task_index = ISCI_TAG_TCI(ireq->io_tag);
++
++ switch (tc->protocol_type) {
++ case SCU_TASK_CONTEXT_PROTOCOL_SMP:
++ case SCU_TASK_CONTEXT_PROTOCOL_SSP:
++ /* SSP/SMP Frame */
++ tc->type.ssp.tag = ireq->io_tag;
++ tc->type.ssp.target_port_transfer_tag = 0xFFFF;
++ break;
++
++ case SCU_TASK_CONTEXT_PROTOCOL_STP:
++ /* STP/SATA Frame
++ * tc->type.stp.ncq_tag = ireq->ncq_tag;
++ */
++ break;
++
++ case SCU_TASK_CONTEXT_PROTOCOL_NONE:
++ /* TODO: When do we set no protocol type? */
++ break;
++
++ default:
++ /* This should never happen since we build the IO
++ * requests */
++ break;
++ }
++
++ /* Add to the post_context the io tag value */
++ ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);
++
++ /* Everything is good go ahead and change state */
++ sci_change_state(&ireq->sm, SCI_REQ_STARTED);
++
++ return SCI_SUCCESS;
++}
++
++enum sci_status
++sci_io_request_terminate(struct isci_request *ireq)
++{
++ enum sci_base_request_states state;
++
++ state = ireq->sm.current_state_id;
++
++ switch (state) {
++ case SCI_REQ_CONSTRUCTED:
++ ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
++ ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ return SCI_SUCCESS;
++ case SCI_REQ_STARTED:
++ case SCI_REQ_TASK_WAIT_TC_COMP:
++ case SCI_REQ_SMP_WAIT_RESP:
++ case SCI_REQ_SMP_WAIT_TC_COMP:
++ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
++ case SCI_REQ_STP_UDMA_WAIT_D2H:
++ case SCI_REQ_STP_NON_DATA_WAIT_H2D:
++ case SCI_REQ_STP_NON_DATA_WAIT_D2H:
++ case SCI_REQ_STP_PIO_WAIT_H2D:
++ case SCI_REQ_STP_PIO_WAIT_FRAME:
++ case SCI_REQ_STP_PIO_DATA_IN:
++ case SCI_REQ_STP_PIO_DATA_OUT:
++ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
++ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
++ case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
++ sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
++ return SCI_SUCCESS;
++ case SCI_REQ_TASK_WAIT_TC_RESP:
++ /* The task frame was already confirmed to have been
++ * sent by the SCU HW. Since the state machine is
++ * now only waiting for the task response itself,
++ * abort the request and complete it immediately
++ * and don't wait for the task response.
++ */
++ sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ return SCI_SUCCESS;
++ case SCI_REQ_ABORTING:
++ /* If a request has a termination requested twice, return
++ * a failure indication, since HW confirmation of the first
++ * abort is still outstanding.
++ */
++ case SCI_REQ_COMPLETED:
++ default:
++ dev_warn(&ireq->owning_controller->pdev->dev,
++ "%s: SCIC IO Request requested to abort while in wrong "
++ "state %d\n",
++ __func__,
++ ireq->sm.current_state_id);
++ break;
++ }
++
++ return SCI_FAILURE_INVALID_STATE;
++}
++
++enum sci_status sci_request_complete(struct isci_request *ireq)
++{
++ enum sci_base_request_states state;
++ struct isci_host *ihost = ireq->owning_controller;
++
++ state = ireq->sm.current_state_id;
++ if (WARN_ONCE(state != SCI_REQ_COMPLETED,
++ "isci: request completion from wrong state (%d)\n", state))
++ return SCI_FAILURE_INVALID_STATE;
++
++ if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
++ sci_controller_release_frame(ihost,
++ ireq->saved_rx_frame_index);
++
++ /* XXX can we just stop the machine and remove the 'final' state? */
++ sci_change_state(&ireq->sm, SCI_REQ_FINAL);
++ return SCI_SUCCESS;
++}
++
++enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
++ u32 event_code)
++{
++ enum sci_base_request_states state;
++ struct isci_host *ihost = ireq->owning_controller;
++
++ state = ireq->sm.current_state_id;
++
++ if (state != SCI_REQ_STP_PIO_DATA_IN) {
++ dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n",
++ __func__, event_code, state);
++
++ return SCI_FAILURE_INVALID_STATE;
++ }
++
++ switch (scu_get_event_specifier(event_code)) {
++ case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
++ /* We are waiting for data and the SCU has R_ERR'd the data frame.
++ * Go back to waiting for the D2H Register FIS
++ */
++ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
++ return SCI_SUCCESS;
++ default:
++ dev_err(&ihost->pdev->dev,
++ "%s: pio request unexpected event %#x\n",
++ __func__, event_code);
++
++ /* TODO Should we fail the PIO request when we get an
++ * unexpected event?
++ */
++ return SCI_FAILURE;
++ }
++}
++
++/*
++ * This function copies response data for requests returning response data
++ * instead of sense data.
++ * @ireq: This parameter specifies the request object for which to copy
++ * the response data.
++ */
++static void sci_io_request_copy_response(struct isci_request *ireq)
++{
++ void *resp_buf;
++ u32 len;
++ struct ssp_response_iu *ssp_response;
++ struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
++
++ ssp_response = &ireq->ssp.rsp;
++
++ resp_buf = &isci_tmf->resp.resp_iu;
++
++ len = min_t(u32,
++ SSP_RESP_IU_MAX_SIZE,
++ be32_to_cpu(ssp_response->response_data_len));
++
++ memcpy(resp_buf, ssp_response->resp_data, len);
++}
++
++static enum sci_status
++request_started_state_tc_event(struct isci_request *ireq,
++ u32 completion_code)
++{
++ struct ssp_response_iu *resp_iu;
++ u8 datapres;
++
++ /* TODO: Any SDMA return code other than 0 is bad; decode 0x003C0000
++ * to determine the SDMA status
++ */
++ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
++ ireq->scu_status = SCU_TASK_DONE_GOOD;
++ ireq->sci_status = SCI_SUCCESS;
++ break;
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
++ /* There are times when the SCU hardware will return an early
++ * response because the io request specified more data than is
++ * returned by the target device (mode pages, inquiry data,
++ * etc.). We must check the response status to see if this is
++ * truly a failed request or a good request that just got
++ * completed early.
++ */
++ struct ssp_response_iu *resp = &ireq->ssp.rsp;
++ ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
++
++ sci_swab32_cpy(&ireq->ssp.rsp,
++ &ireq->ssp.rsp,
++ word_cnt);
++
++ if (resp->status == 0) {
++ ireq->scu_status = SCU_TASK_DONE_GOOD;
++ ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
++ } else {
++ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
++ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
++ }
++ break;
++ }
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
++ ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
++
++ sci_swab32_cpy(&ireq->ssp.rsp,
++ &ireq->ssp.rsp,
++ word_cnt);
++
++ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
++ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
++ break;
++ }
++
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
++ /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
++ * guaranteed to be received before this completion status is
++ * posted?
++ */
++ resp_iu = &ireq->ssp.rsp;
++ datapres = resp_iu->datapres;
++
++ if (datapres == 1 || datapres == 2) {
++ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
++ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
++ } else {
++ ireq->scu_status = SCU_TASK_DONE_GOOD;
++ ireq->sci_status = SCI_SUCCESS;
++ }
++ break;
++ /* Only STP devices get suspended. */
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
++ if (ireq->protocol == SCIC_STP_PROTOCOL) {
++ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
++ SCU_COMPLETION_TL_STATUS_SHIFT;
++ ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
++ } else {
++ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
++ SCU_COMPLETION_TL_STATUS_SHIFT;
++ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
++ }
++ break;
++
++ /* Both STP and SSP devices get suspended */
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
++ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
++ SCU_COMPLETION_TL_STATUS_SHIFT;
++ ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
++ break;
++
++ /* neither ssp nor stp gets suspended. */
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
++ default:
++ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
++ SCU_COMPLETION_TL_STATUS_SHIFT;
++ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
++ break;
++ }
++
++ /*
++ * TODO: This is probably wrong for ACK/NAK timeout conditions
++ */
++
++ /* In all cases we will treat this as the completion of the IO req. */
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ return SCI_SUCCESS;
++}
++
++static enum sci_status
++request_aborting_state_tc_event(struct isci_request *ireq,
++ u32 completion_code)
++{
++ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
++ case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
++ case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
++ ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
++ ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ break;
++
++ default:
++ /* Unless we get some strange error, wait for the task abort to complete.
++ * TODO: Should there be a state change for this completion?
++ */
++ break;
++ }
++
++ return SCI_SUCCESS;
++}
++
++static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
++ u32 completion_code)
++{
++ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
++ ireq->scu_status = SCU_TASK_DONE_GOOD;
++ ireq->sci_status = SCI_SUCCESS;
++ sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
++ break;
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
++ /* Currently, the decision is to simply allow the task request
++ * to time out if the task IU wasn't received successfully.
++ * There is a potential for receiving multiple task responses if
++ * we decide to send the task IU again.
++ */
++ dev_warn(&ireq->owning_controller->pdev->dev,
++ "%s: TaskRequest:0x%p CompletionCode:%x - "
++ "ACK/NAK timeout\n", __func__, ireq,
++ completion_code);
++
++ sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
++ break;
++ default:
++ /*
++ * All other completion statuses cause the IO to be complete.
++ * If a NAK was received, then it is up to the user to retry
++ * the request.
++ */
++ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
++ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ break;
++ }
++
++ return SCI_SUCCESS;
++}
++
++static enum sci_status
++smp_request_await_response_tc_event(struct isci_request *ireq,
++ u32 completion_code)
++{
++ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
++ /* In the AWAIT RESPONSE state, any TC completion is
++ * unexpected, but if the TC has success status, we
++ * complete the IO anyway.
++ */
++ ireq->scu_status = SCU_TASK_DONE_GOOD;
++ ireq->sci_status = SCI_SUCCESS;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ break;
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
++ /* These statuses have been seen with a specific LSI
++ * expander, which sometimes is not able to send an SMP
++ * response within 2 ms. This causes our hardware to break
++ * the connection and set the TC completion with one of
++ * these SMP_XXX_XX_ERR statuses. For this type of error,
++ * we ask the ihost user to retry the request.
++ */
++ ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
++ ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ break;
++ default:
++ /* All other completion statuses cause the IO to be complete. If a NAK
++ * was received, then it is up to the user to retry the request
++ */
++ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
++ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ break;
++ }
++
++ return SCI_SUCCESS;
++}
++
++static enum sci_status
++smp_request_await_tc_event(struct isci_request *ireq,
++ u32 completion_code)
++{
++ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
++ ireq->scu_status = SCU_TASK_DONE_GOOD;
++ ireq->sci_status = SCI_SUCCESS;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ break;
++ default:
++ /* All other completion statuses cause the IO to be
++ * complete. If a NAK was received, then it is up to
++ * the user to retry the request.
++ */
++ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
++ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ break;
++ }
++
++ return SCI_SUCCESS;
++}
++
++static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
++{
++ struct scu_sgl_element *sgl;
++ struct scu_sgl_element_pair *sgl_pair;
++ struct isci_request *ireq = to_ireq(stp_req);
++ struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;
++
++ sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
++ if (!sgl_pair)
++ sgl = NULL;
++ else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
++ if (sgl_pair->B.address_lower == 0 &&
++ sgl_pair->B.address_upper == 0) {
++ sgl = NULL;
++ } else {
++ pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
++ sgl = &sgl_pair->B;
++ }
++ } else {
++ if (sgl_pair->next_pair_lower == 0 &&
++ sgl_pair->next_pair_upper == 0) {
++ sgl = NULL;
++ } else {
++ pio_sgl->index++;
++ pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
++ sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
++ sgl = &sgl_pair->A;
++ }
++ }
++
++ return sgl;
++}
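++
++/* Traversal order: pio_sgl_next() advances from element A to element B
++ * within a pair and from element B to the next pair's element A, returning
++ * NULL when element B is empty or the current pair's next-pair address is
++ * zero.
++ */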
++
++static enum sci_status
++stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
++ u32 completion_code)
++{
++ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
++ ireq->scu_status = SCU_TASK_DONE_GOOD;
++ ireq->sci_status = SCI_SUCCESS;
++ sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
++ break;
++
++ default:
++ /* All other completion statuses cause the IO to be
++ * complete. If a NAK was received, then it is up to
++ * the user to retry the request.
++ */
++ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
++ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ break;
++ }
++
++ return SCI_SUCCESS;
++}
++
++#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
++
++/* Transmit a DATA_FIS from (current sgl + offset) for the input parameter
++ * length. The current sgl and offset are already stored in the IO request.
++ */
++static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
++ struct isci_request *ireq,
++ u32 length)
++{
++ struct isci_stp_request *stp_req = &ireq->stp.req;
++ struct scu_task_context *task_context = ireq->tc;
++ struct scu_sgl_element_pair *sgl_pair;
++ struct scu_sgl_element *current_sgl;
++
++ /* Recycle the TC and reconstruct it for sending out a DATA FIS containing
++ * the data from current_sgl+offset for the input length
++ */
++ sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
++ if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
++ current_sgl = &sgl_pair->A;
++ else
++ current_sgl = &sgl_pair->B;
++
++ /* update the TC */
++ task_context->command_iu_upper = current_sgl->address_upper;
++ task_context->command_iu_lower = current_sgl->address_lower;
++ task_context->transfer_length_bytes = length;
++ task_context->type.stp.fis_type = FIS_DATA;
++
++ /* send the new TC out. */
++ return sci_controller_continue_io(ireq);
++}
++
++static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
++{
++ struct isci_stp_request *stp_req = &ireq->stp.req;
++ struct scu_sgl_element_pair *sgl_pair;
++ struct scu_sgl_element *sgl;
++ enum sci_status status;
++ u32 offset;
++ u32 len = 0;
++
++ offset = stp_req->sgl.offset;
++ sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
++ if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
++ return SCI_FAILURE;
++
++ if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
++ sgl = &sgl_pair->A;
++ len = sgl_pair->A.length - offset;
++ } else {
++ sgl = &sgl_pair->B;
++ len = sgl_pair->B.length - offset;
++ }
++
++ if (stp_req->pio_len == 0)
++ return SCI_SUCCESS;
++
++ if (stp_req->pio_len >= len) {
++ status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
++ if (status != SCI_SUCCESS)
++ return status;
++ stp_req->pio_len -= len;
++
++ /* update the current sgl, offset and save for future */
++ sgl = pio_sgl_next(stp_req);
++ offset = 0;
++ } else if (stp_req->pio_len < len) {
++ status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);
++
++ /* Sgl offset will be adjusted and saved for future */
++ offset += stp_req->pio_len;
++ sgl->address_lower += stp_req->pio_len;
++ stp_req->pio_len = 0;
++ }
++
++ stp_req->sgl.offset = offset;
++
++ return status;
++}
++
++/**
++ *
++ * @stp_request: The request that is used for the SGL processing.
++ * @data_buffer: The buffer of data to be copied.
++ * @length: The length of the data transfer.
++ *
++ * Copy the data from the buffer for the length specified to the IO request
++ * SGL specified data region. Returns an enum sci_status.
++ */
++static enum sci_status
++sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
++ u8 *data_buf, u32 len)
++{
++ struct isci_request *ireq;
++ u8 *src_addr;
++ int copy_len;
++ struct sas_task *task;
++ struct scatterlist *sg;
++ void *kaddr;
++ int total_len = len;
++
++ ireq = to_ireq(stp_req);
++ task = isci_request_access_task(ireq);
++ src_addr = data_buf;
++
++ if (task->num_scatter > 0) {
++ sg = task->scatter;
++
++ while (total_len > 0) {
++ struct page *page = sg_page(sg);
++
++ copy_len = min_t(int, total_len, sg_dma_len(sg));
++ kaddr = kmap_atomic(page, KM_IRQ0);
++ memcpy(kaddr + sg->offset, src_addr, copy_len);
++ kunmap_atomic(kaddr, KM_IRQ0);
++ total_len -= copy_len;
++ src_addr += copy_len;
++ sg = sg_next(sg);
++ }
++ } else {
++ BUG_ON(task->total_xfer_len < total_len);
++ memcpy(task->scatter, src_addr, total_len);
++ }
++
++ return SCI_SUCCESS;
++}
++
++/**
++ *
++ * @sci_req: The PIO DATA IN request that is to receive the data.
++ * @data_buffer: The buffer to copy from.
++ *
++ * Copy the data buffer to the io request data region. Returns an enum sci_status.
++ */
++static enum sci_status sci_stp_request_pio_data_in_copy_data(
++ struct isci_stp_request *stp_req,
++ u8 *data_buffer)
++{
++ enum sci_status status;
++
++ /*
++ * If there is less than 1K remaining in the transfer request,
++ * copy just the data for the transfer */
++ if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
++ status = sci_stp_request_pio_data_in_copy_data_buffer(
++ stp_req, data_buffer, stp_req->pio_len);
++
++ if (status == SCI_SUCCESS)
++ stp_req->pio_len = 0;
++ } else {
++ /* We are transferring the whole frame so copy */
++ status = sci_stp_request_pio_data_in_copy_data_buffer(
++ stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
++
++ if (status == SCI_SUCCESS)
++ stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
++ }
++
++ return status;
++}
++
++static enum sci_status
++stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
++ u32 completion_code)
++{
++ enum sci_status status = SCI_SUCCESS;
++
++ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
++ ireq->scu_status = SCU_TASK_DONE_GOOD;
++ ireq->sci_status = SCI_SUCCESS;
++ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
++ break;
++
++ default:
++ /* All other completion statuses cause the IO to be
++ * complete. If a NAK was received, then it is up to
++ * the user to retry the request.
++ */
++ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
++ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ break;
++ }
++
++ return status;
++}
++
++static enum sci_status
++pio_data_out_tx_done_tc_event(struct isci_request *ireq,
++ u32 completion_code)
++{
++ enum sci_status status = SCI_SUCCESS;
++ bool all_frames_transferred = false;
++ struct isci_stp_request *stp_req = &ireq->stp.req;
++
++ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
++ /* Transmit data */
++ if (stp_req->pio_len != 0) {
++ status = sci_stp_request_pio_data_out_transmit_data(ireq);
++ if (status == SCI_SUCCESS) {
++ if (stp_req->pio_len == 0)
++ all_frames_transferred = true;
++ }
++ } else if (stp_req->pio_len == 0) {
++ /*
++ * this will happen if all the data is written the
++ * first time after the PIO setup FIS is received
++ */
++ all_frames_transferred = true;
++ }
++
++ /* all data transferred. */
++ if (all_frames_transferred) {
++ /*
++ * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
++ * and wait for a PIO_SETUP FIS or a D2H Reg FIS. */
++ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
++ }
++ break;
++
++ default:
++ /*
++ * All other completion statuses cause the IO to be complete.
++ * If a NAK was received, then it is up to the user to retry
++ * the request.
++ */
++ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
++ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ break;
++ }
++
++ return status;
++}
++
++static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
++ u32 frame_index)
++{
++ struct isci_host *ihost = ireq->owning_controller;
++ struct dev_to_host_fis *frame_header;
++ enum sci_status status;
++ u32 *frame_buffer;
++
++ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
++ frame_index,
++ (void **)&frame_header);
++
++ if ((status == SCI_SUCCESS) &&
++ (frame_header->fis_type == FIS_REGD2H)) {
++ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
++ frame_index,
++ (void **)&frame_buffer);
++
++ sci_controller_copy_sata_response(&ireq->stp.rsp,
++ frame_header,
++ frame_buffer);
++ }
++
++ sci_controller_release_frame(ihost, frame_index);
++
++ return status;
++}
++
++enum sci_status
++sci_io_request_frame_handler(struct isci_request *ireq,
++ u32 frame_index)
++{
++ struct isci_host *ihost = ireq->owning_controller;
++ struct isci_stp_request *stp_req = &ireq->stp.req;
++ enum sci_base_request_states state;
++ enum sci_status status;
++ ssize_t word_cnt;
++
++ state = ireq->sm.current_state_id;
++ switch (state) {
++ case SCI_REQ_STARTED: {
++ struct ssp_frame_hdr ssp_hdr;
++ void *frame_header;
++
++ sci_unsolicited_frame_control_get_header(&ihost->uf_control,
++ frame_index,
++ &frame_header);
++
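++		/* Frame data arrives in big-endian wire order;
++		 * sci_swab32_cpy byte-swaps each dword while copying
++		 * it to the local header.
++		 */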
++ word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
++ sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
++
++ if (ssp_hdr.frame_type == SSP_RESPONSE) {
++ struct ssp_response_iu *resp_iu;
++ ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
++
++ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
++ frame_index,
++ (void **)&resp_iu);
++
++ sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
++
++ resp_iu = &ireq->ssp.rsp;
++
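++			/* DATAPRES 0x01 (RESPONSE_DATA) and 0x02 (SENSE_DATA)
++			 * indicate error information that the upper layer
++			 * must decode.
++			 */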
++ if (resp_iu->datapres == 0x01 ||
++ resp_iu->datapres == 0x02) {
++ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
++ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
++ } else {
++ ireq->scu_status = SCU_TASK_DONE_GOOD;
++ ireq->sci_status = SCI_SUCCESS;
++ }
++ } else {
++ /* not a response frame, why did it get forwarded? */
++ dev_err(&ihost->pdev->dev,
++ "%s: SCIC IO Request 0x%p received unexpected "
++ "frame %d type 0x%02x\n", __func__, ireq,
++ frame_index, ssp_hdr.frame_type);
++ }
++
++		/*
++		 * In any case we are done with this frame buffer;
++		 * return it to the controller.
++		 */
++ sci_controller_release_frame(ihost, frame_index);
++
++ return SCI_SUCCESS;
++ }
++
++ case SCI_REQ_TASK_WAIT_TC_RESP:
++ sci_io_request_copy_response(ireq);
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ sci_controller_release_frame(ihost, frame_index);
++ return SCI_SUCCESS;
++
++ case SCI_REQ_SMP_WAIT_RESP: {
++ struct smp_resp *rsp_hdr = &ireq->smp.rsp;
++ void *frame_header;
++
++ sci_unsolicited_frame_control_get_header(&ihost->uf_control,
++ frame_index,
++ &frame_header);
++
++ /* byte swap the header. */
++ word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
++ sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
++
++ if (rsp_hdr->frame_type == SMP_RESPONSE) {
++ void *smp_resp;
++
++ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
++ frame_index,
++ &smp_resp);
++
++ word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
++ sizeof(u32);
++
++ sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
++ smp_resp, word_cnt);
++
++ ireq->scu_status = SCU_TASK_DONE_GOOD;
++ ireq->sci_status = SCI_SUCCESS;
++ sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
++ } else {
++			/*
++			 * This was not a response frame; why did it get
++			 * forwarded?
++			 */
++ dev_err(&ihost->pdev->dev,
++ "%s: SCIC SMP Request 0x%p received unexpected "
++ "frame %d type 0x%02x\n",
++ __func__,
++ ireq,
++ frame_index,
++ rsp_hdr->frame_type);
++
++ ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
++ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ }
++
++ sci_controller_release_frame(ihost, frame_index);
++
++ return SCI_SUCCESS;
++ }
++
++ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
++ return sci_stp_request_udma_general_frame_handler(ireq,
++ frame_index);
++
++ case SCI_REQ_STP_UDMA_WAIT_D2H:
++		/* Use the general frame handler to copy the response data */
++ status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
++
++ if (status != SCI_SUCCESS)
++ return status;
++
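++		/* The D2H Register FIS holds the device's ending status;
++		 * flag the response as valid so it is decoded at
++		 * completion time.
++		 */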
++ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
++ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ return SCI_SUCCESS;
++
++ case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
++ struct dev_to_host_fis *frame_header;
++ u32 *frame_buffer;
++
++ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
++ frame_index,
++ (void **)&frame_header);
++
++ if (status != SCI_SUCCESS) {
++ dev_err(&ihost->pdev->dev,
++ "%s: SCIC IO Request 0x%p could not get frame "
++ "header for frame index %d, status %x\n",
++ __func__,
++ stp_req,
++ frame_index,
++ status);
++
++ return status;
++ }
++
++ switch (frame_header->fis_type) {
++ case FIS_REGD2H:
++ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
++ frame_index,
++ (void **)&frame_buffer);
++
++ sci_controller_copy_sata_response(&ireq->stp.rsp,
++ frame_header,
++ frame_buffer);
++
++			/* The command has completed with an error */
++ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
++ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
++ break;
++
++ default:
++ dev_warn(&ihost->pdev->dev,
++ "%s: IO Request:0x%p Frame Id:%d protocol "
++ "violation occurred\n", __func__, stp_req,
++ frame_index);
++
++ ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
++ ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
++ break;
++ }
++
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++
++		/* Frame has been decoded; return it to the controller */
++ sci_controller_release_frame(ihost, frame_index);
++
++ return status;
++ }
++
++ case SCI_REQ_STP_PIO_WAIT_FRAME: {
++ struct sas_task *task = isci_request_access_task(ireq);
++ struct dev_to_host_fis *frame_header;
++ u32 *frame_buffer;
++
++ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
++ frame_index,
++ (void **)&frame_header);
++
++ if (status != SCI_SUCCESS) {
++ dev_err(&ihost->pdev->dev,
++ "%s: SCIC IO Request 0x%p could not get frame "
++ "header for frame index %d, status %x\n",
++ __func__, stp_req, frame_index, status);
++ return status;
++ }
++
++ switch (frame_header->fis_type) {
++ case FIS_PIO_SETUP:
++ /* Get from the frame buffer the PIO Setup Data */
++ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
++ frame_index,
++ (void **)&frame_buffer);
++
++			/* Get the data from the PIO Setup. The SCU hardware
++			 * returns the first word in the frame_header and the
++			 * rest of the data in the frame buffer, so we need
++			 * to back up one dword.
++			 */
++
++			/* transfer_count: first 16 bits of the 4th dword */
++ stp_req->pio_len = frame_buffer[3] & 0xffff;
++
++ /* status: 4th byte in the 3rd dword */
++ stp_req->status = (frame_buffer[2] >> 24) & 0xff;
++
++ sci_controller_copy_sata_response(&ireq->stp.rsp,
++ frame_header,
++ frame_buffer);
++
++ ireq->stp.rsp.status = stp_req->status;
++
++ /* The next state is dependent on whether the
++ * request was PIO Data-in or Data out
++ */
++ if (task->data_dir == DMA_FROM_DEVICE) {
++ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
++ } else if (task->data_dir == DMA_TO_DEVICE) {
++ /* Transmit data */
++ status = sci_stp_request_pio_data_out_transmit_data(ireq);
++ if (status != SCI_SUCCESS)
++ break;
++ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
++ }
++ break;
++
++ case FIS_SETDEVBITS:
++ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
++ break;
++
++ case FIS_REGD2H:
++ if (frame_header->status & ATA_BUSY) {
++ /*
++ * Now why is the drive sending a D2H Register
++ * FIS when it is still busy? Do nothing since
++ * we are still in the right state.
++ */
++ dev_dbg(&ihost->pdev->dev,
++ "%s: SCIC PIO Request 0x%p received "
++ "D2H Register FIS with BSY status "
++ "0x%x\n",
++ __func__,
++ stp_req,
++ frame_header->status);
++ break;
++ }
++
++ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
++ frame_index,
++ (void **)&frame_buffer);
++
++			sci_controller_copy_sata_response(&ireq->stp.rsp,
++ frame_header,
++ frame_buffer);
++
++ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
++ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ break;
++
++ default:
++ /* FIXME: what do we do here? */
++ break;
++ }
++
++		/* Frame is decoded; return it to the controller */
++ sci_controller_release_frame(ihost, frame_index);
++
++ return status;
++ }
++
++ case SCI_REQ_STP_PIO_DATA_IN: {
++ struct dev_to_host_fis *frame_header;
++ struct sata_fis_data *frame_buffer;
++
++ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
++ frame_index,
++ (void **)&frame_header);
++
++ if (status != SCI_SUCCESS) {
++ dev_err(&ihost->pdev->dev,
++ "%s: SCIC IO Request 0x%p could not get frame "
++ "header for frame index %d, status %x\n",
++ __func__,
++ stp_req,
++ frame_index,
++ status);
++ return status;
++ }
++
++ if (frame_header->fis_type != FIS_DATA) {
++ dev_err(&ihost->pdev->dev,
++ "%s: SCIC PIO Request 0x%p received frame %d "
++ "with fis type 0x%02x when expecting a data "
++ "fis.\n",
++ __func__,
++ stp_req,
++ frame_index,
++ frame_header->fis_type);
++
++ ireq->scu_status = SCU_TASK_DONE_GOOD;
++ ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++
++			/* Frame is decoded; return it to the controller */
++ sci_controller_release_frame(ihost, frame_index);
++ return status;
++ }
++
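++		/* A negative SGL index means there is no SGE left to
++		 * receive into; save the frame index (the frame is
++		 * deliberately not released here) so the data can be
++		 * retrieved later.
++		 */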
++ if (stp_req->sgl.index < 0) {
++ ireq->saved_rx_frame_index = frame_index;
++ stp_req->pio_len = 0;
++ } else {
++ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
++ frame_index,
++ (void **)&frame_buffer);
++
++ status = sci_stp_request_pio_data_in_copy_data(stp_req,
++ (u8 *)frame_buffer);
++
++			/* Frame is decoded; return it to the controller */
++ sci_controller_release_frame(ihost, frame_index);
++ }
++
++		/* Check for the end of the transfer: are there more
++		 * bytes remaining for this data transfer?
++		 */
++ if (status != SCI_SUCCESS || stp_req->pio_len != 0)
++ return status;
++
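++		/* stp_req->status was captured from the PIO Setup FIS;
++		 * a clear BSY bit means the device expects no further
++		 * data phases.
++		 */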
++ if ((stp_req->status & ATA_BUSY) == 0) {
++ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
++ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ } else {
++ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
++ }
++ return status;
++ }
++
++ case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
++ struct dev_to_host_fis *frame_header;
++ u32 *frame_buffer;
++
++ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
++ frame_index,
++ (void **)&frame_header);
++ if (status != SCI_SUCCESS) {
++ dev_err(&ihost->pdev->dev,
++ "%s: SCIC IO Request 0x%p could not get frame "
++ "header for frame index %d, status %x\n",
++ __func__,
++ stp_req,
++ frame_index,
++ status);
++ return status;
++ }
++
++ switch (frame_header->fis_type) {
++ case FIS_REGD2H:
++ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
++ frame_index,
++ (void **)&frame_buffer);
++
++ sci_controller_copy_sata_response(&ireq->stp.rsp,
++ frame_header,
++ frame_buffer);
++
++			/* The command has completed with an error */
++ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
++ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
++ break;
++
++ default:
++ dev_warn(&ihost->pdev->dev,
++ "%s: IO Request:0x%p Frame Id:%d protocol "
++ "violation occurred\n",
++ __func__,
++ stp_req,
++ frame_index);
++
++ ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
++ ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
++ break;
++ }
++
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++
++		/* Frame has been decoded; return it to the controller */
++ sci_controller_release_frame(ihost, frame_index);
++
++ return status;
++ }
++ case SCI_REQ_ABORTING:
++ /*
++ * TODO: Is it even possible to get an unsolicited frame in the
++ * aborting state?
++ */
++ sci_controller_release_frame(ihost, frame_index);
++ return SCI_SUCCESS;
++
++ default:
++ dev_warn(&ihost->pdev->dev,
++ "%s: SCIC IO Request given unexpected frame %x while "
++ "in state %d\n",
++ __func__,
++ frame_index,
++ state);
++
++ sci_controller_release_frame(ihost, frame_index);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++}
++
++static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
++ u32 completion_code)
++{
++ enum sci_status status = SCI_SUCCESS;
++
++ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
++ ireq->scu_status = SCU_TASK_DONE_GOOD;
++ ireq->sci_status = SCI_SUCCESS;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ break;
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
++		/* We must check the response buffer to see if the D2H
++ * Register FIS was received before we got the TC
++ * completion.
++ */
++ if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
++ sci_remote_device_suspend(ireq->target_device,
++ SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
++
++ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
++ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ } else {
++ /* If we have an error completion status for the
++ * TC then we can expect a D2H register FIS from
++ * the device so we must change state to wait
++ * for it
++ */
++ sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
++ }
++ break;
++
++	/* TODO Check to see if any of these completion statuses need to
++	 * wait for the device to host register FIS.
++ */
++ /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
++ * - this comes only for B0
++ */
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
++ sci_remote_device_suspend(ireq->target_device,
++ SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
++ /* Fall through to the default case */
++ default:
++		/* All other completion statuses cause the IO to be complete. */
++ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
++ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ break;
++ }
++
++ return status;
++}
++
++static enum sci_status
++stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
++ u32 completion_code)
++{
++ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
++ ireq->scu_status = SCU_TASK_DONE_GOOD;
++ ireq->sci_status = SCI_SUCCESS;
++ sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
++ break;
++
++ default:
++ /*
++		 * All other completion statuses cause the IO to be complete.
++ * If a NAK was received, then it is up to the user to retry
++ * the request.
++ */
++ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
++ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ break;
++ }
++
++ return SCI_SUCCESS;
++}
++
++static enum sci_status
++stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
++ u32 completion_code)
++{
++ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
++ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
++ ireq->scu_status = SCU_TASK_DONE_GOOD;
++ ireq->sci_status = SCI_SUCCESS;
++ sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
++ break;
++
++ default:
++		/* All other completion statuses cause the IO to be complete. If
++ * a NAK was received, then it is up to the user to retry the
++ * request.
++ */
++ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
++ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
++ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
++ break;
++ }
++
++ return SCI_SUCCESS;
++}
++
++enum sci_status
++sci_io_request_tc_completion(struct isci_request *ireq,
++ u32 completion_code)
++{
++ enum sci_base_request_states state;
++ struct isci_host *ihost = ireq->owning_controller;
++
++ state = ireq->sm.current_state_id;
++
++ switch (state) {
++ case SCI_REQ_STARTED:
++ return request_started_state_tc_event(ireq, completion_code);
++
++ case SCI_REQ_TASK_WAIT_TC_COMP:
++ return ssp_task_request_await_tc_event(ireq,
++ completion_code);
++
++ case SCI_REQ_SMP_WAIT_RESP:
++ return smp_request_await_response_tc_event(ireq,
++ completion_code);
++
++ case SCI_REQ_SMP_WAIT_TC_COMP:
++ return smp_request_await_tc_event(ireq, completion_code);
++
++ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
++ return stp_request_udma_await_tc_event(ireq,
++ completion_code);
++
++ case SCI_REQ_STP_NON_DATA_WAIT_H2D:
++ return stp_request_non_data_await_h2d_tc_event(ireq,
++ completion_code);
++
++ case SCI_REQ_STP_PIO_WAIT_H2D:
++ return stp_request_pio_await_h2d_completion_tc_event(ireq,
++ completion_code);
++
++ case SCI_REQ_STP_PIO_DATA_OUT:
++ return pio_data_out_tx_done_tc_event(ireq, completion_code);
++
++ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
++ return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
++ completion_code);
++
++ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
++ return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
++ completion_code);
++
++ case SCI_REQ_ABORTING:
++ return request_aborting_state_tc_event(ireq,
++ completion_code);
++
++ default:
++ dev_warn(&ihost->pdev->dev,
++ "%s: SCIC IO Request given task completion "
++ "notification %x while in wrong state %d\n",
++ __func__,
++ completion_code,
++ state);
++ return SCI_FAILURE_INVALID_STATE;
++ }
++}
++
++/**
++ * isci_request_process_response_iu() - This function sets the status and
++ * response iu, in the task struct, from the request object for the upper
++ * layer driver.
++ * @task: This parameter is the task struct from the upper layer driver.
++ * @resp_iu: This parameter points to the response iu of the completed request.
++ * @dev: This parameter specifies the linux device struct.
++ *
++ * none.
++ */
++static void isci_request_process_response_iu(
++ struct sas_task *task,
++ struct ssp_response_iu *resp_iu,
++ struct device *dev)
++{
++ dev_dbg(dev,
++ "%s: resp_iu = %p "
++ "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
++ "resp_iu->response_data_len = %x, "
++		"resp_iu->sense_data_len = %x\nresponse data: ",
++ __func__,
++ resp_iu,
++ resp_iu->status,
++ resp_iu->datapres,
++ resp_iu->response_data_len,
++ resp_iu->sense_data_len);
++
++ task->task_status.stat = resp_iu->status;
++
++ /* libsas updates the task status fields based on the response iu. */
++ sas_ssp_task_response(dev, task, resp_iu);
++}
++
++/**
++ * isci_request_set_open_reject_status() - This function prepares the I/O
++ * completion for OPEN_REJECT conditions.
++ * @request: This parameter is the completed isci_request object.
++ * @response_ptr: This parameter specifies the service response for the I/O.
++ * @status_ptr: This parameter specifies the exec status for the I/O.
++ * @complete_to_host_ptr: This parameter specifies the action to be taken by
++ * the LLDD with respect to completing this request or forcing an abort
++ * condition on the I/O.
++ * @open_rej_reason: This parameter specifies the encoded reason for the
++ * abandon-class reject.
++ *
++ * none.
++ */
++static void isci_request_set_open_reject_status(
++ struct isci_request *request,
++ struct sas_task *task,
++ enum service_response *response_ptr,
++ enum exec_status *status_ptr,
++ enum isci_completion_selection *complete_to_host_ptr,
++ enum sas_open_rej_reason open_rej_reason)
++{
++ /* Task in the target is done. */
++ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
++ *response_ptr = SAS_TASK_UNDELIVERED;
++ *status_ptr = SAS_OPEN_REJECT;
++ *complete_to_host_ptr = isci_perform_normal_io_completion;
++ task->task_status.open_rej_reason = open_rej_reason;
++}
++
++/**
++ * isci_request_handle_controller_specific_errors() - This function decodes
++ * controller-specific I/O completion error conditions.
++ * @request: This parameter is the completed isci_request object.
++ * @response_ptr: This parameter specifies the service response for the I/O.
++ * @status_ptr: This parameter specifies the exec status for the I/O.
++ * @complete_to_host_ptr: This parameter specifies the action to be taken by
++ * the LLDD with respect to completing this request or forcing an abort
++ * condition on the I/O.
++ *
++ * none.
++ */
++static void isci_request_handle_controller_specific_errors(
++ struct isci_remote_device *idev,
++ struct isci_request *request,
++ struct sas_task *task,
++ enum service_response *response_ptr,
++ enum exec_status *status_ptr,
++ enum isci_completion_selection *complete_to_host_ptr)
++{
++ unsigned int cstatus;
++
++ cstatus = request->scu_status;
++
++ dev_dbg(&request->isci_host->pdev->dev,
++ "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
++ "- controller status = 0x%x\n",
++ __func__, request, cstatus);
++
++ /* Decode the controller-specific errors; most
++ * important is to recognize those conditions in which
++ * the target may still have a task outstanding that
++ * must be aborted.
++ *
++ * Note that there are SCU completion codes being
++ * named in the decode below for which SCIC has already
++ * done work to handle them in a way other than as
++ * a controller-specific completion code; these are left
++	 * in the decode below for completeness' sake.
++ */
++ switch (cstatus) {
++ case SCU_TASK_DONE_DMASETUP_DIRERR:
++ /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
++ case SCU_TASK_DONE_XFERCNT_ERR:
++ /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
++ if (task->task_proto == SAS_PROTOCOL_SMP) {
++ /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
++ *response_ptr = SAS_TASK_COMPLETE;
++
++ /* See if the device has been/is being stopped. Note
++ * that we ignore the quiesce state, since we are
++ * concerned about the actual device state.
++ */
++ if (!idev)
++ *status_ptr = SAS_DEVICE_UNKNOWN;
++ else
++ *status_ptr = SAS_ABORTED_TASK;
++
++ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
++
++ *complete_to_host_ptr =
++ isci_perform_normal_io_completion;
++ } else {
++ /* Task in the target is not done. */
++ *response_ptr = SAS_TASK_UNDELIVERED;
++
++ if (!idev)
++ *status_ptr = SAS_DEVICE_UNKNOWN;
++ else
++ *status_ptr = SAM_STAT_TASK_ABORTED;
++
++ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
++
++ *complete_to_host_ptr =
++ isci_perform_error_io_completion;
++ }
++
++ break;
++
++ case SCU_TASK_DONE_CRC_ERR:
++ case SCU_TASK_DONE_NAK_CMD_ERR:
++ case SCU_TASK_DONE_EXCESS_DATA:
++ case SCU_TASK_DONE_UNEXP_FIS:
++ /* Also SCU_TASK_DONE_UNEXP_RESP: */
++ case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */
++ case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */
++ case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */
++ /* These are conditions in which the target
++ * has completed the task, so that no cleanup
++ * is necessary.
++ */
++ *response_ptr = SAS_TASK_COMPLETE;
++
++ /* See if the device has been/is being stopped. Note
++ * that we ignore the quiesce state, since we are
++ * concerned about the actual device state.
++ */
++ if (!idev)
++ *status_ptr = SAS_DEVICE_UNKNOWN;
++ else
++ *status_ptr = SAS_ABORTED_TASK;
++
++ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
++
++ *complete_to_host_ptr = isci_perform_normal_io_completion;
++ break;
++
++
++ /* Note that the only open reject completion codes seen here will be
++ * abandon-class codes; all others are automatically retried in the SCU.
++ */
++ case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
++
++ isci_request_set_open_reject_status(
++ request, task, response_ptr, status_ptr,
++ complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
++ break;
++
++ case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
++
++ /* Note - the return of AB0 will change when
++ * libsas implements detection of zone violations.
++ */
++ isci_request_set_open_reject_status(
++ request, task, response_ptr, status_ptr,
++ complete_to_host_ptr, SAS_OREJ_RESV_AB0);
++ break;
++
++ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
++
++ isci_request_set_open_reject_status(
++ request, task, response_ptr, status_ptr,
++ complete_to_host_ptr, SAS_OREJ_RESV_AB1);
++ break;
++
++ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
++
++ isci_request_set_open_reject_status(
++ request, task, response_ptr, status_ptr,
++ complete_to_host_ptr, SAS_OREJ_RESV_AB2);
++ break;
++
++ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
++
++ isci_request_set_open_reject_status(
++ request, task, response_ptr, status_ptr,
++ complete_to_host_ptr, SAS_OREJ_RESV_AB3);
++ break;
++
++ case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
++
++ isci_request_set_open_reject_status(
++ request, task, response_ptr, status_ptr,
++ complete_to_host_ptr, SAS_OREJ_BAD_DEST);
++ break;
++
++ case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
++
++ isci_request_set_open_reject_status(
++ request, task, response_ptr, status_ptr,
++ complete_to_host_ptr, SAS_OREJ_STP_NORES);
++ break;
++
++ case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
++
++ isci_request_set_open_reject_status(
++ request, task, response_ptr, status_ptr,
++ complete_to_host_ptr, SAS_OREJ_EPROTO);
++ break;
++
++ case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
++
++ isci_request_set_open_reject_status(
++ request, task, response_ptr, status_ptr,
++ complete_to_host_ptr, SAS_OREJ_CONN_RATE);
++ break;
++
++ case SCU_TASK_DONE_LL_R_ERR:
++ /* Also SCU_TASK_DONE_ACK_NAK_TO: */
++ case SCU_TASK_DONE_LL_PERR:
++ case SCU_TASK_DONE_LL_SY_TERM:
++ /* Also SCU_TASK_DONE_NAK_ERR:*/
++ case SCU_TASK_DONE_LL_LF_TERM:
++ /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
++ case SCU_TASK_DONE_LL_ABORT_ERR:
++ case SCU_TASK_DONE_SEQ_INV_TYPE:
++ /* Also SCU_TASK_DONE_UNEXP_XR: */
++ case SCU_TASK_DONE_XR_IU_LEN_ERR:
++ case SCU_TASK_DONE_INV_FIS_LEN:
++ /* Also SCU_TASK_DONE_XR_WD_LEN: */
++ case SCU_TASK_DONE_SDMA_ERR:
++ case SCU_TASK_DONE_OFFSET_ERR:
++ case SCU_TASK_DONE_MAX_PLD_ERR:
++ case SCU_TASK_DONE_LF_ERR:
++ case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */
++ case SCU_TASK_DONE_SMP_LL_RX_ERR:
++ case SCU_TASK_DONE_UNEXP_DATA:
++ case SCU_TASK_DONE_UNEXP_SDBFIS:
++ case SCU_TASK_DONE_REG_ERR:
++ case SCU_TASK_DONE_SDB_ERR:
++ case SCU_TASK_DONE_TASK_ABORT:
++ default:
++ /* Task in the target is not done. */
++ *response_ptr = SAS_TASK_UNDELIVERED;
++ *status_ptr = SAM_STAT_TASK_ABORTED;
++
++ if (task->task_proto == SAS_PROTOCOL_SMP) {
++ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
++
++ *complete_to_host_ptr = isci_perform_normal_io_completion;
++ } else {
++ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
++
++ *complete_to_host_ptr = isci_perform_error_io_completion;
++ }
++ break;
++ }
++}
++
++/**
++ * isci_task_save_for_upper_layer_completion() - This function saves the
++ * request for later completion to the upper layer driver.
++ * @host: This parameter is a pointer to the host on which the request
++ * should be queued (either as an error or success).
++ * @request: This parameter is the completed request.
++ * @response: This parameter is the response code for the completed task.
++ * @status: This parameter is the status code for the completed task.
++ *
++ * none.
++ */
++static void isci_task_save_for_upper_layer_completion(
++ struct isci_host *host,
++ struct isci_request *request,
++ enum service_response response,
++ enum exec_status status,
++ enum isci_completion_selection task_notification_selection)
++{
++ struct sas_task *task = isci_request_access_task(request);
++
++ task_notification_selection
++ = isci_task_set_completion_status(task, response, status,
++ task_notification_selection);
++
++ /* Tasks aborted specifically by a call to the lldd_abort_task
++ * function should not be completed to the host in the regular path.
++ */
++ switch (task_notification_selection) {
++
++ case isci_perform_normal_io_completion:
++
++ /* Normal notification (task_done) */
++ dev_dbg(&host->pdev->dev,
++ "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
++ __func__,
++ task,
++ task->task_status.resp, response,
++ task->task_status.stat, status);
++ /* Add to the completed list. */
++ list_add(&request->completed_node,
++ &host->requests_to_complete);
++
++ /* Take the request off the device's pending request list. */
++ list_del_init(&request->dev_node);
++ break;
++
++ case isci_perform_aborted_io_completion:
++ /* No notification to libsas because this request is
++ * already in the abort path.
++ */
++ dev_dbg(&host->pdev->dev,
++ "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
++ __func__,
++ task,
++ task->task_status.resp, response,
++ task->task_status.stat, status);
++
++ /* Wake up whatever process was waiting for this
++ * request to complete.
++ */
++ WARN_ON(request->io_request_completion == NULL);
++
++ if (request->io_request_completion != NULL) {
++
++ /* Signal whoever is waiting that this
++ * request is complete.
++ */
++ complete(request->io_request_completion);
++ }
++ break;
++
++ case isci_perform_error_io_completion:
++ /* Use sas_task_abort */
++ dev_dbg(&host->pdev->dev,
++ "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
++ __func__,
++ task,
++ task->task_status.resp, response,
++ task->task_status.stat, status);
++ /* Add to the aborted list. */
++ list_add(&request->completed_node,
++ &host->requests_to_errorback);
++ break;
++
++ default:
++ dev_dbg(&host->pdev->dev,
++ "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
++ __func__,
++ task,
++ task->task_status.resp, response,
++ task->task_status.stat, status);
++
++ /* Add to the error to libsas list. */
++ list_add(&request->completed_node,
++ &host->requests_to_errorback);
++ break;
++ }
++}
++
++static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
++{
++ struct task_status_struct *ts = &task->task_status;
++ struct ata_task_resp *resp = (void *)&ts->buf[0];
++
++ resp->frame_len = sizeof(*fis);
++ memcpy(resp->ending_fis, fis, sizeof(*fis));
++ ts->buf_valid_size = sizeof(*resp);
++
++	/* If the device fault bit is set in the status register, then
++	 * report the FIS as a protocol response so the error can be
++	 * decoded; otherwise report success.
++	 */
++ if (fis->status & ATA_DF)
++ ts->stat = SAS_PROTO_RESPONSE;
++ else
++ ts->stat = SAM_STAT_GOOD;
++
++ ts->resp = SAS_TASK_COMPLETE;
++}
++
++static void isci_request_io_request_complete(struct isci_host *ihost,
++ struct isci_request *request,
++ enum sci_io_status completion_status)
++{
++ struct sas_task *task = isci_request_access_task(request);
++ struct ssp_response_iu *resp_iu;
++ unsigned long task_flags;
++ struct isci_remote_device *idev = isci_lookup_device(task->dev);
++ enum service_response response = SAS_TASK_UNDELIVERED;
++ enum exec_status status = SAS_ABORTED_TASK;
++ enum isci_request_status request_status;
++ enum isci_completion_selection complete_to_host
++ = isci_perform_normal_io_completion;
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: request = %p, task = %p,\n"
++ "task->data_dir = %d completion_status = 0x%x\n",
++ __func__,
++ request,
++ task,
++ task->data_dir,
++ completion_status);
++
++ spin_lock(&request->state_lock);
++ request_status = request->status;
++
++ /* Decode the request status. Note that if the request has been
++ * aborted by a task management function, we don't care
++ * what the status is.
++ */
++ switch (request_status) {
++
++ case aborted:
++ /* "aborted" indicates that the request was aborted by a task
++ * management function, since once a task management request is
++		 * performed by the device, the request only completes because
++ * of the subsequent driver terminate.
++ *
++ * Aborted also means an external thread is explicitly managing
++ * this request, so that we do not complete it up the stack.
++ *
++ * The target is still there (since the TMF was successful).
++ */
++ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
++ response = SAS_TASK_COMPLETE;
++
++ /* See if the device has been/is being stopped. Note
++ * that we ignore the quiesce state, since we are
++ * concerned about the actual device state.
++ */
++ if (!idev)
++ status = SAS_DEVICE_UNKNOWN;
++ else
++ status = SAS_ABORTED_TASK;
++
++ complete_to_host = isci_perform_aborted_io_completion;
++ /* This was an aborted request. */
++
++ spin_unlock(&request->state_lock);
++ break;
++
++ case aborting:
++ /* aborting means that the task management function tried and
++ * failed to abort the request. We need to note the request
++ * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
++ * target as down.
++ *
++ * Aborting also means an external thread is explicitly managing
++ * this request, so that we do not complete it up the stack.
++ */
++ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
++ response = SAS_TASK_UNDELIVERED;
++
++ if (!idev)
++			/* The device has been/is being stopped. Note that
++ * we ignore the quiesce state, since we are
++ * concerned about the actual device state.
++ */
++ status = SAS_DEVICE_UNKNOWN;
++ else
++ status = SAS_PHY_DOWN;
++
++ complete_to_host = isci_perform_aborted_io_completion;
++
++ /* This was an aborted request. */
++
++ spin_unlock(&request->state_lock);
++ break;
++
++ case terminating:
++
++		/* This was a terminated request. This happens when
++ * the I/O is being terminated because of an action on
++ * the device (reset, tear down, etc.), and the I/O needs
++ * to be completed up the stack.
++ */
++ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
++ response = SAS_TASK_UNDELIVERED;
++
++ /* See if the device has been/is being stopped. Note
++ * that we ignore the quiesce state, since we are
++ * concerned about the actual device state.
++ */
++ if (!idev)
++ status = SAS_DEVICE_UNKNOWN;
++ else
++ status = SAS_ABORTED_TASK;
++
++ complete_to_host = isci_perform_aborted_io_completion;
++
++ /* This was a terminated request. */
++
++ spin_unlock(&request->state_lock);
++ break;
++
++ case dead:
++ /* This was a terminated request that timed-out during the
++ * termination process. There is no task to complete to
++ * libsas.
++ */
++ complete_to_host = isci_perform_normal_io_completion;
++ spin_unlock(&request->state_lock);
++ break;
++
++ default:
++
++ /* The request is done from an SCU HW perspective. */
++ request->status = completed;
++
++ spin_unlock(&request->state_lock);
++
++ /* This is an active request being completed from the core. */
++ switch (completion_status) {
++
++ case SCI_IO_FAILURE_RESPONSE_VALID:
++ dev_dbg(&ihost->pdev->dev,
++ "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
++ __func__,
++ request,
++ task);
++
++ if (sas_protocol_ata(task->task_proto)) {
++ isci_process_stp_response(task, &request->stp.rsp);
++ } else if (SAS_PROTOCOL_SSP == task->task_proto) {
++
++ /* crack the iu response buffer. */
++ resp_iu = &request->ssp.rsp;
++ isci_request_process_response_iu(task, resp_iu,
++ &ihost->pdev->dev);
++
++ } else if (SAS_PROTOCOL_SMP == task->task_proto) {
++
++ dev_err(&ihost->pdev->dev,
++ "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
++ "SAS_PROTOCOL_SMP protocol\n",
++ __func__);
++
++ } else
++ dev_err(&ihost->pdev->dev,
++ "%s: unknown protocol\n", __func__);
++
++ /* use the task status set in the task struct by the
++ * isci_request_process_response_iu call.
++ */
++ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
++ response = task->task_status.resp;
++ status = task->task_status.stat;
++ break;
++
++ case SCI_IO_SUCCESS:
++ case SCI_IO_SUCCESS_IO_DONE_EARLY:
++
++ response = SAS_TASK_COMPLETE;
++ status = SAM_STAT_GOOD;
++ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
++
++ if (task->task_proto == SAS_PROTOCOL_SMP) {
++ void *rsp = &request->smp.rsp;
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: SMP protocol completion\n",
++ __func__);
++
++ sg_copy_from_buffer(
++ &task->smp_task.smp_resp, 1,
++ rsp, sizeof(struct smp_resp));
++ } else if (completion_status
++ == SCI_IO_SUCCESS_IO_DONE_EARLY) {
++
++ /* This was an SSP / STP / SATA transfer.
++ * There is a possibility that less data than
++ * the maximum was transferred.
++ */
++ u32 transferred_length = sci_req_tx_bytes(request);
++
++ task->task_status.residual
++ = task->total_xfer_len - transferred_length;
++
++ /* If there were residual bytes, call this an
++ * underrun.
++ */
++ if (task->task_status.residual != 0)
++ status = SAS_DATA_UNDERRUN;
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
++ __func__,
++ status);
++
++ } else
++ dev_dbg(&ihost->pdev->dev,
++ "%s: SCI_IO_SUCCESS\n",
++ __func__);
++
++ break;
++
++ case SCI_IO_FAILURE_TERMINATED:
++ dev_dbg(&ihost->pdev->dev,
++ "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
++ __func__,
++ request,
++ task);
++
++ /* The request was terminated explicitly. No handling
++ * is needed in the SCSI error handler path.
++ */
++ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
++ response = SAS_TASK_UNDELIVERED;
++
++ /* See if the device has been/is being stopped. Note
++ * that we ignore the quiesce state, since we are
++ * concerned about the actual device state.
++ */
++ if (!idev)
++ status = SAS_DEVICE_UNKNOWN;
++ else
++ status = SAS_ABORTED_TASK;
++
++ complete_to_host = isci_perform_normal_io_completion;
++ break;
++
++ case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
++
++ isci_request_handle_controller_specific_errors(
++ idev, request, task, &response, &status,
++ &complete_to_host);
++
++ break;
++
++ case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
++ /* This is a special case, in that the I/O completion
++ * is telling us that the device needs a reset.
++ * In order for the device reset condition to be
++ * noticed, the I/O has to be handled in the error
++ * handler. Set the reset flag and cause the
++ * SCSI error thread to be scheduled.
++ */
++ spin_lock_irqsave(&task->task_state_lock, task_flags);
++ task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
++ spin_unlock_irqrestore(&task->task_state_lock, task_flags);
++
++ /* Fail the I/O. */
++ response = SAS_TASK_UNDELIVERED;
++ status = SAM_STAT_TASK_ABORTED;
++
++ complete_to_host = isci_perform_error_io_completion;
++ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
++ break;
++
++ case SCI_FAILURE_RETRY_REQUIRED:
++
++ /* Fail the I/O so it can be retried. */
++ response = SAS_TASK_UNDELIVERED;
++ if (!idev)
++ status = SAS_DEVICE_UNKNOWN;
++ else
++ status = SAS_ABORTED_TASK;
++
++ complete_to_host = isci_perform_normal_io_completion;
++ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
++ break;
++
++
++ default:
++ /* Catch any otherwise unhandled error codes here. */
++ dev_dbg(&ihost->pdev->dev,
++ "%s: invalid completion code: 0x%x - "
++ "isci_request = %p\n",
++ __func__, completion_status, request);
++
++ response = SAS_TASK_UNDELIVERED;
++
++ /* See if the device has been/is being stopped. Note
++ * that we ignore the quiesce state, since we are
++ * concerned about the actual device state.
++ */
++ if (!idev)
++ status = SAS_DEVICE_UNKNOWN;
++ else
++ status = SAS_ABORTED_TASK;
++
++ if (SAS_PROTOCOL_SMP == task->task_proto) {
++ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
++ complete_to_host = isci_perform_normal_io_completion;
++ } else {
++ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
++ complete_to_host = isci_perform_error_io_completion;
++ }
++ break;
++ }
++ break;
++ }
++
++ switch (task->task_proto) {
++ case SAS_PROTOCOL_SSP:
++ if (task->data_dir == DMA_NONE)
++ break;
++ if (task->num_scatter == 0)
++ /* 0 indicates a single dma address */
++ dma_unmap_single(&ihost->pdev->dev,
++ request->zero_scatter_daddr,
++ task->total_xfer_len, task->data_dir);
++ else /* unmap the sgl dma addresses */
++ dma_unmap_sg(&ihost->pdev->dev, task->scatter,
++ request->num_sg_entries, task->data_dir);
++ break;
++ case SAS_PROTOCOL_SMP: {
++ struct scatterlist *sg = &task->smp_task.smp_req;
++ struct smp_req *smp_req;
++ void *kaddr;
++
++ dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
++
++ /* need to swab it back in case the command buffer is re-used */
++ kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
++ smp_req = kaddr + sg->offset;
++ sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
++ kunmap_atomic(kaddr, KM_IRQ0);
++ break;
++ }
++ default:
++ break;
++ }
++
++ /* Put the completed request on the correct list */
++ isci_task_save_for_upper_layer_completion(ihost, request, response,
++ status, complete_to_host
++ );
++
++ /* complete the io request to the core. */
++ sci_controller_complete_io(ihost, request->target_device, request);
++ isci_put_device(idev);
++
++	/* Set the terminated flag so the request cannot be completed or
++	 * terminated again, and so any calls into abort task recognize
++	 * the already-completed case.
++ */
++ set_bit(IREQ_TERMINATED, &request->flags);
++}
++
++static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
++ struct domain_device *dev = ireq->target_device->domain_dev;
++ struct sas_task *task;
++
++ /* XXX as hch said always creating an internal sas_task for tmf
++ * requests would simplify the driver
++ */
++ task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
++
++	/* all unaccelerated request types (non-SSP or non-NCQ) are
++	 * handled with substates
++	 */
++ if (!task && dev->dev_type == SAS_END_DEV) {
++ sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
++ } else if (!task &&
++ (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
++ isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
++ sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
++ } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
++ sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
++ } else if (task && sas_protocol_ata(task->task_proto) &&
++ !task->ata_task.use_ncq) {
++ u32 state;
++
++ if (task->data_dir == DMA_NONE)
++ state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
++ else if (task->ata_task.dma_xfer)
++ state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
++ else /* PIO */
++ state = SCI_REQ_STP_PIO_WAIT_H2D;
++
++ sci_change_state(sm, state);
++ }
++}
++
++static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
++ struct isci_host *ihost = ireq->owning_controller;
++
++ /* Tell the SCI_USER that the IO request is complete */
++ if (!test_bit(IREQ_TMF, &ireq->flags))
++ isci_request_io_request_complete(ihost, ireq,
++ ireq->sci_status);
++ else
++ isci_task_request_complete(ihost, ireq, ireq->sci_status);
++}
++
++static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
++
++ /* Setting the abort bit in the Task Context is required by the silicon. */
++ ireq->tc->abort = 1;
++}
++
++static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
++
++ ireq->target_device->working_request = ireq;
++}
++
++static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
++
++ ireq->target_device->working_request = ireq;
++}
++
++static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
++
++ ireq->target_device->working_request = ireq;
++}
++
++static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
++{
++ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
++ struct scu_task_context *tc = ireq->tc;
++ struct host_to_dev_fis *h2d_fis;
++ enum sci_status status;
++
++ /* Clear the SRST bit */
++ h2d_fis = &ireq->stp.cmd;
++ h2d_fis->control = 0;
++
++ /* Clear the TC control bit */
++ tc->control_frame = 0;
++
++ status = sci_controller_continue_io(ireq);
++ WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
++}
++
++static const struct sci_base_state sci_request_state_table[] = {
++ [SCI_REQ_INIT] = { },
++ [SCI_REQ_CONSTRUCTED] = { },
++ [SCI_REQ_STARTED] = {
++ .enter_state = sci_request_started_state_enter,
++ },
++ [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
++ .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
++ },
++ [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
++ [SCI_REQ_STP_PIO_WAIT_H2D] = {
++ .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
++ },
++ [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
++ [SCI_REQ_STP_PIO_DATA_IN] = { },
++ [SCI_REQ_STP_PIO_DATA_OUT] = { },
++ [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
++ [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
++ [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
++ .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
++ },
++ [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
++ .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
++ },
++ [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
++ [SCI_REQ_TASK_WAIT_TC_COMP] = { },
++ [SCI_REQ_TASK_WAIT_TC_RESP] = { },
++ [SCI_REQ_SMP_WAIT_RESP] = { },
++ [SCI_REQ_SMP_WAIT_TC_COMP] = { },
++ [SCI_REQ_COMPLETED] = {
++ .enter_state = sci_request_completed_state_enter,
++ },
++ [SCI_REQ_ABORTING] = {
++ .enter_state = sci_request_aborting_state_enter,
++ },
++ [SCI_REQ_FINAL] = { },
++};
++
++static void
++sci_general_request_construct(struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq)
++{
++ sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
++
++ ireq->target_device = idev;
++ ireq->protocol = SCIC_NO_PROTOCOL;
++ ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
++
++ ireq->sci_status = SCI_SUCCESS;
++ ireq->scu_status = 0;
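++	/* 0xFFFFFFFF is a sentinel; the real post context is filled in
++	 * before the request is posted to the hardware.
++	 */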
++ ireq->post_context = 0xFFFFFFFF;
++}
++
++static enum sci_status
++sci_io_request_construct(struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ struct isci_request *ireq)
++{
++ struct domain_device *dev = idev->domain_dev;
++ enum sci_status status = SCI_SUCCESS;
++
++ /* Build the common part of the request */
++ sci_general_request_construct(ihost, idev, ireq);
++
++ if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
++ return SCI_FAILURE_INVALID_REMOTE_DEVICE;
++
++ if (dev->dev_type == SAS_END_DEV)
++ /* pass */;
++ else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
++ memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
++ else if (dev_is_expander(dev))
++ /* pass */;
++ else
++ return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
++
++ memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
++
++ return status;
++}
++
++enum sci_status sci_task_request_construct(struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ u16 io_tag, struct isci_request *ireq)
++{
++ struct domain_device *dev = idev->domain_dev;
++ enum sci_status status = SCI_SUCCESS;
++
++ /* Build the common part of the request */
++ sci_general_request_construct(ihost, idev, ireq);
++
++ if (dev->dev_type == SAS_END_DEV ||
++ dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
++ set_bit(IREQ_TMF, &ireq->flags);
++ memset(ireq->tc, 0, sizeof(struct scu_task_context));
++ } else
++ status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
++
++ return status;
++}
++
++static enum sci_status isci_request_ssp_request_construct(
++ struct isci_request *request)
++{
++ enum sci_status status;
++
++ dev_dbg(&request->isci_host->pdev->dev,
++ "%s: request = %p\n",
++ __func__,
++ request);
++ status = sci_io_request_construct_basic_ssp(request);
++ return status;
++}
++
++static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
++{
++ struct sas_task *task = isci_request_access_task(ireq);
++ struct host_to_dev_fis *fis = &ireq->stp.cmd;
++ struct ata_queued_cmd *qc = task->uldd_task;
++ enum sci_status status;
++
++ dev_dbg(&ireq->isci_host->pdev->dev,
++ "%s: ireq = %p\n",
++ __func__,
++ ireq);
++
++ memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
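++	/* In the H2D FIS flags byte, bit 7 is the 'C' bit (command vs.
++	 * device control update) and the low nibble is the PM port
++	 * field, which is cleared below.
++	 */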
++ if (!task->ata_task.device_control_reg_update)
++ fis->flags |= 0x80;
++ fis->flags &= 0xF0;
++
++ status = sci_io_request_construct_basic_sata(ireq);
++
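++	/* For first-party DMA (NCQ) commands the queue tag travels in
++	 * bits 7:3 of the FIS sector count field.
++	 */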
++ if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
++ qc->tf.command == ATA_CMD_FPDMA_READ)) {
++ fis->sector_count = qc->tag << 3;
++ ireq->tc->type.stp.ncq_tag = qc->tag;
++ }
++
++ return status;
++}
++
++static enum sci_status
++sci_io_request_construct_smp(struct device *dev,
++ struct isci_request *ireq,
++ struct sas_task *task)
++{
++ struct scatterlist *sg = &task->smp_task.smp_req;
++ struct isci_remote_device *idev;
++ struct scu_task_context *task_context;
++ struct isci_port *iport;
++ struct smp_req *smp_req;
++ void *kaddr;
++ u8 req_len;
++ u32 cmd;
++
++ kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
++ smp_req = kaddr + sg->offset;
++ /*
++	 * Look at the SMP request's header fields; for certain SAS 1.x SMP
++ * functions under SAS 2.0, a zero request length really indicates
++ * a non-zero default length.
++ */
++ if (smp_req->req_len == 0) {
++ switch (smp_req->func) {
++ case SMP_DISCOVER:
++ case SMP_REPORT_PHY_ERR_LOG:
++ case SMP_REPORT_PHY_SATA:
++ case SMP_REPORT_ROUTE_INFO:
++ smp_req->req_len = 2;
++ break;
++ case SMP_CONF_ROUTE_INFO:
++ case SMP_PHY_CONTROL:
++ case SMP_PHY_TEST_FUNCTION:
++ smp_req->req_len = 9;
++ break;
++ /* Default - zero is a valid default for 2.0. */
++ }
++ }
++ req_len = smp_req->req_len;
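++	/* Byte-swap the request into wire (big-endian) order before it
++	 * is DMA-mapped.
++	 */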
++ sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
++ cmd = *(u32 *) smp_req;
++ kunmap_atomic(kaddr, KM_IRQ0);
++
++ if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
++ return SCI_FAILURE;
++
++ ireq->protocol = SCIC_SMP_PROTOCOL;
++
++ task_context = ireq->tc;
++
++ idev = ireq->target_device;
++ iport = idev->owning_port;
++
++ /*
++	 * Fill in the TC with its required data
++ * 00h
++ */
++ task_context->priority = 0;
++ task_context->initiator_request = 1;
++ task_context->connection_rate = idev->connection_rate;
++ task_context->protocol_engine_index = ISCI_PEG;
++ task_context->logical_port_index = iport->physical_port_index;
++ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
++ task_context->abort = 0;
++ task_context->valid = SCU_TASK_CONTEXT_VALID;
++ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
++
++ /* 04h */
++ task_context->remote_node_index = idev->rnc.remote_node_index;
++ task_context->command_code = 0;
++ task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
++
++ /* 08h */
++ task_context->link_layer_control = 0;
++ task_context->do_not_dma_ssp_good_response = 1;
++ task_context->strict_ordering = 0;
++ task_context->control_frame = 1;
++ task_context->timeout_enable = 0;
++ task_context->block_guard_enable = 0;
++
++ /* 0ch */
++ task_context->address_modifier = 0;
++
++ /* 10h */
++ task_context->ssp_command_iu_length = req_len;
++
++ /* 14h */
++ task_context->transfer_length_bytes = 0;
++
++ /*
++ * 18h ~ 30h, protocol specific
++	 * since the command IU has been built by the framework at this
++	 * point, we just copy the first DWord from the command IU to
++	 * this location. */
++ memcpy(&task_context->type.smp, &cmd, sizeof(u32));
++
++ /*
++ * 40h
++ * "For SMP you could program it to zero. We would prefer that way
++ * so that done code will be consistent." - Venki
++ */
++ task_context->task_phase = 0;
++
++ ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
++ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
++ (iport->physical_port_index <<
++ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
++ ISCI_TAG_TCI(ireq->io_tag));
++	/*
++	 * Copy the physical address of the command buffer into the SCU
++	 * Task Context; the command buffer should not contain the command
++	 * header, so skip past the first dword.
++	 */
++ task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
++ task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
++
++ /* SMP response comes as UF, so no need to set response IU address. */
++ task_context->response_iu_upper = 0;
++ task_context->response_iu_lower = 0;
++
++ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
++
++ return SCI_SUCCESS;
++}
++
++/*
++ * isci_smp_request_build() - This function builds the smp request.
++ * @ireq: This parameter points to the isci_request allocated in the
++ * request construct function.
++ *
++ * SCI_SUCCESS on successful completion, or specific failure code.
++ */
++static enum sci_status isci_smp_request_build(struct isci_request *ireq)
++{
++ struct sas_task *task = isci_request_access_task(ireq);
++ struct device *dev = &ireq->isci_host->pdev->dev;
++ enum sci_status status = SCI_FAILURE;
++
++ status = sci_io_request_construct_smp(dev, ireq, task);
++ if (status != SCI_SUCCESS)
++ dev_dbg(&ireq->isci_host->pdev->dev,
++ "%s: failed with status = %d\n",
++ __func__,
++ status);
++
++ return status;
++}
++
++/**
++ * isci_io_request_build() - This function builds the io request object.
++ * @ihost: This parameter specifies the ISCI host object
++ * @request: This parameter points to the isci_request object allocated in the
++ * request construct function.
++ * @idev: This parameter is the handle for the sci core's remote device
++ * object that is the destination for this request.
++ *
++ * SCI_SUCCESS on successful completion, or specific failure code.
++ */
++static enum sci_status isci_io_request_build(struct isci_host *ihost,
++ struct isci_request *request,
++ struct isci_remote_device *idev)
++{
++ enum sci_status status = SCI_SUCCESS;
++ struct sas_task *task = isci_request_access_task(request);
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: idev = 0x%p; request = %p, "
++ "num_scatter = %d\n",
++ __func__,
++ idev,
++ request,
++ task->num_scatter);
++
++ /* map the sgl addresses, if present.
++ * libata does the mapping for sata devices
++ * before we get the request.
++ */
++ if (task->num_scatter &&
++ !sas_protocol_ata(task->task_proto) &&
++ !(SAS_PROTOCOL_SMP & task->task_proto)) {
++
++ request->num_sg_entries = dma_map_sg(
++ &ihost->pdev->dev,
++ task->scatter,
++ task->num_scatter,
++ task->data_dir
++ );
++
++ if (request->num_sg_entries == 0)
++ return SCI_FAILURE_INSUFFICIENT_RESOURCES;
++ }
++
++ status = sci_io_request_construct(ihost, idev, request);
++
++ if (status != SCI_SUCCESS) {
++ dev_dbg(&ihost->pdev->dev,
++ "%s: failed request construct\n",
++ __func__);
++ return SCI_FAILURE;
++ }
++
++ switch (task->task_proto) {
++ case SAS_PROTOCOL_SMP:
++ status = isci_smp_request_build(request);
++ break;
++ case SAS_PROTOCOL_SSP:
++ status = isci_request_ssp_request_construct(request);
++ break;
++ case SAS_PROTOCOL_SATA:
++ case SAS_PROTOCOL_STP:
++ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
++ status = isci_request_stp_request_construct(request);
++ break;
++ default:
++ dev_dbg(&ihost->pdev->dev,
++ "%s: unknown protocol\n", __func__);
++ return SCI_FAILURE;
++ }
++
++ return SCI_SUCCESS;
++}
++
++static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
++{
++ struct isci_request *ireq;
++
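++	/* Request objects are pre-allocated per tag; look one up by the
++	 * tag's task context index and re-initialize it.
++	 */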
++ ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
++ ireq->io_tag = tag;
++ ireq->io_request_completion = NULL;
++ ireq->flags = 0;
++ ireq->num_sg_entries = 0;
++ INIT_LIST_HEAD(&ireq->completed_node);
++ INIT_LIST_HEAD(&ireq->dev_node);
++ isci_request_change_state(ireq, allocated);
++
++ return ireq;
++}
++
++static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
++ struct sas_task *task,
++ u16 tag)
++{
++ struct isci_request *ireq;
++
++ ireq = isci_request_from_tag(ihost, tag);
++ ireq->ttype_ptr.io_task_ptr = task;
++ ireq->ttype = io_task;
++ task->lldd_task = ireq;
++
++ return ireq;
++}
++
++struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
++ struct isci_tmf *isci_tmf,
++ u16 tag)
++{
++ struct isci_request *ireq;
++
++ ireq = isci_request_from_tag(ihost, tag);
++ ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
++ ireq->ttype = tmf_task;
++
++ return ireq;
++}
++
++int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
++ struct sas_task *task, u16 tag)
++{
++ enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
++ struct isci_request *ireq;
++ unsigned long flags;
++ int ret = 0;
++
++ /* do common allocation and init of request object. */
++ ireq = isci_io_request_from_tag(ihost, task, tag);
++
++ status = isci_io_request_build(ihost, ireq, idev);
++ if (status != SCI_SUCCESS) {
++ dev_dbg(&ihost->pdev->dev,
++ "%s: request_construct failed - status = 0x%x\n",
++ __func__,
++ status);
++ return status;
++ }
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++
++ if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
++
++ if (isci_task_is_ncq_recovery(task)) {
++
++ /* The device is in an NCQ recovery state. Issue the
++ * request on the task side. Note that it will
++ * complete on the I/O request side because the
++			 * request was built that way (i.e.
++ * ireq->is_task_management_request is false).
++ */
++ status = sci_controller_start_task(ihost,
++ idev,
++ ireq);
++ } else {
++ status = SCI_FAILURE;
++ }
++ } else {
++ /* send the request, let the core assign the IO TAG. */
++ status = sci_controller_start_io(ihost, idev,
++ ireq);
++ }
++
++ if (status != SCI_SUCCESS &&
++ status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
++ dev_dbg(&ihost->pdev->dev,
++ "%s: failed request start (0x%x)\n",
++ __func__, status);
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++ return status;
++ }
++
++ /* Either I/O started OK, or the core has signaled that
++ * the device needs a target reset.
++ *
++ * In either case, hold onto the I/O for later.
++ *
++	 * Update its status and add it to the list in the
++ * remote device object.
++ */
++ list_add(&ireq->dev_node, &idev->reqs_in_process);
++
++ if (status == SCI_SUCCESS) {
++ isci_request_change_state(ireq, started);
++ } else {
++ /* The request did not really start in the
++ * hardware, so clear the request handle
++ * here so no terminations will be done.
++ */
++ set_bit(IREQ_TERMINATED, &ireq->flags);
++ isci_request_change_state(ireq, completed);
++ }
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++
++	if (status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
++ /* Signal libsas that we need the SCSI error
++ * handler thread to work on this I/O and that
++ * we want a device reset.
++ */
++ spin_lock_irqsave(&task->task_state_lock, flags);
++ task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
++ spin_unlock_irqrestore(&task->task_state_lock, flags);
++
++ /* Cause this task to be scheduled in the SCSI error
++ * handler thread.
++ */
++ isci_execpath_callback(ihost, task,
++ sas_task_abort);
++
++ /* Change the status, since we are holding
++ * the I/O until it is managed by the SCSI
++ * error handler.
++ */
++ status = SCI_SUCCESS;
++ }
++
++ return ret;
++}
+diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
+new file mode 100644
+index 0000000..7a1d5a9
+--- /dev/null
++++ b/drivers/scsi/isci/request.h
+@@ -0,0 +1,448 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _ISCI_REQUEST_H_
++#define _ISCI_REQUEST_H_
++
++#include "isci.h"
++#include "host.h"
++#include "scu_task_context.h"
++
++/**
++ * enum isci_request_status - This enum defines the possible states of an I/O
++ * request.
++ *
++ *
++ */
++enum isci_request_status {
++ unallocated = 0x00,
++ allocated = 0x01,
++ started = 0x02,
++ completed = 0x03,
++ aborting = 0x04,
++ aborted = 0x05,
++ terminating = 0x06,
++ dead = 0x07
++};
++
++enum task_type {
++ io_task = 0,
++ tmf_task = 1
++};
++
++enum sci_request_protocol {
++ SCIC_NO_PROTOCOL,
++ SCIC_SMP_PROTOCOL,
++ SCIC_SSP_PROTOCOL,
++ SCIC_STP_PROTOCOL
++}; /* XXX remove me, use sas_task.{dev|task_proto} instead */
++
++/**
++ * isci_stp_request - extra request infrastructure to handle pio/atapi protocol
++ * @pio_len - number of bytes requested at PIO setup
++ * @status - pio setup ending status value to tell us if we need
++ * to wait for another fis or if the transfer is complete. Upon
++ * receipt of a d2h fis this will be the status field of that fis.
++ * @sgl - track pio transfer progress as we iterate through the sgl
++ * @device_cdb_len - atapi device advertises its transfer constraints at setup
++ */
++struct isci_stp_request {
++ u32 pio_len;
++ u8 status;
++
++ struct isci_stp_pio_sgl {
++ int index;
++ u8 set;
++ u32 offset;
++ } sgl;
++ u32 device_cdb_len;
++};
++
++struct isci_request {
++ enum isci_request_status status;
++ #define IREQ_COMPLETE_IN_TARGET 0
++ #define IREQ_TERMINATED 1
++ #define IREQ_TMF 2
++ #define IREQ_ACTIVE 3
++ unsigned long flags;
++ /* XXX kill ttype and ttype_ptr, allocate full sas_task */
++ enum task_type ttype;
++ union ttype_ptr_union {
++ struct sas_task *io_task_ptr; /* When ttype==io_task */
++ struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */
++ } ttype_ptr;
++ struct isci_host *isci_host;
++ /* For use in the requests_to_{complete|abort} lists: */
++ struct list_head completed_node;
++ /* For use in the reqs_in_process list: */
++ struct list_head dev_node;
++ spinlock_t state_lock;
++ dma_addr_t request_daddr;
++ dma_addr_t zero_scatter_daddr;
++ unsigned int num_sg_entries;
++ /* Note: "io_request_completion" is completed in two different ways
++ * depending on whether this is a TMF or regular request.
++ * - TMF requests are completed in the thread that started them;
++ * - regular requests are completed in the request completion callback
++ * function.
++ * This difference in operation allows the aborter of a TMF request
++ * to be sure that once the TMF request completes, the I/O that the
++ * TMF was aborting is guaranteed to have completed.
++ *
++ * XXX kill io_request_completion
++ */
++ struct completion *io_request_completion;
++ struct sci_base_state_machine sm;
++ struct isci_host *owning_controller;
++ struct isci_remote_device *target_device;
++ u16 io_tag;
++ enum sci_request_protocol protocol;
++ u32 scu_status; /* hardware result */
++ u32 sci_status; /* upper layer disposition */
++ u32 post_context;
++ struct scu_task_context *tc;
++ /* could be larger with sg chaining */
++ #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2)
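++	/* Each scu_sgl_element_pair carries two SGL entries, so
++	 * (SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2 pairs cover all
++	 * SCI_MAX_SCATTER_GATHER_ELEMENTS possible scatter entries.
++	 */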
++ struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));
++	/* This field is the index of the stored rx frame data. It is used in
++	 * STP internal requests and SMP response frames. If this field holds
++	 * a valid frame index, the saved frame must be released on IO request
++	 * completion.
++	 */
++ u32 saved_rx_frame_index;
++
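++	/* Exactly one member of the following union is live per request,
++	 * selected by the task's transport protocol: ssp for
++	 * SAS_PROTOCOL_SSP, smp for SMP, and stp for SATA/STP requests.
++	 */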
++ union {
++ struct {
++ union {
++ struct ssp_cmd_iu cmd;
++ struct ssp_task_iu tmf;
++ };
++ union {
++ struct ssp_response_iu rsp;
++ u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
++ };
++ } ssp;
++ struct {
++ struct smp_resp rsp;
++ } smp;
++ struct {
++ struct isci_stp_request req;
++ struct host_to_dev_fis cmd;
++ struct dev_to_host_fis rsp;
++ } stp;
++ };
++};
++
++static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req)
++{
++ struct isci_request *ireq;
++
++ ireq = container_of(stp_req, typeof(*ireq), stp.req);
++ return ireq;
++}
++
++/**
++ * enum sci_base_request_states - This enumeration depicts all the states for
++ * the common request state machine.
++ *
++ *
++ */
++enum sci_base_request_states {
++ /*
++ * Simply the initial state for the base request state machine.
++ */
++ SCI_REQ_INIT,
++
++ /*
++ * This state indicates that the request has been constructed.
++ * This state is entered from the INITIAL state.
++ */
++ SCI_REQ_CONSTRUCTED,
++
++ /*
++ * This state indicates that the request has been started. This state
++ * is entered from the CONSTRUCTED state.
++ */
++ SCI_REQ_STARTED,
++
++ SCI_REQ_STP_UDMA_WAIT_TC_COMP,
++ SCI_REQ_STP_UDMA_WAIT_D2H,
++
++ SCI_REQ_STP_NON_DATA_WAIT_H2D,
++ SCI_REQ_STP_NON_DATA_WAIT_D2H,
++
++ SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED,
++ SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG,
++ SCI_REQ_STP_SOFT_RESET_WAIT_D2H,
++
++ /*
++ * While in this state the IO request object is waiting for the TC
++ * completion notification for the H2D Register FIS
++ */
++ SCI_REQ_STP_PIO_WAIT_H2D,
++
++ /*
++ * While in this state the IO request object is waiting for either a
++ * PIO Setup FIS or a D2H register FIS. The type of frame received is
++ * based on the result of the prior frame and line conditions.
++ */
++ SCI_REQ_STP_PIO_WAIT_FRAME,
++
++ /*
++ * While in this state the IO request object is waiting for a DATA
++ * frame from the device.
++ */
++ SCI_REQ_STP_PIO_DATA_IN,
++
++ /*
++ * While in this state the IO request object is waiting to transmit
++ * the next data frame to the device.
++ */
++ SCI_REQ_STP_PIO_DATA_OUT,
++
++ /*
++ * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
++ * task management request is waiting for the transmission of the
++ * initial frame (i.e. command, task, etc.).
++ */
++ SCI_REQ_TASK_WAIT_TC_COMP,
++
++ /*
++ * This sub-state indicates that the started task management request
++ * is waiting for the reception of an unsolicited frame
++ * (i.e. response IU).
++ */
++ SCI_REQ_TASK_WAIT_TC_RESP,
++
++ /*
++ * This sub-state indicates that the started task management request
++ * is waiting for the reception of an unsolicited frame
++ * (i.e. response IU).
++ */
++ SCI_REQ_SMP_WAIT_RESP,
++
++ /*
++ * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
++ * request is waiting for the transmission of the initial frame
++ * (i.e. command, task, etc.).
++ */
++ SCI_REQ_SMP_WAIT_TC_COMP,
++
++ /*
++ * This state indicates that the request has completed.
++ * This state is entered from the STARTED state. This state is entered
++ * from the ABORTING state.
++ */
++ SCI_REQ_COMPLETED,
++
++ /*
++ * This state indicates that the request is in the process of being
++ * terminated/aborted.
++ * This state is entered from the CONSTRUCTED state.
++ * This state is entered from the STARTED state.
++ */
++ SCI_REQ_ABORTING,
++
++ /*
++ * Simply the final state for the base request state machine.
++ */
++ SCI_REQ_FINAL,
++};
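++
++/* Typical lifecycle of a successful SSP request, per the state notes above:
++ * SCI_REQ_INIT -> SCI_REQ_CONSTRUCTED -> SCI_REQ_STARTED ->
++ * SCI_REQ_COMPLETED -> SCI_REQ_FINAL; a terminated request instead passes
++ * through SCI_REQ_ABORTING before reaching SCI_REQ_COMPLETED.
++ */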
++
++enum sci_status sci_request_start(struct isci_request *ireq);
++enum sci_status sci_io_request_terminate(struct isci_request *ireq);
++enum sci_status
++sci_io_request_event_handler(struct isci_request *ireq,
++ u32 event_code);
++enum sci_status
++sci_io_request_frame_handler(struct isci_request *ireq,
++ u32 frame_index);
++enum sci_status
++sci_task_request_terminate(struct isci_request *ireq);
++extern enum sci_status
++sci_request_complete(struct isci_request *ireq);
++extern enum sci_status
++sci_io_request_tc_completion(struct isci_request *ireq, u32 code);
++
++/* XXX open code in caller */
++static inline dma_addr_t
++sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
++{
++
++ char *requested_addr = (char *)virt_addr;
++ char *base_addr = (char *)ireq;
++
++ BUG_ON(requested_addr < base_addr);
++ BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));
++
++ return ireq->request_daddr + (requested_addr - base_addr);
++}
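++
++/* Usage sketch (illustrative): translate the virtual address of a member
++ * embedded in the request, e.g. the SSP command IU, into a bus address for
++ * the hardware:
++ *
++ *	dma_addr_t cmd_daddr =
++ *		sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
++ *
++ * This works because the whole isci_request lives in a single coherent DMA
++ * allocation whose bus address is request_daddr.
++ */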
++
++/**
++ * isci_request_change_state() - This function sets the status of the request
++ * object.
++ * @isci_request: This parameter points to the isci_request object
++ * @status: This parameter is the new status of the object
++ *
++ * Returns the state previous to the change.
++ */
++static inline enum isci_request_status
++isci_request_change_state(struct isci_request *isci_request,
++ enum isci_request_status status)
++{
++ enum isci_request_status old_state;
++ unsigned long flags;
++
++ dev_dbg(&isci_request->isci_host->pdev->dev,
++ "%s: isci_request = %p, state = 0x%x\n",
++ __func__,
++ isci_request,
++ status);
++
++ BUG_ON(isci_request == NULL);
++
++ spin_lock_irqsave(&isci_request->state_lock, flags);
++ old_state = isci_request->status;
++ isci_request->status = status;
++ spin_unlock_irqrestore(&isci_request->state_lock, flags);
++
++ return old_state;
++}
++
++/**
++ * isci_request_change_started_to_newstate() - This function sets the status
++ * of the request object, but only if the request is currently started or
++ * aborting.
++ * @isci_request: This parameter points to the isci_request object
++ * @completion_ptr: This parameter is saved as the kernel completion structure
++ * signalled when the old request completes.
++ * @newstate: This parameter is the new status of the object
++ *
++ * Returns the state previous to any change.
++ */
++static inline enum isci_request_status
++isci_request_change_started_to_newstate(struct isci_request *isci_request,
++ struct completion *completion_ptr,
++ enum isci_request_status newstate)
++{
++ enum isci_request_status old_state;
++ unsigned long flags;
++
++ spin_lock_irqsave(&isci_request->state_lock, flags);
++
++ old_state = isci_request->status;
++
++ if (old_state == started || old_state == aborting) {
++ BUG_ON(isci_request->io_request_completion != NULL);
++
++ isci_request->io_request_completion = completion_ptr;
++ isci_request->status = newstate;
++ }
++
++ spin_unlock_irqrestore(&isci_request->state_lock, flags);
++
++ dev_dbg(&isci_request->isci_host->pdev->dev,
++ "%s: isci_request = %p, old_state = 0x%x\n",
++ __func__,
++ isci_request,
++ old_state);
++
++ return old_state;
++}
++
++/**
++ * isci_request_change_started_to_aborted() - This function sets the status of
++ * the request object to aborted.
++ * @isci_request: This parameter points to the isci_request object
++ * @completion_ptr: This parameter is saved as the kernel completion structure
++ * signalled when the old request completes.
++ *
++ * Returns the state previous to any change.
++ */
++static inline enum isci_request_status
++isci_request_change_started_to_aborted(struct isci_request *isci_request,
++ struct completion *completion_ptr)
++{
++ return isci_request_change_started_to_newstate(isci_request,
++ completion_ptr,
++ aborted);
++}
++
++#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)
++
++#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)
++
++struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
++ struct isci_tmf *isci_tmf,
++ u16 tag);
++int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
++ struct sas_task *task, u16 tag);
++void isci_terminate_pending_requests(struct isci_host *ihost,
++ struct isci_remote_device *idev);
++enum sci_status
++sci_task_request_construct(struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ u16 io_tag,
++ struct isci_request *ireq);
++enum sci_status
++sci_task_request_construct_ssp(struct isci_request *ireq);
++enum sci_status
++sci_task_request_construct_sata(struct isci_request *ireq);
++void sci_smp_request_copy_response(struct isci_request *ireq);
++
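++/* An NCQ error recovery request is the READ LOG EXT command fetching the
++ * NCQ command error log (page 10h) defined by the SATA specification.
++ */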
++static inline int isci_task_is_ncq_recovery(struct sas_task *task)
++{
++ return (sas_protocol_ata(task->task_proto) &&
++ task->ata_task.fis.command == ATA_CMD_READ_LOG_EXT &&
++ task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ);
++
++}
++
++#endif /* !defined(_ISCI_REQUEST_H_) */
+diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h
+new file mode 100644
+index 0000000..462b151
+--- /dev/null
++++ b/drivers/scsi/isci/sas.h
+@@ -0,0 +1,219 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _SCI_SAS_H_
++#define _SCI_SAS_H_
++
++#include <linux/kernel.h>
++
++/*
++ * SATA FIS Types: These constants depict the various SATA FIS types defined
++ * in the Serial ATA specification.
++ * XXX: This needs to go into <scsi/sas.h>
++ */
++#define FIS_REGH2D 0x27
++#define FIS_REGD2H 0x34
++#define FIS_SETDEVBITS 0xA1
++#define FIS_DMA_ACTIVATE 0x39
++#define FIS_DMA_SETUP 0x41
++#define FIS_BIST_ACTIVATE 0x58
++#define FIS_PIO_SETUP 0x5F
++#define FIS_DATA 0x46
++
++/**************************************************************************/
++#define SSP_RESP_IU_MAX_SIZE 280
++
++/*
++ * contents of the SSP COMMAND INFORMATION UNIT.
++ * For specific information on each of these individual fields please
++ * reference the SAS specification SSP transport layer section.
++ * XXX: This needs to go into <scsi/sas.h>
++ */
++struct ssp_cmd_iu {
++ u8 LUN[8];
++ u8 add_cdb_len:6;
++ u8 _r_a:2;
++ u8 _r_b;
++ u8 en_fburst:1;
++ u8 task_prio:4;
++ u8 task_attr:3;
++ u8 _r_c;
++
++ u8 cdb[16];
++} __packed;
++
++/*
++ * contents of the SSP TASK INFORMATION UNIT.
++ * For specific information on each of these individual fields please
++ * reference the SAS specification SSP transport layer section.
++ * XXX: This needs to go into <scsi/sas.h>
++ */
++struct ssp_task_iu {
++ u8 LUN[8];
++ u8 _r_a;
++ u8 task_func;
++ u8 _r_b[4];
++ u16 task_tag;
++ u8 _r_c[12];
++} __packed;
++
++
++/*
++ * struct smp_req_phy_id - This structure defines the contents of
++ * an SMP Request that is composed of the struct smp_request_header and a
++ * phy identifier.
++ * Examples: SMP_REQUEST_DISCOVER, SMP_REQUEST_REPORT_PHY_SATA.
++ *
++ * For specific information on each of these individual fields please reference
++ * the SAS specification.
++ */
++struct smp_req_phy_id {
++ u8 _r_a[4]; /* bytes 4-7 */
++
++ u8 ign_zone_grp:1; /* byte 8 */
++ u8 _r_b:7;
++
++ u8 phy_id; /* byte 9 */
++ u8 _r_c; /* byte 10 */
++ u8 _r_d; /* byte 11 */
++} __packed;
++
++/*
++ * struct smp_req_config_route_info - This structure defines the
++ * contents of an SMP Configure Route Information request.
++ *
++ * For specific information on each of these individual fields please reference
++ * the SAS specification.
++ */
++struct smp_req_conf_rtinfo {
++ u16 exp_change_cnt; /* bytes 4-5 */
++ u8 exp_rt_idx_hi; /* byte 6 */
++ u8 exp_rt_idx; /* byte 7 */
++
++ u8 _r_a; /* byte 8 */
++ u8 phy_id; /* byte 9 */
++ u16 _r_b; /* bytes 10-11 */
++
++ u8 _r_c:7; /* byte 12 */
++ u8 dis_rt_entry:1;
++ u8 _r_d[3]; /* bytes 13-15 */
++
++ u8 rt_sas_addr[8]; /* bytes 16-23 */
++ u8 _r_e[16]; /* bytes 24-39 */
++} __packed;
++
++/*
++ * struct smp_req_phycntl - This structure defines the contents of an
++ * SMP Phy Controller request.
++ *
++ * For specific information on each of these individual fields please reference
++ * the SAS specification.
++ */
++struct smp_req_phycntl {
++ u16 exp_change_cnt; /* byte 4-5 */
++
++ u8 _r_a[3]; /* bytes 6-8 */
++
++ u8 phy_id; /* byte 9 */
++ u8 phy_op; /* byte 10 */
++
++ u8 upd_pathway:1; /* byte 11 */
++ u8 _r_b:7;
++
++ u8 _r_c[12]; /* byte 12-23 */
++
++ u8 att_dev_name[8]; /* byte 24-31 */
++
++ u8 _r_d:4; /* byte 32 */
++ u8 min_linkrate:4;
++
++ u8 _r_e:4; /* byte 33 */
++ u8 max_linkrate:4;
++
++ u8 _r_f[2]; /* byte 34-35 */
++
++ u8 pathway:4; /* byte 36 */
++ u8 _r_g:4;
++
++ u8 _r_h[3]; /* bytes 37-39 */
++} __packed;
++
++/*
++ * struct smp_req - This structure simply combines the existing request
++ * structures into a common request type.
++ *
++ * XXX: This data structure may need to go to scsi/sas.h
++ */
++struct smp_req {
++ u8 type; /* byte 0 */
++ u8 func; /* byte 1 */
++ u8 alloc_resp_len; /* byte 2 */
++ u8 req_len; /* byte 3 */
++ u8 req_data[0];
++} __packed;
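++
++/* Illustrative encoding (values taken from the SAS specification, not from
++ * this file): an SMP DISCOVER request would set type = 0x40 (SMP request
++ * frame), func = 0x10 (DISCOVER), and carry a struct smp_req_phy_id as the
++ * req_data payload.
++ */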
++
++#define SMP_RESP_HDR_SZ 4
++
++/*
++ * struct sci_sas_address - This structure depicts how a SAS address is
++ * represented by SCI.
++ * XXX convert this to u8 [SAS_ADDR_SIZE] like the rest of libsas
++ *
++ */
++struct sci_sas_address {
++ u32 high;
++ u32 low;
++};
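++
++/* Conversion sketch (assumption, not part of this file): libsas carries the
++ * address as u8 sas_addr[SAS_ADDR_SIZE]; the equivalent split is high =
++ * big-endian u32 of bytes 0-3 and low = big-endian u32 of bytes 4-7.
++ */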
++#endif
+diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h
+new file mode 100644
+index 0000000..c8b329c
+--- /dev/null
++++ b/drivers/scsi/isci/scu_completion_codes.h
+@@ -0,0 +1,283 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _SCU_COMPLETION_CODES_HEADER_
++#define _SCU_COMPLETION_CODES_HEADER_
++
++/**
++ * This file contains the constants and macros for the SCU hardware completion
++ * codes.
++ *
++ *
++ */
++
++#define SCU_COMPLETION_TYPE_SHIFT 28
++#define SCU_COMPLETION_TYPE_MASK 0x70000000
++
++/**
++ * SCU_COMPLETION_TYPE() -
++ *
++ * This macro constructs an SCU completion type
++ */
++#define SCU_COMPLETION_TYPE(type) \
++ ((u32)(type) << SCU_COMPLETION_TYPE_SHIFT)
++
++/**
++ * SCU_COMPLETION_TYPE_* -
++ *
++ * These macros define the individual SCU completion types.
++ */
++#define SCU_COMPLETION_TYPE_TASK SCU_COMPLETION_TYPE(0)
++#define SCU_COMPLETION_TYPE_SDMA SCU_COMPLETION_TYPE(1)
++#define SCU_COMPLETION_TYPE_UFI SCU_COMPLETION_TYPE(2)
++#define SCU_COMPLETION_TYPE_EVENT SCU_COMPLETION_TYPE(3)
++#define SCU_COMPLETION_TYPE_NOTIFY SCU_COMPLETION_TYPE(4)
++
++/**
++ *
++ *
++ * These constants provide the shift and mask values for the various parts of
++ * an SCU completion code.
++ */
++#define SCU_COMPLETION_STATUS_MASK 0x0FFC0000
++#define SCU_COMPLETION_TL_STATUS_MASK 0x0FC00000
++#define SCU_COMPLETION_TL_STATUS_SHIFT 22
++#define SCU_COMPLETION_SDMA_STATUS_MASK 0x003C0000
++#define SCU_COMPLETION_PEG_MASK 0x00010000
++#define SCU_COMPLETION_PORT_MASK 0x00007000
++#define SCU_COMPLETION_PE_MASK SCU_COMPLETION_PORT_MASK
++#define SCU_COMPLETION_PE_SHIFT 12
++#define SCU_COMPLETION_INDEX_MASK 0x00000FFF
++
++/**
++ * SCU_GET_COMPLETION_TYPE() -
++ *
++ * This macro returns the SCU completion type.
++ */
++#define SCU_GET_COMPLETION_TYPE(completion_code) \
++ ((completion_code) & SCU_COMPLETION_TYPE_MASK)
++
++/**
++ * SCU_GET_COMPLETION_STATUS() -
++ *
++ * This macro returns the SCU completion status.
++ */
++#define SCU_GET_COMPLETION_STATUS(completion_code) \
++ ((completion_code) & SCU_COMPLETION_STATUS_MASK)
++
++/**
++ * SCU_GET_COMPLETION_TL_STATUS() -
++ *
++ * This macro returns the transport layer completion status.
++ */
++#define SCU_GET_COMPLETION_TL_STATUS(completion_code) \
++ ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK)
++
++/**
++ * SCU_MAKE_COMPLETION_STATUS() -
++ *
++ * This macro takes a completion code and performs the shift and mask
++ * operations to turn it into a completion code that can be compared to a
++ * SCU_GET_COMPLETION_TL_STATUS.
++ */
++#define SCU_MAKE_COMPLETION_STATUS(completion_code) \
++ ((u32)(completion_code) << SCU_COMPLETION_TL_STATUS_SHIFT)
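++
++/* Typical comparison sketch: test a hardware completion code against a
++ * normalized constant, e.g.
++ *
++ *	if (SCU_GET_COMPLETION_TL_STATUS(completion_code) ==
++ *	    SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD))
++ *		... handle success ...
++ */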
++
++/**
++ * SCU_NORMALIZE_COMPLETION_STATUS() -
++ *
++ * This macro takes a SCU_GET_COMPLETION_TL_STATUS and normalizes it for a
++ * return code.
++ */
++#define SCU_NORMALIZE_COMPLETION_STATUS(completion_code) \
++ (\
++ ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK) \
++ >> SCU_COMPLETION_TL_STATUS_SHIFT \
++ )
++
++/**
++ * SCU_GET_COMPLETION_SDMA_STATUS() -
++ *
++ * This macro returns the SDMA completion status.
++ */
++#define SCU_GET_COMPLETION_SDMA_STATUS(completion_code) \
++ ((completion_code) & SCU_COMPLETION_SDMA_STATUS_MASK)
++
++/**
++ * SCU_GET_COMPLETION_PEG() -
++ *
++ * This macro returns the Protocol Engine Group from the completion code.
++ */
++#define SCU_GET_COMPLETION_PEG(completion_code) \
++ ((completion_code) & SCU_COMPLETION_PEG_MASK)
++
++/**
++ * SCU_GET_COMPLETION_PORT() -
++ *
++ * This macro returns the logical port index from the completion code.
++ */
++#define SCU_GET_COMPLETION_PORT(completion_code) \
++ ((completion_code) & SCU_COMPLETION_PORT_MASK)
++
++/**
++ * SCU_GET_PROTOCOL_ENGINE_INDEX() -
++ *
++ * This macro returns the PE index from the completion code.
++ */
++#define SCU_GET_PROTOCOL_ENGINE_INDEX(completion_code) \
++ (((completion_code) & SCU_COMPLETION_PE_MASK) >> SCU_COMPLETION_PE_SHIFT)
++
++/**
++ * SCU_GET_COMPLETION_INDEX() -
++ *
++ * This macro returns the index of the completion which is either a TCi or an
++ * RNi depending on the completion type.
++ */
++#define SCU_GET_COMPLETION_INDEX(completion_code) \
++ ((completion_code) & SCU_COMPLETION_INDEX_MASK)
++
++#define SCU_UNSOLICITED_FRAME_MASK 0x0FFF0000
++#define SCU_UNSOLICITED_FRAME_SHIFT 16
++
++/**
++ * SCU_GET_FRAME_INDEX() -
++ *
++ * This macro returns a normalized frame index from an unsolicited frame
++ * completion.
++ */
++#define SCU_GET_FRAME_INDEX(completion_code) \
++ (\
++ ((completion_code) & SCU_UNSOLICITED_FRAME_MASK) \
++ >> SCU_UNSOLICITED_FRAME_SHIFT \
++ )
++
++#define SCU_UNSOLICITED_FRAME_ERROR_MASK 0x00008000
++
++/**
++ * SCU_GET_FRAME_ERROR() -
++ *
++ * This macro returns zero (0) if there is no frame error; otherwise it
++ * returns non-zero (!0).
++ */
++#define SCU_GET_FRAME_ERROR(completion_code) \
++ ((completion_code) & SCU_UNSOLICITED_FRAME_ERROR_MASK)
++
++/**
++ *
++ *
++ * These constants represent normalized completion codes which must be shifted
++ * 18 bits to match the hardware completion code. In a 16-bit compiler,
++ * immediate constants are 16-bit values (the size of an int). If we shift
++ * those by 18 bits, we completely lose the value. To ensure the value is a
++ * 32-bit value like we want, each immediate value must be cast to a u32.
++ */
++#define SCU_TASK_DONE_GOOD ((u32)0x00)
++#define SCU_TASK_DONE_CRC_ERR ((u32)0x14)
++#define SCU_TASK_DONE_CHECK_RESPONSE ((u32)0x14)
++#define SCU_TASK_DONE_GEN_RESPONSE ((u32)0x15)
++#define SCU_TASK_DONE_NAK_CMD_ERR ((u32)0x16)
++#define SCU_TASK_DONE_CMD_LL_R_ERR ((u32)0x16)
++#define SCU_TASK_DONE_LL_R_ERR ((u32)0x17)
++#define SCU_TASK_DONE_ACK_NAK_TO ((u32)0x17)
++#define SCU_TASK_DONE_LL_PERR ((u32)0x18)
++#define SCU_TASK_DONE_LL_SY_TERM ((u32)0x19)
++#define SCU_TASK_DONE_NAK_ERR ((u32)0x19)
++#define SCU_TASK_DONE_LL_LF_TERM ((u32)0x1A)
++#define SCU_TASK_DONE_DATA_LEN_ERR ((u32)0x1A)
++#define SCU_TASK_DONE_LL_CL_TERM ((u32)0x1B)
++#define SCU_TASK_DONE_LL_ABORT_ERR ((u32)0x1B)
++#define SCU_TASK_DONE_SEQ_INV_TYPE ((u32)0x1C)
++#define SCU_TASK_DONE_UNEXP_XR ((u32)0x1C)
++#define SCU_TASK_DONE_INV_FIS_TYPE ((u32)0x1D)
++#define SCU_TASK_DONE_XR_IU_LEN_ERR ((u32)0x1D)
++#define SCU_TASK_DONE_INV_FIS_LEN ((u32)0x1E)
++#define SCU_TASK_DONE_XR_WD_LEN ((u32)0x1E)
++#define SCU_TASK_DONE_SDMA_ERR ((u32)0x1F)
++#define SCU_TASK_DONE_OFFSET_ERR ((u32)0x20)
++#define SCU_TASK_DONE_MAX_PLD_ERR ((u32)0x21)
++#define SCU_TASK_DONE_EXCESS_DATA ((u32)0x22)
++#define SCU_TASK_DONE_LF_ERR ((u32)0x23)
++#define SCU_TASK_DONE_UNEXP_FIS ((u32)0x24)
++#define SCU_TASK_DONE_UNEXP_RESP ((u32)0x24)
++#define SCU_TASK_DONE_EARLY_RESP ((u32)0x25)
++#define SCU_TASK_DONE_SMP_RESP_TO_ERR ((u32)0x26)
++#define SCU_TASK_DONE_DMASETUP_DIRERR ((u32)0x27)
++#define SCU_TASK_DONE_SMP_UFI_ERR ((u32)0x27)
++#define SCU_TASK_DONE_XFERCNT_ERR ((u32)0x28)
++#define SCU_TASK_DONE_SMP_FRM_TYPE_ERR ((u32)0x28)
++#define SCU_TASK_DONE_SMP_LL_RX_ERR ((u32)0x29)
++#define SCU_TASK_DONE_RESP_LEN_ERR ((u32)0x2A)
++#define SCU_TASK_DONE_UNEXP_DATA ((u32)0x2B)
++#define SCU_TASK_DONE_OPEN_FAIL ((u32)0x2C)
++#define SCU_TASK_DONE_UNEXP_SDBFIS ((u32)0x2D)
++#define SCU_TASK_DONE_REG_ERR ((u32)0x2E)
++#define SCU_TASK_DONE_SDB_ERR ((u32)0x2F)
++#define SCU_TASK_DONE_TASK_ABORT ((u32)0x30)
++#define SCU_TASK_DONE_CMD_SDMA_ERR ((u32)0x32)
++#define SCU_TASK_DONE_CMD_LL_ABORT_ERR ((u32)0x33)
++#define SCU_TASK_OPEN_REJECT_WRONG_DESTINATION ((u32)0x34)
++#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1 ((u32)0x35)
++#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2 ((u32)0x36)
++#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3 ((u32)0x37)
++#define SCU_TASK_OPEN_REJECT_BAD_DESTINATION ((u32)0x38)
++#define SCU_TASK_OPEN_REJECT_ZONE_VIOLATION ((u32)0x39)
++#define SCU_TASK_DONE_VIIT_ENTRY_NV ((u32)0x3A)
++#define SCU_TASK_DONE_IIT_ENTRY_NV ((u32)0x3B)
++#define SCU_TASK_DONE_RNCNV_OUTBOUND ((u32)0x3C)
++#define SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY ((u32)0x3D)
++#define SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED ((u32)0x3E)
++#define SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED ((u32)0x3F)
++
++#endif /* _SCU_COMPLETION_CODES_HEADER_ */
+diff --git a/drivers/scsi/isci/scu_event_codes.h b/drivers/scsi/isci/scu_event_codes.h
+new file mode 100644
+index 0000000..36a945a
+--- /dev/null
++++ b/drivers/scsi/isci/scu_event_codes.h
+@@ -0,0 +1,336 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __SCU_EVENT_CODES_HEADER__
++#define __SCU_EVENT_CODES_HEADER__
++
++/**
++ * This file contains the constants and macros for the SCU event codes.
++ *
++ *
++ */
++
++#define SCU_EVENT_TYPE_CODE_SHIFT 24
++#define SCU_EVENT_TYPE_CODE_MASK 0x0F000000
++
++#define SCU_EVENT_SPECIFIC_CODE_SHIFT 18
++#define SCU_EVENT_SPECIFIC_CODE_MASK 0x00FC0000
++
++#define SCU_EVENT_CODE_MASK \
++ (SCU_EVENT_TYPE_CODE_MASK | SCU_EVENT_SPECIFIC_CODE_MASK)
++
++/**
++ * SCU_EVENT_TYPE() -
++ *
++ * This macro constructs an SCU event type from the type value.
++ */
++#define SCU_EVENT_TYPE(type) \
++ ((u32)(type) << SCU_EVENT_TYPE_CODE_SHIFT)
++
++/**
++ * SCU_EVENT_SPECIFIC() -
++ *
++ * This macro constructs an SCU event specifier from the code value.
++ */
++#define SCU_EVENT_SPECIFIC(code) \
++ ((u32)(code) << SCU_EVENT_SPECIFIC_CODE_SHIFT)
++
++/**
++ * SCU_EVENT_MESSAGE() -
++ *
++ * This macro combines an SCU event type and an SCU event specifier
++ * from the type and code values.
++ */
++#define SCU_EVENT_MESSAGE(type, code) \
++ ((type) | SCU_EVENT_SPECIFIC(code))
++
++/**
++ * SCU_EVENT_TYPE_* -
++ *
++ * SCU_EVENT_TYPES
++ */
++#define SCU_EVENT_TYPE_SMU_COMMAND_ERROR SCU_EVENT_TYPE(0x08)
++#define SCU_EVENT_TYPE_SMU_PCQ_ERROR SCU_EVENT_TYPE(0x09)
++#define SCU_EVENT_TYPE_SMU_ERROR SCU_EVENT_TYPE(0x00)
++#define SCU_EVENT_TYPE_TRANSPORT_ERROR SCU_EVENT_TYPE(0x01)
++#define SCU_EVENT_TYPE_BROADCAST_CHANGE SCU_EVENT_TYPE(0x02)
++#define SCU_EVENT_TYPE_OSSP_EVENT SCU_EVENT_TYPE(0x03)
++#define SCU_EVENT_TYPE_FATAL_MEMORY_ERROR SCU_EVENT_TYPE(0x0F)
++#define SCU_EVENT_TYPE_RNC_SUSPEND_TX SCU_EVENT_TYPE(0x04)
++#define SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX SCU_EVENT_TYPE(0x05)
++#define SCU_EVENT_TYPE_RNC_OPS_MISC SCU_EVENT_TYPE(0x06)
++#define SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT SCU_EVENT_TYPE(0x07)
++#define SCU_EVENT_TYPE_ERR_CNT_EVENT SCU_EVENT_TYPE(0x0A)
++
++/**
++ *
++ *
++ * SCU_EVENT_SPECIFIERS
++ */
++#define SCU_EVENT_SPECIFIER_DRIVER_SUSPEND 0x20
++#define SCU_EVENT_SPECIFIER_RNC_RELEASE 0x00
++
++/**
++ *
++ *
++ * SMU_COMMAND_EVENTS
++ */
++#define SCU_EVENT_INVALID_CONTEXT_COMMAND \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_COMMAND_ERROR, 0x00)
++
++/**
++ *
++ *
++ * SMU_PCQ_EVENTS
++ */
++#define SCU_EVENT_UNCORRECTABLE_PCQ_ERROR \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_PCQ_ERROR, 0x00)
++
++/**
++ *
++ *
++ * SMU_EVENTS
++ */
++#define SCU_EVENT_UNCORRECTABLE_REGISTER_WRITE \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x02)
++#define SCU_EVENT_UNCORRECTABLE_REGISTER_READ \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x03)
++#define SCU_EVENT_PCIE_INTERFACE_ERROR \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x04)
++#define SCU_EVENT_FUNCTION_LEVEL_RESET \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x05)
++
++/**
++ *
++ *
++ * TRANSPORT_LEVEL_ERRORS
++ */
++#define SCU_EVENT_ACK_NAK_TIMEOUT_ERROR \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_TRANSPORT_ERROR, 0x00)
++
++/**
++ *
++ *
++ * BROADCAST_CHANGE_EVENTS
++ */
++#define SCU_EVENT_BROADCAST_CHANGE \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x01)
++#define SCU_EVENT_BROADCAST_RESERVED0 \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x02)
++#define SCU_EVENT_BROADCAST_RESERVED1 \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x03)
++#define SCU_EVENT_BROADCAST_SES \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x04)
++#define SCU_EVENT_BROADCAST_EXPANDER \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x05)
++#define SCU_EVENT_BROADCAST_AEN \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x06)
++#define SCU_EVENT_BROADCAST_RESERVED3 \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x07)
++#define SCU_EVENT_BROADCAST_RESERVED4 \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x08)
++#define SCU_EVENT_PE_SUSPENDED \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x09)
++
++/**
++ *
++ *
++ * OSSP_EVENTS
++ */
++#define SCU_EVENT_PORT_SELECTOR_DETECTED \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x10)
++#define SCU_EVENT_SENT_PORT_SELECTION \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x11)
++#define SCU_EVENT_HARD_RESET_TRANSMITTED \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x12)
++#define SCU_EVENT_HARD_RESET_RECEIVED \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x13)
++#define SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x15)
++#define SCU_EVENT_LINK_FAILURE \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x16)
++#define SCU_EVENT_SATA_SPINUP_HOLD \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x17)
++#define SCU_EVENT_SAS_15_SSC \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x18)
++#define SCU_EVENT_SAS_15 \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x19)
++#define SCU_EVENT_SAS_30_SSC \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1A)
++#define SCU_EVENT_SAS_30 \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1B)
++#define SCU_EVENT_SAS_60_SSC \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1C)
++#define SCU_EVENT_SAS_60 \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1D)
++#define SCU_EVENT_SATA_15_SSC \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1E)
++#define SCU_EVENT_SATA_15 \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1F)
++#define SCU_EVENT_SATA_30_SSC \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x20)
++#define SCU_EVENT_SATA_30 \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x21)
++#define SCU_EVENT_SATA_60_SSC \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x22)
++#define SCU_EVENT_SATA_60 \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x23)
++#define SCU_EVENT_SAS_PHY_DETECTED \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x24)
++#define SCU_EVENT_SATA_PHY_DETECTED \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x25)
++
++/**
++ *
++ *
++ * FATAL_INTERNAL_MEMORY_ERROR_EVENTS
++ */
++#define SCU_EVENT_TSC_RNSC_UNCORRECTABLE_ERROR \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x00)
++#define SCU_EVENT_TC_RNC_UNCORRECTABLE_ERROR \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x01)
++#define SCU_EVENT_ZPT_UNCORRECTABLE_ERROR \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x02)
++
++/**
++ *
++ *
++ * REMOTE_NODE_SUSPEND_EVENTS
++ */
++#define SCU_EVENT_TL_RNC_SUSPEND_TX \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x00)
++#define SCU_EVENT_TL_RNC_SUSPEND_TX_RX \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x00)
++#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x20)
++#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX_RX \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x20)
++
++/**
++ *
++ *
++ * REMOTE_NODE_MISC_EVENTS
++ */
++#define SCU_EVENT_POST_RCN_RELEASE \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, SCU_EVENT_SPECIFIER_RNC_RELEASE)
++#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_ENABLE \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x01)
++#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_DISABLE \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x02)
++#define SCU_EVENT_POST_RNC_COMPLETE \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x03)
++#define SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x04)
++
++/**
++ *
++ *
++ * ERROR_COUNT_EVENT
++ */
++#define SCU_EVENT_RX_CREDIT_BLOCKED_RECEIVED \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x00)
++#define SCU_EVENT_TX_DONE_CREDIT_TIMEOUT \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x01)
++#define SCU_EVENT_RX_DONE_CREDIT_TIMEOUT \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x02)
++
++/**
++ * scu_get_event_type() -
++ *
++ * This macro returns the SCU event type from the event code.
++ */
++#define scu_get_event_type(event_code) \
++ ((event_code) & SCU_EVENT_TYPE_CODE_MASK)
++
++/**
++ * scu_get_event_specifier() -
++ *
++ * This macro returns the SCU event specifier from the event code.
++ */
++#define scu_get_event_specifier(event_code) \
++ ((event_code) & SCU_EVENT_SPECIFIC_CODE_MASK)
++
++/**
++ * scu_get_event_code() -
++ *
++ * This macro returns the combined SCU event type and SCU event specifier from
++ * the event code.
++ */
++#define scu_get_event_code(event_code) \
++ ((event_code) & SCU_EVENT_CODE_MASK)
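++
++/* Dispatch sketch (illustrative): decode an event by its type first, then by
++ * the full event code, e.g.
++ *
++ *	switch (scu_get_event_type(event_code)) {
++ *	case SCU_EVENT_TYPE_OSSP_EVENT:
++ *		if (scu_get_event_code(event_code) == SCU_EVENT_LINK_FAILURE)
++ *			... handle link failure ...
++ *		break;
++ *	}
++ */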
++
++
++/**
++ *
++ *
++ * PTS_SCHEDULE_EVENT
++ */
++#define SCU_EVENT_SMP_RESPONSE_NO_PE \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x00)
++#define SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE \
++ scu_get_event_specifier(SCU_EVENT_SMP_RESPONSE_NO_PE)
++
++#define SCU_EVENT_TASK_TIMEOUT \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x01)
++#define SCU_EVENT_SPECIFIC_TASK_TIMEOUT \
++ scu_get_event_specifier(SCU_EVENT_TASK_TIMEOUT)
++
++#define SCU_EVENT_IT_NEXUS_TIMEOUT \
++ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x02)
++#define SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT \
++ scu_get_event_specifier(SCU_EVENT_IT_NEXUS_TIMEOUT)
++
++
++#endif /* __SCU_EVENT_CODES_HEADER__ */
+diff --git a/drivers/scsi/isci/scu_remote_node_context.h b/drivers/scsi/isci/scu_remote_node_context.h
+new file mode 100644
+index 0000000..33745ad
+--- /dev/null
++++ b/drivers/scsi/isci/scu_remote_node_context.h
+@@ -0,0 +1,229 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __SCU_REMOTE_NODE_CONTEXT_HEADER__
++#define __SCU_REMOTE_NODE_CONTEXT_HEADER__
++
++/**
++ * This file contains the structures and constants used by the SCU hardware to
++ * describe a remote node context.
++ *
++ *
++ */
++
++/**
++ * struct ssp_remote_node_context - This structure contains the SCU hardware
++ * definition for an SSP remote node.
++ *
++ *
++ */
++struct ssp_remote_node_context {
++ /* WORD 0 */
++
++ /**
++ * This field is the remote node index assigned for this remote node. All
++ * remote nodes must have a unique remote node index. The value of the remote
++	 * node index cannot exceed the maximum number of remote nodes reported in
++ * the SCU device context capacity register.
++ */
++ u32 remote_node_index:12;
++ u32 reserved0_1:4;
++
++ /**
++	 * This field tells the SCU hardware how many simultaneous connections
++	 * this remote node will support.
++ */
++ u32 remote_node_port_width:4;
++
++ /**
++ * This field tells the SCU hardware which logical port to associate with this
++ * remote node.
++ */
++ u32 logical_port_index:3;
++ u32 reserved0_2:5;
++
++ /**
++ * This field will enable the I_T nexus loss timer for this remote node.
++ */
++ u32 nexus_loss_timer_enable:1;
++
++ /**
++	 * This field is for driver debugging only and is not used.
++ */
++ u32 check_bit:1;
++
++ /**
++ * This field must be set to true when the hardware DMAs the remote node
++	 * context to the hardware SRAM. When the remote node is being invalidated,
++ * this field must be set to false.
++ */
++ u32 is_valid:1;
++
++ /**
++ * This field must be set to true.
++ */
++ u32 is_remote_node_context:1;
++
++ /* WORD 1 - 2 */
++
++ /**
++ * This is the low word of the remote device SAS Address
++ */
++ u32 remote_sas_address_lo;
++
++ /**
++ * This field is the high word of the remote device SAS Address
++ */
++ u32 remote_sas_address_hi;
++
++ /* WORD 3 */
++ /**
++	 * This field represents the function number assigned to this remote device.
++ * This value must match the virtual function number that is being used to
++ * communicate to the device.
++ */
++ u32 function_number:8;
++ u32 reserved3_1:8;
++
++ /**
++ * This field provides the driver a way to cheat on the arbitration wait time
++ * for this remote node.
++ */
++ u32 arbitration_wait_time:16;
++
++ /* WORD 4 */
++ /**
++ * This field tells the SCU hardware how long this device may occupy the
++ * connection before it must be closed.
++ */
++ u32 connection_occupancy_timeout:16;
++
++ /**
++ * This field tells the SCU hardware how long to maintain a connection when
++ * there are no frames being transmitted on the link.
++ */
++ u32 connection_inactivity_timeout:16;
++
++ /* WORD 5 */
++ /**
++ * This field allows the driver to cheat on the arbitration wait time for this
++ * remote node.
++ */
++ u32 initial_arbitration_wait_time:16;
++
++ /**
++	 * This field tells the hardware what to program for the connection rate in
++ * the open address frame. See the SAS spec for valid values.
++ */
++ u32 oaf_connection_rate:4;
++
++ /**
++ * This field tells the SCU hardware what to program for the features in the
++ * open address frame. See the SAS spec for valid values.
++ */
++ u32 oaf_features:4;
++
++ /**
++ * This field tells the SCU hardware what to use for the source zone group in
++ * the open address frame. See the SAS spec for more details on zoning.
++ */
++ u32 oaf_source_zone_group:8;
++
++ /* WORD 6 */
++ /**
++	 * This field tells the SCU hardware what to use for the "more
++	 * compatibility features" in the open address frame. See the SAS spec
++	 * for details.
++ */
++ u32 oaf_more_compatibility_features;
++
++ /* WORD 7 */
++ u32 reserved7;
++
++};
++
++/**
++ * struct stp_remote_node_context - This structure contains the SCU hardware
++ * definition for a STP remote node.
++ *
++ * STP Targets are not yet supported so this definition is a placeholder until
++ * we do support them.
++ */
++struct stp_remote_node_context {
++ /**
++ * Placeholder data for the STP remote node.
++ */
++ u32 data[8];
++
++};
++
++/**
++ * This union combines the SAS and SATA remote node definitions.
++ *
++ * union scu_remote_node_context
++ */
++union scu_remote_node_context {
++ /**
++ * SSP Remote Node
++ */
++ struct ssp_remote_node_context ssp;
++
++ /**
++ * STP Remote Node
++ */
++ struct stp_remote_node_context stp;
++
++};
++
++#endif /* __SCU_REMOTE_NODE_CONTEXT_HEADER__ */
+diff --git a/drivers/scsi/isci/scu_task_context.h b/drivers/scsi/isci/scu_task_context.h
+new file mode 100644
+index 0000000..7df87d9
+--- /dev/null
++++ b/drivers/scsi/isci/scu_task_context.h
+@@ -0,0 +1,942 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _SCU_TASK_CONTEXT_H_
++#define _SCU_TASK_CONTEXT_H_
++
++/**
++ * This file contains the structures and constants for the SCU hardware task
++ * context.
++ *
++ *
++ */
++
++
++/**
++ * enum scu_ssp_task_type - This enumeration defines the various SSP task
++ *    types the SCU hardware will accept. Their definitions can be found
++ *    in the DS specification.
++ *
++ *
++ */
++typedef enum {
++	SCU_TASK_TYPE_IOREAD,		/* IO READ direction or no direction */
++	SCU_TASK_TYPE_IOWRITE,		/* IO Write direction */
++	SCU_TASK_TYPE_SMP_REQUEST,	/* SMP Request type */
++	SCU_TASK_TYPE_RESPONSE,		/* Driver generated response frame (target mode) */
++	SCU_TASK_TYPE_RAW_FRAME,	/* Raw frame request type */
++	SCU_TASK_TYPE_PRIMITIVE		/* Request for a primitive to be transmitted */
++} scu_ssp_task_type;
++
++/**
++ * enum scu_sata_task_type - This enumeration defines the various SATA task
++ *    types the SCU hardware will accept. Their definitions can be found
++ *    in the DS specification.
++ *
++ *
++ */
++typedef enum {
++	SCU_TASK_TYPE_DMA_IN,		/* Read request */
++	SCU_TASK_TYPE_FPDMAQ_READ,	/* NCQ read request */
++	SCU_TASK_TYPE_PACKET_DMA_IN,	/* Packet read request */
++	SCU_TASK_TYPE_SATA_RAW_FRAME,	/* Raw frame request */
++ RESERVED_4,
++ RESERVED_5,
++ RESERVED_6,
++ RESERVED_7,
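++	/* values 4-7 are reserved so that the write request types start at encoding 8 */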
++	SCU_TASK_TYPE_DMA_OUT,		/* Write request */
++	SCU_TASK_TYPE_FPDMAQ_WRITE,	/* NCQ write request */
++	SCU_TASK_TYPE_PACKET_DMA_OUT	/* Packet write request */
++} scu_sata_task_type;
++
++
++/**
++ *
++ *
++ * SCU_CONTEXT_TYPE
++ */
++#define SCU_TASK_CONTEXT_TYPE 0
++#define SCU_RNC_CONTEXT_TYPE 1
++
++/**
++ *
++ *
++ * SCU_TASK_CONTEXT_VALIDITY
++ */
++#define SCU_TASK_CONTEXT_INVALID 0
++#define SCU_TASK_CONTEXT_VALID 1
++
++/**
++ *
++ *
++ * SCU_COMMAND_CODE
++ */
++#define SCU_COMMAND_CODE_INITIATOR_NEW_TASK 0
++#define SCU_COMMAND_CODE_ACTIVE_TASK 1
++#define SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK 2
++#define SCU_COMMAND_CODE_TARGET_RAW_FRAMES 3
++
++/**
++ *
++ *
++ * SCU_TASK_PRIORITY
++ */
++/**
++ *
++ *
++ * This priority is used when no specific priority is requested for the task.
++ */
++#define SCU_TASK_PRIORITY_NORMAL 0
++
++/**
++ *
++ *
++ * This priority indicates that the task should be scheduled to the head of the
++ * queue. The task will NOT be executed if the TX is suspended for the remote
++ * node.
++ */
++#define SCU_TASK_PRIORITY_HEAD_OF_Q 1
++
++/**
++ *
++ *
++ * This priority indicates that the task will be executed before all
++ * SCU_TASK_PRIORITY_NORMAL and SCU_TASK_PRIORITY_HEAD_OF_Q tasks. The task
++ * WILL be executed if the TX is suspended for the remote node.
++ */
++#define SCU_TASK_PRIORITY_HIGH 2
++
++/**
++ *
++ *
++ * This task priority is reserved and should not be used.
++ */
++#define SCU_TASK_PRIORITY_RESERVED 3
++
++#define SCU_TASK_INITIATOR_MODE 1
++#define SCU_TASK_TARGET_MODE 0
++
++#define SCU_TASK_REGULAR 0
++#define SCU_TASK_ABORTED 1
++
++/* direction bit definition */
++/**
++ *
++ *
++ * SATA_DIRECTION
++ */
++#define SCU_SATA_WRITE_DATA_DIRECTION 0
++#define SCU_SATA_READ_DATA_DIRECTION 1
++
++/**
++ *
++ *
++ * SCU_COMMAND_CONTEXT_MACROS These macros provide the mask and shift
++ * operations to construct the various SCU commands
++ */
++#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT 21
++#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK 0x00E00000
++#define scu_get_command_request_type(x) \
++ ((x) & SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK)
++
++#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT 18
++#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK 0x001C0000
++#define scu_get_command_request_subtype(x) \
++ ((x) & SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK)
++
++#define SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK \
++ (\
++ SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK \
++ | SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK \
++ )
++#define scu_get_command_request_full_type(x) \
++ ((x) & SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK)
++
++#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT 16
++#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK 0x00010000
++#define scu_get_command_protocl_engine_group(x) \
++ ((x) & SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK)
++
++#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT 12
++#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK 0x00007000
++#define scu_get_command_reqeust_logical_port(x) \
++ ((x) & SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK)
++
++
++#define MAKE_SCU_CONTEXT_COMMAND_TYPE(type) \
++ ((u32)(type) << SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT)
++
++/**
++ * MAKE_SCU_CONTEXT_COMMAND_TYPE() -
++ *
++ * SCU_COMMAND_TYPES These constants provide the grouping of the different SCU
++ * command types.
++ */
++#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC MAKE_SCU_CONTEXT_COMMAND_TYPE(0)
++#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC MAKE_SCU_CONTEXT_COMMAND_TYPE(1)
++#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(2)
++#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(3)
++#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(6)
++
++#define MAKE_SCU_CONTEXT_COMMAND_REQUEST(type, command) \
++ ((type) | ((command) << SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT))
++
++/**
++ *
++ *
++ * SCU_REQUEST_TYPES These constants are the various request types that can be
++ * posted to the SCU hardware.
++ */
++#define SCU_CONTEXT_COMMAND_REQUST_POST_TC \
++ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 0))
++
++#define SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT \
++ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 1))
++
++#define SCU_CONTEXT_COMMAND_REQUST_DUMP_TC \
++ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC, 0))
++
++#define SCU_CONTEXT_COMMAND_POST_RNC_32 \
++ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 0))
++
++#define SCU_CONTEXT_COMMAND_POST_RNC_96 \
++ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 1))
++
++#define SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE \
++ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 2))
++
++#define SCU_CONTEXT_COMMAND_DUMP_RNC_32 \
++ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 0))
++
++#define SCU_CONTEXT_COMMAND_DUMP_RNC_96 \
++ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 1))
++
++#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX \
++ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 0))
++
++#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX \
++ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 1))
++
++#define SCU_CONTEXT_COMMAND_POST_RNC_RESUME \
++ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 2))
++
++#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_ENABLE \
++ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 3))
++
++#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_DISABLE \
++ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 4))
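++
++/*
++ * As a worked example (derived purely from the shift/mask definitions
++ * above, not from the DS specification): the request type occupies bits
++ * 23:21 and the subtype bits 20:18, so
++ *
++ *	SCU_CONTEXT_COMMAND_POST_RNC_32 = (2 << 21) | (0 << 18) = 0x00400000
++ *	SCU_CONTEXT_COMMAND_POST_RNC_96 = (2 << 21) | (1 << 18) = 0x00440000
++ *
++ * and scu_get_command_request_full_type() masks both fields back out of
++ * a posted command dword.
++ */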
++
++/**
++ *
++ *
++ * SCU_TASK_CONTEXT_PROTOCOL SCU task context protocol types; these are used
++ * to program the SCU task context protocol field in word 0x00.
++ */
++#define SCU_TASK_CONTEXT_PROTOCOL_SMP 0x00
++#define SCU_TASK_CONTEXT_PROTOCOL_SSP 0x01
++#define SCU_TASK_CONTEXT_PROTOCOL_STP 0x02
++#define SCU_TASK_CONTEXT_PROTOCOL_NONE 0x07
++
++/**
++ * struct ssp_task_context - This is the SCU hardware definition for an SSP
++ * request.
++ *
++ *
++ */
++struct ssp_task_context {
++ /* OFFSET 0x18 */
++ u32 reserved00:24;
++ u32 frame_type:8;
++
++ /* OFFSET 0x1C */
++ u32 reserved01;
++
++ /* OFFSET 0x20 */
++ u32 fill_bytes:2;
++ u32 reserved02:6;
++ u32 changing_data_pointer:1;
++ u32 retransmit:1;
++ u32 retry_data_frame:1;
++ u32 tlr_control:2;
++ u32 reserved03:19;
++
++ /* OFFSET 0x24 */
++ u32 uiRsvd4;
++
++ /* OFFSET 0x28 */
++ u32 target_port_transfer_tag:16;
++ u32 tag:16;
++
++ /* OFFSET 0x2C */
++ u32 data_offset;
++};
++
++/**
++ * struct stp_task_context - This is the SCU hardware definition for an STP
++ * request.
++ *
++ *
++ */
++struct stp_task_context {
++ /* OFFSET 0x18 */
++ u32 fis_type:8;
++ u32 pm_port:4;
++ u32 reserved0:3;
++ u32 control:1;
++ u32 command:8;
++ u32 features:8;
++
++ /* OFFSET 0x1C */
++ u32 reserved1;
++
++ /* OFFSET 0x20 */
++ u32 reserved2;
++
++ /* OFFSET 0x24 */
++ u32 reserved3;
++
++ /* OFFSET 0x28 */
++ u32 ncq_tag:5;
++ u32 reserved4:27;
++
++ /* OFFSET 0x2C */
++ u32 data_offset; /* TODO: What is this used for? */
++};
++
++/**
++ * struct smp_task_context - This is the SCU hardware definition for an SMP
++ * request.
++ *
++ *
++ */
++struct smp_task_context {
++ /* OFFSET 0x18 */
++ u32 response_length:8;
++ u32 function_result:8;
++ u32 function:8;
++ u32 frame_type:8;
++
++ /* OFFSET 0x1C */
++ u32 smp_response_ufi:12;
++ u32 reserved1:20;
++
++ /* OFFSET 0x20 */
++ u32 reserved2;
++
++ /* OFFSET 0x24 */
++ u32 reserved3;
++
++ /* OFFSET 0x28 */
++ u32 reserved4;
++
++ /* OFFSET 0x2C */
++ u32 reserved5;
++};
++
++/**
++ * struct primitive_task_context - This is the SCU hardware definition used
++ * when the driver wants to send a primitive on the link.
++ *
++ *
++ */
++struct primitive_task_context {
++ /* OFFSET 0x18 */
++ /**
++ * This field is the control word and it must be 0.
++ */
++	u32 control; /* must be set to 0 */
++
++ /* OFFSET 0x1C */
++ /**
++ * This field specifies the primitive that is to be transmitted.
++ */
++ u32 sequence;
++
++ /* OFFSET 0x20 */
++ u32 reserved0;
++
++ /* OFFSET 0x24 */
++ u32 reserved1;
++
++ /* OFFSET 0x28 */
++ u32 reserved2;
++
++ /* OFFSET 0x2C */
++ u32 reserved3;
++};
++
++/**
++ * The union of the protocols that can be selected in the SCU task context
++ * field.
++ *
++ * protocol_context
++ */
++union protocol_context {
++ struct ssp_task_context ssp;
++ struct stp_task_context stp;
++ struct smp_task_context smp;
++ struct primitive_task_context primitive;
++ u32 words[6];
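++	/* each member above overlays the same six dwords, TC offsets 0x18-0x2C */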
++};
++
++/**
++ * struct scu_sgl_element - This structure represents a single SCU defined SGL
++ * element. SCU SGLs contain a 64 bit address and a 24 bit transfer length
++ * (at most 2^24 - 1 bytes per element). The SGL cannot cross a 4GB boundary.
++ *
++ * struct scu_sgl_element
++ */
++struct scu_sgl_element {
++ /**
++ * This field is the upper 32 bits of the 64 bit physical address.
++ */
++ u32 address_upper;
++
++ /**
++ * This field is the lower 32 bits of the 64 bit physical address.
++ */
++ u32 address_lower;
++
++ /**
++ * This field is the number of bytes to transfer.
++ */
++ u32 length:24;
++
++ /**
++ * This field is the address modifier to be used when a virtual function is
++ * requesting a data transfer.
++ */
++ u32 address_modifier:8;
++
++};
++
++#define SCU_SGL_ELEMENT_PAIR_A 0
++#define SCU_SGL_ELEMENT_PAIR_B 1
++
++/**
++ * struct scu_sgl_element_pair - This structure is the SCU hardware definition
++ * of a pair of SGL elements. The SCU hardware always works on SGL pairs.
++ * They are referred to in the DS specification as SGL A and SGL B. Each SGL
++ * pair is followed by the address of the next pair.
++ *
++ *
++ */
++struct scu_sgl_element_pair {
++ /* OFFSET 0x60-0x68 */
++ /**
++ * This field is the SGL element A of the SGL pair.
++ */
++ struct scu_sgl_element A;
++
++ /* OFFSET 0x6C-0x74 */
++ /**
++ * This field is the SGL element B of the SGL pair.
++ */
++ struct scu_sgl_element B;
++
++ /* OFFSET 0x78-0x7C */
++ /**
++ * This field is the upper 32 bits of the 64 bit address to the next SGL
++ * element pair.
++ */
++ u32 next_pair_upper;
++
++ /**
++ * This field is the lower 32 bits of the 64 bit address to the next SGL
++ * element pair.
++ */
++ u32 next_pair_lower;
++
++};
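++
++/*
++ * A minimal sketch of filling one SGL element from a mapped DMA address
++ * and length (illustrative only; upper_32_bits()/lower_32_bits() are the
++ * generic kernel helpers, and sgl/dma_addr/len are hypothetical locals,
++ * not names used by this driver):
++ *
++ *	sgl->address_upper = upper_32_bits(dma_addr);
++ *	sgl->address_lower = lower_32_bits(dma_addr);
++ *	sgl->length = len;		(len is at most 2^24 - 1 bytes)
++ *	sgl->address_modifier = 0;
++ */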
++
++/**
++ * struct transport_snapshot - This structure is the SCU hardware scratch area
++ * for the task context. This is set to 0 by the driver but can be read by
++ * issuing a dump TC request to the SCU.
++ *
++ *
++ */
++struct transport_snapshot {
++ /* OFFSET 0x48 */
++ u32 xfer_rdy_write_data_length;
++
++ /* OFFSET 0x4C */
++ u32 data_offset;
++
++ /* OFFSET 0x50 */
++ u32 data_transfer_size:24;
++ u32 reserved_50_0:8;
++
++ /* OFFSET 0x54 */
++ u32 next_initiator_write_data_offset;
++
++ /* OFFSET 0x58 */
++ u32 next_initiator_write_data_xfer_size:24;
++ u32 reserved_58_0:8;
++};
++
++/**
++ * struct scu_task_context - This structure defines the contents of the SCU
++ * silicon task context. It lays out all of the fields according to the
++ * expected order and location for the Storage Controller unit.
++ *
++ *
++ */
++struct scu_task_context {
++ /* OFFSET 0x00 ------ */
++ /**
++ * This field must be encoded to one of the valid SCU task priority values
++ * - SCU_TASK_PRIORITY_NORMAL
++ * - SCU_TASK_PRIORITY_HEAD_OF_Q
++ * - SCU_TASK_PRIORITY_HIGH
++ */
++ u32 priority:2;
++
++ /**
++ * This field must be set to true if this is an initiator generated request.
++ * Until target mode is supported all task requests are initiator requests.
++ */
++ u32 initiator_request:1;
++
++ /**
++ * This field must be set to one of the valid connection rates; the valid
++ * values are 0x8, 0x9, and 0xA (the SAS link rate codes for 1.5, 3.0,
++ * and 6.0 Gb/s).
++ */
++ u32 connection_rate:4;
++
++ /**
++ * This field must be programmed when generating an SMP response since the SMP
++ * connection remains open until the SMP response is generated.
++ */
++ u32 protocol_engine_index:3;
++
++ /**
++ * This field must contain the logical port for the task request.
++ */
++ u32 logical_port_index:3;
++
++ /**
++ * This field must be set to one of the SCU_TASK_CONTEXT_PROTOCOL values
++ * - SCU_TASK_CONTEXT_PROTOCOL_SMP
++ * - SCU_TASK_CONTEXT_PROTOCOL_SSP
++ * - SCU_TASK_CONTEXT_PROTOCOL_STP
++ * - SCU_TASK_CONTEXT_PROTOCOL_NONE
++ */
++ u32 protocol_type:3;
++
++ /**
++ * This field must be set to the TCi allocated for this task
++ */
++ u32 task_index:12;
++
++ /**
++ * This field is reserved and must be set to 0x00
++ */
++ u32 reserved_00_0:1;
++
++ /**
++ * For a normal task request this must be set to 0. If this is an abort of
++ * this task request it must be set to 1.
++ */
++ u32 abort:1;
++
++ /**
++ * This field must be set to true for the SCU hardware to process the task.
++ */
++ u32 valid:1;
++
++ /**
++ * This field must be set to SCU_TASK_CONTEXT_TYPE
++ */
++ u32 context_type:1;
++
++ /* OFFSET 0x04 */
++ /**
++ * This field contains the RNi that is the target of this request.
++ */
++ u32 remote_node_index:12;
++
++ /**
++ * This field is programmed only for mirrored requests, which this driver
++ * does not use; in that case it is the RNi for the mirrored target.
++ */
++ u32 mirrored_node_index:12;
++
++ /**
++ * This field is programmed with the direction of the SATA request
++ * - SCU_SATA_WRITE_DATA_DIRECTION
++ * - SCU_SATA_READ_DATA_DIRECTION
++ */
++ u32 sata_direction:1;
++
++ /**
++ * This field is programmed with one of the following SCU_COMMAND_CODE values
++ * - SCU_COMMAND_CODE_INITIATOR_NEW_TASK
++ * - SCU_COMMAND_CODE_ACTIVE_TASK
++ * - SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK
++ * - SCU_COMMAND_CODE_TARGET_RAW_FRAMES
++ */
++ u32 command_code:2;
++
++ /**
++ * This field is set to true if the remote node should be suspended.
++ * This bit is only valid for SSP & SMP target devices.
++ */
++ u32 suspend_node:1;
++
++ /**
++ * This field is programmed with one of the following command type codes
++ *
++ * For SAS requests use the scu_ssp_task_type
++ * - SCU_TASK_TYPE_IOREAD
++ * - SCU_TASK_TYPE_IOWRITE
++ * - SCU_TASK_TYPE_SMP_REQUEST
++ * - SCU_TASK_TYPE_RESPONSE
++ * - SCU_TASK_TYPE_RAW_FRAME
++ * - SCU_TASK_TYPE_PRIMITIVE
++ *
++ * For SATA requests use the scu_sata_task_type
++ * - SCU_TASK_TYPE_DMA_IN
++ * - SCU_TASK_TYPE_FPDMAQ_READ
++ * - SCU_TASK_TYPE_PACKET_DMA_IN
++ * - SCU_TASK_TYPE_SATA_RAW_FRAME
++ * - SCU_TASK_TYPE_DMA_OUT
++ * - SCU_TASK_TYPE_FPDMAQ_WRITE
++ * - SCU_TASK_TYPE_PACKET_DMA_OUT
++ */
++ u32 task_type:4;
++
++ /* OFFSET 0x08 */
++ /**
++ * This field is reserved and must be set to 0x00
++ */
++ u32 link_layer_control:8; /* presently all reserved */
++
++ /**
++ * This field is set to true when TLR is to be enabled
++ */
++ u32 ssp_tlr_enable:1;
++
++ /**
++ * This field specifies whether the SCU DMAs a response frame to host
++ * memory for good response frames when operating in target mode.
++ */
++ u32 dma_ssp_target_good_response:1;
++
++ /**
++ * This field, when set, indicates that the SCU should not DMA the
++ * response frame to host memory.
++ */
++ u32 do_not_dma_ssp_good_response:1;
++
++ /**
++ * This field is set to true when strict ordering is to be enabled
++ */
++ u32 strict_ordering:1;
++
++ /**
++ * This field indicates the type of endianness to be utilized for the
++ * frame. Command, task, and response frames utilize control_frame
++ * set to 1.
++ */
++ u32 control_frame:1;
++
++ /**
++ * This field is reserved and the driver should set it to 0x00
++ */
++ u32 tl_control_reserved:3;
++
++ /**
++ * This field is set to true when the SCU hardware task timeout control is to
++ * be enabled
++ */
++ u32 timeout_enable:1;
++
++ /**
++ * This field is reserved and the driver should set it to 0x00
++ */
++ u32 pts_control_reserved:7;
++
++ /**
++ * This field should be set to true when block guard is to be enabled
++ */
++ u32 block_guard_enable:1;
++
++ /**
++ * This field is reserved and the driver should set it to 0x00
++ */
++ u32 sdma_control_reserved:7;
++
++ /* OFFSET 0x0C */
++ /**
++ * This field is the address modifier for this IO request; it should be
++ * programmed with the virtual function that is making the request.
++ */
++ u32 address_modifier:16;
++
++ /**
++ * @todo Do we support mirrored SMP response frames?
++ */
++ u32 mirrored_protocol_engine:3; /* mirrored protocol Engine Index */
++
++ /**
++ * If this is a mirrored request the logical port index for the mirrored RNi
++ * must be programmed.
++ */
++ u32 mirrored_logical_port:4; /* mirrored local port index */
++
++ /**
++ * This field is reserved and the driver must set it to 0x00
++ */
++ u32 reserved_0C_0:8;
++
++ /**
++ * This field must be set to true if the mirrored request processing is to be
++ * enabled.
++ */
++ u32 mirror_request_enable:1; /* Mirrored request Enable */
++
++ /* OFFSET 0x10 */
++ /**
++ * This field is the command iu length in dwords
++ */
++ u32 ssp_command_iu_length:8;
++
++ /**
++ * This is the target TLR enable bit; it must be set to 0 when creating the
++ * task context.
++ */
++ u32 xfer_ready_tlr_enable:1;
++
++ /**
++ * This field is reserved and the driver must set it to 0x00
++ */
++ u32 reserved_10_0:7;
++
++ /**
++ * This is the maximum burst size that the SCU hardware will send in one
++ * connection; its value is (N x 512) bytes and N must be a multiple of 2
++ * (for example, N = 4 gives a 2048 byte burst). If the value is 0x00 then
++ * the maximum burst size is disabled.
++ */
++ u32 ssp_max_burst_size:16;
++
++ /* OFFSET 0x14 */
++ /**
++ * This field is set to the number of bytes to be transferred in the request.
++ */
++ u32 transfer_length_bytes:24; /* In terms of bytes */
++
++ /**
++ * This field is reserved and the driver should set it to 0x00
++ */
++ u32 reserved_14_0:8;
++
++ /* OFFSET 0x18-0x2C */
++ /**
++ * This union provides for the protocol specific part of the SCU task context.
++ */
++ union protocol_context type;
++
++ /* OFFSET 0x30-0x34 */
++ /**
++ * This field is the upper 32 bits of the 64 bit physical address of the
++ * command iu buffer
++ */
++ u32 command_iu_upper;
++
++ /**
++ * This field is the lower 32 bits of the 64 bit physical address of the
++ * command iu buffer
++ */
++ u32 command_iu_lower;
++
++ /* OFFSET 0x38-0x3C */
++ /**
++ * This field is the upper 32 bits of the 64 bit physical address of the
++ * response iu buffer
++ */
++ u32 response_iu_upper;
++
++ /**
++ * This field is the lower 32 bits of the 64 bit physical address of the
++ * response iu buffer
++ */
++ u32 response_iu_lower;
++
++ /* OFFSET 0x40 */
++ /**
++ * This field is set to the task phase of the SCU hardware. The driver must
++ * set this to 0x01
++ */
++ u32 task_phase:8;
++
++ /**
++ * This field is set to the transport layer task status. The driver must set
++ * this to 0x00
++ */
++ u32 task_status:8;
++
++ /**
++ * This field is used during initiator write TLR
++ */
++ u32 previous_extended_tag:4;
++
++ /**
++ * This field is set to the maximum number of retries for an STP non-data FIS
++ */
++ u32 stp_retry_count:2;
++
++ /**
++ * This field is reserved and the driver must set it to 0x00
++ */
++ u32 reserved_40_1:2;
++
++ /**
++ * This field is used by the SCU TL to determine when to take a snapshot when
++ * transmitting read data frames.
++ * - 0x00 The entire IO
++ * - 0x01 32k
++ * - 0x02 64k
++ * - 0x04 128k
++ * - 0x08 256k
++ */
++ u32 ssp_tlr_threshold:4;
++
++ /**
++ * This field is reserved and the driver must set it to 0x00
++ */
++ u32 reserved_40_2:4;
++
++ /* OFFSET 0x44 */
++ u32 write_data_length; /* read only set to 0 */
++
++ /* OFFSET 0x48-0x58 */
++ struct transport_snapshot snapshot; /* read only set to 0 */
++
++ /* OFFSET 0x5C */
++ u32 block_protection_enable:1;
++ u32 block_size:2;
++ u32 block_protection_function:2;
++ u32 reserved_5C_0:9;
++ u32 active_sgl_element:2; /* read only set to 0 */
++ u32 sgl_exhausted:1; /* read only set to 0 */
++ u32 payload_data_transfer_error:4; /* read only set to 0 */
++ u32 frame_buffer_offset:11; /* read only set to 0 */
++
++ /* OFFSET 0x60-0x7C */
++ /**
++ * This field is the first SGL element pair found in the TC data structure.
++ */
++ struct scu_sgl_element_pair sgl_pair_ab;
++ /* OFFSET 0x80-0x9C */
++ /**
++ * This field is the second SGL element pair found in the TC data structure.
++ */
++ struct scu_sgl_element_pair sgl_pair_cd;
++
++ /* OFFSET 0xA0-BC */
++ struct scu_sgl_element_pair sgl_snapshot_ac;
++
++ /* OFFSET 0xC0 */
++ u32 active_sgl_element_pair; /* read only set to 0 */
++
++ /* OFFSET 0xC4-0xCC */
++ u32 reserved_C4_CC[3];
++
++ /* OFFSET 0xD0 */
++ u32 intermediate_crc_value:16;
++ u32 initial_crc_seed:16;
++
++ /* OFFSET 0xD4 */
++ u32 application_tag_for_verify:16;
++ u32 application_tag_for_generate:16;
++
++ /* OFFSET 0xD8 */
++ u32 reference_tag_seed_for_verify_function;
++
++ /* OFFSET 0xDC */
++ u32 reserved_DC;
++
++ /* OFFSET 0xE0 */
++ u32 reserved_E0_0:16;
++ u32 application_tag_mask_for_generate:16;
++
++ /* OFFSET 0xE4 */
++ u32 block_protection_control:16;
++ u32 application_tag_mask_for_verify:16;
++
++ /* OFFSET 0xE8 */
++ u32 block_protection_error:8;
++ u32 reserved_E8_0:24;
++
++ /* OFFSET 0xEC */
++ u32 reference_tag_seed_for_verify;
++
++ /* OFFSET 0xF0 */
++ u32 intermediate_crc_valid_snapshot:16;
++ u32 reserved_F0_0:16;
++
++ /* OFFSET 0xF4 */
++ u32 reference_tag_seed_for_verify_function_snapshot;
++
++ /* OFFSET 0xF8 */
++ u32 snapshot_of_reserved_dword_DC_of_tc;
++
++ /* OFFSET 0xFC */
++ u32 reference_tag_seed_for_generate_function_snapshot;
++
++};
++
++#endif /* _SCU_TASK_CONTEXT_H_ */
+diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
+new file mode 100644
+index 0000000..d6bcdd0
+--- /dev/null
++++ b/drivers/scsi/isci/task.c
+@@ -0,0 +1,1676 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/completion.h>
++#include <linux/irqflags.h>
++#include "sas.h"
++#include <scsi/libsas.h>
++#include "remote_device.h"
++#include "remote_node_context.h"
++#include "isci.h"
++#include "request.h"
++#include "task.h"
++#include "host.h"
++
++/**
++* isci_task_refuse() - complete the request to the upper layer driver in
++* the case where an I/O needs to be completed back in the submit path.
++* @ihost: host on which the request was queued
++* @task: request to complete
++* @response: response code for the completed task.
++* @status: status code for the completed task.
++*
++*/
++static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
++ enum service_response response,
++ enum exec_status status)
++
++{
++ enum isci_completion_selection disposition;
++
++ disposition = isci_perform_normal_io_completion;
++ disposition = isci_task_set_completion_status(task, response, status,
++ disposition);
++
++ /* Tasks aborted specifically by a call to the lldd_abort_task
++ * function should not be completed to the host in the regular path.
++ */
++ switch (disposition) {
++ case isci_perform_normal_io_completion:
++ /* Normal notification (task_done) */
++ dev_dbg(&ihost->pdev->dev,
++ "%s: Normal - task = %p, response=%d, "
++ "status=%d\n",
++ __func__, task, response, status);
++
++ task->lldd_task = NULL;
++
++ isci_execpath_callback(ihost, task, task->task_done);
++ break;
++
++ case isci_perform_aborted_io_completion:
++ /*
++ * No notification because this request is already in the
++ * abort path.
++ */
++ dev_dbg(&ihost->pdev->dev,
++ "%s: Aborted - task = %p, response=%d, "
++ "status=%d\n",
++ __func__, task, response, status);
++ break;
++
++ case isci_perform_error_io_completion:
++ /* Use sas_task_abort */
++ dev_dbg(&ihost->pdev->dev,
++ "%s: Error - task = %p, response=%d, "
++ "status=%d\n",
++ __func__, task, response, status);
++
++ isci_execpath_callback(ihost, task, sas_task_abort);
++ break;
++
++ default:
++ dev_dbg(&ihost->pdev->dev,
++ "%s: isci task notification default case!",
++ __func__);
++ sas_task_abort(task);
++ break;
++ }
++}
++
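++/* walk 'num' sas_tasks chained through task->list, starting with 'task' */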
++#define for_each_sas_task(num, task) \
++ for (; num > 0; num--,\
++ task = list_entry(task->list.next, struct sas_task, list))
++
++
++static inline int isci_device_io_ready(struct isci_remote_device *idev,
++ struct sas_task *task)
++{
++ return idev ? test_bit(IDEV_IO_READY, &idev->flags) ||
++ (test_bit(IDEV_IO_NCQERROR, &idev->flags) &&
++ isci_task_is_ncq_recovery(task))
++ : 0;
++}
++/**
++ * isci_task_execute_task() - This function is one of the SAS Domain Template
++ * functions. This function is called by libsas to send a task down to
++ * hardware.
++ * @task: This parameter specifies the SAS task to send.
++ * @num: This parameter specifies the number of tasks to queue.
++ * @gfp_flags: This parameter specifies the context of this call.
++ *
++ * status, zero indicates success.
++ */
++int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
++{
++ struct isci_host *ihost = dev_to_ihost(task->dev);
++ struct isci_remote_device *idev;
++ unsigned long flags;
++ bool io_ready;
++ u16 tag;
++
++ dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
++
++ for_each_sas_task(num, task) {
++ enum sci_status status = SCI_FAILURE;
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++ idev = isci_lookup_device(task->dev);
++ io_ready = isci_device_io_ready(idev, task);
++ tag = isci_alloc_tag(ihost);
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++
++ dev_dbg(&ihost->pdev->dev,
++ "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n",
++ task, num, task->dev, idev, idev ? idev->flags : 0,
++ task->uldd_task);
++
++ if (!idev) {
++ isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
++ SAS_DEVICE_UNKNOWN);
++ } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
++ /* Indicate QUEUE_FULL so that the scsi midlayer
++ * retries.
++ */
++ isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
++ SAS_QUEUE_FULL);
++ } else {
++ /* There is a device and it's ready for I/O. */
++ spin_lock_irqsave(&task->task_state_lock, flags);
++
++ if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
++ /* The I/O was aborted. */
++ spin_unlock_irqrestore(&task->task_state_lock,
++ flags);
++
++ isci_task_refuse(ihost, task,
++ SAS_TASK_UNDELIVERED,
++ SAM_STAT_TASK_ABORTED);
++ } else {
++ task->task_state_flags |= SAS_TASK_AT_INITIATOR;
++ spin_unlock_irqrestore(&task->task_state_lock, flags);
++
++ /* build and send the request. */
++ status = isci_request_execute(ihost, idev, task, tag);
++
++ if (status != SCI_SUCCESS) {
++
++ spin_lock_irqsave(&task->task_state_lock, flags);
++ /* Did not really start this command. */
++ task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
++ spin_unlock_irqrestore(&task->task_state_lock, flags);
++
++ /* Indicate QUEUE_FULL so that the scsi
++			 * midlayer retries. If the request
++ * failed for remote device reasons,
++ * it gets returned as
++ * SAS_TASK_UNDELIVERED next time
++ * through.
++ */
++ isci_task_refuse(ihost, task,
++ SAS_TASK_COMPLETE,
++ SAS_QUEUE_FULL);
++ }
++ }
++ }
++ if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++ /* command never hit the device, so just free
++ * the tci and skip the sequence increment
++ */
++ isci_tci_free(ihost, ISCI_TAG_TCI(tag));
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++ }
++ isci_put_device(idev);
++ }
++ return 0;
++}
++
++static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
++{
++ struct isci_tmf *isci_tmf;
++ enum sci_status status;
++
++ if (tmf_task != ireq->ttype)
++ return SCI_FAILURE;
++
++ isci_tmf = isci_request_access_tmf(ireq);
++
++ switch (isci_tmf->tmf_code) {
++
++ case isci_tmf_sata_srst_high:
++ case isci_tmf_sata_srst_low: {
++ struct host_to_dev_fis *fis = &ireq->stp.cmd;
++
++ memset(fis, 0, sizeof(*fis));
++
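++		/* 0x27 is the FIS type of a host-to-device register FIS */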
++ fis->fis_type = 0x27;
++ fis->flags &= ~0x80;
++ fis->flags &= 0xF0;
++ if (isci_tmf->tmf_code == isci_tmf_sata_srst_high)
++ fis->control |= ATA_SRST;
++ else
++ fis->control &= ~ATA_SRST;
++ break;
++ }
++	/* other management commands go here... */
++ default:
++ return SCI_FAILURE;
++ }
++
++ /* core builds the protocol specific request
++ * based on the h2d fis.
++ */
++ status = sci_task_request_construct_sata(ireq);
++
++ return status;
++}
++
++static struct isci_request *isci_task_request_build(struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ u16 tag, struct isci_tmf *isci_tmf)
++{
++ enum sci_status status = SCI_FAILURE;
++ struct isci_request *ireq = NULL;
++ struct domain_device *dev;
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: isci_tmf = %p\n", __func__, isci_tmf);
++
++ dev = idev->domain_dev;
++
++ /* do common allocation and init of request object. */
++ ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag);
++ if (!ireq)
++ return NULL;
++
++	/* let the core do its construct. */
++ status = sci_task_request_construct(ihost, idev, tag,
++ ireq);
++
++ if (status != SCI_SUCCESS) {
++ dev_warn(&ihost->pdev->dev,
++ "%s: sci_task_request_construct failed - "
++ "status = 0x%x\n",
++ __func__,
++ status);
++ return NULL;
++ }
++
++ /* XXX convert to get this from task->tproto like other drivers */
++ if (dev->dev_type == SAS_END_DEV) {
++ isci_tmf->proto = SAS_PROTOCOL_SSP;
++ status = sci_task_request_construct_ssp(ireq);
++ if (status != SCI_SUCCESS)
++ return NULL;
++ }
++
++ if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
++ isci_tmf->proto = SAS_PROTOCOL_SATA;
++ status = isci_sata_management_task_request_build(ireq);
++
++ if (status != SCI_SUCCESS)
++ return NULL;
++ }
++ return ireq;
++}
++
++static int isci_task_execute_tmf(struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ struct isci_tmf *tmf, unsigned long timeout_ms)
++{
++ DECLARE_COMPLETION_ONSTACK(completion);
++ enum sci_task_status status = SCI_TASK_FAILURE;
++ struct isci_request *ireq;
++ int ret = TMF_RESP_FUNC_FAILED;
++ unsigned long flags;
++ unsigned long timeleft;
++ u16 tag;
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++ tag = isci_alloc_tag(ihost);
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++
++ if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
++ return ret;
++
++ /* sanity check, return TMF_RESP_FUNC_FAILED
++ * if the device is not there and ready.
++ */
++ if (!idev ||
++ (!test_bit(IDEV_IO_READY, &idev->flags) &&
++ !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
++ dev_dbg(&ihost->pdev->dev,
++ "%s: idev = %p not ready (%#lx)\n",
++ __func__,
++ idev, idev ? idev->flags : 0);
++ goto err_tci;
++ } else
++ dev_dbg(&ihost->pdev->dev,
++ "%s: idev = %p\n",
++ __func__, idev);
++
++ /* Assign the pointer to the TMF's completion kernel wait structure. */
++ tmf->complete = &completion;
++
++ ireq = isci_task_request_build(ihost, idev, tag, tmf);
++ if (!ireq)
++ goto err_tci;
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++
++ /* start the TMF io. */
++ status = sci_controller_start_task(ihost, idev, ireq);
++
++ if (status != SCI_TASK_SUCCESS) {
++ dev_dbg(&ihost->pdev->dev,
++ "%s: start_io failed - status = 0x%x, request = %p\n",
++ __func__,
++ status,
++ ireq);
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++ goto err_tci;
++ }
++
++ if (tmf->cb_state_func != NULL)
++ tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
++
++ isci_request_change_state(ireq, started);
++
++ /* add the request to the remote device request list. */
++ list_add(&ireq->dev_node, &idev->reqs_in_process);
++
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++
++ /* Wait for the TMF to complete, or a timeout. */
++ timeleft = wait_for_completion_timeout(&completion,
++ msecs_to_jiffies(timeout_ms));
++
++ if (timeleft == 0) {
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++
++ if (tmf->cb_state_func != NULL)
++ tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);
++
++ sci_controller_terminate_request(ihost,
++ idev,
++ ireq);
++
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++
++ wait_for_completion(tmf->complete);
++ }
++
++ isci_print_tmf(tmf);
++
++ if (tmf->status == SCI_SUCCESS)
++ ret = TMF_RESP_FUNC_COMPLETE;
++ else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
++ dev_dbg(&ihost->pdev->dev,
++ "%s: tmf.status == "
++ "SCI_FAILURE_IO_RESPONSE_VALID\n",
++ __func__);
++ ret = TMF_RESP_FUNC_COMPLETE;
++ }
++ /* Else - leave the default "failed" status alone. */
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: completed request = %p\n",
++ __func__,
++ ireq);
++
++ return ret;
++
++ err_tci:
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++ isci_tci_free(ihost, ISCI_TAG_TCI(tag));
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++
++ return ret;
++}
++
++static void isci_task_build_tmf(struct isci_tmf *tmf,
++ enum isci_tmf_function_codes code,
++ void (*tmf_sent_cb)(enum isci_tmf_cb_state,
++ struct isci_tmf *,
++ void *),
++ void *cb_data)
++{
++ memset(tmf, 0, sizeof(*tmf));
++
++ tmf->tmf_code = code;
++ tmf->cb_state_func = tmf_sent_cb;
++ tmf->cb_data = cb_data;
++}
++
++static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
++ enum isci_tmf_function_codes code,
++ void (*tmf_sent_cb)(enum isci_tmf_cb_state,
++ struct isci_tmf *,
++ void *),
++ struct isci_request *old_request)
++{
++ isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request);
++ tmf->io_tag = old_request->io_tag;
++}
++
++/**
++ * isci_task_validate_request_to_abort() - This function checks the given I/O
++ * against the "started" state. If the request is still "started", it's
++ * state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
++ * BEFORE CALLING THIS FUNCTION.
++ * @isci_request: This parameter specifies the request object to control.
++ * @isci_host: This parameter specifies the ISCI host object
++ * @isci_device: This is the device to which the request is pending.
++ * @aborted_io_completion: This is a completion structure that will be added to
++ * the request in case it is changed to aborting; this completion is
++ * triggered when the request is fully completed.
++ *
++ * Either "started" on successful change of the task status to "aborted", or
++ * "unallocated" if the task cannot be controlled.
++ */
++static enum isci_request_status isci_task_validate_request_to_abort(
++ struct isci_request *isci_request,
++ struct isci_host *isci_host,
++ struct isci_remote_device *isci_device,
++ struct completion *aborted_io_completion)
++{
++ enum isci_request_status old_state = unallocated;
++
++ /* Only abort the task if it's in the
++	 * device's reqs_in_process list
++ */
++ if (isci_request && !list_empty(&isci_request->dev_node)) {
++ old_state = isci_request_change_started_to_aborted(
++ isci_request, aborted_io_completion);
++
++ }
++
++ return old_state;
++}
++
++/**
++* isci_request_cleanup_completed_loiterer() - This function will take care of
++* the final cleanup on any request which has been explicitly terminated.
++* @isci_host: This parameter specifies the ISCI host object
++* @isci_device: This is the device to which the request is pending.
++* @isci_request: This parameter specifies the terminated request object.
++* @task: This parameter is the libsas I/O request.
++*/
++static void isci_request_cleanup_completed_loiterer(
++ struct isci_host *isci_host,
++ struct isci_remote_device *isci_device,
++ struct isci_request *isci_request,
++ struct sas_task *task)
++{
++ unsigned long flags;
++
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: isci_device=%p, request=%p, task=%p\n",
++ __func__, isci_device, isci_request, task);
++
++ if (task != NULL) {
++
++ spin_lock_irqsave(&task->task_state_lock, flags);
++ task->lldd_task = NULL;
++
++ task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
++
++ isci_set_task_doneflags(task);
++
++ /* If this task is not in the abort path, call task_done. */
++ if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
++
++ spin_unlock_irqrestore(&task->task_state_lock, flags);
++ task->task_done(task);
++ } else
++ spin_unlock_irqrestore(&task->task_state_lock, flags);
++ }
++
++ if (isci_request != NULL) {
++ spin_lock_irqsave(&isci_host->scic_lock, flags);
++ list_del_init(&isci_request->dev_node);
++ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
++ }
++}
++
++/**
++ * isci_terminate_request_core() - This function will terminate the given
++ * request, and wait for it to complete. This function must only be called
++ * from a thread that can wait. Note that the request is terminated and
++ * completed (back to the host, if started there).
++ * @ihost: This SCU.
++ * @idev: The target.
++ * @isci_request: The I/O request to be terminated.
++ *
++ */
++static void isci_terminate_request_core(struct isci_host *ihost,
++ struct isci_remote_device *idev,
++ struct isci_request *isci_request)
++{
++ enum sci_status status = SCI_SUCCESS;
++ bool was_terminated = false;
++ bool needs_cleanup_handling = false;
++ enum isci_request_status request_status;
++ unsigned long flags;
++ unsigned long termination_completed = 1;
++ struct completion *io_request_completion;
++ struct sas_task *task;
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: device = %p; request = %p\n",
++ __func__, idev, isci_request);
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++
++ io_request_completion = isci_request->io_request_completion;
++
++ task = (isci_request->ttype == io_task)
++ ? isci_request_access_task(isci_request)
++ : NULL;
++
++ /* Note that we are not going to control
++ * the target to abort the request.
++ */
++ set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);
++
++ /* Make sure the request wasn't just sitting around signalling
++ * device condition (if the request handle is NULL, then the
++ * request completed but needed additional handling here).
++ */
++ if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
++ was_terminated = true;
++ needs_cleanup_handling = true;
++ status = sci_controller_terminate_request(ihost,
++ idev,
++ isci_request);
++ }
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++
++ /*
++ * The only time the request to terminate will
++ * fail is when the io request is completed and
++ * being aborted.
++ */
++ if (status != SCI_SUCCESS) {
++ dev_dbg(&ihost->pdev->dev,
++ "%s: sci_controller_terminate_request"
++ " returned = 0x%x\n",
++ __func__, status);
++
++ isci_request->io_request_completion = NULL;
++
++ } else {
++ if (was_terminated) {
++ dev_dbg(&ihost->pdev->dev,
++ "%s: before completion wait (%p/%p)\n",
++ __func__, isci_request, io_request_completion);
++
++ /* Wait here for the request to complete. */
++ #define TERMINATION_TIMEOUT_MSEC 500
++ termination_completed
++ = wait_for_completion_timeout(
++ io_request_completion,
++ msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC));
++
++ if (!termination_completed) {
++
++ /* The request to terminate has timed out. */
++ spin_lock_irqsave(&ihost->scic_lock,
++ flags);
++
++ /* Check for state changes. */
++ if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
++
++ /* The best we can do is to have the
++ * request die a silent death if it
++ * ever really completes.
++ *
++ * Set the request state to "dead",
++ * and clear the task pointer so that
++ * an actual completion event callback
++ * doesn't do anything.
++ */
++ isci_request->status = dead;
++ isci_request->io_request_completion
++ = NULL;
++
++ if (isci_request->ttype == io_task) {
++
++ /* Break links with the
++ * sas_task.
++ */
++ isci_request->ttype_ptr.io_task_ptr
++ = NULL;
++ }
++ } else
++ termination_completed = 1;
++
++ spin_unlock_irqrestore(&ihost->scic_lock,
++ flags);
++
++ if (!termination_completed) {
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: *** Timeout waiting for "
++ "termination(%p/%p)\n",
++ __func__, io_request_completion,
++ isci_request);
++
++ /* The request can no longer be referenced
++ * safely since it may go away if the
++				 * termination ever really does complete.
++ */
++ isci_request = NULL;
++ }
++ }
++ if (termination_completed)
++ dev_dbg(&ihost->pdev->dev,
++ "%s: after completion wait (%p/%p)\n",
++ __func__, isci_request, io_request_completion);
++ }
++
++ if (termination_completed) {
++
++ isci_request->io_request_completion = NULL;
++
++ /* Peek at the status of the request. This will tell
++ * us if there was special handling on the request such that it
++ * needs to be detached and freed here.
++ */
++ spin_lock_irqsave(&isci_request->state_lock, flags);
++ request_status = isci_request->status;
++
++ if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
++ && ((request_status == aborted)
++ || (request_status == aborting)
++ || (request_status == terminating)
++ || (request_status == completed)
++ || (request_status == dead)
++ )
++ ) {
++
++ /* The completion routine won't free a request in
++ * the aborted/aborting/etc. states, so we do
++ * it here.
++ */
++ needs_cleanup_handling = true;
++ }
++ spin_unlock_irqrestore(&isci_request->state_lock, flags);
++
++ }
++ if (needs_cleanup_handling)
++ isci_request_cleanup_completed_loiterer(
++ ihost, idev, isci_request, task);
++ }
++}
++
++/**
++ * isci_terminate_pending_requests() - This function will change the state of
++ *    all of the requests on the given device to "aborting", will terminate the
++ * requests, and wait for them to complete. This function must only be
++ * called from a thread that can wait. Note that the requests are all
++ * terminated and completed (back to the host, if started there).
++ * @ihost: This parameter specifies the SCU.
++ * @idev: This parameter specifies the target.
++ *
++ */
++void isci_terminate_pending_requests(struct isci_host *ihost,
++ struct isci_remote_device *idev)
++{
++ struct completion request_completion;
++ enum isci_request_status old_state;
++ unsigned long flags;
++ LIST_HEAD(list);
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++ list_splice_init(&idev->reqs_in_process, &list);
++
++ /* assumes that isci_terminate_request_core deletes from the list */
++ while (!list_empty(&list)) {
++ struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);
++
++ /* Change state to "terminating" if it is currently
++ * "started".
++ */
++ old_state = isci_request_change_started_to_newstate(ireq,
++ &request_completion,
++ terminating);
++ switch (old_state) {
++ case started:
++ case completed:
++ case aborting:
++ break;
++ default:
++ /* termination in progress, or otherwise dispositioned.
++ * We know the request was on 'list' so should be safe
++ * to move it back to reqs_in_process
++ */
++ list_move(&ireq->dev_node, &idev->reqs_in_process);
++ ireq = NULL;
++ break;
++ }
++
++ if (!ireq)
++ continue;
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++
++ init_completion(&request_completion);
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: idev=%p request=%p; task=%p old_state=%d\n",
++ __func__, idev, ireq,
++ ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL,
++ old_state);
++
++ /* If the old_state is started:
++ * This request was not already being aborted. If it had been,
++ * then the aborting I/O (ie. the TMF request) would not be in
++ * the aborting state, and thus would be terminated here. Note
++ * that since the TMF completion's call to the kernel function
++ * "complete()" does not happen until the pending I/O request
++ * terminate fully completes, we do not have to implement a
++ * special wait here for already aborting requests - the
++ * termination of the TMF request will force the request
++		 * to finish its already started termination.
++ *
++ * If old_state == completed:
++ * This request completed from the SCU hardware perspective
++ * and now just needs cleaning up in terms of freeing the
++ * request and potentially calling up to libsas.
++ *
++ * If old_state == aborting:
++ * This request has already gone through a TMF timeout, but may
++ * not have been terminated; needs cleaning up at least.
++ */
++ isci_terminate_request_core(ihost, idev, ireq);
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++ }
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++}
++
++/**
++ * isci_task_send_lu_reset_sas() - This function is called by one of the SAS Domain
++ * Template functions.
++ * @lun: This parameter specifies the lun to be reset.
++ *
++ * status, zero indicates success.
++ */
++static int isci_task_send_lu_reset_sas(
++ struct isci_host *isci_host,
++ struct isci_remote_device *isci_device,
++ u8 *lun)
++{
++ struct isci_tmf tmf;
++ int ret = TMF_RESP_FUNC_FAILED;
++
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: isci_host = %p, isci_device = %p\n",
++ __func__, isci_host, isci_device);
++ /* Send the LUN reset to the target. By the time the call returns,
++	 * the TMF has either fully executed in the target (in which case the
++	 * return value is TMF_RESP_FUNC_COMPLETE), or the request timed out or
++	 * was otherwise unable to be executed (TMF_RESP_FUNC_FAILED).
++ */
++ isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
++
++ #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
++ ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
++
++ if (ret == TMF_RESP_FUNC_COMPLETE)
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: %p: TMF_LU_RESET passed\n",
++ __func__, isci_device);
++ else
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: %p: TMF_LU_RESET failed (%x)\n",
++ __func__, isci_device, ret);
++
++ return ret;
++}
++
++static int isci_task_send_lu_reset_sata(struct isci_host *ihost,
++ struct isci_remote_device *idev, u8 *lun)
++{
++ int ret = TMF_RESP_FUNC_FAILED;
++ struct isci_tmf tmf;
++
++ /* Send the soft reset to the target */
++ #define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */
++ isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL);
++
++ ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS);
++
++ if (ret != TMF_RESP_FUNC_COMPLETE) {
++ dev_dbg(&ihost->pdev->dev,
++ "%s: Assert SRST failed (%p) = %x",
++ __func__, idev, ret);
++
++ /* Return the failure so that the LUN reset is escalated
++ * to a target reset.
++ */
++ }
++ return ret;
++}
++
++/**
++ * isci_task_lu_reset() - This function is one of the SAS Domain Template
++ *    functions. This is one of the Task Management functions called by libsas,
++ * to reset the given lun. Note the assumption that while this call is
++ * executing, no I/O will be sent by the host to the device.
++ * @lun: This parameter specifies the lun to be reset.
++ *
++ * status, zero indicates success.
++ */
++int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
++{
++ struct isci_host *isci_host = dev_to_ihost(domain_device);
++ struct isci_remote_device *isci_device;
++ unsigned long flags;
++ int ret;
++
++ spin_lock_irqsave(&isci_host->scic_lock, flags);
++ isci_device = isci_lookup_device(domain_device);
++ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
++
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
++ __func__, domain_device, isci_host, isci_device);
++
++ if (isci_device)
++ set_bit(IDEV_EH, &isci_device->flags);
++
++ /* If there is a device reset pending on any request in the
++ * device's list, fail this LUN reset request in order to
++ * escalate to the device reset.
++ */
++ if (!isci_device ||
++ isci_device_is_reset_pending(isci_host, isci_device)) {
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: No dev (%p), or "
++ "RESET PENDING: domain_device=%p\n",
++ __func__, isci_device, domain_device);
++ ret = TMF_RESP_FUNC_FAILED;
++ goto out;
++ }
++
++ /* Send the task management part of the reset. */
++ if (sas_protocol_ata(domain_device->tproto)) {
++ ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
++ } else
++ ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
++
++ /* If the LUN reset worked, all the I/O can now be terminated. */
++ if (ret == TMF_RESP_FUNC_COMPLETE)
++ /* Terminate all I/O now. */
++ isci_terminate_pending_requests(isci_host,
++ isci_device);
++
++ out:
++ isci_put_device(isci_device);
++ return ret;
++}
++
++
++/* int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
++int isci_task_clear_nexus_port(struct asd_sas_port *port)
++{
++ return TMF_RESP_FUNC_FAILED;
++}
++
++
++
++int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
++{
++ return TMF_RESP_FUNC_FAILED;
++}
++
++/* Task Management Functions. Must be called from process context. */
++
++/**
++ * isci_abort_task_process_cb() - This is a helper function for the abort task
++ * TMF command. It manages the request state with respect to the successful
++ * transmission / completion of the abort task request.
++ * @cb_state: This parameter specifies when this function was called - either
++ *    after the TMF request has been started or after it has timed out.
++ * @tmf: This parameter specifies the TMF in progress.
++ *
++ *
++ */
++static void isci_abort_task_process_cb(
++ enum isci_tmf_cb_state cb_state,
++ struct isci_tmf *tmf,
++ void *cb_data)
++{
++ struct isci_request *old_request;
++
++ old_request = (struct isci_request *)cb_data;
++
++ dev_dbg(&old_request->isci_host->pdev->dev,
++ "%s: tmf=%p, old_request=%p\n",
++ __func__, tmf, old_request);
++
++ switch (cb_state) {
++
++ case isci_tmf_started:
++ /* The TMF has been started. Nothing to do here, since the
++ * request state was already set to "aborted" by the abort
++ * task function.
++ */
++ if ((old_request->status != aborted)
++ && (old_request->status != completed))
++ dev_dbg(&old_request->isci_host->pdev->dev,
++ "%s: Bad request status (%d): tmf=%p, old_request=%p\n",
++ __func__, old_request->status, tmf, old_request);
++ break;
++
++ case isci_tmf_timed_out:
++
++ /* Set the task's state to "aborting", since the abort task
++ * function thread set it to "aborted" (above) in anticipation
++ * of the task management request working correctly. Since the
++ * timeout has now fired, the TMF request failed. We set the
++ * state such that the request completion will indicate the
++ * device is no longer present.
++ */
++ isci_request_change_state(old_request, aborting);
++ break;
++
++ default:
++ dev_dbg(&old_request->isci_host->pdev->dev,
++ "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
++ __func__, cb_state, tmf, old_request);
++ break;
++ }
++}
++
++/**
++ * isci_task_abort_task() - This function is one of the SAS Domain Template
++ * functions. This function is called by libsas to abort a specified task.
++ * @task: This parameter specifies the SAS task to abort.
++ *
++ * status, zero indicates success.
++ */
++int isci_task_abort_task(struct sas_task *task)
++{
++ struct isci_host *isci_host = dev_to_ihost(task->dev);
++ DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
++ struct isci_request *old_request = NULL;
++ enum isci_request_status old_state;
++ struct isci_remote_device *isci_device = NULL;
++ struct isci_tmf tmf;
++ int ret = TMF_RESP_FUNC_FAILED;
++ unsigned long flags;
++ bool any_dev_reset = false;
++
++ /* Get the isci_request reference from the task. Note that
++ * this check does not depend on the pending request list
++ * in the device, because tasks driving resets may land here
++ * after completion in the core.
++ */
++ spin_lock_irqsave(&isci_host->scic_lock, flags);
++ spin_lock(&task->task_state_lock);
++
++ old_request = task->lldd_task;
++
++ /* If task is already done, the request isn't valid */
++ if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
++ (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
++ old_request)
++ isci_device = isci_lookup_device(task->dev);
++
++ spin_unlock(&task->task_state_lock);
++ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
++
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: task = %p\n", __func__, task);
++
++ if (!isci_device || !old_request)
++ goto out;
++
++ set_bit(IDEV_EH, &isci_device->flags);
++
++ /* This version of the driver will fail abort requests for
++ * SATA/STP. Failing the abort request this way will cause the
++ * SCSI error handler thread to escalate to LUN reset
++ */
++ if (sas_protocol_ata(task->task_proto)) {
++ dev_dbg(&isci_host->pdev->dev,
++ " task %p is for a STP/SATA device;"
++ " returning TMF_RESP_FUNC_FAILED\n"
++ " to cause a LUN reset...\n", task);
++ goto out;
++ }
++
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: old_request == %p\n", __func__, old_request);
++
++ any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device);
++
++ spin_lock_irqsave(&task->task_state_lock, flags);
++
++ any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
++
++ /* If the extraction of the request reference from the task
++ * failed, then the request has been completed (or if there is a
++ * pending reset then this abort request function must be failed
++ * in order to escalate to the target reset).
++ */
++ if ((old_request == NULL) || any_dev_reset) {
++
++ /* If the device reset task flag is set, fail the task
++ * management request. Otherwise, the original request
++ * has completed.
++ */
++ if (any_dev_reset) {
++
++ /* Turn off the task's DONE to make sure this
++ * task is escalated to a target reset.
++ */
++ task->task_state_flags &= ~SAS_TASK_STATE_DONE;
++
++ /* Make the reset happen as soon as possible. */
++ task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
++
++ spin_unlock_irqrestore(&task->task_state_lock, flags);
++
++ /* Fail the task management request in order to
++ * escalate to the target reset.
++ */
++ ret = TMF_RESP_FUNC_FAILED;
++
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: Failing task abort in order to "
++ "escalate to target reset because\n"
++ "SAS_TASK_NEED_DEV_RESET is set for "
++ "task %p on dev %p\n",
++ __func__, task, isci_device);
++
++ } else {
++ /* The request has already completed and there
++ * is nothing to do here other than to set the task
++ * done bit, and indicate that the task abort function
++			 * was successful.
++ */
++ isci_set_task_doneflags(task);
++
++ spin_unlock_irqrestore(&task->task_state_lock, flags);
++
++ ret = TMF_RESP_FUNC_COMPLETE;
++
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: abort task not needed for %p\n",
++ __func__, task);
++ }
++ goto out;
++ } else {
++ spin_unlock_irqrestore(&task->task_state_lock, flags);
++ }
++
++ spin_lock_irqsave(&isci_host->scic_lock, flags);
++
++ /* Check the request status and change to "aborted" if currently
++ * "starting"; if true then set the I/O kernel completion
++ * struct that will be triggered when the request completes.
++ */
++ old_state = isci_task_validate_request_to_abort(
++ old_request, isci_host, isci_device,
++ &aborted_io_completion);
++ if ((old_state != started) &&
++ (old_state != completed) &&
++ (old_state != aborting)) {
++
++ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
++
++ /* The request was already being handled by someone else (because
++ * they got to set the state away from started).
++ */
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: device = %p; old_request %p already being aborted\n",
++ __func__,
++ isci_device, old_request);
++ ret = TMF_RESP_FUNC_COMPLETE;
++ goto out;
++ }
++ if (task->task_proto == SAS_PROTOCOL_SMP ||
++ test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
++
++ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
++
++ dev_dbg(&isci_host->pdev->dev,
++ "%s: SMP request (%d)"
++ " or complete_in_target (%d), thus no TMF\n",
++ __func__, (task->task_proto == SAS_PROTOCOL_SMP),
++ test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
++
++ /* Set the state on the task. */
++ isci_task_all_done(task);
++
++ ret = TMF_RESP_FUNC_COMPLETE;
++
++ /* Stopping and SMP devices are not sent a TMF, and are not
++ * reset, but the outstanding I/O request is terminated below.
++ */
++ } else {
++		/* Fill in the tmf structure */
++ isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
++ isci_abort_task_process_cb,
++ old_request);
++
++ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
++
++ #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
++ ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
++ ISCI_ABORT_TASK_TIMEOUT_MS);
++
++ if (ret != TMF_RESP_FUNC_COMPLETE)
++ dev_dbg(&isci_host->pdev->dev,
++				"%s: isci_task_execute_tmf failed\n",
++ __func__);
++ }
++ if (ret == TMF_RESP_FUNC_COMPLETE) {
++ set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
++
++ /* Clean up the request on our side, and wait for the aborted
++ * I/O to complete.
++ */
++ isci_terminate_request_core(isci_host, isci_device, old_request);
++ }
++
++ /* Make sure we do not leave a reference to aborted_io_completion */
++ old_request->io_request_completion = NULL;
++ out:
++ isci_put_device(isci_device);
++ return ret;
++}
++
++/**
++ * isci_task_abort_task_set() - This function is one of the SAS Domain Template
++ * functions. This is one of the Task Management functions called by libsas,
++ * to abort all tasks for the given lun.
++ * @d_device: This parameter specifies the domain device associated with this
++ * request.
++ * @lun: This parameter specifies the lun associated with this request.
++ *
++ * Return: TMF_RESP_FUNC_FAILED (this operation is not implemented).
++ */
++int isci_task_abort_task_set(
++ struct domain_device *d_device,
++ u8 *lun)
++{
++ return TMF_RESP_FUNC_FAILED;
++}
++
++/**
++ * isci_task_clear_aca() - This function is one of the SAS Domain Template
++ * functions. This is one of the Task Management functions called by libsas.
++ * @d_device: This parameter specifies the domain device associated with this
++ * request.
++ * @lun: This parameter specifies the lun associated with this request.
++ *
++ * Return: TMF_RESP_FUNC_FAILED (this operation is not implemented).
++ */
++int isci_task_clear_aca(
++ struct domain_device *d_device,
++ u8 *lun)
++{
++ return TMF_RESP_FUNC_FAILED;
++}
++
++/**
++ * isci_task_clear_task_set() - This function is one of the SAS Domain Template
++ * functions. This is one of the Task Management functions called by libsas.
++ * @d_device: This parameter specifies the domain device associated with this
++ * request.
++ * @lun: This parameter specifies the lun associated with this request.
++ *
++ * Return: TMF_RESP_FUNC_FAILED (this operation is not implemented).
++ */
++int isci_task_clear_task_set(
++ struct domain_device *d_device,
++ u8 *lun)
++{
++ return TMF_RESP_FUNC_FAILED;
++}
++
++/**
++ * isci_task_query_task() - This function is implemented to cause libsas to
++ * correctly escalate the failed abort to a LUN or target reset (this is
++ * because sas_scsi_find_task libsas function does not correctly interpret
++ * all return codes from the abort task call). When TMF_RESP_FUNC_SUCC is
++ * returned, libsas turns this into a LUN reset; when FUNC_FAILED is
++ * returned, libsas will turn this into a target reset
++ * @task: This parameter specifies the sas task being queried.
++ *
++ * Return: TMF_RESP_FUNC_FAILED if a device reset is pending for this
++ * task's device, otherwise TMF_RESP_FUNC_SUCC.
++ */
++int isci_task_query_task(
++ struct sas_task *task)
++{
++ /* See if there is a pending device reset for this device. */
++ if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
++ return TMF_RESP_FUNC_FAILED;
++ else
++ return TMF_RESP_FUNC_SUCC;
++}
++
++/*
++ * isci_task_request_complete() - This function is called by the sci core when
++ * a task request completes.
++ * @ihost: This parameter specifies the ISCI host object
++ * @ireq: This parameter is the completed isci_request object.
++ * @completion_status: This parameter specifies the completion status from the
++ * sci core.
++ *
++ * none.
++ */
++void
++isci_task_request_complete(struct isci_host *ihost,
++ struct isci_request *ireq,
++ enum sci_task_status completion_status)
++{
++ struct isci_tmf *tmf = isci_request_access_tmf(ireq);
++ struct completion *tmf_complete;
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: request = %p, status=%d\n",
++ __func__, ireq, completion_status);
++
++ isci_request_change_state(ireq, completed);
++
++ tmf->status = completion_status;
++ set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
++
++ if (tmf->proto == SAS_PROTOCOL_SSP) {
++ memcpy(&tmf->resp.resp_iu,
++ &ireq->ssp.rsp,
++ SSP_RESP_IU_MAX_SIZE);
++ } else if (tmf->proto == SAS_PROTOCOL_SATA) {
++ memcpy(&tmf->resp.d2h_fis,
++ &ireq->stp.rsp,
++ sizeof(struct dev_to_host_fis));
++ }
++
++ /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
++ tmf_complete = tmf->complete;
++
++ sci_controller_complete_io(ihost, ireq->target_device, ireq);
++	/* Set the 'terminated' flag to make sure the request cannot be
++	 * terminated or completed again.
++ */
++ set_bit(IREQ_TERMINATED, &ireq->flags);
++
++ isci_request_change_state(ireq, unallocated);
++ list_del_init(&ireq->dev_node);
++
++ /* The task management part completes last. */
++ complete(tmf_complete);
++}
++
++static void isci_smp_task_timedout(unsigned long _task)
++{
++ struct sas_task *task = (void *) _task;
++ unsigned long flags;
++
++ spin_lock_irqsave(&task->task_state_lock, flags);
++ if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
++ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
++ spin_unlock_irqrestore(&task->task_state_lock, flags);
++
++ complete(&task->completion);
++}
++
++static void isci_smp_task_done(struct sas_task *task)
++{
++ if (!del_timer(&task->timer))
++ return;
++ complete(&task->completion);
++}
++
++static struct sas_task *isci_alloc_task(void)
++{
++ struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
++
++ if (task) {
++ INIT_LIST_HEAD(&task->list);
++ spin_lock_init(&task->task_state_lock);
++ task->task_state_flags = SAS_TASK_STATE_PENDING;
++ init_timer(&task->timer);
++ init_completion(&task->completion);
++ }
++
++ return task;
++}
++
++static void isci_free_task(struct isci_host *ihost, struct sas_task *task)
++{
++ if (task) {
++ BUG_ON(!list_empty(&task->list));
++ kfree(task);
++ }
++}
++
++static int isci_smp_execute_task(struct isci_host *ihost,
++ struct domain_device *dev, void *req,
++ int req_size, void *resp, int resp_size)
++{
++ int res, retry;
++ struct sas_task *task = NULL;
++
++ for (retry = 0; retry < 3; retry++) {
++ task = isci_alloc_task();
++ if (!task)
++ return -ENOMEM;
++
++ task->dev = dev;
++ task->task_proto = dev->tproto;
++ sg_init_one(&task->smp_task.smp_req, req, req_size);
++ sg_init_one(&task->smp_task.smp_resp, resp, resp_size);
++
++ task->task_done = isci_smp_task_done;
++
++ task->timer.data = (unsigned long) task;
++ task->timer.function = isci_smp_task_timedout;
++ task->timer.expires = jiffies + 10*HZ;
++ add_timer(&task->timer);
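++
++		/* If this timer fires before isci_smp_task_done() runs,
++		 * isci_smp_task_timedout() above marks the task aborted
++		 * and completes it, so the wait below cannot hang.
++		 */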
++
++ res = isci_task_execute_task(task, 1, GFP_KERNEL);
++
++ if (res) {
++ del_timer(&task->timer);
++ dev_dbg(&ihost->pdev->dev,
++ "%s: executing SMP task failed:%d\n",
++ __func__, res);
++ goto ex_err;
++ }
++
++ wait_for_completion(&task->completion);
++ res = -ECOMM;
++ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
++ dev_dbg(&ihost->pdev->dev,
++ "%s: smp task timed out or aborted\n",
++ __func__);
++ isci_task_abort_task(task);
++ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
++ dev_dbg(&ihost->pdev->dev,
++ "%s: SMP task aborted and not done\n",
++ __func__);
++ goto ex_err;
++ }
++ }
++ if (task->task_status.resp == SAS_TASK_COMPLETE &&
++ task->task_status.stat == SAM_STAT_GOOD) {
++ res = 0;
++ break;
++ }
++ if (task->task_status.resp == SAS_TASK_COMPLETE &&
++ task->task_status.stat == SAS_DATA_UNDERRUN) {
++ /* no error, but return the number of bytes of
++ * underrun */
++ res = task->task_status.residual;
++ break;
++ }
++ if (task->task_status.resp == SAS_TASK_COMPLETE &&
++ task->task_status.stat == SAS_DATA_OVERRUN) {
++ res = -EMSGSIZE;
++ break;
++ } else {
++ dev_dbg(&ihost->pdev->dev,
++ "%s: task to dev %016llx response: 0x%x "
++ "status 0x%x\n", __func__,
++ SAS_ADDR(dev->sas_addr),
++ task->task_status.resp,
++ task->task_status.stat);
++ isci_free_task(ihost, task);
++ task = NULL;
++ }
++ }
++ex_err:
++ BUG_ON(retry == 3 && task != NULL);
++ isci_free_task(ihost, task);
++ return res;
++}
++
++#define DISCOVER_REQ_SIZE 16
++#define DISCOVER_RESP_SIZE 56
++
++int isci_smp_get_phy_attached_dev_type(struct isci_host *ihost,
++ struct domain_device *dev,
++ int phy_id, int *adt)
++{
++ struct smp_resp *disc_resp;
++ u8 *disc_req;
++ int res;
++
++ disc_resp = kzalloc(DISCOVER_RESP_SIZE, GFP_KERNEL);
++ if (!disc_resp)
++ return -ENOMEM;
++
++ disc_req = kzalloc(DISCOVER_REQ_SIZE, GFP_KERNEL);
++ if (disc_req) {
++ disc_req[0] = SMP_REQUEST;
++ disc_req[1] = SMP_DISCOVER;
++ disc_req[9] = phy_id;
++ } else {
++ kfree(disc_resp);
++ return -ENOMEM;
++ }
++ res = isci_smp_execute_task(ihost, dev, disc_req, DISCOVER_REQ_SIZE,
++ disc_resp, DISCOVER_RESP_SIZE);
++ if (!res) {
++ if (disc_resp->result != SMP_RESP_FUNC_ACC)
++ res = disc_resp->result;
++ else
++ *adt = disc_resp->disc.attached_dev_type;
++ }
++ kfree(disc_req);
++ kfree(disc_resp);
++
++ return res;
++}
++
++static void isci_wait_for_smp_phy_reset(struct isci_remote_device *idev, int phy_num)
++{
++ struct domain_device *dev = idev->domain_dev;
++ struct isci_port *iport = idev->isci_port;
++ struct isci_host *ihost = iport->isci_host;
++ int res, iteration = 0, attached_device_type;
++ #define STP_WAIT_MSECS 25000
++ unsigned long tmo = msecs_to_jiffies(STP_WAIT_MSECS);
++ unsigned long deadline = jiffies + tmo;
++ enum {
++ SMP_PHYWAIT_PHYDOWN,
++ SMP_PHYWAIT_PHYUP,
++ SMP_PHYWAIT_DONE
++ } phy_state = SMP_PHYWAIT_PHYDOWN;
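++
++	/* The loop below is a two-phase state machine: first wait for the
++	 * attached device to drop off the expander phy (PHYDOWN -> PHYUP),
++	 * then wait for it to reappear (PHYUP -> DONE).
++	 */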
++
++ /* While there is time, wait for the phy to go away and come back */
++ while (time_is_after_jiffies(deadline) && phy_state != SMP_PHYWAIT_DONE) {
++ int event = atomic_read(&iport->event);
++
++ ++iteration;
++
++ tmo = wait_event_timeout(ihost->eventq,
++ event != atomic_read(&iport->event) ||
++ !test_bit(IPORT_BCN_BLOCKED, &iport->flags),
++ tmo);
++ /* link down, stop polling */
++ if (!test_bit(IPORT_BCN_BLOCKED, &iport->flags))
++ break;
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: iport %p, iteration %d,"
++ " phase %d: time_remaining %lu, bcns = %d\n",
++ __func__, iport, iteration, phy_state,
++ tmo, test_bit(IPORT_BCN_PENDING, &iport->flags));
++
++ res = isci_smp_get_phy_attached_dev_type(ihost, dev, phy_num,
++ &attached_device_type);
++ tmo = deadline - jiffies;
++
++ if (res) {
++ dev_dbg(&ihost->pdev->dev,
++ "%s: iteration %d, phase %d:"
++ " SMP error=%d, time_remaining=%lu\n",
++ __func__, iteration, phy_state, res, tmo);
++ break;
++ }
++ dev_dbg(&ihost->pdev->dev,
++ "%s: iport %p, iteration %d,"
++ " phase %d: time_remaining %lu, bcns = %d, "
++ "attdevtype = %x\n",
++ __func__, iport, iteration, phy_state,
++ tmo, test_bit(IPORT_BCN_PENDING, &iport->flags),
++ attached_device_type);
++
++ switch (phy_state) {
++ case SMP_PHYWAIT_PHYDOWN:
++ /* Has the device gone away? */
++ if (!attached_device_type)
++ phy_state = SMP_PHYWAIT_PHYUP;
++
++ break;
++
++ case SMP_PHYWAIT_PHYUP:
++ /* Has the device come back? */
++ if (attached_device_type)
++ phy_state = SMP_PHYWAIT_DONE;
++ break;
++
++ case SMP_PHYWAIT_DONE:
++ break;
++ }
++	}
++ dev_dbg(&ihost->pdev->dev, "%s: done\n", __func__);
++}
++
++static int isci_reset_device(struct isci_host *ihost,
++ struct isci_remote_device *idev)
++{
++ struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
++ struct isci_port *iport = idev->isci_port;
++ enum sci_status status;
++ unsigned long flags;
++ int rc;
++
++ dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++ status = sci_remote_device_reset(idev);
++ if (status != SCI_SUCCESS) {
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++
++ dev_dbg(&ihost->pdev->dev,
++ "%s: sci_remote_device_reset(%p) returned %d!\n",
++ __func__, idev, status);
++
++ return TMF_RESP_FUNC_FAILED;
++ }
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++
++ /* Make sure all pending requests are able to be fully terminated. */
++ isci_device_clear_reset_pending(ihost, idev);
++
++ /* If this is a device on an expander, disable BCN processing. */
++ if (!scsi_is_sas_phy_local(phy))
++ set_bit(IPORT_BCN_BLOCKED, &iport->flags);
++
++ rc = sas_phy_reset(phy, true);
++
++ /* Terminate in-progress I/O now. */
++ isci_remote_device_nuke_requests(ihost, idev);
++
++ /* Since all pending TCs have been cleaned, resume the RNC. */
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++ status = sci_remote_device_reset_complete(idev);
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++
++ /* If this is a device on an expander, bring the phy back up. */
++ if (!scsi_is_sas_phy_local(phy)) {
++ /* A phy reset will cause the device to go away then reappear.
++ * Since libsas will take action on incoming BCNs (eg. remove
++ * a device going through an SMP phy-control driven reset),
++ * we need to wait until the phy comes back up before letting
++ * discovery proceed in libsas.
++ */
++ isci_wait_for_smp_phy_reset(idev, phy->number);
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++ isci_port_bcn_enable(ihost, idev->isci_port);
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++ }
++
++ if (status != SCI_SUCCESS) {
++ dev_dbg(&ihost->pdev->dev,
++ "%s: sci_remote_device_reset_complete(%p) "
++ "returned %d!\n", __func__, idev, status);
++ }
++
++ dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
++
++ return rc;
++}
++
++int isci_task_I_T_nexus_reset(struct domain_device *dev)
++{
++ struct isci_host *ihost = dev_to_ihost(dev);
++ struct isci_remote_device *idev;
++ unsigned long flags;
++ int ret;
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++ idev = isci_lookup_device(dev);
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++
++ if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
++ ret = TMF_RESP_FUNC_COMPLETE;
++ goto out;
++ }
++
++ ret = isci_reset_device(ihost, idev);
++ out:
++ isci_put_device(idev);
++ return ret;
++}
++
++int isci_bus_reset_handler(struct scsi_cmnd *cmd)
++{
++ struct domain_device *dev = sdev_to_domain_dev(cmd->device);
++ struct isci_host *ihost = dev_to_ihost(dev);
++ struct isci_remote_device *idev;
++ unsigned long flags;
++ int ret;
++
++ spin_lock_irqsave(&ihost->scic_lock, flags);
++ idev = isci_lookup_device(dev);
++ spin_unlock_irqrestore(&ihost->scic_lock, flags);
++
++ if (!idev) {
++ ret = TMF_RESP_FUNC_COMPLETE;
++ goto out;
++ }
++
++ ret = isci_reset_device(ihost, idev);
++ out:
++ isci_put_device(idev);
++ return ret;
++}
+diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
+new file mode 100644
+index 0000000..4a7fa90
+--- /dev/null
++++ b/drivers/scsi/isci/task.h
+@@ -0,0 +1,367 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++#ifndef _ISCI_TASK_H_
++#define _ISCI_TASK_H_
++
++#include <scsi/sas_ata.h>
++#include "host.h"
++
++struct isci_request;
++
++/**
++ * enum isci_tmf_cb_state - This enum defines the possible states in which the
++ * TMF callback function is invoked during the TMF execution process.
++ *
++ *
++ */
++enum isci_tmf_cb_state {
++
++ isci_tmf_init_state = 0,
++ isci_tmf_started,
++ isci_tmf_timed_out
++};
++
++/**
++ * enum isci_tmf_function_codes - This enum defines the possible preparations
++ * of task management requests.
++ *
++ *
++ */
++enum isci_tmf_function_codes {
++
++ isci_tmf_func_none = 0,
++ isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
++ isci_tmf_ssp_lun_reset = TMF_LU_RESET,
++ isci_tmf_sata_srst_high = TMF_LU_RESET + 0x100, /* Non SCSI */
++ isci_tmf_sata_srst_low = TMF_LU_RESET + 0x101 /* Non SCSI */
++};
++
++/**
++ * struct isci_tmf - This class represents the task management object which
++ * acts as an interface to libsas for processing task management requests
++ *
++ *
++ */
++struct isci_tmf {
++
++ struct completion *complete;
++ enum sas_protocol proto;
++ union {
++ struct ssp_response_iu resp_iu;
++ struct dev_to_host_fis d2h_fis;
++ u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
++ } resp;
++ unsigned char lun[8];
++ u16 io_tag;
++ struct isci_remote_device *device;
++ enum isci_tmf_function_codes tmf_code;
++ int status;
++
++ /* The optional callback function allows the user process to
++ * track the TMF transmit / timeout conditions.
++ */
++ void (*cb_state_func)(
++ enum isci_tmf_cb_state,
++ struct isci_tmf *, void *);
++ void *cb_data;
++
++};
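++
++/*
++ * A minimal sketch of how an isci_tmf is typically used, mirroring the
++ * abort path in task.c (see isci_task_abort_task()):
++ *
++ *	struct isci_tmf tmf;
++ *
++ *	isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
++ *				       isci_abort_task_process_cb,
++ *				       old_request);
++ *	ret = isci_task_execute_tmf(ihost, idev, &tmf, timeout_ms);
++ *
++ * The cb_state_func callback then observes the started/timed-out
++ * transitions enumerated in isci_tmf_cb_state.
++ */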
++
++static inline void isci_print_tmf(struct isci_tmf *tmf)
++{
++ if (SAS_PROTOCOL_SATA == tmf->proto)
++ dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
++ "%s: status = %x\n"
++ "tmf->resp.d2h_fis.status = %x\n"
++ "tmf->resp.d2h_fis.error = %x\n",
++ __func__,
++ tmf->status,
++ tmf->resp.d2h_fis.status,
++ tmf->resp.d2h_fis.error);
++ else
++ dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
++ "%s: status = %x\n"
++			"tmf->resp.resp_iu.datapres = %x\n"
++			"tmf->resp.resp_iu.status = %x\n"
++			"tmf->resp.resp_iu.response_data_len = %x\n"
++			"tmf->resp.resp_iu.resp_data[0] = %x\n"
++			"tmf->resp.resp_iu.resp_data[1] = %x\n"
++			"tmf->resp.resp_iu.resp_data[2] = %x\n"
++			"tmf->resp.resp_iu.resp_data[3] = %x\n",
++ __func__,
++ tmf->status,
++ tmf->resp.resp_iu.datapres,
++ tmf->resp.resp_iu.status,
++ be32_to_cpu(tmf->resp.resp_iu.response_data_len),
++ tmf->resp.resp_iu.resp_data[0],
++ tmf->resp.resp_iu.resp_data[1],
++ tmf->resp.resp_iu.resp_data[2],
++ tmf->resp.resp_iu.resp_data[3]);
++}
++
++int isci_task_execute_task(
++ struct sas_task *task,
++ int num,
++ gfp_t gfp_flags);
++
++int isci_task_abort_task(
++ struct sas_task *task);
++
++int isci_task_abort_task_set(
++ struct domain_device *d_device,
++ u8 *lun);
++
++int isci_task_clear_aca(
++ struct domain_device *d_device,
++ u8 *lun);
++
++int isci_task_clear_task_set(
++ struct domain_device *d_device,
++ u8 *lun);
++
++int isci_task_query_task(
++ struct sas_task *task);
++
++int isci_task_lu_reset(
++ struct domain_device *d_device,
++ u8 *lun);
++
++int isci_task_clear_nexus_port(
++ struct asd_sas_port *port);
++
++int isci_task_clear_nexus_ha(
++ struct sas_ha_struct *ha);
++
++int isci_task_I_T_nexus_reset(
++ struct domain_device *d_device);
++
++void isci_task_request_complete(
++ struct isci_host *isci_host,
++ struct isci_request *request,
++ enum sci_task_status completion_status);
++
++u16 isci_task_ssp_request_get_io_tag_to_manage(
++ struct isci_request *request);
++
++u8 isci_task_ssp_request_get_function(
++ struct isci_request *request);
++
++void *isci_task_ssp_request_get_response_data_address(
++ struct isci_request *request);
++
++u32 isci_task_ssp_request_get_response_data_length(
++ struct isci_request *request);
++
++int isci_queuecommand(
++ struct scsi_cmnd *scsi_cmd,
++ void (*donefunc)(struct scsi_cmnd *));
++
++int isci_bus_reset_handler(struct scsi_cmnd *cmd);
++
++/**
++ * enum isci_completion_selection - This enum defines the possible actions to
++ * take with respect to a given request's notification back to libsas.
++ *
++ *
++ */
++enum isci_completion_selection {
++
++ isci_perform_normal_io_completion, /* Normal notify (task_done) */
++ isci_perform_aborted_io_completion, /* No notification. */
++ isci_perform_error_io_completion /* Use sas_task_abort */
++};
++
++static inline void isci_set_task_doneflags(
++ struct sas_task *task)
++{
++	/* Since no further action will be taken on this task,
++ * make sure to mark it complete from the lldd perspective.
++ */
++ task->task_state_flags |= SAS_TASK_STATE_DONE;
++ task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
++ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
++}
++
++/**
++ * isci_task_all_done() - This function clears the task bits to indicate the
++ * LLDD is done with the task.
++ *
++ *
++ */
++static inline void isci_task_all_done(
++ struct sas_task *task)
++{
++ unsigned long flags;
++
++	/* Since no further action will be taken on this task,
++ * make sure to mark it complete from the lldd perspective.
++ */
++ spin_lock_irqsave(&task->task_state_lock, flags);
++ isci_set_task_doneflags(task);
++ spin_unlock_irqrestore(&task->task_state_lock, flags);
++}
++
++/**
++ * isci_task_set_completion_status() - This function sets the completion status
++ * for the request.
++ * @task: This parameter is the completed request.
++ * @response: This parameter is the response code for the completed task.
++ * @status: This parameter is the status code for the completed task.
++ *
++ * Return: the new notification mode for the request.
++ */
++static inline enum isci_completion_selection
++isci_task_set_completion_status(
++ struct sas_task *task,
++ enum service_response response,
++ enum exec_status status,
++ enum isci_completion_selection task_notification_selection)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&task->task_state_lock, flags);
++
++ /* If a device reset is being indicated, make sure the I/O
++ * is in the error path.
++ */
++ if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
++ /* Fail the I/O to make sure it goes into the error path. */
++ response = SAS_TASK_UNDELIVERED;
++ status = SAM_STAT_TASK_ABORTED;
++
++ task_notification_selection = isci_perform_error_io_completion;
++ }
++ task->task_status.resp = response;
++ task->task_status.stat = status;
++
++ switch (task_notification_selection) {
++
++ case isci_perform_error_io_completion:
++
++ if (task->task_proto == SAS_PROTOCOL_SMP) {
++ /* There is no error escalation in the SMP case.
++ * Convert to a normal completion to avoid the
++ * timeout in the discovery path and to let the
++ * next action take place quickly.
++ */
++ task_notification_selection
++ = isci_perform_normal_io_completion;
++
++ /* Fall through to the normal case... */
++ } else {
++ /* Use sas_task_abort */
++ /* Leave SAS_TASK_STATE_DONE clear
++ * Leave SAS_TASK_AT_INITIATOR set.
++ */
++ break;
++ }
++
++ case isci_perform_aborted_io_completion:
++ /* This path can occur with task-managed requests as well as
++ * requests terminated because of LUN or device resets.
++ */
++ /* Fall through to the normal case... */
++ case isci_perform_normal_io_completion:
++ /* Normal notification (task_done) */
++ isci_set_task_doneflags(task);
++ break;
++ default:
++ WARN_ONCE(1, "unknown task_notification_selection: %d\n",
++ task_notification_selection);
++ break;
++ }
++
++ spin_unlock_irqrestore(&task->task_state_lock, flags);
++
++ return task_notification_selection;
++}
++
++/**
++ * isci_execpath_callback() - This function is called from the task
++ * execute path when the task needs to callback libsas about the submit-time
++ * task failure. The callback occurs either through the task's done function
++ * or through sas_task_abort. In the case of regular non-discovery SATA/STP I/O
++ * requests, libsas takes the host lock before calling execute task. Therefore
++ * in this situation the host lock must be managed before calling the callback
++ * function.
++ *
++ * @ihost: This parameter is the controller to which the I/O request was sent.
++ * @task: This parameter is the I/O request.
++ * @func: This parameter is the function to call in the correct context.
++ */
++static inline void isci_execpath_callback(struct isci_host *ihost,
++ struct sas_task *task,
++ void (*func)(struct sas_task *))
++{
++ struct domain_device *dev = task->dev;
++
++ if (dev_is_sata(dev) && task->uldd_task) {
++ unsigned long flags;
++
++ /* Since we are still in the submit path, and since
++ * libsas takes the host lock on behalf of SATA
++ * devices before I/O starts (in the non-discovery case),
++ * we need to unlock before we can call the callback function.
++ */
++ raw_local_irq_save(flags);
++ spin_unlock(dev->sata_dev.ap->lock);
++ func(task);
++ spin_lock(dev->sata_dev.ap->lock);
++ raw_local_irq_restore(flags);
++ } else
++ func(task);
++}
++#endif /* !defined(_ISCI_TASK_H_) */
+diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
+new file mode 100644
+index 0000000..16f88ab
+--- /dev/null
++++ b/drivers/scsi/isci/unsolicited_frame_control.c
+@@ -0,0 +1,225 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "host.h"
++#include "unsolicited_frame_control.h"
++#include "registers.h"
++
++int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
++{
++ struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
++ struct sci_unsolicited_frame *uf;
++ u32 buf_len, header_len, i;
++ dma_addr_t dma;
++ size_t size;
++ void *virt;
++
++ /*
++ * Prepare all of the memory sizes for the UF headers, UF address
++ * table, and UF buffers themselves.
++ */
++ buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
++ header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
++ size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]);
++
++ /*
++ * The Unsolicited Frame buffers are set at the start of the UF
++ * memory descriptor entry. The headers and address table will be
++ * placed after the buffers.
++ */
++ virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL);
++ if (!virt)
++ return -ENOMEM;
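++
++	/*
++	 * The resulting layout of the single coherent allocation is:
++	 *
++	 *	[ UF buffers | UF headers | UF address table ]
++	 *	at dma, dma + buf_len and dma + buf_len + header_len
++	 *	respectively.
++	 */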
++
++ /*
++ * Program the location of the UF header table into the SCU.
++ * Notes:
++	 * - The address must align on a 64-byte boundary. This is already
++	 * guaranteed, since the unsolicited frame buffers are aligned on
++	 * a 1KB boundary.
++ * - Program unused header entries to overlap with the last
++ * unsolicited frame. The silicon will never DMA to these unused
++ * headers, since we program the UF address table pointers to
++ * NULL.
++ */
++ uf_control->headers.physical_address = dma + buf_len;
++ uf_control->headers.array = virt + buf_len;
++
++ /*
++ * Program the location of the UF address table into the SCU.
++ * Notes:
++	 * - The address must align on a 64-bit boundary. This is guaranteed
++	 * because the headers programmed above start on a 64-byte boundary
++	 * and each header is 64 bytes in size.
++ */
++ uf_control->address_table.physical_address = dma + buf_len + header_len;
++ uf_control->address_table.array = virt + buf_len + header_len;
++ uf_control->get = 0;
++
++	/*
++	 * UF buffer requirements are:
++	 * - The last entry in the UF queue is not NULL.
++	 * - There is a power of 2 number of entries (NULL or not-NULL)
++	 *   programmed into the queue.
++	 * - Aligned on a 1KB boundary.
++	 */
++
++ /*
++ * Program the actual used UF buffers into the UF address table and
++ * the controller's array of UFs.
++ */
++ for (i = 0; i < SCU_MAX_UNSOLICITED_FRAMES; i++) {
++ uf = &uf_control->buffers.array[i];
++
++ uf_control->address_table.array[i] = dma;
++
++ uf->buffer = virt;
++ uf->header = &uf_control->headers.array[i];
++ uf->state = UNSOLICITED_FRAME_EMPTY;
++
++ /*
++ * Increment the address of the physical and virtual memory
++ * pointers. Everything is aligned on 1k boundary with an
++ * increment of 1k.
++ */
++ virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
++ dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
++ }
++
++ return 0;
++}
++
++enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control,
++ u32 frame_index,
++ void **frame_header)
++{
++ if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
++		/* Skip the first word in the frame since this is a control
++		 * word used by the hardware.
++ */
++ *frame_header = &uf_control->buffers.array[frame_index].header->data;
++
++ return SCI_SUCCESS;
++ }
++
++ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
++}
++
++enum sci_status sci_unsolicited_frame_control_get_buffer(struct sci_unsolicited_frame_control *uf_control,
++ u32 frame_index,
++ void **frame_buffer)
++{
++ if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
++ *frame_buffer = uf_control->buffers.array[frame_index].buffer;
++
++ return SCI_SUCCESS;
++ }
++
++ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
++}
++
++bool sci_unsolicited_frame_control_release_frame(struct sci_unsolicited_frame_control *uf_control,
++ u32 frame_index)
++{
++ u32 frame_get;
++ u32 frame_cycle;
++
++ frame_get = uf_control->get & (SCU_MAX_UNSOLICITED_FRAMES - 1);
++ frame_cycle = uf_control->get & SCU_MAX_UNSOLICITED_FRAMES;
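++
++	/* uf_control->get packs the ring index into the low bits (masked
++	 * with SCU_MAX_UNSOLICITED_FRAMES - 1) and keeps a cycle bit just
++	 * above them to disambiguate wrap-around; the ENABLE bit is OR'd
++	 * back in when the value is written out at the bottom of this
++	 * function.
++	 */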
++
++ /*
++ * In the event there are NULL entries in the UF table, we need to
++ * advance the get pointer in order to find out if this frame should
++ * be released (i.e. update the get pointer)
++ */
++	while (frame_get < SCU_MAX_UNSOLICITED_FRAMES &&
++	       lower_32_bits(uf_control->address_table.array[frame_get]) == 0 &&
++	       upper_32_bits(uf_control->address_table.array[frame_get]) == 0)
++		frame_get++;
++
++	/*
++	 * If the scan ran off the end then the table had a NULL entry as
++	 * its last element, which is illegal.
++	 */
++ BUG_ON(frame_get >= SCU_MAX_UNSOLICITED_FRAMES);
++ if (frame_index >= SCU_MAX_UNSOLICITED_FRAMES)
++ return false;
++
++ uf_control->buffers.array[frame_index].state = UNSOLICITED_FRAME_RELEASED;
++
++ if (frame_get != frame_index) {
++ /*
++ * Frames remain in use until we advance the get pointer
++ * so there is nothing we can do here
++ */
++ return false;
++ }
++
++	/*
++	 * The frame index is equal to the current get pointer, so we can
++	 * now free up all of the consecutive frame entries that have
++	 * already been released.
++	 */
++ while (uf_control->buffers.array[frame_get].state == UNSOLICITED_FRAME_RELEASED) {
++ uf_control->buffers.array[frame_get].state = UNSOLICITED_FRAME_EMPTY;
++
++ if (frame_get+1 == SCU_MAX_UNSOLICITED_FRAMES-1) {
++ frame_cycle ^= SCU_MAX_UNSOLICITED_FRAMES;
++ frame_get = 0;
++ } else
++ frame_get++;
++ }
++
++ uf_control->get = SCU_UFQGP_GEN_BIT(ENABLE_BIT) | frame_cycle | frame_get;
++
++ return true;
++}
+diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
+new file mode 100644
+index 0000000..75d8966
+--- /dev/null
++++ b/drivers/scsi/isci/unsolicited_frame_control.h
+@@ -0,0 +1,278 @@
++/*
++ * This file is provided under a dual BSD/GPLv2 license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * The full GNU General Public License is included in this distribution
++ * in the file called LICENSE.GPL.
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
++#define _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
++
++#include "isci.h"
++
++#define SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS 15
++
++/**
++ * struct scu_unsolicited_frame_header -
++ *
++ * This structure delineates the format of an unsolicited frame header. The
++ * first DWORD are UF attributes defined by the silicon architecture. The data
++ * depicts actual header information received on the link.
++ */
++struct scu_unsolicited_frame_header {
++ /**
++ * This field indicates if there is an Initiator Index Table entry with
++ * which this header is associated.
++ */
++ u32 iit_exists:1;
++
++ /**
++ * This field simply indicates the protocol type (i.e. SSP, STP, SMP).
++ */
++ u32 protocol_type:3;
++
++ /**
++ * This field indicates if the frame is an address frame (IAF or OAF)
++ * or if it is a information unit frame.
++ */
++ u32 is_address_frame:1;
++
++ /**
++ * This field simply indicates the connection rate at which the frame
++ * was received.
++ */
++ u32 connection_rate:4;
++
++ u32 reserved:23;
++
++ /**
++ * This field represents the actual header data received on the link.
++ */
++ u32 data[SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS];
++
++};
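++
++/* The bitfields above pack into the single attribute DWORD
++ * (1 + 3 + 1 + 4 + 23 = 32 bits) that precedes the header data
++ * received on the link.
++ */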
++
++/**
++ * enum unsolicited_frame_state -
++ *
++ * This enumeration represents the current unsolicited frame state. The
++ * controller object cannot update the hardware unsolicited frame put pointer
++ * unless it has already processed the prior unsolicited frames.
++ */
++enum unsolicited_frame_state {
++ /**
++ * This state is when the frame is empty and not in use. It is
++ * different from the released state in that the hardware could DMA
++ * data to this frame buffer.
++ */
++ UNSOLICITED_FRAME_EMPTY,
++
++ /**
++	 * This state is set when the frame buffer is in use by some
++ * object in the system.
++ */
++ UNSOLICITED_FRAME_IN_USE,
++
++ /**
++ * This state is set when the frame is returned to the free pool
++ * but one or more frames prior to this one are still in use.
++ * Once all of the frame before this one are freed it will go to
++ * the empty state.
++ */
++ UNSOLICITED_FRAME_RELEASED,
++
++ UNSOLICITED_FRAME_MAX_STATES
++};
++
++/**
++ * struct sci_unsolicited_frame -
++ *
++ * This is the unsolicited frame data structure; it acts as the container for
++ * the current frame state, frame header and frame buffer.
++ */
++struct sci_unsolicited_frame {
++ /**
++ * This field contains the current frame state
++ */
++ enum unsolicited_frame_state state;
++
++ /**
++ * This field points to the frame header data.
++ */
++ struct scu_unsolicited_frame_header *header;
++
++ /**
++ * This field points to the frame buffer data.
++ */
++ void *buffer;
++
++};
++
++/**
++ * struct sci_uf_header_array -
++ *
++ * This structure contains all of the unsolicited frame header information.
++ */
++struct sci_uf_header_array {
++ /**
++	 * This field represents a virtual pointer to the start address
++	 * of the UF header array.
++ */
++ struct scu_unsolicited_frame_header *array;
++
++ /**
++ * This field specifies the physical address location for the UF
++	 * header array.
++ */
++ dma_addr_t physical_address;
++
++};
++
++/**
++ * struct sci_uf_buffer_array -
++ *
++ * This structure contains all of the unsolicited frame buffer (actual payload)
++ * information.
++ */
++struct sci_uf_buffer_array {
++ /**
++	 * This field is the unsolicited frame data; it is used to manage
++	 * the data for the unsolicited frame requests. It also represents
++ * the virtual address location that corresponds to the
++ * physical_address field.
++ */
++ struct sci_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES];
++
++ /**
++ * This field specifies the physical address location for the UF
++ * buffer array.
++ */
++ dma_addr_t physical_address;
++};
++
++/**
++ * struct sci_uf_address_table_array -
++ *
++ * This object maintains all of the unsolicited frame address table specific
++ * data. The address table is a collection of 64-bit pointers that point to
++ * 1KB buffers into which the silicon will DMA unsolicited frames.
++ */
++struct sci_uf_address_table_array {
++ /**
++ * This field represents a virtual pointer that refers to the
++ * starting address of the UF address table.
++ * 64-bit pointers are required by the hardware.
++ */
++ u64 *array;
++
++ /**
++ * This field specifies the physical address location for the UF
++ * address table.
++ */
++ dma_addr_t physical_address;
++
++};
++
++/**
++ * struct sci_unsolicited_frame_control -
++ *
++ * This object contains all of the data necessary to handle unsolicited frames.
++ */
++struct sci_unsolicited_frame_control {
++ /**
++ * This field is the software copy of the unsolicited frame queue
++ * get pointer. The controller object writes this value to the
++ * hardware to let the hardware put more unsolicited frame entries.
++ */
++ u32 get;
++
++ /**
++ * This field contains all of the unsolicited frame header
++ * specific fields.
++ */
++ struct sci_uf_header_array headers;
++
++ /**
++ * This field contains all of the unsolicited frame buffer
++ * specific fields.
++ */
++ struct sci_uf_buffer_array buffers;
++
++ /**
++ * This field contains all of the unsolicited frame address table
++ * specific fields.
++ */
++ struct sci_uf_address_table_array address_table;
++
++};
++
++struct isci_host;
++
++int sci_unsolicited_frame_control_construct(struct isci_host *ihost);
++
++enum sci_status sci_unsolicited_frame_control_get_header(
++ struct sci_unsolicited_frame_control *uf_control,
++ u32 frame_index,
++ void **frame_header);
++
++enum sci_status sci_unsolicited_frame_control_get_buffer(
++ struct sci_unsolicited_frame_control *uf_control,
++ u32 frame_index,
++ void **frame_buffer);
++
++bool sci_unsolicited_frame_control_release_frame(
++ struct sci_unsolicited_frame_control *uf_control,
++ u32 frame_index);
++
++#endif /* _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ */
+--
+1.7.7.3
+
Added: dists/squeeze/linux-2.6/debian/patches/features/x86/x86-Introduce-pci_map_biosrom.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/squeeze/linux-2.6/debian/patches/features/x86/x86-Introduce-pci_map_biosrom.patch Wed Dec 21 06:29:16 2011 (r18403)
@@ -0,0 +1,548 @@
+From: Dan Williams <dan.j.williams at intel.com>
+Date: Tue, 8 Mar 2011 10:36:19 -0800
+Subject: [PATCH 4/5] x86: Introduce pci_map_biosrom()
+
+commit 5d94e81f69d4b1d1102d3ab557ce0a817c11fbbb upstream.
+
+The isci driver needs to retrieve its preboot OROM image which contains
+necessary runtime parameters like platform specific sas addresses and
+phy configuration. There is no ROM BAR associated with this area,
+instead we will need to scan legacy expansion ROM space.
+
+1/ Promote the probe_roms_32 implementation to x86-64
+2/ Add a facility to find and map an adapter rom by pci device (according to
+ PCI Firmware Specification Revision 3.0)
+
+Signed-off-by: Dave Jiang <dave.jiang at intel.com>
+LKML-Reference: <20110308183226.6246.90354.stgit at localhost6.localdomain6>
+Signed-off-by: Dan Williams <dan.j.williams at intel.com>
+Signed-off-by: H. Peter Anvin <hpa at linux.intel.com>
+---
+ arch/x86/include/asm/probe_roms.h | 8 +
+ arch/x86/include/asm/setup.h | 2 +-
+ arch/x86/kernel/Makefile | 2 +-
+ arch/x86/kernel/head32.c | 1 -
+ arch/x86/kernel/probe_roms.c | 267 +++++++++++++++++++++++++++++++++++++
+ arch/x86/kernel/probe_roms_32.c | 166 -----------------------
+ arch/x86/kernel/x86_init.c | 2 +-
+ 7 files changed, 278 insertions(+), 170 deletions(-)
+ create mode 100644 arch/x86/include/asm/probe_roms.h
+ create mode 100644 arch/x86/kernel/probe_roms.c
+ delete mode 100644 arch/x86/kernel/probe_roms_32.c
+
+diff --git a/arch/x86/include/asm/probe_roms.h b/arch/x86/include/asm/probe_roms.h
+new file mode 100644
+index 0000000..4950a0b
+--- /dev/null
++++ b/arch/x86/include/asm/probe_roms.h
+@@ -0,0 +1,8 @@
++#ifndef _PROBE_ROMS_H_
++#define _PROBE_ROMS_H_
++struct pci_dev;
++
++extern void __iomem *pci_map_biosrom(struct pci_dev *pdev);
++extern void pci_unmap_biosrom(void __iomem *rom);
++extern size_t pci_biosrom_size(struct pci_dev *pdev);
++#endif
+diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
+index fee81d0..0fbcc1e 100644
+--- a/arch/x86/include/asm/setup.h
++++ b/arch/x86/include/asm/setup.h
+@@ -95,10 +95,10 @@ void *extend_brk(size_t size, size_t align);
+ : : "i" (sz)); \
+ }
+
++extern void probe_roms(void);
+ #ifdef __i386__
+
+ void __init i386_start_kernel(void);
+-extern void probe_roms(void);
+
+ #else
+ void __init x86_64_start_kernel(char *real_mode);
+diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
+index d1911ab..24010a9 100644
+--- a/arch/x86/kernel/Makefile
++++ b/arch/x86/kernel/Makefile
+@@ -36,7 +36,7 @@ obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
+ obj-y += time.o ioport.o ldt.o dumpstack.o
+ obj-y += setup.o x86_init.o i8259.o irqinit.o
+ obj-$(CONFIG_X86_VISWS) += visws_quirks.o
+-obj-$(CONFIG_X86_32) += probe_roms_32.o
++obj-y += probe_roms.o
+ obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
+ obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
+ obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o
+diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
+index 4f8e250..59e0ec0 100644
+--- a/arch/x86/kernel/head32.c
++++ b/arch/x86/kernel/head32.c
+@@ -20,7 +20,6 @@
+ static void __init i386_default_early_setup(void)
+ {
+ /* Initilize 32bit specific setup functions */
+- x86_init.resources.probe_roms = probe_roms;
+ x86_init.resources.reserve_resources = i386_reserve_resources;
+ x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
+
+diff --git a/arch/x86/kernel/probe_roms.c b/arch/x86/kernel/probe_roms.c
+new file mode 100644
+index 0000000..ba0a4cc
+--- /dev/null
++++ b/arch/x86/kernel/probe_roms.c
+@@ -0,0 +1,267 @@
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/uaccess.h>
++#include <linux/mmzone.h>
++#include <linux/ioport.h>
++#include <linux/seq_file.h>
++#include <linux/console.h>
++#include <linux/init.h>
++#include <linux/edd.h>
++#include <linux/dmi.h>
++#include <linux/pfn.h>
++#include <linux/pci.h>
++#include <asm/pci-direct.h>
++
++
++#include <asm/e820.h>
++#include <asm/mmzone.h>
++#include <asm/setup.h>
++#include <asm/sections.h>
++#include <asm/io.h>
++#include <asm/setup_arch.h>
++
++static struct resource system_rom_resource = {
++ .name = "System ROM",
++ .start = 0xf0000,
++ .end = 0xfffff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++static struct resource extension_rom_resource = {
++ .name = "Extension ROM",
++ .start = 0xe0000,
++ .end = 0xeffff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++static struct resource adapter_rom_resources[] = { {
++ .name = "Adapter ROM",
++ .start = 0xc8000,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++} };
++
++static struct resource video_rom_resource = {
++ .name = "Video ROM",
++ .start = 0xc0000,
++ .end = 0xc7fff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++/* does this oprom support the given pci device, or any of the devices
++ * that the driver supports?
++ */
++static bool match_id(struct pci_dev *pdev, unsigned short vendor, unsigned short device)
++{
++ struct pci_driver *drv = pdev->driver;
++ const struct pci_device_id *id;
++
++ if (pdev->vendor == vendor && pdev->device == device)
++ return true;
++
++ for (id = drv ? drv->id_table : NULL; id && id->vendor; id++)
++ if (id->vendor == vendor && id->device == device)
++ break;
++
++ return id && id->vendor;
++}
++
++static bool probe_list(struct pci_dev *pdev, unsigned short vendor,
++ const unsigned char *rom_list)
++{
++ unsigned short device;
++
++ do {
++ if (probe_kernel_address(rom_list, device) != 0)
++ device = 0;
++
++ if (device && match_id(pdev, vendor, device))
++ break;
++
++ rom_list += 2;
++ } while (device);
++
++ return !!device;
++}
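++
++/* Scan the legacy adapter ROM ranges for an option ROM matching @pdev.
++ * Per the PCI Firmware Specification rev 3.0, offset 0x18 of the ROM
++ * image points to the PCI data structure, which holds the vendor id at
++ * +0x4, the device id at +0x6, an optional device-list pointer at +0x8
++ * and the structure revision at +0xc.
++ */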
++
++static struct resource *find_oprom(struct pci_dev *pdev)
++{
++ struct resource *oprom = NULL;
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(adapter_rom_resources); i++) {
++ struct resource *res = &adapter_rom_resources[i];
++ unsigned short offset, vendor, device, list, rev;
++ const unsigned char *rom;
++
++ if (res->end == 0)
++ break;
++
++ rom = isa_bus_to_virt(res->start);
++ if (probe_kernel_address(rom + 0x18, offset) != 0)
++ continue;
++
++ if (probe_kernel_address(rom + offset + 0x4, vendor) != 0)
++ continue;
++
++ if (probe_kernel_address(rom + offset + 0x6, device) != 0)
++ continue;
++
++ if (match_id(pdev, vendor, device)) {
++ oprom = res;
++ break;
++ }
++
++ if (probe_kernel_address(rom + offset + 0x8, list) == 0 &&
++ probe_kernel_address(rom + offset + 0xc, rev) == 0 &&
++ rev >= 3 && list &&
++ probe_list(pdev, vendor, rom + offset + list)) {
++ oprom = res;
++ break;
++ }
++ }
++
++ return oprom;
++}
++
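++/**
++ * pci_map_biosrom() - map the legacy option ROM image for a device
++ * @pdev: pci device owning the option ROM
++ *
++ * Returns a pointer to the mapped ROM image, or NULL when no matching
++ * option ROM is found. The mapping is released with pci_unmap_biosrom().
++ */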
++void __iomem *pci_map_biosrom(struct pci_dev *pdev)
++{
++ struct resource *oprom = find_oprom(pdev);
++
++ if (!oprom)
++ return NULL;
++
++ return ioremap(oprom->start, resource_size(oprom));
++}
++EXPORT_SYMBOL(pci_map_biosrom);
++
++void pci_unmap_biosrom(void __iomem *image)
++{
++ iounmap(image);
++}
++EXPORT_SYMBOL(pci_unmap_biosrom);
++
++size_t pci_biosrom_size(struct pci_dev *pdev)
++{
++ struct resource *oprom = find_oprom(pdev);
++
++ return oprom ? resource_size(oprom) : 0;
++}
++EXPORT_SYMBOL(pci_biosrom_size);
++
++#define ROMSIGNATURE 0xaa55
++
++static int __init romsignature(const unsigned char *rom)
++{
++ const unsigned short * const ptr = (const unsigned short *)rom;
++ unsigned short sig;
++
++ return probe_kernel_address(ptr, sig) == 0 && sig == ROMSIGNATURE;
++}
++
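++/* A valid ROM image sums to zero (mod 256) over its advertised length;
++ * probe_kernel_address() is used so that scanning unmapped legacy
++ * ranges fails gracefully instead of faulting.
++ */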
++static int __init romchecksum(const unsigned char *rom, unsigned long length)
++{
++ unsigned char sum, c;
++
++ for (sum = 0; length && probe_kernel_address(rom++, c) == 0; length--)
++ sum += c;
++ return !length && !sum;
++}
++
++void __init probe_roms(void)
++{
++ const unsigned char *rom;
++ unsigned long start, length, upper;
++ unsigned char c;
++ int i;
++
++ /* video rom */
++ upper = adapter_rom_resources[0].start;
++ for (start = video_rom_resource.start; start < upper; start += 2048) {
++ rom = isa_bus_to_virt(start);
++ if (!romsignature(rom))
++ continue;
++
++ video_rom_resource.start = start;
++
++ if (probe_kernel_address(rom + 2, c) != 0)
++ continue;
++
++ /* 0 < length <= 0x7f * 512, historically */
++ length = c * 512;
++
++ /* if checksum okay, trust length byte */
++ if (length && romchecksum(rom, length))
++ video_rom_resource.end = start + length - 1;
++
++ request_resource(&iomem_resource, &video_rom_resource);
++ break;
++ }
++
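++	/* Round up past the video ROM to the next 2K boundary; the
++	 * adapter ROM scan below probes only on 2K boundaries.
++	 */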
++ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
++ if (start < upper)
++ start = upper;
++
++ /* system rom */
++ request_resource(&iomem_resource, &system_rom_resource);
++ upper = system_rom_resource.start;
++
++ /* check for extension rom (ignore length byte!) */
++ rom = isa_bus_to_virt(extension_rom_resource.start);
++ if (romsignature(rom)) {
++ length = extension_rom_resource.end - extension_rom_resource.start + 1;
++ if (romchecksum(rom, length)) {
++ request_resource(&iomem_resource, &extension_rom_resource);
++ upper = extension_rom_resource.start;
++ }
++ }
++
++ /* check for adapter roms on 2k boundaries */
++ for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper; start += 2048) {
++ rom = isa_bus_to_virt(start);
++ if (!romsignature(rom))
++ continue;
++
++ if (probe_kernel_address(rom + 2, c) != 0)
++ continue;
++
++ /* 0 < length <= 0x7f * 512, historically */
++ length = c * 512;
++
++ /* but accept any length that fits if checksum okay */
++ if (!length || start + length > upper || !romchecksum(rom, length))
++ continue;
++
++ adapter_rom_resources[i].start = start;
++ adapter_rom_resources[i].end = start + length - 1;
++ request_resource(&iomem_resource, &adapter_rom_resources[i]);
++
++ start = adapter_rom_resources[i++].end & ~2047UL;
++ }
++}
++
+diff --git a/arch/x86/kernel/probe_roms_32.c b/arch/x86/kernel/probe_roms_32.c
+deleted file mode 100644
+index 071e7fe..0000000
+--- a/arch/x86/kernel/probe_roms_32.c
++++ /dev/null
+@@ -1,166 +0,0 @@
+-#include <linux/sched.h>
+-#include <linux/mm.h>
+-#include <linux/uaccess.h>
+-#include <linux/mmzone.h>
+-#include <linux/ioport.h>
+-#include <linux/seq_file.h>
+-#include <linux/console.h>
+-#include <linux/init.h>
+-#include <linux/edd.h>
+-#include <linux/dmi.h>
+-#include <linux/pfn.h>
+-#include <linux/pci.h>
+-#include <asm/pci-direct.h>
+-
+-
+-#include <asm/e820.h>
+-#include <asm/mmzone.h>
+-#include <asm/setup.h>
+-#include <asm/sections.h>
+-#include <asm/io.h>
+-#include <asm/setup_arch.h>
+-
+-static struct resource system_rom_resource = {
+- .name = "System ROM",
+- .start = 0xf0000,
+- .end = 0xfffff,
+- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+-};
+-
+-static struct resource extension_rom_resource = {
+- .name = "Extension ROM",
+- .start = 0xe0000,
+- .end = 0xeffff,
+- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+-};
+-
+-static struct resource adapter_rom_resources[] = { {
+- .name = "Adapter ROM",
+- .start = 0xc8000,
+- .end = 0,
+- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+-}, {
+- .name = "Adapter ROM",
+- .start = 0,
+- .end = 0,
+- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+-}, {
+- .name = "Adapter ROM",
+- .start = 0,
+- .end = 0,
+- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+-}, {
+- .name = "Adapter ROM",
+- .start = 0,
+- .end = 0,
+- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+-}, {
+- .name = "Adapter ROM",
+- .start = 0,
+- .end = 0,
+- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+-}, {
+- .name = "Adapter ROM",
+- .start = 0,
+- .end = 0,
+- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+-} };
+-
+-static struct resource video_rom_resource = {
+- .name = "Video ROM",
+- .start = 0xc0000,
+- .end = 0xc7fff,
+- .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+-};
+-
+-#define ROMSIGNATURE 0xaa55
+-
+-static int __init romsignature(const unsigned char *rom)
+-{
+- const unsigned short * const ptr = (const unsigned short *)rom;
+- unsigned short sig;
+-
+- return probe_kernel_address(ptr, sig) == 0 && sig == ROMSIGNATURE;
+-}
+-
+-static int __init romchecksum(const unsigned char *rom, unsigned long length)
+-{
+- unsigned char sum, c;
+-
+- for (sum = 0; length && probe_kernel_address(rom++, c) == 0; length--)
+- sum += c;
+- return !length && !sum;
+-}
+-
+-void __init probe_roms(void)
+-{
+- const unsigned char *rom;
+- unsigned long start, length, upper;
+- unsigned char c;
+- int i;
+-
+- /* video rom */
+- upper = adapter_rom_resources[0].start;
+- for (start = video_rom_resource.start; start < upper; start += 2048) {
+- rom = isa_bus_to_virt(start);
+- if (!romsignature(rom))
+- continue;
+-
+- video_rom_resource.start = start;
+-
+- if (probe_kernel_address(rom + 2, c) != 0)
+- continue;
+-
+- /* 0 < length <= 0x7f * 512, historically */
+- length = c * 512;
+-
+- /* if checksum okay, trust length byte */
+- if (length && romchecksum(rom, length))
+- video_rom_resource.end = start + length - 1;
+-
+- request_resource(&iomem_resource, &video_rom_resource);
+- break;
+- }
+-
+- start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
+- if (start < upper)
+- start = upper;
+-
+- /* system rom */
+- request_resource(&iomem_resource, &system_rom_resource);
+- upper = system_rom_resource.start;
+-
+- /* check for extension rom (ignore length byte!) */
+- rom = isa_bus_to_virt(extension_rom_resource.start);
+- if (romsignature(rom)) {
+- length = extension_rom_resource.end - extension_rom_resource.start + 1;
+- if (romchecksum(rom, length)) {
+- request_resource(&iomem_resource, &extension_rom_resource);
+- upper = extension_rom_resource.start;
+- }
+- }
+-
+- /* check for adapter roms on 2k boundaries */
+- for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper; start += 2048) {
+- rom = isa_bus_to_virt(start);
+- if (!romsignature(rom))
+- continue;
+-
+- if (probe_kernel_address(rom + 2, c) != 0)
+- continue;
+-
+- /* 0 < length <= 0x7f * 512, historically */
+- length = c * 512;
+-
+- /* but accept any length that fits if checksum okay */
+- if (!length || start + length > upper || !romchecksum(rom, length))
+- continue;
+-
+- adapter_rom_resources[i].start = start;
+- adapter_rom_resources[i].end = start + length - 1;
+- request_resource(&iomem_resource, &adapter_rom_resources[i]);
+-
+- start = adapter_rom_resources[i++].end & ~2047UL;
+- }
+-}
+-
+diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
+index 4449a4a..492c737 100644
+--- a/arch/x86/kernel/x86_init.c
++++ b/arch/x86/kernel/x86_init.c
+@@ -26,7 +26,7 @@ void __init x86_init_pgd_noop(pgd_t *unused) { }
+ struct x86_init_ops x86_init __initdata = {
+
+ .resources = {
+- .probe_roms = x86_init_noop,
++ .probe_roms = probe_roms,
+ .reserve_resources = reserve_standard_io_resources,
+ .memory_setup = default_machine_specific_memory_setup,
+ },
+--
+1.7.7.3
+
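
Note on the interface added above: find_oprom() walks the "Adapter ROM"
resources that probe_roms() registers at boot, follows the pointer at ROM
offset 0x18 to the PCI data structure, and matches the vendor/device IDs
found there against the given pci_dev. The three exported helpers then let
a driver map, size and unmap its own device's option ROM image. A minimal
usage sketch follows -- it is not part of the patch; the function name,
the copy-out behaviour and the header path are illustrative assumptions,
and the real consumer (the isci driver imported later in this series)
parses the mapped image in place rather than copying it out:

  #include <linux/pci.h>
  #include <linux/io.h>
  #include <linux/kernel.h>
  #include <linux/errno.h>
  #include <asm/probe_roms.h>	/* header name assumed, per the upstream patch */

  /* Hypothetical example: copy a device's option ROM into buf. */
  static int example_copy_biosrom(struct pci_dev *pdev,
  				void *buf, size_t buflen)
  {
  	void *rom = pci_map_biosrom(pdev);	/* NULL if no matching ROM */
  	size_t size = pci_biosrom_size(pdev);

  	if (!rom)
  		return -ENODEV;

  	/* ROM is I/O memory; copy through the io accessor, never memcpy() */
  	memcpy_fromio(buf, rom, min(size, buflen));
  	pci_unmap_biosrom(rom);
  	return 0;
  }
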
Modified: dists/squeeze/linux-2.6/debian/patches/series/40
==============================================================================
--- dists/squeeze/linux-2.6/debian/patches/series/40 Wed Dec 21 03:37:24 2011 (r18402)
+++ dists/squeeze/linux-2.6/debian/patches/series/40 Wed Dec 21 06:29:16 2011 (r18403)
@@ -34,3 +34,8 @@
+ bugfix/all/ipv6-Allow-inet6_dump_addr-to-handle-more-than-64-ad.patch
+ bugfix/all/stable/2.6.32.50.patch
++ features/all/kernel.h-add-BUILD_BUG_ON_NOT_POWER_OF_2.patch
++ features/all/libsas-fix-definition-of-wideport-include-local-sas-.patch
++ debian/libsas-Avoid-ABI-change-from-addition-of-sas_ha_stru.patch
++ features/x86/x86-Introduce-pci_map_biosrom.patch
++ features/x86/isci-Import-from-Linux-3.1.patch
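(Ordering within the series matters: the two libsas fixes and the
ABI-preserving patch must apply first, and x86-Introduce-pci_map_biosrom.patch
supplies the pci_map_biosrom()/pci_biosrom_size() interface that the
imported isci driver then uses to locate its parameters in the option ROM.)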