r4058 - in branches/dist/sarge-security/kernel-2.4/s390: . kernel-image-2.4.27-s390 kernel-image-2.4.27-s390/config kernel-image-2.4.27-s390/debian kernel-patch-2.4.27-s390 kernel-patch-2.4.27-s390/debian
Bastian Blank
waldi at costa.debian.org
Fri Aug 26 17:16:24 UTC 2005
Author: waldi
Date: 2005-08-26 17:15:28 +0000 (Fri, 26 Aug 2005)
New Revision: 4058
Added:
branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/
branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/config/
branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/config/s390
branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/config/s390x
branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/
branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/changelog
branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/compat
branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/control
branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/copyright
branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/rules
branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/post-install
branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/
branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/Makefile
branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/
branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/changelog
branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/control
branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/copyright
branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/kpatches
branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/rules
branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/linux-2.4.27-s390.diff
Log:
/branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390,
/branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390: Import.
Added: branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/config/s390
===================================================================
--- branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/config/s390 2005-08-26 17:09:37 UTC (rev 4057)
+++ branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/config/s390 2005-08-26 17:15:28 UTC (rev 4058)
@@ -0,0 +1,583 @@
+#
+# Automatically generated make config: don't edit
+#
+# CONFIG_ISA is not set
+# CONFIG_EISA is not set
+# CONFIG_MCA is not set
+CONFIG_UID16=y
+# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+# CONFIG_GENERIC_BUST_SPINLOCK is not set
+CONFIG_ARCH_S390=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+# CONFIG_MODVERSIONS is not set
+CONFIG_KMOD=y
+
+#
+# Processor type and features
+#
+CONFIG_SMP=y
+CONFIG_MATHEMU=y
+CONFIG_NR_CPUS=32
+
+#
+# General setup
+#
+CONFIG_FAST_IRQ=y
+CONFIG_MACHCHK_WARNING=y
+CONFIG_CHSC=y
+CONFIG_QDIO=y
+# CONFIG_QDIO_PERF_STATS is not set
+CONFIG_IPL=y
+# CONFIG_IPL_TAPE is not set
+CONFIG_IPL_VM=y
+CONFIG_NET=y
+CONFIG_SYSVIPC=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_SYSCTL=y
+CONFIG_KCORE_ELF=y
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_MISC=m
+# CONFIG_PROCESS_DEBUG is not set
+CONFIG_PFAULT=y
+# CONFIG_SHARED_KERNEL is not set
+CONFIG_NO_IDLE_HZ=y
+# CONFIG_NO_IDLE_HZ_INIT is not set
+CONFIG_VIRT_TIMER=y
+CONFIG_APPLDATA_BASE=y
+CONFIG_APPLDATA_MEM=m
+CONFIG_APPLDATA_OS=m
+CONFIG_APPLDATA_NET_SUM=m
+CONFIG_CMM=y
+CONFIG_CMM_PROC=y
+
+#
+# SCSI support
+#
+CONFIG_SCSI=m
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+CONFIG_SD_EXTRA_DEVS=40
+CONFIG_CHR_DEV_ST=m
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=m
+# CONFIG_BLK_DEV_SR_VENDOR is not set
+CONFIG_SR_EXTRA_DEVS=10
+# CONFIG_CHR_DEV_SCH is not set
+CONFIG_CHR_DEV_SG=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+CONFIG_SCSI_DEBUG_QUEUES=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+
+#
+# SCSI low-level drivers
+#
+CONFIG_ZFCP=m
+CONFIG_ZFCP_HBAAPI=m
+
+#
+# PCMCIA SCSI adapter support
+#
+# CONFIG_SCSI_PCMCIA is not set
+
+#
+# Block device drivers
+#
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=24576
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_XPRAM=m
+CONFIG_DCSSBLK=m
+
+#
+# S/390 block device drivers
+#
+CONFIG_DASD=y
+CONFIG_DASD_ECKD=y
+CONFIG_DASD_FBA=y
+# CONFIG_DASD_DIAG is not set
+CONFIG_S390_CMF=m
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID5=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_BLK_DEV_LVM=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_BLK_DEV_DM_MIRROR=m
+
+#
+# Character device drivers
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_UNIX98_PTY_COUNT=256
+
+#
+# S/390 character device drivers
+#
+CONFIG_TN3270=y
+CONFIG_TN3270_CONSOLE=y
+CONFIG_TN3215=y
+CONFIG_TN3215_CONSOLE=y
+CONFIG_SCLP=y
+CONFIG_SCLP_TTY=y
+CONFIG_SCLP_CONSOLE=y
+CONFIG_SCLP_VT220_TTY=y
+CONFIG_SCLP_VT220_CONSOLE=y
+# CONFIG_SCLP_CPI is not set
+CONFIG_S390_TAPE=m
+
+#
+# S/390 tape interface support
+#
+CONFIG_S390_TAPE_BLOCK=y
+
+#
+# S/390 tape hardware support
+#
+CONFIG_S390_TAPE_34XX=m
+# CONFIG_VMLOGRDR is not set
+
+#
+# Network device drivers
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+CONFIG_BONDING=m
+CONFIG_EQUALIZER=m
+CONFIG_TUN=m
+CONFIG_NET_ETHERNET=y
+CONFIG_TR=y
+# CONFIG_C7000 is not set
+CONFIG_FDDI=y
+
+#
+# S/390 network device drivers
+#
+CONFIG_CHANDEV=y
+CONFIG_HOTPLUG=y
+CONFIG_LCS=m
+CONFIG_QETH=m
+
+#
+# Gigabit Ethernet default settings
+#
+CONFIG_QETH_IPV6=y
+CONFIG_QETH_VLAN=y
+# CONFIG_QETH_PERF_STATS is not set
+CONFIG_CTC=m
+CONFIG_MPC=m
+CONFIG_IUCV=m
+CONFIG_NETIUCV=m
+CONFIG_SMSGIUCV=m
+
+#
+# Miscellaneous
+#
+CONFIG_Z90CRYPT=m
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+# CONFIG_NETLINK_DEV is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_FILTER=y
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+CONFIG_NET_IPIP=m
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_INET_ECN is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+CONFIG_IP_NF_FTP=m
+# CONFIG_IP_NF_AMANDA is not set
+# CONFIG_IP_NF_TFTP is not set
+# CONFIG_IP_NF_IRC is not set
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_MAC=m
+# CONFIG_IP_NF_MATCH_PKTTYPE is not set
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_RECENT=m
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_DSCP is not set
+# CONFIG_IP_NF_MATCH_AH_ESP is not set
+# CONFIG_IP_NF_MATCH_LENGTH is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+CONFIG_IP_NF_MATCH_TCPMSS=m
+# CONFIG_IP_NF_MATCH_HELPER is not set
+CONFIG_IP_NF_MATCH_STATE=m
+# CONFIG_IP_NF_MATCH_CONNTRACK is not set
+CONFIG_IP_NF_MATCH_UNCLEAN=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_MIRROR=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+# CONFIG_IP_NF_NAT_LOCAL is not set
+# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+# CONFIG_IP_NF_TARGET_ECN is not set
+# CONFIG_IP_NF_TARGET_DSCP is not set
+CONFIG_IP_NF_TARGET_MARK=m
+CONFIG_IP_NF_TARGET_LOG=m
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_IP_NF_TARGET_TCPMSS=m
+# CONFIG_IP_NF_ARPTABLES is not set
+# CONFIG_IP_NF_COMPAT_IPCHAINS is not set
+# CONFIG_IP_NF_COMPAT_IPFWADM is not set
+
+#
+# IP: Virtual Server Configuration
+#
+CONFIG_IP_VS=m
+# CONFIG_IP_VS_DEBUG is not set
+CONFIG_IP_VS_TAB_BITS=12
+
+#
+# IPVS scheduler
+#
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+
+#
+# IPVS application helper
+#
+CONFIG_IP_VS_FTP=m
+CONFIG_IPV6=y
+# CONFIG_IPV6_PRIVACY is not set
+
+#
+# IPv6: Netfilter Configuration
+#
+# CONFIG_IP6_NF_QUEUE is not set
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_LIMIT=m
+# CONFIG_IP6_NF_MATCH_MAC is not set
+# CONFIG_IP6_NF_MATCH_RT is not set
+# CONFIG_IP6_NF_MATCH_OPTS is not set
+# CONFIG_IP6_NF_MATCH_FRAG is not set
+# CONFIG_IP6_NF_MATCH_HL is not set
+# CONFIG_IP6_NF_MATCH_MULTIPORT is not set
+# CONFIG_IP6_NF_MATCH_OWNER is not set
+CONFIG_IP6_NF_MATCH_MARK=m
+# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set
+# CONFIG_IP6_NF_MATCH_AHESP is not set
+# CONFIG_IP6_NF_MATCH_LENGTH is not set
+# CONFIG_IP6_NF_MATCH_EUI64 is not set
+CONFIG_IP6_NF_FILTER=m
+# CONFIG_IP6_NF_TARGET_LOG is not set
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_TARGET_MARK=m
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_TUNNEL is not set
+CONFIG_SHARED_IPV6_CARDS=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_KHTTPD is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+CONFIG_SCTP_HMAC_SHA1=y
+# CONFIG_SCTP_HMAC_MD5 is not set
+# CONFIG_ATM is not set
+CONFIG_VLAN_8021Q=m
+
+#
+#
+#
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+
+#
+# Appletalk devices
+#
+# CONFIG_DEV_APPLETALK is not set
+# CONFIG_DECNET is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_LLC is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
+# CONFIG_NET_HW_FLOWCONTROL is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+
+#
+# File systems
+#
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=m
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_ADFS_FS is not set
+# CONFIG_ADFS_FS_RW is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BEFS_DEBUG is not set
+# CONFIG_BFS_FS is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_XATTR_SHARING=y
+CONFIG_EXT3_FS_XATTR_USER=y
+CONFIG_EXT3_FS_XATTR_TRUSTED=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_FAT_FS is not set
+# CONFIG_MSDOS_FS is not set
+# CONFIG_UMSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+CONFIG_TMPFS=y
+CONFIG_RAMFS=y
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+# CONFIG_ZISOFS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_NTFS_FS is not set
+# CONFIG_NTFS_RW is not set
+# CONFIG_HPFS_FS is not set
+CONFIG_PROC_FS=y
+CONFIG_DEVFS_FS=y
+# CONFIG_DEVFS_MOUNT is not set
+# CONFIG_DEVFS_DEBUG is not set
+CONFIG_DEVPTS_FS=y
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX4FS_RW is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_XIP2FS=m
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XATTR_SHARING is not set
+# CONFIG_EXT2_FS_XATTR_USER is not set
+# CONFIG_EXT2_FS_XATTR_TRUSTED is not set
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UDF_FS is not set
+# CONFIG_UDF_RW is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_UFS_FS_WRITE is not set
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_TRACE is not set
+# CONFIG_XFS_DEBUG is not set
+
+#
+# Network File Systems
+#
+# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_ROOT_NFS is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_TCP=y
+CONFIG_SUNRPC=m
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+# CONFIG_SMB_UNIX is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+# CONFIG_NCPFS_NFS_NS is not set
+# CONFIG_NCPFS_OS2_NS is not set
+# CONFIG_NCPFS_SMALLDOS is not set
+# CONFIG_NCPFS_NLS is not set
+# CONFIG_NCPFS_EXTRAS is not set
+# CONFIG_ZISOFS_FS is not set
+CONFIG_FS_MBCACHE=y
+CONFIG_FS_POSIX_ACL=y
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+CONFIG_IBM_PARTITION=y
+# CONFIG_MAC_PARTITION is not set
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SMB_NLS=y
+CONFIG_NLS=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Kernel hacking
+#
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOG_BUF_SHIFT=0
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_TEA=m
+# CONFIG_CRYPTO_ARC4 is not set
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC32 is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
+# CONFIG_FW_LOADER is not set
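The s390x configuration added below is nearly identical to this 31-bit one; only a
handful of architecture options differ. As a quick, illustrative sketch (assuming both
config files are checked out side by side), the diverging lines can be listed with:

    # List every line that differs between the two flavour configs
    # (the [^-+] filters out the ---/+++ diff headers); expect the
    # 31-bit-only CONFIG_UID16 and CONFIG_MATHEMU on one side and
    # CONFIG_ARCH_S390X, CONFIG_S390_SUPPORT and CONFIG_BINFMT_ELF32
    # on the other, plus some reordered sections.
    diff -u config/s390 config/s390x | grep '^[-+][^-+]'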
Added: branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/config/s390x
===================================================================
--- branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/config/s390x 2005-08-26 17:09:37 UTC (rev 4057)
+++ branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/config/s390x 2005-08-26 17:15:28 UTC (rev 4058)
@@ -0,0 +1,583 @@
+#
+# Automatically generated make config: don't edit
+#
+# CONFIG_ISA is not set
+# CONFIG_EISA is not set
+# CONFIG_MCA is not set
+# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+# CONFIG_GENERIC_BUST_SPINLOCK is not set
+CONFIG_ARCH_S390=y
+CONFIG_ARCH_S390X=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+
+#
+# Processor type and features
+#
+CONFIG_SMP=y
+CONFIG_NR_CPUS=32
+CONFIG_S390_SUPPORT=y
+CONFIG_BINFMT_ELF32=y
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+# CONFIG_MODVERSIONS is not set
+CONFIG_KMOD=y
+
+#
+# General setup
+#
+CONFIG_FAST_IRQ=y
+CONFIG_MACHCHK_WARNING=y
+CONFIG_CHSC=y
+CONFIG_QDIO=y
+# CONFIG_QDIO_PERF_STATS is not set
+CONFIG_IPL=y
+# CONFIG_IPL_TAPE is not set
+CONFIG_IPL_VM=y
+CONFIG_NET=y
+CONFIG_SYSVIPC=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_SYSCTL=y
+CONFIG_KCORE_ELF=y
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_MISC=m
+# CONFIG_PROCESS_DEBUG is not set
+CONFIG_PFAULT=y
+# CONFIG_SHARED_KERNEL is not set
+CONFIG_NO_IDLE_HZ=y
+# CONFIG_NO_IDLE_HZ_INIT is not set
+CONFIG_VIRT_TIMER=y
+CONFIG_APPLDATA_BASE=y
+CONFIG_APPLDATA_MEM=m
+CONFIG_APPLDATA_OS=m
+CONFIG_APPLDATA_NET_SUM=m
+CONFIG_CMM=y
+CONFIG_CMM_PROC=y
+
+#
+# SCSI support
+#
+CONFIG_SCSI=m
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+CONFIG_SD_EXTRA_DEVS=40
+CONFIG_CHR_DEV_ST=m
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=m
+# CONFIG_BLK_DEV_SR_VENDOR is not set
+CONFIG_SR_EXTRA_DEVS=10
+# CONFIG_CHR_DEV_SCH is not set
+CONFIG_CHR_DEV_SG=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+CONFIG_SCSI_DEBUG_QUEUES=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+
+#
+# SCSI low-level drivers
+#
+CONFIG_ZFCP=m
+CONFIG_ZFCP_HBAAPI=m
+
+#
+# PCMCIA SCSI adapter support
+#
+# CONFIG_SCSI_PCMCIA is not set
+
+#
+# Block device drivers
+#
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=24576
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_XPRAM=m
+CONFIG_DCSSBLK=m
+
+#
+# S/390 block device drivers
+#
+CONFIG_DASD=y
+CONFIG_DASD_ECKD=y
+CONFIG_DASD_FBA=y
+CONFIG_S390_CMF=m
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID5=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_BLK_DEV_LVM=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_BLK_DEV_DM_MIRROR=m
+
+#
+# Character device drivers
+#
+CONFIG_UNIX98_PTYS=y
+CONFIG_UNIX98_PTY_COUNT=256
+
+#
+# S/390 character device drivers
+#
+CONFIG_TN3270=y
+CONFIG_TN3270_CONSOLE=y
+CONFIG_TN3215=y
+CONFIG_TN3215_CONSOLE=y
+CONFIG_SCLP=y
+CONFIG_SCLP_TTY=y
+CONFIG_SCLP_CONSOLE=y
+CONFIG_SCLP_VT220_TTY=y
+CONFIG_SCLP_VT220_CONSOLE=y
+# CONFIG_SCLP_CPI is not set
+CONFIG_S390_TAPE=m
+
+#
+# S/390 tape interface support
+#
+CONFIG_S390_TAPE_BLOCK=y
+
+#
+# S/390 tape hardware support
+#
+CONFIG_S390_TAPE_34XX=m
+# CONFIG_VMLOGRDR is not set
+
+#
+# Network device drivers
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+CONFIG_BONDING=m
+CONFIG_EQUALIZER=m
+CONFIG_TUN=m
+CONFIG_NET_ETHERNET=y
+CONFIG_TR=y
+# CONFIG_C7000 is not set
+CONFIG_FDDI=y
+
+#
+# S/390 network device drivers
+#
+CONFIG_CHANDEV=y
+CONFIG_HOTPLUG=y
+CONFIG_LCS=m
+CONFIG_QETH=m
+
+#
+# Gigabit Ethernet default settings
+#
+CONFIG_QETH_IPV6=y
+CONFIG_QETH_VLAN=y
+# CONFIG_QETH_PERF_STATS is not set
+CONFIG_CTC=m
+CONFIG_MPC=m
+CONFIG_IUCV=m
+CONFIG_NETIUCV=m
+CONFIG_SMSGIUCV=m
+
+#
+# Miscellaneous
+#
+CONFIG_Z90CRYPT=m
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+# CONFIG_NETLINK_DEV is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_FILTER=y
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+CONFIG_NET_IPIP=m
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_INET_ECN is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+CONFIG_IP_NF_FTP=m
+# CONFIG_IP_NF_AMANDA is not set
+# CONFIG_IP_NF_TFTP is not set
+# CONFIG_IP_NF_IRC is not set
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_MAC=m
+# CONFIG_IP_NF_MATCH_PKTTYPE is not set
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_RECENT=m
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_DSCP is not set
+# CONFIG_IP_NF_MATCH_AH_ESP is not set
+# CONFIG_IP_NF_MATCH_LENGTH is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+CONFIG_IP_NF_MATCH_TCPMSS=m
+# CONFIG_IP_NF_MATCH_HELPER is not set
+CONFIG_IP_NF_MATCH_STATE=m
+# CONFIG_IP_NF_MATCH_CONNTRACK is not set
+CONFIG_IP_NF_MATCH_UNCLEAN=m
+CONFIG_IP_NF_MATCH_OWNER=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_MIRROR=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+# CONFIG_IP_NF_NAT_LOCAL is not set
+# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+# CONFIG_IP_NF_TARGET_ECN is not set
+# CONFIG_IP_NF_TARGET_DSCP is not set
+CONFIG_IP_NF_TARGET_MARK=m
+CONFIG_IP_NF_TARGET_LOG=m
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_IP_NF_TARGET_TCPMSS=m
+# CONFIG_IP_NF_ARPTABLES is not set
+# CONFIG_IP_NF_COMPAT_IPCHAINS is not set
+# CONFIG_IP_NF_COMPAT_IPFWADM is not set
+
+#
+# IP: Virtual Server Configuration
+#
+CONFIG_IP_VS=m
+# CONFIG_IP_VS_DEBUG is not set
+CONFIG_IP_VS_TAB_BITS=12
+
+#
+# IPVS scheduler
+#
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+
+#
+# IPVS application helper
+#
+CONFIG_IP_VS_FTP=m
+CONFIG_IPV6=y
+# CONFIG_IPV6_PRIVACY is not set
+
+#
+# IPv6: Netfilter Configuration
+#
+# CONFIG_IP6_NF_QUEUE is not set
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_LIMIT=m
+# CONFIG_IP6_NF_MATCH_MAC is not set
+# CONFIG_IP6_NF_MATCH_RT is not set
+# CONFIG_IP6_NF_MATCH_OPTS is not set
+# CONFIG_IP6_NF_MATCH_FRAG is not set
+# CONFIG_IP6_NF_MATCH_HL is not set
+# CONFIG_IP6_NF_MATCH_MULTIPORT is not set
+# CONFIG_IP6_NF_MATCH_OWNER is not set
+CONFIG_IP6_NF_MATCH_MARK=m
+# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set
+# CONFIG_IP6_NF_MATCH_AHESP is not set
+# CONFIG_IP6_NF_MATCH_LENGTH is not set
+# CONFIG_IP6_NF_MATCH_EUI64 is not set
+CONFIG_IP6_NF_FILTER=m
+# CONFIG_IP6_NF_TARGET_LOG is not set
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_TARGET_MARK=m
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_TUNNEL is not set
+CONFIG_SHARED_IPV6_CARDS=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_KHTTPD is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+CONFIG_SCTP_HMAC_SHA1=y
+# CONFIG_SCTP_HMAC_MD5 is not set
+# CONFIG_ATM is not set
+CONFIG_VLAN_8021Q=m
+
+#
+#
+#
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+
+#
+# Appletalk devices
+#
+# CONFIG_DEV_APPLETALK is not set
+# CONFIG_DECNET is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_LLC is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
+# CONFIG_NET_HW_FLOWCONTROL is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+
+#
+# File systems
+#
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=m
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
+# CONFIG_REISERFS_PROC_INFO is not set
+# CONFIG_ADFS_FS is not set
+# CONFIG_ADFS_FS_RW is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BEFS_DEBUG is not set
+# CONFIG_BFS_FS is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_XATTR_SHARING=y
+CONFIG_EXT3_FS_XATTR_USER=y
+CONFIG_EXT3_FS_XATTR_TRUSTED=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_FAT_FS is not set
+# CONFIG_MSDOS_FS is not set
+# CONFIG_UMSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+CONFIG_TMPFS=y
+CONFIG_RAMFS=y
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+# CONFIG_ZISOFS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_NTFS_FS is not set
+# CONFIG_NTFS_RW is not set
+# CONFIG_HPFS_FS is not set
+CONFIG_PROC_FS=y
+CONFIG_DEVFS_FS=y
+# CONFIG_DEVFS_MOUNT is not set
+# CONFIG_DEVFS_DEBUG is not set
+CONFIG_DEVPTS_FS=y
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX4FS_RW is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_XIP2FS=m
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XATTR_SHARING is not set
+# CONFIG_EXT2_FS_XATTR_USER is not set
+# CONFIG_EXT2_FS_XATTR_TRUSTED is not set
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UDF_FS is not set
+# CONFIG_UDF_RW is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_UFS_FS_WRITE is not set
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_TRACE is not set
+# CONFIG_XFS_DEBUG is not set
+
+#
+# Network File Systems
+#
+# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_ROOT_NFS is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_TCP=y
+CONFIG_SUNRPC=m
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+# CONFIG_SMB_UNIX is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_NCPFS_PACKET_SIGNING is not set
+# CONFIG_NCPFS_IOCTL_LOCKING is not set
+# CONFIG_NCPFS_STRONG is not set
+# CONFIG_NCPFS_NFS_NS is not set
+# CONFIG_NCPFS_OS2_NS is not set
+# CONFIG_NCPFS_SMALLDOS is not set
+# CONFIG_NCPFS_NLS is not set
+# CONFIG_NCPFS_EXTRAS is not set
+# CONFIG_ZISOFS_FS is not set
+CONFIG_FS_MBCACHE=y
+CONFIG_FS_POSIX_ACL=y
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+CONFIG_IBM_PARTITION=y
+# CONFIG_MAC_PARTITION is not set
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+CONFIG_SMB_NLS=y
+CONFIG_NLS=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Kernel hacking
+#
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOG_BUF_SHIFT=0
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=m
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_TEA=m
+# CONFIG_CRYPTO_ARC4 is not set
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC32 is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
+# CONFIG_FW_LOADER is not set
Added: branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/changelog
===================================================================
--- branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/changelog 2005-08-26 17:09:37 UTC (rev 4057)
+++ branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/changelog 2005-08-26 17:15:28 UTC (rev 4058)
@@ -0,0 +1,210 @@
+kernel-image-2.4.27-s390 (2.4.27-2sarge1) stable-security; urgency=high
+
+ * Use kernel-tree-2.4.27-10sarge1.
+
+ -- Bastian Blank <waldi at debian.org> Fri, 26 Aug 2005 17:04:45 +0000
+
+kernel-image-2.4.27-s390 (2.4.27-2) unstable; urgency=low
+
+ * Use new kernel-patch-2.4.27-s390.
+ * Use kernel-tree-2.4.27-8.
+ * debian/control
+ - Bump soname.
+
+ -- Bastian Blank <waldi at debian.org> Wed, 26 Jan 2005 13:36:28 +0100
+
+kernel-image-2.4.27-s390 (2.4.27-1) unstable; urgency=high
+
+ * New upstream release.
+ - Use kernel-tree-2.4.27-2 and kernel-patch-2.4.27-s390.
+ - CAN-2004-0497 (users could modify group ID of arbitrary files on
+ the system)
+ - CAN-2004-0415 (file offset pointer handling race)
+
+ -- Bastian Blank <waldi at debian.org> Tue, 17 Aug 2004 11:46:07 +0200
+
+kernel-image-2.4.26-s390 (2.4.26-1) unstable; urgency=low
+
+ * New upstream release.
+ - Use kernel-tree-2.4.26-1 and kernel-patch-2.4.26-s390.
+
+ -- Bastian Blank <waldi at debian.org> Sun, 25 Apr 2004 12:50:08 +0200
+
+kernel-image-2.4.25-s390 (2.4.25-2) unstable; urgency=low
+
+ * Use kernel-tree-2.4.25-3
+ - CAN-2004-0003
+ - CAN-2004-0010
+ - CAN-2004-0109
+ - CAN-2004-0177
+
+ -- Bastian Blank <waldi at debian.org> Wed, 14 Apr 2004 17:35:49 +0200
+
+kernel-image-2.4.25-s390 (2.4.25-1) unstable; urgency=low
+
+ * New upstream release.
+ - Use kernel-tree-2.4.25-1 and kernel-patch-2.4.25-s390.
+ * debian/control:
+ - Update.
+ * debian/rules:
+ - Enable s390x kernels.
+
+ -- Bastian Blank <waldi at debian.org> Sun, 04 Apr 2004 14:04:51 +0200
+
+kernel-image-2.4.21-s390 (2.4.21-2) unstable; urgency=low
+
+ * Rebuild against new kernel-patch.
+
+ -- Bastian Blank <waldi at debian.org> Sat, 06 Mar 2004 21:55:59 +0100
+
+kernel-image-2.4.21-s390 (2.4.21-1) unstable; urgency=low
+
+ * New upstream release.
+ - Use kernel-tree-2.4.21-7 and kernel-patch-2.4.21-s390.
+ * debian/control:
+ - Change Maintainer to debian-s390 at lists.debian.org.
+ - Add Bastian Blank and Jochen Röhrig to Uploaders.
+ - Update.
+
+ -- Bastian Blank <waldi at debian.org> Sat, 14 Feb 2004 21:36:58 +0100
+
+kernel-image-2.4.19-s390 (2.4.19-2) unstable; urgency=low
+
+ * Some restructuring by Bastian Blank <waldi at debian.org>:
+ - new udebs for debian installer
+ - support for 64-bit kernel on 31-bit (not yet enabled)
+
+ -- Jochen Röhrig <jr at debian.org> Thu, 28 Nov 2002 21:57:59 +0100
+
+kernel-image-2.4.19-s390 (2.4.19-1) unstable; urgency=high
+
+ * New upstream release.
+ * Replaced the 2.4.17 patches with the initial 2.4.19 patch from the
+ IBM Developerworks website (released on 2002.09.13).
+ This patch includes the source code for the qdio I/O driver, which
+ had so far been available only as an object-code-only module, and adds
+ support for the new zSeries FCP attachment for SCSI.
+ * Integrated a new kernel-patch from the IBM Developerworks
+ website which fixes a lot of problems (released on 2002.10.24).
+ * Integrated a new kernel-patch from the IBM Developerworks
+ website which fixes further problems (released on 2002.11.25).
+ * Integrated the kerntypes patch from the IBM Developerworks
+ website (released on 2002.09.13).
+ * Integrated the on-demand timer patch from the IBM Developerworks
+ website (released on 2002.10.24).
+ * Ported the ramdisk-patch to apply to the 2.4.19 kernel source
+ (port by Bastian Blank <waldi at debian.org>).
+ * Ported the cmsfs-patch to apply to the 2.4.19 kernel source
+ (port by Bastian Blank <waldi at debian.org>).
+ * Removed ksyms-fix-patch (not needed any longer).
+
+ -- Jochen Röhrig <jr at debian.org> Wed, 27 Nov 2002 22:36:43 +0100
+
+kernel-image-2.4.17-s390 (2.4.17-3) unstable; urgency=high
+
+ * Integrated a new kernel-patch from the IBM Developerworks
+ website (released on 2002.06.12).
+ This patch fixes the DASD deadlock problem and some other severe
+ problems.
+ * Removed NMU DASD deadlock fix.
+ * Integrated a new kernel-patch from the IBM Developerworks
+ website (released on 2002.08.16).
+ This patch fixes a problem related to the IUCV driver.
+
+ -- Jochen Röhrig <jr at debian.org> Tue, 10 Sep 2002 21:33:13 +0200
+
+kernel-image-2.4.17-s390 (2.4.17-2.1) unstable; urgency=high
+
+ * NMU
+ * Rebuilt with kernel-patch-2.4.17-s390 0.0.20020415-1.1 which
+ fixes a possible DASD deadlock.
+
+ -- Stefan Gybas <sgybas at debian.org> Mon, 29 Apr 2002 21:15:18 +0200
+
+kernel-image-2.4.17-s390 (2.4.17-2) unstable; urgency=high
+
+ * Integrated a new kernel-patch from the IBM Developerworks
+ website (released on 2002.04.15).
+ * Added cpint-patch by Neale Ferguson, which allows CP commands to be
+ invoked from Linux.
+ * Added cmsfs-patch by Rick Troth <rtroth at bmc.com>, which enables
+ read-only access to CMS disks.
+
+ -- Jochen Röhrig <jr at debian.org> Tue, 16 Apr 2002 20:14:50 +0200
+
+kernel-image-2.4.17-s390 (2.4.17-1) unstable; urgency=low
+
+ * New upstream release.
+ * Use kernel-patch-2.4.17-s390.
+ * First kernel-image package to include the freshly open-sourced lcs
+ network driver module, which had so far been available from the IBM
+ Developerworks website only as an object-code-only module.
+
+ -- Jochen Röhrig <jr at debian.org> Wed, 6 Mar 2002 21:25:25 +0100
+
+kernel-image-2.4.16-s390 (2.4.16-2) unstable; urgency=low
+
+ * Integrated a patch by Gerhard Tonn <gt at debian.org> which fixes
+ compile problems for some packages that use the kernel-headers.
+ * Use new kernel-package which generates /etc/zipl.conf correctly
+ in the kernel-image-postinstall-script.
+
+ -- Jochen Röhrig <jr at debian.org> Wed, 13 Feb 2002 22:25:16 +0100
+
+kernel-image-2.4.16-s390 (2.4.16-1) unstable; urgency=low
+
+ * New upstream release.
+ * Use kernel-patch-2.4.16-s390.
+ * Enable CONFIG_EXT3_FS.
+
+ -- Jochen Röhrig <jr at debian.org> Fri, 21 Dec 2001 01:04:09 +0100
+
+kernel-image-2.4.7-s390 (2.4.7-5) unstable; urgency=low
+
+ * Integrated a new kernel-patch from the IBM Developerworks
+ website (released on 2001.11.23).
+ * Updated the patch by Gerhard Tonn <gt at debian.org> which adds
+ support for a second initrd (needed by the s390 boot-floppies).
+ * Enable CONFIG_FILTER and CRAMFS.
+
+ -- Jochen Röhrig <jr at debian.org> Tue, 11 Dec 2001 22:28:02 +0100
+
+kernel-image-2.4.7-s390 (2.4.7-4) unstable; urgency=low
+
+ * Integrated a new kernel-patch from the IBM Developerworks
+ website (released on 2001.11.09).
+ * Integrated a patch by Gerhard Tonn <gt at debian.org> which adds
+ support for a second initrd (needed by the s390 boot-floppies).
+
+ -- Jochen Röhrig <jr at debian.org> Tue, 13 Nov 2001 22:05:01 +0100
+
+kernel-image-2.4.7-s390 (2.4.7-3) unstable; urgency=low
+
+ * Integrated the current kernel-patches from the IBM Developerworks
+ website.
+ * Renamed kernel-headers-deb and fixed a problem with the version info
+ in include/linux/version.h.
+ * Install System.map and config in /boot/ of s390-tape-udeb.
+
+ -- Jochen Röhrig <jr at debian.org> Fri, 26 Oct 2001 00:45:08 +0200
+
+kernel-image-2.4.7-s390 (2.4.7-2) unstable; urgency=low
+
+ * Compile NFS support as module.
+ * Corrected some dependencies.
+ * Changed section to devel.
+
+ -- Jochen Röhrig <jr at debian.org> Thu, 6 Sep 2001 20:36:32 +0200
+
+kernel-image-2.4.7-s390 (2.4.7-1) unstable; urgency=low
+
+ * New upstream release.
+ * Build s390-tape binary package as udeb.
+
+ -- Jochen Röhrig <jr at debian.org> Wed, 22 Aug 2001 00:43:24 +0200
+
+kernel-image-2.4.5-s390 (2.4.5-1) unstable; urgency=low
+
+ * Initial release, based on the kernel-image-2.4.7-i386 package.
+
+ -- Stefan Gybas <sgybas at debian.org> Wed, 1 Aug 2001 09:03:24 +0200
Added: branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/compat
===================================================================
--- branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/compat 2005-08-26 17:09:37 UTC (rev 4057)
+++ branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/compat 2005-08-26 17:15:28 UTC (rev 4058)
@@ -0,0 +1 @@
+4
Added: branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/control
===================================================================
--- branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/control 2005-08-26 17:09:37 UTC (rev 4057)
+++ branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/control 2005-08-26 17:15:28 UTC (rev 4058)
@@ -0,0 +1,66 @@
+Source: kernel-image-2.4.27-s390
+Section: devel
+Priority: optional
+Maintainer: Debian S/390 Team <debian-s390 at lists.debian.org>
+Uploaders: Bastian Blank <waldi at debian.org>
+Standards-Version: 3.5.6
+Build-Depends: debhelper (>> 4.0.0), modutils (>= 2.4.21), kernel-tree-2.4.27-10sarge1, kernel-patch-2.4.27-s390 (>= 2.4.27-2), kernel-package (>= 8.084)
+
+Package: kernel-headers-2.4.27-2
+Architecture: s390
+Section: devel
+Priority: optional
+Provides: kernel-headers, kernel-headers-2.4
+Description: Header files related to Linux kernel version 2.4.27 on IBM S/390
+ This package provides kernel header files for version 2.4.27 on IBM S/390,
+ for sites that want the latest kernel headers. Please read
+ /usr/share/doc/kernel-headers-2.4.27/debian.README.gz for details.
+
+Package: kernel-image-2.4.27-2-s390
+Architecture: s390
+Section: base
+Priority: optional
+Provides: kernel-image, kernel-image-2.4
+Depends: modutils (>= 2.4.21)
+Suggests: s390-tools (>= 1.1.4), kernel-doc-2.4.27
+Description: Linux kernel image for kernel version 2.4.27 on IBM S/390
+ This package contains the Linux kernel image for kernel version 2.4.27
+ on IBM S/390 and zSeries, the corresponding System.map file, and the
+ modules built by the packager. It also contains scripts that try to
+ ensure that the system is not left in an unbootable state after an
+ update.
+ .
+ This kernel has support for IPL (boot) from a VM reader or DASD device.
+ .
+ Kernel image packages are generally produced using kernel-package,
+ and it is suggested that you install that package if you wish to
+ create a custom kernel from the sources.
+
+Package: kernel-image-2.4.27-2-s390-tape
+Architecture: s390
+Section: base
+Priority: extra
+Description: Linux kernel image for kernel version 2.4.27 on IBM S/390 (tape IPL)
+ This package contains the Linux kernel image for kernel version 2.4.27
+ on IBM S/390 and zSeries.
+ .
+ This kernel has support for IPL (boot) from a tape.
+
+Package: kernel-image-2.4.27-2-s390x
+Architecture: s390
+Section: base
+Priority: optional
+Provides: kernel-image, kernel-image-2.4
+Depends: modutils (>= 2.4.21)
+Suggests: s390-tools (>= 1.1.4), kernel-doc-2.4.27
+Description: Linux kernel image for kernel version 2.4.27 on IBM zSeries
+ This package contains the Linux kernel image for kernel version 2.4.27
+ on IBM zSeries, the corresponding System.map file, and the modules
+ built by the packager. It also contains scripts that try to ensure that
+ the system is not left in an unbootable state after an update.
+ .
+ This kernel has support for IPL (boot) from a VM reader or DASD device.
+ .
+ Kernel image packages are generally produced using kernel-package,
+ and it is suggested that you install that package if you wish to
+ create a custom kernel from the sources.
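As the descriptions note, these images are produced with kernel-package. A minimal
sketch of rebuilding one flavour by hand, assuming kernel-source-2.4.27 is installed
(the real rules file below also applies the kernel-tree and s390 patches first; that
step is elided here, and the config path is a hypothetical checkout location):

    # Unpack the Debian kernel source, drop in the flavour config, then
    # let make-kpkg configure and build the image package -- the same
    # invocations debian/rules uses for the s390 flavour.
    tar -jxf /usr/src/kernel-source-2.4.27.tar.bz2
    cd kernel-source-2.4.27
    cp ../config/s390 .config    # assumed path to the config shipped above
    make-kpkg --append-to-version -2-s390 --subarch s390 configure
    make-kpkg --append-to-version -2-s390 --subarch s390 kernel-image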
Added: branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/copyright
===================================================================
--- branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/copyright 2005-08-26 17:09:37 UTC (rev 4057)
+++ branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/copyright 2005-08-26 17:15:28 UTC (rev 4058)
@@ -0,0 +1,22 @@
+This is the Debian GNU/Linux prepackaged version of the Linux kernel.
+Linux was written by Linus Torvalds <Linus.Torvalds at cs.Helsinki.FI>
+and others.
+
+Linux is copyrighted by Linus Torvalds and others.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 dated June, 1991.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ 02111-1307, USA.
+
+On Debian GNU/Linux systems, the complete text of the GNU General
+Public License can be found in `/usr/share/common-licenses/GPL'.
Added: branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/rules
===================================================================
--- branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/rules 2005-08-26 17:09:37 UTC (rev 4057)
+++ branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/rules 2005-08-26 17:15:28 UTC (rev 4058)
@@ -0,0 +1,138 @@
+#!/usr/bin/make -f
+
+# Uncomment this to turn on verbose mode.
+#export DH_VERBOSE=1
+
+SHELL := sh -e
+
+debian_version_temp := $(shell sed -n 's/.*kernel-headers-\([^-]*\)\(-[[:digit:]]*\)$$/\1 \2/; t e; b; :e; p; q' debian/control)
+version := $(word 1,$(debian_version_temp))
+debian_version_number := $(word 2,$(debian_version_temp))
+debian_version := $(version)$(debian_version_number)
+patch := s390-$(shell echo $(version) | sed -e 's,\.,_,g')
+
+kernel_tree_version := $(shell sed -n 's/.*kernel-tree-\([^,]*\).*/\1/p; t e; b; :e q' debian/control )
+
+flavours = s390 s390x
+flavours_image = s390-tape
+
+unpack: unpack-stamp
+unpack-stamp:
+ dh_testdir
+
+ tar -jxf /usr/src/kernel-source-$(version).tar.bz2
+
+ touch $@
+
+patch: patch-stamp
+patch-stamp: unpack-stamp
+ dh_testdir
+
+ @-rm -rf build
+ cp -al kernel-source-$(version) build
+ cd kernel-source-$(version); /usr/src/kernel-patches/all/$(version)/apply/debian $(kernel_tree_version)
+ mkdir build/debian
+ cp debian/changelog debian/control debian/copyright build/debian
+ echo "official" > build/debian/official
+ install post-install build/debian
+ cd build; PATCH_THE_KERNEL=YES make-kpkg --added-patches $(patch) debian
+
+ touch $@
+
+configure: configure-stamp
+configure-stamp: patch-stamp
+ dh_testdir
+
+ @-rm -rf build-*
+ for i in $(flavours) $(flavours_image); do \
+ arch=`echo $$i | sed -e 's,^\([^-]*\)-.*,\1,'`; \
+ echo "cp -al build build-$$i"; \
+ cp -al build build-$$i; \
+ if [ "$$i" == "$$arch" ]; then \
+ echo "cp config/$$i build-$$i/.config"; \
+ cp config/$$i build-$$i/.config; \
+ else \
+ type=`echo $$i | sed -e 's,^[^-]*-\(.*\)$$,\1,'`; \
+ if [ "$$type" == "tape" ]; then \
+ sed -e 's/^\(.*\)=m$$/# \1 is not set/' \
+ -e 's/^.*\(CONFIG_IPL_[A-Z]*\).*$$/# \1 is not set/' \
+ -e 's/^.*\(CONFIG_IPL_TAPE\).*$$/\1=y/' config/$$arch > build-$$i/.config; \
+ fi; \
+ fi; \
+ make_kpkg="make-kpkg --append-to-version $(debian_version_number)-$$arch --subarch $$arch configure"; \
+ echo "cd build-$$i; $$make_kpkg"; \
+ ( cd build-$$i; $$make_kpkg ); \
+ done
+ cp -al build build-default
+ cp config/s390 build-default/.config
+ cd build-default; make-kpkg configure
+
+ touch $@
+
+build: build-stamp
+build-stamp: configure-stamp
+ @for i in $(flavours) $(flavours_image); do \
+ arch=`echo $$i | sed -e 's,^\([^-]*\)-.*,\1,'`; \
+ make_kpkg="make-kpkg --append-to-version $(debian_version_number)-$$arch --subarch $$arch build"; \
+ echo "cd build-$$i; $$make_kpkg"; \
+ ( cd build-$$i; $$make_kpkg ); \
+ done
+
+ touch $@
+
+clean:
+ dh_testdir
+ dh_testroot
+
+ rm -f *-stamp
+ rm -rf kernel* build* install*
+
+ dh_clean
+
+install:
+ dh_testdir
+ dh_testroot
+ dh_clean -k
+
+# Build architecture-independent files here.
+binary-indep:
+
+# Build architecture-dependent files here.
+binary-arch: install
+ dh_testdir
+ dh_testroot
+
+ @-rm -rf install*
+
+ cp -al build-default install
+ cd install; make-kpkg kernel-headers --append-to-version $(debian_version_number)
+ cp install/debian/files debian
+
+ @for i in $(flavours); do \
+ arch=`echo $$i | sed -e 's,^\([^-]*\)-.*,\1,'`; \
+ echo "cp -al build-$$i install-$$i"; \
+ cp -al build-$$i install-$$i; \
+ make_kpkg="make-kpkg --append-to-version $(debian_version_number)-$$arch --subarch $$arch kernel-image"; \
+ echo "cd install-$$i; $$make_kpkg"; \
+ ( cd install-$$i; $$make_kpkg ); \
+ echo "cat install-$$i/debian/files >> debian/files"; \
+ cat install-$$i/debian/files >> debian/files; \
+ done
+ for i in $(flavours_image); do \
+ arch=`echo $$i | sed -e 's,^\([^-]*\)-.*,\1,'`; \
+ package="kernel-image-$(debian_version)-$$i"; \
+ mkdir -p debian/$$package/boot; \
+ install -m644 build-$$i/arch/s390/boot/image debian/$$package/boot/vmlinuz-$(debian_version)-$$i; \
+ dh_installchangelogs --package=$$package; \
+ dh_installdocs --package=$$package; \
+ dh_compress --package=$$package; \
+ dh_fixperms --package=$$package; \
+ dh_installdeb --package=$$package; \
+ dh_gencontrol --package=$$package; \
+ dh_md5sums --package=$$package; \
+ dh_builddeb --package=$$package; \
+ done
+ mv *.deb ..
+
+binary: binary-indep binary-arch
+.PHONY: build clean binary-indep binary-arch binary install unpack
Property changes on: branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/debian/rules
___________________________________________________________________
Name: svn:executable
+ *
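The tape flavour built by the rules file above has no config file of its own; its
.config is derived from config/s390 by the sed pipeline in the configure target. The
same pipeline outside make (so single $ instead of the $$ escaping above), with its
effect on representative input lines:

    # Turn every module off, disable all CONFIG_IPL_* options, then
    # re-enable tape IPL -- the CONFIG_IPL_TAPE expression must come
    # last so it wins over the blanket CONFIG_IPL_* rule.
    sed -e 's/^\(.*\)=m$/# \1 is not set/' \
        -e 's/^.*\(CONFIG_IPL_[A-Z]*\).*$/# \1 is not set/' \
        -e 's/^.*\(CONFIG_IPL_TAPE\).*$/\1=y/' config/s390
    # CONFIG_SCSI=m                  -> # CONFIG_SCSI is not set
    # CONFIG_IPL_VM=y                -> # CONFIG_IPL_VM is not set
    # # CONFIG_IPL_TAPE is not set   -> CONFIG_IPL_TAPE=y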
Added: branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/post-install
===================================================================
--- branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/post-install 2005-08-26 17:09:37 UTC (rev 4057)
+++ branches/dist/sarge-security/kernel-2.4/s390/kernel-image-2.4.27-s390/post-install 2005-08-26 17:15:28 UTC (rev 4058)
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+set -e
+
+debhelper_pre() {
+ dh_clean -k --package="$1"
+ dh_installdirs --package="$1"
+}
+
+debhelper_post() {
+ dh_installdocs --package="$1"
+ dh_installchangelogs --package="$1"
+ dh_compress --package="$1"
+ dh_fixperms --package="$1"
+ dh_installdeb --package="$1"
+ dh_gencontrol --package="$1"
+ dh_md5sums --package="$1"
+ dh_builddeb --package="$1"
+}
+
+pkg=kernel-image-${version}${INT_SUBARCH}
+pkgsimple=kernel-image-${version}
+
+debhelper_pre $pkg
+debhelper_post $pkg
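kernel-package runs this hook while assembling the image package, with the version
and subarchitecture exported in the environment. A hypothetical invocation, with the
variable values assumed purely for illustration:

    # version and INT_SUBARCH are set by make-kpkg before the hook runs;
    # together they name the binary package the dh_* helpers operate on.
    version=2.4.27-2 INT_SUBARCH=-s390 sh debian/post-install
    # -> runs the debhelper sequence for kernel-image-2.4.27-2-s390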
Added: branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/Makefile
===================================================================
--- branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/Makefile 2005-08-26 17:09:37 UTC (rev 4057)
+++ branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/Makefile 2005-08-26 17:15:28 UTC (rev 4058)
@@ -0,0 +1,61 @@
+#
+# Makefile for s390 kernel patch package
+#
+# (C) 2001, 2002 by Jochen Röhrig (jr at debian.org)
+#
+
+KERNEL_VERSION_TEMP := $(shell sed -n 's/.*kernel-tree-\([^,]*\)\(-[[:digit:]]*\).*/\1 \2/p; t e; b; :e q' debian/control )
+KERNEL_VERSION = $(word 1,$(KERNEL_VERSION_TEMP))
+KERNEL_TREE_VERSION := $(KERNEL_VERSION)$(word 2,$(KERNEL_VERSION_TEMP))
+
+export CP = /bin/cp
+export DIFF = /usr/bin/diff -uN
+export GUNZIP = /bin/gunzip
+export MKDIR = /bin/mkdir
+export MV = /bin/mv
+export PATCH = /usr/bin/patch --no-backup-if-mismatch
+export TAR = /bin/tar
+export TOUCH = /usr/bin/touch
+
+patches = \
+ linux-$(KERNEL_VERSION)-s390
+
+kernel_source = kernel-source-$(KERNEL_VERSION)
+kernel_source_tar = /usr/src/$(kernel_source).tar.bz2
+
+kernel_patch = linux-$(KERNEL_VERSION)-s390.debian.diff
+
+build: diff-stamp
+
+source: source-stamp
+source-stamp:
+ @echo "Extracting kernel source ..."
+ $(TAR) -xjf $(kernel_source_tar)
+ cd $(kernel_source); /usr/src/kernel-patches/all/$(KERNEL_VERSION)/apply/debian $(KERNEL_TREE_VERSION)
+ touch $@
+
+source-orig: source-orig-stamp
+source-orig-stamp: source-stamp
+ @echo "Copying kernel source ..."
+ $(CP) -al $(kernel_source) $(kernel_source).orig
+ touch $@
+
+patch: patch-stamp
+patch-stamp: source-stamp source-orig-stamp
+ @echo "Patching kernel ..."
+ @cd $(kernel_source); \
+ for patch in $(patches); do \
+ echo "Applying patch $$patch ..."; \
+ $(PATCH) -p0 < ../$$patch.diff || exit 1; \
+ done
+ touch $@
+
+diff: diff-stamp
+diff-stamp: patch-stamp
+ @echo "Generating new patch ..."
+ -$(DIFF) -r $(kernel_source).orig $(kernel_source) > $(kernel_patch)
+ touch $@
+
+clean:
+ $(RM) *-stamp
+ $(RM) -r $(kernel_source).orig $(kernel_source) $(kernel_patch)
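The default build target above regenerates the shipped diff in a chain of stamp
targets; run by hand (assuming kernel-source-2.4.27 and the kernel-patches apply
scripts are installed), the flow is roughly:

    # source:      unpack pristine kernel-source-2.4.27 and apply the
    #              Debian kernel-tree patches
    # source-orig: keep a hard-linked copy as the reference tree
    # patch:       apply linux-2.4.27-s390.diff on top of the working tree
    # diff:        write the result out as linux-2.4.27-s390.debian.diff
    make build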
Added: branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/changelog
===================================================================
--- branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/changelog 2005-08-26 17:09:37 UTC (rev 4057)
+++ branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/changelog 2005-08-26 17:15:28 UTC (rev 4058)
@@ -0,0 +1,219 @@
+kernel-patch-2.4.27-s390 (2.4.27-2) unstable; urgency=high
+
+ * Update patches:
+ - linux-2.4.27-s390. (Matches 2.4.21 patch 22)
+ * Use kernel-tree-2.4.27-8.
+
+ -- Bastian Blank <waldi at debian.org> Tue, 25 Jan 2005 12:31:42 +0100
+
+kernel-patch-2.4.27-s390 (2.4.27-1) unstable; urgency=high
+
+ * New upstream release.
+ * New patches:
+ - linux-2.4.27-s390.
+
+ -- Bastian Blank <waldi at debian.org> Tue, 17 Aug 2004 08:55:51 +0200
+
+kernel-patch-2.4.26-s390 (2.4.26-1) unstable; urgency=low
+
+ * New upstream release.
+ * New patches:
+ - linux-2.4.26-s390.
+
+ -- Bastian Blank <waldi at debian.org> Sun, 25 Apr 2004 12:19:03 +0200
+
+kernel-patch-2.4.25-s390 (2.4.25-1) unstable; urgency=low
+
+ * New upstream release.
+ * New patches:
+ - linux-2.4.25-s390.
+ * Remove patches:
+ - linux-cmsfs.diff
+ - linux-cpint.diff
+
+ -- Bastian Blank <waldi at debian.org> Fri, 02 Apr 2004 17:21:06 +0200
+
+kernel-patch-2.4.21-s390 (2.4.21.10-4) unstable; urgency=low
+
+ * Update patches:
+ - fixes:
+ - Merge in fixes-arch-listing.
+ - Add -m31/-m64 to CFLAGS.
+ * Remove patches:
+ - linux-2.4.21-CAN-2004-0077.
+ * Disable patches:
+ - linux-2.4.21-vmparms.diff. (closes: #236216)
+ * debian/control:
+ - Update patch listing.
+ - Build-Depend against kernel-tree-2.4.21-8.
+
+ -- Bastian Blank <waldi at debian.org> Thu, 04 Mar 2004 23:45:37 +0100
+
+kernel-patch-2.4.21-s390 (2.4.21.10-3) unstable; urgency=low
+
+ * New patches:
+ - CAN-2004-0077: linux-2.4.21-CAN-2004-0077 (temporary)
+ * debian/control:
+ - Update patch listing.
+ - Build-Depend against kernel-tree-2.4.21-7 instead of
+ kernel-source-2.4.21.
+
+ -- Bastian Blank <waldi at debian.org> Thu, 26 Feb 2004 14:08:38 +0100
+
+kernel-patch-2.4.21-s390 (2.4.21.10-2) unstable; urgency=low
+
+ * New patches:
+ - linux-2.4.21-vmparms
+ * Removed patches:
+ - linux-initrd
+ * debian/control:
+ - Update patch listing.
+
+ -- Bastian Blank <waldi at debian.org> Sat, 21 Feb 2004 19:34:50 +0100
+
+kernel-patch-2.4.21-s390 (2.4.21.10-1) unstable; urgency=low
+
+ * New upstream release.
+ * New patches:
+ - linux-2.4.21-s390-june2003
+ - linux-2.4.21-s390-01-june2003
+ - linux-2.4.21-s390-02-june2003
+ - linux-2.4.21-s390-03-june2003
+ - linux-2.4.21-s390-04-june2003
+ - linux-2.4.21-s390-05-june2003
+ - linux-2.4.21-s390-06-june2003
+ - linux-2.4.21-s390-07-june2003
+ - linux-2.4.21-s390-08-june2003
+ - linux-2.4.21-s390-09-june2003
+ - linux-2.4.21-s390-10-june2003
+ - linux-2.4.21-s390-kerntypes-june2003
+ - linux-2.4.21-s390-timer-02-june2003
+ - fixes
+ - fixes-arch-listing
+ * debian/control:
+ - Change Maintainer to debian-s390 at lists.debian.org.
+ - Add Bastian Blank and Jochen Röhrig to Uploaders.
+ - Update.
+
+ -- Bastian Blank <waldi at debian.org> Sun, 08 Feb 2004 17:36:19 +0100
+
+kernel-patch-2.4.19-s390 (0.0.20021125-1) unstable; urgency=low
+
+ * Integrated a new patch from the IBM Developerworks
+ website (released on 2002.11.25).
+
+ -- Jochen Röhrig <jr at debian.org> Wed, 27 Nov 2002 20:11:36 +0100
+
+kernel-patch-2.4.19-s390 (0.0.20021024-1) unstable; urgency=high
+
+ * New upstream release.
+ * Replaced the 2.4.17 patches with the initial 2.4.19 patch from the
+ IBM Developerworks website (released on 2002.09.13).
+ This patch includes the source code for the qdio I/O driver, which
+ had so far been available only as an object-code-only module, and adds
+ support for the new zSeries FCP attachment for SCSI.
+ * Integrated a new kernel-patch from the IBM Developerworks
+ website which fixes a lot of problems (released on 2002.10.24).
+ * Integrated the kerntypes patch from the IBM Developerworks
+ website (released on 2002.09.13).
+ * Integrated the on-demand timer patch from the IBM Developerworks
+ website (released on 2002.10.24).
+ * Ported the ramdisk-patch to apply to the 2.4.19 kernel source
+ (port by Bastian Blank <waldi at debian.org>).
+ * Ported the cmsfs-patch to apply to the 2.4.19 kernel source
+ (port by Bastian Blank <waldi at debian.org>).
+ * Removed ksyms-fix-patch (not needed any longer).
+
+ -- Jochen Röhrig <jr at debian.org> Fri, 15 Nov 2002 19:44:59 +0100
+
+kernel-patch-2.4.17-s390 (0.0.20020816-1) unstable; urgency=high
+
+ * Integrated a new kernel-patch from the IBM Developerworks
+ website (released on 2002.06.12).
+ This patch fixes the DASD deadlock problem and some other severe
+ problems. closes: Bug#145020
+ * Removed NMU DASD deadlock fix. closes: Bug#145548
+ * Integrated a new kernel-patch from the IBM Developerworks
+ website (released on 2002.08.16).
+ This patch fixes a problem related to the IUCV driver.
+
+ -- Jochen Röhrig <jr at debian.org> Tue, 10 Sep 2002 20:44:52 +0200
+
+kernel-patch-2.4.17-s390 (0.0.20020415-1.1) unstable; urgency=high
+
+ * NMU
+ * Include DASD deadlock fix. closes: Bug#145020
+
+ -- Stefan Gybas <sgybas at debian.org> Mon, 29 Apr 2002 21:01:09 +0200
+
+kernel-patch-2.4.17-s390 (0.0.20020415-1) unstable; urgency=high
+
+ * Added a new patch from the IBM Developerworks website (released on
+ 2002.04.15).
+ * Added cpint-patch by Neale Ferguson, which allows CP commands to be
+ invoked from Linux. closes: Bug#142202
+ * Added cmsfs-patch by Rick Troth <rtroth at bmc.com>, which enables
+ read-only access to CMS disks.
+ * Added a patch which fixes kernel compilation failure with some
+ configurations. closes: Bug#142335
+
+ -- Jochen Röhrig <jr at debian.org> Tue, 16 Apr 2002 19:03:11 +0200
+
+kernel-patch-2.4.17-s390 (0.0.20020304-1) unstable; urgency=low
+
+ * New upstream release.
+ * Replaced the 2.4.16 patch with the initial 2.4.17 patch from the IBM
+ Developerworks website (released on 2002.02.05).
+ * Added a new patch from the IBM Developerworks website (released on
+ 2002.03.04).
+ This patch includes the source code for the lcs network driver, which
+ had so far been available only as an object-code-only module.
+
+ -- Jochen Röhrig <jr at debian.org> Tue, 5 Mar 2002 20:37:33 +0100
+
+kernel-patch-2.4.16-s390 (0.0.20011212-1) unstable; urgency=low
+
+ * New upstream release.
+ * Replaced the 2.4.7 patches with the initial 2.4.16 patch from the IBM
+ Developerworks website (released on 2001.12.12).
+ * Fixed the patch from Gerhard Tonn <gt at debian.org> (support for
+ a second initrd) to fit the 2.4.16 kernel sources.
+
+ -- Jochen Röhrig <jr at debian.org> Thu, 20 Dec 2001 22:23:17 +0100
+
+kernel-patch-2.4.7-s390 (0.0.20011123-1) unstable; urgency=low
+
+ * Added a new patch from the IBM Developerworks website (released on
+ 2001.11.23).
+ * New version of the linux-ramdisk-patch from Gerhard Tonn <gt at debian.org>
+ (fixes some error handling bugs). closes: Bug#123283
+
+ -- Jochen Röhrig <jr at debian.org> Tue, 11 Dec 2001 21:29:05 +0100
+
+kernel-patch-2.4.7-s390 (0.0.20011109-1) unstable; urgency=low
+
+ * Added a new patch from the IBM Developerworks website (released on
+ 2001.11.09).
+ * Added a patch from Gerhard Tonn <gt at debian.org> which adds support
+ for a second initrd (needed by the s390 boot-floppies). closes: Bug#119356
+
+ -- Jochen Röhrig <jr at debian.org> Tue, 13 Nov 2001 20:34:56 +0100
+
+kernel-patch-2.4.7-s390 (0.0.20011012-1) unstable; urgency=low
+
+ * Added new patches from the IBM Developerworks website (released on
+ 2001.09.14 and on 2001.10.12).
+ * Added the kerntypes patch from the IBM Developerworks website (released
+ on 2001.08.20) which is needed by the dump analysis tool lcrash.
+ * Integrate all patches into one big patch.
+
+ -- Jochen Röhrig <jr at debian.org> Thu, 25 Oct 2001 23:02:22 +0200
+
+kernel-patch-2.4.7-s390 (0.0.20010820-1) unstable; urgency=low
+
+ * Initial Release
+  * Fixed a difference in the diff of fs/partitions/Makefile which
+    prevented the patch from applying correctly to the kernel source
+    from kernel-source-2.4.7.deb.
+
+ -- Jochen Röhrig <jr at debian.org> Wed, 29 Aug 2001 00:12:15 +0200
Added: branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/control
===================================================================
--- branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/control 2005-08-26 17:09:37 UTC (rev 4057)
+++ branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/control 2005-08-26 17:15:28 UTC (rev 4058)
@@ -0,0 +1,15 @@
+Source: kernel-patch-2.4.27-s390
+Section: devel
+Priority: extra
+Maintainer: Debian S/390 Team <debian-s390 at lists.debian.org>
+Uploaders: Bastian Blank <waldi at debian.org>, Jochen Röhrig <jr at debian.org>
+Build-Depends-Indep: debhelper (>= 3.0.51), patch (>= 2.5.4-11), dh-kpatches (>= 0.99.10), kernel-tree-2.4.27-8
+Standards-Version: 3.5.6
+
+Package: kernel-patch-2.4.27-s390
+Architecture: all
+Depends: ${kpatch:Depends}
+Recommends: kernel-source-2.4.27, kernel-package
+Description: Diffs to the Linux kernel source 2.4.27 for IBM S/390 and zSeries
+ Patches for the Linux kernel for the S/390 and zSeries architecture.
+ This package includes the patch for the Linux kernel version 2.4.27.
Added: branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/copyright
===================================================================
--- branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/copyright 2005-08-26 17:09:37 UTC (rev 4057)
+++ branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/copyright 2005-08-26 17:15:28 UTC (rev 4058)
@@ -0,0 +1,34 @@
+This package was debianized by Jochen Röhrig on 16 Apr 2002.
+
+It was downloaded from IBM's developerWorks site at
+http://oss.software.ibm.com/linux390/current2_4_19-may2002.shtml#kernel20021024
+
+This package includes the cpint-patch which was extracted from the
+RedHat kernel source at
+ftp://updates.redhat.com/7.2/en/os/SRPMS/kernel-2.4.9-31.src.rpm
+and which was released under the GPL by its author, Neale Ferguson.
+
+This package includes the cmsfs-patch which was retrieved from
+http://people.redhat.com/zaitcev/linux/linux-2.4.18-pre7-cmsfs.diff
+This patch is copyrighted by its author, Rick Troth <rtroth at bmc.com> and
+BMC Software, Inc., Houston, Texas, USA and was released under the GPL.
+
+Linux is copyrighted by Linus Torvalds and others.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License with
+ the Debian GNU/Linux distribution in file /usr/share/common-licenses/GPL;
+ if not, write to the Free Software Foundation, Inc., 59 Temple Place,
+ Suite 330, Boston, MA 02111-1307 USA
+
+On Debian GNU/Linux systems, the complete text of the GNU General
+Public License can be found in `/usr/share/common-licenses/GPL'.
Added: branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/kpatches
===================================================================
--- branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/kpatches 2005-08-26 17:09:37 UTC (rev 4057)
+++ branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/kpatches 2005-08-26 17:15:28 UTC (rev 4058)
@@ -0,0 +1,7 @@
+Patch-name: IBM s390 and zSeries support for Linux 2.4.27
+Patch-id: s390-2_4_27
+Architecture: s390
+Path-strip-level: 1
+
+Patch-file: linux-2.4.27-s390.debian.diff
+Kernel-version: 2.4.27
Added: branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/rules
===================================================================
--- branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/rules 2005-08-26 17:09:37 UTC (rev 4057)
+++ branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/rules 2005-08-26 17:15:28 UTC (rev 4058)
@@ -0,0 +1,56 @@
+#!/usr/bin/make -f
+
+# Uncomment this to turn on verbose mode.
+#export DH_VERBOSE=1
+
+# This is the debhelper compatibility version to use.
+export DH_COMPAT=3
+
+build: build-stamp
+build-stamp:
+ dh_testdir
+ $(MAKE)
+ touch $@
+
+clean:
+ dh_testdir
+ dh_testroot
+ rm -f build-stamp
+ -$(MAKE) clean
+ dh_clean
+
+# Build architecture-independent files here.
+binary-indep: build
+ dh_testdir
+ dh_testroot
+ dh_clean -k
+# dh_installdebconf
+ dh_installdocs
+# dh_installexamples
+# dh_installmenu
+# dh_installlogrotate
+# dh_installemacsen
+# dh_installpam
+# dh_installmime
+# dh_installinit
+# dh_installcron
+# dh_installman
+# dh_installinfo
+# dh_undocumented
+ dh_installkpatches
+ dh_installchangelogs
+ dh_link
+ dh_compress
+ dh_fixperms
+ dh_installdeb
+# dh_perl
+ dh_gencontrol
+ dh_md5sums
+ dh_builddeb
+
+# Build architecture-dependent files here.
+binary-arch: build
+# We have nothing to do by default.
+
+binary: binary-indep binary-arch
+.PHONY: build clean binary-indep binary-arch binary install configure
Property changes on: branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/debian/rules
___________________________________________________________________
Name: svn:executable
+ *
Added: branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/linux-2.4.27-s390.diff
===================================================================
--- branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/linux-2.4.27-s390.diff 2005-08-26 17:09:37 UTC (rev 4057)
+++ branches/dist/sarge-security/kernel-2.4/s390/kernel-patch-2.4.27-s390/linux-2.4.27-s390.diff 2005-08-26 17:15:28 UTC (rev 4058)
@@ -0,0 +1,87498 @@
+=== kernel/ksyms.c
+==================================================================
+--- kernel/ksyms.c (/upstream/vanilla/2.4.27) (revision 52)
++++ kernel/ksyms.c (/trunk/2.4.27) (revision 52)
+@@ -605,6 +605,7 @@
+ EXPORT_SYMBOL(do_softirq);
+ EXPORT_SYMBOL(raise_softirq);
+ EXPORT_SYMBOL(cpu_raise_softirq);
++EXPORT_SYMBOL(open_softirq);
+ EXPORT_SYMBOL(__tasklet_schedule);
+ EXPORT_SYMBOL(__tasklet_hi_schedule);
+
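
The export of open_softirq() lets modules install their own softirq
handlers. A minimal sketch of a caller, assuming a free softirq slot
MY_SOFTIRQ has been reserved (the slot name is illustrative, not part
of this patch):

    #include <linux/interrupt.h>

    /* runs in softirq context; MY_SOFTIRQ is a hypothetical slot */
    static void my_action(struct softirq_action *h)
    {
            /* ... deferred work ... */
    }

    /* 2.4 signature: open_softirq(nr, action, data) */
    open_softirq(MY_SOFTIRQ, my_action, NULL);
    raise_softirq(MY_SOFTIRQ);      /* mark it pending on this CPU */
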
+=== kernel/timer.c
+==================================================================
+--- kernel/timer.c (/upstream/vanilla/2.4.27) (revision 52)
++++ kernel/timer.c (/trunk/2.4.27) (revision 52)
+@@ -339,6 +339,64 @@
+ spin_unlock_irq(&timerlist_lock);
+ }
+
++#ifdef CONFIG_NO_IDLE_HZ
++/*
++ * Find out when the next timer event is due to happen. This
++ * is used on S/390 to stop all activity when all cpus are idle.
++ * The timerlist_lock must be acquired before calling this function.
++ */
++struct timer_list *next_timer_event(void)
++{
++ struct timer_list *nte, *tmp;
++ struct list_head *lst;
++ int i, j;
++
++ /* Look for the next timer event in tv1. */
++ i = 0;
++ j = tvecs[0]->index;
++ do {
++ struct list_head *head = tvecs[0]->vec + j;
++ if (!list_empty(head)) {
++ nte = list_entry(head->next, struct timer_list, list);
++ goto found;
++ }
++ j = (j + 1) & TVR_MASK;
++ } while (j != tv1.index);
++
++ /* No event found in tv1. Check tv2-tv5. */
++ for (i = 1; i < NOOF_TVECS; i++) {
++ j = tvecs[i]->index;
++ do {
++ nte = NULL;
++ list_for_each(lst, tvecs[i]->vec + j) {
++ tmp = list_entry(lst, struct timer_list, list);
++ if (nte == NULL ||
++ time_before(tmp->expires, nte->expires))
++ nte = tmp;
++ }
++ if (nte)
++ goto found;
++ j = (j + 1) & TVN_MASK;
++ } while (j != tvecs[i]->index);
++ }
++ return NULL;
++found:
++ /* Found timer event in tvecs[i]->vec[j] */
++ if (j < tvecs[i]->index && i < NOOF_TVECS-1) {
++ /*
++ * The search wrapped. We need to look at the next list
++ * from tvecs[i+1] that would cascade into tvecs[i].
++ */
++ list_for_each(lst, tvecs[i+1]->vec+tvecs[i+1]->index) {
++ tmp = list_entry(lst, struct timer_list, list);
++ if (time_before(tmp->expires, nte->expires))
++ nte = tmp;
++ }
++ }
++ return nte;
++}
++#endif
++
+ spinlock_t tqueue_lock = SPIN_LOCK_UNLOCKED;
+
+ void tqueue_bh(void)
+@@ -527,7 +585,7 @@
+ update_wall_time_one_tick();
+ } while (ticks);
+
+- if (xtime.tv_usec >= 1000000) {
++ while (xtime.tv_usec >= 1000000) {
+ xtime.tv_usec -= 1000000;
+ xtime.tv_sec++;
+ second_overflow();
+@@ -620,6 +678,31 @@
+ }
+
+ /*
++ * Called from the timer interrupt handler to charge a couple of ticks
++ * to the current process.
++ */
++void update_process_times_us(int user_ticks, int system_ticks)
++{
++ struct task_struct *p = current;
++ int cpu = smp_processor_id();
++
++ update_one_process(p, user_ticks, system_ticks, cpu);
++ if (p->pid) {
++ p->counter -= user_ticks + system_ticks;
++ if (p->counter <= 0) {
++ p->counter = 0;
++ p->need_resched = 1;
++ }
++ if (p->nice > 0)
++ kstat.per_cpu_nice[cpu] += user_ticks;
++ else
++ kstat.per_cpu_user[cpu] += user_ticks;
++ kstat.per_cpu_system[cpu] += system_ticks;
++ } else if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
++ kstat.per_cpu_system[cpu] += system_ticks;
++}
++
++/*
+ * Nr of active tasks - counted in fixed-point numbers
+ */
+ static unsigned long count_active_tasks(void)
+@@ -651,7 +734,7 @@
+ static int count = LOAD_FREQ;
+
+ count -= ticks;
+- if (count < 0) {
++ while (count < 0) {
+ count += LOAD_FREQ;
+ active_tasks = count_active_tasks();
+ CALC_LOAD(avenrun[0], EXP_1, active_tasks);
+@@ -709,6 +792,14 @@
+ mark_bh(TQUEUE_BH);
+ }
+
++void do_timer_ticks(int ticks)
++{
++ (*(unsigned long *)&jiffies) += ticks;
++ mark_bh(TIMER_BH);
++ if (TQ_ACTIVE(tq_timer))
++ mark_bh(TQUEUE_BH);
++}
++
+ #if !defined(__alpha__) && !defined(__ia64__)
+
+ /*
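
For orientation: the intended consumer of next_timer_event() is the
s390 idle path, which needs to know how long a CPU may sleep before
the next pending timer fires. A rough sketch under CONFIG_NO_IDLE_HZ
(not the actual architecture code; assumes timerlist_lock is visible
to the caller):

    struct timer_list *next;
    unsigned long delta = 0;

    spin_lock(&timerlist_lock);      /* required by next_timer_event() */
    next = next_timer_event();
    if (next)
            delta = next->expires - jiffies;  /* ticks until that event */
    spin_unlock(&timerlist_lock);
    /* program the CPU timer for 'delta' ticks and halt; on wakeup,
     * account the skipped ticks with do_timer_ticks() */
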
+=== kernel/sysctl.c
+==================================================================
+--- kernel/sysctl.c (/upstream/vanilla/2.4.27) (revision 52)
++++ kernel/sysctl.c (/trunk/2.4.27) (revision 52)
+@@ -92,7 +92,10 @@
+ extern int sysctl_ieee_emulation_warnings;
+ #endif
+ extern int sysctl_userprocess_debug;
++#ifdef CONFIG_NO_IDLE_HZ
++extern int sysctl_hz_timer;
+ #endif
++#endif
+
+ #ifdef CONFIG_PPC32
+ extern unsigned long zero_paged_on, powersave_nap;
+@@ -274,6 +277,10 @@
+ {KERN_S390_USER_DEBUG_LOGGING,"userprocess_debug",
+ &sysctl_userprocess_debug,sizeof(int),0644,NULL,&proc_dointvec},
+ #endif
++#ifdef CONFIG_NO_IDLE_HZ
++ {KERN_S390_HZ_TIMER,"hz_timer",
++ &sysctl_hz_timer,sizeof(int),0644,NULL,&proc_dointvec},
++#endif
+ #ifdef __x86_64__
+ {KERN_EXCEPTION_TRACE,"exception-trace",
+ &exception_trace,sizeof(int),0644,NULL,&proc_dointvec},
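
The new KERN_S390_HZ_TIMER entry surfaces as /proc/sys/kernel/hz_timer
(the path follows from the sysctl table above). A small userspace
sketch that switches the HZ timer off:

    #include <stdio.h>

    int main(void)
    {
            /* path inferred from the "hz_timer" sysctl entry */
            FILE *f = fopen("/proc/sys/kernel/hz_timer", "w");

            if (!f) {
                    perror("hz_timer");
                    return 1;
            }
            fputs("0\n", f);   /* 0: let idle CPUs stop the HZ tick */
            return fclose(f) ? 1 : 0;
    }
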
+=== include/asm-ia64/pgalloc.h
+==================================================================
+--- include/asm-ia64/pgalloc.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-ia64/pgalloc.h (/trunk/2.4.27) (revision 52)
+@@ -297,4 +297,6 @@
+ set_bit(PG_arch_1, &page->flags); /* mark page as clean */
+ }
+
++#include <asm-generic/pgalloc.h>
++
+ #endif /* _ASM_IA64_PGALLOC_H */
+=== include/asm-mips/pgalloc.h
+==================================================================
+--- include/asm-mips/pgalloc.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-mips/pgalloc.h (/trunk/2.4.27) (revision 52)
+@@ -196,4 +196,6 @@
+
+ extern int do_check_pgt_cache(int, int);
+
++#include <asm-generic/pgalloc.h>
++
+ #endif /* _ASM_PGALLOC_H */
+=== include/asm-sparc/pgalloc.h
+==================================================================
+--- include/asm-sparc/pgalloc.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-sparc/pgalloc.h (/trunk/2.4.27) (revision 52)
+@@ -141,4 +141,6 @@
+
+ #define pte_free(pte) free_pte_fast(pte)
+
++#include <asm-generic/pgalloc.h>
++
+ #endif /* _SPARC_PGALLOC_H */
+=== include/net/if_inet6.h
+==================================================================
+--- include/net/if_inet6.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/net/if_inet6.h (/trunk/2.4.27) (revision 52)
+@@ -128,6 +128,8 @@
+ #define IFA_SITE IPV6_ADDR_SITELOCAL
+ #define IFA_GLOBAL 0x0000U
+
++extern struct notifier_block *inet6addr_chain;
++
+ struct inet6_dev
+ {
+ struct net_device *dev;
+=== include/net/addrconf.h
+==================================================================
+--- include/net/addrconf.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/net/addrconf.h (/trunk/2.4.27) (revision 52)
+@@ -71,6 +71,9 @@
+ /*
+ * multicast prototypes (mcast.c)
+ */
++extern int register_multicast6_notifier(struct notifier_block *nb);
++extern int unregister_multicast6_notifier(struct notifier_block *nb);
++
+ extern int ipv6_sock_mc_join(struct sock *sk, int ifindex,
+ struct in6_addr *addr);
+ extern int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
+=== include/asm-sh/pgalloc.h
+==================================================================
+--- include/asm-sh/pgalloc.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-sh/pgalloc.h (/trunk/2.4.27) (revision 52)
+@@ -153,4 +153,7 @@
+ pte_t old_pte = *ptep;
+ set_pte(ptep, pte_mkdirty(old_pte));
+ }
++
++#include <asm-generic/pgalloc.h>
++
+ #endif /* __ASM_SH_PGALLOC_H */
+=== include/asm-generic/pgalloc.h
+==================================================================
+--- include/asm-generic/pgalloc.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-generic/pgalloc.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,37 @@
++#ifndef _ASM_GENERIC_PGALLOC_H
++#define _ASM_GENERIC_PGALLOC_H
++
++static inline int ptep_test_and_clear_and_flush_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
++{
++ if (!ptep_test_and_clear_young(ptep))
++ return 0;
++ flush_tlb_page(vma, address);
++ return 1;
++}
++
++static inline int ptep_test_and_clear_and_flush_dirty(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
++{
++ if (!ptep_test_and_clear_dirty(ptep))
++ return 0;
++ flush_tlb_page(vma, address);
++ return 1;
++}
++
++static inline void ptep_establish(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry)
++{
++ set_pte(ptep, entry);
++ flush_tlb_page(vma, address);
++ update_mmu_cache(vma, address, entry);
++}
++
++static inline pte_t ptep_invalidate(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
++{
++ pte_t pte;
++
++ flush_cache_page(vma, address);
++ pte = ptep_get_and_clear(ptep);
++ flush_tlb_page(vma, address);
++ return pte;
++}
++
++#endif /* _ASM_GENERIC_PGALLOC_H */
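
These asm-generic helpers bundle a pte update with the TLB flush it
requires. A sketch of how fault-handling code might use
ptep_establish(); 'page', 'vma', 'address' and 'ptep' are assumed to
come from the surrounding fault handler:

    pte_t entry;

    entry = pte_mkyoung(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
    /* sets the pte, flushes the TLB entry and updates the MMU cache */
    ptep_establish(vma, address, ptep, entry);
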
+=== include/asm-arm/pgalloc.h
+==================================================================
+--- include/asm-arm/pgalloc.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-arm/pgalloc.h (/trunk/2.4.27) (revision 52)
+@@ -138,4 +138,6 @@
+
+ extern int do_check_pgt_cache(int, int);
+
++#include <asm-generic/pgalloc.h>
++
+ #endif
+=== include/asm-parisc/pgalloc.h
+==================================================================
+--- include/asm-parisc/pgalloc.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-parisc/pgalloc.h (/trunk/2.4.27) (revision 52)
+@@ -306,4 +306,6 @@
+
+ extern int do_check_pgt_cache(int, int);
+
++#include <asm-generic/pgalloc.h>
++
+ #endif
+=== include/linux/sysctl.h
+==================================================================
+--- include/linux/sysctl.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/linux/sysctl.h (/trunk/2.4.27) (revision 52)
+@@ -124,6 +124,7 @@
+ KERN_CORE_USES_PID=52, /* int: use core or core.%pid */
+ KERN_TAINTED=53, /* int: various kernel tainted flags */
+ KERN_CADPID=54, /* int: PID of the process to notify on CAD */
++ KERN_S390_HZ_TIMER=55, /* int: hz timer on or off */
+ KERN_CORE_PATTERN=56, /* string: pattern for core-files */
+ KERN_PPC_L3CR=57, /* l3cr register on PPC */
+ KERN_EXCEPTION_TRACE=58, /* boolean: exception trace */
+=== include/linux/netdevice.h
+==================================================================
+--- include/linux/netdevice.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/linux/netdevice.h (/trunk/2.4.27) (revision 52)
+@@ -382,6 +382,8 @@
+ #define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
+ #define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
+ #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
++#define NETIF_F_SHARED_IPV6	2048	/* make IPv6 address autogeneration
++					   aware of network card instances */
+
+ /* Called after device is detached from network. */
+ void (*uninit)(struct net_device *dev);
+@@ -453,6 +455,9 @@
+ /* this will get initialized at each interface type init routine */
+ struct divert_blk *divert;
+ #endif /* CONFIG_NET_DIVERT */
++#ifdef CONFIG_SHARED_IPV6_CARDS
++ unsigned short dev_id;
++#endif /* CONFIG_SHARED_IPV6_CARDS */
+ };
+
+ /* 2.6 compatibility */
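
A driver for a card shared between several Linux instances would
advertise the new flag and set the per-instance id, roughly as below
(sketch; 'instance' is a hypothetical driver variable):

    #ifdef CONFIG_SHARED_IPV6_CARDS
            /* opt in to instance-aware IPv6 address autogeneration */
            dev->features |= NETIF_F_SHARED_IPV6;
            dev->dev_id = instance;   /* disambiguates the generated EUI-64 */
    #endif
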
+=== include/linux/xip2_fs_sb.h
+==================================================================
+--- include/linux/xip2_fs_sb.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/linux/xip2_fs_sb.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,47 @@
++/*
++ * linux/include/linux/xip2_fs_sb.h, Version 1
++ *
++ * (C) Copyright IBM Corp. 2002,2004
++ * Author(s): Carsten Otte <cotte at de.ibm.com>
++ * derived from second extended filesystem (ext2)
++ */
++
++#ifndef _LINUX_XIP2_FS_SB
++#define _LINUX_XIP2_FS_SB
++
++/*
++ * second extended-fs super-block data in memory
++ */
++struct xip2_sb_info {
++ unsigned long s_frag_size; /* Size of a fragment in bytes */
++ unsigned long s_frags_per_block;/* Number of fragments per block */
++ unsigned long s_inodes_per_block;/* Number of inodes per block */
++ unsigned long s_frags_per_group;/* Number of fragments in a group */
++ unsigned long s_blocks_per_group;/* Number of blocks in a group */
++ unsigned long s_inodes_per_group;/* Number of inodes in a group */
++ unsigned long s_itb_per_group; /* Number of inode table blocks per group */
++ unsigned long s_gdb_count; /* Number of group descriptor blocks */
++ unsigned long s_desc_per_block; /* Number of group descriptors per block */
++ unsigned long s_groups_count; /* Number of groups in the fs */
++ void * s_sbp; /* Pointer to the super block */
++ struct ext2_super_block * s_es; /* Pointer to the super block in buffer */
++ void ** s_group_desc;
++ unsigned short s_loaded_inode_bitmaps;
++ unsigned short s_loaded_block_bitmaps;
++ unsigned long s_inode_bitmap_number[EXT2_MAX_GROUP_LOADED];
++ void * s_inode_bitmap[EXT2_MAX_GROUP_LOADED];
++ unsigned long s_block_bitmap_number[EXT2_MAX_GROUP_LOADED];
++ void * s_block_bitmap[EXT2_MAX_GROUP_LOADED];
++ unsigned long s_mount_opt;
++ uid_t s_resuid;
++ gid_t s_resgid;
++ unsigned short s_mount_state;
++ unsigned short s_pad;
++ int s_addr_per_block_bits;
++ int s_desc_per_block_bits;
++ int s_inode_size;
++ int s_first_ino;
++ void* mem_area;
++};
++
++#endif /* _LINUX_XIP2_FS_SB */
+=== include/linux/inetdevice.h
+==================================================================
+--- include/linux/inetdevice.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/linux/inetdevice.h (/trunk/2.4.27) (revision 52)
+@@ -26,6 +26,7 @@
+ };
+
+ extern struct ipv4_devconf ipv4_devconf;
++extern struct notifier_block *inetaddr_chain;
+
+ struct in_device
+ {
+=== include/linux/timer.h
+==================================================================
+--- include/linux/timer.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/linux/timer.h (/trunk/2.4.27) (revision 52)
+@@ -22,6 +22,9 @@
+
+ extern void add_timer(struct timer_list * timer);
+ extern int del_timer(struct timer_list * timer);
++#ifdef CONFIG_NO_IDLE_HZ
++extern struct timer_list *next_timer_event(void);
++#endif
+
+ #ifdef CONFIG_SMP
+ extern int del_timer_sync(struct timer_list * timer);
+=== include/linux/igmp.h
+==================================================================
+--- include/linux/igmp.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/linux/igmp.h (/trunk/2.4.27) (revision 52)
+@@ -183,6 +183,9 @@
+ unsigned char crcount;
+ };
+
++extern int register_multicast_notifier(struct notifier_block *nb);
++extern int unregister_multicast_notifier(struct notifier_block *nb);
++
+ /* V3 exponential field decoding */
+ #define IGMPV3_MASK(value, nb) ((nb)>=32 ? (value) : ((1<<(nb))-1) & (value))
+ #define IGMPV3_EXP(thresh, nbmant, nbexp, value) \
+=== include/linux/fs.h
+==================================================================
+--- include/linux/fs.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/linux/fs.h (/trunk/2.4.27) (revision 52)
+@@ -733,6 +733,7 @@
+ #include <linux/usbdev_fs_sb.h>
+ #include <linux/cramfs_fs_sb.h>
+ #include <linux/jffs2_fs_sb.h>
++#include <linux/xip2_fs_sb.h>
+
+ extern struct list_head super_blocks;
+ extern spinlock_t sb_lock;
+@@ -792,6 +793,7 @@
+ struct usbdev_sb_info usbdevfs_sb;
+ struct jffs2_sb_info jffs2_sb;
+ struct cramfs_sb_info cramfs_sb;
++ struct xip2_sb_info xip2_sb;
+ void *generic_sbp;
+ } u;
+ /*
+=== include/linux/xip2_fs.h
+==================================================================
+--- include/linux/xip2_fs.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/linux/xip2_fs.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,117 @@
++/*
++ * linux/include/linux/xip2_fs.h, Version 1
++ *
++ * (C) Copyright IBM Corp. 2002,2004
++ * Author(s): Carsten Otte <cotte at de.ibm.com>
++ * derived from second extended filesystem (ext2)
++ */
++
++#ifndef _LINUX_XIP2_FS_H
++#define _LINUX_XIP2_FS_H
++#include <linux/ext2_fs.h>
++
++/* bit operations */
++#define xip2_clear_bit ext2_clear_bit
++#define xip2_test_bit ext2_test_bit
++#define xip2_set_bit ext2_set_bit
++#define xip2_find_next_zero_bit ext2_find_next_zero_bit
++#define xip2_find_first_zero_bit ext2_find_first_zero_bit
++
++/* the memory area structure */
++typedef struct _xip2_mem_area_t {
++ char* name;
++ unsigned long start;
++ unsigned long end;
++} xip2_mem_area_t;
++
++/*
++ * Debug code
++ */
++#undef XIP2FS_DEBUG
++
++#ifdef XIP2FS_DEBUG
++#define xip2_debug(f, a...) { \
++ printk ("XIP2-fs DEBUG (%s, %d): %s:", \
++ __FILE__, __LINE__, __FUNCTION__); \
++ printk (f, ## a); \
++ }
++#else
++# define xip2_debug(f, a...) /**/
++#endif
++
++/*
++ * Useful macros
++ */
++#ifdef __KERNEL__
++#define XIP2_SB(sb) (&((sb)->u.xip2_sb))
++#else
++/* Assume that user mode programs are passing in an ext2fs superblock, not
++ * a kernel struct super_block. This will allow us to call the feature-test
++ * macros from user land. */
++#define XIP2_SB(sb) (sb)
++#endif
++
++
++/* functions defined in fs/xip2/balloc.c */
++extern int xip2_bg_has_super(struct super_block *sb, int group);
++extern unsigned long xip2_bg_num_gdb(struct super_block *sb, int group);
++extern unsigned long xip2_count_free_blocks (struct super_block *);
++extern void xip2_check_blocks_bitmap (struct super_block *);
++extern void* xip2_maread (xip2_mem_area_t* mem_area, int block, int size);
++
++/* dir.c */
++extern ino_t xip2_inode_by_name(struct inode *, struct dentry *);
++extern struct ext2_dir_entry_2 * xip2_find_entry (struct inode *,struct dentry *);
++
++/* functions defined in fs/xip2/ialloc.c */
++extern unsigned long xip2_count_free_inodes (struct super_block *);
++
++/* functions defined in fs/xip2/inode.c */
++extern void xip2_read_inode (struct inode *);
++extern int xip2_get_block(struct inode *inode, long iblock,
++ unsigned long* blockno_result, int create);
++
++/* functions defined in fs/xip2/ioctl.c */
++extern int xip2_ioctl (struct inode *, struct file *, unsigned int,
++ unsigned long);
++
++/* functions defined in fs/xip2/super.c */
++extern void xip2_error (struct super_block *, const char *, const char *, ...)
++ __attribute__ ((format (printf, 3, 4)));
++extern NORET_TYPE void xip2_panic (struct super_block *, const char *,
++ const char *, ...)
++ __attribute__ ((NORET_AND format (printf, 3, 4)));
++extern void xip2_warning (struct super_block *, const char *, const char *, ...)
++ __attribute__ ((format (printf, 3, 4)));
++extern void xip2_update_dynamic_rev (struct super_block *sb);
++extern void xip2_put_super (struct super_block *);
++extern int xip2_remount (struct super_block *, int *, char *);
++extern struct super_block * xip2_read_super (struct super_block *,void *,int);
++extern int xip2_statfs (struct super_block *, struct statfs *);
++
++/*
++ * Inodes and files operations
++ */
++
++/* dir.c */
++extern struct file_operations xip2_dir_operations;
++
++/* file.c */
++extern struct inode_operations xip2_file_inode_operations;
++extern struct file_operations xip2_file_operations;
++extern int xip2_file_mmap(struct file * file, struct vm_area_struct * vma);
++extern struct page * xip2_nopage_in_place(struct vm_area_struct * area,
++ unsigned long address, int unused);
++extern ssize_t xip2_file_read(struct file * filp, char * buf, size_t count,
++ loff_t *ppos);
++
++/* inode.c */
++extern struct address_space_operations xip2_aops;
++
++/* namei.c */
++extern struct inode_operations xip2_dir_inode_operations;
++
++/* symlink.c */
++extern struct inode_operations xip2_fast_symlink_inode_operations;
++extern struct inode_operations xip2_symlink_inode_operations;
++#endif
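
The xip2_debug() macro is printk-based and compiles to nothing unless
XIP2FS_DEBUG is defined; callers use it like printk. A sketch:

    /* 'iblock' and 'inode' come from the surrounding xip2 code */
    xip2_debug("mapping block %ld of inode %lu\n",
               iblock, (unsigned long) inode->i_ino);
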
+=== include/linux/s390net.h
+==================================================================
+--- include/linux/s390net.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/linux/s390net.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,17 @@
++/*
++ * include/linux/s390net.h
++ *
++ * S390 and zSeries version
++ * Copyright (C) 2003 IBM Corporation
++ * Author(s): Erwin Rol <erwinrol at de.ibm.com>
++ *
++ */
++
++#ifndef LINUX_S390NET_H
++#define LINUX_S390NET_H
++
++#define S390NET_IOC_MAGIC 'Z'
++
++#endif /* !LINUX_S390NET_H */
++
++
+=== include/linux/mm.h
+==================================================================
+--- include/linux/mm.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/linux/mm.h (/trunk/2.4.27) (revision 52)
+@@ -308,11 +308,9 @@
+ /* Make it prettier to test the above... */
+ #define UnlockPage(page) unlock_page(page)
+ #define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
+-#define SetPageUptodate(page) \
+- do { \
+- arch_set_page_uptodate(page); \
+- set_bit(PG_uptodate, &(page)->flags); \
+- } while (0)
++#ifndef SetPageUptodate
++#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags);
++#endif
+ #define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
+ #define PageDirty(page) test_bit(PG_dirty, &(page)->flags)
+ #define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags)
+=== include/linux/sched.h
+==================================================================
+--- include/linux/sched.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/linux/sched.h (/trunk/2.4.27) (revision 52)
+@@ -141,6 +141,9 @@
+ extern void cpu_init (void);
+ extern void trap_init(void);
+ extern void update_process_times(int user);
++#ifdef CONFIG_NO_IDLE_HZ
++extern void update_process_times_us(int user, int system);
++#endif
+ extern void update_one_process(struct task_struct *p, unsigned long user,
+ unsigned long system, int cpu);
+
+@@ -585,6 +588,9 @@
+ extern unsigned long itimer_next;
+ extern struct timeval xtime;
+ extern void do_timer(struct pt_regs *);
++#ifdef CONFIG_NO_IDLE_HZ
++extern void do_timer_ticks(int ticks);
++#endif
+
+ extern unsigned int * prof_buffer;
+ extern unsigned long prof_len;
+=== include/linux/tty.h
+==================================================================
+--- include/linux/tty.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/linux/tty.h (/trunk/2.4.27) (revision 52)
+@@ -380,6 +380,13 @@
+ extern void tty_register_devfs (struct tty_driver *driver, unsigned int flags,
+ unsigned minor);
+ extern void tty_unregister_devfs (struct tty_driver *driver, unsigned minor);
++struct devfs_entry;
++extern void tty_register_devfs_name (struct tty_driver *driver,
++ unsigned int flags, unsigned minor,
++ struct devfs_entry *dir, const char *name);
++extern void tty_unregister_devfs_name (struct tty_driver *driver,
++ unsigned minor, struct devfs_entry *dir,
++ const char *name);
+ extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp,
+ int buflen);
+ extern void tty_write_message(struct tty_struct *tty, char *msg);
+=== include/linux/mii.h
+==================================================================
+--- include/linux/mii.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/linux/mii.h (/trunk/2.4.27) (revision 52)
+@@ -19,6 +19,7 @@
+ #define MII_ADVERTISE 0x04 /* Advertisement control reg */
+ #define MII_LPA 0x05 /* Link partner ability reg */
+ #define MII_EXPANSION 0x06 /* Expansion register */
++#define MII_EXTSTATUS 0x0f /* Extended status register */
+ #define MII_DCOUNTER 0x12 /* Disconnect counter */
+ #define MII_FCSCOUNTER 0x13 /* False carrier counter */
+ #define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */
+@@ -32,7 +33,8 @@
+ #define MII_NCONFIG 0x1c /* Network interface config */
+
+ /* Basic mode control register. */
+-#define BMCR_RESV 0x007f /* Unused... */
++#define BMCR_RESV 0x003f /* Unused... */
++#define BMCR_SPEED1000 0x0040 /* Select 1Gbps */
+ #define BMCR_CTST 0x0080 /* Collision test */
+ #define BMCR_FULLDPLX 0x0100 /* Full duplex */
+ #define BMCR_ANRESTART 0x0200 /* Auto negotiation restart */
+@@ -50,13 +52,21 @@
+ #define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
+ #define BMSR_RFAULT 0x0010 /* Remote fault detected */
+ #define BMSR_ANEGCOMPLETE 0x0020 /* Auto-negotiation complete */
+-#define BMSR_RESV 0x07c0 /* Unused... */
++#define BMSR_EXTSTATUS 0x0100 /* extended status */
++#define BMSR_RESV 0x06c0 /* Unused... */
+ #define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
+ #define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
+ #define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
+ #define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
+ #define BMSR_100BASE4 0x8000 /* Can do 100mbps, 4k packets */
+
++/* Extended status register */
++#define EXTSTATUS_RESV 0x0fff /* Unused.. */
++#define EXTSTATUS_1000THALF 0x1000 /* Can do 1000 BASE-T, half-duplex */
++#define EXTSTATUS_1000TFULL 0x2000 /* Can do 1000 BASE-T, full-duplex */
++#define EXTSTATUS_1000XHALF 0x4000 /* Can do 1000 BASE-X, half-duplex */
++#define EXTSTATUS_1000XFULL 0x8000 /* Can do 1000 BASE-X, full-duplex */
++
+ /* Advertisement control register. */
+ #define ADVERTISE_SLCT 0x001f /* Selector bits */
+ #define ADVERTISE_CSMA 0x0001 /* Only selector supported */
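
With these definitions a driver can probe for gigabit ability:
BMSR_EXTSTATUS in the basic status register indicates that the
extended status register is implemented. A sketch, where mdio_read()
stands in for the driver's own MII accessor:

    int bmsr, estat, gig_capable = 0;

    /* mdio_read() is a driver-specific helper, assumed here */
    bmsr = mdio_read(dev, phy_id, MII_BMSR);
    if (bmsr & BMSR_EXTSTATUS) {
            estat = mdio_read(dev, phy_id, MII_EXTSTATUS);
            gig_capable = (estat & (EXTSTATUS_1000THALF |
                                    EXTSTATUS_1000TFULL |
                                    EXTSTATUS_1000XHALF |
                                    EXTSTATUS_1000XFULL)) != 0;
    }
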
+=== include/linux/genhd.h
+==================================================================
+--- include/linux/genhd.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/linux/genhd.h (/trunk/2.4.27) (revision 52)
+@@ -103,6 +103,7 @@
+
+ devfs_handle_t *de_arr; /* one per physical disc */
+ char *flags; /* one per physical disc */
++ devfs_handle_t *label_arr; /* one per physical disc */
+ };
+
+ /* drivers/block/genhd.c */
+=== include/asm-ppc/pgalloc.h
+==================================================================
+--- include/asm-ppc/pgalloc.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-ppc/pgalloc.h (/trunk/2.4.27) (revision 52)
+@@ -150,5 +150,7 @@
+
+ extern int do_check_pgt_cache(int, int);
+
++#include <asm-generic/pgalloc.h>
++
+ #endif /* _PPC_PGALLOC_H */
+ #endif /* __KERNEL__ */
+=== include/asm-mips64/pgalloc.h
+==================================================================
+--- include/asm-mips64/pgalloc.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-mips64/pgalloc.h (/trunk/2.4.27) (revision 52)
+@@ -200,4 +200,6 @@
+
+ extern int do_check_pgt_cache(int, int);
+
++#include <asm-generic/pgalloc.h>
++
+ #endif /* _ASM_PGALLOC_H */
+=== include/asm-sparc64/pgalloc.h
+==================================================================
+--- include/asm-sparc64/pgalloc.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-sparc64/pgalloc.h (/trunk/2.4.27) (revision 52)
+@@ -304,4 +304,6 @@
+
+ extern int do_check_pgt_cache(int, int);
+
++#include <asm-generic/pgalloc.h>
++
+ #endif /* _SPARC64_PGALLOC_H */
+=== include/asm-m68k/pgalloc.h
+==================================================================
+--- include/asm-m68k/pgalloc.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-m68k/pgalloc.h (/trunk/2.4.27) (revision 52)
+@@ -163,4 +163,6 @@
+ #include <asm/motorola_pgalloc.h>
+ #endif
+
++#include <asm-generic/pgalloc.h>
++
+ #endif /* M68K_PGALLOC_H */
+=== include/asm-alpha/pgalloc.h
+==================================================================
+--- include/asm-alpha/pgalloc.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-alpha/pgalloc.h (/trunk/2.4.27) (revision 52)
+@@ -347,4 +347,6 @@
+
+ extern int do_check_pgt_cache(int, int);
+
++#include <asm-generic/pgalloc.h>
++
+ #endif /* _ALPHA_PGALLOC_H */
+=== include/asm-s390x/siginfo.h
+==================================================================
+--- include/asm-s390x/siginfo.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390x/siginfo.h (/trunk/2.4.27) (revision 52)
+@@ -19,7 +19,7 @@
+ } sigval_t;
+
+ #define SI_MAX_SIZE 128
+-#define SI_PAD_SIZE ((SI_MAX_SIZE/sizeof(int)) - 3)
++#define SI_PAD_SIZE ((SI_MAX_SIZE/sizeof(int)) - 4)
+
+ typedef struct siginfo {
+ int si_signo;
+=== include/asm-s390x/smp.h
+==================================================================
+--- include/asm-s390x/smp.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390x/smp.h (/trunk/2.4.27) (revision 52)
+@@ -26,6 +26,9 @@
+ __u16 cpu;
+ } sigp_info;
+
++extern int smp_call_function_on(void (*func) (void *info), void *info,
++ int nonatomic, int wait, int cpu);
++
+ extern unsigned long cpu_online_map;
+
+ #define NO_PROC_ID 0xFF /* No processor magic marker */
+@@ -65,4 +68,9 @@
+ #define cpu_logical_map(cpu) (cpu)
+
+ #endif
++
++#ifndef CONFIG_SMP
++#define smp_call_function_on(func,info,nonatomic,wait,cpu) ({ 0; })
+ #endif
++
++#endif
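
smp_call_function_on() runs a function on one specific CPU and, per
the !CONFIG_SMP stub above, collapses to a successful no-op on
uniprocessor kernels. A minimal sketch of a caller:

    static void drain_local_state(void *info)
    {
            /* executes on the target CPU */
    }

    /* nonatomic=0, wait=1: block until CPU 2 has run the function */
    smp_call_function_on(drain_local_state, NULL, 0, 1, 2);
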
+=== include/asm-s390x/sigp.h
+==================================================================
+--- include/asm-s390x/sigp.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390x/sigp.h (/trunk/2.4.27) (revision 52)
+@@ -107,7 +107,7 @@
+ * Signal processor with parameter and return status
+ */
+ extern __inline__ sigp_ccode
+-signal_processor_ps(__u32 *statusptr, __u64 parameter,
++signal_processor_ps(unsigned long *statusptr, __u64 parameter,
+ __u16 cpu_addr, sigp_order_code order_code)
+ {
+ sigp_ccode ccode;
+=== include/asm-s390x/chandev.h
+==================================================================
+--- include/asm-s390x/chandev.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390x/chandev.h (/trunk/2.4.27) (revision 52)
+@@ -28,6 +28,7 @@
+ chandev_type_osad=0x8,
+ chandev_type_qeth=0x10,
+ chandev_type_claw=0x20,
++ chandev_type_ctcmpc=0x40,
+ } chandev_type;
+
+ typedef enum
+=== include/asm-s390x/lowcore.h
+==================================================================
+--- include/asm-s390x/lowcore.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390x/lowcore.h (/trunk/2.4.27) (revision 52)
+@@ -48,6 +48,7 @@
+ #define __LC_IPLDEV 0xDB8
+
+ #define __LC_JIFFY_TIMER 0xDC0
++#define __LC_INT_CLOCK 0xDC8
+
+ #define __LC_PANIC_MAGIC 0xE00
+
+@@ -164,8 +165,9 @@
+
+ /* SMP info area: defined by DJB */
+ __u64 jiffy_timer; /* 0xdc0 */
+- __u64 ext_call_fast; /* 0xdc8 */
+- __u8 pad12[0xe00-0xdd0]; /* 0xdd0 */
++ __u64 int_clock; /* 0xdc8 */
++ __u64 ext_call_fast; /* 0xdd0 */
++ __u8 pad12[0xe00-0xdd8]; /* 0xdd8 */
+
+ /* 0xe00 is used as indicator for dump tools */
+ /* whether the kernel died with panic() or not */
+=== include/asm-s390x/dasd.h
+==================================================================
+--- include/asm-s390x/dasd.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390x/dasd.h (/trunk/2.4.27) (revision 52)
+@@ -8,7 +8,7 @@
+ * any future changes wrt the API will result in a change of the APIVERSION reported
+ * to userspace by the DASDAPIVER-ioctl
+ *
+- * $Revision: 1.53 $
++ * $Revision: 1.52.6.3 $
+ *
+ * History of changes (starts July 2000)
+ * 05/04/01 created by moving the kernel interface to drivers/s390/block/dasd_int.h
+=== include/asm-s390x/irq.h
+==================================================================
+--- include/asm-s390x/irq.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390x/irq.h (/trunk/2.4.27) (revision 52)
+@@ -42,7 +42,9 @@
+ __u8 chpid[8]; /* CHPID 0-7 (if available) */
+ __u32 unused1 : 8; /* reserved zeros */
+ __u32 st : 3; /* subchannel type */
+- __u32 unused2 : 20; /* reserved zeros */
++ __u32 unused2 : 18; /* reserved zeros */
++ __u32 mbfc : 1; /* measurement block format control */
++ __u32 xmwme : 1; /* extended measurement word mode enable */
+ __u32 csense : 1; /* concurrent sense; can be enabled ...*/
+ /* ... per MSCH, however, if facility */
+ /* ... is not installed, this results */
+@@ -156,7 +158,8 @@
+ typedef struct {
+ pmcw_t pmcw; /* path management control word */
+ scsw_t scsw; /* subchannel status word */
+- __u8 mda[12]; /* model dependent area */
++ __u64 mba; /* measurement block address */
++ __u8 mda[4]; /* model dependent area */
+ } __attribute__ ((packed,aligned(4))) schib_t;
+ #endif /* __KERNEL__ */
+
+=== include/asm-s390x/ccwcache.h
+==================================================================
+--- include/asm-s390x/ccwcache.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390x/ccwcache.h (/trunk/2.4.27) (revision 52)
+@@ -4,7 +4,7 @@
+ * Bugreports.to..: <Linux390 at de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
+ *
+- * $Revision: 1.11 $
++ * $Revision: 1.11.4.2 $
+ *
+ */
+ #ifndef CCWCACHE_H
+=== include/asm-s390x/qeth.h
+==================================================================
+--- include/asm-s390x/qeth.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390x/qeth.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,26 @@
++/*
++ * include/asm-s390x/qeth.h
++ *
++ * S390 and zSeries version
++ * Copyright (C) 2003 IBM Corporation
++ * Author(s): Erwin Rol <erwinrol at de.ibm.com>
++ *
++ */
++
++#include <linux/s390net.h>
++
++#ifndef ASM_QETH_H
++#define ASM_QETH_H
++
++#define QETH_IOCPROC_REGISTER _IOW(S390NET_IOC_MAGIC, 1, int)
++
++#define SIOC_QETH_ARP_SET_NO_ENTRIES _IOWR(S390NET_IOC_MAGIC, 2, int)
++#define SIOC_QETH_ARP_QUERY_INFO _IOWR(S390NET_IOC_MAGIC, 3, int)
++#define SIOC_QETH_ARP_ADD_ENTRY _IOWR(S390NET_IOC_MAGIC, 4, int)
++#define SIOC_QETH_ARP_REMOVE_ENTRY _IOWR(S390NET_IOC_MAGIC, 5, int)
++#define SIOC_QETH_ARP_FLUSH_CACHE _IOWR(S390NET_IOC_MAGIC, 6, int)
++#define SIOC_QETH_ADP_SET_SNMP_CONTROL _IOWR(S390NET_IOC_MAGIC, 7, int)
++#define SIOC_QETH_GET_CARD_TYPE _IOWR(S390NET_IOC_MAGIC, 8, int)
++
++#endif /* !ASM_QETH_H */
++
+=== include/asm-s390x/pgtable.h
+==================================================================
+--- include/asm-s390x/pgtable.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390x/pgtable.h (/trunk/2.4.27) (revision 52)
+@@ -484,9 +484,11 @@
+ __pte; \
+ })
+
+-#define arch_set_page_uptodate(__page) \
++#define SetPageUptodate(_page) \
+ do { \
+- asm volatile ("sske %0,%1" : : "d" (0), \
++ struct page *__page = (_page); \
++ if (!test_and_set_bit(PG_uptodate, &__page->flags)) \
++ asm volatile ("sske %0,%1" : : "d" (0), \
+ "a" (__pa((__page-mem_map) << PAGE_SHIFT)));\
+ } while (0)
+
+=== include/asm-s390x/qdio.h
+==================================================================
+--- include/asm-s390x/qdio.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390x/qdio.h (/trunk/2.4.27) (revision 52)
+@@ -11,7 +11,7 @@
+ #ifndef __QDIO_H__
+ #define __QDIO_H__
+
+-#define VERSION_QDIO_H "$Revision: 1.57 $"
++#define VERSION_QDIO_H "$Revision: 1.57.4.5 $"
+
+ /* note, that most of the typedef's are from ingo. */
+
+@@ -42,11 +42,18 @@
+ #define QDIO_MAX_ELEMENTS_PER_BUFFER 16
+ #define SBAL_SIZE 256
+
+-#define IQDIO_FILL_LEVEL_TO_POLL (QDIO_MAX_BUFFERS_PER_Q*4/3)
++/* unfortunately this can't be (QDIO_MAX_BUFFERS_PER_Q*4/3) or so -- as
++ * we never know whether we'll get the initiative again, e.g. to give the
++ * transmit skb's back to the stack, while the stack may be waiting for
++ * them... therefore we define 4 as the threshold to start polling (which
++ * will stop as soon as the asynchronous queue catches up)
++ * btw, this only applies to the asynchronous HiperSockets queue */
++#define IQDIO_FILL_LEVEL_TO_POLL 4
+
+ #define TIQDIO_THININT_ISC 3
+ #define TIQDIO_DELAY_TARGET 0
+-#define QDIO_BUSY_BIT_PATIENCE 2000 /* in microsecs */
++#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */
++#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */
+ #define IQDIO_GLOBAL_LAPS 2 /* GLOBAL_LAPS are not used as we */
+ #define IQDIO_GLOBAL_LAPS_INT 1 /* don't do a global summary */
+ #define IQDIO_LOCAL_LAPS 4
+@@ -612,6 +619,8 @@
+ typedef struct qdio_q_t {
+ volatile slsb_t slsb;
+
++ char unused[QDIO_MAX_BUFFERS_PER_Q];
++
+ __u32 * volatile dev_st_chg_ind;
+
+ int is_input_q;
+@@ -697,7 +706,9 @@
+ int last_transfer_index; */
+
+ __u64 last_transfer_time;
++ __u64 busy_start;
+ } timing;
++ atomic_t busy_siga_counter;
+ unsigned int queue_type;
+ } __attribute__ ((aligned(256))) qdio_q_t;
+
+@@ -713,7 +724,7 @@
+ unsigned int sync_done_on_outb_pcis;
+
+ unsigned int state;
+- spinlock_t setting_up_lock;
++ struct semaphore setting_up_lock;
+
+ unsigned int no_input_qs;
+ unsigned int no_output_qs;
+@@ -783,7 +794,10 @@
+ int reserved1:21;
+ int isc:3;
+ /* word 9&10 */
+- __u32 reserved2[2];
++ __u32 word_with_d_bit;
++ /* set to 0x10000000 to enable
++ * time delay disablement facility */
++ __u32 reserved2;
+ /* word 11 */
+ __u32 subsystem_id;
+ /* word 12-1015 */
+=== include/asm-s390x/setup.h
+==================================================================
+--- include/asm-s390x/setup.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390x/setup.h (/trunk/2.4.27) (revision 52)
+@@ -32,7 +32,7 @@
+ #define MACHINE_NEW_STIDP (machine_flags & 64)
+ #define MACHINE_HAS_PFIX (0)
+
+-#define MACHINE_HAS_HWC (!MACHINE_IS_P390)
++#define MACHINE_HAS_SCLP (!MACHINE_IS_P390)
+
+ /*
+ * Console mode. Override with conmode=
+@@ -41,10 +41,10 @@
+ extern unsigned int console_device;
+
+ #define CONSOLE_IS_UNDEFINED (console_mode == 0)
+-#define CONSOLE_IS_HWC (console_mode == 1)
++#define CONSOLE_IS_SCLP (console_mode == 1)
+ #define CONSOLE_IS_3215 (console_mode == 2)
+ #define CONSOLE_IS_3270 (console_mode == 3)
+-#define SET_CONSOLE_HWC do { console_mode = 1; } while (0)
++#define SET_CONSOLE_SCLP do { console_mode = 1; } while (0)
+ #define SET_CONSOLE_3215 do { console_mode = 2; } while (0)
+ #define SET_CONSOLE_3270 do { console_mode = 3; } while (0)
+
+=== include/asm-s390x/processor.h
+==================================================================
+--- include/asm-s390x/processor.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390x/processor.h (/trunk/2.4.27) (revision 52)
+@@ -52,6 +52,8 @@
+
+ extern void print_cpu_info(struct cpuinfo_S390 *);
+
++extern void show_trace(unsigned long* esp);
++
+ /* Lazy FPU handling on uni-processor */
+ extern struct task_struct *last_task_used_math;
+
+=== include/asm-s390x/ioctl32.h
+==================================================================
+--- include/asm-s390x/ioctl32.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390x/ioctl32.h (/trunk/2.4.27) (revision 52)
+@@ -8,7 +8,7 @@
+ #ifndef ASM_IOCTL32_H
+ #define ASM_IOCTL32_H
+
+-extern int sys_ioctl(unsigned int, unsigned int, unsigned long, struct file*);
++extern int sys_ioctl(unsigned int, unsigned int, unsigned long);
+ typedef int (*ioctl_trans_handler_t)(unsigned int, unsigned int, unsigned long, struct file *);
+
+ #ifdef CONFIG_S390_SUPPORT
+=== include/asm-s390x/dcss.h
+==================================================================
+--- include/asm-s390x/dcss.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390x/dcss.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,19 @@
++/*
++ * include/asm-s390x/dcss.h
++ *
++ * definitions for discontiguous saved segment support
++ * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ */
++
++#ifndef _ASM_S390X_DCSS_H
++#define _ASM_S390X_DCSS_H
++#ifndef __ASSEMBLY__
++#define SEGMENT_SHARED_RW 0
++#define SEGMENT_SHARED_RO 1
++#define SEGMENT_EXCLUSIVE_RW 2
++#define SEGMENT_EXCLUSIVE_RO 3
++extern int segment_load (char *name,int segtype,unsigned long *addr,unsigned long *length);
++extern void segment_unload(char *name);
++extern void segment_replace(char *name);
++#endif
++#endif
+=== include/asm-s390x/cmb.h
+==================================================================
+--- include/asm-s390x/cmb.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390x/cmb.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,194 @@
++/*
++ * include/asm-s390/cmb.h
++ * include/asm-s390x/cmb.h
++ *
++ * Copyright (C) 2003 IBM Corporation
++ * Author: Arnd Bergmann <arndb at de.ibm.com>
++ */
++
++#ifndef S390_CMB_H
++#define S390_CMB_H
++/**
++ * struct cmbdata -- channel measurement block data for user space
++ *
++ * @size: size of the stored data
++ * @elapsed_time: time since last reset
++ * @ssch_rsch_count: number of I/O attempts, may overflow at
++ * 2^16 or 2^32, depending on machine
++ * @sample_count: number of sampled I/O attempts, should
++ * be identical to @ssch_rsch_count
++ * @device_connect_time: time that device communicated with
++ * channel
++ * @function_pending_time: time between command initiate and
++ * start of execution
++ * @device_disconnect_time: time of device-inactive while
++ * subchannel-active
++ * @control_unit_queuing_time: time that a device is blocked by
++ * I/O from another system
++ * @device_active_only_time: time of device-active while
++ * subchannel-inactive
++ * @device_busy_time: time where the device is busy
++ * when attemting to start I/O
++ * @initial_command_response_time: time between sending and
++ * the device accepting a command
++ *
++ * all values are stored as 64 bit for simplicity, especially
++ * in 32 bit emulation mode. All time values are normalized to
++ * nanoseconds.
++ * Currently, two formats are known, which differ by the size of
++ * this structure, i.e. the last two members are only set when
++ * the extended channel measurement facility (first shipped in
++ * z990 machines) is activated.
++ * Potentially, more fields could be added, which results in a
++ * new ioctl number.
++ **/
++struct cmbdata {
++ __u64 size;
++ __u64 elapsed_time;
++	/* basic and extended format: */
++ __u64 ssch_rsch_count;
++ __u64 sample_count;
++ __u64 device_connect_time;
++ __u64 function_pending_time;
++ __u64 device_disconnect_time;
++ __u64 control_unit_queuing_time;
++ __u64 device_active_only_time;
++ /* extended format only: */
++ __u64 device_busy_time;
++ __u64 initial_command_response_time;
++};
++
++/* enable channel measurement */
++#define BIODASDCMFENABLE _IO(DASD_IOCTL_LETTER,32)
++/* disable channel measurement */
++#define BIODASDCMFDISABLE _IO(DASD_IOCTL_LETTER,33)
++/* read channel measurement data */
++#define BIODASDREADALLCMB _IOWR(DASD_IOCTL_LETTER,33,struct cmbdata)
++
++#ifdef __KERNEL__
++#include <linux/config.h>
++#include <asm/irq.h>
++
++#if defined(CONFIG_S390_CMF) || defined(CONFIG_S390_CMF_MODULE)
++/**
++ * struct cmf_device - basic building block of the CMF code
++ *
++ * @cmb_list: entry in the global list of cmf_devices
++ * @ccwlock: a pointer to the subchannel's spinlock
++ * @cmb_start_time: clock value of previous cmf_reset operation
++ * @cmb: pointer to the hardware channel measurement block
++ * @callback: function pointer called by cmf_device_callback()
++ * @irq: subchannel number of the associated device
++ */
++struct cmf_device {
++ struct list_head cmb_list;
++ spinlock_t *ccwlock;
++ u64 cmb_start_time;
++ void *cmb;
++ void (*callback)(struct cmf_device *);
++ u16 irq;
++};
++
++/**
++ * cmf_device_callback - call the callback function for set_cmf
++ *
++ * cmf_device_callback() can be integrated in an appropriate
++ * point in the device driver where no I/O is ongoing on the
++ * subchannel and it is safe to call set_cmf.
++ * This is a nop when CONFIG_S390_CMF is disabled
++ */
++static inline void cmf_device_callback(struct cmf_device *cdev)
++{
++ if (cdev->callback) {
++ cdev->callback(cdev);
++ }
++}
++
++/**
++ * cmf_device_init - initialize the cmf_device structure
++ *
++ * cmf_device_init() needs to be called before doing any other operations
++ * on a cmf_device.
++ */
++static inline void cmf_device_init(struct cmf_device *cdev, int devno)
++{
++ INIT_LIST_HEAD (&cdev->cmb_list);
++ cdev->irq = get_irq_by_devno(devno);
++ cdev->ccwlock = get_irq_lock(cdev->irq);
++}
++
++#else /* !CONFIG_S390_CMF */
++struct cmf_device { };
++static inline void cmf_device_callback(struct cmf_device *cdev) { }
++static inline void cmf_device_init(struct cmf_device *cdev, int devno) { }
++#endif
++
++/**
++ * enable_cmf() - switch on the channel measurement for a specific device
++ * @cdev: The ccw device to be enabled
++ * returns 0 for success or a negative error value.
++ *
++ * This function only allocates memory for the measurement block, the
++ * actual activation is done with set_cmf()
++ *
++ * Context:
++ * may sleep, device is disabled
++ **/
++extern int enable_cmf(struct cmf_device *cdev);
++
++/**
++ * disable_cmf() - switch off the channel measurement for a specific device
++ * @cdev: The ccw device to be disabled
++ * returns 0 for success or a negative error value.
++ *
++ * This function only frees the memory allocated with enable_cmf. If
++ * measurement has been activated with set_cmf(), it also needs to be
++ * deactivated with that function before calling disable_cmf().
++ *
++ * Context:
++ * may sleep, device is enabled and inactive
++ **/
++extern void disable_cmf(struct cmf_device *cdev);
++
++/**
++ * cmf_readall() - read all values from the current channel measurement block
++ * @cdev: the device to be read
++ * @data: a pointer to a data block that will be filled
++ *
++ * Context:
++ * device is active
++ **/
++extern int cmf_readall(struct cmf_device *cdev, struct cmbdata*data);
++
++/**
++ * set_cmf() - Set the measurement mode for a specific device
++ * @cdev: the device to be modified
++ * @mme: measurement mode enable value, can be either
++ * 0 for deactivation or 2 for activation
++ *
++ * It is important to call this function only when there is no I/O
++ * activity on the subchannel. Therefore it may be necessary to call
++ * it from an interrupt handler at the point where the previous
++ * request is finished.
++ * In 2.6.x, set_cmf() is integrated within enable_cmf() and disable_cmf(),
++ * which makes a lot of sense and life much easier for users.
++ *
++ * Context:
++ * device lock held, device is enabled, subchannel is idle
++ **/
++extern int set_cmf(struct cmf_device *cdev, u32 mme);
++
++/**
++ * cmf_reset() - clear a channel measurement block
++ * @cdev: the device to be cleared
++ *
++ * This function resets all values in a channel measurement block to
++ * sane defaults. It should be called between enable_cmf() and
++ * set_cmf(cdev,2), but can also be called when the device is already active.
++ *
++ * Context:
++ * device is enabled
++ **/
++extern void cmf_reset(struct cmf_device *cdev);
++#endif /* __KERNEL__ */
++#endif /* S390_CMB_H */
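
Putting the kernel-doc above together, a driver would exercise the
measurement facility roughly as follows (sketch; error handling
omitted, 'devno' assumed known):

    struct cmf_device cdev;
    struct cmbdata data;

    cmf_device_init(&cdev, devno);  /* before any other cmf operation */
    enable_cmf(&cdev);              /* allocate the measurement block */
    cmf_reset(&cdev);               /* start from sane values */
    set_cmf(&cdev, 2);              /* activate; subchannel must be idle */
    /* ... I/O runs, counters accumulate ... */
    cmf_readall(&cdev, &data);
    set_cmf(&cdev, 0);              /* deactivate before freeing */
    disable_cmf(&cdev);
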
+=== include/asm-s390x/kmap_types.h
+==================================================================
+--- include/asm-s390x/kmap_types.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390x/kmap_types.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,16 @@
++#ifndef _ASM_KMAP_TYPES_H
++#define _ASM_KMAP_TYPES_H
++
++enum km_type {
++ KM_BOUNCE_READ,
++ KM_SKB_SUNRPC_DATA,
++ KM_SKB_DATA_SOFTIRQ,
++ KM_USER0,
++ KM_USER1,
++ KM_BH_IRQ,
++ KM_SOFTIRQ0,
++ KM_SOFTIRQ1,
++ KM_TYPE_NR
++};
++
++#endif
+=== include/asm-s390x/timer.h
+==================================================================
+--- include/asm-s390x/timer.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390x/timer.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,48 @@
++/*
++ * include/asm-s390/timer.h
++ *
++ * (C) Copyright IBM Corp. 2003
++ * Virtual CPU timer
++ *
++ * Author: Jan Glauber (jang at de.ibm.com)
++ */
++
++#ifndef _ASM_S390_TIMER_H
++#define _ASM_S390_TIMER_H
++
++#include <linux/timer.h>
++
++#define VTIMER_MAX_SLICE (0x7ffffffffffff000LL)
++
++struct vtimer_list {
++ struct list_head entry;
++
++ int cpu;
++ __u64 expires;
++ __u64 interval;
++
++ spinlock_t lock;
++ unsigned long magic;
++
++ void (*function)(unsigned long, struct pt_regs*);
++ unsigned long data;
++};
++
++/* the offset value will wrap after ca. 71 years */
++struct vtimer_queue {
++ struct list_head list;
++ spinlock_t lock;
++ __u64 to_expire; /* current event expire time */
++ __u64 offset; /* list offset to zero */
++ __u64 idle; /* temp var for idle */
++};
++
++void set_vtimer(__u64 expires);
++
++extern void init_virt_timer(struct vtimer_list *timer);
++extern void add_virt_timer(void *new);
++extern void add_virt_timer_periodic(void *new);
++extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires);
++extern int del_virt_timer(struct vtimer_list *timer);
++
++#endif
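
The virtual timer expires in consumed CPU time rather than wall time.
A sketch of a one-shot virtual timer (the expiry value is purely
illustrative):

    static void my_vtimer_fn(unsigned long data, struct pt_regs *regs)
    {
            /* called once this virtual CPU has used up the slice */
    }

    struct vtimer_list vt;

    init_virt_timer(&vt);
    vt.expires = 0x100000ULL;   /* CPU-timer units, illustrative value */
    vt.function = my_vtimer_fn;
    vt.data = 0;
    add_virt_timer(&vt);        /* one-shot; add_virt_timer_periodic()
                                 * would rearm via vt.interval */
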
+=== include/asm-s390x/tape390.h
+==================================================================
+--- include/asm-s390x/tape390.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390x/tape390.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,39 @@
++/*************************************************************************
++ *
++ * tape390.h
++ * enables user programs to display messages on the tape device
++ *
++ * S390 and zSeries version
++ * Copyright (C) 2001 IBM Corporation
++ * Author(s): Despina Papadopoulou <despina_p at de.ibm.com>
++ *
++ *************************************************************************/
++
++#ifndef _TAPE390_H
++#define _TAPE390_H
++
++#define TAPE390_DISPLAY _IOW('d', 1, struct display_struct)
++
++/*
++ * The TAPE390_DISPLAY ioctl calls the Load Display command
++ * which transfers 17 bytes of data from the channel to the subsystem:
++ * - 1 format control byte, and
++ * - two 8-byte messages
++ *
++ * Format control byte:
++ * 0-2: New Message Overlay
++ * 3: Alternate Messages
++ * 4: Blink Message
++ * 5: Display Low/High Message
++ * 6: Reserved
++ * 7: Automatic Load Request
++ *
++ */
++
++typedef struct display_struct {
++ char cntrl;
++ char message1[8];
++ char message2[8];
++} display_struct;
++
++#endif
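
From user space the new ioctl is used as sketched below. The device
node name is illustrative, the header is assumed to be installed as
<asm/tape390.h>, and both messages are fixed 8-byte, blank-padded
fields:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <asm/tape390.h>   /* assumed install location */

    int fd = open("/dev/ntibm0", O_RDWR);   /* illustrative node name */
    display_struct ds;

    memset(&ds, ' ', sizeof(ds));           /* blank-pad both messages */
    ds.cntrl = 0;                           /* format control byte */
    memcpy(ds.message1, "BACKUP  ", 8);
    memcpy(ds.message2, "RUNNING ", 8);
    ioctl(fd, TAPE390_DISPLAY, &ds);
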
+=== include/asm-i386/pgalloc.h
+==================================================================
+--- include/asm-i386/pgalloc.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-i386/pgalloc.h (/trunk/2.4.27) (revision 52)
+@@ -235,4 +235,6 @@
+ flush_tlb_mm(mm);
+ }
+
++#include <asm-generic/pgalloc.h>
++
+ #endif /* _I386_PGALLOC_H */
+=== include/asm-s390/smp.h
+==================================================================
+--- include/asm-s390/smp.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390/smp.h (/trunk/2.4.27) (revision 52)
+@@ -26,6 +26,9 @@
+ __u16 cpu;
+ } sigp_info;
+
++extern int smp_call_function_on(void (*func) (void *info), void *info,
++ int nonatomic, int wait, int cpu);
++
+ extern unsigned long cpu_online_map;
+
+ #define NO_PROC_ID 0xFF /* No processor magic marker */
+@@ -46,23 +49,28 @@
+
+ extern __inline__ int cpu_logical_map(int cpu)
+ {
+- return cpu;
++ return cpu;
+ }
+
+ extern __inline__ int cpu_number_map(int cpu)
+ {
+- return cpu;
++ return cpu;
+ }
+
+ extern __inline__ __u16 hard_smp_processor_id(void)
+ {
+- __u16 cpu_address;
++ __u16 cpu_address;
+
+- __asm__ ("stap %0\n" : "=m" (cpu_address));
+- return cpu_address;
++ __asm__ ("stap %0\n" : "=m" (cpu_address));
++ return cpu_address;
+ }
+
+ #define cpu_logical_map(cpu) (cpu)
+
+ #endif
++
++#ifndef CONFIG_SMP
++#define smp_call_function_on(func,info,nonatomic,wait,cpu) ({ 0; })
+ #endif
++
++#endif
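
The newly exported smp_call_function_on() runs a function on one chosen CPU rather than on all others; a minimal hedged sketch (function body and policy are placeholders, and on !CONFIG_SMP the macro above simply yields 0):

    /* Hedged sketch: run work on a specific CPU, assuming the declaration above. */
    static void drain_local_state(void *info)
    {
            /* executes on the target CPU */
    }

    static void drain_cpu(int cpu)
    {
            /* nonatomic=0, wait=1: return only after the target CPU ran the function */
            smp_call_function_on(drain_local_state, NULL, 0, 1, cpu);
    }
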
+=== include/asm-s390/sigp.h
+==================================================================
+--- include/asm-s390/sigp.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390/sigp.h (/trunk/2.4.27) (revision 52)
+@@ -106,7 +106,7 @@
+ * Signal processor with parameter and return status
+ */
+ extern __inline__ sigp_ccode
+-signal_processor_ps(__u32 *statusptr, __u32 parameter,
++signal_processor_ps(unsigned long *statusptr, __u32 parameter,
+ __u16 cpu_addr, sigp_order_code order_code)
+ {
+ sigp_ccode ccode;
+=== include/asm-s390/chandev.h
+==================================================================
+--- include/asm-s390/chandev.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390/chandev.h (/trunk/2.4.27) (revision 52)
+@@ -28,6 +28,7 @@
+ chandev_type_osad=0x8,
+ chandev_type_qeth=0x10,
+ chandev_type_claw=0x20,
++ chandev_type_ctcmpc=0x40,
+ } chandev_type;
+
+ typedef enum
+=== include/asm-s390/lowcore.h
+==================================================================
+--- include/asm-s390/lowcore.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390/lowcore.h (/trunk/2.4.27) (revision 52)
+@@ -47,6 +47,7 @@
+ #define __LC_IPLDEV 0xC7C
+
+ #define __LC_JIFFY_TIMER 0xC80
++#define __LC_INT_CLOCK 0xC88
+
+ #define __LC_PANIC_MAGIC 0xE00
+
+@@ -165,8 +166,9 @@
+
+ /* SMP info area: defined by DJB */
+ __u64 jiffy_timer; /* 0xc80 */
+- atomic_t ext_call_fast; /* 0xc88 */
+- __u8 pad11[0xe00-0xc8c]; /* 0xc8c */
++ __u64 int_clock; /* 0xc88 */
++ atomic_t ext_call_fast; /* 0xc90 */
++ __u8 pad11[0xe00-0xc94]; /* 0xc94 */
+
+ /* 0xe00 is used as indicator for dump tools */
+ /* whether the kernel died with panic() or not */
+=== include/asm-s390/dasd.h
+==================================================================
+--- include/asm-s390/dasd.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390/dasd.h (/trunk/2.4.27) (revision 52)
+@@ -8,7 +8,7 @@
+ * any future changes wrt the API will result in a change of the APIVERSION reported
+ * to userspace by the DASDAPIVER-ioctl
+ *
+- * $Revision: 1.53 $
++ * $Revision: 1.52.6.3 $
+ *
+ * History of changes (starts July 2000)
+ * 05/04/01 created by moving the kernel interface to drivers/s390/block/dasd_int.h
+=== include/asm-s390/irq.h
+==================================================================
+--- include/asm-s390/irq.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390/irq.h (/trunk/2.4.27) (revision 52)
+@@ -42,7 +42,9 @@
+ __u8 chpid[8]; /* CHPID 0-7 (if available) */
+ __u32 unused1 : 8; /* reserved zeros */
+ __u32 st : 3; /* subchannel type */
+- __u32 unused2 : 20; /* reserved zeros */
++ __u32 unused2 : 18; /* reserved zeros */
++ __u32 mbfc : 1; /* measurement block format control */
++ __u32 xmwme : 1; /* extended measurement word mode enable */
+ __u32 csense : 1; /* concurrent sense; can be enabled ...*/
+ /* ... per MSCH, however, if facility */
+ /* ... is not installed, this results */
+@@ -156,7 +158,8 @@
+ typedef struct {
+ pmcw_t pmcw; /* path management control word */
+ scsw_t scsw; /* subchannel status word */
+- __u8 mda[12]; /* model dependent area */
++ __u64 mba; /* measurement block address */
++ __u8 mda[4]; /* model dependent area */
+ } __attribute__ ((packed,aligned(4))) schib_t;
+ #endif /* __KERNEL__ */
+
+=== include/asm-s390/ccwcache.h
+==================================================================
+--- include/asm-s390/ccwcache.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390/ccwcache.h (/trunk/2.4.27) (revision 52)
+@@ -4,7 +4,7 @@
+ * Bugreports.to..: <Linux390 at de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
+ *
+- * $Revision: 1.9 $
++ * $Revision: 1.9.4.2 $
+ *
+ */
+ #ifndef CCWCACHE_H
+=== include/asm-s390/qeth.h
+==================================================================
+--- include/asm-s390/qeth.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390/qeth.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,26 @@
++/*
++ * include/asm-s390/qeth.h
++ *
++ * S390 and zSeries version
++ * Copyright (C) 2003 IBM Corporation
++ * Author(s): Erwin Rol <erwinrol at de.ibm.com>
++ *
++ */
++
++#include <linux/s390net.h>
++
++#ifndef ASM_QETH_H
++#define ASM_QETH_H
++
++#define QETH_IOCPROC_REGISTER _IOW(S390NET_IOC_MAGIC, 1, int)
++
++#define SIOC_QETH_ARP_SET_NO_ENTRIES _IOWR(S390NET_IOC_MAGIC, 2, int)
++#define SIOC_QETH_ARP_QUERY_INFO _IOWR(S390NET_IOC_MAGIC, 3, int)
++#define SIOC_QETH_ARP_ADD_ENTRY _IOWR(S390NET_IOC_MAGIC, 4, int)
++#define SIOC_QETH_ARP_REMOVE_ENTRY _IOWR(S390NET_IOC_MAGIC, 5, int)
++#define SIOC_QETH_ARP_FLUSH_CACHE _IOWR(S390NET_IOC_MAGIC, 6, int)
++#define SIOC_QETH_ADP_SET_SNMP_CONTROL _IOWR(S390NET_IOC_MAGIC, 7, int)
++#define SIOC_QETH_GET_CARD_TYPE _IOWR(S390NET_IOC_MAGIC, 8, int)
++
++#endif /* !ASM_QETH_H */
++
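These S390NET ioctls travel through the normal device ioctl path; the net/core/dev.c hunk further below whitelists any command whose _IOC_TYPE equals S390NET_IOC_MAGIC. A hedged user-space sketch for SIOC_QETH_GET_CARD_TYPE, assuming the usual struct ifreq convention for per-device ioctls (the interface name is an example, and the meaning of the return value is not specified by this header):

    /* Hedged sketch: ask the qeth driver for the card type of an interface. */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <unistd.h>
    #include <asm/qeth.h>           /* assumes the header is visible to user space */

    int qeth_card_type(const char *ifname)
    {
            struct ifreq ifr;
            int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return -1;
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ret = ioctl(fd, SIOC_QETH_GET_CARD_TYPE, &ifr);
            close(fd);
            return ret;             /* exact semantics are driver-defined */
    }
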
+=== include/asm-s390/pgtable.h
+==================================================================
+--- include/asm-s390/pgtable.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390/pgtable.h (/trunk/2.4.27) (revision 52)
+@@ -426,9 +426,12 @@
+ __pte; \
+ })
+
+-#define arch_set_page_uptodate(__page) \
++#define SetPageUptodate(_page) \
+ do { \
+- asm volatile ("sske %0,%1" : : "d" (get_storage_key()), \
++ struct page *__page = (_page); \
++ if (!test_and_set_bit(PG_uptodate, &__page->flags)) \
++ asm volatile ("sske %0,%1" \
++ : : "d" (get_storage_key()), \
+ "a" (__pa((__page-mem_map) << PAGE_SHIFT)));\
+ } while (0)
+
+=== include/asm-s390/qdio.h
+==================================================================
+--- include/asm-s390/qdio.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390/qdio.h (/trunk/2.4.27) (revision 52)
+@@ -11,7 +11,7 @@
+ #ifndef __QDIO_H__
+ #define __QDIO_H__
+
+-#define VERSION_QDIO_H "$Revision: 1.66 $"
++#define VERSION_QDIO_H "$Revision: 1.66.4.5 $"
+
+ /* note that most of the typedefs are from Ingo. */
+
+@@ -42,11 +42,18 @@
+ #define QDIO_MAX_ELEMENTS_PER_BUFFER 16
+ #define SBAL_SIZE 256
+
+-#define IQDIO_FILL_LEVEL_TO_POLL (QDIO_MAX_BUFFERS_PER_Q*4/3)
++/* unfortunately this can't be (QDIO_MAX_BUFFERS_PER_Q*4/3) or so, as
++ * we never know whether we'll get initiative again, e.g. to give the
++ * transmit skbs back to the stack, while the stack may be waiting for
++ * them... therefore we define 4 as the threshold to start polling (which
++ * stops as soon as the asynchronous queue catches up);
++ * btw, this only applies to the asynchronous HiperSockets queue */
++#define IQDIO_FILL_LEVEL_TO_POLL 4
+
+ #define TIQDIO_THININT_ISC 3
+ #define TIQDIO_DELAY_TARGET 0
+-#define QDIO_BUSY_BIT_PATIENCE 2000 /* in microsecs */
++#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */
++#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */
+ #define IQDIO_GLOBAL_LAPS 2 /* GLOBAL_LAPS are not used as we */
+ #define IQDIO_GLOBAL_LAPS_INT 1 /* don't do global summary */
+ #define IQDIO_LOCAL_LAPS 4
+@@ -612,6 +619,8 @@
+ typedef struct qdio_q_t {
+ volatile slsb_t slsb;
+
++ char unused[QDIO_MAX_BUFFERS_PER_Q];
++
+ __u32 * volatile dev_st_chg_ind;
+
+ int is_input_q;
+@@ -697,7 +706,9 @@
+ int last_transfer_index; */
+
+ __u64 last_transfer_time;
++ __u64 busy_start;
+ } timing;
++ atomic_t busy_siga_counter;
+ unsigned int queue_type;
+ } __attribute__ ((aligned(256))) qdio_q_t;
+
+@@ -713,7 +724,7 @@
+ unsigned int sync_done_on_outb_pcis;
+
+ unsigned int state;
+- spinlock_t setting_up_lock;
++ struct semaphore setting_up_lock;
+
+ unsigned int no_input_qs;
+ unsigned int no_output_qs;
+@@ -783,7 +794,10 @@
+ int reserved1:21;
+ int isc:3;
+ /* word 9&10 */
+- __u32 reserved2[2];
++ __u32 word_with_d_bit;
++ /* set to 0x10000000 to enable
++ * time delay disablement facility */
++ __u32 reserved2;
+ /* word 11 */
+ __u32 subsystem_id;
+ /* word 12-1015 */
+=== include/asm-s390/setup.h
+==================================================================
+--- include/asm-s390/setup.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390/setup.h (/trunk/2.4.27) (revision 52)
+@@ -25,15 +25,16 @@
+ */
+ extern unsigned long machine_flags;
+
+-#define MACHINE_IS_VM (machine_flags & 1)
+-#define MACHINE_HAS_IEEE (machine_flags & 2)
+-#define MACHINE_IS_P390 (machine_flags & 4)
+-#define MACHINE_HAS_CSP (machine_flags & 8)
+-#define MACHINE_HAS_MVPG (machine_flags & 16)
++#define MACHINE_IS_VM (machine_flags & 1)
++#define MACHINE_HAS_IEEE (machine_flags & 2)
++#define MACHINE_IS_P390 (machine_flags & 4)
++#define MACHINE_HAS_CSP (machine_flags & 8)
++#define MACHINE_HAS_MVPG (machine_flags & 16)
++/* 32 is MACHINE_HAS_DIAG44 on s390x */
+ #define MACHINE_NEW_STIDP (machine_flags & 64)
+-#define MACHINE_HAS_PFIX (machine_flags & 128)
++#define MACHINE_HAS_PFIX (machine_flags & 128)
+
+-#define MACHINE_HAS_HWC (!MACHINE_IS_P390)
++#define MACHINE_HAS_SCLP (!MACHINE_IS_P390)
+
+ /*
+ * Console mode. Override with conmode=
+@@ -42,10 +43,10 @@
+ extern unsigned int console_device;
+
+ #define CONSOLE_IS_UNDEFINED (console_mode == 0)
+-#define CONSOLE_IS_HWC (console_mode == 1)
++#define CONSOLE_IS_SCLP (console_mode == 1)
+ #define CONSOLE_IS_3215 (console_mode == 2)
+ #define CONSOLE_IS_3270 (console_mode == 3)
+-#define SET_CONSOLE_HWC do { console_mode = 1; } while (0)
++#define SET_CONSOLE_SCLP do { console_mode = 1; } while (0)
+ #define SET_CONSOLE_3215 do { console_mode = 2; } while (0)
+ #define SET_CONSOLE_3270 do { console_mode = 3; } while (0)
+
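
The HWC console was renamed to SCLP throughout this hunk; a minimal hedged sketch of the renamed macros in use (the selection policy shown is illustrative, not the actual setup code):

    /* Hedged sketch: console selection with the machine-flag macros above. */
    static void my_choose_console(void)
    {
            if (!CONSOLE_IS_UNDEFINED)
                    return;                 /* conmode= already decided it */
            if (MACHINE_IS_VM)
                    SET_CONSOLE_3215;       /* example policy under VM */
            else if (MACHINE_HAS_SCLP)
                    SET_CONSOLE_SCLP;
    }
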
+=== include/asm-s390/processor.h
+==================================================================
+--- include/asm-s390/processor.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390/processor.h (/trunk/2.4.27) (revision 52)
+@@ -50,6 +50,8 @@
+
+ extern void print_cpu_info(struct cpuinfo_S390 *);
+
++extern void show_trace(unsigned long* esp);
++
+ /* Lazy FPU handling on uni-processor */
+ extern struct task_struct *last_task_used_math;
+
+=== include/asm-s390/ioctl32.h
+==================================================================
+--- include/asm-s390/ioctl32.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390/ioctl32.h (/trunk/2.4.27) (revision 52)
+@@ -8,7 +8,7 @@
+ #ifndef ASM_IOCTL32_H
+ #define ASM_IOCTL32_H
+
+-extern int sys_ioctl(unsigned int, unsigned int, unsigned long, struct file*);
++extern int sys_ioctl(unsigned int, unsigned int, unsigned long);
+ typedef int (*ioctl_trans_handler_t)(unsigned int, unsigned int, unsigned long, struct file *);
+
+ #ifdef CONFIG_S390_SUPPORT
+=== include/asm-s390/dcss.h
+==================================================================
+--- include/asm-s390/dcss.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390/dcss.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,19 @@
++/*
++ * include/asm-s390/dcss.h
++ *
++ * definitions for discontiguous saved segment support
++ * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ */
++
++#ifndef _ASM_S390_DCSS_H
++#define _ASM_S390_DCSS_H
++#ifndef __ASSEMBLY__
++#define SEGMENT_SHARED_RW 0
++#define SEGMENT_SHARED_RO 1
++#define SEGMENT_EXCLUSIVE_RW 2
++#define SEGMENT_EXCLUSIVE_RO 3
++extern int segment_load (char *name,int segtype,unsigned long *addr,unsigned long *length);
++extern void segment_unload(char *name);
++extern void segment_replace(char *name);
++#endif
++#endif
+=== include/asm-s390/cmb.h
+==================================================================
+--- include/asm-s390/cmb.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390/cmb.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,194 @@
++/*
++ * include/asm-s390/cmb.h
++ * include/asm-s390x/cmb.h
++ *
++ * Copyright (C) 2003 IBM Corporation
++ * Author: Arnd Bergmann <arndb at de.ibm.com>
++ */
++
++#ifndef S390_CMB_H
++#define S390_CMB_H
++/**
++ * struct cmbdata -- channel measurement block data for user space
++ *
++ * @size: size of the stored data
++ * @elapsed_time: time since last reset
++ * @ssch_rsch_count: number of I/O attempts, may overflow at
++ * 2^16 or 2^32, depending on machine
++ * @sample_count: number of sampled I/O attempts, should
++ * be identical to @ssch_rsch_count
++ * @device_connect_time: time that device communicated with
++ * channel
++ * @function_pending_time: time between command initiate and
++ * start of execution
++ * @device_disconnect_time: time of device-inactive while
++ * subchannel-active
++ * @control_unit_queuing_time: time that a device is blocked by
++ * I/O from another system
++ * @device_active_only_time: time of device-active while
++ * subchannel-inactive
++ * @device_busy_time: time that the device is busy
++ * when attempting to start I/O
++ * @initial_command_response_time: time between sending and
++ * the device accepting a command
++ *
++ * all values are stored as 64 bit for simplicity, especially
++ * in 32 bit emulation mode. All time values are normalized to
++ * nanoseconds.
++ * Currently, two formats are known, which differ by the size of
++ * this structure, i.e. the last two members are only set when
++ * the extended channel measurement facility (first shipped in
++ * z990 machines) is activated.
++ * Potentially, more fields could be added, which results in a
++ * new ioctl number.
++ **/
++struct cmbdata {
++ __u64 size;
++ __u64 elapsed_time;
++ /* basic and extended format: */
++ __u64 ssch_rsch_count;
++ __u64 sample_count;
++ __u64 device_connect_time;
++ __u64 function_pending_time;
++ __u64 device_disconnect_time;
++ __u64 control_unit_queuing_time;
++ __u64 device_active_only_time;
++ /* extended format only: */
++ __u64 device_busy_time;
++ __u64 initial_command_response_time;
++};
++
++/* enable channel measurement */
++#define BIODASDCMFENABLE _IO(DASD_IOCTL_LETTER,32)
++/* disable channel measurement */
++#define BIODASDCMFDISABLE _IO(DASD_IOCTL_LETTER,33)
++/* read channel measurement data */
++#define BIODASDREADALLCMB _IOWR(DASD_IOCTL_LETTER,33,struct cmbdata)
++
++#ifdef __KERNEL__
++#include <linux/config.h>
++#include <asm/irq.h>
++
++#if defined(CONFIG_S390_CMF) || defined(CONFIG_S390_CMF_MODULE)
++/**
++ * struct cmf_device - basic building block of the CMF code
++ *
++ * @cmb_list: entry in the global list of cmf_devices
++ * @ccwlock: a pointer to the subchannel's spinlock
++ * @cmb_start_time: clock value of previous cmf_reset operation
++ * @cmb: pointer to the hardware channel measurement block
++ * @callback: function pointer called by cmf_device_callback()
++ * @irq: subchannel number of the associated device
++ */
++struct cmf_device {
++ struct list_head cmb_list;
++ spinlock_t *ccwlock;
++ u64 cmb_start_time;
++ void *cmb;
++ void (*callback)(struct cmf_device *);
++ u16 irq;
++};
++
++/**
++ * cmf_device_callback - call the callback function for set_cmf
++ *
++ * cmf_device_callback() can be integrated at an appropriate
++ * point in the device driver where no I/O is ongoing on the
++ * subchannel and it is safe to call set_cmf.
++ * This is a nop when CONFIG_S390_CMF is disabled
++ */
++static inline void cmf_device_callback(struct cmf_device *cdev)
++{
++ if (cdev->callback) {
++ cdev->callback(cdev);
++ }
++}
++
++/**
++ * cmf_device_init - initialize the cmf_device structure
++ *
++ * cmf_device_init() needs to be called before doing any other operations
++ * on a cmf_device.
++ */
++static inline void cmf_device_init(struct cmf_device *cdev, int devno)
++{
++ INIT_LIST_HEAD (&cdev->cmb_list);
++ cdev->irq = get_irq_by_devno(devno);
++ cdev->ccwlock = get_irq_lock(cdev->irq);
++}
++
++#else /* !CONFIG_S390_CMF */
++struct cmf_device { };
++static inline void cmf_device_callback(struct cmf_device *cdev) { }
++static inline void cmf_device_init(struct cmf_device *cdev, int devno) { }
++#endif
++
++/**
++ * enable_cmf() - switch on the channel measurement for a specific device
++ * @cdev: The ccw device to be enabled
++ * returns 0 for success or a negative error value.
++ *
++ * This function only allocates memory for the measurement block, the
++ * actual activation is done with set_cmf()
++ *
++ * Context:
++ * may sleep, device is disabled
++ **/
++extern int enable_cmf(struct cmf_device *cdev);
++
++/**
++ * disable_cmf() - switch off the channel measurement for a specific device
++ * @cdev: The ccw device to be disabled
++ * returns 0 for success or a negative error value.
++ *
++ * This function only frees the memory allocated with enable_cmf. If
++ * measurement has been activated with set_cmf(), it also needs to be
++ * deactivated with that function before calling disable_cmf().
++ *
++ * Context:
++ * may sleep, device is enabled and inactive
++ **/
++extern void disable_cmf(struct cmf_device *cdev);
++
++/**
++ * cmf_readall() - read all values from the current channel measurement block
++ * @cdev: the device to be read
++ * @data: a pointer to a data block that will be filled
++ *
++ * Context:
++ * device is active
++ **/
++extern int cmf_readall(struct cmf_device *cdev, struct cmbdata *data);
++
++/**
++ * set_cmf() - Set the measurement mode for a specific device
++ * @cdev: the device to be modified
++ * @mme: measurement mode enable value, can be either
++ * 0 for deactivation or 2 for activation
++ *
++ * It is important to call this function only when there is no I/O
++ * activity on the subchannel. Therefore it may be necessary to call
++ * it from an interrupt handler at the point where the previous
++ * request is finished.
++ * In 2.6.x, set_cmf() is integrated within enable_cmf() and disable_cmf(),
++ * which makes a lot of sense and life much easier for users.
++ *
++ * Context:
++ * device lock held, device is enabled, subchannel is idle
++ **/
++extern int set_cmf(struct cmf_device *cdev, u32 mme);
++
++/**
++ * cmf_reset() - clear a channel measurement block
++ * @cdev: the device to be cleared
++ *
++ * This function is used to set all values in a channel measurement block
++ * to sane values. It should be called between enable_cmf() and set_cmf(cdev,2)
++ * but can also be called when the device is already active.
++ *
++ * Context:
++ * device is enabled
++ **/
++extern void cmf_reset(struct cmf_device *cdev);
++#endif /* __KERNEL__ */
++#endif /* S390_CMB_H */
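
Tying the kernel-doc above together, a hedged sketch of the lifecycle a driver would follow: initialize, enable, reset, activate, sample, deactivate, free. The my_-prefixed names are placeholders; the call order follows the Context notes above:

    /* Hedged sketch: channel measurement lifecycle, assuming only the API above. */
    #include <asm/cmb.h>

    static struct cmf_device my_cmf;

    static int my_cmf_start(int devno)
    {
            int ret;

            cmf_device_init(&my_cmf, devno);   /* must precede all other cmf calls */
            ret = enable_cmf(&my_cmf);         /* allocates the measurement block */
            if (ret)
                    return ret;
            cmf_reset(&my_cmf);                /* start from sane values */
            return set_cmf(&my_cmf, 2);        /* mme=2 activates; subchannel must be idle */
    }

    static int my_cmf_sample(struct cmbdata *data)
    {
            return cmf_readall(&my_cmf, data); /* all times normalized to nanoseconds */
    }

    static void my_cmf_stop(void)
    {
            set_cmf(&my_cmf, 0);               /* deactivate before freeing */
            disable_cmf(&my_cmf);
    }
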
+=== include/asm-s390/kmap_types.h
+==================================================================
+--- include/asm-s390/kmap_types.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390/kmap_types.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,16 @@
++#ifndef _ASM_KMAP_TYPES_H
++#define _ASM_KMAP_TYPES_H
++
++enum km_type {
++ KM_BOUNCE_READ,
++ KM_SKB_SUNRPC_DATA,
++ KM_SKB_DATA_SOFTIRQ,
++ KM_USER0,
++ KM_USER1,
++ KM_BH_IRQ,
++ KM_SOFTIRQ0,
++ KM_SOFTIRQ1,
++ KM_TYPE_NR
++};
++
++#endif
+=== include/asm-s390/timer.h
+==================================================================
+--- include/asm-s390/timer.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390/timer.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,49 @@
++/*
++ * include/asm-s390/timer.h
++ *
++ * (C) Copyright IBM Corp. 2003
++ * Virtual CPU timer
++ *
++ * Author: Jan Glauber (jang at de.ibm.com)
++ */
++
++#ifndef _ASM_S390_TIMER_H
++#define _ASM_S390_TIMER_H
++
++#include <linux/timer.h>
++
++#define VTIMER_MAX_SLICE (0x7ffffffffffff000LL)
++
++struct vtimer_list {
++ struct list_head entry;
++
++ int cpu;
++ __u64 expires;
++ __u64 interval;
++
++ spinlock_t lock;
++ unsigned long magic;
++
++ void (*function)(unsigned long, struct pt_regs*);
++ unsigned long data;
++};
++
++/* the offset value will wrap after ca. 71 years */
++struct vtimer_queue {
++ struct list_head list;
++ spinlock_t lock;
++ __u64 to_expire; /* current event expire time */
++ __u64 offset; /* list offset to zero */
++ __u64 idle; /* temp var for idle */
++};
++
++void set_vtimer(__u64 expires);
++
++extern void init_virt_timer(struct vtimer_list *timer);
++extern void add_virt_timer(void *new);
++extern void add_virt_timer_periodic(void *new);
++extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires);
++extern int del_virt_timer(struct vtimer_list *timer);
++
++extern atomic_t active_cpu_timer;
++#endif
+=== include/asm-s390/tape390.h
+==================================================================
+--- include/asm-s390/tape390.h (/upstream/vanilla/2.4.27) (revision 52)
++++ include/asm-s390/tape390.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,39 @@
++/*************************************************************************
++ *
++ * tape390.h
++ * enables user programs to display messages on the tape device
++ *
++ * S390 and zSeries version
++ * Copyright (C) 2001 IBM Corporation
++ * Author(s): Despina Papadopoulou <despina_p at de.ibm.com>
++ *
++ *************************************************************************/
++
++#ifndef _TAPE390_H
++#define _TAPE390_H
++
++#define TAPE390_DISPLAY _IOW('d', 1, struct display_struct)
++
++/*
++ * The TAPE390_DISPLAY ioctl calls the Load Display command
++ * which transfers 17 bytes of data from the channel to the subsystem:
++ * - 1 format control byte, and
++ * - two 8-byte messages
++ *
++ * Format control byte:
++ * 0-2: New Message Overlay
++ * 3: Alternate Messages
++ * 4: Blink Message
++ * 5: Display Low/High Message
++ * 6: Reserved
++ * 7: Automatic Load Request
++ *
++ */
++
++typedef struct display_struct {
++ char cntrl;
++ char message1[8];
++ char message2[8];
++} display_struct;
++
++#endif
+=== net/netsyms.c
+==================================================================
+--- net/netsyms.c (/upstream/vanilla/2.4.27) (revision 52)
++++ net/netsyms.c (/trunk/2.4.27) (revision 52)
+@@ -289,7 +289,10 @@
+ EXPORT_SYMBOL(devinet_ioctl);
+ EXPORT_SYMBOL(register_inetaddr_notifier);
+ EXPORT_SYMBOL(unregister_inetaddr_notifier);
+-
++#ifdef CONFIG_IP_MULTICAST
++EXPORT_SYMBOL(register_multicast_notifier);
++EXPORT_SYMBOL(unregister_multicast_notifier);
++#endif
+ /* needed for ip_gre -cw */
+ EXPORT_SYMBOL(ip_statistics);
+
+=== net/core/dev.c
+==================================================================
+--- net/core/dev.c (/upstream/vanilla/2.4.27) (revision 52)
++++ net/core/dev.c (/trunk/2.4.27) (revision 52)
+@@ -104,6 +104,7 @@
+ #include <linux/wireless.h> /* Note : will define WIRELESS_EXT */
+ #include <net/iw_handler.h>
+ #endif /* CONFIG_NET_RADIO || CONFIG_NET_PCMCIA_RADIO */
++#include <linux/s390net.h>
+ #ifdef CONFIG_PLIP
+ extern int plip_init(void);
+ #endif
+@@ -2194,6 +2195,7 @@
+ default:
+ if ((cmd >= SIOCDEVPRIVATE &&
+ cmd <= SIOCDEVPRIVATE + 15) ||
++ _IOC_TYPE(cmd) == S390NET_IOC_MAGIC ||
+ cmd == SIOCBONDENSLAVE ||
+ cmd == SIOCBONDRELEASE ||
+ cmd == SIOCBONDSETHWADDR ||
+@@ -2383,6 +2385,7 @@
+
+ default:
+ if (cmd == SIOCWANDEV ||
++ _IOC_TYPE(cmd) == S390NET_IOC_MAGIC ||
+ (cmd >= SIOCDEVPRIVATE &&
+ cmd <= SIOCDEVPRIVATE + 15)) {
+ dev_load(ifr.ifr_name);
+=== net/802/tr.c
+==================================================================
+--- net/802/tr.c (/upstream/vanilla/2.4.27) (revision 52)
++++ net/802/tr.c (/trunk/2.4.27) (revision 52)
+@@ -327,9 +327,9 @@
+ int i;
+ unsigned int hash, rii_p = 0;
+ rif_cache entry;
+- unsigned long flags;
+
+- spin_lock_irqsave(&rif_lock, flags);
++
++ spin_lock_bh(&rif_lock);
+
+ /*
+ * Firstly see if the entry exists
+@@ -368,7 +368,7 @@
+ if(!entry)
+ {
+ printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
+- spin_unlock_irqrestore(&rif_lock,flags);
++ spin_unlock_bh(&rif_lock);
+ return;
+ }
+
+@@ -410,7 +410,7 @@
+ }
+ entry->last_used=jiffies;
+ }
+- spin_unlock_irqrestore(&rif_lock,flags);
++ spin_unlock_bh(&rif_lock);
+ }
+
+ /*
+=== net/8021q/vlan.c
+==================================================================
+--- net/8021q/vlan.c (/upstream/vanilla/2.4.27) (revision 52)
++++ net/8021q/vlan.c (/trunk/2.4.27) (revision 52)
+@@ -444,6 +444,10 @@
+ /* IFF_BROADCAST|IFF_MULTICAST; ??? */
+ new_dev->flags = real_dev->flags;
+ new_dev->flags &= ~IFF_UP;
++#ifdef CONFIG_SHARED_IPV6_CARDS
++ new_dev->features |= (real_dev->features & NETIF_F_SHARED_IPV6);
++ new_dev->dev_id = real_dev->dev_id;
++#endif
+
+ /* Make this thing known as a VLAN device */
+ new_dev->priv_flags |= IFF_802_1Q_VLAN;
+@@ -482,16 +486,16 @@
+ new_dev->stop = vlan_dev_stop;
+
+ if (real_dev->features & NETIF_F_HW_VLAN_TX) {
+- new_dev->hard_header = real_dev->hard_header;
++ new_dev->hard_header = real_dev->hard_header;
+ new_dev->hard_start_xmit = vlan_dev_hwaccel_hard_start_xmit;
+- new_dev->rebuild_header = real_dev->rebuild_header;
++ new_dev->rebuild_header = real_dev->rebuild_header;
+ } else {
+- new_dev->hard_header = vlan_dev_hard_header;
++ new_dev->hard_header = vlan_dev_hard_header;
+ new_dev->hard_start_xmit = vlan_dev_hard_start_xmit;
+- new_dev->rebuild_header = vlan_dev_rebuild_header;
++ new_dev->rebuild_header = vlan_dev_rebuild_header;
+ }
+- new_dev->hard_header_parse = real_dev->hard_header_parse;
+- new_dev->set_mac_address = vlan_dev_set_mac_address;
++ new_dev->hard_header_parse = real_dev->hard_header_parse;
++ new_dev->set_mac_address = vlan_dev_set_mac_address;
+ new_dev->set_multicast_list = vlan_dev_set_multicast_list;
+
+ VLAN_DEV_INFO(new_dev)->vlan_id = VLAN_ID; /* 1 through VLAN_VID_MASK */
+=== net/ipv4/igmp.c
+==================================================================
+--- net/ipv4/igmp.c (/upstream/vanilla/2.4.27) (revision 52)
++++ net/ipv4/igmp.c (/trunk/2.4.27) (revision 52)
+@@ -152,6 +152,18 @@
+
+ #ifdef CONFIG_IP_MULTICAST
+
++static struct notifier_block *multicast_chain;
++
++int register_multicast_notifier(struct notifier_block *nb)
++{
++ return notifier_chain_register(&multicast_chain, nb);
++}
++
++int unregister_multicast_notifier(struct notifier_block *nb)
++{
++ return notifier_chain_unregister(&multicast_chain,nb);
++}
++
+ /*
+ * Timer management
+ */
+@@ -1164,6 +1176,7 @@
+ igmp_group_added(im);
+ if (!in_dev->dead)
+ ip_rt_multicast_event(in_dev);
++ notifier_call_chain(&multicast_chain, NETDEV_REGISTER, im);
+ out:
+ return;
+ }
+@@ -1189,6 +1202,9 @@
+ if (!in_dev->dead)
+ ip_rt_multicast_event(in_dev);
+
++ notifier_call_chain(&multicast_chain,
++ NETDEV_UNREGISTER,
++ i);
+ ip_ma_put(i);
+ return;
+ }
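
A hedged sketch of a module consuming the new chain; register_multicast_notifier() and the NETDEV_REGISTER/NETDEV_UNREGISTER events carrying a struct ip_mc_list pointer follow the hunk above (and the export is only built with CONFIG_IP_MULTICAST, per the net/netsyms.c hunk), while the names and the handler body are illustrative:

    /* Hedged sketch: watch IPv4 multicast group joins/leaves via the chain above. */
    #include <linux/module.h>
    #include <linux/notifier.h>
    #include <linux/netdevice.h>
    #include <linux/igmp.h>

    static int my_mc_event(struct notifier_block *self, unsigned long event, void *ptr)
    {
            struct ip_mc_list *im = (struct ip_mc_list *) ptr; /* as passed above */

            (void) im;
            if (event == NETDEV_REGISTER) {
                    /* a group was joined */
            } else if (event == NETDEV_UNREGISTER) {
                    /* a group was left */
            }
            return NOTIFY_DONE;
    }

    static struct notifier_block my_mc_nb = { notifier_call: my_mc_event };

    static int __init my_mc_init(void)
    {
            return register_multicast_notifier(&my_mc_nb);
    }

    static void __exit my_mc_exit(void)
    {
            unregister_multicast_notifier(&my_mc_nb);
    }

    module_init(my_mc_init);
    module_exit(my_mc_exit);
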
+=== net/ipv6/ipv6_syms.c
+==================================================================
+--- net/ipv6/ipv6_syms.c (/upstream/vanilla/2.4.27) (revision 52)
++++ net/ipv6/ipv6_syms.c (/trunk/2.4.27) (revision 52)
+@@ -14,6 +14,8 @@
+ EXPORT_SYMBOL(ndisc_mc_map);
+ EXPORT_SYMBOL(register_inet6addr_notifier);
+ EXPORT_SYMBOL(unregister_inet6addr_notifier);
++EXPORT_SYMBOL(register_multicast6_notifier);
++EXPORT_SYMBOL(unregister_multicast6_notifier);
+ EXPORT_SYMBOL(ip6_route_output);
+ #ifdef CONFIG_NETFILTER
+ EXPORT_SYMBOL(ip6_route_me_harder);
+=== net/ipv6/mcast.c
+==================================================================
+--- net/ipv6/mcast.c (/upstream/vanilla/2.4.27) (revision 52)
++++ net/ipv6/mcast.c (/trunk/2.4.27) (revision 52)
+@@ -127,6 +127,18 @@
+
+ static struct socket *igmp6_socket;
+
++static struct notifier_block *multicast_chain;
++
++int register_multicast6_notifier(struct notifier_block *nb)
++{
++ return notifier_chain_register(&multicast_chain, nb);
++}
++
++int unregister_multicast6_notifier(struct notifier_block *nb)
++{
++ return notifier_chain_unregister(&multicast_chain,nb);
++}
++
+ static void igmp6_join_group(struct ifmcaddr6 *ma);
+ static void igmp6_leave_group(struct ifmcaddr6 *ma);
+ static void igmp6_timer_handler(unsigned long data);
+@@ -857,6 +869,7 @@
+
+ mld_del_delrec(idev, &mc->mca_addr);
+ igmp6_group_added(mc);
++ notifier_call_chain(&multicast_chain, NETDEV_REGISTER, mc);
+ ma_put(mc);
+ return 0;
+ }
+@@ -877,6 +890,8 @@
+
+ igmp6_group_dropped(ma);
+
++ notifier_call_chain(&multicast_chain,
++ NETDEV_UNREGISTER, ma);
+ ma_put(ma);
+ return 0;
+ }
+=== net/ipv6/addrconf.c
+==================================================================
+--- net/ipv6/addrconf.c (/upstream/vanilla/2.4.27) (revision 52)
++++ net/ipv6/addrconf.c (/trunk/2.4.27) (revision 52)
+@@ -825,9 +825,20 @@
+ return -1;
+ memcpy(eui, dev->dev_addr, 3);
+ memcpy(eui + 5, dev->dev_addr+3, 3);
++#ifdef CONFIG_SHARED_IPV6_CARDS
++ if (dev->features&NETIF_F_SHARED_IPV6) {
++ eui[3] = (dev->dev_id>>8)&0xff;
++ eui[4] = dev->dev_id&0xff;
++ } else {
++ eui[3] = 0xFF;
++ eui[4] = 0xFE;
++ eui[0] ^= 2;
++ }
++#else /* CONFIG_SHARED_IPV6_CARDS */
+ eui[3] = 0xFF;
+ eui[4] = 0xFE;
+ eui[0] ^= 2;
++#endif /* CONFIG_SHARED_IPV6_CARDS */
+ return 0;
+ case ARPHRD_ARCNET:
+ /* XXX: inherit EUI-64 from mother interface -- yoshfuji */
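
As a worked example of the hunk above: with MAC 02:00:00:01:02:03 and dev_id 0x0a01, the shared path yields the interface identifier 02:00:00:0a:01:01:02:03 (the dev_id bytes replace the usual ff:fe filler and the universal/local bit is left alone), while the standard path yields 00:00:00:ff:fe:01:02:03. A hedged standalone sketch reproducing the arithmetic, with example values:

    /* Hedged sketch: the EUI-64 construction above; all values are examples. */
    #include <stdio.h>

    int main(void)
    {
            unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x01, 0x02, 0x03 };
            unsigned short dev_id = 0x0a01;         /* example shared-card id */
            unsigned char eui[8];
            int shared = 1;                         /* NETIF_F_SHARED_IPV6 set? */

            eui[0] = mac[0]; eui[1] = mac[1]; eui[2] = mac[2];
            eui[5] = mac[3]; eui[6] = mac[4]; eui[7] = mac[5];
            if (shared) {
                    eui[3] = (dev_id >> 8) & 0xff;
                    eui[4] = dev_id & 0xff;
            } else {
                    eui[3] = 0xFF;
                    eui[4] = 0xFE;
                    eui[0] ^= 2;
            }
            printf("%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
                   eui[0], eui[1], eui[2], eui[3],
                   eui[4], eui[5], eui[6], eui[7]);
            return 0;   /* prints 02:00:00:0a:01:01:02:03 for shared = 1 */
    }
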
+=== net/Config.in
+==================================================================
+--- net/Config.in (/upstream/vanilla/2.4.27) (revision 52)
++++ net/Config.in (/trunk/2.4.27) (revision 52)
+@@ -25,6 +25,7 @@
+ if [ "$CONFIG_IPV6" != "n" ]; then
+ source net/ipv6/Config.in
+ fi
++ bool ' Prepare net_device struct for shared IPv6 cards' CONFIG_SHARED_IPV6_CARDS
+ fi
+ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ source net/khttpd/Config.in
+=== init/kerntypes.c
+==================================================================
+--- init/kerntypes.c (/upstream/vanilla/2.4.27) (revision 52)
++++ init/kerntypes.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,49 @@
++/*
++ * kerntypes.c
++ *
++ * Dummy module that includes headers for all kernel types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
++ */
++
++#ifndef __KERNEL__
++#define __KERNEL__
++#endif
++
++/* General linux types */
++
++#include <linux/autoconf.h>
++#include <linux/mm.h>
++#include <linux/config.h>
++#include <linux/utsname.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/compile.h>
++
++#ifdef CONFIG_ARCH_S390
++
++ /* s390 specific types */
++
++ #include <asm/lowcore.h>
++ #include <asm/debug.h>
++ #include <asm/irq.h>
++ #include <asm/io.h>
++
++ #if defined (CONFIG_DASD) || defined (CONFIG_DASD_MODULE)
++ #include "../drivers/s390/block/dasd_int.h"
++ #endif /* CONFIG_DASD */
++
++ #if defined (CONFIG_QDIO) || defined (CONFIG_QDIO_MODULE)
++ #include "asm/qdio.h"
++ #endif /* CONFIG_QDIO */
++
++#endif /* CONFIG_ARCH_S390 */
++
++void
++kerntypes_dummy(void)
++{
++}
+=== init/version.c
+==================================================================
+--- init/version.c (/upstream/vanilla/2.4.27) (revision 52)
++++ init/version.c (/trunk/2.4.27) (revision 52)
+@@ -10,6 +10,7 @@
+ #include <linux/utsname.h>
+ #include <linux/version.h>
+ #include <linux/compile.h>
++#include <linux/stringify.h>
+
+ #define version(a) Version_ ## a
+ #define version_string(a) version(a)
+@@ -24,3 +25,5 @@
+ const char *linux_banner =
+ "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
+ LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
++
++const char *LINUX_COMPILE_VERSION_ID = __stringify(LINUX_COMPILE_VERSION_ID);
+=== init/do_mounts.c
+==================================================================
+--- init/do_mounts.c (/upstream/vanilla/2.4.27) (revision 52)
++++ init/do_mounts.c (/trunk/2.4.27) (revision 52)
+@@ -164,7 +164,28 @@
+ { "dasdf", (DASD_MAJOR << MINORBITS) + (5 << 2) },
+ { "dasdg", (DASD_MAJOR << MINORBITS) + (6 << 2) },
+ { "dasdh", (DASD_MAJOR << MINORBITS) + (7 << 2) },
++ { "dasdi", (DASD_MAJOR << MINORBITS) + (8 << 2) },
++ { "dasdj", (DASD_MAJOR << MINORBITS) + (9 << 2) },
++ { "dasdk", (DASD_MAJOR << MINORBITS) + (10 << 2) },
++ { "dasdl", (DASD_MAJOR << MINORBITS) + (11 << 2) },
++ { "dasdm", (DASD_MAJOR << MINORBITS) + (12 << 2) },
++ { "dasdn", (DASD_MAJOR << MINORBITS) + (13 << 2) },
++ { "dasdo", (DASD_MAJOR << MINORBITS) + (14 << 2) },
++ { "dasdp", (DASD_MAJOR << MINORBITS) + (15 << 2) },
++ { "dasdq", (DASD_MAJOR << MINORBITS) + (16 << 2) },
++ { "dasdr", (DASD_MAJOR << MINORBITS) + (17 << 2) },
++ { "dasds", (DASD_MAJOR << MINORBITS) + (18 << 2) },
++ { "dasdt", (DASD_MAJOR << MINORBITS) + (19 << 2) },
++ { "dasdu", (DASD_MAJOR << MINORBITS) + (20 << 2) },
++ { "dasdv", (DASD_MAJOR << MINORBITS) + (21 << 2) },
++ { "dasdw", (DASD_MAJOR << MINORBITS) + (22 << 2) },
++ { "dasdx", (DASD_MAJOR << MINORBITS) + (23 << 2) },
++ { "dasdy", (DASD_MAJOR << MINORBITS) + (24 << 2) },
++ { "dasdz", (DASD_MAJOR << MINORBITS) + (25 << 2) },
+ #endif
++#ifdef CONFIG_BLK_DEV_XPRAM
++ { "xpram", (XPRAM_MAJOR << MINORBITS) },
++#endif
+ { "ida/c0d0p",0x4800 },
+ { "ida/c0d1p",0x4810 },
+ { "ida/c0d2p",0x4820 },
+=== fs/partitions/ibm.c
+==================================================================
+--- fs/partitions/ibm.c (/upstream/vanilla/2.4.27) (revision 52)
++++ fs/partitions/ibm.c (/trunk/2.4.27) (revision 52)
+@@ -9,6 +9,7 @@
+ * 07/10/00 Fixed detection of CMS formatted disks
+ * 02/13/00 VTOC partition support added
+ * 12/27/01 fixed PL030593 (CMS reserved minidisk not detected on 64 bit)
++ * 07/24/03 no longer using contents of freed page for CMS label recognition (BZ3611)
+ */
+
+ #include <linux/config.h>
+@@ -134,6 +135,9 @@
+ EBCASC(type, 4);
+ EBCASC(name, 6);
+
++ if(name[0] != '\0')
++ register_disk_label(hd, MINOR(to_kdev_t(bdev->bd_dev)), name);
++
+ /*
+ * Three different types: CMS1, VOL1 and LNX1/unlabeled
+ */
+@@ -141,7 +145,7 @@
+ /*
+ * VM style CMS1 labeled disk
+ */
+- int *label = (int *) data;
++ int *label = (int *) vlabel;
+
+ if (label[13] != 0) {
+ printk("CMS1/%8s(MDSK):", name);
+@@ -158,7 +162,8 @@
+ add_gd_partition(hd, first_part_minor,
+ offset*(blocksize >> 9),
+ size-offset*(blocksize >> 9));
+- } else if (strncmp(type, "VOL1", 4) == 0) {
++ } else if ((strncmp(type, "VOL1", 4) == 0) &&
++ (!info->FBA_layout) && (!strcmp(info->type, "ECKD"))) {
+ /*
+ * New style VOL1 labeled disk
+ */
+=== fs/partitions/check.c
+==================================================================
+--- fs/partitions/check.c (/upstream/vanilla/2.4.27) (revision 52)
++++ fs/partitions/check.c (/trunk/2.4.27) (revision 52)
+@@ -89,7 +89,7 @@
+ #ifdef CONFIG_ARCH_S390
+ int (*genhd_dasd_name)(char*,int,int,struct gendisk*) = NULL;
+ int (*genhd_dasd_ioctl)(struct inode *inp, struct file *filp,
+- unsigned int no, unsigned long data);
++ unsigned int no, unsigned long data);
+ EXPORT_SYMBOL(genhd_dasd_name);
+ EXPORT_SYMBOL(genhd_dasd_ioctl);
+ #endif
+@@ -281,6 +281,7 @@
+ int devnum = minor >> dev->minor_shift;
+ devfs_handle_t dir;
+ unsigned int devfs_flags = DEVFS_FL_DEFAULT;
++ umode_t devfs_perm = S_IFBLK | S_IRUSR | S_IWUSR;
+ char devname[16];
+
+ if (dev->part[minor + part].de) return;
+@@ -288,11 +289,14 @@
+ if (!dir) return;
+ if ( dev->flags && (dev->flags[devnum] & GENHD_FL_REMOVABLE) )
+ devfs_flags |= DEVFS_FL_REMOVABLE;
++ if (is_read_only(MKDEV(dev->major, minor+part))) {
++ devfs_perm &= ~(S_IWUSR);
++ }
+ sprintf (devname, "part%d", part);
+ dev->part[minor + part].de =
+ devfs_register (dir, devname, devfs_flags,
+ dev->major, minor + part,
+- S_IFBLK | S_IRUSR | S_IWUSR,
++ devfs_perm,
+ dev->fops, NULL);
+ }
+
+@@ -304,12 +308,16 @@
+ int devnum = minor >> dev->minor_shift;
+ devfs_handle_t dir, slave;
+ unsigned int devfs_flags = DEVFS_FL_DEFAULT;
++ umode_t devfs_perm = S_IFBLK | S_IRUSR | S_IWUSR;
+ char dirname[64], symlink[16];
+ static devfs_handle_t devfs_handle;
+
+ if (dev->part[minor].de) return;
+ if ( dev->flags && (dev->flags[devnum] & GENHD_FL_REMOVABLE) )
+ devfs_flags |= DEVFS_FL_REMOVABLE;
++ if (is_read_only(MKDEV(dev->major, minor))) {
++ devfs_perm &= ~(S_IWUSR);
++ }
+ if (dev->de_arr) {
+ dir = dev->de_arr[devnum];
+ if (!dir) /* Aware driver wants to block disc management */
+@@ -331,7 +339,7 @@
+ dirname + pos, &slave, NULL);
+ dev->part[minor].de =
+ devfs_register (dir, "disc", devfs_flags, dev->major, minor,
+- S_IFBLK | S_IRUSR | S_IWUSR, dev->fops, NULL);
++ devfs_perm, dev->fops, NULL);
+ devfs_auto_unregister (dev->part[minor].de, slave);
+ if (!dev->de_arr)
+ devfs_auto_unregister (slave, dir);
+@@ -359,6 +367,10 @@
+ dev->part[minor].de = NULL;
+ devfs_dealloc_unique_number (&disc_numspace,
+ dev->part[minor].number);
++ if(dev->label_arr && dev->label_arr[minor >> dev->minor_shift]) {
++ devfs_unregister(dev->label_arr[minor >> dev->minor_shift]);
++ dev->label_arr[minor >> dev->minor_shift] = NULL;
++ }
+ }
+ #endif /* CONFIG_DEVFS_FS */
+ }
+@@ -415,6 +427,93 @@
+ }
+ }
+
++/*
++ * This function creates a link from /dev/labels/<labelname> to the devfs
++ * device directory. The device driver must allocate the label_arr
++ * for this to work.
++ * This enables devices/partition tables that support labels to be accessed
++ * by that name instead of the device name (which can change if devices are
++ * moved around).
++ *
++ * Current restrictions:
++ * - Only the first device that uses a certain label creates the link
++ * (which can also be good in case there is a backup device)
++ * - When removing devices that created labels, previously suppressed
++ * devices won't show up.
++ */
++void register_disk_label(struct gendisk *hd, int minor, char *label) {
++#ifdef CONFIG_DEVFS_FS
++ int disknum = minor >> hd->minor_shift;
++ static devfs_handle_t devfs_label_dir = NULL;
++ int i;
++
++ /*
++ * Check the given label. Trailing whitespaces are removed. Otherwise
++ * only alphanumeric characters are allowed. (fstab)
++ * Added [$#%@] since these are allowed by fdasd and seem
++ * to work in fstab.
++ */
++ for(i=0; label[i] != '\0'; i++);
++ for(i--; i >= 0; i--) {
++ if(label[i] == ' ' && label[i+1] == '\0') {
++ label[i] = '\0';
++ continue;
++ }
++ if(
++ label[i] == '$' || label[i] == '#' ||
++ label[i] == '@' || label[i] == '%'
++ )
++ continue;
++ if(label[i] >= 'a' && label[i] <= 'z')
++ continue;
++ if(label[i] >= 'A' && label[i] <= 'Z')
++ continue;
++ if(label[i] >= '0' && label[i] <= '9')
++ continue;
++
++ printk(KERN_WARNING "\nregister_disk_label: invalid character(s)"
++ " in label <%s>\n", label);
++ printk(KERN_WARNING "register_label: refusing to create devfs entry.\n");
++ return;
++ }
++
++ if(!hd->label_arr)
++ return;
++
++ if(!devfs_label_dir)
++ if(!(devfs_label_dir = devfs_mk_dir(NULL, "labels", NULL)))
++ return;
++
++ if(hd->label_arr[disknum]) {
++ if(strcmp(devfs_get_name(hd->label_arr[disknum], NULL), label) == 0)
++ return;
++
++ devfs_unregister(hd->label_arr[disknum]);
++ hd->label_arr[disknum] = NULL;
++ }
++ if(!devfs_find_handle(devfs_label_dir, label, 0, 0, 0, 0)) {
++ int pos = 0;
++ char path[64];
++
++ if(hd->de_arr) {
++ if(!hd->de_arr[disknum])
++ return;
++
++ pos = devfs_generate_path(hd->de_arr[disknum], path+3, sizeof(path)-3);
++ if(pos < 0)
++ return;
++
++ strncpy(path+pos, "../", 3);
++ } else {
++ sprintf(path, "../%s/disc/%d", hd->major_name, disknum);
++ }
++ devfs_mk_symlink(
++ devfs_label_dir, label, DEVFS_FL_DEFAULT, path+pos,
++ &hd->label_arr[disknum], NULL);
++ }
++#endif
++}
++
+ unsigned char *read_dev_sector(struct block_device *bdev, unsigned long n, Sector *p)
+ {
+ struct address_space *mapping = bdev->bd_inode->i_mapping;
+=== fs/partitions/check.h
+==================================================================
+--- fs/partitions/check.h (/upstream/vanilla/2.4.27) (revision 52)
++++ fs/partitions/check.h (/trunk/2.4.27) (revision 52)
+@@ -13,4 +13,6 @@
+ page_cache_release(p.v);
+ }
+
++void register_disk_label(struct gendisk *hd, int minor, char *label);
++
+ extern int warn_no_part;
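
A hedged sketch of a caller, mirroring the fs/partitions/ibm.c hunk above; the wrapper name is a placeholder and the comment restates the preconditions documented in check.c:

    /* Hedged sketch: publish a volume label under /dev/labels via the API above. */
    #include <linux/genhd.h>
    /* register_disk_label() is declared in fs/partitions/check.h */

    static void my_publish_label(struct gendisk *hd, int minor, char *label)
    {
            /*
             * The driver must have allocated hd->label_arr (one slot per disk);
             * register_disk_label() itself strips trailing blanks and rejects
             * labels with characters outside [A-Za-z0-9$#@%].
             */
            if (label[0] != '\0')
                    register_disk_label(hd, minor, label);
    }
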
+=== fs/xip2fs/dir.c
+==================================================================
+--- fs/xip2fs/dir.c (/upstream/vanilla/2.4.27) (revision 52)
++++ fs/xip2fs/dir.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,302 @@
++/*
++ * linux/fs/xip2/dir.c, Version 1
++ *
++ * (C) Copyright IBM Corp. 2002,2004
++ * Author(s): Carsten Otte <cotte at de.ibm.com>
++ * derived from second extended filesystem (ext2)
++ */
++
++#include <linux/fs.h>
++#include <linux/xip2_fs.h>
++#include <linux/pagemap.h>
++
++typedef struct ext2_dir_entry_2 ext2_dirent;
++
++static inline unsigned xip2_chunk_size(struct inode *inode)
++{
++ return inode->i_sb->s_blocksize;
++}
++
++static inline unsigned long dir_pages(struct inode *inode)
++{
++ return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
++}
++
++static int xip2_check_page(struct inode* dir, unsigned long index, void *kaddr)
++{
++ struct super_block *sb = dir->i_sb;
++ unsigned chunk_size = xip2_chunk_size(dir);
++ u32 max_inumber = le32_to_cpu(sb->u.xip2_sb.s_es->s_inodes_count);
++ unsigned offs, rec_len;
++ unsigned limit = PAGE_CACHE_SIZE;
++ ext2_dirent *p;
++ char *error;
++
++ if ((dir->i_size >> PAGE_CACHE_SHIFT) == index) {
++ limit = dir->i_size & ~PAGE_CACHE_MASK;
++ if (limit & (chunk_size - 1))
++ goto Ebadsize;
++ for (offs = limit; offs<PAGE_CACHE_SIZE; offs += chunk_size) {
++ ext2_dirent *p = (ext2_dirent*)(kaddr + offs);
++ p->rec_len = cpu_to_le16(chunk_size);
++ }
++ if (!limit)
++ goto out;
++ }
++ for (offs = 0; offs <= limit - EXT2_DIR_REC_LEN(1); offs += rec_len) {
++ p = (ext2_dirent *)(kaddr + offs);
++ rec_len = le16_to_cpu(p->rec_len);
++
++ if (rec_len < EXT2_DIR_REC_LEN(1))
++ goto Eshort;
++ if (rec_len & 3)
++ goto Ealign;
++ if (rec_len < EXT2_DIR_REC_LEN(p->name_len))
++ goto Enamelen;
++ if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
++ goto Espan;
++ if (le32_to_cpu(p->inode) > max_inumber)
++ goto Einumber;
++ }
++ if (offs != limit)
++ goto Eend;
++out:
++ return 0;
++
++ /* Too bad, we had an error */
++
++Ebadsize:
++ xip2_error(sb, "xip2_check_page",
++ "size of directory #%lu is not a multiple of chunk size",
++ dir->i_ino
++ );
++ goto fail;
++Eshort:
++ error = "rec_len is smaller than minimal";
++ goto bad_entry;
++Ealign:
++ error = "unaligned directory entry";
++ goto bad_entry;
++Enamelen:
++ error = "rec_len is too small for name_len";
++ goto bad_entry;
++Espan:
++ error = "directory entry across blocks";
++ goto bad_entry;
++Einumber:
++ error = "inode out of bounds";
++bad_entry:
++ xip2_error (sb, "xip2_check_page", "bad entry in directory #%lu: %s - "
++ "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
++ dir->i_ino, error, (index<<PAGE_CACHE_SHIFT)+offs,
++ (unsigned long) le32_to_cpu(p->inode),
++ rec_len, p->name_len);
++ goto fail;
++Eend:
++ p = (ext2_dirent *)(kaddr + offs);
++ xip2_error (sb, "xip2_check_page",
++ "entry in directory #%lu spans the page boundary"
++ "offset=%lu, inode=%lu",
++ dir->i_ino, (index<<PAGE_CACHE_SHIFT)+offs,
++ (unsigned long) le32_to_cpu(p->inode));
++fail:
++ return -EBADFD;
++}
++
++
++/*
++ * NOTE! unlike strncmp, xip2_match returns 1 for success, 0 for failure.
++ *
++ * len <= EXT2_NAME_LEN and de != NULL are guaranteed by caller.
++ */
++static inline int xip2_match (int len, const char * const name,
++ struct ext2_dir_entry_2 * de)
++{
++ if (len != de->name_len)
++ return 0;
++ if (!de->inode)
++ return 0;
++ return !memcmp(name, de->name, len);
++}
++
++/*
++ * p is at least 6 bytes before the end of page
++ */
++static inline ext2_dirent *xip2_next_entry(ext2_dirent *p)
++{
++ return (ext2_dirent *)((char*)p + le16_to_cpu(p->rec_len));
++}
++
++static inline unsigned
++xip2_validate_entry(char *base, unsigned offset, unsigned mask)
++{
++ ext2_dirent *de = (ext2_dirent*)(base + offset);
++ ext2_dirent *p = (ext2_dirent*)(base + (offset&mask));
++ while ((char*)p < (char*)de)
++ p = xip2_next_entry(p);
++ return (char *)p - base;
++}
++
++static unsigned char xip2_filetype_table[EXT2_FT_MAX] = {
++ [EXT2_FT_UNKNOWN] DT_UNKNOWN,
++ [EXT2_FT_REG_FILE] DT_REG,
++ [EXT2_FT_DIR] DT_DIR,
++ [EXT2_FT_CHRDEV] DT_CHR,
++ [EXT2_FT_BLKDEV] DT_BLK,
++ [EXT2_FT_FIFO] DT_FIFO,
++ [EXT2_FT_SOCK] DT_SOCK,
++ [EXT2_FT_SYMLINK] DT_LNK,
++};
++
++#define S_SHIFT 12
++static unsigned char xip2_type_by_mode[S_IFMT >> S_SHIFT] = {
++ [S_IFREG >> S_SHIFT] EXT2_FT_REG_FILE,
++ [S_IFDIR >> S_SHIFT] EXT2_FT_DIR,
++ [S_IFCHR >> S_SHIFT] EXT2_FT_CHRDEV,
++ [S_IFBLK >> S_SHIFT] EXT2_FT_BLKDEV,
++ [S_IFIFO >> S_SHIFT] EXT2_FT_FIFO,
++ [S_IFSOCK >> S_SHIFT] EXT2_FT_SOCK,
++ [S_IFLNK >> S_SHIFT] EXT2_FT_SYMLINK,
++};
++
++static inline void xip2_set_de_type(ext2_dirent *de, struct inode *inode)
++{
++ mode_t mode = inode->i_mode;
++ if (EXT2_HAS_INCOMPAT_FEATURE(inode->i_sb,
++ EXT2_FEATURE_INCOMPAT_FILETYPE))
++ de->file_type = xip2_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
++ else
++ de->file_type = 0;
++}
++
++static int
++xip2_readdir (struct file * filp, void * dirent, filldir_t filldir)
++{
++ int err;
++ loff_t pos = filp->f_pos;
++ unsigned long blockno;
++ struct inode *inode = filp->f_dentry->d_inode;
++ struct super_block *sb = inode->i_sb;
++ unsigned offset = pos & ~PAGE_CACHE_MASK;
++ unsigned long n = pos >> PAGE_CACHE_SHIFT;
++ unsigned long npages = dir_pages(inode);
++ unsigned chunk_mask = ~(xip2_chunk_size(inode)-1);
++ unsigned char *types = NULL;
++ int need_revalidate = (filp->f_version != inode->i_version);
++
++ if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
++ goto done;
++
++ if (EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
++ types = xip2_filetype_table;
++
++ for ( ; n < npages; n++, offset = 0) {
++ char *kaddr, *limit;
++ ext2_dirent *de;
++ err=xip2_get_block(inode, n, &blockno, 0);
++ if (err) goto done;
++ kaddr = xip2_maread (inode->i_sb->u.xip2_sb.mem_area,
++ blockno, PAGE_CACHE_SIZE);
++ if (kaddr==NULL)
++ BUG();
++ xip2_check_page(inode, n, kaddr);
++ if (need_revalidate) {
++ offset = xip2_validate_entry(kaddr, offset,
++ chunk_mask);
++ need_revalidate = 0;
++ }
++ de = (ext2_dirent *)(kaddr+offset);
++ limit = kaddr + PAGE_CACHE_SIZE - EXT2_DIR_REC_LEN(1);
++ for ( ;(char*)de <= limit; de = xip2_next_entry(de))
++ if (de->inode) {
++ int over;
++ unsigned char d_type = DT_UNKNOWN;
++
++ if (types && de->file_type < EXT2_FT_MAX)
++ d_type = types[de->file_type];
++
++ offset = (char *)de - kaddr;
++ over = filldir(dirent, de->name, de->name_len,
++ (n<<PAGE_CACHE_SHIFT) | offset,
++ le32_to_cpu(de->inode), d_type);
++ if (over) {
++ goto done;
++ }
++ }
++ }
++
++done:
++ filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
++ filp->f_version = inode->i_version;
++ UPDATE_ATIME(inode);
++ return 0;
++}
++
++/*
++ * xip2_find_entry()
++ *
++ * finds an entry in the specified directory with the wanted name.
++ * Unlike its ext2 ancestor it returns the entry directly, or NULL if
++ * nothing was found; there is no page to map or unlock here.
++ * Entry is guaranteed to be valid.
++ */
++struct ext2_dir_entry_2 * xip2_find_entry (struct inode * dir,
++ struct dentry *dentry)
++{
++ unsigned long blockno;
++ const char *name = dentry->d_name.name;
++ int err;
++ int namelen = dentry->d_name.len;
++ unsigned reclen = EXT2_DIR_REC_LEN(namelen);
++ unsigned long start, n;
++ unsigned long npages = dir_pages(dir);
++ ext2_dirent * de;
++
++ start = dir->u.ext2_i.i_dir_start_lookup;
++ if (start >= npages)
++ start = 0;
++ n = start;
++ do {
++ char *kaddr;
++ err=xip2_get_block(dir, n, &blockno, 0);
++ if (err) return NULL;
++ kaddr = xip2_maread (dir->i_sb->u.xip2_sb.mem_area, blockno,
++ PAGE_CACHE_SIZE);
++ if (kaddr==NULL)
++ BUG();
++ if (!xip2_check_page(dir, n, kaddr)) {
++ de = (ext2_dirent *) kaddr;
++ kaddr += PAGE_CACHE_SIZE - reclen;
++ while ((char *) de <= kaddr) {
++ if (xip2_match (namelen, name, de))
++ goto found;
++ de = xip2_next_entry(de);
++ }
++ }
++ if (++n >= npages)
++ n = 0;
++ } while (n != start);
++ return NULL;
++
++found:
++ dir->u.ext2_i.i_dir_start_lookup = n;
++ return de;
++}
++
++ino_t xip2_inode_by_name(struct inode * dir, struct dentry *dentry)
++{
++ ino_t res = 0;
++ struct ext2_dir_entry_2 * de;
++
++ de = xip2_find_entry (dir, dentry);
++ if (de) {
++ res = le32_to_cpu(de->inode);
++ }
++ return res;
++}
++
++struct file_operations xip2_dir_operations = {
++ read: generic_read_dir,
++ readdir: xip2_readdir,
++ ioctl: xip2_ioctl,
++};
+=== fs/xip2fs/inode.c
+==================================================================
+--- fs/xip2fs/inode.c (/upstream/vanilla/2.4.27) (revision 52)
++++ fs/xip2fs/inode.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,382 @@
++/*
++ * linux/fs/xip2/inode.c, Version 1
++ *
++ * (C) Copyright IBM Corp. 2002,2004
++ * Author(s): Carsten Otte <cotte at de.ibm.com>
++ * derived from second extended filesystem (ext2)
++ */
++
++#include <linux/fs.h>
++#include <linux/xip2_fs.h>
++#include <linux/iobuf.h>
++#include <linux/locks.h>
++#include <linux/smp_lock.h>
++#include <linux/sched.h>
++#include <linux/highuid.h>
++#include <linux/quotaops.h>
++#include <linux/module.h>
++
++#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
++
++MODULE_AUTHOR("Carsten Otte, Remy Card and others");
++MODULE_DESCRIPTION("XIP2 filesystem derived from ext2");
++MODULE_LICENSE("GPL");
++
++typedef struct {
++ u32 *p;
++ u32 key;
++ void *block_ptr;
++} Indirect;
++
++static inline void add_chain(Indirect *p, void *block_ptr, u32 *v)
++{
++ p->key = *(p->p = v);
++ p->block_ptr = block_ptr;
++}
++
++static inline int verify_chain(Indirect *from, Indirect *to)
++{
++ while (from <= to && from->key == *from->p)
++ from++;
++ return (from > to);
++}
++
++/**
++ * xip2_block_to_path - parse the block number into array of offsets
++ * @inode: inode in question (we are only interested in its superblock)
++ * @i_block: block number to be parsed
++ * @offsets: array to store the offsets in
++ *
++ * To store the locations of file's data ext2 uses a data structure common
++ * for UNIX filesystems - tree of pointers anchored in the inode, with
++ * data blocks at leaves and indirect blocks in intermediate nodes.
++ * This function translates the block number into path in that tree -
++ * return value is the path length and @offsets[n] is the offset of
++ * pointer to (n+1)th node in the nth one. If @block is out of range
++ * (negative or too large) warning is printed and zero returned.
++ *
++ * Note: function doesn't find node addresses, so no IO is needed. All
++ * we need to know is the capacity of indirect blocks (taken from the
++ * inode->i_sb).
++ */
++
++/*
++ * Portability note: the last comparison (check that we fit into triple
++ * indirect block) is spelled differently, because otherwise on an
++ * architecture with 32-bit longs and 8Kb pages we might get into trouble
++ * if our filesystem had 8Kb blocks. We might use long long, but that would
++ * kill us on x86. Oh, well, at least the sign propagation does not matter -
++ * i_block would have to be negative in the very beginning, so we would not
++ * get there at all.
++ */
++
++static int xip2_block_to_path(struct inode *inode, long i_block, int offsets[4])
++{
++ int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb);
++ int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
++ const long direct_blocks = EXT2_NDIR_BLOCKS,
++ indirect_blocks = ptrs,
++ double_blocks = (1 << (ptrs_bits * 2));
++ int n = 0;
++
++ if (i_block < 0) {
++ xip2_warning (inode->i_sb, "xip2_block_to_path", "block < 0");
++ } else if (i_block < direct_blocks) {
++ offsets[n++] = i_block;
++ } else if ( (i_block -= direct_blocks) < indirect_blocks) {
++ offsets[n++] = EXT2_IND_BLOCK;
++ offsets[n++] = i_block;
++ } else if ((i_block -= indirect_blocks) < double_blocks) {
++ offsets[n++] = EXT2_DIND_BLOCK;
++ offsets[n++] = i_block >> ptrs_bits;
++ offsets[n++] = i_block & (ptrs - 1);
++ } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
++ offsets[n++] = EXT2_TIND_BLOCK;
++ offsets[n++] = i_block >> (ptrs_bits * 2);
++ offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
++ offsets[n++] = i_block & (ptrs - 1);
++ } else {
++ xip2_warning (inode->i_sb, "xip2_block_to_path",
++ "block > big");
++ }
++ return n;
++}
++
++/**
++ * xip2_get_branch - read the chain of indirect blocks leading to data
++ * @inode: inode in question
++ * @depth: depth of the chain (1 - direct pointer, etc.)
++ * @offsets: offsets of pointers in inode/indirect blocks
++ * @chain: place to store the result
++ * @err: here we store the error value
++ *
++ */
++static Indirect *xip2_get_branch(struct inode *inode,
++ int depth,
++ int *offsets,
++ Indirect chain[4],
++ int *err)
++{
++ int size = inode->i_sb->s_blocksize;
++ Indirect *p = chain;
++ void *block_ptr;
++
++ *err = 0;
++ /* i_data is not going away, no lock needed */
++ add_chain (chain, NULL, inode->u.ext2_i.i_data + *offsets);
++ if (!p->key)
++ goto no_block;
++ while (--depth) {
++ block_ptr = xip2_maread(inode->i_sb->u.xip2_sb.mem_area,
++ le32_to_cpu(p->key), size);
++ if (!block_ptr)
++ goto failure;
++ /* Reader: pointers */
++ if (!verify_chain(chain, p))
++ goto changed;
++ add_chain(++p, block_ptr, (u32*)block_ptr + *++offsets);
++ /* Reader: end */
++ if (!p->key)
++ goto no_block;
++ }
++ return NULL;
++
++changed:
++ *err = -EAGAIN;
++ goto no_block;
++failure:
++ *err = -EIO;
++no_block:
++ return p;
++}
++
++int xip2_get_block(struct inode *inode, long iblock, unsigned long* blockno_result,
++ int create)
++{
++ int err = -EIO;
++ int offsets[4];
++ Indirect chain[4];
++ Indirect *partial;
++ int depth = xip2_block_to_path(inode, iblock, offsets);
++
++ if (depth == 0)
++ goto out;
++
++ partial = xip2_get_branch(inode, depth, offsets, chain, &err);
++
++ /* Simplest case - block found, no allocation needed */
++ if (!partial) {
++ *blockno_result = le32_to_cpu(chain[depth-1].key);
++ /* Clean up and exit */
++ partial = chain+depth-1; /* the whole chain */
++ goto cleanup;
++ }
++
++ /* Next simple case - plain lookup or failed read of indirect block */
++ if (!create || err) {
++cleanup:
++ while (partial > chain) {
++ partial--;
++ }
++out:
++ return err;
++ }
++ xip2_warning (inode->i_sb, "xip2_get_block", "allocation of a block "
++ "would be needed");
++ return -EROFS;
++}
++
++static int xip2_readpage(struct file *file, struct page *page)
++{
++ struct inode *inode = page->mapping->host;
++ unsigned long iblock;
++ unsigned long blocksize, blocks, blockno;
++ void* block_ptr;
++ int err;
++
++ printk ("Whoot whooot! Readpage was called! Trace-me at %p\n",
++ xip2_readpage);
++
++ if (!PageLocked(page))
++ PAGE_BUG(page);
++ blocksize = 1 << inode->i_blkbits;
++ blocks = PAGE_CACHE_SIZE >> inode->i_blkbits;
++ iblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
++
++ if ((blocksize != PAGE_SIZE) || (blocks != 1))
++ BUG();
++ err=xip2_get_block(inode, iblock, &blockno, 0);
++ if (err)
++ return err;
++ block_ptr = xip2_maread(inode->i_sb->u.xip2_sb.mem_area, blockno,
++ blocksize);
++ if (!block_ptr)
++ return -EIO;
++ memcpy (page_address(page),block_ptr,blocksize);
++ SetPageUptodate(page);
++ UnlockPage(page);
++ return 0;
++}
++
++struct address_space_operations xip2_aops = {
++ readpage: xip2_readpage,
++};
++
++/*
++ * Probably it should be a library function... search for first non-zero word
++ * or memcmp with zero_page, whatever is better for particular architecture.
++ * Linus?
++ */
++static inline int all_zeroes(u32 *p, u32 *q)
++{
++ while (p < q)
++ if (*p++)
++ return 0;
++ return 1;
++}
++
++void xip2_read_inode (struct inode * inode)
++{
++ void * block_ptr;
++ struct ext2_inode * raw_inode;
++ unsigned long block_group;
++ unsigned long group_desc;
++ unsigned long desc;
++ unsigned long block;
++ unsigned long offset;
++ struct ext2_group_desc * gdp;
++
++ if ((inode->i_ino != EXT2_ROOT_INO &&
++ inode->i_ino != EXT2_ACL_IDX_INO &&
++ inode->i_ino != EXT2_ACL_DATA_INO &&
++ inode->i_ino < EXT2_FIRST_INO(inode->i_sb)) ||
++ inode->i_ino >
++ le32_to_cpu(inode->i_sb->u.xip2_sb.s_es->s_inodes_count)) {
++ xip2_error (inode->i_sb, "xip2_read_inode",
++ "bad inode number: %lu", inode->i_ino);
++ goto bad_inode;
++ }
++ block_group = (inode->i_ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
++ if (block_group >= inode->i_sb->u.xip2_sb.s_groups_count) {
++ xip2_error (inode->i_sb, "xip2_read_inode",
++ "group >= groups count");
++ goto bad_inode;
++ }
++ group_desc = block_group >> EXT2_DESC_PER_BLOCK_BITS(inode->i_sb);
++ desc = block_group & (EXT2_DESC_PER_BLOCK(inode->i_sb) - 1);
++ block_ptr = inode->i_sb->u.xip2_sb.s_group_desc[group_desc];
++ if (!block_ptr) {
++ xip2_error (inode->i_sb, "xip2_read_inode",
++ "Descriptor not loaded");
++ goto bad_inode;
++ }
++
++ gdp = (struct ext2_group_desc *) block_ptr;
++ /*
++ * Figure out the offset within the block group inode table
++ */
++ offset = ((inode->i_ino - 1) % EXT2_INODES_PER_GROUP(inode->i_sb)) *
++ EXT2_INODE_SIZE(inode->i_sb);
++ block = le32_to_cpu(gdp[desc].bg_inode_table) +
++ (offset >> EXT2_BLOCK_SIZE_BITS(inode->i_sb));
++ if (!(block_ptr = xip2_maread (inode->i_sb->u.xip2_sb.mem_area, block,
++ inode->i_sb->s_blocksize))) {
++ xip2_error (inode->i_sb, "xip2_read_inode",
++ "unable to read inode block - "
++ "inode=%lu, block=%lu", inode->i_ino, block);
++ goto bad_inode;
++ }
++ offset &= (EXT2_BLOCK_SIZE(inode->i_sb) - 1);
++ raw_inode = (struct ext2_inode *) (block_ptr + offset);
++
++ inode->i_mode = le16_to_cpu(raw_inode->i_mode);
++ inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
++ inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
++ if(!(test_opt (inode->i_sb, NO_UID32))) {
++ inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
++ inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
++ }
++ inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
++ inode->i_size = le32_to_cpu(raw_inode->i_size);
++ inode->i_atime = le32_to_cpu(raw_inode->i_atime);
++ inode->i_ctime = le32_to_cpu(raw_inode->i_ctime);
++ inode->i_mtime = le32_to_cpu(raw_inode->i_mtime);
++ inode->u.ext2_i.i_dtime = le32_to_cpu(raw_inode->i_dtime);
++ /* We now have enough fields to check if the inode was active or not.
++	 * This is needed because nfsd might try to access dead inodes;
++	 * the test is the same one that e2fsck uses.
++ * NeilBrown 1999oct15
++ */
++ if (inode->i_nlink == 0 && (inode->i_mode == 0 ||
++ inode->u.ext2_i.i_dtime)) {
++ /* this inode is deleted */
++ goto bad_inode;
++ }
++ inode->i_blksize = PAGE_SIZE;
++ inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
++ inode->i_version = ++event;
++ inode->u.ext2_i.i_flags = le32_to_cpu(raw_inode->i_flags);
++ inode->u.ext2_i.i_faddr = le32_to_cpu(raw_inode->i_faddr);
++ inode->u.ext2_i.i_frag_no = raw_inode->i_frag;
++ inode->u.ext2_i.i_frag_size = raw_inode->i_fsize;
++ inode->u.ext2_i.i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
++ if (S_ISREG(inode->i_mode))
++ inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high))
++ << 32;
++ else
++ inode->u.ext2_i.i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
++ inode->i_generation = le32_to_cpu(raw_inode->i_generation);
++ inode->u.ext2_i.i_prealloc_count = 0;
++ inode->u.ext2_i.i_block_group = block_group;
++
++ /*
++ * NOTE! The in-memory inode i_data array is in little-endian order
++ * even on big-endian machines: we do NOT byteswap the block numbers!
++ */
++ for (block = 0; block < EXT2_N_BLOCKS; block++)
++ inode->u.ext2_i.i_data[block] = raw_inode->i_block[block];
++
++ if (inode->i_ino == EXT2_ACL_IDX_INO ||
++ inode->i_ino == EXT2_ACL_DATA_INO)
++ /* Nothing to do */ ;
++ else if (S_ISREG(inode->i_mode)) {
++ inode->i_op = &xip2_file_inode_operations;
++ inode->i_fop = &xip2_file_operations;
++ inode->i_mapping->a_ops = &xip2_aops;
++ } else if (S_ISDIR(inode->i_mode)) {
++ inode->i_op = &xip2_dir_inode_operations;
++ inode->i_fop = &xip2_dir_operations;
++ inode->i_mapping->a_ops = &xip2_aops;
++ } else if (S_ISLNK(inode->i_mode)) {
++ if (!inode->i_blocks)
++ inode->i_op = &xip2_fast_symlink_inode_operations;
++ else {
++ inode->i_op = &xip2_symlink_inode_operations;
++ inode->i_mapping->a_ops = &xip2_aops;
++ }
++ } else
++ init_special_inode(inode, inode->i_mode,
++ le32_to_cpu(raw_inode->i_block[0]));
++ inode->i_attr_flags = 0;
++ if (inode->u.ext2_i.i_flags & EXT2_SYNC_FL) {
++ inode->i_attr_flags |= ATTR_FLAG_SYNCRONOUS;
++ inode->i_flags |= S_SYNC;
++ }
++ if (inode->u.ext2_i.i_flags & EXT2_APPEND_FL) {
++ inode->i_attr_flags |= ATTR_FLAG_APPEND;
++ inode->i_flags |= S_APPEND;
++ }
++ if (inode->u.ext2_i.i_flags & EXT2_IMMUTABLE_FL) {
++ inode->i_attr_flags |= ATTR_FLAG_IMMUTABLE;
++ inode->i_flags |= S_IMMUTABLE;
++ }
++ if (inode->u.ext2_i.i_flags & EXT2_NOATIME_FL) {
++ inode->i_attr_flags |= ATTR_FLAG_NOATIME;
++ inode->i_flags |= S_NOATIME;
++ }
++ return;
++
++bad_inode:
++ make_bad_inode(inode);
++ return;
++}
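+
+For reference, xip2_get_branch above walks an offsets array produced by
+xip2_block_to_path, which is not shown in this hunk. The following is a
+minimal stand-alone sketch of the ext2-style indirection arithmetic such a
+helper performs, assuming 4 KiB blocks (12 direct slots, 1024 = 4096/4 block
+addresses per indirect block); the constants are assumptions for
+illustration, not taken from the patch:
+
+	#include <stdio.h>
+
+	#define NDIR 12    /* assumed EXT2_NDIR_BLOCKS */
+	#define PTRS 1024  /* 4096-byte block / 4-byte block address */
+
+	/* Map a logical file block to an array of per-level offsets. */
+	static int block_to_path(long iblock, int offsets[4])
+	{
+		int n = 0;
+
+		if (iblock < NDIR) {
+			offsets[n++] = iblock;
+		} else if ((iblock -= NDIR) < PTRS) {
+			offsets[n++] = NDIR;            /* indirect slot */
+			offsets[n++] = iblock;
+		} else if ((iblock -= PTRS) < (long)PTRS * PTRS) {
+			offsets[n++] = NDIR + 1;        /* double indirect */
+			offsets[n++] = iblock / PTRS;
+			offsets[n++] = iblock % PTRS;
+		} else {
+			iblock -= (long)PTRS * PTRS;
+			offsets[n++] = NDIR + 2;        /* triple indirect */
+			offsets[n++] = iblock / ((long)PTRS * PTRS);
+			offsets[n++] = (iblock / PTRS) % PTRS;
+			offsets[n++] = iblock % PTRS;
+		}
+		return n;   /* the depth that xip2_get_branch walks */
+	}
+
+	int main(void)
+	{
+		int i, offsets[4];
+		int depth = block_to_path(20000, offsets);
+
+		for (i = 0; i < depth; i++)
+			printf("%d ", offsets[i]);  /* prints: 13 18 532 */
+		printf("\n");
+		return 0;
+	}
+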
+=== fs/xip2fs/super.c
+==================================================================
+--- fs/xip2fs/super.c (/upstream/vanilla/2.4.27) (revision 52)
++++ fs/xip2fs/super.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,661 @@
++/*
++ * linux/fs/xip2/super.c, Version 1
++ *
++ * (C) Copyright IBM Corp. 2002,2004
++ * Author(s): Carsten Otte <cotte at de.ibm.com>
++ * derived from second extended filesystem (ext2)
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/string.h>
++#include <linux/fs.h>
++#include <linux/xip2_fs.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/locks.h>
++#include <linux/blkdev.h>
++#include <asm/dcss.h>
++#include <asm/uaccess.h>
++
++static char error_buf[1024];
++
++void xip2_error (struct super_block * sb, const char * function,
++ const char * fmt, ...)
++{
++ va_list args;
++
++ va_start (args, fmt);
++ vsprintf (error_buf, fmt, args);
++ va_end (args);
++ if (test_opt (sb, ERRORS_PANIC) ||
++ (le16_to_cpu(sb->u.xip2_sb.s_es->s_errors) == EXT2_ERRORS_PANIC &&
++ !test_opt (sb, ERRORS_CONT) && !test_opt (sb, ERRORS_RO)))
++ panic ("XIP2-fs panic (device %s): %s: %s\n",
++ bdevname(sb->s_dev), function, error_buf);
++ printk (KERN_CRIT "XIP2-fs error (device %s): %s: %s\n",
++ bdevname(sb->s_dev), function, error_buf);
++ if (test_opt (sb, ERRORS_RO) ||
++ (le16_to_cpu(sb->u.xip2_sb.s_es->s_errors) == EXT2_ERRORS_RO &&
++ !test_opt (sb, ERRORS_CONT) && !test_opt (sb, ERRORS_PANIC))) {
++ printk ("Remounting filesystem read-only\n");
++ sb->s_flags |= MS_RDONLY;
++ }
++}
++
++NORET_TYPE void xip2_panic (struct super_block * sb, const char * function,
++ const char * fmt, ...)
++{
++ va_list args;
++
++ if (!(sb->s_flags & MS_RDONLY)) {
++ sb->u.xip2_sb.s_mount_state |= EXT2_ERROR_FS;
++ sb->u.xip2_sb.s_es->s_state =
++ cpu_to_le16(le16_to_cpu(sb->u.xip2_sb.s_es->s_state)
++ | EXT2_ERROR_FS);
++ sb->s_dirt = 1;
++ }
++ va_start (args, fmt);
++ vsprintf (error_buf, fmt, args);
++ va_end (args);
++ sb->s_flags |= MS_RDONLY;
++ panic ("XIP2-fs panic (device %s): %s: %s\n",
++ bdevname(sb->s_dev), function, error_buf);
++}
++
++void xip2_warning (struct super_block * sb, const char * function,
++ const char * fmt, ...)
++{
++ va_list args;
++
++ va_start (args, fmt);
++ vsprintf (error_buf, fmt, args);
++ va_end (args);
++ printk (KERN_WARNING "XIP2-fs warning (device %s): %s: %s\n",
++ bdevname(sb->s_dev), function, error_buf);
++}
++
++void xip2_update_dynamic_rev(struct super_block *sb)
++{
++ struct ext2_super_block *es = XIP2_SB(sb)->s_es;
++
++ if (le32_to_cpu(es->s_rev_level) > EXT2_GOOD_OLD_REV)
++ return;
++
++ xip2_warning(sb, __FUNCTION__,
++ "updating to rev %d because of new feature flag, "
++ "running e2fsck is recommended",
++ EXT2_DYNAMIC_REV);
++
++ es->s_first_ino = cpu_to_le32(EXT2_GOOD_OLD_FIRST_INO);
++ es->s_inode_size = cpu_to_le16(EXT2_GOOD_OLD_INODE_SIZE);
++ es->s_rev_level = cpu_to_le32(EXT2_DYNAMIC_REV);
++ /* leave es->s_feature_*compat flags alone */
++ /* es->s_uuid will be set by e2fsck if empty */
++
++ /*
++ * The rest of the superblock fields should be zero, and if not it
++ * means they are likely already in use, so leave them alone. We
++ * can leave it up to e2fsck to clean up any inconsistencies there.
++ */
++}
++
++void xip2_put_super (struct super_block * sb)
++{
++ if (sb->u.xip2_sb.mem_area) {
++ segment_unload (((xip2_mem_area_t*)(sb->u.xip2_sb.mem_area))
++ ->name);
++ kfree (sb->u.xip2_sb.mem_area);
++ }
++ kfree(sb->u.xip2_sb.s_group_desc);
++ return;
++}
++
++static struct super_operations xip2_sops = {
++ read_inode: xip2_read_inode,
++ put_super: xip2_put_super,
++ statfs: xip2_statfs,
++ remount_fs: xip2_remount,
++};
++
++/*
++ * This function has been shamelessly adapted from the msdos fs
++ */
++static int parse_options (char * options, unsigned long * sb_block,
++ unsigned short *resuid, unsigned short * resgid,
++ unsigned long * mount_options,
++ xip2_mem_area_t ** mem_area)
++{
++ char * this_char;
++ char * value;
++
++ if (!options)
++ return 1;
++ for (this_char = strtok (options, ",");
++ this_char != NULL;
++ this_char = strtok (NULL, ",")) {
++ if ((value = strchr (this_char, '=')) != NULL)
++ *value++ = 0;
++ if (!strcmp (this_char, "bsddf"))
++ clear_opt (*mount_options, MINIX_DF);
++ else if (!strcmp (this_char, "nouid32")) {
++ set_opt (*mount_options, NO_UID32);
++ }
++ else if (!strcmp (this_char, "check")) {
++ if (!value || !*value || !strcmp (value, "none"))
++ clear_opt (*mount_options, CHECK);
++ else
++#ifdef CONFIG_EXT2_CHECK
++ set_opt (*mount_options, CHECK);
++#else
++ printk("XIP2 Check option not supported\n");
++#endif
++ }
++ else if (!strcmp (this_char, "debug"))
++ set_opt (*mount_options, DEBUG);
++ else if (!strcmp (this_char, "errors")) {
++ if (!value || !*value) {
++ printk ("XIP2-fs: the errors option requires "
++ "an argument\n");
++ return 0;
++ }
++ if (!strcmp (value, "continue")) {
++ clear_opt (*mount_options, ERRORS_RO);
++ clear_opt (*mount_options, ERRORS_PANIC);
++ set_opt (*mount_options, ERRORS_CONT);
++ }
++ else if (!strcmp (value, "remount-ro")) {
++ clear_opt (*mount_options, ERRORS_CONT);
++ clear_opt (*mount_options, ERRORS_PANIC);
++ set_opt (*mount_options, ERRORS_RO);
++ }
++ else if (!strcmp (value, "panic")) {
++ clear_opt (*mount_options, ERRORS_CONT);
++ clear_opt (*mount_options, ERRORS_RO);
++ set_opt (*mount_options, ERRORS_PANIC);
++ }
++ else {
++ printk ("XIP2-fs: Invalid errors option: %s\n",
++ value);
++ return 0;
++ }
++ }
++ else if (!strcmp (this_char, "grpid") ||
++ !strcmp (this_char, "bsdgroups"))
++ set_opt (*mount_options, GRPID);
++ else if (!strcmp (this_char, "minixdf"))
++ set_opt (*mount_options, MINIX_DF);
++ else if (!strcmp (this_char, "nocheck"))
++ clear_opt (*mount_options, CHECK);
++ else if (!strcmp (this_char, "nogrpid") ||
++ !strcmp (this_char, "sysvgroups"))
++ clear_opt (*mount_options, GRPID);
++ else if (!strcmp (this_char, "resgid")) {
++ if (!value || !*value) {
++ printk ("XIP2-fs: the resgid option requires "
++ "an argument\n");
++ return 0;
++ }
++ *resgid = simple_strtoul (value, &value, 0);
++ if (*value) {
++ printk ("XIP2-fs: Invalid resgid option: %s\n",
++ value);
++ return 0;
++ }
++ }
++ else if (!strcmp (this_char, "resuid")) {
++ if (!value || !*value) {
++ printk ("XIP2-fs: the resuid option requires "
++				"an argument\n");
++ return 0;
++ }
++ *resuid = simple_strtoul (value, &value, 0);
++ if (*value) {
++ printk ("XIP2-fs: Invalid resuid option: %s\n",
++ value);
++ return 0;
++ }
++ }
++ else if (!strcmp (this_char, "sb")) {
++ if (!value || !*value) {
++ printk ("XIP2-fs: the sb option requires "
++				"an argument\n");
++ return 0;
++ }
++ *sb_block = simple_strtoul (value, &value, 0);
++ if (*value) {
++ printk ("XIP2-fs: Invalid sb option: %s\n",
++ value);
++ return 0;
++ }
++ }
++ else if (!strcmp (this_char, "memarea")) {
++ if (!value || !*value) {
++ printk ("XIP2-fs: the memarea option requires "
++ "an argument\n");
++ return 0;
++ }
++ *mem_area=kmalloc (sizeof(xip2_mem_area_t)+strlen(value)+1,GFP_ATOMIC);
++			if (*mem_area == NULL) {
++ printk ("XIP2-fs: out of memory\n");
++ return 0;
++ }
++ (*mem_area)->name=((char*)(*mem_area)) +
++ sizeof(xip2_mem_area_t);
++ strcpy ((*mem_area)->name, value);
++ if (segment_load(value, SEGMENT_SHARED_RO, &(*mem_area)->start,
++ &(*mem_area)->end)<0) {
++ printk ("XIP2-fs: the memarea specified could "
++ "not be found\n");
++ kfree (*mem_area);
++ *mem_area = NULL;
++ return 0;
++ }
++ }
++ /* Silently ignore the quota options */
++ else if (!strcmp (this_char, "grpquota")
++ || !strcmp (this_char, "noquota")
++ || !strcmp (this_char, "quota")
++ || !strcmp (this_char, "usrquota"))
++ /* Don't do anything ;-) */ ;
++ else {
++ printk ("XIP2-fs: Unrecognized mount option %s\n",
++ this_char);
++ return 0;
++ }
++ }
++ return 1;
++}
++
++static int xip2_check_descriptors (struct super_block * sb)
++{
++ int i;
++ int desc_block = 0;
++ unsigned long block = le32_to_cpu(sb->u.xip2_sb.s_es->
++ s_first_data_block);
++ struct ext2_group_desc * gdp = NULL;
++
++ xip2_debug ("Checking group descriptors");
++
++ for (i = 0; i < sb->u.xip2_sb.s_groups_count; i++)
++ {
++ if ((i % EXT2_DESC_PER_BLOCK(sb)) == 0)
++ gdp = (struct ext2_group_desc *)
++ sb->u.xip2_sb.s_group_desc[desc_block++];
++ if (le32_to_cpu(gdp->bg_block_bitmap) < block ||
++ le32_to_cpu(gdp->bg_block_bitmap) >=
++ block + EXT2_BLOCKS_PER_GROUP(sb))
++ {
++ xip2_error (sb, "xip2_check_descriptors",
++ "Block bitmap for group %d"
++ " not in group (block %lu)!",
++ i, (unsigned long)
++ le32_to_cpu(gdp->bg_block_bitmap));
++ return 0;
++ }
++ if (le32_to_cpu(gdp->bg_inode_bitmap) < block ||
++ le32_to_cpu(gdp->bg_inode_bitmap) >= block +
++ EXT2_BLOCKS_PER_GROUP(sb))
++ {
++ xip2_error (sb, "xip2_check_descriptors",
++ "Inode bitmap for group %d"
++ " not in group (block %lu)!",
++ i, (unsigned long)
++ le32_to_cpu(gdp->bg_inode_bitmap));
++ return 0;
++ }
++ if (le32_to_cpu(gdp->bg_inode_table) < block ||
++ le32_to_cpu(gdp->bg_inode_table) +
++ sb->u.xip2_sb.s_itb_per_group >=
++ block + EXT2_BLOCKS_PER_GROUP(sb))
++ {
++ xip2_error (sb, "xip2_check_descriptors",
++ "Inode table for group %d"
++ " not in group (block %lu)!",
++ i, (unsigned long)
++ le32_to_cpu(gdp->bg_inode_table));
++ return 0;
++ }
++ block += EXT2_BLOCKS_PER_GROUP(sb);
++ gdp++;
++ }
++ return 1;
++}
++
++#define log2(n) ffz(~(n))
++
++/*
++ * Maximal file size. There is a direct, and {,double-,triple-}indirect
++ * block limit, and also a limit of (2^32 - 1) 512-byte sectors in i_blocks.
++ * We need to be 1 filesystem block less than the 2^32 sector limit.
++ */
++static loff_t xip2_max_size(int bits)
++{
++ loff_t res = EXT2_NDIR_BLOCKS;
++ res += 1LL << (bits-2);
++ res += 1LL << (2*(bits-2));
++ res += 1LL << (3*(bits-2));
++ res <<= bits;
++ if (res > (512LL << 32) - (1 << bits))
++ res = (512LL << 32) - (1 << bits);
++ return res;
++}
++
++struct super_block * xip2_read_super (struct super_block * sb, void * data,
++ int silent)
++{
++ xip2_mem_area_t* mem_area = NULL;
++ void * block_ptr;
++ struct ext2_super_block * es;
++ unsigned long sb_block = 0;
++ unsigned short resuid = EXT2_DEF_RESUID;
++ unsigned short resgid = EXT2_DEF_RESGID;
++ kdev_t dev = sb->s_dev;
++ int blocksize = BLOCK_SIZE;
++ int db_count;
++ int i;
++
++
++ blocksize = PAGE_SIZE;
++ sb->u.xip2_sb.s_mount_opt = 0;
++ if (!parse_options ((char *) data, &sb_block, &resuid, &resgid,
++ &sb->u.xip2_sb.s_mount_opt,&mem_area)) {
++ return NULL;
++ }
++
++ if (!mem_area) {
++ printk ("XIP2-fs: no memarea specified\n");
++ return NULL;
++ }
++
++ if (!(block_ptr = xip2_maread (mem_area, sb_block, blocksize))) {
++ printk ("XIP2-fs: unable to read superblock\n");
++ kfree (mem_area);
++ return NULL;
++ }
++ /*
++ * Note: s_es must be initialized as soon as possible because
++ * some ext2 macro-instructions depend on its value
++ */
++ es = (struct ext2_super_block *) (block_ptr + BLOCK_SIZE);
++ sb->u.xip2_sb.s_es = es;
++ sb->u.xip2_sb.mem_area = mem_area;
++ sb->s_magic = le16_to_cpu(es->s_magic);
++ if (sb->s_magic != EXT2_SUPER_MAGIC) {
++ if (!silent)
++ printk ("VFS: Can't find xip2 filesystem on "
++ "memarea %s.\n", mem_area->name);
++ goto failed_mount;
++ }
++ if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
++ (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
++ EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
++ EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
++ printk("XIP2-fs warning: feature flags set on rev 0 fs, "
++ "running e2fsck is recommended\n");
++ /*
++ * Check if fs should be mounted rdonly. If not let the mount fail.
++ */
++ if (!(sb->s_flags & MS_RDONLY)) {
++ printk("XIP2-fs: %s: mounting RDWR was requested, "
++		       "but is not supported. Please mount using "
++ "the RO option\n", mem_area->name);
++ goto failed_mount;
++ }
++
++ /*
++ * Check feature flags regardless of the revision level, since we
++ * previously didn't change the revision level when setting the flags,
++ * so there is a chance incompat flags are set on a rev 0 filesystem.
++ */
++ if ((i = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP))) {
++ printk("XIP2-fs: %s: couldn't mount because of "
++ "unsupported optional features (%x).\n",
++ mem_area->name, i);
++ goto failed_mount;
++ }
++ sb->s_blocksize_bits =
++ le32_to_cpu(XIP2_SB(sb)->s_es->s_log_block_size) + 10;
++ sb->s_blocksize = 1 << sb->s_blocksize_bits;
++
++ sb->s_maxbytes = xip2_max_size(sb->s_blocksize_bits);
++
++ /* If the blocksize doesn't match, tell the user.. */
++ if (sb->s_blocksize != blocksize) {
++ printk("XIP2-fs: Blocksize of the filesystem "
++ "on %s does not match PAGE_SIZE.\n",
++ mem_area->name);
++ goto failed_mount;
++ }
++
++ if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
++ sb->u.xip2_sb.s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
++ sb->u.xip2_sb.s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
++ } else {
++ sb->u.xip2_sb.s_inode_size = le16_to_cpu(es->s_inode_size);
++ sb->u.xip2_sb.s_first_ino = le32_to_cpu(es->s_first_ino);
++ if (sb->u.xip2_sb.s_inode_size != EXT2_GOOD_OLD_INODE_SIZE) {
++ printk ("XIP2-fs: unsupported inode size: %d\n",
++ sb->u.xip2_sb.s_inode_size);
++ goto failed_mount;
++ }
++ }
++ sb->u.xip2_sb.s_frag_size = EXT2_MIN_FRAG_SIZE <<
++ le32_to_cpu(es->s_log_frag_size);
++ if (sb->u.xip2_sb.s_frag_size)
++ sb->u.xip2_sb.s_frags_per_block = sb->s_blocksize /
++ sb->u.xip2_sb.s_frag_size;
++ else
++ sb->s_magic = 0;
++ sb->u.xip2_sb.s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
++ sb->u.xip2_sb.s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
++ sb->u.xip2_sb.s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
++ sb->u.xip2_sb.s_inodes_per_block = sb->s_blocksize /
++ EXT2_INODE_SIZE(sb);
++ sb->u.xip2_sb.s_itb_per_group = sb->u.xip2_sb.s_inodes_per_group /
++ sb->u.xip2_sb.s_inodes_per_block;
++ sb->u.xip2_sb.s_desc_per_block = sb->s_blocksize /
++ sizeof (struct ext2_group_desc);
++ sb->u.xip2_sb.s_sbp = block_ptr;
++ if (resuid != EXT2_DEF_RESUID)
++ sb->u.xip2_sb.s_resuid = resuid;
++ else
++ sb->u.xip2_sb.s_resuid = le16_to_cpu(es->s_def_resuid);
++ if (resgid != EXT2_DEF_RESGID)
++ sb->u.xip2_sb.s_resgid = resgid;
++ else
++ sb->u.xip2_sb.s_resgid = le16_to_cpu(es->s_def_resgid);
++ sb->u.xip2_sb.s_mount_state = le16_to_cpu(es->s_state);
++ sb->u.xip2_sb.s_addr_per_block_bits =
++ log2 (EXT2_ADDR_PER_BLOCK(sb));
++ sb->u.xip2_sb.s_desc_per_block_bits =
++ log2 (EXT2_DESC_PER_BLOCK(sb));
++ if (sb->s_magic != EXT2_SUPER_MAGIC) {
++ if (!silent)
++ printk ("VFS: Can't find an xip2 filesystem on dev "
++ "%s.\n",
++ bdevname(dev));
++ goto failed_mount;
++ }
++
++ if (sb->s_blocksize != sb->u.xip2_sb.s_frag_size) {
++ printk ("XIP2-fs: fragsize %lu != blocksize %lu "
++ "(not supported yet)\n",
++ sb->u.xip2_sb.s_frag_size, sb->s_blocksize);
++ goto failed_mount;
++ }
++
++ if (sb->u.xip2_sb.s_blocks_per_group > sb->s_blocksize * 8) {
++ printk ("XIP2-fs: #blocks per group too big: %lu\n",
++ sb->u.xip2_sb.s_blocks_per_group);
++ goto failed_mount;
++ }
++ if (sb->u.xip2_sb.s_frags_per_group > sb->s_blocksize * 8) {
++ printk ("XIP2-fs: #fragments per group too big: %lu\n",
++ sb->u.xip2_sb.s_frags_per_group);
++ goto failed_mount;
++ }
++ if (sb->u.xip2_sb.s_inodes_per_group > sb->s_blocksize * 8) {
++ printk ("XIP2-fs: #inodes per group too big: %lu\n",
++ sb->u.xip2_sb.s_inodes_per_group);
++ goto failed_mount;
++ }
++
++ sb->u.xip2_sb.s_groups_count = (le32_to_cpu(es->s_blocks_count) -
++ le32_to_cpu(es->s_first_data_block) +
++ EXT2_BLOCKS_PER_GROUP(sb) - 1) /
++ EXT2_BLOCKS_PER_GROUP(sb);
++ db_count = (sb->u.xip2_sb.s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1)
++ / EXT2_DESC_PER_BLOCK(sb);
++ sb->u.xip2_sb.s_group_desc = kmalloc (db_count * sizeof (void *),
++ GFP_KERNEL);
++ if (sb->u.xip2_sb.s_group_desc == NULL) {
++ printk ("XIP2-fs: not enough memory\n");
++ goto failed_mount;
++ }
++ for (i = 0; i < db_count; i++) {
++ sb->u.xip2_sb.s_group_desc[i] = xip2_maread
++ (mem_area, sb_block + i + 1, sb->s_blocksize);
++ if (!sb->u.xip2_sb.s_group_desc[i]) {
++ kfree(sb->u.xip2_sb.s_group_desc);
++ printk ("XIP2-fs: unable to read group descriptors\n");
++ goto failed_mount;
++ }
++ }
++ if (!xip2_check_descriptors (sb)) {
++ printk ("XIP2-fs: group descriptors corrupted!\n");
++ db_count = i;
++ goto failed_mount2;
++ }
++ for (i = 0; i < EXT2_MAX_GROUP_LOADED; i++) {
++ sb->u.xip2_sb.s_inode_bitmap_number[i] = 0;
++ sb->u.xip2_sb.s_inode_bitmap[i] = NULL;
++ sb->u.xip2_sb.s_block_bitmap_number[i] = 0;
++ sb->u.xip2_sb.s_block_bitmap[i] = NULL;
++ }
++ sb->u.xip2_sb.s_loaded_inode_bitmaps = 0;
++ sb->u.xip2_sb.s_loaded_block_bitmaps = 0;
++ sb->u.xip2_sb.s_gdb_count = db_count;
++ /*
++ * set up enough so that it can read an inode
++ */
++ sb->s_op = &xip2_sops;
++ sb->s_root = d_alloc_root(iget(sb, EXT2_ROOT_INO));
++ if (!sb->s_root || !S_ISDIR(sb->s_root->d_inode->i_mode) ||
++ !sb->s_root->d_inode->i_blocks || !sb->s_root->d_inode->i_size) {
++ if (sb->s_root) {
++ dput(sb->s_root);
++ sb->s_root = NULL;
++ printk(KERN_ERR "XIP2-fs: corrupt root inode, run "
++ "e2fsck\n");
++ } else
++ printk(KERN_ERR "XIP2-fs: get root inode failed\n");
++ goto failed_mount2;
++ }
++ return sb;
++failed_mount2:
++ kfree(sb->u.xip2_sb.s_group_desc);
++failed_mount:
++ kfree(mem_area);
++ return NULL;
++}
++
++int xip2_remount (struct super_block * sb, int * flags, char * data)
++{
++ struct ext2_super_block * es;
++ xip2_mem_area_t* mem_area;
++ unsigned short resuid = sb->u.xip2_sb.s_resuid;
++ unsigned short resgid = sb->u.xip2_sb.s_resgid;
++ unsigned long new_mount_opt;
++ unsigned long tmp;
++
++ /* filesystem is read-only _only_, therefore mounting rdwr is not permitted */
++ if (!(*flags & MS_RDONLY))
++ return -EINVAL;
++
++ /*
++ * Allow the "check" option to be passed as a remount option.
++ */
++ new_mount_opt = sb->u.xip2_sb.s_mount_opt;
++ mem_area = sb->u.xip2_sb.mem_area;
++ if (!parse_options (data, &tmp, &resuid, &resgid,
++ &new_mount_opt, &mem_area))
++ return -EINVAL;
++
++ if ((!mem_area) || (mem_area != sb->u.xip2_sb.mem_area))
++ return -EINVAL;
++
++ sb->u.xip2_sb.s_mount_opt = new_mount_opt;
++ sb->u.xip2_sb.s_resuid = resuid;
++ sb->u.xip2_sb.s_resgid = resgid;
++ es = sb->u.xip2_sb.s_es;
++ return 0;
++}
++
++int xip2_statfs (struct super_block * sb, struct statfs * buf)
++{
++ unsigned long overhead;
++ int i;
++
++ if (test_opt (sb, MINIX_DF))
++ overhead = 0;
++ else {
++ /*
++ * Compute the overhead (FS structures)
++ */
++
++ /*
++ * All of the blocks before first_data_block are
++ * overhead
++ */
++ overhead = le32_to_cpu(sb->u.xip2_sb.s_es->s_first_data_block);
++
++ /*
++ * Add the overhead attributed to the superblock and
++ * block group descriptors. If the sparse superblocks
++ * feature is turned on, then not all groups have this.
++ */
++ for (i = 0; i < XIP2_SB(sb)->s_groups_count; i++)
++ overhead += xip2_bg_has_super(sb, i) +
++ xip2_bg_num_gdb(sb, i);
++
++ /*
++ * Every block group has an inode bitmap, a block
++ * bitmap, and an inode table.
++ */
++ overhead += (sb->u.xip2_sb.s_groups_count *
++ (2 + sb->u.xip2_sb.s_itb_per_group));
++ }
++
++ buf->f_type = EXT2_SUPER_MAGIC;
++ buf->f_bsize = sb->s_blocksize;
++ buf->f_blocks = le32_to_cpu(sb->u.xip2_sb.s_es->s_blocks_count)
++ - overhead;
++ buf->f_bfree = xip2_count_free_blocks (sb);
++ buf->f_bavail = buf->f_bfree -
++ le32_to_cpu(sb->u.xip2_sb.s_es->s_r_blocks_count);
++ if (buf->f_bfree < le32_to_cpu(sb->u.xip2_sb.s_es->s_r_blocks_count))
++ buf->f_bavail = 0;
++ buf->f_files = le32_to_cpu(sb->u.xip2_sb.s_es->s_inodes_count);
++ buf->f_ffree = xip2_count_free_inodes (sb);
++ buf->f_namelen = EXT2_NAME_LEN;
++ return 0;
++}
++
++static DECLARE_FSTYPE (xip2_fs_type, "xip2", xip2_read_super,
++ FS_NO_DCACHE|FS_NO_PRELIM);
++
++static int __init init_xip2_fs(void)
++{
++ return register_filesystem(&xip2_fs_type);
++}
++
++static void __exit exit_xip2_fs(void)
++{
++ unregister_filesystem(&xip2_fs_type);
++}
++
++EXPORT_NO_SYMBOLS;
++
++module_init(init_xip2_fs)
++module_exit(exit_xip2_fs)
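+
+As a worked instance of xip2_max_size() above, assuming 4 KiB blocks
+(bits = 12) and the usual 12 direct block slots:
+
+	res = 12 + 2^10 + 2^20 + 2^30   = 1074791436 blocks
+	res <<= 12                      gives roughly 4.0 TiB
+	cap = 512 * 2^32 - 2^12 bytes   = 2 TiB - 4 KiB
+
+So it is the (2^32 - 1) 512-byte-sector limit in i_blocks, not the
+indirection depth, that caps the maximal file size at just under 2 TiB.
+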
+=== fs/xip2fs/file.c
+==================================================================
+--- fs/xip2fs/file.c (/upstream/vanilla/2.4.27) (revision 52)
++++ fs/xip2fs/file.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,169 @@
++/*
++ * linux/fs/xip2/file.c, Version 1
++ *
++ * (C) Copyright IBM Corp. 2002,2004
++ * Author(s): Carsten Otte <cotte at de.ibm.com>
++ * derived from second extended filesystem (ext2)
++ */
++
++#include <linux/fs.h>
++#include <linux/xip2_fs.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/pagemap.h>
++#include <asm/uaccess.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++
++/*
++ * We have mostly NULL's here: the current defaults are ok for
++ * the xip2 filesystem. For mmap, we use a special implementation
++ * that provides execute-in-place functionality.
++ */
++struct file_operations xip2_file_operations = {
++ llseek: generic_file_llseek,
++ read: xip2_file_read,
++ write: generic_file_write,
++ ioctl: xip2_ioctl,
++ mmap: xip2_file_mmap,
++ open: generic_file_open,
++};
++
++struct inode_operations xip2_file_inode_operations = {};
++
++static struct vm_operations_struct xip2_file_vm_ops = {
++ nopage: xip2_nopage_in_place,
++};
++
++/* This is used for a general mmap of a disk file */
++
++int xip2_file_mmap(struct file * file, struct vm_area_struct * vma)
++{
++ struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
++ struct inode *inode = mapping->host;
++
++ if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
++ if (!mapping->a_ops->writepage)
++ return -EINVAL;
++	}
++ if (!mapping->a_ops->readpage)
++ return -ENOEXEC;
++ UPDATE_ATIME(inode);
++ vma->vm_ops = &xip2_file_vm_ops;
++ return 0;
++}
++
++struct page * xip2_nopage_in_place(struct vm_area_struct * area,
++ unsigned long address, int unused)
++{
++ int error;
++ unsigned long blockno=ULONG_MAX;
++ void* block_ptr;
++ struct file *file = area->vm_file;
++ struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
++ struct inode *inode = mapping->host;
++ unsigned long pgoff;
++
++ pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) +
++ area->vm_pgoff;
++ error=xip2_get_block(inode, pgoff, &blockno, 0);
++ if (error) {
++		printk ("XIP2-FS: xip2_nopage_in_place could not fulfill "
++ "page request\n");
++ return NULL;
++ }
++ block_ptr = xip2_maread(inode->i_sb->u.xip2_sb.mem_area, blockno,
++ PAGE_SIZE);
++ if (!block_ptr)
++ return virt_to_page(empty_zero_page);
++ return virt_to_page(block_ptr);
++}
++
++void xip2_do_file_read(struct file * filp, loff_t *ppos, read_descriptor_t * desc)
++{
++ struct address_space *mapping = filp->f_dentry->d_inode->i_mapping;
++ struct inode *inode = mapping->host;
++ unsigned long block,offset,rdlen,count, iblock, lblock, blockno;
++ void* block_ptr,* cpystart;
++ int error,cpycount;
++
++ iblock = (*ppos)/PAGE_SIZE;
++ offset = (*ppos)%PAGE_SIZE;
++ rdlen = desc->count;
++ if ((*ppos)+desc->count > inode->i_size)
++ rdlen = inode->i_size - (*ppos);
++ lblock = (*ppos + rdlen) / PAGE_SIZE;
++ count = 0;
++ for (block = iblock; block <= lblock; block++) {
++ error=xip2_get_block(inode, block, &blockno, 0);
++ if (error) {
++ desc->error = error;
++ desc->written = count;
++ return;
++		}
++ block_ptr = xip2_maread (inode->i_sb->u.xip2_sb.mem_area,
++ blockno, PAGE_SIZE);
++ if (block_ptr) {
++ if (block == iblock) {
++ cpystart = block_ptr + offset;
++ cpycount = PAGE_SIZE - offset;
++ } else {
++ cpystart = block_ptr;
++ cpycount = PAGE_SIZE;
++ }
++ } else {
++			/* there is no block assigned, copy zeros over */
++ if (block == iblock) {
++ cpystart = empty_zero_page;
++ cpycount = PAGE_SIZE - offset;
++ } else {
++ cpystart = empty_zero_page;
++ cpycount = PAGE_SIZE;
++ }
++ }
++ if (cpycount > rdlen-count) {
++ cpycount = rdlen-count;
++ if (block!=lblock) BUG();
++		}
++ if (copy_to_user(desc->buf+count, cpystart, cpycount)) {
++ desc->error = -EFAULT;
++ desc->written = count;
++ return;
++		}
++ count += cpycount;
++ }
++ if (rdlen-count>0) BUG();
++ desc->error = 0;
++ desc->written = count;
++ *ppos+=count;
++ return;
++}
++
++
++ssize_t xip2_file_read(struct file * filp, char * buf, size_t count, loff_t *ppos)
++{
++ ssize_t retval;
++
++ if ((ssize_t) count < 0)
++ return -EINVAL;
++
++ retval = -EFAULT;
++ if (access_ok(VERIFY_WRITE, buf, count)) {
++ retval = 0;
++
++ if (count) {
++ read_descriptor_t desc;
++
++ desc.written = 0;
++ desc.count = count;
++ desc.buf = buf;
++ desc.error = 0;
++ xip2_do_file_read(filp, ppos, &desc);
++
++ retval = desc.written;
++ if (!retval)
++ retval = desc.error;
++ }
++	}
++ return retval;
++}
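+
+A hypothetical user-space consumer of the mmap path above: MAP_SHARED
+read-only faults are resolved by xip2_nopage_in_place directly to DCSS
+pages, so no page-cache copies are made. The mount point and file name are
+illustrative only, not taken from the patch:
+
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <sys/mman.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		/* assumed xip2 mount point and file, for illustration */
+		int fd = open("/mnt/xip2/bin/app", O_RDONLY);
+		char *p;
+
+		if (fd < 0)
+			return 1;
+		p = mmap(NULL, 4096, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
+		if (p == MAP_FAILED) {
+			close(fd);
+			return 1;
+		}
+		printf("first byte: %02x\n", (unsigned char)p[0]);
+		munmap(p, 4096);
+		close(fd);
+		return 0;
+	}
+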
+=== fs/xip2fs/ialloc.c
+==================================================================
+--- fs/xip2fs/ialloc.c (/upstream/vanilla/2.4.27) (revision 52)
++++ fs/xip2fs/ialloc.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,34 @@
++/*
++ * linux/fs/xip2/ialloc.c, Version 1
++ *
++ * (C) Copyright IBM Corp. 2002,2004
++ * Author(s): Carsten Otte <cotte at de.ibm.com>
++ * derived from second extended filesystem (ext2)
++ */
++
++#include <linux/config.h>
++#include <linux/fs.h>
++#include <linux/xip2_fs.h>
++#include <linux/locks.h>
++#include <linux/quotaops.h>
++
++
++/*
++ * ialloc.c contains the inodes allocation and deallocation routines
++ */
++
++/*
++ * The free inodes are managed by bitmaps. A file system contains several
++ * blocks groups. Each group contains 1 bitmap block for blocks, 1 bitmap
++ * block for inodes, N blocks for the inode table and data blocks.
++ *
++ * The file system contains group descriptors which are located after the
++ * super block. Each descriptor contains the number of the bitmap block and
++ * the free blocks count in the block. The descriptors are loaded in memory
++ * when a file system is mounted (see xip2_read_super).
++ */
++
++unsigned long xip2_count_free_inodes (struct super_block * sb)
++{
++ return le32_to_cpu(sb->u.xip2_sb.s_es->s_free_inodes_count);
++}
+=== fs/xip2fs/symlink.c
+==================================================================
+--- fs/xip2fs/symlink.c (/upstream/vanilla/2.4.27) (revision 52)
++++ fs/xip2fs/symlink.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,66 @@
++/*
++ * linux/fs/xip2/symlink.c, Version 1
++ *
++ * (C) Copyright IBM Corp. 2002,2004
++ * Author(s): Carsten Otte <cotte at de.ibm.com>
++ * derived from second extended filesystem (ext2)
++ */
++
++#include <linux/fs.h>
++#include <linux/xip2_fs.h>
++#include <linux/mm.h>
++
++static char *xip2_getlink(struct dentry * dentry)
++{
++ char *res;
++ unsigned long blockno;
++
++ if (xip2_get_block(dentry->d_inode, 0, &blockno, 0)) {
++ printk ("XIP2-FS: could not resolve symbolic link\n");
++ return NULL;
++ }
++ res = (char*) xip2_maread (dentry->d_inode->i_sb->u.xip2_sb.mem_area,
++ blockno, PAGE_SIZE);
++ if (!res)
++ return (char*)empty_zero_page;
++ return (char*)res;
++}
++
++
++static int xip2_readlink(struct dentry *dentry, char *buffer, int buflen)
++{
++ char *s = xip2_getlink(dentry);
++ int res = vfs_readlink(dentry,buffer,buflen,s);
++
++ return res;
++}
++
++static int xip2_follow_link(struct dentry *dentry, struct nameidata *nd)
++{
++ char *s = xip2_getlink(dentry);
++ int res = vfs_follow_link(nd, s);
++
++ return res;
++}
++
++static int xip2_fast_readlink(struct dentry *dentry, char *buffer, int buflen)
++{
++ char *s = (char *)dentry->d_inode->u.ext2_i.i_data;
++ return vfs_readlink(dentry, buffer, buflen, s);
++}
++
++static int xip2_fast_follow_link(struct dentry *dentry, struct nameidata *nd)
++{
++ char *s = (char *)dentry->d_inode->u.ext2_i.i_data;
++ return vfs_follow_link(nd, s);
++}
++
++struct inode_operations xip2_fast_symlink_inode_operations = {
++ readlink: xip2_fast_readlink,
++ follow_link: xip2_fast_follow_link,
++};
++
++struct inode_operations xip2_symlink_inode_operations = {
++ readlink: xip2_readlink,
++ follow_link: xip2_follow_link,
++};
+=== fs/xip2fs/namei.c
+==================================================================
+--- fs/xip2fs/namei.c (/upstream/vanilla/2.4.27) (revision 52)
++++ fs/xip2fs/namei.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,38 @@
++/*
++ * linux/fs/xip2/namei.c, Version 1
++ *
++ * (C) Copyright IBM Corp. 2002,2004
++ * Author(s): Carsten Otte <cotte at de.ibm.com>
++ * derived from second extended filesystem (ext2)
++ */
++
++#include <linux/fs.h>
++#include <linux/xip2_fs.h>
++#include <linux/pagemap.h>
++
++/*
++ * Methods themselves.
++ */
++
++static struct dentry *xip2_lookup(struct inode * dir, struct dentry *dentry)
++{
++ struct inode * inode;
++ ino_t ino;
++
++ if (dentry->d_name.len > EXT2_NAME_LEN)
++ return ERR_PTR(-ENAMETOOLONG);
++
++ ino = xip2_inode_by_name(dir, dentry);
++ inode = NULL;
++ if (ino) {
++ inode = iget(dir->i_sb, ino);
++ if (!inode)
++ return ERR_PTR(-EACCES);
++ }
++ d_add(dentry, inode);
++ return NULL;
++}
++
++struct inode_operations xip2_dir_inode_operations = {
++ lookup: xip2_lookup,
++};
+=== fs/xip2fs/ioctl.c
+==================================================================
+--- fs/xip2fs/ioctl.c (/upstream/vanilla/2.4.27) (revision 52)
++++ fs/xip2fs/ioctl.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,31 @@
++/*
++ * linux/fs/xip2/ioctl.c, Version 1
++ *
++ * (C) Copyright IBM Corp. 2002,2004
++ * Author(s): Carsten Otte <cotte at de.ibm.com>
++ * derived from second extended filesystem (ext2)
++ */
++
++#include <linux/fs.h>
++#include <linux/xip2_fs.h>
++#include <linux/sched.h>
++#include <asm/uaccess.h>
++
++
++int xip2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
++ unsigned long arg)
++{
++ unsigned int flags;
++
++ xip2_debug ("cmd = %u, arg = %lu\n", cmd, arg);
++
++ switch (cmd) {
++ case EXT2_IOC_GETFLAGS:
++ flags = inode->u.ext2_i.i_flags & EXT2_FL_USER_VISIBLE;
++ return put_user(flags, (int *) arg);
++ case EXT2_IOC_GETVERSION:
++ return put_user(inode->i_generation, (int *) arg);
++ default:
++ return -ENOTTY;
++ }
++}
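+
+A sketch of how the two supported ioctls above could be driven from user
+space; the file path is an assumption for illustration:
+
+	#include <fcntl.h>
+	#include <linux/ext2_fs.h>
+	#include <stdio.h>
+	#include <sys/ioctl.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		int flags, version;
+		/* assumed file on an xip2 mount, for illustration */
+		int fd = open("/mnt/xip2/somefile", O_RDONLY);
+
+		if (fd < 0)
+			return 1;
+		if (ioctl(fd, EXT2_IOC_GETFLAGS, &flags) == 0)
+			printf("flags: %#x\n", flags);
+		if (ioctl(fd, EXT2_IOC_GETVERSION, &version) == 0)
+			printf("generation: %d\n", version);
+		close(fd);
+		return 0;
+	}
+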
+=== fs/xip2fs/Makefile
+==================================================================
+--- fs/xip2fs/Makefile (/upstream/vanilla/2.4.27) (revision 52)
++++ fs/xip2fs/Makefile (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,16 @@
++#
++# Makefile for the linux xip2fs-filesystem routines.
++#
++# Note! Dependencies are done automagically by 'make dep', which also
++# removes any old dependencies. DON'T put your own dependencies here
++# unless it's something special (ie not a .c file).
++#
++# Note 2! The CFLAGS definitions are now in the main makefile...
++
++O_TARGET := xip2fs.o
++
++obj-y := balloc.o dir.o file.o ialloc.o inode.o \
++ ioctl.o namei.o super.o symlink.o
++obj-m := $(O_TARGET)
++
++include $(TOPDIR)/Rules.make
+=== fs/xip2fs/balloc.c
+==================================================================
+--- fs/xip2fs/balloc.c (/upstream/vanilla/2.4.27) (revision 52)
++++ fs/xip2fs/balloc.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,83 @@
++/*
++ * linux/fs/xip2/balloc.c, Version 1
++ *
++ * (C) Copyright IBM Corp. 2002,2004
++ * Author(s): Carsten Otte <cotte at de.ibm.com>
++ * derived from second extended filesystem (ext2)
++ */
++
++#include <linux/config.h>
++#include <linux/fs.h>
++#include <linux/xip2_fs.h>
++#include <linux/locks.h>
++#include <linux/quotaops.h>
++
++/*
++ * balloc.c contains the blocks allocation and deallocation routines
++ */
++
++unsigned long xip2_count_free_blocks (struct super_block * sb)
++{
++ return le32_to_cpu(sb->u.xip2_sb.s_es->s_free_blocks_count);
++}
++
++static inline int test_root(int a, int b)
++{
++ if (a == 0)
++ return 1;
++ while (1) {
++ if (a == 1)
++ return 1;
++ if (a % b)
++ return 0;
++ a = a / b;
++ }
++}
++
++int xip2_group_sparse(int group)
++{
++ return (test_root(group, 3) || test_root(group, 5) ||
++ test_root(group, 7));
++}
++
++/**
++ * xip2_bg_has_super - number of blocks used by the superblock in group
++ * @sb: superblock for filesystem
++ * @group: group number to check
++ *
++ * Return the number of blocks used by the superblock (primary or backup)
++ * in this group. Currently this will be only 0 or 1.
++ */
++int xip2_bg_has_super(struct super_block *sb, int group)
++{
++ if (EXT2_HAS_RO_COMPAT_FEATURE(sb,EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER)
++ && !xip2_group_sparse(group))
++ return 0;
++ return 1;
++}
++
++/**
++ * xip2_bg_num_gdb - number of blocks used by the group table in group
++ * @sb: superblock for filesystem
++ * @group: group number to check
++ *
++ * Return the number of blocks used by the group descriptor table
++ * (primary or backup) in this group. In the future there may be a
++ * different number of descriptor blocks in each group.
++ */
++unsigned long xip2_bg_num_gdb(struct super_block *sb, int group)
++{
++ if (EXT2_HAS_RO_COMPAT_FEATURE(sb,EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER)
++ && !xip2_group_sparse(group))
++ return 0;
++ return XIP2_SB(sb)->s_gdb_count;
++}
++
++
++void* xip2_maread (xip2_mem_area_t* mem_area, int block, int size) {
++ if ((block+1)*size-1 > (unsigned long)mem_area->end -
++ (unsigned long)mem_area->start) {
++ return NULL;
++ }
++ return (void*)(mem_area->start + block*size);
++}
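+
+The test_root/xip2_group_sparse pair above encodes the ext2
+sparse-superblock rule: superblock and descriptor backups live only in
+groups 0 and 1 and in groups whose number is a power of 3, 5 or 7.
+xip2_maread, also above, is then a pure address computation: block b of
+size s lives at mem_area->start + b*s, bounds-checked against the segment
+end. A stand-alone check of the sparse rule, for illustration:
+
+	#include <stdio.h>
+
+	static int test_root(int a, int b)
+	{
+		if (a == 0)
+			return 1;
+		for (;;) {
+			if (a == 1)
+				return 1;
+			if (a % b)
+				return 0;
+			a /= b;
+		}
+	}
+
+	int main(void)
+	{
+		int g;
+
+		/* prints: 0 1 3 5 7 9 25 27 49 81 */
+		for (g = 0; g < 100; g++)
+			if (test_root(g, 3) || test_root(g, 5) ||
+			    test_root(g, 7))
+				printf("%d ", g);
+		printf("\n");
+		return 0;
+	}
+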
+=== fs/Config.in
+==================================================================
+--- fs/Config.in (/upstream/vanilla/2.4.27) (revision 52)
++++ fs/Config.in (/trunk/2.4.27) (revision 52)
+@@ -91,6 +91,8 @@
+
+ tristate 'ROM file system support' CONFIG_ROMFS_FS
+
++tristate 'XIP2 execute-in-place filesystem support' CONFIG_XIP2FS
++
+ tristate 'Second extended fs support' CONFIG_EXT2_FS
+
+ tristate 'System V/Xenix/V7/Coherent file system support' CONFIG_SYSV_FS
+=== Documentation/s390/xip2.8
+==================================================================
+--- Documentation/s390/xip2.8 (/upstream/vanilla/2.4.27) (revision 52)
++++ Documentation/s390/xip2.8 (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,67 @@
++.TH XIP2 8 "8 December 2003" "Linux 2.4" "Linux Programmer's Manual"
++.SH NAME
++xip2 \- mount an xip2 file system
++.SH "DESCRIPTION"
++This man page describes the mount options specific to the xip2 filesystem.
++See the man page of
++.BR mount (8)
++for general information about what mount options are and how mount is used.
++.SH "Mount options inherited from ext2"
++The `xip2' file system is derived from the second extended filesystem (ext2).
++It supports the following mount options which are inherited from ext2:
++.TP
++.BR bsddf " / " minixdf
++Set the behaviour for the
++.I statfs
++system call. The
++.B minixdf
++behaviour is to return in the
++.I f_blocks
++field the total number of blocks of the file system, while the
++.B bsddf
++behaviour (which is the default) is to subtract the overhead blocks
++used by the ext2 file system and not available for file storage.
++.TP
++.BR errors=continue " / " errors=remount-ro " / " errors=panic
++Define the behaviour when an error is encountered.
++(Either ignore errors and just mark the file system erroneous and continue,
++or remount the file system read-only, or panic and halt the system.)
++Note that remounting read-only does not change anything, because the xip2
++file system is always read-only anyway.
++.TP
++.BR grpid " or " bsdgroups " / " nogrpid " or " sysvgroups
++These options are accepted but ignored.
++.TP
++\fBresgid=\fP\fIn\fP and \fBresuid=\fP\fIn\fP
++These options are accepted but ignored.
++.TP
++.BI sb= n
++Instead of block 1, use block
++.I n
++as superblock. This could be useful when the filesystem has been damaged.
++The block number here uses 1k units. Thus, if you want to use logical
++block 32768 on a filesystem with 4k blocks, use "sb=131072". Note that the
++xip2 file system always works with 4k blocks.
++.TP
++.BR grpquota " / " noquota " / " quota " / " usrquota
++These options are accepted but ignored.
++
++.TP
++.BR nouid32
++Disables 32-bit UIDs and GIDs. This is for interoperability with older
++kernels which only store and expect 16-bit values.
++.SH "Mount options specific for xip2"
++The 'xip2' file system supports only one mount option that is specific for its
++use:
++.TP
++.BI memarea= <name>
++This mount option is mandatory. It specifies the name of the memory segment
++to be used. When running under z/VM on zSeries, the available memory segment
++names correspond to the available z/VM DCSSs.
++.SH "SEE ALSO"
++.BR mount (8)
++.SH BUGS
++As of today, and mostly thanks to the stable code base inherited from the
++second extended filesystem, no bugs are known. If you think you have
++encountered one, please report it to Carsten Otte
++.B <cotte at de.ibm.com>
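+
+Working through the sb= arithmetic described above: since xip2 always uses
+4k blocks while the option counts in 1k units, the backup superblock at 4k
+block 32768 is selected with sb=32768*4=131072. A sketch of a complete mount
+line (the segment name MYDCSS is illustrative, and the device argument is a
+dummy since xip2 reads from the memory segment rather than a block device):
+
+	mount -t xip2 -o ro,memarea=MYDCSS,sb=131072 none /mnt
+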
+=== Documentation/s390/TAPE
+==================================================================
+--- Documentation/s390/TAPE (/upstream/vanilla/2.4.27) (revision 52)
++++ Documentation/s390/TAPE (/trunk/2.4.27) (revision 52)
+@@ -1,122 +0,0 @@
+-Channel attached Tape device driver
+-
+------------------------------WARNING-----------------------------------------
+-This driver is considered to be EXPERIMENTAL. Do NOT use it in
+-production environments. Feel free to test it and report problems back to us.
+------------------------------------------------------------------------------
+-
+-The LINUX for zSeries tape device driver manages channel attached tape drives
+-which are compatible to IBM 3480 or IBM 3490 magnetic tape subsystems. This
+-includes various models of these devices (for example the 3490E).
+-
+-
+-Tape driver features
+-
+-The device driver supports a maximum of 128 tape devices.
+-No official LINUX device major number is assigned to the zSeries tape device
+-driver. It allocates major numbers dynamically and reports them on system
+-startup.
+-Typically it will get major number 254 for both the character device front-end
+-and the block device front-end.
+-
+-The tape device driver needs no kernel parameters. All supported devices
+-present are detected on driver initialization at system startup or module load.
+-The devices detected are ordered by their subchannel numbers. The device with
+-the lowest subchannel number becomes device 0, the next one will be device 1
+-and so on.
+-
+-
+-Tape character device front-end
+-
+-The usual way to read or write to the tape device is through the character
+-device front-end. The zSeries tape device driver provides two character devices
+-for each physical device -- the first of these will rewind automatically when
+-it is closed, the second will not rewind automatically.
+-
+-The character device nodes are named /dev/rtibm0 (rewinding) and /dev/ntibm0
+-(non-rewinding) for the first device, /dev/rtibm1 and /dev/ntibm1 for the
+-second, and so on.
+-
+-The character device front-end can be used as any other LINUX tape device. You
+-can write to it and read from it using LINUX facilities such as GNU tar. The
+-tool mt can be used to perform control operations, such as rewinding the tape
+-or skipping a file.
+-
+-Most LINUX tape software should work with either tape character device.
+-
+-
+-Tape block device front-end
+-
+-The tape device may also be accessed as a block device in read-only mode.
+-This could be used for software installation in the same way as it is used with
+-other operation systems on the zSeries platform (and most LINUX
+-distributions are shipped on compact disk using ISO9660 filesystems).
+-
+-One block device node is provided for each physical device. These are named
+-/dev/btibm0 for the first device, /dev/btibm1 for the second and so on.
+-You should only use the ISO9660 filesystem on LINUX for zSeries tapes because
+-the physical tape devices cannot perform fast seeks and the ISO9660 system is
+-optimized for this situation.
+-
+-
+-Tape block device example
+-
+-In this example a tape with an ISO9660 filesystem is created using the first
+-tape device. ISO9660 filesystem support must be built into your system kernel
+-for this.
+-The mt command is used to issue tape commands and the mkisofs command to
+-create an ISO9660 filesystem:
+-
+-- create a LINUX directory (somedir) with the contents of the filesystem
+- mkdir somedir
+- cp contents somedir
+-
+-- insert a tape
+-
+-- ensure the tape is at the beginning
+- mt -f /dev/ntibm0 rewind
+-
+-- set the blocksize of the character driver. The blocksize 2048 bytes
+- is commonly used on ISO9660 CD-Roms
+- mt -f /dev/ntibm0 setblk 2048
+-
+-- write the filesystem to the character device driver
+- mkisofs -o /dev/ntibm0 somedir
+-
+-- rewind the tape again
+- mt -f /dev/ntibm0 rewind
+-
+-- Now you can mount your new filesystem as a block device:
+- mount -t iso9660 -o ro,block=2048 /dev/btibm0 /mnt
+-
+-TODO List
+-
+- - Driver has to be stabelized still
+-
+-BUGS
+-
+-This driver is considered BETA, which means some weaknesses may still
+-be in it.
+-If an error occurs which cannot be handled by the code you will get a
+-sense-data dump.In that case please do the following:
+-
+-1. set the tape driver debug level to maximum:
+- echo 6 >/proc/s390dbf/tape/level
+-
+-2. re-perform the actions which produced the bug. (Hopefully the bug will
+- reappear.)
+-
+-3. get a snapshot from the debug-feature:
+- cat /proc/s390dbf/tape/hex_ascii >somefile
+-
+-4. Now put the snapshot together with a detailed description of the situation
+- that led to the bug:
+- - Which tool did you use?
+- - Which hardware do you have?
+- - Was your tape unit online?
+- - Is it a shared tape unit?
+-
+-5. Send an email with your bug report to:
+- mailto:Linux390 at de.ibm.com
+-
+-
+
+Property changes on:
+___________________________________________________________________
+Name: svk:merge
+ -b0378580-cad3-0310-9fc0-80b21f4f072e:/upstream/ibm/2.4.21-current:50
+
+=== Documentation/ioctl-number.txt
+==================================================================
+--- Documentation/ioctl-number.txt (/upstream/vanilla/2.4.27) (revision 52)
++++ Documentation/ioctl-number.txt (/trunk/2.4.27) (revision 52)
+@@ -107,6 +107,7 @@
+ 'W' 00-1F linux/wanrouter.h conflict!
+ 'X' all linux/xfs_fs.h
+ 'Y' all linux/cyclades.h
++'Z' all linux/s390net.h S390 networking
+ 'a' all ATM on linux
+ <http://lrcwww.epfl.ch/linux-atm/magic.html>
+ 'b' 00-FF bit3 vme host bridge
+@@ -183,6 +184,8 @@
+ 0xB1 00-1F PPPoX <mailto:mostrows at styx.uwaterloo.ca>
+ 0xCB 00-1F CBM serial IEC bus in development:
+ <mailto:michael.klein at puffin.lb.shuttle.de>
++0xDD 00-3F ZFCP device driver see drivers/s390/scsi/
++ <mailto:aherrman at de.ibm.com>
+ 0xF3 00-3F linux/sisfb.h SiS framebuffer device driver
+ <mailto:thomas at winischhofer.net>
+ 0xFE 00-9F Logical Volume Manager <mailto:linux-lvm at sistina.com>
+=== Documentation/DocBook/zfcp-hba-api.tmpl
+==================================================================
+--- Documentation/DocBook/zfcp-hba-api.tmpl (/upstream/vanilla/2.4.27) (revision 52)
++++ Documentation/DocBook/zfcp-hba-api.tmpl (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,747 @@
++<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook V4.1//EN"[]>
++
++<!-- ZFCP HBA API Kernel Interfaces. -->
++<!-- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, -->
++<!-- IBM Corporation -->
++<!-- Permission is granted to copy, distribute and/or modify this -->
++<!-- document under the terms of the GNU Free Documentation License, -->
++<!-- Version 1.1 or any later version published by the Free Software -->
++<!-- Foundation; with no Invariant Sections, no Front-Cover Texts and -->
++<!-- no Back-Cover Texts. A copy of the license is included in the -->
++<!-- section entitled "GNU Free Documentation License". -->
++
++<book id="ZFCPHBAAPI">
++
++<!-- book header -->
++ <bookinfo>
++ <title>ZFCP HBA API Kernel Interfaces</title>
++
++ <revhistory>
++ <revision>
++ <revnumber>v1.0 </revnumber>
++ <date>2003/09/24</date>
++ <authorinitials>AH</authorinitials>
++ <revremark>
++ <remark>Initial version.</remark>
++ </revremark>
++ </revision>
++ <revision>
++ <revnumber>v1.1 </revnumber>
++ <date>2003/11/14</date>
++ <authorinitials>AH</authorinitials>
++ <revremark>
++ <remark>Completed interface description for module
++ <filename>zfcp_hbaapi</filename>.
++ </remark>
++<!-- Added Stefan Völkel as author. -->
++<!-- Removed improper license statement regarding ZFCP HBA API Library. -->
++<!-- Added Introduction and Bibliography. -->
++<!-- Added section about misc device provided by module zfcp_hbaapi. -->
++<!-- Added section about callback functions of module zfcp_hbaapi. -->
++ </revremark>
++ </revision>
++ <revision>
++ <revnumber>v1.2 </revnumber>
++ <date>2003/11/19</date>
++ <authorinitials>AH</authorinitials>
++ <revremark>
++ <remark>Completed interface description for module
++ <filename>zfcp</filename>.
++ </remark>
++<!-- Added section about intra-kernel interfaces of module zfcp. -->
++<!-- Added section about callbacks and hooks in module zfcp. -->
++ </revremark>
++ </revision>
++ </revhistory>
++
++ <authorgroup>
++ <author>
++ <firstname>Andreas</firstname>
++ <surname>Herrman</surname>
++ <affiliation>
++ <address><email>aherrman at de.ibm.com</email></address>
++ </affiliation>
++ </author>
++ <author>
++ <firstname>Stefan</firstname>
++ <surname>Völkel</surname>
++ <affiliation>
++ <address><email>Stefan.Voelkel at millenux.com</email></address>
++ </affiliation>
++ </author>
++ </authorgroup>
++
++ <copyright>
++ <year>2003</year>
++ <holder>IBM Corp.</holder>
++ </copyright>
++
++ <legalnotice>
++ <para>
++ The Kernel parts of ZFCP HBA API are released under
++ the GNU General Public License (GPL). ZFCP HBA API Library is released
++ under the ...
++ </para>
++ </legalnotice>
++
++ <abstract>
++ <para>
++ This document describes Intra-Kernel and Kernel-User-Space interfaces
++ of ZFCP HBA API.
++ </para>
++ <para>
++ ZFCP HBA API is an implementation of
++ <citation><xref linkend="bib.fchba"></citation>
++ for the ZFCP device driver for Linux on zSeries.
++ </para>
++ <para>
++ This is the first version of the document. It is written in DocBook 4.1.
++      Please let me know if you find any markup or other errors.
++ </para>
++ </abstract>
++ </bookinfo>
++
++ <toc></toc>
++
++ <!-- introduction -->
++ <chapter id="cha.introduction">
++ <title>Introduction</title>
++ <para>
++ ZFCP HBA API is an implementation of
++ <citation><xref linkend="bib.fchba"></citation>
++      for the ZFCP device driver. The ZFCP device driver is an FCP device driver
++ for Linux on zSeries. This documentation describes the ZFCP HBA API for
++ Linux Kernel 2.4. ZFCP HBA API consists of the following parts.
++ </para>
++ <itemizedlist>
++ <listitem>
++ <para>
++ ZFCP HBA API Library - a shared library which provides the
++ API defined in <citation><xref linkend="bib.fchba"></citation>.
++ </para>
++ </listitem>
++ <listitem>
++ <para>
++ The kernel module <filename>zfcp_hbaapi</filename> which
++ provides a misc device with IO controls for communication
++ between kernel and user space. The module registers callback
++ functions in the ZFCP device driver and is able to use
++ functionality provided by the ZFCP device driver. This
++ module is the connection between ZFCP HBA API Library and
++          the ZFCP device driver.
++ </para>
++ </listitem>
++ <listitem>
++ <para>
++ Finally the ZFCP device driver contains hooks at which
++ callback functions of the <filename>zfcp_hbaapi</filename>
++ kernel module are invoked. Furthermore the device driver
++ provides functionality which can be used by the
++ <filename>zfcp_hbaapi</filename> kernel module.
++ </para>
++ </listitem>
++ </itemizedlist>
++ <para>
++ This documentation describes the kernel parts of ZFCP HBA API.
++ Separate documentation for ZFCP HBA API Library exists.
++ </para>
++ </chapter>
++
++ <!-- documentation about zfcp_hbaapi module -->
++ <chapter id="cha.zfcphbaapi">
++ <title>Kernel Module <filename>zfcp_hbaapi</filename></title>
++ <section id="sec.zfcphbaapioverview">
++ <title>Overview</title>
++ <para>
++ The module <filename>zfcp_hbaapi</filename> is the interface
++ between the ZFCP HBA API Library and the ZFCP device
++ driver. It provides a misc device which is used for
++ communication between kernel and user space. The module
++ registers callback functions in the ZFCP device driver which
++ are invoked when certain events occur. Furthermore it calls
++ functions provided by the ZFCP device driver to collect data
++ for the ZFCP HBA API Library.
++ </para>
++ </section>
++ <section>
++ <title>Device File</title>
++ <para>
++ The module <filename>zfcp_hbaapi</filename> provides a misc
++ device. The corresponding device node should be named
++ <filename>/dev/zfcp_hbaapi</filename> - ZFCP HBA API Library
++ expects this name for the device file. When the module is
++      loaded, the device node can be generated using the following commands:
++ </para>
++ <screen>
++#>minor=`cat /proc/misc | awk "\\$2==\\"zfcp_hbaapi\\" {print \\$1}"`
++#>mknod /dev/zfcp_hbaapi c 10 $minor
++ </screen>
++ <section>
++      <title>Module Parameters</title>
++ <para>The following parameters can be set for the module.</para>
++ <table>
++ <title>Module Parameters</title>
++ <tgroup cols='3' colsep='1' rowsep='1'>
++ <thead>
++ <row>
++ <entry>Parameter</entry>
++ <entry>Description</entry>
++ <entry>Default Value</entry>
++ </row>
++ </thead>
++ <tbody>
++ <row>
++ <entry><para><parameter>maxshared</parameter></para></entry>
++ <entry><para>Maximum number of events in the shared event queue.
++ </para></entry>
++ <entry><para><parameter>20</parameter></para></entry>
++ </row>
++ <row>
++ <entry><para><parameter>maxpolled</parameter></para></entry>
++ <entry><para>Maximum number of events in the polled event queue.
++ </para></entry>
++ <entry><para><parameter>20</parameter></para></entry>
++ </row>
++ <row>
++ <entry><para><parameter>minor</parameter></para></entry>
++ <entry><para>
++ Minor number for the misc device to be registered.
++ </para></entry>
++ <entry><para>
++ <symbol>MISC_DYNAMIC_MINOR</symbol>
++ </para></entry>
++ </row>
++ </tbody>
++ </tgroup>
++ </table>
++ </section>
++ <section>
++ <title>File Operations</title>
++ <para>
++ The module <filename>zfcp_hbaapi</filename> defines the
++ methods <function>open</function>,
++ <function>read</function>, <function>release</function>, and
++ <function>ioctl</function> for the misc device.
++ </para>
++ <section>
++ <title>Reference</title>
++ <para></para>
++!Fdrivers/s390/scsi/zh_main.c zh_ioctl
++!Fdrivers/s390/scsi/zh_main.c zh_read
++!Fdrivers/s390/scsi/zh_main.c zh_release
++!Fdrivers/s390/scsi/zh_main.c zh_open
++ </section>
++ </section>
++ <section>
++ <title>IO Controls</title>
++ <para>
++        The next table gives an overview of the IO controls of the misc
++        device, the names of the corresponding internal helper functions,
++        and the argument types (if any).
++ </para>
++ <table frame='all'><title>IO Controls</title>
++ <tgroup cols='2' colsep='1' rowsep='1'>
++ <thead>
++ <row>
++ <entry><para>Name</para></entry>
++ <entry><para>Helper Function, Argument Type</para></entry>
++ </row>
++ </thead>
++ <tbody>
++ <row>
++ <entry><para>
++ <symbol>ZH_IOC_GET_ADAPTERATTRIBUTES</symbol>
++ </para></entry>
++ <entry><para>
++ <function>zh_ioc_get_adapterattributes</function>,
++ <type>struct zh_get_adapterattributes</type>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <symbol>ZH_IOC_GET_PORTATTRIBUTES</symbol>
++ </para></entry>
++ <entry><para>
++ <function>zh_ioc_get_portattributes</function>,
++ <type>struct zh_get_portattributes</type>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <symbol>ZH_IOC_GET_PORTSTATISTICS</symbol>
++ </para></entry>
++ <entry><para>
++ <function>zh_ioc_get_portstatistics</function>,
++ <type>struct zh_get_portstatistics</type>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <symbol>ZH_IOC_GET_DPORTATTRIBUTES</symbol>
++ </para></entry>
++ <entry><para>
++ <function>zh_ioc_get_dportattributes</function>,
++ <type>struct zh_get_portattributes</type>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <symbol>ZH_IOC_GET_RNID</symbol>
++ </para></entry>
++ <entry><para>
++ <function>zh_ioc_get_rnid</function>,
++ <type>struct zh_get_rnid</type>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <symbol>ZH_IOC_SEND_RNID</symbol>
++ </para></entry>
++ <entry><para>
++ <function>zh_ioc_send_rnid</function>,
++ <type>struct zh_send_rnid</type>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <symbol>ZH_IOC_SEND_CT</symbol>
++ </para></entry>
++ <entry><para>
++ <function>zh_ioc_send_ct</function>,
++ <type>struct zh_send_ct</type>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <symbol>ZH_IOC_SCSI_INQUIRY</symbol>
++ </para></entry>
++ <entry><para>
++ <function>zh_ioc_scsi_inquiry</function>,
++ <type>struct zh_scsi_inquiry</type>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <symbol>ZH_IOC_SCSI_READ_CAPACITY</symbol>
++ </para></entry>
++ <entry><para>
++ <function>zh_ioc_scsi_read_capacity</function>,
++ <type>struct zh_scsi_read_capacity</type>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <symbol>ZH_IOC_SCSI_REPORT_LUNS</symbol>
++ </para></entry>
++ <entry><para>
++ <function>zh_ioc_scsi_report_luns</function>,
++ <type>struct zh_scsi_report_luns</type>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <symbol>ZH_IOC_GET_EVENT_BUFFER</symbol>
++ </para></entry>
++ <entry><para>
++ <function>zh_ioc_get_event_buffer</function>,
++ <type>struct zh_get_event_buffer</type>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <symbol>ZH_IOC_GET_CONFIG</symbol>
++ </para></entry>
++ <entry><para>
++ <function>zh_ioc_get_config</function>,
++ <type>struct zh_get_config</type>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <symbol>ZH_IOC_CLEAR_CONFIG</symbol>
++ </para></entry>
++ <entry><para>
++ <function>zh_ioc_clear_config</function>,
++ <type>struct zh_clear_config</type>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <symbol>ZH_IOC_EVENT_START</symbol>
++ </para></entry>
++ <entry><para>
++ <function>zh_ioc_event_start</function>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <symbol>ZH_IOC_EVENT_STOP</symbol>
++ </para></entry>
++ <entry><para>
++ <function>zh_ioc_event_stop</function>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <symbol>ZH_IOC_EVENT</symbol>
++ </para></entry>
++ <entry><para>
++ <function>zh_ioc_event</function>,
++ <type>struct zh_event</type>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <symbol>ZH_IOC_EVENT_INSERT</symbol>
++ </para></entry>
++ <entry><para>
++ <function>zh_ioc_event_insert</function>
++ </para></entry>
++ </row>
++ </tbody>
++ </tgroup>
++ </table>
++ <para>
++	  If <filename>zfcp_hbaapi</filename> is compiled for a 64 bit
++	  architecture, two additional IO controls exist. They are
++	  used for 32 bit IO control conversion:
++ </para>
++ <table frame='all'>
++ <title>IO Controls (on 64 bit architecture only)</title>
++ <tgroup cols='2' colsep='1' rowsep='1'>
++ <thead>
++ <row>
++ <entry><para>Name</para></entry>
++ <entry><para>Helper Function, Argument Type</para></entry>
++ </row>
++ </thead>
++ <tbody>
++ <row>
++ <entry><para>
++ <symbol>ZH_IOC_SEND_CT32</symbol>
++ </para></entry>
++ <entry><para>
++ <function>zh_ioc_send_ct32</function>,
++ <type>struct zh_send_ct32</type>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <symbol>ZH_IOC_SCSI_REPORT_LUNS32</symbol>
++ </para></entry>
++ <entry><para>
++ <function>zh_ioc_scsi_report_luns</function>,
++ <type>struct zh_scsi_report_luns32</type>
++ </para></entry>
++ </row>
++ </tbody>
++ </tgroup>
++ </table>
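++	<para>
++	  The conversion follows the usual 31/32 bit compatibility
++	  pattern: user space passes pointers as <type>__u32</type>
++	  values, which are zero-extended before the 64 bit handler is
++	  called. A sketch of the widening step follows; the structure
++	  layout is hypothetical and not taken from
++	  <filename>zh.h</filename>:
++	</para>
++	<programlisting><![CDATA[
++/* hypothetical 32 bit request layout, for illustration only */
++struct zh_send_ct32_sketch {
++	__u32 req;		/* 31/32 bit user space pointer */
++	__u32 req_len;
++};
++
++static inline void *widen_user_ptr(__u32 uptr)
++{
++	/* zero-extend the 32 bit token into a full pointer */
++	return (void *) (unsigned long) uptr;
++}
++]]></programlisting>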
++ </section>
++ <section>
++ <title>Reference</title>
++ <para></para>
++!Fdrivers/s390/scsi/zh.h zh_get_adapterattributes
++!Fdrivers/s390/scsi/zh_main.c zh_ioc_get_adapterattributes
++!Fdrivers/s390/scsi/zh.h zh_get_portattributes
++!Fdrivers/s390/scsi/zh_main.c zh_ioc_get_portattributes
++!Fdrivers/s390/scsi/zh_main.c zh_ioc_get_dportattributes
++!Fdrivers/s390/scsi/zh.h zh_get_portstatistics
++!Fdrivers/s390/scsi/zh_main.c zh_ioc_get_portstatistics
++!Fdrivers/s390/scsi/zh.h zh_get_rnid
++!Fdrivers/s390/scsi/zh_main.c zh_ioc_get_rnid
++!Fdrivers/s390/scsi/zh.h zh_send_rnid
++!Fdrivers/s390/scsi/zh_main.c zh_ioc_send_rnid
++!Fdrivers/s390/scsi/zh.h zh_send_ct
++!Fdrivers/s390/scsi/zh_main.c zh_ioc_send_ct
++!Fdrivers/s390/scsi/zh.h zh_scsi_inquiry
++!Fdrivers/s390/scsi/zh_main.c zh_ioc_scsi_inquiry
++!Fdrivers/s390/scsi/zh.h zh_scsi_read_capacity
++!Fdrivers/s390/scsi/zh_main.c zh_ioc_scsi_read_capacity
++!Fdrivers/s390/scsi/zh.h zh_scsi_report_luns
++!Fdrivers/s390/scsi/zh_main.c zh_ioc_scsi_report_luns
++!Fdrivers/s390/scsi/zh.h zh_get_event_buffer
++!Fdrivers/s390/scsi/zh_main.c zh_ioc_get_event_buffer
++!Fdrivers/s390/scsi/zh.h zh_get_config
++!Fdrivers/s390/scsi/zh_main.c zh_ioc_get_config
++!Fdrivers/s390/scsi/zh_main.c zh_ioc_clear_config
++!Fdrivers/s390/scsi/zh_main.c zh_ioc_event_start
++!Fdrivers/s390/scsi/zh_main.c zh_ioc_event_stop
++!Fdrivers/s390/scsi/zh.h zh_event
++!Fdrivers/s390/scsi/zh_main.c zh_ioc_event
++!Fdrivers/s390/scsi/zh_main.c zh_ioc_event_insert
++ </section>
++ </section>
++ <section>
++ <title>Callback functions</title>
++ <para>
++	  For event notification, <filename>zfcp_hbaapi</filename>
++	  registers a set of callback functions with the ZFCP device
++	  driver. In zfcp, certain hooks exist at which those callbacks
++	  are invoked.
++ </para>
++ <section>
++ <title>Reference</title>
++ <para></para>
++!Fdrivers/s390/scsi/zh_main.c zh_cb_adapter_add
++!Fdrivers/s390/scsi/zh_main.c zh_cb_port_add
++!Fdrivers/s390/scsi/zh_main.c zh_cb_unit_add
++!Fdrivers/s390/scsi/zh_main.c zh_cb_incomming_els
++!Fdrivers/s390/scsi/zh_main.c zh_cb_link_down
++!Fdrivers/s390/scsi/zh_main.c zh_cb_link_up
++ </section>
++ </section>
++ </chapter>
++
++ <!-- changed/new functions and structures in zfcp -->
++ <!-- documentation about zfcp module -->
++ <chapter id="cha.zfcp">
++ <title>Kernel Module <filename>zfcp</filename></title>
++ <para>
++	The module <filename>zfcp</filename> provides (new) interfaces for
++	the ZFCP HBA API. Furthermore, hooks are integrated at which callback
++	functions for event notification are invoked.
++ </para>
++
++ <section>
++ <title>Intra-Kernel Interface</title>
++	  <para>The ZFCP device driver exports the following functions:</para>
++ <glosslist>
++ <glossentry>
++ <glossterm>
++ <function>zfcp_zh_callbacks_register</function>
++ </glossterm>
++ <glossdef>
++ <para>Register callbacks for event handling. Called from
++	      <function>zh_init</function>, the init function of module
++ <filename>zfcp_hbaapi</filename>.</para>
++ </glossdef>
++ </glossentry>
++ <glossentry>
++ <glossterm>
++ <function>zfcp_zh_callbacks_unregister</function>
++ </glossterm>
++ <glossdef>
++ <para>Unregister callbacks for event handling. Called from
++	      <function>zh_exit</function>, the exit function of module
++ <filename>zfcp_hbaapi</filename>.</para>
++ </glossdef>
++ </glossentry>
++ <glossentry>
++ <glossterm>
++ <function>zfcp_zh_get_config</function>
++ </glossterm>
++ <glossdef>
++	      <para>For each adapter, port, and unit configured in module
++	      <filename>zfcp</filename>, the corresponding callback
++ function is called.</para>
++ </glossdef>
++ </glossentry>
++ <glossentry>
++ <glossterm>
++ <function>zfcp_zh_get_adapter_attributes</function>
++ </glossterm>
++ <glossdef>
++ <para>Collect attributes of an adapter.</para>
++ </glossdef>
++ </glossentry>
++ <glossentry>
++ <glossterm>
++ <function>zfcp_zh_get_port_attributes</function>
++ </glossterm>
++ <glossdef>
++ <para>Collect attributes of an adapter port. Calls FSF
++ command <command>ExchangePortData</command>.</para>
++ </glossdef>
++ </glossentry>
++ <glossentry>
++ <glossterm>
++	      <function>zfcp_zh_get_port_statistics</function>
++ </glossterm>
++ <glossdef>
++ <para>Collect statistics of an adapter port. Calls FSF
++ command <command>ExchangePortData</command>.</para>
++ </glossdef>
++ </glossentry>
++ <glossentry>
++ <glossterm>
++ <function>zfcp_zh_get_dport_attributes</function>
++ </glossterm>
++ <glossdef>
++	      <para>Collect attributes of a discovered port. Sends an
++	      FC-GS-2 request <command>GA_NXT</command> to the Name
++	      Server Directory Service.</para>
++ </glossdef>
++ </glossentry>
++ <glossentry>
++ <glossterm>
++ <function>zfcp_zh_assert_fclun_zero</function>
++ </glossterm>
++ <glossdef>
++	      <para>Checks whether a unit with FC LUN
++	      <parameter>0x0</parameter> is configured in
++	      <filename>zfcp</filename> for a certain port. If not, it
++ creates a <filename>zfcp</filename> unit structure for FC
++ LUN <parameter>0x0</parameter> for this port.</para>
++ </glossdef>
++ </glossentry>
++ <glossentry>
++ <glossterm>
++ <function>zfcp_zh_send_scsi</function>
++ </glossterm>
++ <glossdef>
++ <para>Send a SCSI command to a FC LUN.</para>
++ </glossdef>
++ </glossentry>
++ <glossentry>
++ <glossterm>
++ <function>zfcp_zh_send_els</function>
++ </glossterm>
++ <glossdef>
++ <para>Send ELS commands (according to FC-FS).</para>
++ </glossdef>
++ </glossentry>
++ <glossentry>
++ <glossterm>
++ <function>zfcp_zh_send_ct</function>
++ </glossterm>
++ <glossdef>
++	      <para>Send Generic Service commands (according to FC-GS-4).
++ </para>
++ </glossdef>
++ </glossentry>
++ </glosslist>
++ <note>
++	  <para>In <function>zfcp_zh_send_ct</function>, currently only
++ requests to the Name Server Directory Service are
++ supported.</para>
++ </note>
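++	<para>
++	  A minimal sketch of how a client module wires itself up at
++	  load time. The name and layout of the callback structure and
++	  the return type of the register function are assumed here;
++	  the authoritative definitions live in
++	  <filename>zfcp_zh.h</filename>:
++	</para>
++	<programlisting><![CDATA[
++#include <linux/init.h>
++#include <linux/module.h>
++#include "zfcp_zh.h"		/* exported zfcp_zh_* interface */
++
++/* callback structure name and members are assumed */
++static struct zfcp_zh_callbacks zh_callbacks = {
++	.adapter_add	= zh_cb_adapter_add,
++	.port_add	= zh_cb_port_add,
++	.unit_add	= zh_cb_unit_add,
++	.link_up	= zh_cb_link_up,
++	.link_down	= zh_cb_link_down,
++};
++
++static int __init zh_init(void)
++{
++	return zfcp_zh_callbacks_register(&zh_callbacks);
++}
++
++static void __exit zh_exit(void)
++{
++	zfcp_zh_callbacks_unregister(&zh_callbacks);
++}
++
++module_init(zh_init);
++module_exit(zh_exit);
++]]></programlisting>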
++ <section>
++ <title>Reference</title>
++ <para></para>
++!Fdrivers/s390/scsi/zfcp_zh.c zfcp_zh_callbacks_register
++!Fdrivers/s390/scsi/zfcp_zh.c zfcp_zh_callbacks_unregister
++!Fdrivers/s390/scsi/zfcp_zh.c zfcp_zh_get_config
++!Fdrivers/s390/scsi/zfcp_zh.c zfcp_zh_get_adapter_attributes
++!Fdrivers/s390/scsi/zfcp_zh.c zfcp_zh_get_port_attributes
++!Fdrivers/s390/scsi/zfcp_zh.c zfcp_zh_get_port_statistics
++!Fdrivers/s390/scsi/zfcp_zh.c zfcp_zh_get_dport_attributes
++!Fdrivers/s390/scsi/zfcp_zh.c zfcp_zh_send_ct
++!Fdrivers/s390/scsi/zfcp_zh.c zfcp_zh_send_els
++!Fdrivers/s390/scsi/zfcp_zh.c zfcp_zh_send_scsi
++!Fdrivers/s390/scsi/zfcp_zh.c zfcp_zh_assert_fclun_zero
++ </section>
++ </section>
++ <section>
++ <title>Callbacks for Event Handling</title>
++ <para>
++ To enable event delivery as required by <citation><xref
++ linkend="bib.fchba"></citation>, some callback functions of
++ module <filename>zfcp_hbaapi</filename> must be called from
++ module <filename>zfcp</filename>.
++ </para>
++ <para>
++ The following table gives an overview of the callbacks into
++ module <filename>zfcp_hbaapi</filename> and their hooks in
++ <filename>zfcp</filename>.
++ </para>
++ <table frame='all'><title>Callbacks</title>
++ <tgroup cols='2' colsep='1' rowsep='1'>
++ <thead>
++ <row>
++ <entry><para>Callback</para></entry>
++ <entry><para>
++ Hook in module <filename>zfcp</filename>
++ </para></entry>
++ </row>
++ </thead>
++ <tbody>
++ <row>
++ <entry><para>
++ <function>adapter_add</function>
++ </para></entry>
++ <entry><para>
++ <function>zfcp_fsf_exchange_config_data_handler</function>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <function>port_add</function>
++ </para></entry>
++ <entry><para>
++ <function>zfcp_port_enqueue</function>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <function>unit_add</function>
++ </para></entry>
++ <entry><para>
++ <function>zfcp_unit_enqueue</function>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <function>incoming_els</function>
++ </para></entry>
++ <entry><para>
++ <function>zfcp_fsf_incoming_els</function>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <function>link_down</function>
++ </para></entry>
++ <entry><para>
++ <function>zfcp_status_read_handler</function>,
++ <function>zfcp_fsf_protstatus_eval</function>
++ </para></entry>
++ </row>
++ <row>
++ <entry><para>
++ <function>link_up</function>
++ </para></entry>
++ <entry><para>
++ <function>zfcp_status_read_handler</function>
++ </para></entry>
++ </row>
++ </tbody>
++ </tgroup>
++ </table>
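++	<para>
++	  Each hook reduces to a guarded indirect call. A sketch of the
++	  pattern (the callback signature is assumed, not taken from
++	  <filename>zfcp</filename>):
++	</para>
++	<programlisting><![CDATA[
++	/* inside zfcp_port_enqueue(), once the port is registered */
++	if (zh_callbacks && zh_callbacks->port_add)
++		zh_callbacks->port_add(port->adapter, port);
++]]></programlisting>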
++ </section>
++ </chapter>
++
++ <!-- Bibliography -->
++ <bibliography>
++ <biblioentry id="bib.fchba" xreflabel="FC-HBA">
++ <title>
++ Working Draft. Information technology - Fibre Channel HBA API (FC-HBA)
++ </title>
++ <orgname>The T11 Technical Committee</orgname>
++ <!-- <pubdate> -->
++ <!-- Revision 11, 2003-10-29, -->
++ <!-- <ulink url="http://www.t11.org/"></ulink> -->
++ <!-- </pubdate> -->
++ </biblioentry>
++ </bibliography>
++
++</book>
++<!-- Keep this comment at the end of the file
++Local variables:
++mode: sgml
++sgml-always-quote-attributes:t
++sgml-auto-insert-required-elements:t
++sgml-balanced-tag-edit:t
++sgml-exposed-tags:nil
++sgml-general-insert-case:lower
++sgml-indent-data:t
++sgml-indent-step:2
++sgml-local-catalogs:nil
++sgml-local-ecat-files:nil
++sgml-minimize-attributes:nil
++sgml-namecase-general:t
++sgml-omittag:t
++sgml-shorttag:t
++sgml-tag-region-if-active:t
++End:
++-->
+=== Documentation/DocBook/Makefile
+==================================================================
+--- Documentation/DocBook/Makefile (/upstream/vanilla/2.4.27) (revision 52)
++++ Documentation/DocBook/Makefile (/trunk/2.4.27) (revision 52)
+@@ -2,7 +2,7 @@
+ kernel-api.sgml parportbook.sgml kernel-hacking.sgml \
+ kernel-locking.sgml via-audio.sgml mousedrivers.sgml sis900.sgml \
+ deviceiobook.sgml procfs-guide.sgml tulip-user.sgml \
+- journal-api.sgml libata.sgml
++ journal-api.sgml zfcp-hba-api.sgml libata.sgml
+
+ PS := $(patsubst %.sgml, %.ps, $(BOOKS))
+ PDF := $(patsubst %.sgml, %.pdf, $(BOOKS))
+@@ -159,7 +159,15 @@
+ $(TOPDIR)/scripts/docgen $(JBDSOURCES) \
+ <journal-api.tmpl >journal-api.sgml
+
++ZFCPHBAAPISOURCES := $(TOPDIR)/drivers/s390/scsi/zh.h \
++ $(TOPDIR)/drivers/s390/scsi/zh_main.c \
++ $(TOPDIR)/drivers/s390/scsi/zfcp_zh.h \
++ $(TOPDIR)/drivers/s390/scsi/zfcp_zh.c
+
++zfcp-hba-api.sgml: zfcp-hba-api.tmpl $(ZFCPHBAAPISOURCES)
++ $(TOPDIR)/scripts/docgen $(ZFCPHBAAPISOURCES) \
++ <zfcp-hba-api.tmpl >zfcp-hba-api.sgml
++
+ DVI := $(patsubst %.sgml, %.dvi, $(BOOKS))
+ AUX := $(patsubst %.sgml, %.aux, $(BOOKS))
+ TEX := $(patsubst %.sgml, %.tex, $(BOOKS))
+=== Documentation/Configure.help
+==================================================================
+--- Documentation/Configure.help (/upstream/vanilla/2.4.27) (revision 52)
++++ Documentation/Configure.help (/trunk/2.4.27) (revision 52)
+@@ -6283,6 +6283,16 @@
+
+ It is safe to say N here for now.
+
++Prepare net_device struct for shared IPv6 cards
++CONFIG_SHARED_IPV6_CARDS
++ This prepares the net_device structure to contain a card user instance
++ id. On some systems, e.g. IBM zSeries, networking cards can be shared.
++ In order to make IPv6 autoconfiguration useful, each user of the
++ networking card will get a different id which is used for unique
++ address generation (the id is used in the EUI-64 generation).
++
++ Only say yes on IBM zSeries or S/390 systems.
++
+ The SCTP Protocol (EXPERIMENTAL)
+ CONFIG_IP_SCTP
+ Stream Control Transmission Protocol
+@@ -7953,7 +7963,7 @@
+ QDIO base support for IBM S/390 and zSeries
+ CONFIG_QDIO
+ This driver provides the Queued Direct I/O base support for the
+- IBM S/390 (G5 and G6) and eServer zSeries (z800 and z900).
++ IBM S/390 (G5 and G6) and eServer zSeries (z800, z900 and z990).
+
+ For details please refer to the documentation provided by IBM at
+ <http://www10.software.ibm.com/developerworks/opensource/linux390>
+@@ -7971,6 +7981,61 @@
+
+ If unsure, say N.
+
++IBM S/390 and zSeries OSA-Express and HiperSockets device driver
++CONFIG_QETH
++ This driver supports the IBM S/390 and zSeries OSA Express adapters
++ in QDIO mode (all media types), HiperSockets interfaces and VM GuestLAN
++ interfaces in QDIO and HIPER mode.
++
++ For details please refer to the documentation provided by IBM at
++ <http://www10.software.ibm.com/developerworks/opensource/linux390>
++
++ This driver is also available as a module (code which can be
++ inserted in and removed from the running kernel whenever you
++ want). If you want to compile it as a module, say 'M' here and
++ read file Documentation/modules.txt.
++
++IPv6 support for qeth
++CONFIG_QETH_IPV6
++ If CONFIG_QETH is switched on, this option will include IPv6
++ support in the qeth device driver.
++
++IEEE 802.1q VLAN support for qeth
++CONFIG_QETH_VLAN
++ If CONFIG_QETH is switched on, this option will include IEEE
++ 802.1q VLAN support in the qeth device driver.
++
++Performance statistics for the qeth drivers
++CONFIG_QETH_PERF_STATS
++ When switched on, this option will add a file in the proc-fs
++ (/proc/qeth_perf_stats) containing performance statistics. It
++ may slightly impact performance, so this is only recommended for
++ internal tuning of the device driver.
++
++FCP adapter driver for IBM eServer zSeries
++CONFIG_ZFCP
++ This driver supports the IBM eServer zSeries 800/900 FCP adapter.
++ If you want to access SCSI devices attached to your zSeries
++  by means of Fibre Channel interfaces, say Y.
++ For details please refer to the documentation provided by IBM at
++ <http://www10.software.ibm.com/developerworks/opensource/linux390>
++
++  This driver is also available as a module (= code which can be
++ inserted in and removed from the running kernel whenever you want).
++ The module will be called zfcp.o. If you want to compile it as a
++ module, say M here and read <file:Documentation/modules.txt>.
++
++HBA API support for the IBM eServer z990 (GA2) FCP adapter driver
++CONFIG_ZFCP_HBAAPI
++ Say Y here to include HBA API (FC-HBA) support for z990 (GA2).
++
++ This support is also available as a separate module.
++ If you want to compile it as a module, say M here and read
++ <file:Documentation/modules.txt>. The module will be called
++ zfcp_hbaapi.o.
++
++ If unsure, say N.
++
+ SGI WD93C93 SCSI Driver
+ CONFIG_SCSI_SGIWD93
+ Say Y here to support the on-board WD93C93 SCSI controller found (a)
+@@ -25334,6 +25399,49 @@
+ You should only select this option if you know what you are
+ doing and want to exploit this feature.
+
++CONFIG_VIRT_TIMER
++ This provides a kernel interface for virtual CPU timers.
++
++CONFIG_APPLDATA_BASE
++ This option provides a kernel interface for creating and updating
++ z/VM APPLDATA monitor records. The monitor records are updated at
++ given time intervals, once the timer is started.
++
++CONFIG_APPLDATA_MEM
++ This option provides memory management related data to the Linux -
++ z/VM Monitor Stream, for example, the paging/swapping rate and the
++ utilisation.
++
++CONFIG_APPLDATA_OS
++ This option provides operating system related data to the Linux -
++ z/VM Monitor Stream, for example, the CPU utilisation.
++
++CONFIG_APPLDATA_NET_SUM
++ This option provides network related data to the Linux - z/VM
++ Monitor Stream. The data gives a total sum of network I/O
++ statistics, no per-interface data.
++
++Collaborative memory management
++CONFIG_CMM
++  Select this option if you want to enable the kernel interface
++  to reduce the memory size of the system. This is accomplished
++  by allocating pages of memory and putting them "on hold". This only
++  makes sense for a system running under VM, where the unused pages
++ will be reused by VM for other guest systems. The interface
++ allows an external monitor to balance memory of many systems.
++ Everybody who wants to run Linux under VM should select this
++ option.
++
++/proc interface to cooperative memory management
++CONFIG_CMM_PROC
++ Select this option to enable the /proc interface to the
++ cooperative memory management.
++
++IUCV special message interface to cooperative memory management
++CONFIG_CMM_IUCV
++ Select this option to enable the special message interface to
++ the cooperative memory management.
++
+ Support for IBM-style disk-labels (S/390)
+ CONFIG_S390_PARTITION
+ Enable this option to assure standard IBM labels on the DASDs.
+@@ -25344,13 +25452,13 @@
+ Support for DASD hard disks
+ CONFIG_DASD
+ Enable this option if you want to access DASDs directly utilizing
+- S/390s channel subsystem commands. This is necessary for running
++ S/390's or zSeries' channel subsystem commands. This is necessary for running
+ natively on a single image or an LPAR.
+
+ Support for ECKD hard disks
+ CONFIG_DASD_ECKD
+ ECKD (Extended Count Key Data) devices are the most commonly used
+- devices on S/390s. You should enable this option unless you are
++ devices on zSeries and S/390. You should enable this option unless you are
+ very sure you have no ECKD device.
+
+ ECKD demand loading
+@@ -25376,6 +25484,14 @@
+ CONFIG_DASD_AUTO_DIAG
+ This option enables demand loading of the DIAG module.
+
++Support for Channel Measurement on DASD devices
++CONFIG_S390_CMF
++ Select this option if you want to run applications that read
++ statistical data about DASD I/O from the Channel Measurement
++ Facility.
++ If you say "M" here, two modules, "dasd_cmb.o" and "cmf.o",
++ will be created. If unsure, say "N".
++
+ Merge some code into the kernel to make the image IPLable
+ CONFIG_IPLABLE
+ If you want to use the produced kernel to IPL directly from a
+@@ -25403,45 +25519,49 @@
+ system console. Available only if 3270 support is compiled in
+ statically.
+
+-Support for HWC line mode terminal
+-CONFIG_HWC
+- Include support for IBM HWC line-mode terminals.
++Support for SCLP
++CONFIG_SCLP
++ Include support for the IBM SCLP interface to the service element.
+
+-Console on HWC line mode terminal
+-CONFIG_HWC_CONSOLE
+- Include support for using an IBM HWC line-mode terminal as the Linux
++Support for SCLP line mode terminal
++CONFIG_SCLP_TTY
++ Include support for IBM SCLP line-mode terminals.
++
++Support for console on SCLP line mode terminal
++CONFIG_SCLP_CONSOLE
++ Include support for using an IBM SCLP line-mode terminal as a Linux
+ system console.
+
+-Control Program Identification
+-CONFIG_HWC_CPI
+- Allows for Control Program Identification via the HWC interface,
+- i.e. provides a mean to pass an OS instance name (system name)
+- to the machine.
++Support for SCLP VT220-compatible terminal
++CONFIG_SCLP_VT220_TTY
++ Include support for an IBM SCLP VT220-compatible terminal.
+
+- This option should only be selected as a module since the
+- system name has to be passed as module parameter. The module
+- will be called hwc_cpi.o.
++Support for console on SCLP VT220-compatible terminal
++CONFIG_SCLP_VT220_CONSOLE
++ Include support for using an IBM SCLP VT220-compatible terminal as a Linux
++ system console.
+
++Control-Program Identification
++CONFIG_SCLP_CPI
++ This option enables the hardware console interface for system
++ identification. This is commonly used for workload management and
++ gives you a nice name for the system on the service element.
++ Please select this option as a module since built-in operation is
++ completely untested.
++ You should only select this option if you know what you are doing,
++ need this feature and intend to run your kernel in LPAR.
++
+ S/390 tape device support
+ CONFIG_S390_TAPE
+ Select this option if you want to access channel-attached tape
+ devices on IBM S/390 or zSeries.
+- If you select this option you will also want to select at
+- least one of the tape interface options and one of the tape
+- hardware options in order to access a tape device.
++ If you select this option you will also want to select at least
++ one of the hardware options in order to access a tape device.
+ This option is also available as a module. The module will be
+ called tape390.o and include all selected interfaces.
+  The hardware drivers will be separate modules.
+ If unsure, say "Y".
+
+-Support for tape character devices
+-CONFIG_S390_TAPE_CHAR
+- Select this option if you want to access your channel-attached
+- tape devices using the character device interface.
+- This interface is similar to other Linux tape devices like
+- SCSI-Tapes (st) and the floppy tape device (ftape).
+- If unsure, say "Y".
+-
+ Support for tape block devices
+ CONFIG_S390_TAPE_BLOCK
+ Select this option if you want to access your channel-attached tape
+@@ -25451,26 +25571,16 @@
+ Documentation/s390/TAPE for further information about creating
+ volumes for and using this interface. It is safe to say "Y" here.
+
+-Support for 3490 tape hardware
+-CONFIG_S390_TAPE_3490
+- Select this option if you want to access IBM 3490 magnetic
++Support for 3480/3490 tape hardware
++CONFIG_S390_TAPE_34XX
++ Select this option if you want to access IBM 3480/3490 magnetic
+ tape subsystems and 100% compatibles.
+ This option is also available as a module. The module will be
+- called tape3490.o. If CONFIG_S390_TAPE is selected as a module,
++ called tape_34xx.o. If CONFIG_S390_TAPE is selected as a module,
+ this hardware driver cannot be built-in but is only available
+ as a module.
+ It is safe to say "Y" here.
+
+-Support for 3480 tape hardware
+-CONFIG_S390_TAPE_3480
+- Select this option if you want to access IBM 3480 magnetic
+- tape subsystems and 100% compatibles.
+- This option is also available as a module. The module will be
+- called tape3480.o. If CONFIG_S390_TAPE is selected as a module,
+- this hardware driver cannot be built-in but is only available
+- as a module.
+- It is safe to say "Y" here.
+-
+ CTC device support
+ CONFIG_CTC
+ Select this option if you want to use channel-to-channel networking
+@@ -25486,8 +25596,12 @@
+ or zSeries as a disk. This is useful as a _fast_ swap device if you
+ want to access more than 2G of memory when running in 31 bit mode.
+ This option is also available as a module which will be called
+- xpram.o. If unsure, say "N".
++ xpram.o. If unsure, say M.
+
++CONFIG_DCSSBLK
++ A block device driver for DCSS segments. It can be used to create
++ a filesystem in such a segment.
++
+ Fast IRQ handling
+ CONFIG_FAST_IRQ
+ Select this option in order to get the interrupts processed faster
+@@ -25496,12 +25610,38 @@
+ interrupts which will also be processed before leaving the interrupt
+ context. This speeds up the I/O a lot. Say "Y".
+
+-IUCV device support (VM only)
++IUCV support (VM only)
+ CONFIG_IUCV
+ Select this option if you want to use inter-user communication
+- vehicle networking under VM or VIF. This option is also available
+- as a module which will be called iucv.o. If unsure, say "Y".
++ under VM or VIF. If unsure, say "Y" to enable a fast communication
++ link between VM guests. At boot time the user ID of the guest needs
++ to be passed to the kernel. Note that both kernels need to be
++ compiled with this option and both need to be booted with the user ID
++ of the other VM guest.
+
++IUCV network device support (VM only)
++CONFIG_NETIUCV
++ Select this option if you want to use inter-user communication
++ vehicle networking under VM or VIF. It enables a fast communication
++ link between VM guests. Using ifconfig a point-to-point connection
++ can be established to the Linux for zSeries and S/390 system
++ running on the other VM guest. This option is also available
++ as a module which will be called netiucv.o. If unsure, say "Y".
++
++IUCV special message support (VM only)
++CONFIG_SMSGIUCV
++ Select this option if you want to be able to receive SMSG messages
++ from other VM guest systems.
++
++Support for the z/VM recording system services (VM only)
++CONFIG_VMLOGRDR
++ Select this option if you want to be able to receive records collected
++  by the z/VM recording system services, e.g. from *LOGREC. This option
++  should be built as a module since the actual service to connect to
++ has to be specified at module load time. The module will be called
++ vmlogrdr.o.
++ This driver depends on the IUCV support driver.
++
+ Process warning machine checks
+ CONFIG_MACHCHK_WARNING
+ Select this option if you want the machine check handler on IBM S/390 or
+@@ -25530,6 +25670,20 @@
+ enabled, you'll be able to toggle chpids logically offline and online. Even
+ if you don't understand what this means, you should say "Y".
+
++Process warning machine checks
++CONFIG_MACHCHK_WARNING
++ Select this option if you want the machine check handler on IBM S/390 or
++ zSeries to process warning machine checks (e.g. on power failures).
++ If unsure, say "Y".
++
++Use chscs for Common I/O
++CONFIG_CHSC
++ Select this option if you want the s390 common I/O layer to use information
++ obtained by channel subsystem calls. This will enable Linux to process link
++ failures and resource accessibility events. Moreover, if you have procfs
++ enabled, you'll be able to toggle chpids logically offline and online. Even
++ if you don't understand what this means, you should say "Y".
++
+ Kernel support for 31 bit ELF binaries
+ CONFIG_S390_SUPPORT
+ Select this option if you want to enable your system kernel to
+@@ -25537,6 +25691,15 @@
+ (and some other stuff like libraries and such) is needed for
+ executing 31 bit applications. It is safe to say "Y".
+
++Lan Channel Station (LCS) Interface
++CONFIG_LCS
++ Select this option if you want to use LCS networking on IBM S/390
++ or zSeries. This device driver supports Token Ring (IEEE 802.5),
++ FDDI (IEEE 802.7) and Ethernet.
++ It will use the channel device configuration if this is available.
++ This option is also available as a module which will be
++  called lcs.o. If you do not know what it is, it's safe to say "Y".
++
+ Channel Device Configuration
+ CONFIG_CHANDEV
+ The channel device layer is a layer to provide a consistent
+@@ -25573,6 +25736,19 @@
+ For more info see the chandev manpage usually distributed in
+ <file:Documentation/s390/chandev.8> in the Linux source tree.
+
++IBM S/390 and zSeries PCICC and PCICA device driver
++CONFIG_Z90CRYPT
++  This driver supports the IBM S/390 and zSeries Cryptographic
++  Coprocessor (PCICC) and Cryptographic Accelerator (PCICA) adapters.
++
++ For details please refer to the documentation provided by IBM at
++ <http://www10.software.ibm.com/developerworks/opensource/linux390>.
++
++ This driver is also available as a module (code which can be
++ inserted in and removed from the running kernel whenever you
++ want). If you want to compile it as a module, say 'M' here and
++ read file Documentation/modules.txt.
++
+ SAB3036 tuner support
+ CONFIG_TUNER_3036
+ Say Y here to include support for Philips SAB3036 compatible tuners.
+=== mm/memory.c
+==================================================================
+--- mm/memory.c (/upstream/vanilla/2.4.27) (revision 52)
++++ mm/memory.c (/trunk/2.4.27) (revision 52)
+@@ -163,6 +163,86 @@
+ #define PMD_TABLE_MASK ((PTRS_PER_PMD-1) * sizeof(pmd_t))
+
+ /*
++ * Allocate page middle directory.
++ *
++ * We've already handled the fast-path in-line, and we own the
++ * page table lock.
++ *
++ * On a two-level page table, this ends up actually being entirely
++ * optimized away.
++ */
++pmd_t *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
++{
++ pmd_t *new;
++
++ /* "fast" allocation can happen without dropping the lock.. */
++ new = pmd_alloc_one_fast(mm, address);
++ if (!new) {
++ spin_unlock(&mm->page_table_lock);
++ new = pmd_alloc_one(mm, address);
++ spin_lock(&mm->page_table_lock);
++ if (!new)
++ return NULL;
++
++ /*
++ * Because we dropped the lock, we should re-check the
++ * entry, as somebody else could have populated it..
++ */
++ if (!pgd_none(*pgd)) {
++ pmd_free(new);
++ check_pgt_cache();
++ goto out;
++ }
++ }
++#if defined(CONFIG_ARCH_S390X)
++ new = pgd_populate(mm, pgd, new);
++ if (!new)
++ return NULL;
++#else
++ pgd_populate(mm, pgd, new);
++#endif
++out:
++ return pmd_offset(pgd, address);
++}
++
++/*
++ * Allocate the page table directory.
++ *
++ * We've already handled the fast-path in-line, and we own the
++ * page table lock.
++ */
++inline pte_t *
++pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
++{
++ if (pmd_none(*pmd)) {
++ pte_t *new;
++
++ /* "fast" allocation can happen without dropping the lock.. */
++ new = pte_alloc_one_fast(mm, address);
++ if (!new) {
++ spin_unlock(&mm->page_table_lock);
++ new = pte_alloc_one(mm, address);
++ spin_lock(&mm->page_table_lock);
++ if (!new)
++ return NULL;
++
++ /*
++ * Because we dropped the lock, we should re-check the
++ * entry, as somebody else could have populated it..
++ */
++ if (!pmd_none(*pmd)) {
++ pte_free(new);
++ check_pgt_cache();
++ goto out;
++ }
++ }
++ pmd_populate(mm, pmd, new);
++ }
++out:
++ return pte_offset(pmd, address);
++}
++
++/*
+ * copy one vm_area from one task to the other. Assumes the page tables
+ * already present in the new task to be cleared in the whole range
+ * covered by this vma.
+@@ -422,9 +502,14 @@
+
+ pte = *ptep;
+ if (pte_present(pte)) {
+- if (!write ||
+- (pte_write(pte) && pte_dirty(pte)))
+- return pte_page(pte);
++ struct page * page = pte_page(pte);
++ if (!write)
++ return page;
++ if (pte_write(pte)) {
++ if (!pte_dirty(pte) && !PageDirty(page))
++ set_page_dirty(page);
++ return page;
++ }
+ }
+
+ out:
+@@ -899,20 +984,6 @@
+ return error;
+ }
+
+-/*
+- * Establish a new mapping:
+- * - flush the old one
+- * - update the page tables
+- * - inform the TLB about the new one
+- *
+- * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
+- */
+-static inline void establish_pte(struct vm_area_struct * vma, unsigned long address, pte_t *page_table, pte_t entry)
+-{
+- set_pte(page_table, entry);
+- flush_tlb_page(vma, address);
+- update_mmu_cache(vma, address, entry);
+-}
+
+ /*
+ * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
+@@ -922,7 +993,8 @@
+ {
+ flush_page_to_ram(new_page);
+ flush_cache_page(vma, address);
+- establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
++ ptep_establish(vma, address, page_table,
++ pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
+ }
+
+ /*
+@@ -959,7 +1031,8 @@
+ unlock_page(old_page);
+ if (reuse) {
+ flush_cache_page(vma, address);
+- establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
++ ptep_establish(vma, address, page_table,
++ pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
+ spin_unlock(&mm->page_table_lock);
+ return 1; /* Minor fault */
+ }
+@@ -1354,7 +1427,7 @@
+ entry = pte_mkdirty(entry);
+ }
+ entry = pte_mkyoung(entry);
+- establish_pte(vma, address, pte, entry);
++ ptep_establish(vma, address, pte, entry);
+ spin_unlock(&mm->page_table_lock);
+ return 1;
+ }
+@@ -1387,79 +1460,6 @@
+ return -1;
+ }
+
+-/*
+- * Allocate page middle directory.
+- *
+- * We've already handled the fast-path in-line, and we own the
+- * page table lock.
+- *
+- * On a two-level page table, this ends up actually being entirely
+- * optimized away.
+- */
+-pmd_t *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+-{
+- pmd_t *new;
+-
+- /* "fast" allocation can happen without dropping the lock.. */
+- new = pmd_alloc_one_fast(mm, address);
+- if (!new) {
+- spin_unlock(&mm->page_table_lock);
+- new = pmd_alloc_one(mm, address);
+- spin_lock(&mm->page_table_lock);
+- if (!new)
+- return NULL;
+-
+- /*
+- * Because we dropped the lock, we should re-check the
+- * entry, as somebody else could have populated it..
+- */
+- if (!pgd_none(*pgd)) {
+- pmd_free(new);
+- check_pgt_cache();
+- goto out;
+- }
+- }
+- pgd_populate(mm, pgd, new);
+-out:
+- return pmd_offset(pgd, address);
+-}
+-
+-/*
+- * Allocate the page table directory.
+- *
+- * We've already handled the fast-path in-line, and we own the
+- * page table lock.
+- */
+-pte_t *pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+-{
+- if (pmd_none(*pmd)) {
+- pte_t *new;
+-
+- /* "fast" allocation can happen without dropping the lock.. */
+- new = pte_alloc_one_fast(mm, address);
+- if (!new) {
+- spin_unlock(&mm->page_table_lock);
+- new = pte_alloc_one(mm, address);
+- spin_lock(&mm->page_table_lock);
+- if (!new)
+- return NULL;
+-
+- /*
+- * Because we dropped the lock, we should re-check the
+- * entry, as somebody else could have populated it..
+- */
+- if (!pmd_none(*pmd)) {
+- pte_free(new);
+- check_pgt_cache();
+- goto out;
+- }
+- }
+- pmd_populate(mm, pmd, new);
+- }
+-out:
+- return pte_offset(pmd, address);
+-}
+-
+ int make_pages_present(unsigned long addr, unsigned long end)
+ {
+ int ret, len, write;
+=== mm/filemap.c
+==================================================================
+--- mm/filemap.c (/upstream/vanilla/2.4.27) (revision 52)
++++ mm/filemap.c (/trunk/2.4.27) (revision 52)
+@@ -1388,7 +1388,7 @@
+ * If it was already so marked, move it to the active queue and drop
+ * the referenced bit. Otherwise, just mark it for future action..
+ */
+-void mark_page_accessed(struct page *page)
++inline void mark_page_accessed(struct page *page)
+ {
+ if (!PageActive(page) && PageReferenced(page)) {
+ activate_page(page);
+@@ -2211,8 +2211,8 @@
+
+ if (pte_present(pte)) {
+ struct page *page = pte_page(pte);
+- if (VALID_PAGE(page) && !PageReserved(page) && ptep_test_and_clear_dirty(ptep)) {
+- flush_tlb_page(vma, address);
++ if (VALID_PAGE(page) && !PageReserved(page) &&
++ ptep_test_and_clear_and_flush_dirty(vma, address, ptep)) {
+ set_page_dirty(page);
+ }
+ }
+=== mm/vmscan.c
+==================================================================
+--- mm/vmscan.c (/upstream/vanilla/2.4.27) (revision 52)
++++ mm/vmscan.c (/trunk/2.4.27) (revision 52)
+@@ -101,9 +101,7 @@
+ * is needed on CPUs which update the accessed and dirty
+ * bits in hardware.
+ */
+- flush_cache_page(vma, address);
+- pte = ptep_get_and_clear(page_table);
+- flush_tlb_page(vma, address);
++ pte = ptep_invalidate(vma, address, page_table);
+
+ if (pte_dirty(pte))
+ set_page_dirty(page);
+=== arch/s390x/kernel/smp.c
+==================================================================
+--- arch/s390x/kernel/smp.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390x/kernel/smp.c (/trunk/2.4.27) (revision 52)
+@@ -92,7 +92,7 @@
+
+ extern void reipl(unsigned long devno);
+
+-static sigp_ccode smp_ext_bitcall(int, ec_bit_sig);
++static void smp_ext_bitcall(int, ec_bit_sig);
+ static void smp_ext_bitcall_others(ec_bit_sig);
+
+ /*
+@@ -131,7 +131,7 @@
+ * in the system.
+ */
+
+-int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
++int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+ int wait)
+ /*
+ * [SUMMARY] Run a function on all other CPUs.
+@@ -176,46 +176,91 @@
+ return 0;
+ }
+
++/*
++ * Call a function only on one CPU
++ * cpu : the CPU the function should be executed on
++ *
++ * You must not call this function with disabled interrupts or from a
++ * hardware interrupt handler. You may call it from a bottom half
++ * handler.
++ */
++int smp_call_function_on(void (*func) (void *info), void *info,
++ int nonatomic, int wait, int cpu)
++{
++ struct call_data_struct data;
++
++ if (!atomic_read(&smp_commenced))
++ return 0;
++
++ if (smp_processor_id() == cpu) {
++ /* direct call to function */
++ func(info);
++ return 0;
++ }
++
++ data.func = func;
++ data.info = info;
++
++ atomic_set(&data.started, 0);
++ data.wait = wait;
++ if (wait)
++ atomic_set(&data.finished, 0);
++
++ spin_lock_bh(&call_lock);
++ call_data = &data;
++ smp_ext_bitcall(cpu, ec_call_function);
++
++ /* Wait for response */
++ while (atomic_read(&data.started) != 1)
++ barrier();
++
++ if (wait)
++ while (atomic_read(&data.finished) != 1)
++ barrier();
++
++ spin_unlock_bh(&call_lock);
++ return 0;
++}
++
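++/*
++ * Usage sketch (assumed, not part of the interface above): run a
++ * function on one specific CPU from process context and wait for it
++ * to finish there.
++ *
++ *	static void say_hello(void *info)
++ *	{
++ *		printk("hello from cpu %d\n", smp_processor_id());
++ *	}
++ *
++ *	smp_call_function_on(say_hello, NULL, 0, 1, 2);
++ */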
++
+ static inline void do_send_stop(void)
+ {
+- u32 dummy;
+- int i;
++ unsigned long dummy;
++ int i;
+
+- /* stop all processors */
+- for (i = 0; i < smp_num_cpus; i++) {
+- if (smp_processor_id() != i) {
+- int ccode;
+- do {
+- ccode = signal_processor_ps(
+- &dummy,
+- 0,
+- i,
+- sigp_stop);
+- } while(ccode == sigp_busy);
+- }
+- }
++ /* stop all processors */
++ for (i = 0; i < smp_num_cpus; i++) {
++ if (smp_processor_id() != i) {
++ int ccode;
++ do {
++ ccode = signal_processor_ps(
++ &dummy,
++ 0,
++ i,
++ sigp_stop);
++ } while(ccode == sigp_busy);
++ }
++ }
+ }
+
+ static inline void do_store_status(void)
+ {
+- unsigned long low_core_addr;
+- u32 dummy;
+- int i;
++ unsigned long low_core_addr;
++ unsigned long dummy;
++ int i;
+
+- /* store status of all processors in their lowcores (real 0) */
+- for (i = 0; i < smp_num_cpus; i++) {
+- if (smp_processor_id() != i) {
+- int ccode;
+- low_core_addr = (unsigned long)get_cpu_lowcore(i);
+- do {
+- ccode = signal_processor_ps(
+- &dummy,
+- low_core_addr,
+- i,
+- sigp_store_status_at_address);
+- } while(ccode == sigp_busy);
+- }
+- }
++ /* store status of all processors in their lowcores (real 0) */
++ for (i = 0; i < smp_num_cpus; i++) {
++ if (smp_processor_id() != i) {
++ int ccode;
++ low_core_addr = (unsigned long)get_cpu_lowcore(i);
++ do {
++ ccode = signal_processor_ps(
++ &dummy,
++ low_core_addr,
++ i,
++ sigp_store_status_at_address);
++ } while(ccode == sigp_busy);
++ }
++ }
+ }
+
+ /*
+@@ -224,8 +269,8 @@
+ */
+ void smp_send_stop(void)
+ {
+- /* write magic number to zero page (absolute 0) */
+- get_cpu_lowcore(smp_processor_id())->panic_magic = __PANIC_MAGIC;
++ /* write magic number to zero page (absolute 0) */
++ get_cpu_lowcore(smp_processor_id())->panic_magic = __PANIC_MAGIC;
+
+ /* stop other processors. */
+ do_send_stop();
+@@ -263,7 +308,7 @@
+ void machine_restart_smp(char * __unused)
+ {
+ cpu_restart_map = cpu_online_map;
+- smp_call_function(do_machine_restart, NULL, 0, 0);
++ smp_call_function(do_machine_restart, NULL, 0, 0);
+ do_machine_restart(NULL);
+ }
+
+@@ -282,7 +327,7 @@
+
+ void machine_halt_smp(void)
+ {
+- smp_call_function(do_machine_halt, NULL, 0, 0);
++ smp_call_function(do_machine_halt, NULL, 0, 0);
+ do_machine_halt(NULL);
+ }
+
+@@ -301,7 +346,7 @@
+
+ void machine_power_off_smp(void)
+ {
+- smp_call_function(do_machine_power_off, NULL, 0, 0);
++ smp_call_function(do_machine_power_off, NULL, 0, 0);
+ do_machine_power_off(NULL);
+ }
+
+@@ -312,55 +357,52 @@
+
+ void do_ext_call_interrupt(struct pt_regs *regs, __u16 code)
+ {
+- unsigned long bits;
++ unsigned long bits;
+
+- /*
+- * handle bit signal external calls
+- *
+- * For the ec_schedule signal we have to do nothing. All the work
+- * is done automatically when we return from the interrupt.
+- */
++ /*
++ * handle bit signal external calls
++ *
++ * For the ec_schedule signal we have to do nothing. All the work
++ * is done automatically when we return from the interrupt.
++ */
+ bits = xchg(&S390_lowcore.ext_call_fast, 0);
+
+- if (test_bit(ec_call_function, &bits))
++ if (test_bit(ec_call_function, &bits))
+ do_call_function();
+ }
+
+ /*
+- * Send an external call sigp to another cpu and return without waiting
++ * Send an external call sigp to another cpu and wait
+ * for its completion.
+ */
+-static sigp_ccode smp_ext_bitcall(int cpu, ec_bit_sig sig)
++static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
+ {
+- sigp_ccode ccode;
+-
+- /*
+- * Set signaling bit in lowcore of target cpu and kick it
+- */
++ /*
++ * Set signaling bit in lowcore of target cpu and kick it
++ */
+ set_bit(sig, &(get_cpu_lowcore(cpu)->ext_call_fast));
+- ccode = signal_processor(cpu, sigp_external_call);
+- return ccode;
++ while (signal_processor(cpu, sigp_external_call) == sigp_busy)
++ udelay(10);
+ }
+
+ /*
+ * Send an external call sigp to every other cpu in the system and
+- * return without waiting for its completion.
++ * wait for its completion.
+ */
+ static void smp_ext_bitcall_others(ec_bit_sig sig)
+ {
+- sigp_ccode ccode;
+- int i;
++ int i;
+
+- for (i = 0; i < smp_num_cpus; i++) {
+- if (smp_processor_id() == i)
+- continue;
+- /*
+- * Set signaling bit in lowcore of target cpu and kick it
+- */
++ for (i = 0; i < smp_num_cpus; i++) {
++ if (smp_processor_id() == i)
++ continue;
++ /*
++ * Set signaling bit in lowcore of target cpu and kick it
++ */
+ set_bit(sig, &(get_cpu_lowcore(i)->ext_call_fast));
+- while (signal_processor(i, sigp_external_call) == sigp_busy)
++ while (signal_processor(i, sigp_external_call) == sigp_busy)
+ udelay(10);
+- }
++ }
+ }
+
+ /*
+@@ -650,3 +692,4 @@
+ EXPORT_SYMBOL(smp_ctl_clear_bit);
+ EXPORT_SYMBOL(smp_num_cpus);
+ EXPORT_SYMBOL(smp_call_function);
++
+=== arch/s390x/kernel/setup.c
+==================================================================
+--- arch/s390x/kernel/setup.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390x/kernel/setup.c (/trunk/2.4.27) (revision 52)
+@@ -164,9 +164,9 @@
+
+ static int __init conmode_setup(char *str)
+ {
+-#if defined(CONFIG_HWC_CONSOLE)
+- if (strncmp(str, "hwc", 4) == 0)
+- SET_CONSOLE_HWC;
++#if defined(CONFIG_SCLP_CONSOLE)
++ if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
++ SET_CONSOLE_SCLP;
+ #endif
+ #if defined(CONFIG_TN3215_CONSOLE)
+ if (strncmp(str, "3215", 5) == 0)
+@@ -198,8 +198,8 @@
+ */
+ cpcmd("TERM CONMODE 3215", NULL, 0);
+ if (ptr == NULL) {
+-#if defined(CONFIG_HWC_CONSOLE)
+- SET_CONSOLE_HWC;
++#if defined(CONFIG_SCLP_CONSOLE)
++ SET_CONSOLE_SCLP;
+ #endif
+ return;
+ }
+@@ -208,16 +208,16 @@
+ SET_CONSOLE_3270;
+ #elif defined(CONFIG_TN3215_CONSOLE)
+ SET_CONSOLE_3215;
+-#elif defined(CONFIG_HWC_CONSOLE)
+- SET_CONSOLE_HWC;
++#elif defined(CONFIG_SCLP_CONSOLE)
++ SET_CONSOLE_SCLP;
+ #endif
+ } else if (strncmp(ptr + 8, "3215", 4) == 0) {
+ #if defined(CONFIG_TN3215_CONSOLE)
+ SET_CONSOLE_3215;
+ #elif defined(CONFIG_TN3270_CONSOLE)
+ SET_CONSOLE_3270;
+-#elif defined(CONFIG_HWC_CONSOLE)
+- SET_CONSOLE_HWC;
++#elif defined(CONFIG_SCLP_CONSOLE)
++ SET_CONSOLE_SCLP;
+ #endif
+ }
+ } else if (MACHINE_IS_P390) {
+@@ -227,8 +227,8 @@
+ SET_CONSOLE_3270;
+ #endif
+ } else {
+-#if defined(CONFIG_HWC_CONSOLE)
+- SET_CONSOLE_HWC;
++#if defined(CONFIG_SCLP_CONSOLE)
++ SET_CONSOLE_SCLP;
+ #endif
+ }
+ }
+@@ -271,21 +271,25 @@
+
+ /*
+ * Reboot, halt and power_off stubs. They just call _machine_restart,
+- * _machine_halt or _machine_power_off.
++ * _machine_halt or _machine_power_off after making sure that all pending
++ * printks reached their destination.
+ */
+
+ void machine_restart(char *command)
+ {
++ console_unblank();
+ _machine_restart(command);
+ }
+
+ void machine_halt(void)
+ {
++ console_unblank();
+ _machine_halt();
+ }
+
+ void machine_power_off(void)
+ {
++ console_unblank();
+ _machine_power_off();
+ }
+
+=== arch/s390x/kernel/linux32.c
+==================================================================
+--- arch/s390x/kernel/linux32.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390x/kernel/linux32.c (/trunk/2.4.27) (revision 52)
+@@ -4427,7 +4427,7 @@
+ ret = sys_newstat(tmp, &s);
+ set_fs (old_fs);
+ putname(tmp);
+- if (putstat64 (statbuf, &s))
++ if (!ret && putstat64 (statbuf, &s))
+ return -EFAULT;
+ return ret;
+ }
+@@ -4451,7 +4451,7 @@
+ ret = sys_newlstat(tmp, &s);
+ set_fs (old_fs);
+ putname(tmp);
+- if (putstat64 (statbuf, &s))
++ if (!ret && putstat64 (statbuf, &s))
+ return -EFAULT;
+ return ret;
+ }
+@@ -4467,7 +4467,7 @@
+ set_fs (KERNEL_DS);
+ ret = sys_newfstat(fd, &s);
+ set_fs (old_fs);
+- if (putstat64 (statbuf, &s))
++ if (!ret && putstat64 (statbuf, &s))
+ return -EFAULT;
+ return ret;
+ }
+@@ -4507,7 +4507,7 @@
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ if (!IS_ERR((void *) error) && error + len >= 0x80000000ULL) {
+ /* Result is out of bounds. */
+- do_munmap(current->mm, addr, len);
++ do_munmap(current->mm, error, len);
+ error = -ENOMEM;
+ }
+ up_write(¤t->mm->mmap_sem);
+=== arch/s390x/kernel/process.c
+==================================================================
+--- arch/s390x/kernel/process.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390x/kernel/process.c (/trunk/2.4.27) (revision 52)
+@@ -43,9 +43,24 @@
+ #include <asm/io.h>
+ #include <asm/processor.h>
+ #include <asm/irq.h>
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++#include <asm/timer.h>
++#endif
+
+ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+
++#ifdef CONFIG_VIRT_TIMER
++extern int stop_cpu_timer(void);
++#endif
++
++#ifdef CONFIG_NO_IDLE_HZ
++extern void stop_hz_timer(void);
++#endif
++
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++extern atomic_t active_cpu_timer;
++#endif
++
+ /*
+ * The idle loop on a S390...
+ */
+@@ -68,9 +83,37 @@
+ continue;
+ }
+
++#ifdef CONFIG_VIRT_TIMER
+ /*
++ * virtual CPU timer should not progress while its CPU is idle
++ */
++ if (stop_cpu_timer()) {
++ __sti();
++ continue;
++ }
++#endif
++
++/*
++ * active_cpu_timer is used by stop_hz_timer to determine if the last
++ * CPU is gone. We also have to update this value if we use the virtual
++ * CPU timer because both use monitor calls.
++ */
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++ atomic_dec(&active_cpu_timer);
++#endif
++
++#ifdef CONFIG_NO_IDLE_HZ
++ stop_hz_timer();
++#endif
++
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++ /* enable monitor call class 0 */
++ __ctl_set_bit(8, 15);
++#endif
++
++ /*
+ * Wait for external, I/O or machine check interrupt and
+- * switch of machine check bit after the wait has ended.
++ * switch off machine check bit after the wait has ended.
+ */
+ wait_psw.mask = _WAIT_PSW_MASK;
+ asm volatile (
+@@ -83,6 +126,10 @@
+ " lpswe 0(%1)\n"
+ "1:"
+ : "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" );
++ /*
++ * start_hz_timer is called by monitor call in entry.S
++ * if stop_hz_timer switched off the regular HZ interrupts
++ */
+ }
+ }
+
+=== arch/s390x/kernel/ioctl32.c
+==================================================================
+--- arch/s390x/kernel/ioctl32.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390x/kernel/ioctl32.c (/trunk/2.4.27) (revision 52)
+@@ -33,7 +33,9 @@
+ #include <asm/types.h>
+ #include <asm/uaccess.h>
+ #include <asm/dasd.h>
++#include <asm/tape390.h>
+ #include <asm/sockios.h>
++#include <asm/qeth.h>
+
+ #include "linux32.h"
+
+@@ -317,6 +319,95 @@
+ return err;
+ }
+
++static int qeth_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
++{
++ struct ifreq ifr;
++ struct ifreq32* ifr32 = (struct ifreq32 *)A(arg);
++ mm_segment_t old_fs;
++ int res;
++ u32 data,len;
++
++ /* copy the interface name from userspace */
++ if (copy_from_user(ifr.ifr_ifrn.ifrn_name, ifr32->ifr_ifrn.ifrn_name,
++ sizeof(ifr.ifr_ifrn.ifrn_name)))
++ return -EFAULT;
++
++ switch (cmd) {
++ case SIOC_QETH_ARP_SET_NO_ENTRIES:
++ if (copy_from_user(&ifr.ifr_ifru.ifru_ivalue,
++ &ifr32->ifr_ifru.ifru_ivalue, sizeof(int)))
++ return -EFAULT;
++		/* fall through */
++ case SIOC_QETH_GET_CARD_TYPE:
++ old_fs = get_fs();
++ set_fs (KERNEL_DS);
++ res = sys_ioctl (fd, cmd, (unsigned long)&ifr);
++ set_fs (old_fs);
++ break;
++
++ case SIOC_QETH_ARP_ADD_ENTRY:
++ case SIOC_QETH_ARP_REMOVE_ENTRY:
++ case SIOC_QETH_ARP_FLUSH_CACHE:
++ case SIOC_QETH_ARP_QUERY_INFO:
++ case SIOC_QETH_ADP_SET_SNMP_CONTROL:
++ /* copy the pointer to kernel space */
++ if (copy_from_user(&data, &ifr32->ifr_ifru.ifru_data,
++ sizeof(data)))
++ return -EFAULT;
++
++ /* copy the first 32bit (which holds the buffer size)
++ * to kernel space
++ */
++ if (copy_from_user(&len, (char*)A(data), sizeof(len)))
++ return -EFAULT;
++
++ /* the length field in the buffer does not include the
++ * 16 byte header
++ */
++ len += 16;
++
++ /* limit the len to a sane value */
++ if (len > 64*1024)
++ return -EINVAL;
++
++ /* allocate a buffer for the data */
++ ifr.ifr_data = kmalloc(len,GFP_KERNEL);
++ if (!ifr.ifr_data)
++ return -EAGAIN;
++
++ /* copy the data from 31bit userspace buffer to 64bit
++ * kernel space buffer
++ */
++ if (copy_from_user( ifr.ifr_data, (char*)A(data), len)){
++ res = -EFAULT;
++ kfree(ifr.ifr_data);
++ break;
++ }
++
++	/* call the normal 64 bit ioctl system call */
++ old_fs = get_fs();
++ set_fs (KERNEL_DS);
++ res = sys_ioctl (fd, cmd, (unsigned long)&ifr);
++ set_fs (old_fs);
++
++ /* copy back the data from 64bit kernel space buffer
++ * to 31bit userspace buffer.
++ */
++ if (copy_to_user((char*)A(data), ifr.ifr_data, len)){
++ res = -EFAULT;
++ }
++
++ kfree( ifr.ifr_data );
++ break;
++
++ default:
++ res = -EINVAL;
++ break;
++ }
++
++ return res;
++}
++
+ struct rtentry32
+ {
+ unsigned int rt_pad1;
+@@ -378,6 +469,7 @@
+ return sys_ioctl(fd, cmd, arg);
+ }
+
++
+ struct loop_info32 {
+ int lo_number; /* ioctl r/o */
+ __kernel_dev_t32 lo_device; /* ioctl r/o */
+@@ -478,7 +570,6 @@
+ return err;
+ }
+
+-
+ static int w_long(unsigned int fd, unsigned int cmd, unsigned long arg)
+ {
+ mm_segment_t old_fs = get_fs();
+@@ -519,6 +610,8 @@
+ IOCTL32_DEFAULT(BIODASDINFO),
+ IOCTL32_DEFAULT(BIODASDFMT),
+
++ IOCTL32_DEFAULT(TAPE390_DISPLAY),
++
+ IOCTL32_DEFAULT(BLKROSET),
+ IOCTL32_DEFAULT(BLKROGET),
+ IOCTL32_DEFAULT(BLKRRPART),
+@@ -686,8 +779,20 @@
+ IOCTL32_HANDLER(BLKGETSIZE, w_long),
+ IOCTL32_HANDLER(BLKFRAGET, w_long),
+ IOCTL32_HANDLER(BLKSECTGET, w_long),
+- IOCTL32_HANDLER(BLKPG, blkpg_ioctl_trans)
+-
++ IOCTL32_HANDLER(BLKPG, blkpg_ioctl_trans),
++/* qeth */
++ IOCTL32_DEFAULT(QETH_IOCPROC_REGISTER),
++ IOCTL32_HANDLER(SIOC_QETH_ARP_SET_NO_ENTRIES, qeth_ioctl),
++ IOCTL32_HANDLER(SIOC_QETH_ARP_ADD_ENTRY, qeth_ioctl),
++ IOCTL32_HANDLER(SIOC_QETH_ARP_REMOVE_ENTRY, qeth_ioctl),
++ IOCTL32_HANDLER(SIOC_QETH_ARP_FLUSH_CACHE, qeth_ioctl),
++ IOCTL32_HANDLER(SIOC_QETH_ARP_QUERY_INFO, qeth_ioctl),
++ IOCTL32_HANDLER(SIOC_QETH_ADP_SET_SNMP_CONTROL, qeth_ioctl),
++ IOCTL32_HANDLER(SIOC_QETH_GET_CARD_TYPE, qeth_ioctl),
++/* MII */
++ IOCTL32_DEFAULT(SIOCGMIIPHY),
++ IOCTL32_DEFAULT(SIOCGMIIREG),
++ IOCTL32_DEFAULT(SIOCSMIIREG)
+ };
+
+ #define NR_IOCTL32_HANDLERS (sizeof(ioctl32_handler_table) / \
+=== arch/s390x/kernel/s390_ksyms.c
+==================================================================
+--- arch/s390x/kernel/s390_ksyms.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390x/kernel/s390_ksyms.c (/trunk/2.4.27) (revision 52)
+@@ -9,10 +9,14 @@
+ #include <linux/mm.h>
+ #include <linux/smp.h>
+ #include <asm/checksum.h>
++#include <asm/cpcmd.h>
+ #include <asm/delay.h>
+ #include <asm/pgalloc.h>
+ #include <asm/setup.h>
+ #include <asm/softirq.h>
++#ifdef CONFIG_VIRT_TIMER
++#include <asm/timer.h>
++#endif
+ #include <asm/ioctl32.h>
+ #if CONFIG_IP_MULTICAST
+ #include <net/arp.h>
+@@ -24,6 +28,7 @@
+ EXPORT_SYMBOL_NOVERS(_oi_bitmap);
+ EXPORT_SYMBOL_NOVERS(_ni_bitmap);
+ EXPORT_SYMBOL_NOVERS(_zb_findmap);
++EXPORT_SYMBOL_NOVERS(empty_zero_page);
+ EXPORT_SYMBOL_NOVERS(__copy_from_user_asm);
+ EXPORT_SYMBOL_NOVERS(__copy_to_user_asm);
+ EXPORT_SYMBOL_NOVERS(__clear_user_asm);
+@@ -83,3 +88,22 @@
+ EXPORT_SYMBOL(console_device);
+ EXPORT_SYMBOL_NOVERS(do_call_softirq);
+ EXPORT_SYMBOL(sys_wait4);
++EXPORT_SYMBOL(smp_call_function_on);
++EXPORT_SYMBOL(show_trace);
++EXPORT_SYMBOL(cpcmd);
++
++
++/*
++ * virtual CPU timer
++ */
++#ifdef CONFIG_VIRT_TIMER
++EXPORT_SYMBOL(init_virt_timer);
++EXPORT_SYMBOL(add_virt_timer);
++EXPORT_SYMBOL(add_virt_timer_periodic);
++EXPORT_SYMBOL(mod_virt_timer);
++EXPORT_SYMBOL(del_virt_timer);
++#endif
++
++/* urandom read needed for z90crypt */
++extern struct file_operations urandom_fops;
++EXPORT_SYMBOL_GPL(urandom_fops);
+=== arch/s390x/kernel/time.c
+==================================================================
+--- arch/s390x/kernel/time.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390x/kernel/time.c (/trunk/2.4.27) (revision 52)
+@@ -4,8 +4,8 @@
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Hartmut Penner (hp at de.ibm.com),
+- * Martin Schwidefsky (schwidefsky at de.ibm.com),
+- * Denis Joseph Barrow (djbarrow at de.ibm.com,barrow_dj at yahoo.com)
++ * Martin Schwidefsky (schwidefsky at de.ibm.com),
++ * Denis Joseph Barrow (djbarrow at de.ibm.com,barrow_dj at yahoo.com)
+ *
+ * Derived from "arch/i386/kernel/time.c"
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+@@ -26,38 +26,66 @@
+
+ #include <asm/uaccess.h>
+ #include <asm/delay.h>
+-
+ #include <linux/timex.h>
+ #include <linux/config.h>
+-
++#include <asm/s390_ext.h>
+ #include <asm/irq.h>
+-#include <asm/s390_ext.h>
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++#include <asm/timer.h>
++#endif
+
+ /* change this if you have some constant time drift */
+-#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
++#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
+ #define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
+
++/*
++ * Create a small time difference between the timer interrupts
++ * on the different cpus to avoid lock contention.
++ */
++#define CPU_DEVIATION (smp_processor_id() << 12)
++
+ #define TICK_SIZE tick
+
+-static ext_int_info_t ext_int_info_timer;
+-static uint64_t init_timer_cc;
++static ext_int_info_t ext_int_info_cc;
++static u64 init_timer_cc;
++static u64 xtime_cc;
+
+ extern rwlock_t xtime_lock;
+ extern unsigned long wall_jiffies;
+
++#ifdef CONFIG_VIRT_TIMER
++static ext_int_info_t ext_int_info_timer;
++static struct vtimer_queue virt_cpu_timer[NR_CPUS];
++#define VTIMER_MAGIC (0x4b87ad6e + 1)
++#endif
++
++#ifdef CONFIG_NO_IDLE_HZ
++
++#ifdef CONFIG_NO_IDLE_HZ_INIT
++int sysctl_hz_timer = 0;
++#else
++int sysctl_hz_timer = 1;
++#endif
++
++#endif
++
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++atomic_t active_cpu_timer = ATOMIC_INIT(0);
++#endif
++
+ void tod_to_timeval(__u64 todval, struct timeval *xtime)
+ {
+- todval >>= 12;
+- xtime->tv_sec = todval / 1000000;
+- xtime->tv_usec = todval % 1000000;
++ todval >>= 12;
++ xtime->tv_sec = todval / 1000000;
++ xtime->tv_usec = todval % 1000000;
+ }
+
+ static inline unsigned long do_gettimeoffset(void)
+ {
+ __u64 now;
+
+- asm ("STCK 0(%0)" : : "a" (&now) : "memory", "cc");
+- now = (now - init_timer_cc) >> 12;
++ asm volatile ("STCK 0(%0)" : : "a" (&now) : "memory", "cc");
++ now = (now - init_timer_cc) >> 12;
+ /* We require the offset from the latest update of xtime */
+ now -= (__u64) wall_jiffies*USECS_PER_JIFFY;
+ return (unsigned long) now;
+@@ -114,54 +142,267 @@
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
++void account_ticks(struct pt_regs *regs)
++{
++ int cpu = smp_processor_id();
++ __u64 tmp;
++ __u32 ticks;
+
++ /* Calculate how many ticks have passed. */
++ tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer;
++ if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than one tick ? */
++ ticks = tmp / CLK_TICKS_PER_JIFFY + 1;
++ S390_lowcore.jiffy_timer +=
++ CLK_TICKS_PER_JIFFY * (__u64) ticks;
++ } else if (tmp > CLK_TICKS_PER_JIFFY) {
++ ticks = 2;
++ S390_lowcore.jiffy_timer += 2*CLK_TICKS_PER_JIFFY;
++ } else {
++ ticks = 1;
++ S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
++ }
++
++ /* set clock comparator for next tick */
++ tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
++ asm volatile ("SCKC %0" : : "m" (tmp));
++
++ irq_enter(cpu, 0);
++
+ #ifdef CONFIG_SMP
+-extern __u16 boot_cpu_addr;
++ /*
++ * Do not rely on the boot cpu to do the calls to do_timer.
++ * Spread them over all cpus instead.
++ */
++ write_lock(&xtime_lock);
++ if (S390_lowcore.jiffy_timer > xtime_cc) {
++ __u32 xticks;
++
++ tmp = S390_lowcore.jiffy_timer - xtime_cc;
++ if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
++ xticks = tmp / CLK_TICKS_PER_JIFFY;
++ xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
++ } else {
++ xticks = 1;
++ xtime_cc += CLK_TICKS_PER_JIFFY;
++ }
++ while (xticks--)
++ do_timer(regs);
++ }
++ write_unlock(&xtime_lock);
++ while (ticks--)
++ update_process_times(user_mode(regs));
++#else
++ while (ticks--)
++ do_timer(regs);
+ #endif
++ irq_exit(cpu, 0);
++}
+
+-static void do_comparator_interrupt(struct pt_regs *regs, __u16 error_code)
++#ifdef CONFIG_VIRT_TIMER
++void start_cpu_timer(void)
+ {
+- int cpu = smp_processor_id();
++ struct vtimer_queue *vt_list;
+
+- irq_enter(cpu, 0);
++ vt_list = &virt_cpu_timer[smp_processor_id()];
++ set_vtimer(vt_list->idle);
++}
+
++int stop_cpu_timer(void)
++{
++ __u64 done;
++ struct vtimer_queue *vt_list;
++
++ vt_list = &virt_cpu_timer[smp_processor_id()];
++
++ /* store progress */
++ asm volatile ("STPT %0" : "=m" (done));
++
+ /*
+- * set clock comparator for next tick
++ * If done is negative we do not stop the CPU timer,
++ * because we would instantly get an interrupt that
++ * restarts the CPU timer.
+ */
+- S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
+- asm volatile ("SCKC %0" : : "m" (S390_lowcore.jiffy_timer));
++ if (done & 1LL<<63)
++ return 1;
++ else
++ vt_list->offset += vt_list->to_expire - done;
+
+-#ifdef CONFIG_SMP
+- if (S390_lowcore.cpu_data.cpu_addr == boot_cpu_addr)
+- write_lock(&xtime_lock);
++ /* save the actual expire value */
++ vt_list->idle = done;
+
+- update_process_times(user_mode(regs));
++ /*
++ * We cannot halt the CPU timer; we just write a value that
++ * nearly never expires (only after 71 years) and rewrite
++ * the stored expire value when the timer is continued.
++ */
++ set_vtimer(VTIMER_MAX_SLICE);
++ return 0;
++}
+
+- if (S390_lowcore.cpu_data.cpu_addr == boot_cpu_addr) {
+- do_timer(regs);
+- write_unlock(&xtime_lock);
++void set_vtimer(__u64 expires)
++{
++ asm volatile ("SPT %0" : : "m" (expires));
++
++ /* store expire time for this CPU timer */
++ virt_cpu_timer[smp_processor_id()].to_expire = expires;
++}
++
++/*
++ * Sorted add to a list. The list is searched linearly until the
++ * first bigger element is found.
++ */
++void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
++{
++ struct vtimer_list *event;
++
++ list_for_each_entry(event, head, entry) {
++ if (event->expires > timer->expires) {
++ list_add_tail(&timer->entry, &event->entry);
++ return;
++ }
+ }
+-#else
+- do_timer(regs);
+-#endif
++ list_add_tail(&timer->entry, head);
++}
+
+- irq_exit(cpu, 0);
++/*
++ * Run the callback functions of expired vtimer events.
++ * Called from within the interrupt handler.
++ */
++static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs)
++{
++ struct vtimer_queue *vt_list;
++ struct vtimer_list *event;
++ struct list_head *ptr, *tmp;
++ void (*fn)(unsigned long, struct pt_regs*);
++ unsigned long data;
++
++ if (list_empty(cb_list))
++ return;
++
++ vt_list = &virt_cpu_timer[smp_processor_id()];
++
++ list_for_each_safe(ptr, tmp, cb_list) {
++ event = list_entry(ptr, struct vtimer_list, entry);
++
++ fn = event->function;
++ data = event->data;
++ fn(data, regs);
++
++ if (!event->interval)
++ /* delete one shot timer */
++ list_del_init(ptr);
++ else {
++ /* move interval timer back to list */
++ spin_lock(&vt_list->lock);
++ list_del_init(&event->entry);
++ list_add_sorted(event, &vt_list->list);
++ spin_unlock(&vt_list->lock);
++ }
++ }
+ }
+
+ /*
+- * Start the clock comparator on the current CPU
++ * Handler for the virtual CPU timer.
+ */
++static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code)
++{
++ int cpu;
++ __u64 next, delta;
++ struct list_head *ptr, *tmp;
++ struct vtimer_queue *vt_list;
++ struct vtimer_list *event;
++ /* the callback queue */
++ struct list_head cb_list;
++
++ INIT_LIST_HEAD(&cb_list);
++ cpu = smp_processor_id();
++ vt_list = &virt_cpu_timer[cpu];
++
++ /* walk timer list, fire all expired events */
++ spin_lock(&vt_list->lock);
++
++ if (vt_list->to_expire < VTIMER_MAX_SLICE)
++ vt_list->offset += vt_list->to_expire;
++
++ list_for_each_safe(ptr, tmp, &vt_list->list) {
++ event = list_entry(ptr, struct vtimer_list, entry);
++
++ if (event->expires > vt_list->offset)
++ /* found first unexpired event, leave */
++ break;
++
++ /* re-charge interval timer, we have to add the offset */
++ if (event->interval)
++ event->expires = event->interval + vt_list->offset;
++
++ /* move expired timer to the callback queue */
++ list_move_tail(ptr, &cb_list);
++ }
++ spin_unlock(&vt_list->lock);
++ do_callbacks(&cb_list, regs);
++
++ /* next event is first in list */
++ spin_lock(&vt_list->lock);
++ if (!list_empty(&vt_list->list)) {
++ ptr = vt_list->list.next;
++ event = list_entry(ptr, struct vtimer_list, entry);
++ next = event->expires - vt_list->offset;
++
++ /* add the expired time from this interrupt handler
++ * and the callback functions
++ */
++ asm volatile ("STPT %0" : "=m" (delta));
++ delta = 0xffffffffffffffffLL - delta + 1;
++ vt_list->offset += delta;
++ next -= delta;
++ } else {
++ vt_list->offset = 0;
++ next = VTIMER_MAX_SLICE;
++ }
++ spin_unlock(&vt_list->lock);
++ set_vtimer(next);
++}
++#endif
++
++/*
++ * Start the clock comparator and the virtual CPU timer
++ * on the current CPU
++ */
+ void init_cpu_timer(void)
+ {
+ unsigned long cr0;
++ __u64 timer;
++#ifdef CONFIG_VIRT_TIMER
++ struct vtimer_queue *vt_list;
++#endif
+
+- S390_lowcore.jiffy_timer = (__u64) jiffies * CLK_TICKS_PER_JIFFY;
+- S390_lowcore.jiffy_timer += init_timer_cc + CLK_TICKS_PER_JIFFY;
+- asm volatile ("SCKC %0" : : "m" (S390_lowcore.jiffy_timer));
+- /* allow clock comparator timer interrupt */
+- asm volatile ("STCTG 0,0,%0" : "=m" (cr0) : : "memory");
+- cr0 |= 0x800;
+- asm volatile ("LCTLG 0,0,%0" : : "m" (cr0) : "memory");
++ timer = init_timer_cc + (__u64) jiffies * CLK_TICKS_PER_JIFFY;
++ S390_lowcore.jiffy_timer = timer + CLK_TICKS_PER_JIFFY;
++ timer += CLK_TICKS_PER_JIFFY + CPU_DEVIATION;
++ asm volatile ("SCKC %0" : : "m" (timer));
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++ atomic_inc(&active_cpu_timer);
++#endif
++ /* allow clock comparator timer interrupt */
++ asm volatile ("STCTG 0,0,%0" : "=m" (cr0) : : "memory");
++ cr0 |= 0x800;
++ asm volatile ("LCTLG 0,0,%0" : : "m" (cr0) : "memory");
++
++#ifdef CONFIG_VIRT_TIMER
++ /* kick the virtual timer */
++ timer = VTIMER_MAX_SLICE;
++ asm volatile ("SPT %0" : : "m" (timer));
++ __ctl_store(cr0, 0, 0);
++ cr0 |= 0x400;
++ __ctl_load(cr0, 0, 0);
++
++ vt_list = &virt_cpu_timer[smp_processor_id()];
++ INIT_LIST_HEAD(&vt_list->list);
++ spin_lock_init(&vt_list->lock);
++ vt_list->to_expire = 0;
++ vt_list->offset = 0;
++ vt_list->idle = 0;
++#endif
+ }
+
+ /*
+@@ -170,38 +411,372 @@
+ */
+ void __init time_init(void)
+ {
+- __u64 set_time_cc;
++ __u64 set_time_cc;
+ int cc;
+
+- /* kick the TOD clock */
+- asm volatile ("STCK 0(%1)\n\t"
+- "IPM %0\n\t"
+- "SRL %0,28" : "=r" (cc) : "a" (&init_timer_cc)
++ /* kick the TOD clock */
++ asm volatile ("STCK 0(%1)\n\t"
++ "IPM %0\n\t"
++ "SRL %0,28" : "=r" (cc) : "a" (&init_timer_cc)
+ : "memory", "cc");
+- switch (cc) {
+- case 0: /* clock in set state: all is fine */
+- break;
+- case 1: /* clock in non-set state: FIXME */
+- printk("time_init: TOD clock in non-set state\n");
+- break;
+- case 2: /* clock in error state: FIXME */
+- printk("time_init: TOD clock in error state\n");
+- break;
+- case 3: /* clock in stopped or not-operational state: FIXME */
+- printk("time_init: TOD clock stopped/non-operational\n");
+- break;
+- }
++ switch (cc) {
++ case 0: /* clock in set state: all is fine */
++ break;
++ case 1: /* clock in non-set state: FIXME */
++ printk("time_init: TOD clock in non-set state\n");
++ break;
++ case 2: /* clock in error state: FIXME */
++ printk("time_init: TOD clock in error state\n");
++ break;
++ case 3: /* clock in stopped or not-operational state: FIXME */
++ printk("time_init: TOD clock stopped/non-operational\n");
++ break;
++ }
+
+ /* set xtime */
+- set_time_cc = init_timer_cc - 0x8126d60e46000000LL +
+- (0x3c26700LL*1000000*4096);
+- tod_to_timeval(set_time_cc, &xtime);
++ xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
++ set_time_cc = init_timer_cc - 0x8126d60e46000000LL +
++ (0x3c26700LL*1000000*4096);
++ tod_to_timeval(set_time_cc, &xtime);
+
+- /* request the 0x1004 external interrupt */
+- if (register_early_external_interrupt(0x1004, do_comparator_interrupt,
++ /* request the clock comparator external interrupt */
++ if (register_early_external_interrupt(0x1004, NULL,
++ &ext_int_info_cc) != 0)
++ panic("Couldn't request external interrupt 0x1004");
++
++#ifdef CONFIG_VIRT_TIMER
++ /* request the cpu timer external interrupt */
++ if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
+ &ext_int_info_timer) != 0)
+- panic("Couldn't request external interrupt 0x1004");
++ panic("Couldn't request external interrupt 0x1005");
++#endif
+
+- /* init CPU timer */
+- init_cpu_timer();
++ init_cpu_timer();
+ }
++
++#ifdef CONFIG_VIRT_TIMER
++void init_virt_timer(struct vtimer_list *timer)
++{
++ timer->magic = VTIMER_MAGIC;
++ timer->function = NULL;
++ INIT_LIST_HEAD(&timer->entry);
++ spin_lock_init(&timer->lock);
++}
++
++static inline int check_vtimer(struct vtimer_list *timer)
++{
++ if (timer->magic != VTIMER_MAGIC)
++ return -EINVAL;
++ return 0;
++}
++
++static inline int vtimer_pending(struct vtimer_list *timer)
++{
++ return (!list_empty(&timer->entry));
++}
++
++/*
++ * this function should only run on the specified CPU
++ */
++static void internal_add_vtimer(struct vtimer_list *timer)
++{
++ unsigned long flags;
++ __u64 done;
++ struct vtimer_list *event;
++ struct vtimer_queue *vt_list;
++
++ vt_list = &virt_cpu_timer[timer->cpu];
++ spin_lock_irqsave(&vt_list->lock, flags);
++
++ if (timer->cpu != smp_processor_id())
++ printk("internal_add_vtimer: BUG, running on wrong CPU");
++
++ /* if list is empty we only have to set the timer */
++ if (list_empty(&vt_list->list)) {
++ /* reset the offset, this may happen if the last timer was
++ * just deleted by mod_virt_timer and the interrupt
++ * didn't happen until here
++ */
++ vt_list->offset = 0;
++ goto fire;
++ }
++
++ /* save progress */
++ asm volatile ("STPT %0" : "=m" (done));
++
++ /* calculate completed work */
++ done = vt_list->to_expire - done + vt_list->offset;
++ vt_list->offset = 0;
++
++ list_for_each_entry(event, &vt_list->list, entry)
++ event->expires -= done;
++
++ fire:
++ list_add_sorted(timer, &vt_list->list);
++
++ /* get first element, which is the next vtimer slice */
++ event = list_entry(vt_list->list.next, struct vtimer_list, entry);
++
++ set_vtimer(event->expires);
++ spin_unlock_irqrestore(&vt_list->lock, flags);
++}
++
++static inline int prepare_vtimer(struct vtimer_list *timer)
++{
++ if (check_vtimer(timer) || !timer->function) {
++ printk("add_virt_timer: uninitialized timer\n");
++ return -EINVAL;
++ }
++
++ if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
++ printk("add_virt_timer: invalid timer expire value!\n");
++ return -EINVAL;
++ }
++
++ if (vtimer_pending(timer)) {
++ printk("add_virt_timer: timer pending\n");
++ return -EBUSY;
++ }
++
++ timer->cpu = smp_processor_id();
++ return 0;
++}
++
++/*
++ * add_virt_timer - add a one-shot virtual CPU timer
++ */
++void add_virt_timer(void *new)
++{
++ struct vtimer_list *timer;
++
++ timer = (struct vtimer_list *)new;
++
++ if (prepare_vtimer(timer) < 0)
++ return;
++
++ timer->interval = 0;
++ internal_add_vtimer(timer);
++}
++
++/*
++ * add_virt_timer_periodic - add an interval virtual CPU timer
++ */
++void add_virt_timer_periodic(void *new)
++{
++ struct vtimer_list *timer;
++
++ timer = (struct vtimer_list *)new;
++
++ if (prepare_vtimer(timer) < 0)
++ return;
++
++ timer->interval = timer->expires;
++ internal_add_vtimer(timer);
++}
++
++/*
++ * If we change a pending timer, the function must be called on the CPU
++ * where the timer runs, e.g. via smp_call_function_on().
++ *
++ * The original mod_timer adds the timer if it is not pending. For
++ * compatibility we do the same; the timer is added on the current CPU
++ * as a one-shot timer.
++ *
++ * returns whether it has modified a pending timer (1) or not (0)
++ */
++int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
++{
++ struct vtimer_queue *vt_list;
++ unsigned long flags;
++
++ if (check_vtimer(timer) || !timer->function) {
++ printk("mod_virt_timer: uninitialized timer\n");
++ return -EINVAL;
++ }
++
++ if (!expires || expires > VTIMER_MAX_SLICE) {
++ printk("mod_virt_timer: invalid expire range\n");
++ return -EINVAL;
++ }
++
++ /*
++ * This is a common optimization triggered by the
++ * networking code - if the timer is re-modified
++ * to be the same thing then just return:
++ */
++ if (timer->expires == expires && vtimer_pending(timer))
++ return 1;
++
++ /* disable interrupts before test if timer is pending */
++ vt_list = &virt_cpu_timer[smp_processor_id()];
++ spin_lock_irqsave(&vt_list->lock, flags);
++
++ /* if timer isn't pending add it on the current CPU */
++ if (!vtimer_pending(timer)) {
++ spin_unlock_irqrestore(&vt_list->lock, flags);
++ /* we do not activate an interval timer with mod_virt_timer */
++ timer->interval = 0;
++ timer->expires = expires;
++ timer->cpu = smp_processor_id();
++ internal_add_vtimer(timer);
++ return 0;
++ }
++
++ /* check if we run on the right CPU */
++ if (timer->cpu != smp_processor_id()) {
++ printk("mod_virt_timer: running on wrong CPU, check your code\n");
++ spin_unlock_irqrestore(&vt_list->lock, flags);
++ return -EINVAL;
++ }
++
++ list_del_init(&timer->entry);
++ timer->expires = expires;
++
++ /* also change the interval if we have an interval timer */
++ if (timer->interval)
++ timer->interval = expires;
++
++ /* the timer can't expire anymore so we can release the lock */
++ spin_unlock_irqrestore(&vt_list->lock, flags);
++ internal_add_vtimer(timer);
++ return 1;
++}
++
++/*
++ * delete a virtual timer
++ *
++ * returns whether the deleted timer was pending (1) or not (0)
++ */
++int del_virt_timer(struct vtimer_list *timer)
++{
++ unsigned long flags;
++ struct vtimer_queue *vt_list;
++
++ if (check_vtimer(timer)) {
++ printk("del_virt_timer: timer not initialized\n");
++ return -EINVAL;
++ }
++
++ /* check if timer is pending */
++ if (!vtimer_pending(timer))
++ return 0;
++
++ if (timer->cpu > smp_num_cpus) {
++ printk("del_virt_timer: CPU not present!\n");
++ return -1;
++ }
++
++ vt_list = &virt_cpu_timer[timer->cpu];
++ spin_lock_irqsave(&vt_list->lock, flags);
++
++ /* we don't interrupt a running timer, just let it expire! */
++ list_del_init(&timer->entry);
++
++ /* last timer removed */
++ if (list_empty(&vt_list->list)) {
++ vt_list->to_expire = 0;
++ vt_list->offset = 0;
++ }
++
++ spin_unlock_irqrestore(&vt_list->lock, flags);
++ return 1;
++}
++#endif
++
++#ifdef CONFIG_NO_IDLE_HZ
++
++/*
++ * Start the HZ tick on the current CPU.
++ * Only cpu_idle may call this function.
++ */
++void start_hz_timer(void)
++{
++ __u64 tmp;
++ __u32 ticks;
++
++ if (sysctl_hz_timer != 0)
++ return;
++
++ irq_enter(smp_processor_id(), 0);
++
++ /* Calculate how many ticks have passed */
++ asm volatile ("STCK 0(%0)" : : "a" (&tmp) : "memory", "cc");
++ tmp = tmp + CLK_TICKS_PER_JIFFY - S390_lowcore.jiffy_timer;
++ ticks = tmp / CLK_TICKS_PER_JIFFY;
++ S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY * (__u64) ticks;
++
++ /* Set the clock comparator to the next tick. */
++ tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
++ asm volatile ("SCKC %0" : : "m" (tmp));
++
++ /* Charge the ticks. */
++ if (ticks > 0) {
++#ifdef CONFIG_SMP
++ write_lock(&xtime_lock);
++ if (S390_lowcore.jiffy_timer > xtime_cc) {
++ __u32 xticks;
++
++ tmp = S390_lowcore.jiffy_timer - xtime_cc;
++ xticks = tmp / CLK_TICKS_PER_JIFFY;
++ xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
++ do_timer_ticks(xticks);
++ }
++ write_unlock(&xtime_lock);
++#else
++ do_timer_ticks(0, ticks);
++#endif
++ update_process_times_us(0, ticks);
++ }
++ irq_exit(smp_processor_id(), 0);
++}
++
++extern spinlock_t timerlist_lock;
++
++/*
++ * Stop the HZ tick on the current CPU.
++ * Only cpu_idle may call this function.
++ */
++void stop_hz_timer(void)
++{
++ __u64 timer;
++
++ if (sysctl_hz_timer != 0)
++ return;
++ if (atomic_read(&active_cpu_timer) == 0) {
++ /*
++ * The last active cpu is going to sleep. Setup the clock
++ * comparator for the next event if nothing on tq_timer
++ * is pending. If something is pending on tq_timer then
++ * don't change the clock comparator as it is setup for
++ * the next timer tick already.
++ */
++ if (!TQ_ACTIVE(tq_timer)) {
++ spin_lock(&timerlist_lock);
++ timer = (__u64) next_timer_event()->expires;
++ timer *= CLK_TICKS_PER_JIFFY;
++ timer += init_timer_cc;
++ asm volatile ("SCKC %0" : : "m" (timer));
++ spin_unlock(&timerlist_lock);
++ }
++ } else {
++ timer = (__u64) -1;
++ asm volatile ("SCKC %0" : : "m" (timer));
++ }
++}
++#endif
++
++void do_monitor_call(struct pt_regs *regs, long interruption_code)
++{
++ /* disable monitor call class 0 */
++ __ctl_clear_bit(8, 15);
++
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++ atomic_inc(&active_cpu_timer);
++#endif
++
++#ifdef CONFIG_VIRT_TIMER
++ start_cpu_timer();
++#endif
++#ifdef CONFIG_NO_IDLE_HZ
++ start_hz_timer();
++#endif
++}
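
The hunk above completes the virtual CPU timer machinery in arch/s390x/kernel/time.c: a per-cpu sorted list of vtimer_list events driven by the CPU timer (external interrupt 0x1005). A minimal caller sketch, assuming only the vtimer_list field names visible in the code above (the struct itself would be declared in asm/timer.h); my_tick, my_setup, and the expiry value are hypothetical:

/* Hypothetical user of the vtimer API added above; not part of the patch. */
#include <asm/timer.h>

static struct vtimer_list my_timer;

static void my_tick(unsigned long data, struct pt_regs *regs)
{
	/* invoked from do_cpu_timer_interrupt() on the owning CPU */
}

static void my_setup(void)
{
	init_virt_timer(&my_timer);		/* sets magic, clears function */
	my_timer.function = my_tick;
	my_timer.data = 0;
	my_timer.expires = 0x100000000ULL;	/* ~1s in CPU-timer units, <= VTIMER_MAX_SLICE */
	add_virt_timer_periodic(&my_timer);	/* interval = expires, on this CPU */
}
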
+=== arch/s390x/kernel/entry.S
+==================================================================
+--- arch/s390x/kernel/entry.S (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390x/kernel/entry.S (/trunk/2.4.27) (revision 52)
+@@ -722,7 +722,12 @@
+ .globl io_int_handler
+ io_int_handler:
+ SAVE_ALL __LC_IO_OLD_PSW,0
++ mc 0, 0
+ GET_CURRENT # load pointer to task_struct to R9
++ stck __LC_INT_CLOCK
++ clc __LC_INT_CLOCK(8),__LC_JIFFY_TIMER
++ jhe io_handle_tick
++io_call_handler:
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ llgh %r3,__LC_SUBCHANNEL_NR # load subchannel number
+ llgf %r4,__LC_IO_INT_PARM # load interruption parm
+@@ -780,14 +785,27 @@
+ larl %r14,io_leave
+ jg do_signal # return point is io_leave
+
++#
++# account tick
++#
++io_handle_tick:
++ la %r2,SP_PTREGS(%r15) # address of register-save area
++ larl %r14,io_call_handler
++ jg account_ticks
++
+ /*
+ * External interrupt handler routine
+ */
+ .globl ext_int_handler
+ ext_int_handler:
+ SAVE_ALL __LC_EXT_OLD_PSW,0
++ mc 0, 0
+ GET_CURRENT # load pointer to task_struct to R9
+ llgh %r6,__LC_EXT_INT_CODE # get interruption code
++ stck __LC_INT_CLOCK
++ clc __LC_INT_CLOCK(8),__LC_JIFFY_TIMER
++ jhe ext_handle_tick
++ext_call_handler:
+ lgr %r1,%r6 # calculate index = code & 0xff
+ nill %r1,0xff
+ sll %r1,3
+@@ -799,6 +817,8 @@
+ ch %r6,16(%r7) # compare external interrupt code
+ jne ext_int_next
+ lg %r1,8(%r7) # get handler address
++ ltgr %r1,%r1
++ jz ext_int_next
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ lgr %r3,%r6 # interruption code
+ basr %r14,%r1 # call handler
+@@ -808,12 +828,21 @@
+ jnz ext_int_loop
+ j io_return
+
++#
++# account tick
++#
++ext_handle_tick:
++ la %r2,SP_PTREGS(%r15) # address of register-save area
++ larl %r14,ext_call_handler
++ jg account_ticks
++
+ /*
+ * Machine check handler routines
+ */
+ .globl mcck_int_handler
+ mcck_int_handler:
+ SAVE_ALL __LC_MCK_OLD_PSW,0
++ mc 0, 0
+ brasl %r14,s390_do_machine_check
+ mcck_return:
+ RESTORE_ALL 0
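
Both interrupt entry points now compare the interrupt-time clock against the per-cpu jiffy timer before dispatching the handler. An illustrative C rendering of the new stck/clc/jhe sequence (a sketch, not code from the patch):

/* Rough C equivalent of the new entry.S tick check. */
__u64 now;
asm volatile ("STCK 0(%0)" : : "a" (&now) : "memory", "cc");
S390_lowcore.int_clock = now;
if (now >= S390_lowcore.jiffy_timer)
	account_ticks(regs);	/* catch up on missed HZ ticks first */
/* then dispatch the I/O or external interrupt as before */
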
+=== arch/s390x/kernel/traps.c
+==================================================================
+--- arch/s390x/kernel/traps.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390x/kernel/traps.c (/trunk/2.4.27) (revision 52)
+@@ -56,6 +56,9 @@
+ extern pgm_check_handler_t do_segment_exception;
+ extern pgm_check_handler_t do_region_exception;
+ extern pgm_check_handler_t do_page_exception;
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++extern pgm_check_handler_t do_monitor_call;
++#endif
+ #ifdef CONFIG_PFAULT
+ extern int pfault_init(void);
+ extern void pfault_fini(void);
+@@ -497,6 +500,9 @@
+ pgm_check_table[0x1C] = &privileged_op;
+ pgm_check_table[0x38] = &addressing_exception;
+ pgm_check_table[0x3B] = &do_region_exception;
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++ pgm_check_table[0x40] = &do_monitor_call;
++#endif
+ #ifdef CONFIG_PFAULT
+ if (MACHINE_IS_VM) {
+ /* request the 0x2603 external interrupt */
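
Program check 0x40 (monitor call) is wired to do_monitor_call() here. The idle-side counterpart is not part of this hunk; as a sketch, assuming cpu_idle mirrors what do_monitor_call() undoes, the idle loop would do roughly:

/* Hypothetical idle-path counterpart (not shown in this patch). */
stop_hz_timer();	/* park the clock comparator */
stop_cpu_timer();	/* park the virtual CPU timer */
__ctl_set_bit(8, 15);	/* enable monitor call class 0 */
/* load an enabled wait PSW; the next monitor call raises program
 * check 0x40, and do_monitor_call() above restarts both timers */
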
+=== arch/s390x/defconfig
+==================================================================
+--- arch/s390x/defconfig (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390x/defconfig (/trunk/2.4.27) (revision 52)
+@@ -39,8 +39,8 @@
+ CONFIG_QDIO=m
+ # CONFIG_QDIO_PERF_STATS is not set
+ CONFIG_IPL=y
+-# CONFIG_IPL_TAPE is not set
+-CONFIG_IPL_VM=y
++CONFIG_IPL_TAPE=y
++# CONFIG_IPL_VM is not set
+ CONFIG_NET=y
+ CONFIG_SYSVIPC=y
+ # CONFIG_BSD_PROCESS_ACCT is not set
+@@ -51,8 +51,52 @@
+ # CONFIG_PROCESS_DEBUG is not set
+ CONFIG_PFAULT=y
+ # CONFIG_SHARED_KERNEL is not set
++# CONFIG_VIRT_TIMER is not set
++# CONFIG_APPLDATA_BASE is not set
++# CONFIG_APPLDATA_MEM is not set
++# CONFIG_APPLDATA_OS is not set
++# CONFIG_APPLDATA_NET_SUM is not set
++CONFIG_CMM=m
++CONFIG_CMM_PROC=y
++# CONFIG_CMM_IUCV is not set
+
+ #
++# SCSI support
++#
++CONFIG_SCSI=m
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=m
++CONFIG_SD_EXTRA_DEVS=1000
++CONFIG_CHR_DEV_ST=m
++# CONFIG_CHR_DEV_OSST is not set
++CONFIG_BLK_DEV_SR=m
++# CONFIG_BLK_DEV_SR_VENDOR is not set
++CONFIG_SR_EXTRA_DEVS=10
++CONFIG_CHR_DEV_SG=m
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_DEBUG_QUEUES=y
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++
++#
++# SCSI low-level drivers
++#
++CONFIG_ZFCP=m
++CONFIG_ZFCP_HBAAPI=m
++
++#
++# PCMCIA SCSI adapter support
++#
++# CONFIG_SCSI_PCMCIA is not set
++
++#
+ # Block device drivers
+ #
+ CONFIG_BLK_DEV_LOOP=y
+@@ -61,6 +105,7 @@
+ CONFIG_BLK_DEV_RAM_SIZE=24576
+ CONFIG_BLK_DEV_INITRD=y
+ CONFIG_BLK_DEV_XPRAM=m
++CONFIG_DCSSBLK=m
+
+ #
+ # S/390 block device drivers
+@@ -68,6 +113,7 @@
+ CONFIG_DASD=y
+ CONFIG_DASD_ECKD=y
+ CONFIG_DASD_FBA=y
++CONFIG_S390_CMF=m
+
+ #
+ # Multi-device support (RAID and LVM)
+@@ -94,22 +140,24 @@
+ CONFIG_TN3270_CONSOLE=y
+ CONFIG_TN3215=y
+ CONFIG_TN3215_CONSOLE=y
+-CONFIG_HWC=y
+-CONFIG_HWC_CONSOLE=y
+-CONFIG_HWC_CPI=m
++CONFIG_SCLP=y
++CONFIG_SCLP_TTY=y
++CONFIG_SCLP_CONSOLE=y
++CONFIG_SCLP_VT220_TTY=y
++CONFIG_SCLP_VT220_CONSOLE=y
++CONFIG_SCLP_CPI=m
+ CONFIG_S390_TAPE=m
+
+ #
+ # S/390 tape interface support
+ #
+-CONFIG_S390_TAPE_CHAR=y
+ CONFIG_S390_TAPE_BLOCK=y
+
+ #
+ # S/390 tape hardware support
+ #
+-CONFIG_S390_TAPE_3490=y
+-CONFIG_S390_TAPE_3480=y
++CONFIG_S390_TAPE_34XX=m
++CONFIG_VMLOGRDR=m
+
+ #
+ # Network device drivers
+@@ -121,7 +169,6 @@
+ # CONFIG_TUN is not set
+ CONFIG_NET_ETHERNET=y
+ CONFIG_TR=y
+-# CONFIG_C7000 is not set
+ CONFIG_FDDI=y
+
+ #
+@@ -129,10 +176,29 @@
+ #
+ CONFIG_CHANDEV=y
+ CONFIG_HOTPLUG=y
++CONFIG_LCS=m
++CONFIG_QETH=m
++
++#
++# Gigabit Ethernet default settings
++#
++CONFIG_QETH_IPV6=y
++CONFIG_QETH_VLAN=y
++# CONFIG_QETH_PERF_STATS is not set
+ CONFIG_CTC=m
++# CONFIG_MPC is not set
+ CONFIG_IUCV=m
++CONFIG_NETIUCV=m
++CONFIG_SMSGIUCV=m
+
+ #
++# Miscellaneous
++#
++CONFIG_Z90CRYPT=m
++# CONFIG_NO_IDLE_HZ is not set
++# CONFIG_NO_IDLE_HZ_INIT is not set
++
++#
+ # Networking options
+ #
+ CONFIG_PACKET=y
+@@ -165,11 +231,6 @@
+ # CONFIG_IP_NF_ARPTABLES is not set
+ # CONFIG_IP_NF_COMPAT_IPCHAINS is not set
+ # CONFIG_IP_NF_COMPAT_IPFWADM is not set
+-
+-#
+-# IP: Virtual Server Configuration
+-#
+-# CONFIG_IP_VS is not set
+ CONFIG_IPV6=m
+
+ #
+@@ -177,6 +238,7 @@
+ #
+ # CONFIG_IP6_NF_QUEUE is not set
+ # CONFIG_IP6_NF_IPTABLES is not set
++CONFIG_SHARED_IPV6_CARDS=y
+ # CONFIG_KHTTPD is not set
+
+ #
+@@ -268,6 +330,7 @@
+ # CONFIG_QNX4FS_FS is not set
+ # CONFIG_QNX4FS_RW is not set
+ # CONFIG_ROMFS_FS is not set
++CONFIG_XIP2FS=m
+ CONFIG_EXT2_FS=y
+ # CONFIG_SYSV_FS is not set
+ # CONFIG_UDF_FS is not set
+=== arch/s390x/mm/init.c
+==================================================================
+--- arch/s390x/mm/init.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390x/mm/init.c (/trunk/2.4.27) (revision 52)
+@@ -172,7 +172,7 @@
+
+ void diag10(unsigned long addr)
+ {
+- if (addr >= 0x80000000)
++ if (addr >= 0x7ff00000)
+ return;
+ asm volatile ("sam31\n\t"
+ "diag %0,%0,0x10\n\t"
+=== arch/s390x/mm/Makefile
+==================================================================
+--- arch/s390x/mm/Makefile (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390x/mm/Makefile (/trunk/2.4.27) (revision 52)
+@@ -9,6 +9,8 @@
+
+ O_TARGET := mm.o
+
+-obj-y := init.o fault.o ioremap.o extable.o
++obj-y := init.o fault.o ioremap.o extable.o dcss.o
++obj-$(CONFIG_CMM) += cmm.o
++export-objs := dcss.o cmm.o
+
+ include $(TOPDIR)/Rules.make
+=== arch/s390x/mm/cmm.c
+==================================================================
+--- arch/s390x/mm/cmm.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390x/mm/cmm.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,448 @@
++/*
++ * arch/s390/mm/cmm.c
++ *
++ * S390 version
++ * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Martin Schwidefsky (schwidefsky at de.ibm.com)
++ *
++ * Collaborative memory management interface.
++ */
++
++#include <linux/config.h>
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/sysctl.h>
++#include <linux/ctype.h>
++
++#include <asm/pgalloc.h>
++#include <asm/uaccess.h>
++
++#include "../../../drivers/s390/net/smsgiucv.h"
++
++#define CMM_NR_PAGES ((PAGE_SIZE / sizeof(unsigned long)) - 2)
++
++struct cmm_page_array {
++ struct cmm_page_array *next;
++ unsigned long index;
++ unsigned long pages[CMM_NR_PAGES];
++};
++
++static long cmm_pages = 0;
++static long cmm_timed_pages = 0;
++static volatile long cmm_pages_target = 0;
++static volatile long cmm_timed_pages_target = 0;
++static long cmm_timeout_pages = 0;
++static long cmm_timeout_seconds = 0;
++
++static struct cmm_page_array *cmm_page_list = 0;
++static struct cmm_page_array *cmm_timed_page_list = 0;
++
++static unsigned long cmm_thread_active = 0;
++static struct tq_struct cmm_thread_starter;
++static wait_queue_head_t cmm_thread_wait;
++static struct timer_list cmm_timer;
++
++static void cmm_timer_fn(unsigned long);
++static void cmm_set_timer(void);
++
++static long
++cmm_strtoul(const char *cp, char **endp)
++{
++ unsigned int base = 10;
++
++ if (*cp == '0') {
++ base = 8;
++ cp++;
++ if ((*cp == 'x' || *cp == 'X') && isxdigit(cp[1])) {
++ base = 16;
++ cp++;
++ }
++ }
++ return simple_strtoul(cp, endp, base);
++}
++
++static long
++cmm_alloc_pages(long pages, long *counter, struct cmm_page_array **list)
++{
++ struct cmm_page_array *pa;
++ unsigned long page;
++
++ pa = *list;
++ while (pages) {
++ page = __get_free_page(GFP_NOIO);
++ if (!page)
++ break;
++ if (!pa || pa->index >= CMM_NR_PAGES) {
++ /* Need a new page for the page list. */
++ pa = (struct cmm_page_array *)
++ __get_free_page(GFP_NOIO);
++ if (!pa) {
++ free_page(page);
++ break;
++ }
++ pa->next = *list;
++ pa->index = 0;
++ *list = pa;
++ }
++ diag10((unsigned long) page);
++ pa->pages[pa->index++] = page;
++ (*counter)++;
++ pages--;
++ }
++ return pages;
++}
++
++static void
++cmm_free_pages(long pages, long *counter, struct cmm_page_array **list)
++{
++ struct cmm_page_array *pa;
++ unsigned long page;
++
++ pa = *list;
++ while (pages) {
++ if (!pa || pa->index <= 0)
++ break;
++ page = pa->pages[--pa->index];
++ if (pa->index == 0) {
++ pa = pa->next;
++ free_page((unsigned long) *list);
++ *list = pa;
++ }
++ free_page(page);
++ (*counter)--;
++ pages--;
++ }
++}
++
++static int
++cmm_thread(void *dummy)
++{
++ int rc;
++
++ daemonize();
++ reparent_to_init();
++ strcpy(current->comm, "cmmthread");
++ set_cpus_allowed(current, 1);
++ while (1) {
++ rc = wait_event_interruptible(cmm_thread_wait,
++ (cmm_pages != cmm_pages_target ||
++ cmm_timed_pages != cmm_timed_pages_target));
++ if (rc == -ERESTARTSYS) {
++ /* Got kill signal. End thread. */
++ clear_bit(0, &cmm_thread_active);
++ cmm_pages_target = cmm_pages;
++ cmm_timed_pages_target = cmm_timed_pages;
++ break;
++ }
++ if (cmm_pages_target > cmm_pages) {
++ if (cmm_alloc_pages(1, &cmm_pages, &cmm_page_list))
++ cmm_pages_target = cmm_pages;
++ } else if (cmm_pages_target < cmm_pages) {
++ cmm_free_pages(1, &cmm_pages, &cmm_page_list);
++ }
++ if (cmm_timed_pages_target > cmm_timed_pages) {
++ if (cmm_alloc_pages(1, &cmm_timed_pages,
++ &cmm_timed_page_list))
++ cmm_timed_pages_target = cmm_timed_pages;
++ } else if (cmm_timed_pages_target < cmm_timed_pages) {
++ cmm_free_pages(1, &cmm_timed_pages,
++ &cmm_timed_page_list);
++ }
++ if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer))
++ cmm_set_timer();
++ }
++ return 0;
++}
++
++static void
++cmm_start_thread(void)
++{
++ kernel_thread(cmm_thread, 0, 0);
++}
++
++static void
++cmm_kick_thread(void)
++{
++ if (!test_and_set_bit(0, &cmm_thread_active))
++ schedule_task(&cmm_thread_starter);
++ wake_up(&cmm_thread_wait);
++}
++
++static void
++cmm_set_timer(void)
++{
++ if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) {
++ if (timer_pending(&cmm_timer))
++ del_timer(&cmm_timer);
++ return;
++ }
++ if (timer_pending(&cmm_timer)) {
++ if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
++ return;
++ }
++ cmm_timer.function = cmm_timer_fn;
++ cmm_timer.data = 0;
++ cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
++ add_timer(&cmm_timer);
++}
++
++static void
++cmm_timer_fn(unsigned long ignored)
++{
++ long pages;
++
++ pages = cmm_timed_pages_target - cmm_timeout_pages;
++ if (pages < 0)
++ cmm_timed_pages_target = 0;
++ else
++ cmm_timed_pages_target = pages;
++ cmm_kick_thread();
++ cmm_set_timer();
++}
++
++void
++cmm_set_pages(long pages)
++{
++ cmm_pages_target = pages;
++ cmm_kick_thread();
++}
++
++long
++cmm_get_pages(void)
++{
++ return cmm_pages;
++}
++
++void
++cmm_add_timed_pages(long pages)
++{
++ cmm_timed_pages_target += pages;
++ cmm_kick_thread();
++}
++
++long
++cmm_get_timed_pages(void)
++{
++ return cmm_timed_pages;
++}
++
++void
++cmm_set_timeout(long pages, long seconds)
++{
++ cmm_timeout_pages = pages;
++ cmm_timeout_seconds = seconds;
++ cmm_set_timer();
++}
++
++static inline int
++cmm_skip_blanks(char *cp, char **endp)
++{
++ char *str;
++
++ for (str = cp; *str == ' ' || *str == '\t'; str++);
++ *endp = str;
++ return str != cp;
++}
++
++#ifdef CONFIG_CMM_PROC
++/* These will someday get removed. */
++#define VM_CMM_PAGES 1111
++#define VM_CMM_TIMED_PAGES 1112
++#define VM_CMM_TIMEOUT 1113
++
++static struct ctl_table cmm_table[];
++
++static int
++cmm_pages_handler(ctl_table *ctl, int write, struct file *filp,
++ void *buffer, size_t *lenp)
++{
++ char buf[16], *p;
++ long pages;
++ int len;
++
++ if (!*lenp || (filp->f_pos && !write)) {
++ *lenp = 0;
++ return 0;
++ }
++
++ if (write) {
++ len = *lenp;
++ if (copy_from_user(buf, buffer,
++ len > sizeof(buf) ? sizeof(buf) : len))
++ return -EFAULT;
++ buf[sizeof(buf) - 1] = '\0';
++ cmm_skip_blanks(buf, &p);
++ pages = cmm_strtoul(p, &p);
++ if (ctl == &cmm_table[0])
++ cmm_set_pages(pages);
++ else
++ cmm_add_timed_pages(pages);
++ } else {
++ if (ctl == &cmm_table[0])
++ pages = cmm_get_pages();
++ else
++ pages = cmm_get_timed_pages();
++ len = sprintf(buf, "%ld\n", pages);
++ if (len > *lenp)
++ len = *lenp;
++ if (copy_to_user(buffer, buf, len))
++ return -EFAULT;
++ }
++ *lenp = len;
++ filp->f_pos += len;
++ return 0;
++}
++
++static int
++cmm_timeout_handler(ctl_table *ctl, int write, struct file *filp,
++ void *buffer, size_t *lenp)
++{
++ char buf[64], *p;
++ long pages, seconds;
++ int len;
++
++ if (!*lenp || (filp->f_pos && !write)) {
++ *lenp = 0;
++ return 0;
++ }
++
++ if (write) {
++ len = *lenp;
++ if (copy_from_user(buf, buffer,
++ len > sizeof(buf) ? sizeof(buf) : len))
++ return -EFAULT;
++ buf[sizeof(buf) - 1] = '\0';
++ cmm_skip_blanks(buf, &p);
++ pages = cmm_strtoul(p, &p);
++ cmm_skip_blanks(p, &p);
++ seconds = cmm_strtoul(p, &p);
++ cmm_set_timeout(pages, seconds);
++ } else {
++ len = sprintf(buf, "%ld %ld\n",
++ cmm_timeout_pages, cmm_timeout_seconds);
++ if (len > *lenp)
++ len = *lenp;
++ if (copy_to_user(buffer, buf, len))
++ return -EFAULT;
++ }
++ *lenp = len;
++ filp->f_pos += len;
++ return 0;
++}
++
++static struct ctl_table cmm_table[] = {
++ {
++ .ctl_name = VM_CMM_PAGES,
++ .procname = "cmm_pages",
++ .mode = 0600,
++ .proc_handler = &cmm_pages_handler,
++ },
++ {
++ .ctl_name = VM_CMM_TIMED_PAGES,
++ .procname = "cmm_timed_pages",
++ .mode = 0600,
++ .proc_handler = &cmm_pages_handler,
++ },
++ {
++ .ctl_name = VM_CMM_TIMEOUT,
++ .procname = "cmm_timeout",
++ .mode = 0600,
++ .proc_handler = &cmm_timeout_handler,
++ },
++ { .ctl_name = 0 }
++};
++
++static struct ctl_table cmm_dir_table[] = {
++ {
++ .ctl_name = CTL_VM,
++ .procname = "vm",
++ .maxlen = 0,
++ .mode = 0555,
++ .child = cmm_table,
++ },
++ { .ctl_name = 0 }
++};
++#endif
++
++#ifdef CONFIG_CMM_IUCV
++#define SMSG_PREFIX "CMM"
++static void
++cmm_smsg_target(char *msg)
++{
++ long pages, seconds;
++
++ if (!cmm_skip_blanks(msg + strlen(SMSG_PREFIX), &msg))
++ return;
++ if (strncmp(msg, "SHRINK", 6) == 0) {
++ if (!cmm_skip_blanks(msg + 6, &msg))
++ return;
++ pages = cmm_strtoul(msg, &msg);
++ cmm_skip_blanks(msg, &msg);
++ if (*msg == '\0')
++ cmm_set_pages(pages);
++ } else if (strncmp(msg, "RELEASE", 7) == 0) {
++ if (!cmm_skip_blanks(msg + 7, &msg))
++ return;
++ pages = cmm_strtoul(msg, &msg);
++ cmm_skip_blanks(msg, &msg);
++ if (*msg == '\0')
++ cmm_add_timed_pages(pages);
++ } else if (strncmp(msg, "REUSE", 5) == 0) {
++ if (!cmm_skip_blanks(msg + 5, &msg))
++ return;
++ pages = cmm_strtoul(msg, &msg);
++ if (!cmm_skip_blanks(msg, &msg))
++ return;
++ seconds = cmm_strtoul(msg, &msg);
++ cmm_skip_blanks(msg, &msg);
++ if (*msg == '\0')
++ cmm_set_timeout(pages, seconds);
++ }
++}
++#endif
++
++struct ctl_table_header *cmm_sysctl_header;
++
++static int
++cmm_init (void)
++{
++#ifdef CONFIG_CMM_PROC
++ cmm_sysctl_header = register_sysctl_table(cmm_dir_table, 1);
++#endif
++#ifdef CONFIG_CMM_IUCV
++ smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
++#endif
++ cmm_thread_starter.routine = (void *) cmm_start_thread;
++ cmm_thread_starter.data = 0;
++ init_waitqueue_head(&cmm_thread_wait);
++ init_timer(&cmm_timer);
++ return 0;
++}
++
++static void
++cmm_exit(void)
++{
++ cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
++ cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
++#ifdef CONFIG_CMM_PROC
++ unregister_sysctl_table(cmm_sysctl_header);
++#endif
++#ifdef CONFIG_CMM_IUCV
++ smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
++#endif
++}
++
++module_init(cmm_init);
++module_exit(cmm_exit);
++
++EXPORT_SYMBOL(cmm_set_pages);
++EXPORT_SYMBOL(cmm_get_pages);
++EXPORT_SYMBOL(cmm_add_timed_pages);
++EXPORT_SYMBOL(cmm_get_timed_pages);
++EXPORT_SYMBOL(cmm_set_timeout);
++
++MODULE_LICENSE("GPL");
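
The module exposes the same controls three ways: the exported functions above, the /proc/sys/vm sysctls registered from cmm_table (CONFIG_CMM_PROC), and CP special messages with the "CMM" prefix (SHRINK/RELEASE/REUSE, CONFIG_CMM_IUCV). A hypothetical in-kernel user of the exported interface:

/* Hypothetical caller of the CMM API exported above. */
extern void cmm_set_pages(long);
extern long cmm_get_pages(void);
extern void cmm_set_timeout(long pages, long seconds);

static void shrink_guest(void)
{
	cmm_set_pages(1024);		/* target: hand 1024 pages back to VM */
	cmm_set_timeout(64, 300);	/* drop 64 timed pages every 300 s */
	printk("cmm currently holds %ld pages\n", cmm_get_pages());
}
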
+=== arch/s390x/mm/dcss.c
+==================================================================
+--- arch/s390x/mm/dcss.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390x/mm/dcss.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,504 @@
++/*
++ * File...........: arch/s390/mm/dcss.c
++ * Author(s)......: Steven Shultz <shultzss at us.ibm.com>
++ * Carsten Otte <cotte at de.ibm.com>
++ * Bugreports.to..: <Linux390 at de.ibm.com>
++ * thanks to Rob M van der Heij
++ * - he wrote the diag64 function
++ * (C) IBM Corporation 2002
++ */
++
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/spinlock.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/bootmem.h>
++#include <asm/page.h>
++#include <asm/ebcdic.h>
++#include <asm/errno.h>
++#include <asm/dcss.h>
++#include <asm/cpcmd.h>
++#include <linux/ctype.h>
++
++#define DCSS_DEBUG /* Debug messages on/off */
++
++#define DCSS_NAME "dcss"
++#ifdef DCSS_DEBUG
++#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSS_NAME " debug:" x)
++#else
++#define PRINT_DEBUG(x...) do {} while (0)
++#endif
++#define PRINT_INFO(x...) printk(KERN_INFO DCSS_NAME " info:" x)
++#define PRINT_WARN(x...) printk(KERN_WARNING DCSS_NAME " warning:" x)
++#define PRINT_ERR(x...) printk(KERN_ERR DCSS_NAME " error:" x)
++
++
++#define DCSS_LOADSHR 0x00
++#define DCSS_LOADNSR 0x04
++#define DCSS_PURGESEG 0x08
++#define DCSS_FINDSEG 0x0c
++#define DCSS_LOADNOLY 0x10
++#define DCSS_SEGEXT 0x18
++#define DCSS_QACTV 0x0c
++
++struct dcss_segment {
++ struct list_head list;
++ char dcss_name[8];
++ unsigned long start_addr;
++ unsigned long end;
++ atomic_t ref_count;
++ int dcss_attr;
++ int shared_attr;
++};
++
++static spinlock_t dcss_lock = SPIN_LOCK_UNLOCKED;
++static struct list_head dcss_list = LIST_HEAD_INIT(dcss_list);
++extern struct { unsigned long addr, size, type; } memory_chunk[16];
++
++/*
++ * Create the 8 bytes, ebcdic VM segment name from
++ * an ascii name.
++ */
++static void inline dcss_mkname(char *name, char *dcss_name)
++{
++ int i;
++
++ for (i = 0; i <= 8; i++) {
++ if (name[i] == '\0')
++ break;
++ dcss_name[i] = toupper(name[i]);
++ };
++ for (; i <= 8; i++)
++ dcss_name[i] = ' ';
++ ASCEBC(dcss_name, 8);
++}
++
++/*
++ * Perform a function on a dcss segment.
++ */
++static inline int
++dcss_diag (__u8 func, void *parameter,
++ unsigned long *ret1, unsigned long *ret2)
++{
++ unsigned long rx, ry;
++ int rc;
++
++ rx = (unsigned long) parameter;
++ ry = (unsigned long) func;
++ __asm__ __volatile__(
++#ifdef CONFIG_ARCH_S390X
++ " sam31\n" // switch to 31 bit
++ " diag %0,%1,0x64\n"
++ " sam64\n" // switch back to 64 bit
++#else
++ " diag %0,%1,0x64\n"
++#endif
++ " ipm %2\n"
++ " srl %2,28\n"
++ : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" );
++ *ret1 = rx;
++ *ret2 = ry;
++ return rc;
++}
++
++
++/* use to issue "extended" dcss query */
++static inline int
++dcss_diag_query(char *name, int *rwattr, int *shattr, unsigned long *segstart, unsigned long *segend)
++{
++ int i,j,rc;
++ unsigned long rx, ry;
++
++ typedef struct segentry {
++ char thisseg[8];
++ } segentry;
++
++ struct qout64 {
++ int segstart;
++ int segend;
++ int segcnt;
++ int segrcnt;
++ segentry segout[6];
++ };
++
++ struct qin64 {
++ char qopcode;
++ char rsrv1[3];
++ char qrcode;
++ char rsrv2[3];
++ char qname[8];
++ unsigned int qoutptr;
++ short int qoutlen;
++ };
++
++
++ struct qin64 *qinarea;
++ struct qout64 *qoutarea;
++
++ qinarea = (struct qin64*) get_free_page (GFP_DMA);
++ if (!qinarea) {
++ rc =-ENOMEM;
++ goto out;
++ }
++ qoutarea = (struct qout64*) get_free_page (GFP_DMA);
++ if (!qoutarea) {
++ rc = -ENOMEM;
++ free_page ((unsigned long) qinarea);
++ goto out;
++ }
++ memset (qinarea,0,PAGE_SIZE);
++ memset (qoutarea,0,PAGE_SIZE);
++
++ qinarea->qopcode = DCSS_QACTV; /* do a query for active
++ segments */
++ qinarea->qoutptr = (unsigned long) qoutarea;
++ qinarea->qoutlen = sizeof(struct qout64);
++
++ /* Move segment name into double word aligned
++ field and pad with blanks to 8 long.
++ */
++
++ for (i = j = 0 ; i < 8; i++) {
++ qinarea->qname[i] = (name[j] == '\0') ? ' ' : name[j++];
++ }
++
++ /* name already in EBCDIC */
++ /* ASCEBC ((void *)&qinarea.qname, 8); */
++
++ /* set the assembler variables */
++ rx = (unsigned long) qinarea;
++ ry = DCSS_SEGEXT; /* this is extended function */
++
++ /* issue diagnose x'64' */
++ __asm__ __volatile__(
++#ifdef CONFIG_ARCH_S390X
++ " sam31\n" // switch to 31 bit
++ " diag %0,%1,0x64\n"
++ " sam64\n" // switch back to 64 bit
++#else
++ " diag %0,%1,0x64\n"
++#endif
++ " ipm %2\n"
++ " srl %2,28\n"
++ : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" );
++
++ /* parse the query output area */
++ *segstart=qoutarea->segstart;
++ *segend=qoutarea->segend;
++
++ if (rc > 1)
++ {
++ *rwattr = 2;
++ *shattr = 2;
++ rc = 0;
++ goto free;
++ }
++
++ if (qoutarea->segcnt > 6)
++ {
++ *rwattr = 3;
++ *shattr = 3;
++ rc = 0;
++ goto free;
++ }
++
++ *rwattr = 1;
++ *shattr = 1;
++
++ for (i=0; i < qoutarea->segrcnt; i++) {
++ if (qoutarea->segout[i].thisseg[3] == 2 ||
++ qoutarea->segout[i].thisseg[3] == 3 ||
++ qoutarea->segout[i].thisseg[3] == 6 )
++ *rwattr = 0;
++ if (qoutarea->segout[i].thisseg[3] == 1 ||
++ qoutarea->segout[i].thisseg[3] == 3 ||
++ qoutarea->segout[i].thisseg[3] == 5 )
++ *shattr = 0;
++ } /* end of for statement */
++ rc = 0;
++ free:
++ free_page ((unsigned long) qoutarea);
++ free_page ((unsigned long) qinarea);
++ out:
++ return rc;
++}
++
++/*
++ * Load a DCSS segment via the diag 0x64.
++ */
++int segment_load(char *name, int segtype, unsigned long *addr,
++ unsigned long *end)
++{
++ char dcss_name[8];
++ struct list_head *l;
++ struct dcss_segment *seg, *tmp;
++ unsigned long dummy;
++ unsigned long segstart, segend;
++ int rc = 0,i;
++ int initrc = 0;
++ int rwattr, shattr;
++
++ if (!MACHINE_IS_VM)
++ return -ENOSYS;
++ dcss_mkname(name, dcss_name);
++ /* search for the dcss in list of currently loaded segments */
++ spin_lock(&dcss_lock);
++ seg = NULL;
++ list_for_each(l, &dcss_list) {
++ tmp = list_entry(l, struct dcss_segment, list);
++ if (memcmp(tmp->dcss_name, dcss_name, 8) == 0) {
++ seg = tmp;
++ break;
++ }
++ }
++
++ if (seg == NULL) {
++ /* find out the attributes of this
++ shared segment */
++ dcss_diag_query(dcss_name, &rwattr, &shattr, &segstart, &segend);
++ /* does segment collide with main memory? */
++ for (i=0; i<16; i++) {
++ if (memory_chunk[i].type != 0)
++ continue;
++ if (memory_chunk[i].addr > segend)
++ continue;
++ if (memory_chunk[i].addr + memory_chunk[i].size <= segstart)
++ continue;
++ spin_unlock(&dcss_lock);
++ return -ENOENT;
++ }
++ /* or does it collide with other (loaded) segments? */
++ list_for_each(l, &dcss_list) {
++ tmp = list_entry(l, struct dcss_segment, list);
++ if ((segstart <= tmp->end && segstart >= tmp->start_addr) ||
++ (segend <= tmp->end && segend >= tmp->start_addr) ||
++ (segstart <= tmp->start_addr && segend >= tmp->end)) {
++ PRINT_ERR("Segment Overlap!\n");
++ spin_unlock(&dcss_lock);
++ return -ENOENT;
++ }
++ }
++
++ /* do case statement on segtype */
++ /* if asking for shared ro,
++ shared rw works */
++ /* if asking for exclusive ro,
++ exclusive rw works */
++
++ switch(segtype) {
++ case SEGMENT_SHARED_RO:
++ if (shattr > 1 || rwattr > 1) {
++ spin_unlock(&dcss_lock);
++ return -ENOENT;
++ } else {
++ if (shattr == 0 && rwattr == 0)
++ rc = SEGMENT_EXCLUSIVE_RO;
++ if (shattr == 0 && rwattr == 1)
++ rc = SEGMENT_EXCLUSIVE_RW;
++ if (shattr == 1 && rwattr == 0)
++ rc = SEGMENT_SHARED_RO;
++ if (shattr == 1 && rwattr == 1)
++ rc = SEGMENT_SHARED_RW;
++ }
++ break;
++ case SEGMENT_SHARED_RW:
++ if (shattr > 1 || rwattr != 1) {
++ spin_unlock(&dcss_lock);
++ return -ENOENT;
++ } else {
++ if (shattr == 0)
++ rc = SEGMENT_EXCLUSIVE_RW;
++ if (shattr == 1)
++ rc = SEGMENT_SHARED_RW;
++ }
++ break;
++
++ case SEGMENT_EXCLUSIVE_RO:
++ if (shattr > 0 || rwattr > 1) {
++ spin_unlock(&dcss_lock);
++ return -ENOENT;
++ } else {
++ if (rwattr == 0)
++ rc = SEGMENT_EXCLUSIVE_RO;
++ if (rwattr == 1)
++ rc = SEGMENT_EXCLUSIVE_RW;
++ }
++ break;
++
++ case SEGMENT_EXCLUSIVE_RW:
++/* if (shattr != 0 || rwattr != 1) {
++ spin_unlock(&dcss_lock);
++ return -ENOENT;
++ } else {
++*/
++ rc = SEGMENT_EXCLUSIVE_RW;
++// }
++ break;
++
++ default:
++ spin_unlock(&dcss_lock);
++ return -ENOENT;
++ } /* end switch */
++
++ seg = kmalloc(sizeof(struct dcss_segment), GFP_DMA);
++ if (seg != NULL) {
++ memcpy(seg->dcss_name, dcss_name, 8);
++ if (rc == SEGMENT_EXCLUSIVE_RW) {
++ if (dcss_diag(DCSS_LOADNSR, seg->dcss_name,
++ &seg->start_addr, &seg->end) == 0) {
++ if (seg->end < max_low_pfn*PAGE_SIZE ) {
++ atomic_set(&seg->ref_count, 1);
++ list_add(&seg->list, &dcss_list);
++ *addr = seg->start_addr;
++ *end = seg->end;
++ seg->dcss_attr = rc;
++ if (shattr == 1 && rwattr == 1)
++ seg->shared_attr = SEGMENT_SHARED_RW;
++ else if (shattr == 1 && rwattr == 0)
++ seg->shared_attr = SEGMENT_SHARED_RO;
++ else
++ seg->shared_attr = SEGMENT_EXCLUSIVE_RW;
++ } else {
++ dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
++ kfree (seg);
++ rc = -ENOENT;
++ }
++ } else {
++ kfree(seg);
++ rc = -ENOENT;
++ }
++ goto out;
++ }
++ if (dcss_diag(DCSS_LOADNOLY, seg->dcss_name,
++ &seg->start_addr, &seg->end) == 0) {
++ if (seg->end < max_low_pfn*PAGE_SIZE ) {
++ atomic_set(&seg->ref_count, 1);
++ list_add(&seg->list, &dcss_list);
++ *addr = seg->start_addr;
++ *end = seg->end;
++ seg->dcss_attr = rc;
++ seg->shared_attr = rc;
++ } else {
++ dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
++ kfree (seg);
++ rc = -ENOENT;
++ }
++ } else {
++ kfree(seg);
++ rc = -ENOENT;
++ }
++ } else rc = -ENOMEM;
++ } else {
++ /* found */
++ if ((segtype == SEGMENT_EXCLUSIVE_RW) && (seg->dcss_attr != SEGMENT_EXCLUSIVE_RW)) {
++ PRINT_ERR("Segment already loaded in other mode than EXCLUSIVE_RW!\n");
++ rc = -EPERM;
++ goto out;
++ /* reload segment in exclusive mode */
++/* dcss_diag(DCSS_LOADNSR, seg->dcss_name,
++ &seg->start_addr, &seg->end);
++ seg->dcss_attr = SEGMENT_EXCLUSIVE_RW;*/
++ }
++ if ((segtype != SEGMENT_EXCLUSIVE_RW) && (seg->dcss_attr == SEGMENT_EXCLUSIVE_RW)) {
++ PRINT_ERR("Segment already loaded in EXCLUSIVE_RW mode!\n");
++ rc = -EPERM;
++ goto out;
++ }
++ atomic_inc(&seg->ref_count);
++ *addr = seg->start_addr;
++ *end = seg->end;
++ rc = seg->dcss_attr;
++ }
++out:
++ spin_unlock(&dcss_lock);
++ return rc;
++}
++
++/*
++ * Decrease the use count of a DCSS segment and remove
++ * it from the address space if nobody is using it
++ * any longer.
++ */
++void segment_unload(char *name)
++{
++ char dcss_name[8];
++ unsigned long dummy;
++ struct list_head *l,*l_tmp;
++ struct dcss_segment *seg;
++
++ if (!MACHINE_IS_VM)
++ return;
++ dcss_mkname(name, dcss_name);
++ spin_lock(&dcss_lock);
++ list_for_each_safe(l, l_tmp, &dcss_list) {
++ seg = list_entry(l, struct dcss_segment, list);
++ if (memcmp(seg->dcss_name, dcss_name, 8) == 0) {
++ if (atomic_dec_return(&seg->ref_count) == 0) {
++ /* Last user of the segment is
++ gone. */
++ list_del(&seg->list);
++ dcss_diag(DCSS_PURGESEG, seg->dcss_name,
++ &dummy, &dummy);
++ kfree(seg);
++ }
++ break;
++ }
++ }
++ spin_unlock(&dcss_lock);
++}
++
++/*
++ * Replace an existing DCSS segment, so that machines
++ * that load it anew will see the new version.
++ */
++void segment_replace(char *name)
++{
++ char dcss_name[8];
++ struct list_head *l;
++ struct dcss_segment *seg;
++ int mybeg = 0;
++ int myend = 0;
++ char mybuff1[80];
++ char mybuff2[80];
++
++ if (!MACHINE_IS_VM)
++ return;
++ dcss_mkname(name, dcss_name);
++
++ memset (mybuff1, 0, sizeof(mybuff1));
++ memset (mybuff2, 0, sizeof(mybuff2));
++
++ spin_lock(&dcss_lock);
++ list_for_each(l, &dcss_list) {
++ seg = list_entry(l, struct dcss_segment, list);
++ if (memcmp(seg->dcss_name, dcss_name, 8) == 0) {
++ mybeg = seg->start_addr >> 12;
++ myend = (seg->end) >> 12;
++ if (seg->shared_attr == SEGMENT_EXCLUSIVE_RW)
++ sprintf(mybuff1, "DEFSEG %s %X-%X EW",
++ name, mybeg, myend);
++ if (seg->shared_attr == SEGMENT_EXCLUSIVE_RO)
++ sprintf(mybuff1, "DEFSEG %s %X-%X RO",
++ name, mybeg, myend);
++ if (seg->shared_attr == SEGMENT_SHARED_RW)
++ sprintf(mybuff1, "DEFSEG %s %X-%X SW",
++ name, mybeg, myend);
++ if (seg->shared_attr == SEGMENT_SHARED_RO)
++ sprintf(mybuff1, "DEFSEG %s %X-%X SR",
++ name, mybeg, myend);
++ spin_unlock(&dcss_lock);
++ sprintf(mybuff2, "SAVESEG %s", name);
++ cpcmd(mybuff1, NULL, 80);
++ cpcmd(mybuff2, NULL, 80);
++ break;
++ }
++
++ }
++ if (myend == 0) spin_unlock(&dcss_lock);
++}
++
++EXPORT_SYMBOL(segment_load);
++EXPORT_SYMBOL(segment_unload);
++EXPORT_SYMBOL(segment_replace);
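
segment_load() returns the segment type actually granted (possibly a stronger one than requested, per the switch statement above) or a negative errno. A sketch of a hypothetical caller in the style of the dcssblk driver enabled in the defconfig; the name MYDCSS and use_dcss are placeholders:

/* Hypothetical caller of the DCSS API exported above. */
static int use_dcss(void)
{
	unsigned long start, end;
	int type;

	type = segment_load("MYDCSS", SEGMENT_SHARED_RO, &start, &end);
	if (type < 0)
		return type;	/* -ENOSYS, -ENOENT, -ENOMEM or -EPERM */
	/* [start, end] now maps the segment; type is the granted mode */
	segment_unload("MYDCSS");	/* drop the reference again */
	return 0;
}
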
+=== arch/s390x/config.in
+==================================================================
+--- arch/s390x/config.in (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390x/config.in (/trunk/2.4.27) (revision 52)
+@@ -65,9 +65,35 @@
+ bool 'Show crashed user process info' CONFIG_PROCESS_DEBUG
+ bool 'Pseudo page fault support' CONFIG_PFAULT
+ bool 'VM shared kernel support' CONFIG_SHARED_KERNEL
++bool 'No HZ timer ticks in idle' CONFIG_NO_IDLE_HZ
++if [ "$CONFIG_NO_IDLE_HZ" = "y" ] ; then
++ bool ' Idle HZ timer on by default' CONFIG_NO_IDLE_HZ_INIT
++fi
++bool 'Virtual CPU timer support' CONFIG_VIRT_TIMER
++dep_bool 'Linux - VM Monitor Stream, base infrastructure' CONFIG_APPLDATA_BASE \
++$CONFIG_PROC_FS $CONFIG_VIRT_TIMER
++dep_tristate ' Monitor memory management statistics' CONFIG_APPLDATA_MEM $CONFIG_APPLDATA_BASE
++dep_tristate ' Monitor OS statistics' CONFIG_APPLDATA_OS $CONFIG_APPLDATA_BASE
++dep_tristate ' Monitor overall network statistics' CONFIG_APPLDATA_NET_SUM $CONFIG_APPLDATA_BASE
++tristate 'Collaborative memory management' CONFIG_CMM
++if [ "$CONFIG_CMM" != "n" ]; then
++ dep_bool '/proc interface to cooperative memory management' CONFIG_CMM_PROC $CONFIG_PROC_FS
++ if [ "$CONFIG_SMSGIUCV" = "y" -o "$CONFIG_SMSGIUCV" = "$CONFIG_CMM" ]; then
++ bool 'IUCV special message interface to cooperative memory management' CONFIG_CMM_IUCV
++ fi
++fi
+ endmenu
+
++mainmenu_option next_comment
++comment 'SCSI support'
+
++tristate 'SCSI support' CONFIG_SCSI
++
++if [ "$CONFIG_SCSI" != "n" ]; then
++ source drivers/scsi/Config.in
++fi
++endmenu
++
+ source drivers/s390/Config.in
+
+ if [ "$CONFIG_NET" = "y" ]; then
+=== arch/s390x/Makefile
+==================================================================
+--- arch/s390x/Makefile (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390x/Makefile (/trunk/2.4.27) (revision 52)
+@@ -24,19 +24,22 @@
+ endif
+ MODFLAGS += -fpic
+
++CFLAGS_ARCH := -m64
+ CFLAGS_PIPE := -pipe
+ CFLAGS_NSR := -fno-strength-reduce
+-CFLAGS := $(CFLAGS) $(CFLAGS_PIPE) $(CFLAGS_NSR)
++CFLAGS := $(CFLAGS) $(CFLAGS_ARCH) $(CFLAGS_PIPE) $(CFLAGS_NSR)
++AFLAGS := $(AFLAGS) $(CFLAGS_ARCH)
+
+ HEAD := arch/s390x/kernel/head.o arch/s390x/kernel/init_task.o
+
+ SUBDIRS := $(SUBDIRS) arch/s390x/mm arch/s390x/kernel arch/s390x/lib \
+- drivers/s390
+-CORE_FILES := arch/s390x/mm/mm.o arch/s390x/kernel/kernel.o $(CORE_FILES)
++ arch/s390/appldata drivers/s390
++CORE_FILES := arch/s390x/mm/mm.o arch/s390x/kernel/kernel.o \
++ arch/s390/appldata/appldata.o $(CORE_FILES)
+ DRIVERS := $(DRIVERS) drivers/s390/io.o
+ LIBS := $(TOPDIR)/arch/s390x/lib/lib.a $(LIBS) $(TOPDIR)/arch/s390x/lib/lib.a
+
+-all: image listing
++all: image
+
+ listing: vmlinux
+ @$(MAKEBOOT) listing
+@@ -44,6 +47,9 @@
+ arch/s390x/kernel: dummy
+ $(MAKE) linuxsubdirs SUBDIRS=arch/s390x/kernel
+
++arch/s390/appldata: dummy
++ $(MAKE) linuxsubdirs SUBDIRS=arch/s390/appldata
++
+ arch/s390x/mm: dummy
+ $(MAKE) linuxsubdirs SUBDIRS=arch/s390x/mm
+
+=== arch/s390/kernel/time.c
+==================================================================
+--- arch/s390/kernel/time.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/kernel/time.c (/trunk/2.4.27) (revision 52)
+@@ -4,8 +4,8 @@
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Hartmut Penner (hp at de.ibm.com),
+- * Martin Schwidefsky (schwidefsky at de.ibm.com),
+- * Denis Joseph Barrow (djbarrow at de.ibm.com,barrow_dj at yahoo.com)
++ * Martin Schwidefsky (schwidefsky at de.ibm.com),
++ * Denis Joseph Barrow (djbarrow at de.ibm.com,barrow_dj at yahoo.com)
+ *
+ * Derived from "arch/i386/kernel/time.c"
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+@@ -26,38 +26,67 @@
+
+ #include <asm/uaccess.h>
+ #include <asm/delay.h>
+-#include <asm/s390_ext.h>
+-
+ #include <linux/timex.h>
+ #include <linux/config.h>
+-
++#include <asm/s390_ext.h>
+ #include <asm/irq.h>
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++#include <asm/timer.h>
++#endif
+
+ /* change this if you have some constant time drift */
+-#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
++#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
+ #define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
+
++/*
++ * Create a small time difference between the timer interrupts
++ * on the different cpus to avoid lock contention.
++ */
++#define CPU_DEVIATION (smp_processor_id() << 12)
++
+ #define TICK_SIZE tick
+
+-static ext_int_info_t ext_int_info_timer;
+-static uint64_t init_timer_cc;
++static ext_int_info_t ext_int_info_cc;
+
++static u64 init_timer_cc;
++static u64 xtime_cc;
++
+ extern rwlock_t xtime_lock;
+ extern unsigned long wall_jiffies;
+
++#ifdef CONFIG_VIRT_TIMER
++static ext_int_info_t ext_int_info_timer;
++static struct vtimer_queue virt_cpu_timer[NR_CPUS];
++#define VTIMER_MAGIC (0x4b87ad6e + 1)
++#endif
++
++#ifdef CONFIG_NO_IDLE_HZ
++
++#ifdef CONFIG_NO_IDLE_HZ_INIT
++int sysctl_hz_timer = 0;
++#else
++int sysctl_hz_timer = 1;
++#endif
++
++#endif
++
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++atomic_t active_cpu_timer = ATOMIC_INIT(0);
++#endif
++
+ void tod_to_timeval(__u64 todval, struct timeval *xtime)
+ {
+- const int high_bit = 0x80000000L;
+- const int c_f4240 = 0xf4240L;
+- const int c_7a120 = 0x7a120;
++ const int high_bit = 0x80000000L;
++ const int c_f4240 = 0xf4240L;
++ const int c_7a120 = 0x7a120;
+ /* We have to divide the 64 bit value todval by 4096
+ * (because the 2^12 bit is the one that changes every
+- * microsecond) and then split it into seconds and
+- * microseconds. A value of max (2^52-1) divided by
+- * the value 0xF4240 can yield a max result of approx
+- * (2^32.068). Thats to big to fit into a signed int
++ * microsecond) and then split it into seconds and
++ * microseconds. A value of max (2^52-1) divided by
++ * the value 0xF4240 can yield a max result of approx
++ * (2^32.068). That's too big to fit into a signed int
+ * ... hacking time!
+- */
++ */
+ asm volatile ("L 2,%1\n\t"
+ "LR 3,2\n\t"
+ "SRL 2,12\n\t"
+@@ -70,12 +99,12 @@
+ "JL .+12\n\t"
+ "S 2,%2\n\t"
+ "L 4,%3\n\t"
+- "D 2,%4\n\t"
++ "D 2,%4\n\t"
+ "OR 3,4\n\t"
+ "ST 2,%O0+4(%R0)\n\t"
+ "ST 3,%0"
+ : "=m" (*xtime) : "m" (todval),
+- "m" (c_7a120), "m" (high_bit), "m" (c_f4240)
++ "m" (c_7a120), "m" (high_bit), "m" (c_f4240)
+ : "cc", "memory", "2", "3", "4" );
+ }
+
+@@ -83,8 +112,8 @@
+ {
+ __u64 now;
+
+- asm ("STCK 0(%0)" : : "a" (&now) : "memory", "cc");
+- now = (now - init_timer_cc) >> 12;
++ asm volatile ("STCK 0(%0)" : : "a" (&now) : "memory", "cc");
++ now = (now - init_timer_cc) >> 12;
+ /* We require the offset from the latest update of xtime */
+ now -= (__u64) wall_jiffies*USECS_PER_JIFFY;
+ return (unsigned long) now;
+@@ -114,7 +143,6 @@
+
+ void do_settimeofday(struct timeval *tv)
+ {
+-
+ write_lock_irq(&xtime_lock);
+ /* This is revolting. We need to set the xtime.tv_usec
+ * correctly. However, the value in this location is
+@@ -137,58 +165,280 @@
+ write_unlock_irq(&xtime_lock);
+ }
+
++static inline __u32 div64_32(__u64 dividend, __u32 divisor)
++{
++ register_pair rp;
++
++ rp.pair = dividend;
++ asm ("dr %0,%1" : "+d" (rp) : "d" (divisor));
++ return rp.subreg.odd;
++}
++
+ /*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
++void account_ticks(struct pt_regs *regs)
++{
++ int cpu = smp_processor_id();
++ __u64 tmp;
++ __u32 ticks;
+
++ /* Calculate how many ticks have passed. */
++ tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer;
++ if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
++ ticks = div64_32(tmp >> 1, CLK_TICKS_PER_JIFFY >> 1) + 1;
++ S390_lowcore.jiffy_timer +=
++ CLK_TICKS_PER_JIFFY * (__u64) ticks;
++ } else if (tmp > CLK_TICKS_PER_JIFFY) {
++ ticks = 2;
++ S390_lowcore.jiffy_timer += 2*CLK_TICKS_PER_JIFFY;
++ } else {
++ ticks = 1;
++ S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
++ }
++
++ /* set clock comparator for next tick */
++ tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
++ asm volatile ("SCKC %0" : : "m" (tmp));
++
++ irq_enter(cpu, 0);
++
+ #ifdef CONFIG_SMP
+-extern __u16 boot_cpu_addr;
++ /*
++ * Do not rely on the boot cpu to do the calls to do_timer.
++ * Spread it over all cpus instead.
++ */
++ write_lock(&xtime_lock);
++ if (S390_lowcore.jiffy_timer > xtime_cc) {
++ __u32 xticks;
++
++ tmp = S390_lowcore.jiffy_timer - xtime_cc;
++ if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
++ xticks = div64_32(tmp >> 1, CLK_TICKS_PER_JIFFY >> 1);
++ xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
++ } else {
++ xticks = 1;
++ xtime_cc += CLK_TICKS_PER_JIFFY;
++ }
++ while (xticks--)
++ do_timer(regs);
++ }
++ write_unlock(&xtime_lock);
++ while (ticks--)
++ update_process_times(user_mode(regs));
++#else
++ while (ticks--)
++ do_timer(regs);
+ #endif
++ irq_exit(cpu, 0);
++}
+
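A worked example of the lost-tick handling in account_ticks(), assuming HZ == 100 so that CLK_TICKS_PER_JIFFY == (10000 << 12) == 40960000 clock units: if interrupts were held off for a while and

    tmp == 3.5 * CLK_TICKS_PER_JIFFY

the first branch yields

    ticks == div64_32(tmp >> 1, CLK_TICKS_PER_JIFFY >> 1) + 1
          == 3 + 1 == 4     /* the scheduled tick plus three missed ones */

and jiffy_timer advances by four full ticks, so the comparator is re-armed at the next whole tick boundary and the do_timer()/update_process_times() loops charge every elapsed jiffy instead of dropping them.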
+-static void do_comparator_interrupt(struct pt_regs *regs, __u16 error_code)
++#ifdef CONFIG_VIRT_TIMER
++void start_cpu_timer(void)
+ {
+- int cpu = smp_processor_id();
++ struct vtimer_queue *vt_list;
+
+- irq_enter(cpu, 0);
++ vt_list = &virt_cpu_timer[smp_processor_id()];
++ set_vtimer(vt_list->idle);
++}
+
++int stop_cpu_timer(void)
++{
++ __u64 done;
++ struct vtimer_queue *vt_list;
++
++ vt_list = &virt_cpu_timer[smp_processor_id()];
++
++ /* store progress */
++ asm volatile ("STPT %0" : "=m" (done));
++
+ /*
+- * set clock comparator for next tick
++ * If done is negative, we do not stop the CPU timer
++ * because we will instantly get an interrupt that
++ * will start the CPU timer again.
+ */
+- S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
+- asm volatile ("SCKC %0" : : "m" (S390_lowcore.jiffy_timer));
++ if (done & 1LL<<63)
++ return 1;
++ else
++ vt_list->offset += vt_list->to_expire - done;
+
+-#ifdef CONFIG_SMP
+- if (S390_lowcore.cpu_data.cpu_addr == boot_cpu_addr)
+- write_lock(&xtime_lock);
++ /* save the actual expire value */
++ vt_list->idle = done;
+
+- update_process_times(user_mode(regs));
++ /*
++ * We cannot halt the CPU timer; instead we write a value that
++ * nearly never expires (only after about 71 years) and restore
++ * the stored expire value when the timer is continued.
++ */
++ set_vtimer(VTIMER_MAX_SLICE);
++ return 0;
++}
+
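The 71 years in the comment follow from the timer format, assuming VTIMER_MAX_SLICE (defined in asm/timer.h, not shown in this diff) is on the order of 2^63 clock units:

    2^63 units / 4096 units per us == 2^51 us
                                   ~= 2.25 * 10^15 us ~= 71.4 years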
+- if (S390_lowcore.cpu_data.cpu_addr == boot_cpu_addr) {
+- do_timer(regs);
+- write_unlock(&xtime_lock);
++void set_vtimer(__u64 expires)
++{
++ asm volatile ("SPT %0" : : "m" (expires));
++
++ /* store expire time for this CPU timer */
++ virt_cpu_timer[smp_processor_id()].to_expire = expires;
++}
++
++/*
++ * Sorted add to a list. List is linear searched until first bigger
++ * element is found.
++ */
++void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
++{
++ struct vtimer_list *event;
++
++ list_for_each_entry(event, head, entry) {
++ if (event->expires > timer->expires) {
++ list_add_tail(&timer->entry, &event->entry);
++ return;
++ }
+ }
+-#else
+- do_timer(regs);
+-#endif
++ list_add_tail(&timer->entry, head);
++}
+
+- irq_exit(cpu, 0);
++/*
++ * Do the callback functions of expired vtimer events.
++ * Called from within the interrupt handler.
++ */
++static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs)
++{
++ struct vtimer_queue *vt_list;
++ struct vtimer_list *event;
++ struct list_head *ptr, *tmp;
++ void (*fn)(unsigned long, struct pt_regs*);
++ unsigned long data;
++
++ if (list_empty(cb_list))
++ return;
++
++ vt_list = &virt_cpu_timer[smp_processor_id()];
++
++ list_for_each_safe(ptr, tmp, cb_list) {
++ event = list_entry(ptr, struct vtimer_list, entry);
++
++ fn = event->function;
++ data = event->data;
++ fn(data, regs);
++
++ if (!event->interval)
++ /* delete one shot timer */
++ list_del_init(ptr);
++ else {
++ /* move interval timer back to list */
++ spin_lock(&vt_list->lock);
++ list_del_init(&event->entry);
++ list_add_sorted(event, &vt_list->list);
++ spin_unlock(&vt_list->lock);
++ }
++ }
+ }
+
+ /*
+- * Start the clock comparator on the current CPU
++ * Handler for the virtual CPU timer.
+ */
++static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code)
++{
++ int cpu;
++ __u64 next, delta;
++ struct list_head *ptr, *tmp;
++ struct vtimer_queue *vt_list;
++ struct vtimer_list *event;
++ /* the callback queue */
++ struct list_head cb_list;
++
++ INIT_LIST_HEAD(&cb_list);
++ cpu = smp_processor_id();
++ vt_list = &virt_cpu_timer[cpu];
++
++ /* walk timer list, fire all expired events */
++ spin_lock(&vt_list->lock);
++
++ if (vt_list->to_expire < VTIMER_MAX_SLICE)
++ vt_list->offset += vt_list->to_expire;
++
++ list_for_each_safe(ptr, tmp, &vt_list->list) {
++ event = list_entry(ptr, struct vtimer_list, entry);
++
++ if (event->expires > vt_list->offset)
++ /* found first unexpired event, leave */
++ break;
++
++ /* re-charge interval timer, we have to add the offset */
++ if (event->interval)
++ event->expires = event->interval + vt_list->offset;
++
++ /* move expired timer to the callback queue */
++ list_move_tail(ptr, &cb_list);
++ }
++ spin_unlock(&vt_list->lock);
++ do_callbacks(&cb_list, regs);
++
++ /* next event is first in list */
++ spin_lock(&vt_list->lock);
++ if (!list_empty(&vt_list->list)) {
++ ptr = vt_list->list.next;
++ event = list_entry(ptr, struct vtimer_list, entry);
++ next = event->expires - vt_list->offset;
++
++ /* add the expired time from this interrupt handler
++ * and the callback functions
++ */
++ asm volatile ("STPT %0" : "=m" (delta));
++ delta = 0xffffffffffffffffLL - delta + 1;
++ vt_list->offset += delta;
++ next -= delta;
++ } else {
++ vt_list->offset = 0;
++ next = VTIMER_MAX_SLICE;
++ }
++ spin_unlock(&vt_list->lock);
++ set_vtimer(next);
++}
++#endif
++
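The delta arithmetic above works because the CPU timer counts down and keeps running past zero: once it has expired, STPT stores a negative value in two's complement, and 0xffffffffffffffff - STPT + 1 (i.e. 2^64 - STPT) is exactly the time elapsed since expiry, which is the time consumed by this handler and the callbacks. For example:

    STPT -> 0xffffffffffff8000                    /* -32768 units */
    delta == 2^64 - 0xffffffffffff8000 == 0x8000
          == 32768 units == 8 us past expiry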
++/*
++ * Start the clock comparator and the virtual CPU timer
++ * on the current CPU
++ */
+ void init_cpu_timer(void)
+ {
+ unsigned long cr0;
++ __u64 timer;
++#ifdef CONFIG_VIRT_TIMER
++ struct vtimer_queue *vt_list;
++#endif
+
+- S390_lowcore.jiffy_timer = (__u64) jiffies * CLK_TICKS_PER_JIFFY;
+- S390_lowcore.jiffy_timer += init_timer_cc + CLK_TICKS_PER_JIFFY;
+- asm volatile ("SCKC %0" : : "m" (S390_lowcore.jiffy_timer));
+- /* allow clock comparator timer interrupt */
+- asm volatile ("STCTL 0,0,%0" : "=m" (cr0) : : "memory");
+- cr0 |= 0x800;
+- asm volatile ("LCTL 0,0,%0" : : "m" (cr0) : "memory");
++ timer = init_timer_cc + (__u64) jiffies * CLK_TICKS_PER_JIFFY;
++ S390_lowcore.jiffy_timer = timer + CLK_TICKS_PER_JIFFY;
++ timer += CLK_TICKS_PER_JIFFY + CPU_DEVIATION;
++ asm volatile ("SCKC %0" : : "m" (timer));
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++ atomic_inc(&active_cpu_timer);
++#endif
++ /* allow clock comparator timer interrupt */
++ asm volatile ("STCTL 0,0,%0" : "=m" (cr0) : : "memory");
++ cr0 |= 0x800;
++ asm volatile ("LCTL 0,0,%0" : : "m" (cr0) : "memory");
++
++#ifdef CONFIG_VIRT_TIMER
++ /* kick the virtual timer */
++ timer = VTIMER_MAX_SLICE;
++ asm volatile ("SPT %0" : : "m" (timer));
++ __ctl_store(cr0, 0, 0);
++ cr0 |= 0x400;
++ __ctl_load(cr0, 0, 0);
++
++ vt_list = &virt_cpu_timer[smp_processor_id()];
++ INIT_LIST_HEAD(&vt_list->list);
++ spin_lock_init(&vt_list->lock);
++ vt_list->to_expire = 0;
++ vt_list->offset = 0;
++ vt_list->idle = 0;
++#endif
+ }
+
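The two control-register masks used in init_cpu_timer() are the ESA/390 external-interruption subclass masks (bits numbered 0-31 from the left in CR0):

    cr0 |= 0x800;   /* bit 20 == 2^(31-20): clock comparator subclass */
    cr0 |= 0x400;   /* bit 21 == 2^(31-21): CPU timer subclass        */

They match the external interrupt codes 0x1004 and 0x1005 that time_init() below registers handlers for.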
+ /*
+@@ -196,39 +446,374 @@
+ * the boot cpu.
+ */
+ void __init time_init(void)
+-{
+- __u64 set_time_cc;
++{
++ __u64 set_time_cc;
+ int cc;
+
+- /* kick the TOD clock */
+- asm volatile ("STCK 0(%1)\n\t"
+- "IPM %0\n\t"
+- "SRL %0,28" : "=r" (cc) : "a" (&init_timer_cc)
++ /* kick the TOD clock */
++ asm volatile ("STCK 0(%1)\n\t"
++ "IPM %0\n\t"
++ "SRL %0,28" : "=r" (cc) : "a" (&init_timer_cc)
+ : "memory", "cc");
+- switch (cc) {
+- case 0: /* clock in set state: all is fine */
+- break;
+- case 1: /* clock in non-set state: FIXME */
+- printk("time_init: TOD clock in non-set state\n");
+- break;
+- case 2: /* clock in error state: FIXME */
+- printk("time_init: TOD clock in error state\n");
+- break;
+- case 3: /* clock in stopped or not-operational state: FIXME */
+- printk("time_init: TOD clock stopped/non-operational\n");
+- break;
+- }
++ switch (cc) {
++ case 0: /* clock in set state: all is fine */
++ break;
++ case 1: /* clock in non-set state: FIXME */
++ printk("time_init: TOD clock in non-set state\n");
++ break;
++ case 2: /* clock in error state: FIXME */
++ printk("time_init: TOD clock in error state\n");
++ break;
++ case 3: /* clock in stopped or not-operational state: FIXME */
++ printk("time_init: TOD clock stopped/non-operational\n");
++ break;
++ }
+
+ /* set xtime */
+- set_time_cc = init_timer_cc - 0x8126d60e46000000LL +
+- (0x3c26700LL*1000000*4096);
+- tod_to_timeval(set_time_cc, &xtime);
++ xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
++ set_time_cc = init_timer_cc - 0x8126d60e46000000LL +
++ (0x3c26700LL*1000000*4096);
++ tod_to_timeval(set_time_cc, &xtime);
+
+- /* request the 0x1004 external interrupt */
+- if (register_early_external_interrupt(0x1004, do_comparator_interrupt,
++ /* request the clock comparator external interrupt */
++ if (register_early_external_interrupt(0x1004, NULL,
++ &ext_int_info_cc) != 0)
++ panic("Couldn't request external interrupt 0x1004");
++
++#ifdef CONFIG_VIRT_TIMER
++ /* request the cpu timer external interrupt */
++ if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
+ &ext_int_info_timer) != 0)
+- panic("Couldn't request external interrupt 0x1004");
++ panic("Couldn't request external interrupt 0x1005");
++#endif
+
+- /* init CPU timer */
+- init_cpu_timer();
++ init_cpu_timer();
+ }
++
++#ifdef CONFIG_VIRT_TIMER
++void init_virt_timer(struct vtimer_list *timer)
++{
++ timer->magic = VTIMER_MAGIC;
++ timer->function = NULL;
++ INIT_LIST_HEAD(&timer->entry);
++ spin_lock_init(&timer->lock);
++}
++
++static inline int check_vtimer(struct vtimer_list *timer)
++{
++ if (timer->magic != VTIMER_MAGIC)
++ return -EINVAL;
++ return 0;
++}
++
++static inline int vtimer_pending(struct vtimer_list *timer)
++{
++ return (!list_empty(&timer->entry));
++}
++
++/*
++ * this function should only run on the specified CPU
++ */
++static void internal_add_vtimer(struct vtimer_list *timer)
++{
++ unsigned long flags;
++ __u64 done;
++ struct vtimer_list *event;
++ struct vtimer_queue *vt_list;
++
++ vt_list = &virt_cpu_timer[timer->cpu];
++ spin_lock_irqsave(&vt_list->lock, flags);
++
++ if (timer->cpu != smp_processor_id())
++ printk("internal_add_vtimer: BUG, running on wrong CPU");
++
++ /* if list is empty we only have to set the timer */
++ if (list_empty(&vt_list->list)) {
++ /* reset the offset; this may happen if the last timer was
++ * just deleted by mod_virt_timer and the interrupt
++ * didn't happen until here
++ */
++ vt_list->offset = 0;
++ goto fire;
++ }
++
++ /* save progress */
++ asm volatile ("STPT %0" : "=m" (done));
++
++ /* calculate completed work */
++ done = vt_list->to_expire - done + vt_list->offset;
++ vt_list->offset = 0;
++
++ list_for_each_entry(event, &vt_list->list, entry)
++ event->expires -= done;
++
++ fire:
++ list_add_sorted(timer, &vt_list->list);
++
++ /* get first element, which is the next vtimer slice */
++ event = list_entry(vt_list->list.next, struct vtimer_list, entry);
++
++ set_vtimer(event->expires);
++ spin_unlock_irqrestore(&vt_list->lock, flags);
++}
++
++static inline int prepare_vtimer(struct vtimer_list *timer)
++{
++ if (check_vtimer(timer) || !timer->function) {
++ printk("add_virt_timer: uninitialized timer\n");
++ return -EINVAL;
++ }
++
++ if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
++ printk("add_virt_timer: invalid timer expire value!\n");
++ return -EINVAL;
++ }
++
++ if (vtimer_pending(timer)) {
++ printk("add_virt_timer: timer pending\n");
++ return -EBUSY;
++ }
++
++ timer->cpu = smp_processor_id();
++ return 0;
++}
++
++/*
++ * add_virt_timer - add a one-shot virtual CPU timer
++ */
++void add_virt_timer(void *new)
++{
++ struct vtimer_list *timer;
++
++ timer = (struct vtimer_list *)new;
++
++ if (prepare_vtimer(timer) < 0)
++ return;
++
++ timer->interval = 0;
++ internal_add_vtimer(timer);
++}
++
++/*
++ * add_virt_timer_periodic - add an interval virtual CPU timer
++ */
++void add_virt_timer_periodic(void *new)
++{
++ struct vtimer_list *timer;
++
++ timer = (struct vtimer_list *)new;
++
++ if (prepare_vtimer(timer) < 0)
++ return;
++
++ timer->interval = timer->expires;
++ internal_add_vtimer(timer);
++}
++
++/*
++ * If we change a pending timer the function must be called on the CPU
++ * on which the timer is running, e.g. via smp_call_function_on().
++ *
++ * The original mod_timer adds the timer if it is not pending. For
++ * compatibility we do the same. The timer will be added on the current
++ * CPU as a one-shot timer.
++ *
++ * returns whether it has modified a pending timer (1) or not (0)
++ */
++int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
++{
++ struct vtimer_queue *vt_list;
++ unsigned long flags;
++
++ if (check_vtimer(timer) || !timer->function) {
++ printk("mod_virt_timer: uninitialized timer\n");
++ return -EINVAL;
++ }
++
++ if (!expires || expires > VTIMER_MAX_SLICE) {
++ printk("mod_virt_timer: invalid expire range\n");
++ return -EINVAL;
++ }
++
++ /*
++ * This is a common optimization triggered by the
++ * networking code - if the timer is re-modified
++ * to be the same thing then just return:
++ */
++ if (timer->expires == expires && vtimer_pending(timer))
++ return 1;
++
++ /* disable interrupts before test if timer is pending */
++ vt_list = &virt_cpu_timer[smp_processor_id()];
++ spin_lock_irqsave(&vt_list->lock, flags);
++
++ /* if timer isn't pending add it on the current CPU */
++ if (!vtimer_pending(timer)) {
++ spin_unlock_irqrestore(&vt_list->lock, flags);
++ /* we do not activate an interval timer with mod_virt_timer */
++ timer->interval = 0;
++ timer->expires = expires;
++ timer->cpu = smp_processor_id();
++ internal_add_vtimer(timer);
++ return 0;
++ }
++
++ /* check if we run on the right CPU */
++ if (timer->cpu != smp_processor_id()) {
++ printk("mod_virt_timer: running on wrong CPU, check your code\n");
++ spin_unlock_irqrestore(&vt_list->lock, flags);
++ return -EINVAL;
++ }
++
++ list_del_init(&timer->entry);
++ timer->expires = expires;
++
++ /* also change the interval if we have an interval timer */
++ if (timer->interval)
++ timer->interval = expires;
++
++ /* the timer can't expire anymore so we can release the lock */
++ spin_unlock_irqrestore(&vt_list->lock, flags);
++ internal_add_vtimer(timer);
++ return 1;
++}
++
++/*
++ * delete a virtual timer
++ *
++ * returns whether the deleted timer was pending (1) or not (0)
++ */
++int del_virt_timer(struct vtimer_list *timer)
++{
++ unsigned long flags;
++ struct vtimer_queue *vt_list;
++
++ if (check_vtimer(timer)) {
++ printk("del_virt_timer: timer not initialized\n");
++ return -EINVAL;
++ }
++
++ /* check if timer is pending */
++ if (!vtimer_pending(timer))
++ return 0;
++
++ if (timer->cpu >= smp_num_cpus) {
++ printk("del_virt_timer: CPU not present!\n");
++ return -1;
++ }
++
++ vt_list = &virt_cpu_timer[timer->cpu];
++ spin_lock_irqsave(&vt_list->lock, flags);
++
++ /* we don't interrupt a running timer, just let it expire! */
++ list_del_init(&timer->entry);
++
++ /* last timer removed */
++ if (list_empty(&vt_list->list)) {
++ vt_list->to_expire = 0;
++ vt_list->offset = 0;
++ }
++
++ spin_unlock_irqrestore(&vt_list->lock, flags);
++ return 1;
++}
++#endif
++
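A minimal usage sketch of the new vtimer interface (illustration only; the callback and the five-second period are invented):

    #include <asm/timer.h>

    static void my_vtimer_fn(unsigned long data, struct pt_regs *regs)
    {
            /* runs from the 0x1005 interrupt handler; keep it short */
    }

    static struct vtimer_list my_vtimer;

    static void my_vtimer_setup(void)
    {
            init_virt_timer(&my_vtimer);
            my_vtimer.function = my_vtimer_fn;
            my_vtimer.data = 0;
            /* five seconds of virtual CPU time: 5 * 10^6 us * 4096 units */
            my_vtimer.expires = 5ULL * 1000000 * 4096;
            add_virt_timer_periodic(&my_vtimer);
    }

Because the timer is queued on the CPU that adds it, later changes must run there too, e.g. via the smp_call_function_on() helper introduced in smp.c below.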
++#ifdef CONFIG_NO_IDLE_HZ
++
++/*
++ * Start the HZ tick on the current CPU.
++ * Only cpu_idle may call this function.
++ */
++void start_hz_timer(void)
++{
++ __u64 tmp;
++ __u32 ticks;
++
++ if (sysctl_hz_timer != 0)
++ return;
++
++ irq_enter(smp_processor_id(), 0);
++
++ /* Calculate how many ticks have passed */
++ asm volatile ("STCK 0(%0)" : : "a" (&tmp) : "memory", "cc");
++ tmp = tmp + CLK_TICKS_PER_JIFFY - S390_lowcore.jiffy_timer;
++ ticks = div64_32(tmp >> 1, CLK_TICKS_PER_JIFFY >> 1);
++ S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY * (__u64) ticks;
++
++ /* Set the clock comparator to the next tick. */
++ tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
++ asm volatile ("SCKC %0" : : "m" (tmp));
++
++ /* Charge the ticks. */
++ if (ticks > 0) {
++#ifdef CONFIG_SMP
++ write_lock(&xtime_lock);
++ if (S390_lowcore.jiffy_timer > xtime_cc) {
++ __u32 xticks;
++
++ tmp = S390_lowcore.jiffy_timer - xtime_cc;
++ xticks = div64_32(tmp >> 1, CLK_TICKS_PER_JIFFY >> 1);
++ xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
++ do_timer_ticks(xticks);
++ }
++ write_unlock(&xtime_lock);
++#else
++ do_timer_ticks(0, ticks);
++#endif
++ update_process_times_us(0, ticks);
++ }
++ irq_exit(smp_processor_id(), 0);
++}
++
++extern spinlock_t timerlist_lock;
++
++/*
++ * Stop the HZ tick on the current CPU.
++ * Only cpu_idle may call this function.
++ */
++void stop_hz_timer(void)
++{
++ __u64 timer;
++
++ if (sysctl_hz_timer != 0)
++ return;
++
++ if (atomic_read(&active_cpu_timer) == 0) {
++ /*
++ * The last active cpu is going to sleep. Set up the clock
++ * comparator for the next event if nothing on tq_timer
++ * is pending. If something is pending on tq_timer then
++ * don't change the clock comparator as it is setup for
++ * the next timer tick already.
++ */
++ if (!TQ_ACTIVE(tq_timer)) {
++ spin_lock(&timerlist_lock);
++ timer = (__u64) next_timer_event()->expires;
++ timer *= CLK_TICKS_PER_JIFFY;
++ timer += init_timer_cc;
++ asm volatile ("SCKC %0" : : "m" (timer));
++ spin_unlock(&timerlist_lock);
++ }
++ } else {
++ timer = (__u64) -1;
++ asm volatile ("SCKC %0" : : "m" (timer));
++ }
++}
++#endif
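The conversion in the TQ_ACTIVE branch is the inverse of the tick accounting: an absolute jiffies value is turned into the absolute TOD time of that tick. Assuming HZ == 100, a timer due at jiffies == 1000 gives

    comparator == 1000 * 40960000 units + init_timer_cc
               == 10 seconds after the boot reference

so an otherwise idle system sleeps straight through to its next pending timer instead of waking every 10 ms.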
++
++void do_monitor_call(struct pt_regs *regs, long interruption_code)
++{
++ /* disable monitor call class 0 */
++ __ctl_clear_bit(8, 15);
++
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++ atomic_inc(&active_cpu_timer);
++#endif
++
++#ifdef CONFIG_VIRT_TIMER
++ start_cpu_timer();
++#endif
++#ifdef CONFIG_NO_IDLE_HZ
++ start_hz_timer();
++#endif
++}
+=== arch/s390/kernel/smp.c
+==================================================================
+--- arch/s390/kernel/smp.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/kernel/smp.c (/trunk/2.4.27) (revision 52)
+@@ -92,7 +92,7 @@
+
+ extern void reipl(unsigned long devno);
+
+-static sigp_ccode smp_ext_bitcall(int, ec_bit_sig);
++static void smp_ext_bitcall(int, ec_bit_sig);
+ static void smp_ext_bitcall_others(ec_bit_sig);
+
+ /*
+@@ -131,7 +131,7 @@
+ * in the system.
+ */
+
+-int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
++int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+ int wait)
+ /*
+ * [SUMMARY] Run a function on all other CPUs.
+@@ -162,7 +162,7 @@
+ spin_lock_bh(&call_lock);
+ call_data = &data;
+ /* Send a message to all other CPUs and wait for them to respond */
+- smp_ext_bitcall_others(ec_call_function);
++ smp_ext_bitcall_others(ec_call_function);
+
+ /* Wait for response */
+ while (atomic_read(&data.started) != cpus)
+@@ -176,9 +176,54 @@
+ return 0;
+ }
+
++/*
++ * Call a function on one CPU
++ * cpu : the CPU the function should be executed on
++ *
++ * You must not call this function with disabled interrupts or from a
++ * hardware interrupt handler; you may call it from a bottom half handler.
++ */
++int smp_call_function_on(void (*func) (void *info), void *info,
++ int nonatomic, int wait, int cpu)
++{
++ struct call_data_struct data;
++
++ if (!atomic_read(&smp_commenced))
++ return 0;
++
++ if (smp_processor_id() == cpu) {
++ /* direct call to function */
++ func(info);
++ return 0;
++ }
++
++ data.func = func;
++ data.info = info;
++
++ atomic_set(&data.started, 0);
++ data.wait = wait;
++ if (wait)
++ atomic_set(&data.finished, 0);
++
++ spin_lock_bh(&call_lock);
++ call_data = &data;
++ smp_ext_bitcall(cpu, ec_call_function);
++
++ /* Wait for response */
++ while (atomic_read(&data.started) != 1)
++ barrier();
++
++ if (wait)
++ while (atomic_read(&data.finished) != 1)
++ barrier();
++
++ spin_unlock_bh(&call_lock);
++ return 0;
++}
++
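A hypothetical caller (the function and counter are invented for illustration) that runs work on CPU 2 from process context and waits for completion; the appldata code added below uses the same pattern to manipulate per-CPU vtimers:

    static void bump_counter(void *info)
    {
            (*(int *) info)++;
    }

    static int example(void)
    {
            int counter = 0;

            /* nonatomic == 0, wait == 1, target cpu == 2 */
            return smp_call_function_on(bump_counter, &counter, 0, 1, 2);
    }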
+ static inline void do_send_stop(void)
+ {
+- u32 dummy;
++ unsigned long dummy;
+ int i;
+
+ /* stop all processors */
+@@ -199,7 +244,7 @@
+ static inline void do_store_status(void)
+ {
+ unsigned long low_core_addr;
+- u32 dummy;
++ unsigned long dummy;
+ int i;
+
+ /* store status of all processors in their lowcores (real 0) */
+@@ -328,42 +373,41 @@
+ }
+
+ /*
+- * Send an external call sigp to another cpu and return without waiting
++ * Send an external call sigp to another cpu and wait
+ * for its completion.
+ */
+-static sigp_ccode smp_ext_bitcall(int cpu, ec_bit_sig sig)
++static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
+ {
+- struct _lowcore *lowcore = get_cpu_lowcore(cpu);
+- sigp_ccode ccode;
++ struct _lowcore *lowcore = get_cpu_lowcore(cpu);
+
+- /*
+- * Set signaling bit in lowcore of target cpu and kick it
+- */
+- atomic_set_mask(1<<sig, &lowcore->ext_call_fast);
+- ccode = signal_processor(cpu, sigp_external_call);
+- return ccode;
++ /*
++ * Set signaling bit in lowcore of target cpu and kick it
++ */
++ atomic_set_mask(1<<sig, &lowcore->ext_call_fast);
++ while(signal_processor(cpu, sigp_external_call) == sigp_busy)
++ udelay(10);
+ }
+
+ /*
+ * Send an external call sigp to every other cpu in the system and
+- * return without waiting for its completion.
++ * wait for its completion.
+ */
+ static void smp_ext_bitcall_others(ec_bit_sig sig)
+ {
+- struct _lowcore *lowcore;
+- int i;
++ struct _lowcore *lowcore;
++ int i;
+
+- for (i = 0; i < smp_num_cpus; i++) {
+- if (smp_processor_id() == i)
+- continue;
+- lowcore = get_cpu_lowcore(i);
+- /*
+- * Set signaling bit in lowcore of target cpu and kick it
+- */
+- atomic_set_mask(1<<sig, &lowcore->ext_call_fast);
+- while (signal_processor(i, sigp_external_call) == sigp_busy)
++ for (i = 0; i < smp_num_cpus; i++) {
++ if (smp_processor_id() == i)
++ continue;
++ lowcore = get_cpu_lowcore(i);
++ /*
++ * Set signaling bit in lowcore of target cpu and kick it
++ */
++ atomic_set_mask(1<<sig, &lowcore->ext_call_fast);
++ while (signal_processor(i, sigp_external_call) == sigp_busy)
+ udelay(10);
+- }
++ }
+ }
+
+ /*
+=== arch/s390/kernel/setup.c
+==================================================================
+--- arch/s390/kernel/setup.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/kernel/setup.c (/trunk/2.4.27) (revision 52)
+@@ -276,9 +276,9 @@
+
+ static int __init conmode_setup(char *str)
+ {
+-#if defined(CONFIG_HWC_CONSOLE)
+- if (strncmp(str, "hwc", 4) == 0)
+- SET_CONSOLE_HWC;
++#if defined(CONFIG_SCLP_CONSOLE)
++ if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
++ SET_CONSOLE_SCLP;
+ #endif
+ #if defined(CONFIG_TN3215_CONSOLE)
+ if (strncmp(str, "3215", 5) == 0)
+@@ -310,8 +310,8 @@
+ */
+ cpcmd("TERM CONMODE 3215", NULL, 0);
+ if (ptr == NULL) {
+-#if defined(CONFIG_HWC_CONSOLE)
+- SET_CONSOLE_HWC;
++#if defined(CONFIG_SCLP_CONSOLE)
++ SET_CONSOLE_SCLP;
+ #endif
+ return;
+ }
+@@ -320,16 +320,16 @@
+ SET_CONSOLE_3270;
+ #elif defined(CONFIG_TN3215_CONSOLE)
+ SET_CONSOLE_3215;
+-#elif defined(CONFIG_HWC_CONSOLE)
+- SET_CONSOLE_HWC;
++#elif defined(CONFIG_SCLP_CONSOLE)
++ SET_CONSOLE_SCLP;
+ #endif
+ } else if (strncmp(ptr + 8, "3215", 4) == 0) {
+ #if defined(CONFIG_TN3215_CONSOLE)
+ SET_CONSOLE_3215;
+ #elif defined(CONFIG_TN3270_CONSOLE)
+ SET_CONSOLE_3270;
+-#elif defined(CONFIG_HWC_CONSOLE)
+- SET_CONSOLE_HWC;
++#elif defined(CONFIG_SCLP_CONSOLE)
++ SET_CONSOLE_SCLP;
+ #endif
+ }
+ } else if (MACHINE_IS_P390) {
+@@ -339,8 +339,8 @@
+ SET_CONSOLE_3270;
+ #endif
+ } else {
+-#if defined(CONFIG_HWC_CONSOLE)
+- SET_CONSOLE_HWC;
++#if defined(CONFIG_SCLP_CONSOLE)
++ SET_CONSOLE_SCLP;
+ #endif
+ }
+ }
+@@ -383,21 +383,25 @@
+
+ /*
+ * Reboot, halt and power_off stubs. They just call _machine_restart,
+- * _machine_halt or _machine_power_off.
++ * _machine_halt or _machine_power_off after making sure that all pending
++ * printks have reached their destination.
+ */
+
+ void machine_restart(char *command)
+ {
++ console_unblank();
+ _machine_restart(command);
+ }
+
+ void machine_halt(void)
+ {
++ console_unblank();
+ _machine_halt();
+ }
+
+ void machine_power_off(void)
+ {
++ console_unblank();
+ _machine_power_off();
+ }
+
+=== arch/s390/kernel/process.c
+==================================================================
+--- arch/s390/kernel/process.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/kernel/process.c (/trunk/2.4.27) (revision 52)
+@@ -43,13 +43,27 @@
+ #include <asm/io.h>
+ #include <asm/processor.h>
+ #include <asm/irq.h>
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++#include <asm/timer.h>
++#endif
+
+ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+
++#ifdef CONFIG_VIRT_TIMER
++extern int stop_cpu_timer(void);
++#endif
++
++#ifdef CONFIG_NO_IDLE_HZ
++extern void stop_hz_timer(void);
++#endif
++
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++extern atomic_t active_cpu_timer;
++#endif
++
+ /*
+ * The idle loop on a S390...
+ */
+-
+ int cpu_idle(void *unused)
+ {
+ psw_t wait_psw;
+@@ -68,9 +82,37 @@
+ continue;
+ }
+
++#ifdef CONFIG_VIRT_TIMER
+ /*
++ * virtual CPU timer should not progress while its CPU is idle
++ */
++ if (stop_cpu_timer()) {
++ __sti();
++ continue;
++ }
++#endif
++
++/*
++ * active_cpu_timer is used by stop_hz_timer to determine if the last
++ * CPU is gone. We also have to update this value when the virtual CPU
++ * timer is in use, because both features use monitor calls.
++ */
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++ atomic_dec(&active_cpu_timer);
++#endif
++
++#ifdef CONFIG_NO_IDLE_HZ
++ stop_hz_timer();
++#endif
++
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++ /* enable monitor call class 0 */
++ __ctl_set_bit(8, 15);
++#endif
++
++ /*
+ * Wait for external, I/O or machine check interrupt and
+- * switch of machine check bit after the wait has ended.
++ * switch off machine check bit after the wait has ended.
+ */
+ wait_psw.mask = _WAIT_PSW_MASK;
+ asm volatile (
+@@ -86,6 +128,10 @@
+ " lpsw 0(%1)\n"
+ "2:"
+ : "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" );
++ /*
++ * start_hz_timer is called via a monitor call in entry.S
++ * if stop_hz_timer has switched off the regular HZ interrupts
++ */
+ }
+ }
+
+=== arch/s390/kernel/entry.S
+==================================================================
+--- arch/s390/kernel/entry.S (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/kernel/entry.S (/trunk/2.4.27) (revision 52)
+@@ -690,7 +690,12 @@
+ io_int_handler:
+ SAVE_ALL_BASE
+ SAVE_ALL __LC_IO_OLD_PSW,0
++ mc 0,0
+ GET_CURRENT # load pointer to task_struct to R9
++ stck __LC_INT_CLOCK
++ clc __LC_INT_CLOCK(8),__LC_JIFFY_TIMER
++ bhe BASED(io_handle_tick)
++io_call_handler:
+ l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ sr %r3,%r3
+@@ -725,6 +730,15 @@
+ RESTORE_ALL 0
+
+ #
++# account tick
++#
++io_handle_tick:
++ l %r1,BASED(.Laccount_ticks)
++ la %r2,SP_PTREGS(%r15) # address of register-save area
++ la %r14,BASED(io_call_handler)
++ br %r1
++
++#
+ # call do_softirq
+ #
+ io_handle_bottom_half:
+@@ -758,8 +772,13 @@
+ ext_int_handler:
+ SAVE_ALL_BASE
+ SAVE_ALL __LC_EXT_OLD_PSW,0
+- GET_CURRENT # load pointer to task_struct to R9
++ mc 0, 0
++ GET_CURRENT # load pointer to task_struct to R9
+ lh %r6,__LC_EXT_INT_CODE # get interruption code
++ stck __LC_INT_CLOCK
++ clc __LC_INT_CLOCK(8),__LC_JIFFY_TIMER
++ bhe BASED(ext_handle_tick)
++ext_call_handler:
+ lr %r1,%r6 # calculate index = code & 0xff
+ n %r1,BASED(.Lc0xff)
+ sll %r1,2
+@@ -770,7 +789,8 @@
+ ext_int_loop:
+ ch %r6,8(%r7) # compare external interrupt code
+ bne BASED(ext_int_next)
+- l %r1,4(%r7) # get handler address
++ icm %r1,15,4(%r7) # get handler address
++ bz BASED(ext_int_next)
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ lr %r3,%r6 # interruption code
+ basr %r14,%r1 # call handler
+@@ -779,6 +799,15 @@
+ bnz BASED(ext_int_loop)
+ b BASED(io_return)
+
++#
++# account tick
++#
++ext_handle_tick:
++ l %r1,BASED(.Laccount_ticks)
++ la %r2,SP_PTREGS(%r15) # address of register-save area
++ la %r14,BASED(ext_call_handler)
++ br %r1
++
+ /*
+ * Machine check handler routines
+ */
+@@ -787,6 +816,7 @@
+ mcck_int_handler:
+ SAVE_ALL_BASE
+ SAVE_ALL __LC_MCK_OLD_PSW,0
++ mc 0, 0
+ l %r1,BASED(.Ls390_mcck)
+ basr %r14,%r1 # call machine check handler
+ mcck_return:
+@@ -869,4 +899,4 @@
+ .Lvfork: .long sys_vfork
+
+ .Lschedtail: .long schedule_tail
+-
++.Laccount_ticks:.long account_ticks
+=== arch/s390/kernel/s390_ksyms.c
+==================================================================
+--- arch/s390/kernel/s390_ksyms.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/kernel/s390_ksyms.c (/trunk/2.4.27) (revision 52)
+@@ -7,9 +7,13 @@
+ #include <linux/module.h>
+ #include <linux/smp.h>
+ #include <asm/checksum.h>
++#include <asm/cpcmd.h>
+ #include <asm/delay.h>
+ #include <asm/setup.h>
+ #include <asm/softirq.h>
++#ifdef CONFIG_VIRT_TIMER
++#include <asm/timer.h>
++#endif
+ #if CONFIG_IP_MULTICAST
+ #include <net/arp.h>
+ #endif
+@@ -20,6 +24,7 @@
+ EXPORT_SYMBOL_NOVERS(_oi_bitmap);
+ EXPORT_SYMBOL_NOVERS(_ni_bitmap);
+ EXPORT_SYMBOL_NOVERS(_zb_findmap);
++EXPORT_SYMBOL_NOVERS(empty_zero_page);
+ EXPORT_SYMBOL_NOVERS(__copy_from_user_asm);
+ EXPORT_SYMBOL_NOVERS(__copy_to_user_asm);
+ EXPORT_SYMBOL_NOVERS(__clear_user_asm);
+@@ -66,3 +71,22 @@
+ EXPORT_SYMBOL(get_storage_key);
+ EXPORT_SYMBOL_NOVERS(do_call_softirq);
+ EXPORT_SYMBOL(sys_wait4);
++EXPORT_SYMBOL(smp_call_function_on);
++EXPORT_SYMBOL(show_trace);
++EXPORT_SYMBOL(cpcmd);
++
++
++/*
++ * virtual CPU timer
++ */
++#ifdef CONFIG_VIRT_TIMER
++EXPORT_SYMBOL(init_virt_timer);
++EXPORT_SYMBOL(add_virt_timer);
++EXPORT_SYMBOL(add_virt_timer_periodic);
++EXPORT_SYMBOL(mod_virt_timer);
++EXPORT_SYMBOL(del_virt_timer);
++#endif
++
++/* urandom read needed for z90crypt */
++extern struct file_operations urandom_fops;
++EXPORT_SYMBOL_GPL(urandom_fops);
+=== arch/s390/kernel/traps.c
+==================================================================
+--- arch/s390/kernel/traps.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/kernel/traps.c (/trunk/2.4.27) (revision 52)
+@@ -54,6 +54,9 @@
+ extern pgm_check_handler_t do_segment_exception;
+ extern pgm_check_handler_t do_page_exception;
+ extern pgm_check_handler_t do_pseudo_page_fault;
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++extern pgm_check_handler_t do_monitor_call;
++#endif
+ #ifdef CONFIG_PFAULT
+ extern int pfault_init(void);
+ extern void pfault_fini(void);
+@@ -628,7 +631,7 @@
+ int i;
+
+ for (i = 0; i < 128; i++)
+- pgm_check_table[i] = &default_trap_handler;
++ pgm_check_table[i] = &default_trap_handler;
+ pgm_check_table[1] = &illegal_op;
+ pgm_check_table[2] = &privileged_op;
+ pgm_check_table[3] = &execute_exception;
+@@ -644,6 +647,9 @@
+ pgm_check_table[0x14] = &do_pseudo_page_fault;
+ pgm_check_table[0x15] = &operand_exception;
+ pgm_check_table[0x1C] = &privileged_op;
++#if defined (CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
++ pgm_check_table[0x40] = &do_monitor_call;
++#endif
+ #ifdef CONFIG_PFAULT
+ if (MACHINE_IS_VM) {
+ /* request the 0x2603 external interrupt */
+=== arch/s390/defconfig
+==================================================================
+--- arch/s390/defconfig (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/defconfig (/trunk/2.4.27) (revision 52)
+@@ -38,8 +38,8 @@
+ CONFIG_QDIO=m
+ # CONFIG_QDIO_PERF_STATS is not set
+ CONFIG_IPL=y
+-# CONFIG_IPL_TAPE is not set
+-CONFIG_IPL_VM=y
++CONFIG_IPL_TAPE=y
++# CONFIG_IPL_VM is not set
+ CONFIG_NET=y
+ CONFIG_SYSVIPC=y
+ # CONFIG_BSD_PROCESS_ACCT is not set
+@@ -50,8 +50,52 @@
+ # CONFIG_PROCESS_DEBUG is not set
+ CONFIG_PFAULT=y
+ # CONFIG_SHARED_KERNEL is not set
++# CONFIG_VIRT_TIMER is not set
++# CONFIG_APPLDATA_BASE is not set
++# CONFIG_APPLDATA_MEM is not set
++# CONFIG_APPLDATA_OS is not set
++# CONFIG_APPLDATA_NET_SUM is not set
++CONFIG_CMM=m
++CONFIG_CMM_PROC=y
++# CONFIG_CMM_IUCV is not set
+
+ #
++# SCSI support
++#
++CONFIG_SCSI=m
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=m
++CONFIG_SD_EXTRA_DEVS=1000
++CONFIG_CHR_DEV_ST=m
++# CONFIG_CHR_DEV_OSST is not set
++CONFIG_BLK_DEV_SR=m
++# CONFIG_BLK_DEV_SR_VENDOR is not set
++CONFIG_SR_EXTRA_DEVS=10
++CONFIG_CHR_DEV_SG=m
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_DEBUG_QUEUES=y
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++
++#
++# SCSI low-level drivers
++#
++CONFIG_ZFCP=m
++CONFIG_ZFCP_HBAAPI=m
++
++#
++# PCMCIA SCSI adapter support
++#
++# CONFIG_SCSI_PCMCIA is not set
++
++#
+ # Block device drivers
+ #
+ CONFIG_BLK_DEV_LOOP=y
+@@ -60,6 +104,7 @@
+ CONFIG_BLK_DEV_RAM_SIZE=24576
+ CONFIG_BLK_DEV_INITRD=y
+ CONFIG_BLK_DEV_XPRAM=m
++CONFIG_DCSSBLK=m
+
+ #
+ # S/390 block device drivers
+@@ -68,6 +113,7 @@
+ CONFIG_DASD_ECKD=y
+ CONFIG_DASD_FBA=y
+ CONFIG_DASD_DIAG=y
++CONFIG_S390_CMF=m
+
+ #
+ # Multi-device support (RAID and LVM)
+@@ -94,22 +140,24 @@
+ CONFIG_TN3270_CONSOLE=y
+ CONFIG_TN3215=y
+ CONFIG_TN3215_CONSOLE=y
+-CONFIG_HWC=y
+-CONFIG_HWC_CONSOLE=y
+-CONFIG_HWC_CPI=m
++CONFIG_SCLP=y
++CONFIG_SCLP_TTY=y
++CONFIG_SCLP_CONSOLE=y
++CONFIG_SCLP_VT220_TTY=y
++CONFIG_SCLP_VT220_CONSOLE=y
++CONFIG_SCLP_CPI=m
+ CONFIG_S390_TAPE=m
+
+ #
+ # S/390 tape interface support
+ #
+-CONFIG_S390_TAPE_CHAR=y
+ CONFIG_S390_TAPE_BLOCK=y
+
+ #
+ # S/390 tape hardware support
+ #
+-CONFIG_S390_TAPE_3490=y
+-CONFIG_S390_TAPE_3480=y
++CONFIG_S390_TAPE_34XX=m
++CONFIG_VMLOGRDR=m
+
+ #
+ # Network device drivers
+@@ -129,10 +177,29 @@
+ #
+ CONFIG_CHANDEV=y
+ CONFIG_HOTPLUG=y
++CONFIG_LCS=m
++CONFIG_QETH=m
++
++#
++# Gigabit Ethernet default settings
++#
++CONFIG_QETH_IPV6=y
++CONFIG_QETH_VLAN=y
++# CONFIG_QETH_PERF_STATS is not set
+ CONFIG_CTC=m
++# CONFIG_MPC is not set
+ CONFIG_IUCV=m
++CONFIG_NETIUCV=m
++CONFIG_SMSGIUCV=m
+
+ #
++# Miscellaneous
++#
++CONFIG_Z90CRYPT=m
++# CONFIG_NO_IDLE_HZ is not set
++# CONFIG_NO_IDLE_HZ_INIT is not set
++
++#
+ # Networking options
+ #
+ CONFIG_PACKET=y
+@@ -172,7 +239,6 @@
+ CONFIG_IP_NF_MATCH_MARK=m
+ CONFIG_IP_NF_MATCH_MULTIPORT=m
+ CONFIG_IP_NF_MATCH_TOS=m
+-# CONFIG_IP_NF_MATCH_RECENT is not set
+ # CONFIG_IP_NF_MATCH_ECN is not set
+ # CONFIG_IP_NF_MATCH_DSCP is not set
+ CONFIG_IP_NF_MATCH_AH_ESP=m
+@@ -205,14 +271,8 @@
+ CONFIG_IP_NF_TARGET_TCPMSS=m
+ CONFIG_IP_NF_ARPTABLES=m
+ CONFIG_IP_NF_ARPFILTER=m
+-# CONFIG_IP_NF_ARP_MANGLE is not set
+ # CONFIG_IP_NF_COMPAT_IPCHAINS is not set
+ # CONFIG_IP_NF_COMPAT_IPFWADM is not set
+-
+-#
+-# IP: Virtual Server Configuration
+-#
+-# CONFIG_IP_VS is not set
+ CONFIG_IPV6=m
+
+ #
+@@ -237,6 +297,7 @@
+ CONFIG_IP6_NF_TARGET_LOG=m
+ CONFIG_IP6_NF_MANGLE=m
+ CONFIG_IP6_NF_TARGET_MARK=m
++CONFIG_SHARED_IPV6_CARDS=y
+ # CONFIG_KHTTPD is not set
+
+ #
+@@ -328,6 +389,7 @@
+ # CONFIG_QNX4FS_FS is not set
+ # CONFIG_QNX4FS_RW is not set
+ # CONFIG_ROMFS_FS is not set
++CONFIG_XIP2FS=m
+ CONFIG_EXT2_FS=y
+ # CONFIG_SYSV_FS is not set
+ # CONFIG_UDF_FS is not set
+=== arch/s390/appldata/appldata_base.c
+==================================================================
+--- arch/s390/appldata/appldata_base.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/appldata/appldata_base.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,650 @@
++/*
++ * arch/s390/appldata/appldata_base.c
++ *
++ * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
++ * Exports appldata_register_ops() and appldata_unregister_ops() for the
++ * data gathering modules.
++ *
++ * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
++ *
++ * Author: Gerald Schaefer <geraldsc at de.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <linux/interrupt.h>
++#include <linux/proc_fs.h>
++#include <asm/timer.h>
++#include <linux/sysctl.h>
++
++#include "appldata.h"
++
++
++#define MY_PRINT_NAME "appldata" /* for debug messages, etc. */
++#define APPLDATA_INTERVAL 60 /* default monitoring
++ interval in seconds */
++#define VIRTUAL_SECOND 0x0F4240000 /* nr. of TOD clock units
++ for one second */
++#ifndef CONFIG_ARCH_S390X
++
++#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */
++#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */
++#define APPLDATA_GEN_EVENT_RECORD 0x02
++#define APPLDATA_START_CONFIG_REC 0x03
++
++#else
++
++#define APPLDATA_START_INTERVAL_REC 0x80
++#define APPLDATA_STOP_REC 0x81
++#define APPLDATA_GEN_EVENT_RECORD 0x82
++#define APPLDATA_START_CONFIG_REC 0x83
++
++#endif /* CONFIG_ARCH_S390X */
++
++
++/*
++ * Parameter list for DIAGNOSE X'DC'
++ */
++#ifndef CONFIG_ARCH_S390X
++struct appldata_parameter_list {
++ u16 diag; /* The DIAGNOSE code X'00DC' */
++ u8 function; /* The function code for the DIAGNOSE */
++ u8 parlist_length; /* Length of the parameter list */
++ u32 product_id_addr; /* Address of the 16-byte product ID */
++ u16 reserved;
++ u16 buffer_length; /* Length of the application data buffer */
++ u32 buffer_addr; /* Address of the application data buffer */
++};
++#else
++struct appldata_parameter_list {
++ u16 diag;
++ u8 function;
++ u8 parlist_length;
++ u32 unused01;
++ u16 reserved;
++ u16 buffer_length;
++ u32 unused02;
++ u64 product_id_addr;
++ u64 buffer_addr;
++};
++#endif /* CONFIG_ARCH_S390X */
++
++/*
++ * /proc entries (sysctl)
++ */
++static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
++static int appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
++ void *buffer, size_t *lenp);
++static int appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
++ void *buffer, size_t *lenp);
++
++static struct ctl_table_header *appldata_sysctl_header;
++static struct ctl_table appldata_table[] = {
++ {
++ .ctl_name = CTL_APPLDATA_TIMER,
++ .procname = "timer",
++ .mode = S_IRUGO | S_IWUSR,
++ .proc_handler = &appldata_timer_handler,
++ },
++ {
++ .ctl_name = CTL_APPLDATA_INTERVAL,
++ .procname = "interval",
++ .mode = S_IRUGO | S_IWUSR,
++ .proc_handler = &appldata_interval_handler,
++ },
++ { .ctl_name = 0 }
++};
++
++static struct ctl_table appldata_dir_table[] = {
++ {
++ .ctl_name = CTL_APPLDATA,
++ .procname = appldata_proc_name,
++ .maxlen = 0,
++ .mode = S_IRUGO | S_IXUGO,
++ .child = appldata_table,
++ },
++ { .ctl_name = 0 }
++};
++
++/*
++ * Timer
++ */
++static spinlock_t appldata_timer_lock = SPIN_LOCK_UNLOCKED;
++static struct vtimer_list appldata_timer[NR_CPUS];
++static int appldata_interval = APPLDATA_INTERVAL;
++static int appldata_timer_active;
++static atomic_t appldata_expire_count = ATOMIC_INIT(0);
++static struct appldata_mod_vtimer_args {
++ struct vtimer_list *timer;
++ u64 expires;
++} appldata_mod_vtimer_args;
++
++/*
++ * Tasklet
++ */
++static struct tasklet_struct appldata_tasklet_struct;
++
++/*
++ * Hook list
++ */
++static spinlock_t appldata_ops_lock = SPIN_LOCK_UNLOCKED;
++static LIST_HEAD(appldata_ops_list);
++
++
++/************************* timer, tasklet, DIAG ******************************/
++/*
++ * appldata_timer_function()
++ *
++ * schedule tasklet and reschedule timer
++ */
++static void appldata_timer_function(unsigned long data, struct pt_regs *regs)
++{
++ P_DEBUG(" -= Timer =-\n");
++ P_DEBUG("CPU: %i, expire: %i\n", smp_processor_id(),
++ atomic_read(&appldata_expire_count));
++ if (atomic_dec_and_test(&appldata_expire_count)) {
++ atomic_set(&appldata_expire_count, smp_num_cpus);
++ tasklet_schedule((struct tasklet_struct *) data);
++ }
++}
++
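The sampling machinery spreads one logical interval over all CPUs: appldata_timer_handler() below arms a periodic vtimer of appldata_interval / smp_num_cpus virtual seconds on every CPU, and the dec-and-test above schedules the tasklet once all of them have expired. With the default 60-second interval on a 4-CPU guest:

    per_cpu_interval == (60 / 4) * VIRTUAL_SECOND
                     == 15 * 0x0F4240000 units    /* 15 virtual seconds */

so a record is gathered roughly once per 60 CPU-seconds of aggregate virtual time.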
++/*
++ * appldata_tasklet_function()
++ *
++ * call data gathering function for each (active) module
++ */
++static void appldata_tasklet_function(unsigned long data)
++{
++ struct list_head *lh;
++ struct appldata_ops *ops;
++ int i;
++
++ P_DEBUG(" -= Tasklet =-\n");
++ i = 0;
++ spin_lock(&appldata_ops_lock);
++ list_for_each(lh, &appldata_ops_list) {
++ ops = list_entry(lh, struct appldata_ops, list);
++ P_DEBUG("list_for_each loop: %i) active = %u, name = %s\n",
++ ++i, ops->active, ops->name);
++ if (ops->active == 1) {
++ ops->callback(ops->data);
++ }
++ }
++ spin_unlock(&appldata_ops_lock);
++}
++
++/*
++ * appldata_mod_vtimer_wrap()
++ *
++ * wrapper function for mod_virt_timer(), because smp_call_function_on()
++ * accepts only one parameter.
++ */
++static void appldata_mod_vtimer_wrap(struct appldata_mod_vtimer_args *args) {
++ mod_virt_timer(args->timer, args->expires);
++}
++
++/*
++ * appldata_diag()
++ *
++ * prepare parameter list, issue DIAG 0xDC
++ */
++static int appldata_diag(char record_nr, u16 function, unsigned long buffer,
++ u16 length)
++{
++ unsigned long ry;
++ struct appldata_product_id {
++ char prod_nr[7]; /* product nr. */
++ char prod_fn[2]; /* product function */
++ char record_nr; /* record nr. */
++ char version_nr[2]; /* version */
++ char release_nr[2]; /* release */
++ char mod_lvl[2]; /* modification lvl. */
++ } appldata_product_id = {
++ /* all strings are EBCDIC, record_nr is byte */
++ .prod_nr = {0xD3, 0xC9, 0xD5, 0xE4,
++ 0xE7, 0xD2, 0xD9}, /* "LINUXKR" */
++ .prod_fn = {0xD5, 0xD3}, /* "NL" */
++ .record_nr = record_nr,
++ .version_nr = {0xF2, 0xF4}, /* "24" */
++ .release_nr = {0xF0, 0xF1}, /* "01" */
++ .mod_lvl = {0xF0, 0xF0}, /* "00" */
++ };
++ struct appldata_parameter_list appldata_parameter_list = {
++ .diag = 0xDC,
++ .function = function,
++ .parlist_length =
++ sizeof(appldata_parameter_list),
++ .buffer_length = length,
++ .product_id_addr =
++ (unsigned long) &appldata_product_id,
++ .buffer_addr = virt_to_phys((void *) buffer)
++ };
++
++ if (!MACHINE_IS_VM)
++ return -ENOSYS;
++ ry = -1;
++ asm volatile(
++ "diag %1,%0,0xDC\n\t"
++ : "=d" (ry) : "d" (&(appldata_parameter_list)) : "cc");
++ return (int) ry;
++}
++/********************** timer, tasklet, DIAG <END> ***************************/
++
++
++/****************************** /proc stuff **********************************/
++/*
++ * appldata_timer_handler()
++ *
++ * Start/Stop timer, show status of timer (0 = not active, 1 = active)
++ */
++static int
++appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
++ void *buffer, size_t *lenp)
++{
++ int len, i;
++ u64 per_cpu_interval;
++ char buf[2];
++
++ if (!*lenp || filp->f_pos) {
++ *lenp = 0;
++ return 0;
++ }
++ if (!write) {
++ len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n");
++ if (len > *lenp)
++ len = *lenp;
++ if (copy_to_user(buffer, buf, len))
++ return -EFAULT;
++ goto out;
++ }
++
++ len = *lenp;
++ if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
++ return -EFAULT;
++
++ per_cpu_interval = (u64) (appldata_interval / smp_num_cpus) *
++ VIRTUAL_SECOND;
++ spin_lock(&appldata_timer_lock);
++ if ((buf[0] == '1') && (!appldata_timer_active)) {
++ for (i = 0; i < smp_num_cpus; i++) {
++ appldata_timer[i].expires = per_cpu_interval;
++ smp_call_function_on(add_virt_timer_periodic,
++ &appldata_timer[i], 0, 1, i);
++ }
++ appldata_timer_active = 1;
++ P_STATUS("Monitoring timer started.\n");
++ } else if ((buf[0] == '0') && (appldata_timer_active)) {
++ for (i = 0; i < smp_num_cpus; i++) {
++ smp_call_function_on((void *) del_virt_timer,
++ &appldata_timer[i],
++ 0, 1, i);
++ }
++ appldata_timer_active = 0;
++ P_STATUS("Monitoring timer stopped.\n");
++ }
++ spin_unlock(&appldata_timer_lock);
++out:
++ *lenp = len;
++ filp->f_pos += len;
++ return 0;
++}
++
++/*
++ * appldata_interval_handler()
++ *
++ * Set timer interval for collection of data (in seconds), show current
++ * timer interval.
++ */
++static int
++appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
++ void *buffer, size_t *lenp)
++{
++ int len, i;
++ u64 per_cpu_interval;
++ char buf[16];
++
++ if (!*lenp || filp->f_pos) {
++ *lenp = 0;
++ return 0;
++ }
++ if (!write) {
++ len = sprintf(buf, "%i\n", appldata_interval);
++ if (len > *lenp)
++ len = *lenp;
++ if (copy_to_user(buffer, buf, len))
++ return -EFAULT;
++ goto out;
++ }
++
++ len = *lenp;
++ if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) {
++ return -EFAULT;
++ }
++ sscanf(buf, "%i", &i);
++ if (i >= smp_num_cpus) {
++ spin_lock(&appldata_timer_lock);
++ per_cpu_interval = (u64) (i / smp_num_cpus) * VIRTUAL_SECOND;
++ appldata_interval = i;
++ if (appldata_timer_active) {
++ for (i = 0; i < smp_num_cpus; i++) {
++ appldata_mod_vtimer_args.timer =
++ &appldata_timer[i];
++ appldata_mod_vtimer_args.expires =
++ per_cpu_interval;
++ smp_call_function_on(
++ (void *) appldata_mod_vtimer_wrap,
++ &appldata_mod_vtimer_args,
++ 0, 1, i);
++ }
++ }
++ spin_unlock(&appldata_timer_lock);
++ P_STATUS("Monitoring interval set to %u seconds.\n",
++ appldata_interval);
++ } else {
++ P_ERROR("Timer interval has to be >= [nr. cpus] seconds, i.e. %i seconds!\n",
++ smp_num_cpus);
++ return -EINVAL;
++ }
++out:
++ *lenp = len;
++ filp->f_pos += len;
++ return 0;
++}
++
++/*
++ * appldata_generic_handler()
++ *
++ * Generic start/stop monitoring and DIAG, show status of
++ * monitoring (0 = not in process, 1 = in process)
++ */
++static int
++appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
++ void *buffer, size_t *lenp)
++{
++ struct appldata_ops *ops;
++ int rc, len;
++ char buf[2];
++
++ ops = ctl->data;
++ if (!*lenp || filp->f_pos) {
++ *lenp = 0;
++ return 0;
++ }
++ if (!write) {
++ len = sprintf(buf, ops->active ? "1\n" : "0\n");
++ if (len > *lenp)
++ len = *lenp;
++ if (copy_to_user(buffer, buf, len))
++ return -EFAULT;
++ goto out;
++ }
++
++ len = *lenp;
++ if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
++ return -EFAULT;
++
++ spin_lock_bh(&appldata_ops_lock);
++ if ((buf[0] == '1') && (ops->active == 0)) {
++ ops->active = 1;
++ ops->callback(ops->data); // init record
++ rc = appldata_diag(ops->record_nr,
++ APPLDATA_START_INTERVAL_REC,
++ (unsigned long) ops->data, ops->size);
++ if (rc != 0) {
++ P_ERROR("START DIAG 0xDC for %s failed, "
++ "return code: %d\n", ops->name, rc);
++ ops->active = 0;
++ } else {
++ P_STATUS("Monitoring %s data enabled, "
++ "DIAG 0xDC started.\n", ops->name);
++ }
++ } else if ((buf[0] == '0') && (ops->active == 1)) {
++ ops->active = 0;
++ rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
++ (unsigned long) ops->data, ops->size);
++ if (rc != 0) {
++ P_ERROR("STOP DIAG 0xDC for %s failed, "
++ "return code: %d\n", ops->name, rc);
++ } else {
++ P_STATUS("Monitoring %s data disabled, "
++ "DIAG 0xDC stopped.\n", ops->name);
++ }
++ }
++ spin_unlock_bh(&appldata_ops_lock);
++out:
++ *lenp = len;
++ filp->f_pos += len;
++ return 0;
++}
++/*************************** /proc stuff <END> *******************************/
++
++
++/************************* module-ops management *****************************/
++/*
++ * appldata_register_ops()
++ *
++ * update ops list, register /proc entries
++ */
++int appldata_register_ops(struct appldata_ops *ops)
++{
++ struct list_head *lh;
++ struct appldata_ops *tmp_ops;
++ int rc, i;
++
++ rc = 0;
++ i = 0;
++
++ if ((ops->size > APPLDATA_MAX_REC_SIZE) ||
++ (ops->size < 0)){
++ P_ERROR("Invalid size of %s record = %i, maximum = %i!\n",
++ ops->name, ops->size, APPLDATA_MAX_REC_SIZE);
++ rc = -ENOMEM;
++ goto out;
++ }
++ if ((ops->ctl_nr == CTL_APPLDATA) ||
++ (ops->ctl_nr == CTL_APPLDATA_TIMER) ||
++ (ops->ctl_nr == CTL_APPLDATA_INTERVAL)) {
++ P_ERROR("ctl_nr %i already in use!\n", ops->ctl_nr);
++ rc = -EBUSY;
++ goto out;
++ }
++ ops->ctl_table = kmalloc(4*sizeof(struct ctl_table), GFP_KERNEL);
++ if (ops->ctl_table == NULL) {
++ P_ERROR("Not enough memory for %s ctl_table!\n", ops->name);
++ rc = -ENOMEM;
++ goto out;
++ }
++ memset(ops->ctl_table, 0, 4*sizeof(struct ctl_table));
++
++ spin_lock_bh(&appldata_ops_lock);
++ list_for_each(lh, &appldata_ops_list) {
++ tmp_ops = list_entry(lh, struct appldata_ops, list);
++ P_DEBUG("register_ops loop: %i) name = %s, ctl = %i\n",
++ ++i, tmp_ops->name, tmp_ops->ctl_nr);
++ P_DEBUG("Comparing %s (ctl %i) with %s (ctl %i)\n",
++ tmp_ops->name, tmp_ops->ctl_nr, ops->name,
++ ops->ctl_nr);
++ if (strncmp(tmp_ops->name, ops->name,
++ APPLDATA_PROC_NAME_LENGTH) == 0) {
++ spin_unlock_bh(&appldata_ops_lock);
++ P_ERROR("Name \"%s\" already exists!\n", ops->name);
++ kfree(ops->ctl_table);
++ rc = -EBUSY;
++ goto out;
++ }
++ if (tmp_ops->ctl_nr == ops->ctl_nr) {
++ spin_unlock_bh(&appldata_ops_lock);
++ P_ERROR("ctl_nr %i already registered!\n", ops->ctl_nr);
++ kfree(ops->ctl_table);
++ rc = -EBUSY;
++ goto out;
++ }
++ }
++ list_add(&ops->list, &appldata_ops_list);
++ spin_unlock_bh(&appldata_ops_lock);
++
++ ops->ctl_table[0].ctl_name = CTL_APPLDATA;
++ ops->ctl_table[0].procname = appldata_proc_name;
++ ops->ctl_table[0].maxlen = 0;
++ ops->ctl_table[0].mode = S_IRUGO | S_IXUGO;
++ ops->ctl_table[0].child = &ops->ctl_table[2];
++
++ ops->ctl_table[1].ctl_name = 0;
++
++ ops->ctl_table[2].ctl_name = ops->ctl_nr;
++ ops->ctl_table[2].procname = ops->name;
++ ops->ctl_table[2].mode = S_IRUGO | S_IWUSR;
++ ops->ctl_table[2].proc_handler = appldata_generic_handler;
++ ops->ctl_table[2].data = ops;
++
++ ops->ctl_table[3].ctl_name = 0;
++
++ ops->sysctl_header = register_sysctl_table(ops->ctl_table,1);
++ ops->ctl_table[2].de->owner = ops->owner;
++ P_STATUS("%s-ops registered!\n", ops->name);
++out:
++ return rc;
++}
++
++/*
++ * appldata_unregister_ops()
++ *
++ * update ops list, unregister /proc entries, stop DIAG if necessary
++ */
++void appldata_unregister_ops(struct appldata_ops *ops)
++{
++ int rc;
++
++ unregister_sysctl_table(ops->sysctl_header);
++ kfree(ops->ctl_table);
++ if (ops->active == 1) {
++ ops->active = 0;
++ rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
++ (unsigned long) ops->data, ops->size);
++ if (rc != 0) {
++ P_ERROR("STOP DIAG 0xDC for %s failed, "
++ "return code: %d\n", ops->name, rc);
++ } else {
++ P_STATUS("Monitoring %s data disabled, "
++ "DIAG 0xDC stopped.\n", ops->name);
++ }
++
++ }
++ spin_lock_bh(&appldata_ops_lock);
++ list_del(&ops->list);
++ spin_unlock_bh(&appldata_ops_lock);
++ P_STATUS("%s-ops unregistered!\n", ops->name);
++}
++/********************** module-ops management <END> **************************/
++
++
++/******************************* init / exit *********************************/
++/*
++ * appldata_init()
++ *
++ * init timer and tasklet, register /proc entries
++ */
++static int __init appldata_init(void)
++{
++ int i;
++
++ P_DEBUG("sizeof(parameter_list) = %lu\n",
++ sizeof(struct appldata_parameter_list));
++
++ for (i = 0; i < smp_num_cpus; i++) {
++ smp_call_function_on((void *) init_virt_timer,
++ &appldata_timer[i],
++ 0, 1, i);
++ appldata_timer[i].function = appldata_timer_function;
++ appldata_timer[i].data = (unsigned long)
++ &appldata_tasklet_struct;
++ }
++ atomic_set(&appldata_expire_count, smp_num_cpus);
++
++ appldata_sysctl_header = register_sysctl_table(appldata_dir_table, 1);
++#ifdef MODULE
++ appldata_dir_table[0].de->owner = THIS_MODULE;
++ appldata_table[0].de->owner = THIS_MODULE;
++ appldata_table[1].de->owner = THIS_MODULE;
++#endif
++
++ tasklet_init(&appldata_tasklet_struct, appldata_tasklet_function, 0);
++ P_DEBUG("Base interface initialized.\n");
++ return 0;
++}
++
++/*
++ * appldata_exit()
++ *
++ * stop timer and tasklet, unregister /proc entries
++ */
++static void __exit appldata_exit(void)
++{
++ struct list_head *lh;
++ struct appldata_ops *ops;
++ int rc, i;
++
++ P_DEBUG("Unloading module ...\n");
++ /*
++ * ops list should be empty, but just in case something went wrong...
++ */
++ spin_lock_bh(&appldata_ops_lock);
++ list_for_each(lh, &appldata_ops_list) {
++ ops = list_entry(lh, struct appldata_ops, list);
++ rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
++ (unsigned long) ops->data, ops->size);
++ if (rc != 0) {
++ P_ERROR("STOP DIAG 0xDC for %s failed, "
++ "return code: %d\n", ops->name, rc);
++ }
++ }
++ spin_unlock_bh(&appldata_ops_lock);
++ for (i = 0; i < smp_num_cpus; i++) {
++ smp_call_function_on((void *) del_virt_timer, &appldata_timer[i],
++ 0, 1, i);
++ }
++ appldata_timer_active = 0;
++
++ unregister_sysctl_table(appldata_sysctl_header);
++
++ tasklet_kill(&appldata_tasklet_struct);
++
++ P_DEBUG("... module unloaded!\n");
++}
++/**************************** init / exit <END> ******************************/
++
++
++module_init(appldata_init);
++module_exit(appldata_exit);
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Gerald Schaefer");
++MODULE_DESCRIPTION("Linux-VM Monitor Stream, base infrastructure");
++
++EXPORT_SYMBOL_GPL(appldata_register_ops);
++EXPORT_SYMBOL_GPL(appldata_unregister_ops);
++
++#ifdef MODULE
++/*
++ * Kernel symbols needed by appldata_mem and appldata_os modules.
++ * However, if this file is compiled as a module (for testing only), these
++ * symbols are not exported. In this case, we define them locally and export
++ * those.
++ */
++void si_swapinfo(struct sysinfo *val)
++{
++ val->freeswap = -1ul;
++ val->totalswap = -1ul;
++}
++unsigned long avenrun[3] = {-1 - FIXED_1/200, -1 - FIXED_1/200,
++ -1 - FIXED_1/200};
++int nr_threads = -1;
++#endif /* MODULE */
++EXPORT_SYMBOL_GPL(si_swapinfo);
++EXPORT_SYMBOL_GPL(page_cache_size);
++EXPORT_SYMBOL_GPL(nr_threads);
++EXPORT_SYMBOL_GPL(avenrun);
+=== arch/s390/appldata/appldata_net_sum.c
+==================================================================
+--- arch/s390/appldata/appldata_net_sum.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/appldata/appldata_net_sum.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,184 @@
++/*
++ * arch/s390/appldata/appldata_net_sum.c
++ *
++ * Data gathering module for Linux-VM Monitor Stream, Stage 1.
++ * Collects accumulated network statistics (Packets received/transmitted,
++ * dropped, errors, ...).
++ *
++ * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
++ *
++ * Author: Gerald Schaefer <geraldsc at de.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/errno.h>
++#include <linux/kernel_stat.h>
++#include <linux/netdevice.h>
++
++#include "appldata.h"
++
++
++#define MY_PRINT_NAME "appldata_net_sum" /* for debug messages, etc. */
++
++/*
++ * Network data
++ */
++struct appldata_net_sum_data {
++ u64 timestamp;
++ u32 sync_count_1; /* after VM collected the record data, */
++ u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the
++                           same. If not, the record was updated on the
++                           Linux side while VM was collecting it, and
++                           the data may be corrupt */
++
++ u32 nr_interfaces; /* nr. of network interfaces being monitored */
++
++ u32 padding; /* next value is 64-bit aligned, so these */
++ /* 4 byte would be padded out by compiler */
++
++ u64 rx_packets; /* total packets received */
++ u64 tx_packets; /* total packets transmitted */
++ u64 rx_bytes; /* total bytes received */
++ u64 tx_bytes; /* total bytes transmitted */
++ u64 rx_errors; /* bad packets received */
++ u64 tx_errors; /* packet transmit problems */
++ u64 rx_dropped; /* no space in linux buffers */
++ u64 tx_dropped; /* no space available in linux */
++ u64 collisions; /* collisions while transmitting */
++} appldata_net_sum_data;
++
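The two sync counters form a sequence-count handshake with the consumer: the gatherer bumps sync_count_1 before and sync_count_2 after filling the record, so equal values mean a consistent snapshot. A sketch of the check the z/VM side is expected to perform (hypothetical, for illustration):

    static int record_is_consistent(const struct appldata_net_sum_data *d)
    {
            return d->sync_count_1 == d->sync_count_2;
    }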
++
++static inline void appldata_print_debug(struct appldata_net_sum_data *net_data)
++{
++ P_DEBUG("--- NET - RECORD ---\n");
++
++ P_DEBUG("nr_interfaces = %u\n", net_data->nr_interfaces);
++ P_DEBUG("rx_packets = %8llu\n", ULL(net_data->rx_packets));
++ P_DEBUG("tx_packets = %8llu\n", ULL(net_data->tx_packets));
++ P_DEBUG("rx_bytes = %8llu\n", ULL(net_data->rx_bytes));
++ P_DEBUG("tx_bytes = %8llu\n", ULL(net_data->tx_bytes));
++ P_DEBUG("rx_errors = %8llu\n", ULL(net_data->rx_errors));
++ P_DEBUG("tx_errors = %8llu\n", ULL(net_data->tx_errors));
++ P_DEBUG("rx_dropped = %8llu\n", ULL(net_data->rx_dropped));
++ P_DEBUG("tx_dropped = %8llu\n", ULL(net_data->tx_dropped));
++ P_DEBUG("collisions = %8llu\n", ULL(net_data->collisions));
++
++ P_DEBUG("sync_count_1 = %u\n", net_data->sync_count_1);
++ P_DEBUG("sync_count_2 = %u\n", net_data->sync_count_2);
++ P_DEBUG("timestamp = %llX\n", ULL(net_data->timestamp));
++}
++
++/*
++ * appldata_get_net_sum_data()
++ *
++ * gather accumulated network statistics
++ */
++static void appldata_get_net_sum_data(void *data)
++{
++ int i;
++ struct appldata_net_sum_data *net_data;
++ struct net_device *dev;
++ struct net_device_stats *stats;
++ unsigned long rx_packets, tx_packets, rx_bytes, tx_bytes, rx_errors,
++ tx_errors, rx_dropped, tx_dropped, collisions;
++
++ net_data = data;
++ net_data->sync_count_1++;
++
++ i = 0;
++ rx_packets = 0;
++ tx_packets = 0;
++ rx_bytes = 0;
++ tx_bytes = 0;
++ rx_errors = 0;
++ tx_errors = 0;
++ rx_dropped = 0;
++ tx_dropped = 0;
++ collisions = 0;
++ read_lock(&dev_base_lock);
++ for (dev = dev_base; dev != NULL; dev = dev->next) {
++ if (dev->get_stats == NULL) {
++ continue;
++ }
++ stats = dev->get_stats(dev);
++ rx_packets += stats->rx_packets;
++ tx_packets += stats->tx_packets;
++ rx_bytes += stats->rx_bytes;
++ tx_bytes += stats->tx_bytes;
++ rx_errors += stats->rx_errors;
++ tx_errors += stats->tx_errors;
++ rx_dropped += stats->rx_dropped;
++ tx_dropped += stats->tx_dropped;
++ collisions += stats->collisions;
++ i++;
++ }
++ read_unlock(&dev_base_lock);
++ net_data->nr_interfaces = i;
++ net_data->rx_packets = rx_packets;
++ net_data->tx_packets = tx_packets;
++ net_data->rx_bytes = rx_bytes;
++ net_data->tx_bytes = tx_bytes;
++ net_data->rx_errors = rx_errors;
++ net_data->tx_errors = tx_errors;
++ net_data->rx_dropped = rx_dropped;
++ net_data->tx_dropped = tx_dropped;
++ net_data->collisions = collisions;
++
++ net_data->timestamp = get_clock();
++ net_data->sync_count_2++;
++ appldata_print_debug(net_data);
++}
++
++
++static struct appldata_ops ops = {
++ .ctl_nr = CTL_APPLDATA_NET_SUM,
++ .name = "net_sum",
++ .record_nr = APPLDATA_RECORD_NET_SUM_ID,
++ .size = sizeof(struct appldata_net_sum_data),
++ .callback = &appldata_get_net_sum_data,
++ .data = &appldata_net_sum_data,
++ .owner = THIS_MODULE,
++};
++
++
++/*
++ * appldata_net_init()
++ *
++ * init data, register ops
++ */
++static int __init appldata_net_init(void)
++{
++ int rc;
++
++ P_DEBUG("sizeof(net) = %lu\n", sizeof(struct appldata_net_sum_data));
++
++ rc = appldata_register_ops(&ops);
++ if (rc != 0) {
++ P_ERROR("Error registering ops, rc = %i\n", rc);
++ } else {
++ P_DEBUG("%s-ops registered!\n", ops.name);
++ }
++ return rc;
++}
++
++/*
++ * appldata_net_exit()
++ *
++ * unregister ops
++ */
++static void __exit appldata_net_exit(void)
++{
++ appldata_unregister_ops(&ops);
++ P_DEBUG("%s-ops unregistered!\n", ops.name);
++}
++
++
++module_init(appldata_net_init);
++module_exit(appldata_net_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Gerald Schaefer");
++MODULE_DESCRIPTION("Linux-VM Monitor Stream, accumulated network statistics");
+=== arch/s390/appldata/appldata_mem.c
+==================================================================
+--- arch/s390/appldata/appldata_mem.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/appldata/appldata_mem.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,165 @@
++/*
++ * arch/s390/appldata/appldata_mem.c
++ *
++ * Data gathering module for Linux-VM Monitor Stream, Stage 1.
++ * Collects data related to memory management.
++ *
++ * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
++ *
++ * Author: Gerald Schaefer <geraldsc at de.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/errno.h>
++#include <linux/kernel_stat.h>
++#include <asm/io.h>
++
++#include "appldata.h"
++
++
++#define MY_PRINT_NAME "appldata_mem" /* for debug messages, etc. */
++#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
++
++/*
++ * Memory data
++ */
++struct appldata_mem_data {
++ u64 timestamp;
++ u32 sync_count_1; /* after VM collected the record data, */
++ u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the
++ same. If not, the record has been updated on
++ the Linux side while VM was collecting the
++ (possibly corrupt) data */
++
++ u64 pgpgin; /* pages read from disk */
++ u64 pgpgout; /* pages written to disk */
++ u64 pswpin; /* pages swapped in */
++ u64 pswpout; /* pages swapped out */
++
++ u64 sharedram; /* sharedram is currently set to 0 */
++
++ u64 totalram; /* total main memory size */
++ u64 freeram; /* free main memory size */
++ u64 totalhigh; /* total high memory size */
++ u64 freehigh; /* free high memory size */
++
++ u64 bufferram; /* memory reserved for buffers, free cache */
++ u64 cached; /* size of (used) cache, w/o buffers */
++ u64 totalswap; /* total swap space size */
++ u64 freeswap; /* free swap space */
++} appldata_mem_data;
++
++
++static inline void appldata_debug_print(struct appldata_mem_data *mem_data)
++{
++ P_DEBUG("--- MEM - RECORD ---\n");
++ P_DEBUG("pgpgin = %8llu KB\n", ULL(mem_data->pgpgin));
++ P_DEBUG("pgpgout = %8llu KB\n", ULL(mem_data->pgpgout));
++ P_DEBUG("pswpin = %8llu Pages\n", ULL(mem_data->pswpin));
++ P_DEBUG("pswpout = %8llu Pages\n", ULL(mem_data->pswpout));
++ P_DEBUG("sharedram = %8llu KB\n", ULL(mem_data->sharedram));
++ P_DEBUG("totalram = %8llu KB\n", ULL(mem_data->totalram));
++ P_DEBUG("freeram = %8llu KB\n", ULL(mem_data->freeram));
++ P_DEBUG("totalhigh = %8llu KB\n", ULL(mem_data->totalhigh));
++ P_DEBUG("freehigh = %8llu KB\n", ULL(mem_data->freehigh));
++ P_DEBUG("bufferram = %8llu KB\n", ULL(mem_data->bufferram));
++ P_DEBUG("cached = %8llu KB\n", ULL(mem_data->cached));
++ P_DEBUG("totalswap = %8llu KB\n", ULL(mem_data->totalswap));
++ P_DEBUG("freeswap = %8llu KB\n", ULL(mem_data->freeswap));
++ P_DEBUG("sync_count_1 = %u\n", mem_data->sync_count_1);
++ P_DEBUG("sync_count_2 = %u\n", mem_data->sync_count_2);
++ P_DEBUG("timestamp = %llX\n", ULL(mem_data->timestamp));
++}
++
++/*
++ * appldata_get_mem_data()
++ *
++ * gather memory data
++ */
++static void appldata_get_mem_data(void *data)
++{
++ struct sysinfo val;
++ struct appldata_mem_data *mem_data;
++
++ mem_data = data;
++ mem_data->sync_count_1++;
++
++ mem_data->pgpgin = kstat.pgpgin >> 1;
++ mem_data->pgpgout = kstat.pgpgout >> 1;
++ mem_data->pswpin = kstat.pswpin;
++ mem_data->pswpout = kstat.pswpout;
++
++ si_meminfo(&val);
++
++ mem_data->sharedram = val.sharedram;
++ mem_data->totalram = P2K(val.totalram);
++ mem_data->freeram = P2K(val.freeram);
++ mem_data->totalhigh = P2K(val.totalhigh);
++ mem_data->freehigh = P2K(val.freehigh);
++ mem_data->bufferram = P2K(val.bufferram);
++ mem_data->cached = P2K(page_cache_size -
++ val.bufferram);
++
++ si_swapinfo(&val);
++
++ mem_data->totalswap = P2K(val.totalswap);
++ mem_data->freeswap = P2K(val.freeswap);
++
++ mem_data->timestamp = get_clock();
++ mem_data->sync_count_2++;
++ appldata_debug_print(mem_data);
++}
++
++
++static struct appldata_ops ops = {
++ .ctl_nr = CTL_APPLDATA_MEM,
++ .name = "mem",
++ .record_nr = APPLDATA_RECORD_MEM_ID,
++ .size = sizeof(struct appldata_mem_data),
++ .callback = &appldata_get_mem_data,
++ .data = &appldata_mem_data,
++ .owner = THIS_MODULE,
++};
++
++
++/*
++ * appldata_mem_init()
++ *
++ * init data, register ops
++ */
++static int __init appldata_mem_init(void)
++{
++ int rc;
++
++ P_DEBUG("sizeof(mem) = %lu\n", sizeof(struct appldata_mem_data));
++
++ rc = appldata_register_ops(&ops);
++ if (rc != 0) {
++ P_ERROR("Error registering ops, rc = %i\n", rc);
++ } else {
++ P_DEBUG("%s-ops registered!\n", ops.name);
++ }
++ return rc;
++}
++
++/*
++ * appldata_mem_exit()
++ *
++ * unregister ops
++ */
++static void __exit appldata_mem_exit(void)
++{
++ appldata_unregister_ops(&ops);
++ P_DEBUG("%s-ops unregistered!\n", ops.name);
++}
++
++
++module_init(appldata_mem_init);
++module_exit(appldata_mem_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Gerald Schaefer");
++MODULE_DESCRIPTION("Linux-VM Monitor Stream, MEMORY statistics");
+=== arch/s390/appldata/appldata_os.c
+==================================================================
+--- arch/s390/appldata/appldata_os.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/appldata/appldata_os.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,206 @@
++/*
++ * arch/s390/appldata/appldata_os.c
++ *
++ * Data gathering module for Linux-VM Monitor Stream, Stage 1.
++ * Collects misc. OS related data (CPU utilization, running processes).
++ *
++ * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
++ *
++ * Author: Gerald Schaefer <geraldsc at de.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/errno.h>
++#include <linux/kernel_stat.h>
++#include <linux/netdevice.h>
++
++#include "appldata.h"
++
++
++#define MY_PRINT_NAME "appldata_os" /* for debug messages, etc. */
++#define LOAD_INT(x) ((x) >> FSHIFT)
++#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
++
++/*
++ * OS data
++ */
++struct appldata_os_per_cpu {
++ u32 per_cpu_user; /* timer ticks spent in user mode */
++ u32 per_cpu_nice; /* ... spent with modified priority */
++ u32 per_cpu_system; /* ... spent in kernel mode */
++ u32 per_cpu_idle; /* ... spent in idle mode */
++};
++
++struct appldata_os_data {
++ u64 timestamp;
++ u32 sync_count_1; /* after VM collected the record data, */
++ u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the
++ same. If not, the record has been updated on
++ the Linux side while VM was collecting the
++ (possibly corrupt) data */
++
++ u32 nr_cpus; /* number of (virtual) CPUs */
++ u32 per_cpu_size; /* size of the per-cpu data struct */
++ u32 cpu_offset; /* offset of the first per-cpu data struct */
++
++ u32 nr_running; /* number of runnable threads */
++ u32 nr_threads; /* number of threads */
++
++ u32 avenrun[3]; /* average nr. of running processes during */
++ /* the last 1, 5 and 15 minutes */
++
++ /* per cpu data */
++ struct appldata_os_per_cpu os_cpu[0];
++};
++
++static struct appldata_os_data *appldata_os_data;
++
++
++static inline void appldata_print_debug(struct appldata_os_data *os_data)
++{
++ int i;
++ unsigned int a0, a1, a2;
++
++ P_DEBUG("--- OS - RECORD ---\n");
++ P_DEBUG("nr_threads = %u\n", os_data->nr_threads);
++ P_DEBUG("nr_running = %u\n", os_data->nr_running);
++ P_DEBUG("avenrun(int) = %8x / %8x / %8x\n", os_data->avenrun[0],
++ os_data->avenrun[1], os_data->avenrun[2]);
++ a0 = os_data->avenrun[0];
++ a1 = os_data->avenrun[1];
++ a2 = os_data->avenrun[2];
++ P_DEBUG("avenrun(float) = %d.%02d / %d.%02d / %d.%02d\n",
++ LOAD_INT(a0), LOAD_FRAC(a0), LOAD_INT(a1), LOAD_FRAC(a1),
++ LOAD_INT(a2), LOAD_FRAC(a2));
++
++ for (i = 0; i < smp_num_cpus; i++) {
++ P_DEBUG("cpu%u : user = %u, nice = %u, system = %u, "
++ "idle = %u\n",
++ i,
++ os_data->os_cpu[i].per_cpu_user,
++ os_data->os_cpu[i].per_cpu_nice,
++ os_data->os_cpu[i].per_cpu_system,
++ os_data->os_cpu[i].per_cpu_idle);
++ }
++
++ P_DEBUG("sync_count_1 = %u\n", os_data->sync_count_1);
++ P_DEBUG("sync_count_2 = %u\n", os_data->sync_count_2);
++ P_DEBUG("timestamp = %llX\n", ULL(os_data->timestamp));
++}
++
++/*
++ * appldata_get_os_data()
++ *
++ * gather OS data
++ */
++static void appldata_get_os_data(void *data)
++{
++ int i;
++ struct appldata_os_data *os_data;
++
++ os_data = data;
++ os_data->sync_count_1++;
++ os_data->nr_cpus = smp_num_cpus;
++
++ os_data->nr_threads = nr_threads;
++ os_data->nr_running = nr_running;
++ os_data->avenrun[0] = (u32) avenrun[0] + (FIXED_1/200);
++ os_data->avenrun[1] = (u32) avenrun[1] + (FIXED_1/200);
++ os_data->avenrun[2] = (u32) avenrun[2] + (FIXED_1/200);
++
++ for (i = 0; i < smp_num_cpus; i++) {
++ os_data->os_cpu[i].per_cpu_user =
++ kstat.per_cpu_user[cpu_logical_map(i)];
++ os_data->os_cpu[i].per_cpu_nice =
++ kstat.per_cpu_nice[cpu_logical_map(i)];
++ os_data->os_cpu[i].per_cpu_system =
++ kstat.per_cpu_system[cpu_logical_map(i)];
++ os_data->os_cpu[i].per_cpu_idle = jiffies - (
++ os_data->os_cpu[i].per_cpu_user
++ + os_data->os_cpu[i].per_cpu_nice
++ + os_data->os_cpu[i].per_cpu_system);
++ }
++
++ os_data->timestamp = get_clock();
++ os_data->sync_count_2++;
++ appldata_print_debug(os_data);
++}
++
++
++static struct appldata_ops ops = {
++ .ctl_nr = CTL_APPLDATA_OS,
++ .name = "os",
++ .record_nr = APPLDATA_RECORD_OS_ID,
++ .callback = &appldata_get_os_data,
++ .owner = THIS_MODULE,
++};
++
++
++/*
++ * appldata_os_init()
++ *
++ * init data, register ops
++ */
++static int __init appldata_os_init(void)
++{
++ int rc, size;
++
++ size = sizeof(struct appldata_os_data) +
++ (smp_num_cpus * sizeof(struct appldata_os_per_cpu));
++ if (size > APPLDATA_MAX_REC_SIZE) {
++ P_ERROR("Size of record = %i, bigger than maximum (%i)!\n",
++ size, APPLDATA_MAX_REC_SIZE);
++ rc = -ENOMEM;
++ goto out;
++ }
++ P_DEBUG("sizeof(os) = %i, sizeof(os_cpu) = %lu\n", size,
++ sizeof(struct appldata_os_per_cpu));
++
++ appldata_os_data = kmalloc(size, GFP_DMA);
++ if (appldata_os_data == NULL) {
++ P_ERROR("No memory for %s!\n", ops.name);
++ rc = -ENOMEM;
++ goto out;
++ }
++ memset(appldata_os_data, 0, size);
++
++ appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu);
++ appldata_os_data->cpu_offset = offsetof(struct appldata_os_data,
++ os_cpu);
++ P_DEBUG("cpu offset = %u\n", appldata_os_data->cpu_offset);
++
++ ops.data = appldata_os_data;
++ ops.size = size;
++ rc = appldata_register_ops(&ops);
++ if (rc != 0) {
++ P_ERROR("Error registering ops, rc = %i\n", rc);
++ kfree(appldata_os_data);
++ } else {
++ P_DEBUG("%s-ops registered!\n", ops.name);
++ }
++out:
++ return rc;
++}
++
++/*
++ * appldata_os_exit()
++ *
++ * unregister ops
++ */
++static void __exit appldata_os_exit(void)
++{
++ appldata_unregister_ops(&ops);
++ kfree(appldata_os_data);
++ P_DEBUG("%s-ops unregistered!\n", ops.name);
++}
++
++
++module_init(appldata_os_init);
++module_exit(appldata_os_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Gerald Schaefer");
++MODULE_DESCRIPTION("Linux-VM Monitor Stream, OS statistics");
+=== arch/s390/appldata/appldata.h
+==================================================================
+--- arch/s390/appldata/appldata.h (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/appldata/appldata.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,58 @@
++/*
++ * arch/s390/appldata/appldata.h
++ *
++ * Definitions for Linux - z/VM Monitor Stream.
++ *
++ * Copyright (C) 2003 IBM Corporation, IBM Deutschland Entwicklung GmbH.
++ *
++ * Author: Gerald Schaefer <geraldsc at de.ibm.com>
++ */
++
++//#define APPLDATA_DEBUG /* Debug messages on/off */
++
++#define APPLDATA_MAX_REC_SIZE 4024 /* Maximum size of the */
++ /* data buffer */
++
++#define APPLDATA_PROC_NAME_LENGTH 16 /* Max. length of /proc name */
++
++#define APPLDATA_RECORD_MEM_ID 0x01 /* IDs to identify the */
++#define APPLDATA_RECORD_OS_ID 0x02 /* individual records, */
++#define APPLDATA_RECORD_NET_SUM_ID 0x03 /* must be < 256 ! */
++
++#define CTL_APPLDATA 2120 /* sysctl IDs, must be unique */
++#define CTL_APPLDATA_TIMER 2121
++#define CTL_APPLDATA_INTERVAL 2122
++#define CTL_APPLDATA_MEM 2123
++#define CTL_APPLDATA_OS 2124
++#define CTL_APPLDATA_NET_SUM 2125
++
++#define ULL(var)	((unsigned long long) (var))
++
++#define P_INFO(x...) printk(KERN_INFO MY_PRINT_NAME " info: " x)
++#define P_ERROR(x...) printk(KERN_ERR MY_PRINT_NAME " error: " x)
++#define P_STATUS(x...) printk(KERN_WARNING MY_PRINT_NAME " status: " x)
++
++#ifdef APPLDATA_DEBUG
++#define P_DEBUG(x...) printk(KERN_DEBUG MY_PRINT_NAME " debug: " x)
++#else
++#define P_DEBUG(x...) do {} while (0)
++#endif
++
++struct appldata_ops {
++ struct list_head list;
++ struct ctl_table_header *sysctl_header;
++ struct ctl_table *ctl_table;
++ int active; /* monitoring status */
++
++ /* fill in from here */
++ unsigned int ctl_nr; /* sysctl ID */
++ char name[APPLDATA_PROC_NAME_LENGTH]; /* name of /proc fs node */
++ unsigned char record_nr; /* Record Nr. for Product ID */
++ void (*callback)(void *data); /* callback function */
++ void *data; /* record data */
++ unsigned int size; /* size of record */
++ struct module *owner; /* THIS_MODULE */
++};
++
++extern int appldata_register_ops(struct appldata_ops *ops);
++extern void appldata_unregister_ops(struct appldata_ops *ops);
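Everything below the "fill in from here" marker is the contract a gathering
module has to satisfy; the fields above it are owned by appldata_base. A
condensed, hypothetical skeleton following the same pattern as the three
modules above (the my_* names, the 2126 sysctl ID and the 0x42 record number
are invented for illustration only):

    #include <linux/module.h>
    #include <linux/init.h>
    #include "appldata.h"

    static struct my_record {
        u64 timestamp;
    } my_data;

    static void my_callback(void *data)
    {
        struct my_record *rec = data;

        rec->timestamp = get_clock();
    }

    static struct appldata_ops my_ops = {
        .ctl_nr    = 2126,        /* invented; must be a unique sysctl ID */
        .name      = "my",
        .record_nr = 0x42,        /* must be < 256 */
        .size      = sizeof(struct my_record),
        .callback  = &my_callback,
        .data      = &my_data,
        .owner     = THIS_MODULE,
    };

    static int __init my_init(void)
    {
        return appldata_register_ops(&my_ops);
    }

    static void __exit my_exit(void)
    {
        appldata_unregister_ops(&my_ops);
    }

    module_init(my_init);
    module_exit(my_exit);
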
+=== arch/s390/appldata/Makefile
+==================================================================
+--- arch/s390/appldata/Makefile (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/appldata/Makefile (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,13 @@
++#
++# Linux - VM Monitor Stream, Stage 1
++#
++
++O_TARGET := appldata.o
++
++obj-$(CONFIG_APPLDATA_BASE) += appldata_base.o
++obj-$(CONFIG_APPLDATA_MEM) += appldata_mem.o
++obj-$(CONFIG_APPLDATA_OS) += appldata_os.o
++obj-$(CONFIG_APPLDATA_NET_SUM) += appldata_net_sum.o
++export-objs += appldata_base.o
++
++include $(TOPDIR)/Rules.make
+=== arch/s390/mm/init.c
+==================================================================
+--- arch/s390/mm/init.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/mm/init.c (/trunk/2.4.27) (revision 52)
+@@ -69,6 +69,8 @@
+
+ void diag10(unsigned long addr)
+ {
++ if (addr >= 0x7ff00000)
++ return;
+ asm volatile ("diag %0,%0,0x10" : : "a" (addr));
+ }
+
+=== arch/s390/mm/Makefile
+==================================================================
+--- arch/s390/mm/Makefile (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/mm/Makefile (/trunk/2.4.27) (revision 52)
+@@ -9,6 +9,8 @@
+
+ O_TARGET := mm.o
+
+-obj-y := init.o fault.o ioremap.o extable.o
++obj-y := init.o fault.o ioremap.o extable.o dcss.o
++obj-$(CONFIG_CMM) += cmm.o
++export-objs := dcss.o cmm.o
+
+ include $(TOPDIR)/Rules.make
+=== arch/s390/mm/cmm.c
+==================================================================
+--- arch/s390/mm/cmm.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/mm/cmm.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,448 @@
++/*
++ * arch/s390/mm/cmm.c
++ *
++ * S390 version
++ * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Martin Schwidefsky (schwidefsky at de.ibm.com)
++ *
++ * Collaborative memory management interface.
++ */
++
++#include <linux/config.h>
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/sysctl.h>
++#include <linux/ctype.h>
++
++#include <asm/pgalloc.h>
++#include <asm/uaccess.h>
++
++#include "../../../drivers/s390/net/smsgiucv.h"
++
++#define CMM_NR_PAGES ((PAGE_SIZE / sizeof(unsigned long)) - 2)
++
++struct cmm_page_array {
++ struct cmm_page_array *next;
++ unsigned long index;
++ unsigned long pages[CMM_NR_PAGES];
++};
++
++static long cmm_pages = 0;
++static long cmm_timed_pages = 0;
++static volatile long cmm_pages_target = 0;
++static volatile long cmm_timed_pages_target = 0;
++static long cmm_timeout_pages = 0;
++static long cmm_timeout_seconds = 0;
++
++static struct cmm_page_array *cmm_page_list = 0;
++static struct cmm_page_array *cmm_timed_page_list = 0;
++
++static unsigned long cmm_thread_active = 0;
++static struct tq_struct cmm_thread_starter;
++static wait_queue_head_t cmm_thread_wait;
++static struct timer_list cmm_timer;
++
++static void cmm_timer_fn(unsigned long);
++static void cmm_set_timer(void);
++
++static long
++cmm_strtoul(const char *cp, char **endp)
++{
++ unsigned int base = 10;
++
++ if (*cp == '0') {
++ base = 8;
++ cp++;
++ if ((*cp == 'x' || *cp == 'X') && isxdigit(cp[1])) {
++ base = 16;
++ cp++;
++ }
++ }
++ return simple_strtoul(cp, endp, base);
++}
++
++static long
++cmm_alloc_pages(long pages, long *counter, struct cmm_page_array **list)
++{
++ struct cmm_page_array *pa;
++ unsigned long page;
++
++ pa = *list;
++ while (pages) {
++ page = __get_free_page(GFP_NOIO);
++ if (!page)
++ break;
++ if (!pa || pa->index >= CMM_NR_PAGES) {
++ /* Need a new page for the page list. */
++ pa = (struct cmm_page_array *)
++ __get_free_page(GFP_NOIO);
++ if (!pa) {
++ free_page(page);
++ break;
++ }
++ pa->next = *list;
++ pa->index = 0;
++ *list = pa;
++ }
++ diag10((unsigned long) page);
++ pa->pages[pa->index++] = page;
++ (*counter)++;
++ pages--;
++ }
++ return pages;
++}
++
++static void
++cmm_free_pages(long pages, long *counter, struct cmm_page_array **list)
++{
++ struct cmm_page_array *pa;
++ unsigned long page;
++
++ pa = *list;
++ while (pages) {
++ if (!pa || pa->index <= 0)
++ break;
++ page = pa->pages[--pa->index];
++ if (pa->index == 0) {
++ pa = pa->next;
++ free_page((unsigned long) *list);
++ *list = pa;
++ }
++ free_page(page);
++ (*counter)--;
++ pages--;
++ }
++}
++
++static int
++cmm_thread(void *dummy)
++{
++ int rc;
++
++ daemonize();
++ reparent_to_init();
++ strcpy(current->comm, "cmmthread");
++ set_cpus_allowed(current, 1);
++ while (1) {
++ rc = wait_event_interruptible(cmm_thread_wait,
++ (cmm_pages != cmm_pages_target ||
++ cmm_timed_pages != cmm_timed_pages_target));
++ if (rc == -ERESTARTSYS) {
++ /* Got kill signal. End thread. */
++ clear_bit(0, &cmm_thread_active);
++ cmm_pages_target = cmm_pages;
++ cmm_timed_pages_target = cmm_timed_pages;
++ break;
++ }
++ if (cmm_pages_target > cmm_pages) {
++ if (cmm_alloc_pages(1, &cmm_pages, &cmm_page_list))
++ cmm_pages_target = cmm_pages;
++ } else if (cmm_pages_target < cmm_pages) {
++ cmm_free_pages(1, &cmm_pages, &cmm_page_list);
++ }
++ if (cmm_timed_pages_target > cmm_timed_pages) {
++ if (cmm_alloc_pages(1, &cmm_timed_pages,
++ &cmm_timed_page_list))
++ cmm_timed_pages_target = cmm_timed_pages;
++ } else if (cmm_timed_pages_target < cmm_timed_pages) {
++ cmm_free_pages(1, &cmm_timed_pages,
++ &cmm_timed_page_list);
++ }
++ if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer))
++ cmm_set_timer();
++ }
++ return 0;
++}
++
++static void
++cmm_start_thread(void)
++{
++ kernel_thread(cmm_thread, 0, 0);
++}
++
++static void
++cmm_kick_thread(void)
++{
++ if (!test_and_set_bit(0, &cmm_thread_active))
++ schedule_task(&cmm_thread_starter);
++ wake_up(&cmm_thread_wait);
++}
++
++static void
++cmm_set_timer(void)
++{
++ if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) {
++ if (timer_pending(&cmm_timer))
++ del_timer(&cmm_timer);
++ return;
++ }
++ if (timer_pending(&cmm_timer)) {
++ if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
++ return;
++ }
++ cmm_timer.function = cmm_timer_fn;
++ cmm_timer.data = 0;
++ cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
++ add_timer(&cmm_timer);
++}
++
++static void
++cmm_timer_fn(unsigned long ignored)
++{
++ long pages;
++
++ pages = cmm_timed_pages_target - cmm_timeout_pages;
++ if (pages < 0)
++ cmm_timed_pages_target = 0;
++ else
++ cmm_timed_pages_target = pages;
++ cmm_kick_thread();
++ cmm_set_timer();
++}
++
++void
++cmm_set_pages(long pages)
++{
++ cmm_pages_target = pages;
++ cmm_kick_thread();
++}
++
++long
++cmm_get_pages(void)
++{
++ return cmm_pages;
++}
++
++void
++cmm_add_timed_pages(long pages)
++{
++ cmm_timed_pages_target += pages;
++ cmm_kick_thread();
++}
++
++long
++cmm_get_timed_pages(void)
++{
++ return cmm_timed_pages;
++}
++
++void
++cmm_set_timeout(long pages, long seconds)
++{
++ cmm_timeout_pages = pages;
++ cmm_timeout_seconds = seconds;
++ cmm_set_timer();
++}
++
++static inline int
++cmm_skip_blanks(char *cp, char **endp)
++{
++ char *str;
++
++ for (str = cp; *str == ' ' || *str == '\t'; str++);
++ *endp = str;
++ return str != cp;
++}
++
++#ifdef CONFIG_CMM_PROC
++/* These will someday get removed. */
++#define VM_CMM_PAGES 1111
++#define VM_CMM_TIMED_PAGES 1112
++#define VM_CMM_TIMEOUT 1113
++
++static struct ctl_table cmm_table[];
++
++static int
++cmm_pages_handler(ctl_table *ctl, int write, struct file *filp,
++ void *buffer, size_t *lenp)
++{
++ char buf[16], *p;
++ long pages;
++ int len;
++
++ if (!*lenp || (filp->f_pos && !write)) {
++ *lenp = 0;
++ return 0;
++ }
++
++ if (write) {
++ len = *lenp;
++ if (copy_from_user(buf, buffer,
++ len > sizeof(buf) ? sizeof(buf) : len))
++ return -EFAULT;
++ buf[sizeof(buf) - 1] = '\0';
++ cmm_skip_blanks(buf, &p);
++ pages = cmm_strtoul(p, &p);
++ if (ctl == &cmm_table[0])
++ cmm_set_pages(pages);
++ else
++ cmm_add_timed_pages(pages);
++ } else {
++ if (ctl == &cmm_table[0])
++ pages = cmm_get_pages();
++ else
++ pages = cmm_get_timed_pages();
++ len = sprintf(buf, "%ld\n", pages);
++ if (len > *lenp)
++ len = *lenp;
++ if (copy_to_user(buffer, buf, len))
++ return -EFAULT;
++ }
++ *lenp = len;
++ filp->f_pos += len;
++ return 0;
++}
++
++static int
++cmm_timeout_handler(ctl_table *ctl, int write, struct file *filp,
++ void *buffer, size_t *lenp)
++{
++ char buf[64], *p;
++ long pages, seconds;
++ int len;
++
++ if (!*lenp || (filp->f_pos && !write)) {
++ *lenp = 0;
++ return 0;
++ }
++
++ if (write) {
++ len = *lenp;
++ if (copy_from_user(buf, buffer,
++ len > sizeof(buf) ? sizeof(buf) : len))
++ return -EFAULT;
++ buf[sizeof(buf) - 1] = '\0';
++ cmm_skip_blanks(buf, &p);
++ pages = cmm_strtoul(p, &p);
++ cmm_skip_blanks(p, &p);
++ seconds = cmm_strtoul(p, &p);
++ cmm_set_timeout(pages, seconds);
++ } else {
++ len = sprintf(buf, "%ld %ld\n",
++ cmm_timeout_pages, cmm_timeout_seconds);
++ if (len > *lenp)
++ len = *lenp;
++ if (copy_to_user(buffer, buf, len))
++ return -EFAULT;
++ }
++ *lenp = len;
++ filp->f_pos += len;
++ return 0;
++}
++
++static struct ctl_table cmm_table[] = {
++ {
++ .ctl_name = VM_CMM_PAGES,
++ .procname = "cmm_pages",
++ .mode = 0600,
++ .proc_handler = &cmm_pages_handler,
++ },
++ {
++ .ctl_name = VM_CMM_TIMED_PAGES,
++ .procname = "cmm_timed_pages",
++ .mode = 0600,
++ .proc_handler = &cmm_pages_handler,
++ },
++ {
++ .ctl_name = VM_CMM_TIMEOUT,
++ .procname = "cmm_timeout",
++ .mode = 0600,
++ .proc_handler = &cmm_timeout_handler,
++ },
++ { .ctl_name = 0 }
++};
++
++static struct ctl_table cmm_dir_table[] = {
++ {
++ .ctl_name = CTL_VM,
++ .procname = "vm",
++ .maxlen = 0,
++ .mode = 0555,
++ .child = cmm_table,
++ },
++ { .ctl_name = 0 }
++};
++#endif
++
++#ifdef CONFIG_CMM_IUCV
++#define SMSG_PREFIX "CMM"
++static void
++cmm_smsg_target(char *msg)
++{
++ long pages, seconds;
++
++ if (!cmm_skip_blanks(msg + strlen(SMSG_PREFIX), &msg))
++ return;
++ if (strncmp(msg, "SHRINK", 6) == 0) {
++ if (!cmm_skip_blanks(msg + 6, &msg))
++ return;
++ pages = cmm_strtoul(msg, &msg);
++ cmm_skip_blanks(msg, &msg);
++ if (*msg == '\0')
++ cmm_set_pages(pages);
++ } else if (strncmp(msg, "RELEASE", 7) == 0) {
++ if (!cmm_skip_blanks(msg + 7, &msg))
++ return;
++ pages = cmm_strtoul(msg, &msg);
++ cmm_skip_blanks(msg, &msg);
++ if (*msg == '\0')
++ cmm_add_timed_pages(pages);
++ } else if (strncmp(msg, "REUSE", 5) == 0) {
++ if (!cmm_skip_blanks(msg + 5, &msg))
++ return;
++ pages = cmm_strtoul(msg, &msg);
++ if (!cmm_skip_blanks(msg, &msg))
++ return;
++ seconds = cmm_strtoul(msg, &msg);
++ cmm_skip_blanks(msg, &msg);
++ if (*msg == '\0')
++ cmm_set_timeout(pages, seconds);
++ }
++}
++#endif
++
++struct ctl_table_header *cmm_sysctl_header;
++
++static int
++cmm_init (void)
++{
++#ifdef CONFIG_CMM_PROC
++ cmm_sysctl_header = register_sysctl_table(cmm_dir_table, 1);
++#endif
++#ifdef CONFIG_CMM_IUCV
++ smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
++#endif
++ cmm_thread_starter.routine = (void *) cmm_start_thread;
++ cmm_thread_starter.data = 0;
++ init_waitqueue_head(&cmm_thread_wait);
++ init_timer(&cmm_timer);
++ return 0;
++}
++
++static void
++cmm_exit(void)
++{
++ cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
++ cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
++#ifdef CONFIG_CMM_PROC
++ unregister_sysctl_table(cmm_sysctl_header);
++#endif
++#ifdef CONFIG_CMM_IUCV
++ smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
++#endif
++}
++
++module_init(cmm_init);
++module_exit(cmm_exit);
++
++EXPORT_SYMBOL(cmm_set_pages);
++EXPORT_SYMBOL(cmm_get_pages);
++EXPORT_SYMBOL(cmm_add_timed_pages);
++EXPORT_SYMBOL(cmm_get_timed_pages);
++EXPORT_SYMBOL(cmm_set_timeout);
++
++MODULE_LICENSE("GPL");
+=== arch/s390/mm/dcss.c
+==================================================================
+--- arch/s390/mm/dcss.c (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/mm/dcss.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,504 @@
++/*
++ * File...........: arch/s390/mm/dcss.c
++ * Author(s)......: Steven Shultz <shultzss at us.ibm.com>
++ * Carsten Otte <cotte at de.ibm.com>
++ * Bugreports.to..: <Linux390 at de.ibm.com>
++ * thanks to Rob M van der Heij
++ * - he wrote the diag64 function
++ * (C) IBM Corporation 2002
++ */
++
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/spinlock.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/bootmem.h>
++#include <asm/page.h>
++#include <asm/ebcdic.h>
++#include <asm/errno.h>
++#include <asm/dcss.h>
++#include <asm/cpcmd.h>
++#include <linux/ctype.h>
++
++#define DCSS_DEBUG /* Debug messages on/off */
++
++#define DCSS_NAME "dcss"
++#ifdef DCSS_DEBUG
++#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSS_NAME " debug:" x)
++#else
++#define PRINT_DEBUG(x...) do {} while (0)
++#endif
++#define PRINT_INFO(x...) printk(KERN_INFO DCSS_NAME " info:" x)
++#define PRINT_WARN(x...) printk(KERN_WARNING DCSS_NAME " warning:" x)
++#define PRINT_ERR(x...) printk(KERN_ERR DCSS_NAME " error:" x)
++
++
++#define DCSS_LOADSHR 0x00
++#define DCSS_LOADNSR 0x04
++#define DCSS_PURGESEG 0x08
++#define DCSS_FINDSEG 0x0c
++#define DCSS_LOADNOLY 0x10
++#define DCSS_SEGEXT 0x18
++#define DCSS_QACTV 0x0c
++
++struct dcss_segment {
++ struct list_head list;
++ char dcss_name[8];
++ unsigned long start_addr;
++ unsigned long end;
++ atomic_t ref_count;
++ int dcss_attr;
++ int shared_attr;
++};
++
++static spinlock_t dcss_lock = SPIN_LOCK_UNLOCKED;
++static struct list_head dcss_list = LIST_HEAD_INIT(dcss_list);
++extern struct { unsigned long addr, size, type; } memory_chunk[16];
++
++/*
++ * Create the 8-byte, EBCDIC VM segment name from an
++ * ASCII name; stop at 8 so the buffer is not overrun.
++ */
++static inline void dcss_mkname(char *name, char *dcss_name)
++{
++	int i;
++
++	for (i = 0; i < 8; i++) {
++		if (name[i] == '\0')
++			break;
++		dcss_name[i] = toupper(name[i]);
++	}
++	for (; i < 8; i++)
++		dcss_name[i] = ' ';
++	ASCEBC(dcss_name, 8);
++}
++
++/*
++ * Perform a function on a dcss segment.
++ */
++static inline int
++dcss_diag (__u8 func, void *parameter,
++ unsigned long *ret1, unsigned long *ret2)
++{
++ unsigned long rx, ry;
++ int rc;
++
++ rx = (unsigned long) parameter;
++ ry = (unsigned long) func;
++ __asm__ __volatile__(
++#ifdef CONFIG_ARCH_S390X
++ " sam31\n" // switch to 31 bit
++ " diag %0,%1,0x64\n"
++ " sam64\n" // switch back to 64 bit
++#else
++ " diag %0,%1,0x64\n"
++#endif
++ " ipm %2\n"
++ " srl %2,28\n"
++ : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" );
++ *ret1 = rx;
++ *ret2 = ry;
++ return rc;
++}
++
++
++/* used to issue "extended" dcss query */
++static inline int
++dcss_diag_query(char *name, int *rwattr, int *shattr, unsigned long *segstart, unsigned long *segend)
++{
++ int i,j,rc;
++ unsigned long rx, ry;
++
++ typedef struct segentry {
++ char thisseg[8];
++ } segentry;
++
++ struct qout64 {
++ int segstart;
++ int segend;
++ int segcnt;
++ int segrcnt;
++ segentry segout[6];
++ };
++
++ struct qin64 {
++ char qopcode;
++ char rsrv1[3];
++ char qrcode;
++ char rsrv2[3];
++ char qname[8];
++ unsigned int qoutptr;
++ short int qoutlen;
++ };
++
++
++ struct qin64 *qinarea;
++ struct qout64 *qoutarea;
++
++	qinarea = (struct qin64 *) get_free_page (GFP_DMA);
++	if (!qinarea) {
++		rc = -ENOMEM;
++		goto out;
++	}
++	qoutarea = (struct qout64 *) get_free_page (GFP_DMA);
++	if (!qoutarea) {
++		rc = -ENOMEM;
++		free_page ((unsigned long) qinarea);
++		goto out;
++	}
++	memset (qinarea, 0, PAGE_SIZE);
++	memset (qoutarea, 0, PAGE_SIZE);
++
++ qinarea->qopcode = DCSS_QACTV; /* do a query for active
++ segments */
++ qinarea->qoutptr = (unsigned long) qoutarea;
++ qinarea->qoutlen = sizeof(struct qout64);
++
++	/* Move the segment name into a doubleword-aligned
++	   field and pad it with blanks to a length of 8.
++	*/
++
++ for (i = j = 0 ; i < 8; i++) {
++ qinarea->qname[i] = (name[j] == '\0') ? ' ' : name[j++];
++ }
++
++ /* name already in EBCDIC */
++ /* ASCEBC ((void *)&qinarea.qname, 8); */
++
++ /* set the assembler variables */
++ rx = (unsigned long) qinarea;
++ ry = DCSS_SEGEXT; /* this is extended function */
++
++ /* issue diagnose x'64' */
++ __asm__ __volatile__(
++#ifdef CONFIG_ARCH_S390X
++ " sam31\n" // switch to 31 bit
++ " diag %0,%1,0x64\n"
++ " sam64\n" // switch back to 64 bit
++#else
++ " diag %0,%1,0x64\n"
++#endif
++ " ipm %2\n"
++ " srl %2,28\n"
++ : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" );
++
++ /* parse the query output area */
++ *segstart=qoutarea->segstart;
++ *segend=qoutarea->segend;
++
++ if (rc > 1)
++ {
++ *rwattr = 2;
++ *shattr = 2;
++ rc = 0;
++ goto free;
++ }
++
++ if (qoutarea->segcnt > 6)
++ {
++ *rwattr = 3;
++ *shattr = 3;
++ rc = 0;
++ goto free;
++ }
++
++ *rwattr = 1;
++ *shattr = 1;
++
++ for (i=0; i < qoutarea->segrcnt; i++) {
++ if (qoutarea->segout[i].thisseg[3] == 2 ||
++ qoutarea->segout[i].thisseg[3] == 3 ||
++ qoutarea->segout[i].thisseg[3] == 6 )
++ *rwattr = 0;
++ if (qoutarea->segout[i].thisseg[3] == 1 ||
++ qoutarea->segout[i].thisseg[3] == 3 ||
++ qoutarea->segout[i].thisseg[3] == 5 )
++ *shattr = 0;
++ } /* end of for statement */
++ rc = 0;
++ free:
++	free_page ((unsigned long) qoutarea);
++	free_page ((unsigned long) qinarea);
++ out:
++ return rc;
++}
++
++/*
++ * Load a DCSS segment via the diag 0x64.
++ */
++int segment_load(char *name, int segtype, unsigned long *addr,
++ unsigned long *end)
++{
++ char dcss_name[8];
++ struct list_head *l;
++ struct dcss_segment *seg, *tmp;
++ unsigned long dummy;
++ unsigned long segstart, segend;
++ int rc = 0,i;
++ int initrc = 0;
++ int rwattr, shattr;
++
++ if (!MACHINE_IS_VM)
++ return -ENOSYS;
++ dcss_mkname(name, dcss_name);
++ /* search for the dcss in list of currently loaded segments */
++ spin_lock(&dcss_lock);
++ seg = NULL;
++ list_for_each(l, &dcss_list) {
++ tmp = list_entry(l, struct dcss_segment, list);
++ if (memcmp(tmp->dcss_name, dcss_name, 8) == 0) {
++ seg = tmp;
++ break;
++ }
++ }
++
++ if (seg == NULL) {
++ /* find out the attributes of this
++ shared segment */
++ dcss_diag_query(dcss_name, &rwattr, &shattr, &segstart, &segend);
++ /* does segment collide with main memory? */
++ for (i=0; i<16; i++) {
++ if (memory_chunk[i].type != 0)
++ continue;
++ if (memory_chunk[i].addr > segend)
++ continue;
++ if (memory_chunk[i].addr + memory_chunk[i].size <= segstart)
++ continue;
++ spin_unlock(&dcss_lock);
++ return -ENOENT;
++ }
++ /* or does it collide with other (loaded) segments? */
++ list_for_each(l, &dcss_list) {
++ tmp = list_entry(l, struct dcss_segment, list);
++ if ((segstart <= tmp->end && segstart >= tmp->start_addr) ||
++ (segend <= tmp->end && segend >= tmp->start_addr) ||
++ (segstart <= tmp->start_addr && segend >= tmp->end)) {
++ PRINT_ERR("Segment Overlap!\n");
++ spin_unlock(&dcss_lock);
++ return -ENOENT;
++ }
++ }
++
++ /* do case statement on segtype */
++ /* if asking for shared ro,
++ shared rw works */
++ /* if asking for exclusive ro,
++ exclusive rw works */
++
++ switch(segtype) {
++ case SEGMENT_SHARED_RO:
++ if (shattr > 1 || rwattr > 1) {
++ spin_unlock(&dcss_lock);
++ return -ENOENT;
++ } else {
++ if (shattr == 0 && rwattr == 0)
++ rc = SEGMENT_EXCLUSIVE_RO;
++ if (shattr == 0 && rwattr == 1)
++ rc = SEGMENT_EXCLUSIVE_RW;
++ if (shattr == 1 && rwattr == 0)
++ rc = SEGMENT_SHARED_RO;
++ if (shattr == 1 && rwattr == 1)
++ rc = SEGMENT_SHARED_RW;
++ }
++ break;
++ case SEGMENT_SHARED_RW:
++ if (shattr > 1 || rwattr != 1) {
++ spin_unlock(&dcss_lock);
++ return -ENOENT;
++ } else {
++ if (shattr == 0)
++ rc = SEGMENT_EXCLUSIVE_RW;
++ if (shattr == 1)
++ rc = SEGMENT_SHARED_RW;
++ }
++ break;
++
++ case SEGMENT_EXCLUSIVE_RO:
++ if (shattr > 0 || rwattr > 1) {
++ spin_unlock(&dcss_lock);
++ return -ENOENT;
++ } else {
++ if (rwattr == 0)
++ rc = SEGMENT_EXCLUSIVE_RO;
++ if (rwattr == 1)
++ rc = SEGMENT_EXCLUSIVE_RW;
++ }
++ break;
++
++ case SEGMENT_EXCLUSIVE_RW:
++/* if (shattr != 0 || rwattr != 1) {
++ spin_unlock(&dcss_lock);
++ return -ENOENT;
++ } else {
++*/
++ rc = SEGMENT_EXCLUSIVE_RW;
++// }
++ break;
++
++ default:
++ spin_unlock(&dcss_lock);
++ return -ENOENT;
++ } /* end switch */
++
++ seg = kmalloc(sizeof(struct dcss_segment), GFP_DMA);
++ if (seg != NULL) {
++ memcpy(seg->dcss_name, dcss_name, 8);
++ if (rc == SEGMENT_EXCLUSIVE_RW) {
++ if (dcss_diag(DCSS_LOADNSR, seg->dcss_name,
++ &seg->start_addr, &seg->end) == 0) {
++ if (seg->end < max_low_pfn*PAGE_SIZE ) {
++ atomic_set(&seg->ref_count, 1);
++ list_add(&seg->list, &dcss_list);
++ *addr = seg->start_addr;
++ *end = seg->end;
++ seg->dcss_attr = rc;
++ if (shattr == 1 && rwattr == 1)
++ seg->shared_attr = SEGMENT_SHARED_RW;
++ else if (shattr == 1 && rwattr == 0)
++ seg->shared_attr = SEGMENT_SHARED_RO;
++ else
++ seg->shared_attr = SEGMENT_EXCLUSIVE_RW;
++ } else {
++ dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
++ kfree (seg);
++ rc = -ENOENT;
++ }
++ } else {
++ kfree(seg);
++ rc = -ENOENT;
++ }
++ goto out;
++ }
++ if (dcss_diag(DCSS_LOADNOLY, seg->dcss_name,
++ &seg->start_addr, &seg->end) == 0) {
++ if (seg->end < max_low_pfn*PAGE_SIZE ) {
++ atomic_set(&seg->ref_count, 1);
++ list_add(&seg->list, &dcss_list);
++ *addr = seg->start_addr;
++ *end = seg->end;
++ seg->dcss_attr = rc;
++ seg->shared_attr = rc;
++ } else {
++ dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
++ kfree (seg);
++ rc = -ENOENT;
++ }
++ } else {
++ kfree(seg);
++ rc = -ENOENT;
++ }
++ } else rc = -ENOMEM;
++ } else {
++ /* found */
++ if ((segtype == SEGMENT_EXCLUSIVE_RW) && (seg->dcss_attr != SEGMENT_EXCLUSIVE_RW)) {
++ PRINT_ERR("Segment already loaded in other mode than EXCLUSIVE_RW!\n");
++ rc = -EPERM;
++ goto out;
++ /* reload segment in exclusive mode */
++/* dcss_diag(DCSS_LOADNSR, seg->dcss_name,
++ &seg->start_addr, &seg->end);
++ seg->dcss_attr = SEGMENT_EXCLUSIVE_RW;*/
++ }
++ if ((segtype != SEGMENT_EXCLUSIVE_RW) && (seg->dcss_attr == SEGMENT_EXCLUSIVE_RW)) {
++ PRINT_ERR("Segment already loaded in EXCLUSIVE_RW mode!\n");
++ rc = -EPERM;
++ goto out;
++ }
++ atomic_inc(&seg->ref_count);
++ *addr = seg->start_addr;
++ *end = seg->end;
++ rc = seg->dcss_attr;
++ }
++out:
++ spin_unlock(&dcss_lock);
++ return rc;
++}
++
++/*
++ * Decrease the use count of a DCSS segment and remove
++ * it from the address space if nobody is using it
++ * any longer.
++ */
++void segment_unload(char *name)
++{
++ char dcss_name[8];
++ unsigned long dummy;
++ struct list_head *l,*l_tmp;
++ struct dcss_segment *seg;
++
++ if (!MACHINE_IS_VM)
++ return;
++ dcss_mkname(name, dcss_name);
++ spin_lock(&dcss_lock);
++ list_for_each_safe(l, l_tmp, &dcss_list) {
++ seg = list_entry(l, struct dcss_segment, list);
++ if (memcmp(seg->dcss_name, dcss_name, 8) == 0) {
++ if (atomic_dec_return(&seg->ref_count) == 0) {
++ /* Last user of the segment is
++ gone. */
++ list_del(&seg->list);
++ dcss_diag(DCSS_PURGESEG, seg->dcss_name,
++ &dummy, &dummy);
++ kfree(seg);
++ }
++ break;
++ }
++ }
++ spin_unlock(&dcss_lock);
++}
++
++/*
++ * Replace an existing DCSS segment, so that machines
++ * that load it anew will see the new version.
++ */
++void segment_replace(char *name)
++{
++ char dcss_name[8];
++ struct list_head *l;
++ struct dcss_segment *seg;
++ int mybeg = 0;
++ int myend = 0;
++ char mybuff1[80];
++ char mybuff2[80];
++
++ if (!MACHINE_IS_VM)
++ return;
++ dcss_mkname(name, dcss_name);
++
++ memset (mybuff1, 0, sizeof(mybuff1));
++ memset (mybuff2, 0, sizeof(mybuff2));
++
++ spin_lock(&dcss_lock);
++ list_for_each(l, &dcss_list) {
++ seg = list_entry(l, struct dcss_segment, list);
++ if (memcmp(seg->dcss_name, dcss_name, 8) == 0) {
++ mybeg = seg->start_addr >> 12;
++ myend = (seg->end) >> 12;
++ if (seg->shared_attr == SEGMENT_EXCLUSIVE_RW)
++ sprintf(mybuff1, "DEFSEG %s %X-%X EW",
++ name, mybeg, myend);
++ if (seg->shared_attr == SEGMENT_EXCLUSIVE_RO)
++ sprintf(mybuff1, "DEFSEG %s %X-%X RO",
++ name, mybeg, myend);
++ if (seg->shared_attr == SEGMENT_SHARED_RW)
++ sprintf(mybuff1, "DEFSEG %s %X-%X SW",
++ name, mybeg, myend);
++ if (seg->shared_attr == SEGMENT_SHARED_RO)
++ sprintf(mybuff1, "DEFSEG %s %X-%X SR",
++ name, mybeg, myend);
++ spin_unlock(&dcss_lock);
++ sprintf(mybuff2, "SAVESEG %s", name);
++ cpcmd(mybuff1, NULL, 80);
++ cpcmd(mybuff2, NULL, 80);
++ break;
++ }
++
++ }
++ if (myend == 0) spin_unlock(&dcss_lock);
++}
++
++EXPORT_SYMBOL(segment_load);
++EXPORT_SYMBOL(segment_unload);
++EXPORT_SYMBOL(segment_replace);
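The three exports above are the whole driver-facing interface: segment_load()
returns the segment type actually granted as a non-negative value (which may
be more permissive than the one requested) or a negative errno, and each
successful load must be balanced by a segment_unload(). A hedged caller
sketch ("MYSEG" and the surrounding function are hypothetical):

    #include <asm/dcss.h>

    static int use_my_segment(void)
    {
        unsigned long addr, end;
        int rc;

        rc = segment_load("MYSEG", SEGMENT_SHARED_RO, &addr, &end);
        if (rc < 0)
            return rc;    /* -ENOSYS, -ENOENT, -EPERM or -ENOMEM */

        /* rc holds the granted mode, e.g. SEGMENT_SHARED_RW. */
        /* ... use the memory between addr and end ... */

        segment_unload("MYSEG");
        return 0;
    }
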
+=== arch/s390/config.in
+==================================================================
+--- arch/s390/config.in (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/config.in (/trunk/2.4.27) (revision 52)
+@@ -62,8 +62,35 @@
+ bool 'Show crashed user process info' CONFIG_PROCESS_DEBUG
+ bool 'Pseudo page fault support' CONFIG_PFAULT
+ bool 'VM shared kernel support' CONFIG_SHARED_KERNEL
++bool 'No HZ timer ticks in idle' CONFIG_NO_IDLE_HZ
++if [ "$CONFIG_NO_IDLE_HZ" = "y" ] ; then
++ bool ' Idle HZ timer on by default' CONFIG_NO_IDLE_HZ_INIT
++fi
++bool 'Virtual CPU timer support' CONFIG_VIRT_TIMER
++dep_bool 'Linux - VM Monitor Stream, base infrastructure' CONFIG_APPLDATA_BASE \
++$CONFIG_PROC_FS $CONFIG_VIRT_TIMER
++dep_tristate ' Monitor memory management statistics' CONFIG_APPLDATA_MEM $CONFIG_APPLDATA_BASE
++dep_tristate ' Monitor OS statistics' CONFIG_APPLDATA_OS $CONFIG_APPLDATA_BASE
++dep_tristate ' Monitor overall network statistics' CONFIG_APPLDATA_NET_SUM $CONFIG_APPLDATA_BASE
++tristate 'Collaborative memory management' CONFIG_CMM
++if [ "$CONFIG_CMM" != "n" ]; then
++ dep_bool '/proc interface to cooperative memory management' CONFIG_CMM_PROC $CONFIG_PROC_FS
++ if [ "$CONFIG_SMSGIUCV" = "y" -o "$CONFIG_SMSGIUCV" = "$CONFIG_CMM" ]; then
++ bool 'IUCV special message interface to cooperative memory management' CONFIG_CMM_IUCV
++ fi
++fi
+ endmenu
+
++mainmenu_option next_comment
++comment 'SCSI support'
++
++tristate 'SCSI support' CONFIG_SCSI
++
++if [ "$CONFIG_SCSI" != "n" ]; then
++ source drivers/scsi/Config.in
++fi
++endmenu
++
+ source drivers/s390/Config.in
+
+ if [ "$CONFIG_NET" = "y" ]; then
+=== arch/s390/Makefile
+==================================================================
+--- arch/s390/Makefile (/upstream/vanilla/2.4.27) (revision 52)
++++ arch/s390/Makefile (/trunk/2.4.27) (revision 52)
+@@ -23,15 +23,18 @@
+ LINKFLAGS =-T $(TOPDIR)/arch/s390/vmlinux.lds $(LDFLAGS)
+ endif
+
++CFLAGS_ARCH := -m31
+ CFLAGS_PIPE := -pipe
+ CFLAGS_NSR := -fno-strength-reduce
+-CFLAGS := $(CFLAGS) $(CFLAGS_PIPE) $(CFLAGS_NSR)
++CFLAGS := $(CFLAGS) $(CFLAGS_ARCH) $(CFLAGS_PIPE) $(CFLAGS_NSR)
++AFLAGS := $(AFLAGS) $(CFLAGS_ARCH)
+
+ HEAD := arch/s390/kernel/head.o arch/s390/kernel/init_task.o
+
+ SUBDIRS := $(SUBDIRS) arch/s390/mm arch/s390/kernel arch/s390/lib \
+- drivers/s390 arch/s390/math-emu
+-CORE_FILES := arch/s390/mm/mm.o arch/s390/kernel/kernel.o $(CORE_FILES)
++ arch/s390/appldata drivers/s390 arch/s390/math-emu
++CORE_FILES := arch/s390/mm/mm.o arch/s390/kernel/kernel.o \
++ arch/s390/appldata/appldata.o $(CORE_FILES)
+ DRIVERS := $(DRIVERS) drivers/s390/io.o
+ LIBS := $(TOPDIR)/arch/s390/lib/lib.a $(LIBS) $(TOPDIR)/arch/s390/lib/lib.a
+
+@@ -39,7 +42,7 @@
+ CORE_FILES := $(CORE_FILES) arch/s390/math-emu/math-emu.o
+ endif
+
+-all: image listing
++all: image
+
+ listing: vmlinux
+ @$(MAKEBOOT) listing
+@@ -47,6 +50,9 @@
+ arch/s390/kernel: dummy
+ $(MAKE) linuxsubdirs SUBDIRS=arch/s390/kernel
+
++arch/s390/appldata: dummy
++ $(MAKE) linuxsubdirs SUBDIRS=arch/s390/appldata
++
+ arch/s390/mm: dummy
+ $(MAKE) linuxsubdirs SUBDIRS=arch/s390/mm
+
+=== Makefile
+==================================================================
+--- Makefile (/upstream/vanilla/2.4.27) (revision 52)
++++ Makefile (/trunk/2.4.27) (revision 52)
+@@ -285,7 +285,7 @@
+ boot: vmlinux
+ @$(MAKE) CFLAGS="$(CFLAGS) $(CFLAGS_KERNEL)" -C arch/$(ARCH)/boot
+
+-vmlinux: include/linux/version.h $(CONFIGURATION) init/main.o init/version.o init/do_mounts.o linuxsubdirs
++vmlinux: include/linux/version.h $(CONFIGURATION) init/main.o init/version.o init/do_mounts.o Kerntypes linuxsubdirs
+ $(LD) $(LINKFLAGS) $(HEAD) init/main.o init/version.o init/do_mounts.o \
+ --start-group \
+ $(CORE_FILES) \
+@@ -296,6 +296,11 @@
+ -o vmlinux
+ $(NM) vmlinux | grep -v '\(compiled\)\|\(\.o$$\)\|\( [aUw] \)\|\(\.\.ng$$\)\|\(LASH[RL]DI\)' | sort > System.map
+
++Kerntypes: init/kerntypes.o
++ @if [ -f init/kerntypes.o ]; then \
++ mv init/kerntypes.o Kerntypes; \
++ fi
++
+ symlinks:
+ rm -f include/asm
+ ( cd include ; ln -sf asm-$(ARCH) asm)
+@@ -350,6 +355,9 @@
+ echo > .ver1
+ @echo \#define LINUX_COMPILE_DOMAIN \"`cat .ver1 | $(uts_truncate)`\" >> .ver
+ @echo \#define LINUX_COMPILER \"`$(CC) $(CFLAGS) -v 2>&1 | tail -n 1`\" >> .ver
++ @echo \__linux_compile_version_id__`echo $(KERNELRELEASE) | tr -c '[0-9A-Za-z\n]' '_'`__`hostname | tr -c '[0-9A-Za-z\n]' '_'`__`LANG=C date | tr -c '[0-9A-Za-z\n]' '_'` > .ver1
++ @echo \#define LINUX_COMPILE_VERSION_ID `cat .ver1` >> .ver
++ @echo "typedef char* `cat .ver1`_t;" >> .ver
+ @mv -f .ver $@
+ @rm -f .ver1
+
+@@ -366,6 +374,9 @@
+ init/version.o: init/version.c include/linux/compile.h include/config/MARKER
+ $(CC) $(CFLAGS) $(CFLAGS_KERNEL) -DUTS_MACHINE='"$(ARCH)"' -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -c -o init/version.o init/version.c
+
++init/kerntypes.o: init/kerntypes.c include/config/MARKER include/linux/compile.h
++ $(CC) $(CFLAGS) $(CFLAGS_KERNEL) $(PROFILING) -gstabs -c -o $*.o $<
++
+ init/main.o: init/main.c include/config/MARKER
+ $(CC) $(CFLAGS) $(CFLAGS_KERNEL) $(PROFILING) -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -c -o $@ $<
+
+=== drivers/s390/scsi/zh_ioctl32.c
+==================================================================
+--- drivers/s390/scsi/zh_ioctl32.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/scsi/zh_ioctl32.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,248 @@
++/*
++ * $Id: zh_ioctl32.c,v 1.4.2.2 2004/03/24 11:18:00 aherrman Exp $
++ *
++ * (C) Copyright IBM Corp. 2003
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version. See the file COPYING for more
++ * information.
++ *
++ * Authors:
++ * Stefan Voelkel <Stefan.Voelkel at millenux.com>
++ * Andreas Herrmann <aherrman at de.ibm.com>
++ *
++ * No need for special handler functions here since our ioctl() method is 32 BIT
++ * aware.
++ *
++ * For this trick to work we define a 32-bit structure (see "zh.h") and a normal
++ * one (e.g. struct zh_scsi_report_luns32 and struct zh_scsi_report_luns). We also
++ * define an extra 32 BIT ioctl number, ZH_IOC_SCSI_REPORT_LUNS32 using the
++ * zh_scsi_report_luns32 structure.
++ *
++ * This creates an ioctl command that is the same on 32 and 64 BIT. On a 64 BIT
++ * kernel only 32 BIT applications will call the ioctl() with the 32 BIT ioctl
++ * command, thus we can react on that and apply our pointer voodoo.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++
++#include <linux/fs.h>
++#include <linux/ioctl.h>
++#include <asm/uaccess.h>
++#include <asm/ioctl32.h>
++
++#include "zh.h"
++
++#include "../../../arch/s390x/kernel/linux32.h"
++
++/* void* casts needed because of fourth parameter */
++#define ZH_IOC_DEFAULT(cmd) {cmd, (void*) sys_ioctl}
++#define ZH_IOC_HANDLER(cmd, handler) {cmd, (void*) handler}
++
++/**
++ * zh_ioctl_entry - one entry in the conversion array
++ * @cmd: ioctl cmd
++ * @handler: the corresponding handler
++ */
++struct zh_ioctl_entry
++{
++ unsigned int cmd;
++ ioctl_trans_handler_t handler;
++};
++
++/**
++ * struct zh_send_ct32 - data needed to send out a Generic Service command,
++ * 32BIT version
++ * @devid: id of HBA via which to send CT
++ * @req_length: size the request buffer
++ * @req: request buffer
++ * @resp_length: size of response buffer
++ * @resp: response buffer
++ */
++struct zh_send_ct32
++{
++ devid_t devid;
++ u32 req_length;
++ u32 req;
++ u32 resp_length;
++ u32 resp;
++} __attribute__((packed));
++
++#define ZH_IOC_SEND_CT32 _IOWR(ZH_IOC_MAGIC, 7, struct zh_send_ct32)
++
++/**
++ * zh_send_ct32 - ioctl32 conversion function for ZH_IOC_SEND_CT
++ * @fd: fd of device file
++ * @cmd: command to execute
++ * @arg: parameter(s) for the command
++ * Return: 0 on success, else -E* code
++ * Context: User
++ */
++static inline int zh_send_ct32(unsigned int fd, unsigned int cmd,
++ unsigned long arg)
++{
++ int ret;
++ struct zh_send_ct ioc_data;
++ struct zh_send_ct32 ioc_data32;
++ struct zh_send_ct32 *u_ptr = (struct zh_send_ct32 *) A(arg);
++
++ if (copy_from_user(&ioc_data32, u_ptr, sizeof(ioc_data32)))
++ return -EFAULT;
++
++ ioc_data.devid = ioc_data32.devid;
++ ioc_data.req_length = ioc_data32.req_length;
++ ioc_data.resp_length = ioc_data32.resp_length;
++ ioc_data.req = (void *) A(ioc_data32.req);
++ ioc_data.resp = (void *) A(ioc_data32.resp);
++
++ ret = zh_send_ct_helper(&ioc_data);
++
++ return ret;
++}
++
++/**
++ * struct zh_scsi_report_luns32 - data needed for an REPORT_LUNS, 32BIT version
++ * @devid: of the adapter
++ * @wwpn: of the port
++ * @*rsp_buffer: pointer to response buffer
++ * @rsp_buffer_size: of the response buffer
++ * @sense: buffer for sense data
++ */
++struct zh_scsi_report_luns32
++{
++ devid_t devid;
++ wwn_t wwpn;
++ u32 rsp_buffer;
++ u32 rsp_buffer_size;
++ u8 sense[ZH_SCSI_SENSE_BUFFERSIZE];
++} __attribute__((packed));
++
++#define ZH_IOC_SCSI_REPORT_LUNS32 \
++_IOW(ZH_IOC_MAGIC, 10, struct zh_scsi_report_luns32)
++
++/**
++ * zh_scsi_report_luns32 - ioctl32 conversion function for
++ * ZH_SCSI_REPORT_LUNS32
++ * @fd: fd of device file
++ * @cmd: command to execute
++ * @arg: parameter(s) for the command
++ * Return: 0 on success, else -E* code
++ * Context: User
++ */
++static inline int zh_scsi_report_luns32(unsigned int fd, unsigned int cmd,
++ unsigned long arg)
++{
++ int ret;
++ struct zh_scsi_report_luns ioc_data = { 0 };
++ struct zh_scsi_report_luns32 ioc_data32;
++ struct zh_scsi_report_luns32 *u_ptr;
++
++ u_ptr = (struct zh_scsi_report_luns32 *) A(arg);
++
++ if (copy_from_user(&ioc_data32, u_ptr, sizeof(ioc_data32)))
++ return -EFAULT;
++
++ ioc_data.devid = ioc_data32.devid;
++ ioc_data.wwpn = ioc_data32.wwpn;
++ ioc_data.rsp_buffer = (void *) A(ioc_data32.rsp_buffer);
++ ioc_data.rsp_buffer_size = ioc_data32.rsp_buffer_size;
++
++ ret = zh_report_luns_helper(&ioc_data);
++
++ if (ret >= 0) {
++ memcpy(&ioc_data32.sense, &(ioc_data.sense),
++ ZH_SCSI_SENSE_BUFFERSIZE);
++ if (copy_to_user(u_ptr, &ioc_data32, sizeof(ioc_data32))) {
++ ret = -EFAULT;
++ }
++ if (ret > 0) {
++ ret = 0;
++ }
++ }
++
++ return ret;
++}
++
++/*
++ * All the commands we have to register; the table may be freed if not compiled as a module
++ */
++static struct zh_ioctl_entry zh_conversion[] __initdata = {
++ ZH_IOC_DEFAULT(ZH_IOC_GET_ADAPTERATTRIBUTES),
++ ZH_IOC_DEFAULT(ZH_IOC_GET_PORTATTRIBUTES),
++ ZH_IOC_DEFAULT(ZH_IOC_GET_PORTSTATISTICS),
++ ZH_IOC_DEFAULT(ZH_IOC_GET_DPORTATTRIBUTES),
++ ZH_IOC_DEFAULT(ZH_IOC_GET_RNID),
++ ZH_IOC_DEFAULT(ZH_IOC_SEND_RNID),
++ ZH_IOC_HANDLER(ZH_IOC_SEND_CT32, zh_send_ct32),
++ ZH_IOC_DEFAULT(ZH_IOC_SCSI_INQUIRY),
++ ZH_IOC_DEFAULT(ZH_IOC_SCSI_READ_CAPACITY),
++ ZH_IOC_HANDLER(ZH_IOC_SCSI_REPORT_LUNS32, zh_scsi_report_luns32),
++ ZH_IOC_DEFAULT(ZH_IOC_GET_EVENT_BUFFER),
++ ZH_IOC_DEFAULT(ZH_IOC_GET_CONFIG),
++ ZH_IOC_DEFAULT(ZH_IOC_CLEAR_CONFIG),
++ ZH_IOC_DEFAULT(ZH_IOC_EVENT_START),
++ ZH_IOC_DEFAULT(ZH_IOC_EVENT_STOP),
++ ZH_IOC_DEFAULT(ZH_IOC_EVENT),
++ ZH_IOC_DEFAULT(ZH_IOC_EVENT_INSERT),
++};
++
++/**
++ * do_unregister - unregister previously registered conversion ioctl()s
++ * @from: index in the zh_conversion table to start deregistering from
++ *
++ * All ioctl()s _before_ from will be deregistered. If from is 0 all will be
++ * unregistered.
++ */
++static int do_unregister(int from)
++{
++ int i;
++
++ if (0 == from) {
++ from = sizeof(zh_conversion)/sizeof(zh_conversion[0]);
++ }
++
++ for (i = from - 1; i >= 0; --i) {
++ unregister_ioctl32_conversion(zh_conversion[i].cmd);
++ }
++
++ return 0;
++}
++
++/**
++ * do_register - register ioctl() conversion routines
++ */
++static int do_register(void)
++{
++ unsigned int i;
++ int ret;
++
++ for (i = 0; i < sizeof(zh_conversion)/sizeof(zh_conversion[0]); ++i) {
++ ret = register_ioctl32_conversion(zh_conversion[i].cmd,
++ zh_conversion[i].handler);
++		if (ret != 0) {
++			if (i)
++				do_unregister(i); /* entries 0..i-1 succeeded */
++			break;
++		}
++ }
++
++ return ret;
++}
++
++/**
++ * zh_unregister_ioctl_conversion - clean up method
++ */
++int zh_unregister_ioctl_conversion(void)
++{
++ return do_unregister(0);
++}
++
++/**
++ * zh_register_ioctl_conversion - initialization method
++ */
++int zh_register_ioctl_conversion(void)
++{
++ return do_register();
++}
+=== drivers/s390/scsi/zfcp_zh.c
+==================================================================
+--- drivers/s390/scsi/zfcp_zh.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/scsi/zfcp_zh.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,951 @@
++/*
++ * $Id: zfcp_zh.c,v 1.3.2.4 2004/09/20 16:20:30 aherrman Exp $
++ *
++ * Module providing an interface for HBA API (FC-HBA) implementation
++ * to the zfcp driver.
++ *
++ * (C) Copyright IBM Corp. 2002, 2003
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version. See the file COPYING for more
++ * information.
++ *
++ * Authors:
++ * Stefan Voelkel <Stefan.Voelkel at millenux.com>
++ * Andreas Herrmann <aherrman at de.ibm.com>
++ */
++#include "zfcp.h"
++#include "zfcp_zh.h"
++
++#include <linux/module.h>
++
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++extern zfcp_data_t zfcp_data;
++
++struct zfcp_callbacks zfcp_callback = { };
++
++/**
++ * zfcp_callback_do_adapter_add - callback wrapper
++ * @*filp: passed through to the callback
++ * @*a: adapter that was added
++ *
++ * We do not need irq-safe spinlocks here, since we only do a read_lock. All
++ * writers must use the _irq version though.
++ */
++void zfcp_callback_do_adapter_add(struct file *filp, const zfcp_adapter_t *a)
++{
++ unsigned long flags;
++
++ read_lock_irqsave(&zfcp_callback.lock, flags);
++
++ if ((zfcp_callback.callbacks != NULL) &&
++ (zfcp_callback.callbacks->adapter_add != NULL)) {
++ zfcp_callback.callbacks->adapter_add(filp, a->devno, a->wwnn,
++ a->wwpn);
++ }
++
++ read_unlock_irqrestore(&zfcp_callback.lock, flags);
++}
++
++/**
++ * zfcp_callback_do_port_add - callback wrapper
++ * @*filp: passed through to the callback
++ * @*a: adapter the port was added to
++ * @*p: port that was added
++ */
++void zfcp_callback_do_port_add(struct file *filp, const zfcp_adapter_t *a,
++ const zfcp_port_t *p)
++{
++ unsigned long flags;
++
++ read_lock_irqsave(&zfcp_callback.lock, flags);
++
++ if ((zfcp_callback.callbacks != NULL) &&
++ (zfcp_callback.callbacks->port_add != NULL)) {
++ zfcp_callback.callbacks->port_add(filp, a->devno, p->wwpn,
++ p->wwnn, p->d_id);
++ }
++
++ read_unlock_irqrestore(&zfcp_callback.lock, flags);
++}
++
++/**
++ * zfcp_callback_do_unit_add - callback wrapper
++ * @*filp: passed through to the callback
++ * @*a: Adapter the port belongs to
++ * @*p: Port the unit belongs to
++ * @*u: unit that was added
++ */
++void zfcp_callback_do_unit_add(struct file *filp, const zfcp_adapter_t *a,
++ const zfcp_port_t *p, const zfcp_unit_t *u)
++{
++ unsigned long flags;
++
++ read_lock_irqsave(&zfcp_callback.lock, flags);
++
++ if ((zfcp_callback.callbacks != NULL) &&
++ (zfcp_callback.callbacks->unit_add != NULL)) {
++ zfcp_callback.callbacks->unit_add(filp, a->devno, p->wwpn,
++ u->fcp_lun, a->scsi_host->host_no,
++ u->device->channel, u->device->id,
++ u->device->lun);
++ }
++
++ read_unlock_irqrestore(&zfcp_callback.lock, flags);
++}
++
++/**
++ * zfcp_callback_do_incomming_els - callback wrapper
++ * @*a: adapter the ELS was received on
++ * @*v: pointer to the ELS payload
++ */
++void zfcp_callback_do_incomming_els(const zfcp_adapter_t *a, const void *v)
++{
++ unsigned long flags;
++
++ read_lock_irqsave(&zfcp_callback.lock, flags);
++
++ if ((zfcp_callback.callbacks != NULL) &&
++ (zfcp_callback.callbacks->incomming_els != NULL)) {
++ zfcp_callback.callbacks->incomming_els(a->devno, a->s_id, v);
++ }
++
++ read_unlock_irqrestore(&zfcp_callback.lock, flags);
++}
++
++/**
++ * zfcp_callback_do_link_down - callback wrapper
++ * @*a: adapter that lost the link
++ */
++void zfcp_callback_do_link_down(const zfcp_adapter_t *a)
++{
++ unsigned long flags;
++
++ read_lock_irqsave(&zfcp_callback.lock, flags);
++
++ if ((zfcp_callback.callbacks != NULL) &&
++ (zfcp_callback.callbacks->link_down != NULL)) {
++ zfcp_callback.callbacks->link_down(a->s_id);
++ }
++
++ read_unlock_irqrestore(&zfcp_callback.lock, flags);
++}
++
++/**
++ * zfcp_callback_do_link_up - callback wrapper
++ * @*a: adapter with link up event
++ */
++void zfcp_callback_do_link_up(const zfcp_adapter_t *a)
++{
++ unsigned long flags;
++
++ read_lock_irqsave(&zfcp_callback.lock, flags);
++
++ if ((zfcp_callback.callbacks != NULL) &&
++ (zfcp_callback.callbacks->link_up != NULL)) {
++ zfcp_callback.callbacks->link_up(a->s_id);
++ }
++
++ read_unlock_irqrestore(&zfcp_callback.lock, flags);
++}
++
++/**
++ * zfcp_search_unit - search for a unit
++ * @*port: pointer to port structure of port where unit is attached to
++ * @lun: FC LUN of the unit to search for
++ * @**unit: address to write pointer to found unit structure to
++ * Return: 0 on success, -E* code otherwise
++ * Locks: lock/unlock of port->unit_list_lock
++ *
++ * Search for a unit and return its address.
++ * See also zfcp_search_port_unit().
++ */
++static int zfcp_search_unit(zfcp_port_t *port, fcp_lun_t lun,
++ zfcp_unit_t **unit)
++{
++ zfcp_unit_t *u;
++ unsigned long flags;
++ int ret = 0;
++
++ *unit = NULL;
++
++ read_lock_irqsave(&port->unit_list_lock, flags);
++ ZFCP_FOR_EACH_UNIT(port, u)
++ {
++ if (u->fcp_lun == lun) {
++ *unit = u;
++ }
++ }
++ read_unlock_irqrestore(&port->unit_list_lock, flags);
++ if (NULL == *unit) {
++ ret = -ENOUNI;
++ }
++
++ return ret;
++}
++
++/**
++ * zfcp_search_port_unit - search for a port and unit
++ * @*adapter: pointer to adapter structure of adapter where port is attached to
++ * @wwpn: of the port to search for
++ * @lun: of the unit to search for, ignored if @unit == NULL
++ * @**port: address to write pointer to the found port structure to
++ * @**unit: address to write pointer to the found unit structure to
++ * (only if @unit != NULL)
++ * Return: 0 on success, -E* code otherwise
++ * Locks: lock/unlock of adapter->port_list_lock
++ *
++ * Search for port and unit and return their addresses.
++ * If @unit == NULL, search only for the port.
++ * If @unit != NULL, search for the unit as well.
++ * See also zfcp_search_adapter_port_unit() and zfcp_search_unit().
++ */
++static int zfcp_search_port_unit(zfcp_adapter_t *adapter, wwn_t wwpn,
++ fcp_lun_t lun, zfcp_port_t **port,
++ zfcp_unit_t **unit)
++{
++ zfcp_port_t *p;
++ unsigned long flags;
++ int ret = 0;
++
++ *port = NULL;
++
++ read_lock_irqsave(&adapter->port_list_lock, flags);
++ ZFCP_FOR_EACH_PORT(adapter, p)
++ {
++ if (p->wwpn == wwpn) {
++ *port = p;
++ }
++ }
++ read_unlock_irqrestore(&adapter->port_list_lock, flags);
++
++ if (NULL == *port) {
++ ret = -ENOPOR;
++ } else {
++ if (NULL != unit) {
++ ret = zfcp_search_unit(*port, lun, unit);
++ }
++ }
++
++ return ret;
++}
++
++/**
++ * zfcp_search_adapter_port_unit - search for an adapter, port and unit
++ * @devno: of the adapter to search for
++ * @wwpn: of the port to search for, ignored if @port == NULL
++ * @lun: of the unit to search for, ignored if @port == NULL || @unit == NULL
++ * @**adapter: address to write pointer to found adapter structure to
++ * @**port: address to write pointer to found port structure to
++ * (only if @port != NULL)
++ * @**unit: address to write pointer to found unit structure to
++ * (only if @port != NULL && @unit != NULL)
++ * Return: 0 on success, -E* code otherwise
++ * Locks: lock/unlock of zfcp_data.adapter_list_lock
++ *
++ * Search for an adapter, port and unit and return their addresses.
++ * If @port == NULL, search only for the adapter.
++ * If @port != NULL, search for the port as well.
++ * If @port != NULL and @unit != NULL, search for port and unit as well.
++ * See also zfcp_search_port_unit().
++ */
++static int zfcp_search_adapter_port_unit(devno_t devno, wwn_t wwpn,
++ fcp_lun_t lun,
++ zfcp_adapter_t **adapter,
++ zfcp_port_t **port, zfcp_unit_t **unit)
++{
++ zfcp_adapter_t *a;
++ unsigned long flags;
++ int ret = 0;
++
++ if (NULL == adapter) {
++ return -EINVAL;
++ }
++
++ *adapter = NULL;
++
++ read_lock_irqsave(&zfcp_data.adapter_list_lock, flags);
++ ZFCP_FOR_EACH_ADAPTER(a)
++ {
++ if (a->devno == devno) {
++ if (!atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED,
++ &a->status) &&
++ atomic_test_mask((ZFCP_STATUS_COMMON_RUNNING |
++ ZFCP_STATUS_COMMON_UNBLOCKED),
++ &a->status)) {
++ *adapter = a;
++ }
++ break;
++ }
++ }
++ read_unlock_irqrestore(&zfcp_data.adapter_list_lock, flags);
++ if (NULL == *adapter) {
++ ret = -ENOADA;
++ } else {
++ if (NULL != port) {
++ ret = zfcp_search_port_unit(*adapter, wwpn, lun,
++ port, unit);
++ }
++ }
++
++ return ret;
++}
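++
++/*
++ * Usage sketch (illustrative only, not part of the original driver): the
++ * three call patterns of the search helper, as used by the zfcp_zh_*
++ * entry points below; devno, wwpn and lun stand for caller-supplied
++ * values.
++ */
++#if 0
++zfcp_adapter_t *adapter;
++zfcp_port_t *port;
++zfcp_unit_t *unit;
++int ret;
++
++/* adapter only, e.g. for adapter attributes */
++ret = zfcp_search_adapter_port_unit(devno, 0, 0, &adapter, NULL, NULL);
++/* adapter and port, e.g. for discovered port attributes */
++ret = zfcp_search_adapter_port_unit(devno, wwpn, 0, &adapter, &port, NULL);
++/* adapter, port and unit, e.g. for sending SCSI commands */
++ret = zfcp_search_adapter_port_unit(devno, wwpn, lun, &adapter, &port, &unit);
++#endif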
++
++/**
++ * get_config_units - create unit config events
++ * @*filp: passed through to callback
++ * @*adapter: adapter the unit belongs to
++ * @*port: port the unit belongs to
++ *
++ * generate one unit add event for each unit below the passed port
++ */
++static inline int get_config_units(struct file *filp, zfcp_adapter_t *adapter,
++ zfcp_port_t *port)
++{
++ zfcp_unit_t *unit;
++ unsigned long flags;
++ int ret = 0;
++
++ read_lock_irqsave(&port->unit_list_lock, flags);
++
++ ZFCP_FOR_EACH_UNIT(port, unit)
++ {
++ if (unit->device != NULL) {
++ zfcp_callback_do_unit_add(filp, adapter, port, unit);
++ ++ret;
++ }
++ }
++
++ read_unlock_irqrestore(&port->unit_list_lock, flags);
++
++ return ret;
++}
++
++/**
++ * get_config_ports - create port config events, or search for a port
++ * @*filp: passed through to callback
++ * @*adapter: adapter the ports belong to
++ * @wwpn: port to search for, ignored if @config_flags == ZH_GET_CONFIG_PORTS
++ * @config_flags: ZH_GET_CONFIG_PORTS to generate an add event per port,
++ * otherwise search @wwpn and call get_config_units() on it
++ */
++static inline int get_config_ports(struct file *filp, zfcp_adapter_t *adapter,
++ wwn_t wwpn, unsigned int config_flags)
++{
++ zfcp_port_t *port;
++ unsigned long flags;
++ int ret = 0;
++
++ read_lock_irqsave(&adapter->port_list_lock, flags);
++
++ ZFCP_FOR_EACH_PORT(adapter, port)
++ {
++ if (ZH_GET_CONFIG_PORTS == config_flags) {
++ /* ignore name server port */
++ if (0 != port->wwpn) {
++ zfcp_callback_do_port_add(filp, adapter, port);
++ ++ret;
++ }
++ } else {
++ if (port->wwpn != wwpn) {
++ ret = -ENOPOR;
++ } else {
++ ret = get_config_units(filp, adapter, port);
++ break;
++ }
++ }
++ }
++
++ read_unlock_irqrestore(&adapter->port_list_lock, flags);
++
++ return ret;
++}
++
++/**
++ * zfcp_zh_get_config - Prepare config for userspace
++ * @*filp: passed through to callback
++ * @devno: adapter to search for, ignored if
++ * @config_flags == ZH_GET_CONFIG_ADAPTERS
++ * @wwpn: passed through to get_config_ports()
++ * @config_flags: selects whether adapter, port or unit events are generated
++ *
++ * TNG get_config method; uses the callback methods and thus generates
++ * events for each configured item. The events are private to the
++ * passed file descriptor.
++ */
++int zfcp_zh_get_config(struct file *filp, devno_t devno, wwn_t wwpn,
++ unsigned int config_flags)
++{
++ zfcp_adapter_t *adapter;
++ unsigned long flags;
++ int ret = 0;
++
++ read_lock_irqsave(&zfcp_data.adapter_list_lock, flags);
++
++ ZFCP_FOR_EACH_ADAPTER(adapter)
++ {
++ if (ZH_GET_CONFIG_ADAPTERS == config_flags) {
++ zfcp_callback_do_adapter_add(filp, adapter);
++ ++ret;
++ } else {
++ if (adapter->devno != devno) {
++ ret = -ENOADA;
++ } else {
++ ret = get_config_ports(filp, adapter, wwpn,
++ config_flags);
++ break;
++ }
++
++ }
++ }
++
++ read_unlock_irqrestore(&zfcp_data.adapter_list_lock, flags);
++
++ return ret;
++}
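++
++/*
++ * The events generated by zfcp_zh_get_config() are queued on the file
++ * descriptor's private config list (see add_event() in zh_main.c) and
++ * are handed to userspace by zh_read(), one struct zh_event record at a
++ * time.
++ */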
++
++/**
++ * zfcp_zh_get_adapter_attributes - provide data for the api call
++ * @devno: of the adapter
++ * @attr: pointer to struct zfcp_adapter_attributes to return attributes
++ * Return: 0 on success, -E* otherwise
++ * Context: user
++ * Locks: lock/unlock zfcp_data.adapter_list_lock
++ */
++int zfcp_zh_get_adapter_attributes(devno_t devno,
++ struct zfcp_adapter_attributes *attr)
++{
++ zfcp_adapter_t *adapter;
++ int ret = 0;
++
++ memset(attr, 0, sizeof(*attr));
++
++ ret = zfcp_search_adapter_port_unit(devno, 0, 0, &adapter, NULL, NULL);
++ if (ret != 0) {
++ return ret;
++ }
++
++ strcpy(attr->manufacturer, "IBM");
++ strncpy(attr->serial_number, adapter->serial_number, 32);
++ switch (adapter->hydra_version) {
++ case FSF_ADAPTER_TYPE_FICON:
++ strcpy(attr->model, "FICON FCP");
++ break;
++ case FSF_ADAPTER_TYPE_FICON_EXPRESS:
++ strcpy(attr->model, "FICON Express FCP");
++ break;
++ }
++ strcpy(attr->model_description, "zSeries Fibre Channel Adapter");
++ attr->node_wwn = adapter->wwnn;
++ sprintf(attr->hardware_version, "0x%x",
++ adapter->hardware_version);
++ sprintf(attr->driver_version, "0x%x", zfcp_data.driver_version);
++ sprintf(attr->firmware_version, "0x%x",
++ adapter->fsf_lic_version);
++ attr->vendor_specific_id = 42;
++ attr->number_of_ports = 1;
++ strcpy(attr->driver_name, "zfcp.o");
++ /* option_rom_version not used, node_symbolic_name not set */
++
++ return ret;
++}
++
++/**
++ * zfcp_zh_get_port_statistics - Retrieve statistics of adapter port
++ * @devno: adapter of which port data should be reported
++ * @stat: pointer to struct zfcp_port_statistics to return statistics
++ * Return: 0 on success, -E* otherwise
++ * Context: user
++ * Locks: lock/unlock zfcp_data.adapter_list_lock
++ */
++int zfcp_zh_get_port_statistics(devno_t devno,
++ struct zfcp_port_statistics *stat)
++{
++ zfcp_adapter_t *adapter;
++ fsf_qtcb_bottom_port_t data;
++ int ret = 0;
++
++ memset(stat, 0, sizeof(*stat));
++
++ ret = zfcp_search_adapter_port_unit(devno, 0, 0, &adapter, NULL, NULL);
++ if (ret != 0) {
++ return ret;
++ }
++
++ if (adapter->supported_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
++ ret = zfcp_fsf_exchange_port_data(adapter, &data);
++ if (0 == ret) {
++ /* convert fsf_qtcb_bottom_port into
++ zfcp_port_statistics */
++ stat->last_reset = data.seconds_since_last_reset;
++ stat->tx_frames = data.tx_frames;
++ stat->tx_words = data.tx_words;
++ stat->rx_frames = data.rx_frames;
++ stat->lip = data.lip;
++ stat->nos = data.nos;
++ stat->error_frames = data.error_frames;
++ stat->dumped_frames = data.dumped_frames;
++ stat->link_failure = data.link_failure;
++ stat->loss_of_sync = data.loss_of_sync;
++ stat->loss_of_signal = data.loss_of_signal;
++ stat->prim_seq_prot_error = data.psp_error_counts;
++ stat->invalid_tx_words = data.invalid_tx_words;
++ stat->invalid_crc = data.invalid_crcs;
++ stat->input_requests = data.input_requests;
++ stat->output_requests = data.output_requests;
++ stat->control_requests = data.control_requests;
++ stat->input_megabytes = data.input_mb;
++ stat->output_megabytes = data.output_mb;
++ }
++ } else {
++ ret = -EOPNOTSUPP;
++ }
++
++ return ret;
++}
++
++
++/**
++ * zfcp_zh_get_port_attributes - Retrieve attributes of adapter port
++ * @devno: adapter of which port data should be reported
++ * @attr: pointer to struct zfcp_port_attributes to return attributes
++ * Return: 0 on success, -E* otherwise
++ * Context: user
++ * Locks: lock/unlock zfcp_data.adapter_list_lock, adapter->port_list_lock
++ */
++int zfcp_zh_get_port_attributes(devno_t devno,
++ struct zfcp_port_attributes *attr)
++{
++ zfcp_adapter_t *adapter;
++ zfcp_port_t *port;
++ fsf_qtcb_bottom_port_t data;
++ unsigned long flags;
++ int ret = 0;
++
++ memset(attr, 0, sizeof(*attr));
++
++ ret = zfcp_search_adapter_port_unit(devno, 0, 0, &adapter, NULL, NULL);
++ if (ret != 0) {
++ return ret;
++ }
++
++ attr->wwnn = adapter->wwnn;
++ attr->wwpn = adapter->wwpn;
++ attr->speed = adapter->fc_link_speed;
++ attr->discovered_ports = adapter->ports;
++
++ /* ignore nameserver port */
++ read_lock_irqsave(&adapter->port_list_lock, flags);
++ ZFCP_FOR_EACH_PORT(adapter, port)
++ {
++ if (port->wwpn == 0) {
++ if (attr->discovered_ports)
++ --attr->discovered_ports;
++ break;
++ }
++ }
++ read_unlock_irqrestore(&adapter->port_list_lock, flags);
++
++ if (adapter->supported_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
++ ret = zfcp_fsf_exchange_port_data(adapter, &data);
++ if (0 == ret) {
++ /* convert fsf_qtcb_bottom_port into
++ zfcp_port_attributes */
++ attr->fcid = data.fc_port_id;
++ attr->type = data.port_type;
++ attr->state = data.port_state;
++ attr->supported_class_of_service =
++ data.class_of_service;
++ attr->supported_speed = data.supported_speed;
++ attr->max_frame_size = data.maximum_frame_size;
++ memcpy(&attr->supported_fc4_types,
++ &data.supported_fc4_types, 32);
++ memcpy(&attr->active_fc4_types,
++ &data.active_fc4_types, 32);
++ }
++ } else {
++ ret = -EOPNOTSUPP;
++ }
++ /* fabric_name and symbolic_name not set */
++
++ return ret;
++}
++
++/**
++ * zfcp_zh_get_discovered_port_attributes - Retrieve attributes of target port
++ * @devno: adapter for which port data should be reported
++ * @wwpn: wwn of discovered port
++ * @attr: pointer to struct zfcp_port_attributes to return attributes
++ * Return: 0 on success, -E* otherwise
++ * Context: user
++ * Locks: lock/unlock zfcp_data.adapter_list_lock, adapter->port_list_lock
++ */
++int zfcp_zh_get_dport_attributes(devno_t devno, wwn_t wwpn,
++ struct zfcp_port_attributes *attr)
++{
++ zfcp_adapter_t *adapter;
++ zfcp_port_t *port;
++ struct ct_iu_ga_nxt *ct_iu_resp;
++ int ret;
++
++ memset(attr, 0, sizeof(*attr));
++
++ ret = zfcp_search_adapter_port_unit(devno, wwpn, 0,
++ &adapter, &port, NULL);
++ if (ret != 0) {
++ return ret;
++ }
++
++ ct_iu_resp = kmalloc(sizeof(*ct_iu_resp), GFP_KERNEL);
++ if (NULL == ct_iu_resp) {
++ return -ENOMEM;
++ }
++
++ ret = zfcp_ns_ga_nxt_request(port, ct_iu_resp);
++ if (0 == ret) {
++ attr->wwnn = port->wwnn;
++ attr->wwpn = port->wwpn;
++ attr->fabric_name = (wwn_t) ct_iu_resp->du.fabric_wwn;
++ attr->fcid = port->d_id;
++
++ /* map FC-GS-2 port types to HBA API
++ port types */
++ switch (ct_iu_resp->du.port_type) {
++ case 0x01: /* N_Port */
++ attr->type = FSF_HBA_PORTTYPE_NPORT;
++ break;
++ case 0x02: /* NL_Port */
++ attr->type = FSF_HBA_PORTTYPE_NLPORT;
++ break;
++ case 0x81: /* F_Port */
++ attr->type = FSF_HBA_PORTTYPE_FPORT;
++ break;
++ case 0x82: /* FL_Port */
++ attr->type = FSF_HBA_PORTTYPE_FLPORT;
++ break;
++ case 0x03: /* F/NL_Port */
++ case 0x7f: /* Nx_Port */
++ case 0x84: /* E_Port */
++ attr->type = FSF_HBA_PORTTYPE_OTHER;
++ break;
++ case 0x00: /* Unidentified */
++ default: /* reserved */
++ attr->type = FSF_HBA_PORTTYPE_UNKNOWN;
++ }
++
++ attr->state = FSF_HBA_PORTSTATE_UNKNOWN;
++ attr->supported_class_of_service = ct_iu_resp->du.cos;
++ memcpy(&attr->active_fc4_types, &ct_iu_resp->du.fc4_types, 32);
++ memcpy(&attr->symbolic_name,
++ &ct_iu_resp->du.node_symbolic_name,
++ ct_iu_resp->du.port_symbolic_name_length);
++ }
++ kfree(ct_iu_resp);
++
++ /* supported_speed, speed, max_frame_size, supported_fc4_types,
++ discovered_ports not set */
++
++ return ret;
++}
++
++
++/**
++ * zfcp_zh_send_ct_handler() - handler for zfcp_zh_send_ct()
++ * @data: pointer to a struct zfcp_send_ct; it was set as handler_data
++ * in zfcp_zh_send_ct().
++ * Context: interrupt
++ *
++ * This handler is called on completion of a send_ct request. We just wake up
++ * our own zfcp_zh_send_ct() function.
++ */
++static void zfcp_zh_send_ct_handler(unsigned long data)
++{
++ struct zfcp_send_ct *ct = (struct zfcp_send_ct *) data;
++ struct completion *wait = (struct completion*) ct->completion;
++
++ complete(wait);
++ return;
++}
++
++
++/**
++ * zfcp_zh_send_ct() - send a CT_IU containing FC-GS-4 command
++ * @devno: adapter for which port data should be reported
++ * @req: scatter-gather list with request data
++ * @req_count: number of elements in @req
++ * @resp: scatter-gather list for response data
++ * @resp_count: number of elements in @resp
++ * Return: 0 on success, -E* otherwise
++ * Context: user
++ * Locks: lock/unlock zfcp_data.adapter_list_lock
++ *
++ * Note: Currently only requests to the nameserver port are supported.
++ * Other well-known ports currently have no representation in zfcp.
++ */
++int zfcp_zh_send_ct(devno_t devno,
++ struct scatterlist *req, unsigned int req_count,
++ struct scatterlist *resp, unsigned int resp_count)
++{
++ struct zfcp_send_ct ct;
++ zfcp_adapter_t *adapter;
++ struct ct_hdr *ct_header;
++ int ret;
++
++ DECLARE_COMPLETION(wait);
++
++ memset(&ct, 0, sizeof(ct));
++
++ ret = zfcp_search_adapter_port_unit(devno, 0, 0, &adapter, NULL, NULL);
++ if (ret != 0) {
++ return ret;
++ }
++
++ ct_header = (struct ct_hdr *) req[0].address;
++ if ((ct_header->gs_type != ZFCP_CT_DIRECTORY_SERVICE) ||
++ (ct_header->gs_subtype != ZFCP_CT_NAME_SERVER)) {
++ /* currently only nameserver requests are supported */
++ ZFCP_LOG_NORMAL("Tried to send CT IU to other service than "
++ "Name Server Directory Service. This is "
++ "currently not supported.\n");
++ return -ENOPOR;
++ }
++
++ if (!adapter->nameserver_port) {
++ ZFCP_LOG_NORMAL("Name Server port not available\n.");
++ return -ENOPOR;
++ }
++ ct.port = adapter->nameserver_port;
++
++ ct.req = req;
++ ct.resp = resp;
++ ct.req_count = req_count;
++ ct.resp_count = resp_count;
++ ct.handler = zfcp_zh_send_ct_handler;
++ ct.handler_data = (unsigned long) &ct;
++ ct.completion = &wait;
++ ct.timeout = ZFCP_CT_TIMEOUT;
++
++ ret = zfcp_fsf_send_ct(&ct, 0, 0);
++ if (0 == ret) {
++ wait_for_completion(&wait);
++ if (ct.status)
++ ret = -EIO;
++ else
++ zfcp_check_ct_response((void *)resp->address);
++ }
++
++ return ret;
++}
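++
++/*
++ * Caller sketch (illustrative only; the helper name, buffer handling and
++ * any CT payload layout beyond gs_type/gs_subtype are assumptions):
++ */
++#if 0
++static int example_send_ct(devno_t devno, void *req_buf, size_t req_len,
++ void *resp_buf, size_t resp_len)
++{
++ struct scatterlist req, resp;
++ struct ct_hdr *hdr = (struct ct_hdr *) req_buf;
++
++ /* only the nameserver directory service is accepted above */
++ hdr->gs_type = ZFCP_CT_DIRECTORY_SERVICE;
++ hdr->gs_subtype = ZFCP_CT_NAME_SERVER;
++
++ memset(&req, 0, sizeof(req));
++ memset(&resp, 0, sizeof(resp));
++ req.address = req_buf; /* 2.4-style scatterlist, as used above */
++ req.length = req_len;
++ resp.address = resp_buf;
++ resp.length = resp_len;
++
++ return zfcp_zh_send_ct(devno, &req, 1, &resp, 1);
++}
++#endif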
++
++/**
++ * zfcp_zh_callbacks_register - register callbacks of zfcp_hbaapi
++ * @*cb: pointer to the callback structure
++ * Return: 0 on success, else -E* code
++ * Locks: lock/unlock of zfcp_callback.lock
++ * Context: user
++ */
++int zfcp_zh_callbacks_register(struct zfcp_zh_callbacks *cb)
++{
++ unsigned long flags;
++ int ret = 0;
++
++ write_lock_irqsave(&zfcp_callback.lock, flags);
++
++ if (zfcp_callback.callbacks != NULL) {
++ ret = -EBUSY;
++ } else {
++ zfcp_callback.callbacks = cb;
++ }
++
++ write_unlock_irqrestore(&zfcp_callback.lock, flags);
++
++ return ret;
++}
++
++/**
++ * zfcp_zh_callbacks_unregister - unregister callbacks for zfcp_hbaapi
++ * @*cb: pointer to the callback structure
++ * Return: 0 on success, -E* code otherwise
++ * Locks: lock/unlock of zfcp_callback.lock
++ * Context: user
++ */
++int zfcp_zh_callbacks_unregister(struct zfcp_zh_callbacks *cb)
++{
++ unsigned long flags;
++ int ret = 0;
++
++ write_lock_irqsave(&zfcp_callback.lock, flags);
++
++ if (zfcp_callback.callbacks == NULL) {
++ ret = -EBUSY;
++ } else {
++ if (zfcp_callback.callbacks != cb) {
++ ZFCP_LOG_DEBUG("Tried to unregister callbacks from "
++ "different address.\n");
++ ret = -EINVAL;
++ } else {
++ zfcp_callback.callbacks = NULL;
++ }
++ }
++
++ write_unlock_irqrestore(&zfcp_callback.lock, flags);
++
++ return ret;
++}
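++
++/*
++ * Pairing sketch (illustrative only): a single client module, such as
++ * zh_main.c below, registers its struct zfcp_zh_callbacks once at init
++ * time and unregisters the same pointer on exit; a second registration
++ * fails with -EBUSY until the first owner has unregistered.
++ */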
++
++/**
++ * zfcp_zh_assert_fclun_zero - Assert that there is a FC LUN 0
++ * @devno: devno of the adapter
++ * @wwpn: wwpn of the discovered port
++ *
++ * Look for a unit with FC LUN 0 at the passed adapter:port.
++ * Add it if it does not exist. This unit is needed for
++ * REPORT_LUNS.
++ *
++ * Note: No unit with a FC LUN 0 can be added for the same adapter and port
++ * after this call. (We could add an "overwriteable" flag to the
++ * zfcp_unit_t structure as a work-around for this.)
++ */
++int zfcp_zh_assert_fclun_zero(devno_t devno, wwn_t wwpn)
++{
++ int ret;
++ zfcp_config_record_t cfg;
++ zfcp_adapter_t *adapter;
++ zfcp_port_t *port;
++ zfcp_unit_t *unit;
++
++ ret = zfcp_search_adapter_port_unit(devno, wwpn, 0,
++ &adapter, &port, &unit);
++ if (ret != -ENOUNI) {
++ return ret;
++ }
++
++ memset (&cfg, 0, sizeof(cfg));
++ cfg.devno = devno;
++ cfg.scsi_id = port->scsi_id;
++ cfg.wwpn = port->wwpn;
++ cfg.scsi_lun = port->max_scsi_lun + 1;
++
++ ret = zfcp_config_parse_record_add(&cfg);
++ if (0 != ret) {
++ return ret;
++ }
++
++ /* wait until the unit is opened */
++ return zfcp_erp_wait(port->adapter);
++}
++
++/**
++ * zfcp_zh_send_scsi - Send a SCSI command to a unit
++ * @devno: devno of the adapter
++ * @wwpn: WWPN of the discovered port the unit is attached to
++ * @lun: FC LUN of the unit to send the command to
++ * @cmnd: address of the prepared Scsi_Cmnd
++ * Return: 0 on success, > 0 on SCSI error, -E* code else
++ *
++ * FC LUN 0 is handled with extra care, to be able to send REPORT_LUNS.
++ */
++int zfcp_zh_send_scsi(devno_t devno, wwn_t wwpn, fcp_lun_t lun,
++ Scsi_Cmnd *cmnd)
++{
++ int ret;
++ zfcp_adapter_t *adapter;
++ zfcp_port_t *port;
++ zfcp_unit_t *unit;
++
++ ret = zfcp_search_adapter_port_unit(devno, wwpn, lun,
++ &adapter, &port, &unit);
++ if (ret != 0) {
++ return ret;
++ }
++
++ ret = zfcp_scsi_command_sync(unit, cmnd);
++ if (ret < 0) {
++ return ret;
++ } else {
++ return cmnd->result;
++ }
++}
++
++/**
++ * zfcp_zh_send_els_handler() - handler for zfcp_zh_send_els()
++ * @data: a pointer to struct zfcp_send_els. It is set as handler_data
++ * in zfcp_zh_send_els().
++ * Context: interrupt
++ *
++ * This handler is called on completion of a send_els request. We just wake up
++ * our own zfcp_zh_send_els() function.
++ */
++static void
++zfcp_zh_send_els_handler(unsigned long data)
++{
++ struct zfcp_send_els *els = (struct zfcp_send_els *) data;
++ complete(els->completion);
++}
++
++/**
++ * zfcp_zh_send_els - send an els to a port
++ * @devno: of the adapter to send via
++ * @wwpn: of the port to send to
++ * @send: scatterlist describing the els payload to be sent
++ * @send_count: number of elements in the send scatterlist
++ * @receive: scatterlist describing buffers for the reply payload
++ * @receive_count: number of elements in the receive scatterlist
++ * Return: 0 on success, -E* code otherwise
++ * Locks: lock/unlock of zfcp_data.adapter_list_lock, adapter->port_list_lock
++ */
++int zfcp_zh_send_els(devno_t devno, wwn_t wwpn, struct scatterlist *send,
++ unsigned int send_count, struct scatterlist *receive,
++ unsigned int receive_count)
++{
++ int ret;
++ struct zfcp_send_els *req;
++ zfcp_adapter_t *adapter;
++ zfcp_port_t *port;
++
++ DECLARE_COMPLETION(wait);
++
++ ret = zfcp_search_adapter_port_unit(devno, wwpn, 0,
++ &adapter, &port, NULL);
++ if (ret != 0) {
++ return ret;
++ }
++
++ req = kmalloc(sizeof(*req), GFP_KERNEL);
++ if (NULL == req) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ memset(req, 0, sizeof(*req));
++
++ req->port = port;
++ req->req = send;
++ req->req_count = send_count;
++ req->resp = receive;
++ req->resp_count = receive_count;
++ req->handler = zfcp_zh_send_els_handler;
++ req->handler_data = (unsigned long) req;
++ req->completion = &wait;
++
++ ret = zfcp_fsf_send_els(req);
++ if (0 == ret) {
++ wait_for_completion(&wait);
++ if (req->status)
++ ret = -EIO;
++ }
++
++ kfree(req);
++
++out:
++ return ret;
++}
++
++EXPORT_SYMBOL(zfcp_zh_callbacks_register);
++EXPORT_SYMBOL(zfcp_zh_callbacks_unregister);
++EXPORT_SYMBOL(zfcp_zh_get_config);
++EXPORT_SYMBOL(zfcp_zh_get_adapter_attributes);
++EXPORT_SYMBOL(zfcp_zh_get_port_attributes);
++EXPORT_SYMBOL(zfcp_zh_get_port_statistics);
++EXPORT_SYMBOL(zfcp_zh_get_dport_attributes);
++EXPORT_SYMBOL(zfcp_zh_send_ct);
++EXPORT_SYMBOL(zfcp_zh_send_els);
++EXPORT_SYMBOL(zfcp_zh_send_scsi);
++EXPORT_SYMBOL(zfcp_zh_assert_fclun_zero);
+=== drivers/s390/scsi/zfcp_fsf.h
+==================================================================
+--- drivers/s390/scsi/zfcp_fsf.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/scsi/zfcp_fsf.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,451 @@
++/*
++ * $Id: zfcp_fsf.h,v 1.7.2.4 2004/08/13 14:01:13 aherrman Exp $
++ *
++ * header file for FCP adapter driver for IBM eServer zSeries
++ *
++ * (C) Copyright IBM Corp. 2002, 2003
++ *
++ * Authors:
++ * Martin Peschke <mpeschke at de.ibm.com>
++ * Raimund Schroeder <raimund.schroeder at de.ibm.com>
++ * Aron Zeh
++ * Wolfgang Taphorn
++ * Andreas Herrmann <aherrman at de.ibm.com>
++ */
++
++#ifndef FSF_H
++#define FSF_H
++
++#define FSF_QTCB_VERSION1 0x00000001
++#define FSF_QTCB_CURRENT_VERSION FSF_QTCB_VERSION1
++
++/* FSF commands */
++#define FSF_QTCB_FCP_CMND 0x00000001
++#define FSF_QTCB_ABORT_FCP_CMND 0x00000002
++#define FSF_QTCB_OPEN_PORT_WITH_DID 0x00000005
++#define FSF_QTCB_OPEN_LUN 0x00000006
++#define FSF_QTCB_CLOSE_LUN 0x00000007
++#define FSF_QTCB_CLOSE_PORT 0x00000008
++#define FSF_QTCB_CLOSE_PHYSICAL_PORT 0x00000009
++#define FSF_QTCB_SEND_ELS 0x0000000B
++#define FSF_QTCB_SEND_GENERIC 0x0000000C
++#define FSF_QTCB_EXCHANGE_CONFIG_DATA 0x0000000D
++#define FSF_QTCB_EXCHANGE_PORT_DATA 0x0000000E
++#define FSF_QTCB_DOWNLOAD_CONTROL_FILE 0x00000012
++#define FSF_QTCB_UPLOAD_CONTROL_FILE 0x00000013
++
++/* FSF QTCB types */
++#define FSF_IO_COMMAND 0x00000001
++#define FSF_SUPPORT_COMMAND 0x00000002
++#define FSF_CONFIG_COMMAND 0x00000003
++#define FSF_PORT_COMMAND 0x00000004
++
++/* FSF control file upload/download operations' subtype and options */
++#define FSF_CFDC_OPERATION_SUBTYPE 0x00020001
++#define FSF_CFDC_OPTION_NORMAL_MODE 0x00000000
++#define FSF_CFDC_OPTION_FORCE 0x00000001
++#define FSF_CFDC_OPTION_FULL_ACCESS 0x00000002
++#define FSF_CFDC_OPTION_RESTRICTED_ACCESS 0x00000004
++
++/* FSF protocol status codes */
++#define FSF_PROT_GOOD 0x00000001
++#define FSF_PROT_QTCB_VERSION_ERROR 0x00000010
++#define FSF_PROT_SEQ_NUMB_ERROR 0x00000020
++#define FSF_PROT_UNSUPP_QTCB_TYPE 0x00000040
++#define FSF_PROT_HOST_CONNECTION_INITIALIZING 0x00000080
++#define FSF_PROT_FSF_STATUS_PRESENTED 0x00000100
++#define FSF_PROT_DUPLICATE_REQUEST_ID 0x00000200
++#define FSF_PROT_LINK_DOWN 0x00000400
++#define FSF_PROT_REEST_QUEUE 0x00000800
++#define FSF_PROT_ERROR_STATE 0x01000000
++
++/* FSF status codes */
++#define FSF_GOOD 0x00000000
++#define FSF_PORT_ALREADY_OPEN 0x00000001
++#define FSF_LUN_ALREADY_OPEN 0x00000002
++#define FSF_PORT_HANDLE_NOT_VALID 0x00000003
++#define FSF_LUN_HANDLE_NOT_VALID 0x00000004
++#define FSF_HANDLE_MISMATCH 0x00000005
++#define FSF_SERVICE_CLASS_NOT_SUPPORTED 0x00000006
++#define FSF_FCPLUN_NOT_VALID 0x00000009
++#define FSF_ACCESS_DENIED 0x00000010
++#define FSF_ACCESS_TYPE_NOT_VALID 0x00000011
++#define FSF_LUN_SHARING_VIOLATION 0x00000012
++#define FSF_COMMAND_ABORTED_ULP 0x00000020
++#define FSF_COMMAND_ABORTED_ADAPTER 0x00000021
++#define FSF_FCP_COMMAND_DOES_NOT_EXIST 0x00000022
++#define FSF_DIRECTION_INDICATOR_NOT_VALID 0x00000030
++#define FSF_INBOUND_DATA_LENGTH_NOT_VALID 0x00000031 /* FIXME: obsolete? */
++#define FSF_OUTBOUND_DATA_LENGTH_NOT_VALID 0x00000032 /* FIXME: obsolete? */
++#define FSF_CMND_LENGTH_NOT_VALID 0x00000033
++#define FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED 0x00000040
++#define FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED 0x00000041
++#define FSF_REQUEST_BUF_NOT_VALID 0x00000042
++#define FSF_RESPONSE_BUF_NOT_VALID 0x00000043
++#define FSF_ELS_COMMAND_REJECTED 0x00000050
++#define FSF_GENERIC_COMMAND_REJECTED 0x00000051
++#define FSF_OPERATION_PARTIALLY_SUCCESSFUL 0x00000052
++#define FSF_AUTHORIZATION_FAILURE 0x00000053
++#define FSF_CFDC_ERROR_DETECTED 0x00000054
++#define FSF_CONTROL_FILE_UPDATE_ERROR 0x00000055
++#define FSF_CONTROL_FILE_TOO_LARGE 0x00000056
++#define FSF_ACCESS_CONFLICT_DETECTED 0x00000057
++#define FSF_CONFLICTS_OVERRULED 0x00000058
++#define FSF_PORT_BOXED 0x00000059
++#define FSF_LUN_BOXED 0x0000005A
++#define FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE 0x0000005B
++#define FSF_PAYLOAD_SIZE_MISMATCH 0x00000060
++#define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061
++#define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062
++#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
++#define FSF_FCP_RSP_AVAILABLE 0x000000AF
++#define FSF_UNKNOWN_COMMAND 0x000000E2
++#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3
++#define FSF_INVALID_COMMAND_OPTION 0x000000E5
++//#define FSF_ERROR 0x000000FF
++
++/* FSF status qualifier, recommendations */
++#define FSF_SQ_NO_RECOM 0x00
++#define FSF_SQ_FCP_RSP_AVAILABLE 0x01
++#define FSF_SQ_RETRY_IF_POSSIBLE 0x02
++#define FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED 0x03
++#define FSF_SQ_INVOKE_LINK_TEST_PROCEDURE 0x04
++#define FSF_SQ_ULP_PROGRAMMING_ERROR 0x05
++#define FSF_SQ_COMMAND_ABORTED 0x06
++#define FSF_SQ_NO_RETRY_POSSIBLE 0x07
++
++/* FSF status qualifier for ACT download/upload commands */
++#define FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE 0x00000001
++#define FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE2 0x00000002
++/* ACT subtable codes */
++#define FSF_SQ_CFDC_SUBTABLE_OS 0x0001
++#define FSF_SQ_CFDC_SUBTABLE_PORT_WWPN 0x0002
++#define FSF_SQ_CFDC_SUBTABLE_PORT_DID 0x0003
++#define FSF_SQ_CFDC_SUBTABLE_LUN 0x0004
++
++/* FSF status qualifier (most significant 4 bytes), local link down */
++#define FSF_PSQ_LINK_NOLIGHT 0x00000004
++#define FSF_PSQ_LINK_WRAPPLUG 0x00000008
++#define FSF_PSQ_LINK_NOFCP 0x00000010
++
++/* payload size in status read buffer */
++#define FSF_STATUS_READ_PAYLOAD_SIZE 4032
++
++/* number of status read buffers that should be sent by ULP */
++#define FSF_STATUS_READS_RECOM 16
++
++/* status types in status read buffer */
++#define FSF_STATUS_READ_PORT_CLOSED 0x00000001
++#define FSF_STATUS_READ_INCOMING_ELS 0x00000002
++#define FSF_STATUS_READ_BIT_ERROR_THRESHOLD 0x00000004
++#define FSF_STATUS_READ_LINK_DOWN 0x00000005 /* FIXME: really? */
++#define FSF_STATUS_READ_LINK_UP 0x00000006
++#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A
++#define FSF_STATUS_READ_CFDC_HARDENED 0x0000000B
++
++/* status subtypes in status read buffer */
++#define FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT 0x00000001
++#define FSF_STATUS_READ_SUB_ERROR_PORT 0x00000002
++#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE 0x00000002
++#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2 0x0000000F
++
++#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001
++
++/* topology detected by the adapter */
++#define FSF_TOPO_ERROR 0x00000000
++#define FSF_TOPO_P2P 0x00000001
++#define FSF_TOPO_FABRIC 0x00000002
++#define FSF_TOPO_AL 0x00000003
++#define FSF_TOPO_FABRIC_VIRT 0x00000004
++
++/* data direction for FCP commands */
++#define FSF_DATADIR_WRITE 0x00000001
++#define FSF_DATADIR_READ 0x00000002
++#define FSF_DATADIR_READ_WRITE 0x00000003
++#define FSF_DATADIR_CMND 0x00000004
++
++/* fc service class */
++#define FSF_CLASS_1 0x00000001
++#define FSF_CLASS_2 0x00000002
++#define FSF_CLASS_3 0x00000003
++
++/* SBAL chaining */
++#define FSF_MAX_SBALS_PER_REQ 36
++#define FSF_MAX_SBALS_PER_ELS_REQ 2
++
++/* logging space behind QTCB */
++#define FSF_QTCB_LOG_SIZE 1024
++
++/* channel features */
++#define FSF_FEATURE_QTCB_SUPPRESSION 0x00000001
++#define FSF_FEATURE_CFDC 0x00000002
++#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
++#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
++
++/* adapter types */
++#define FSF_ADAPTER_TYPE_FICON 0x00000001
++#define FSF_ADAPTER_TYPE_FICON_EXPRESS 0x00000002
++
++/* port types */
++#define FSF_HBA_PORTTYPE_UNKNOWN 0x00000001
++#define FSF_HBA_PORTTYPE_NOTPRESENT 0x00000003
++#define FSF_HBA_PORTTYPE_NPORT 0x00000005
++#define FSF_HBA_PORTTYPE_PTP 0x00000021
++/* the following are not defined or used by the FSF spec,
++ but are additionally defined by FC-HBA */
++#define FSF_HBA_PORTTYPE_OTHER 0x00000002
++#define FSF_HBA_PORTTYPE_NLPORT 0x00000006
++#define FSF_HBA_PORTTYPE_FLPORT 0x00000007
++#define FSF_HBA_PORTTYPE_FPORT 0x00000008
++#define FSF_HBA_PORTTYPE_LPORT 0x00000020
++
++/* port states */
++#define FSF_HBA_PORTSTATE_UNKNOWN 0x00000001
++#define FSF_HBA_PORTSTATE_ONLINE 0x00000002
++#define FSF_HBA_PORTSTATE_OFFLINE 0x00000003
++#define FSF_HBA_PORTSTATE_LINKDOWN 0x00000006
++#define FSF_HBA_PORTSTATE_ERROR 0x00000007
++
++/* IO states of adapter */
++#define FSF_IOSTAT_NPORT_RJT 0x00000004
++#define FSF_IOSTAT_FABRIC_RJT 0x00000005
++#define FSF_IOSTAT_LS_RJT 0x00000009
++
++
++struct fsf_queue_designator;
++struct fsf_status_read_buffer;
++struct fsf_port_closed_payload;
++struct fsf_bit_error_payload;
++union fsf_prot_status_qual;
++struct fsf_qual_version_error;
++struct fsf_qual_sequence_error;
++struct fsf_qtcb_prefix;
++struct fsf_qtcb_header;
++struct fsf_qtcb_bottom_config;
++struct fsf_qtcb_bottom_support;
++struct fsf_qtcb_bottom_io;
++union fsf_qtcb_bottom;
++
++typedef struct fsf_queue_designator {
++ u8 cssid;
++ u8 chpid;
++ u8 hla;
++ u8 ua;
++ u32 res1;
++} __attribute__ ((packed)) fsf_queue_designator_t;
++
++typedef struct fsf_port_closed_payload {
++ fsf_queue_designator_t queue_designator;
++ u32 port_handle;
++} __attribute__ ((packed)) fsf_port_closed_payload_t;
++
++typedef struct fsf_bit_error_payload {
++ u32 res1;
++ u32 link_failure_error_count;
++ u32 loss_of_sync_error_count;
++ u32 loss_of_signal_error_count;
++ u32 primitive_sequence_error_count;
++ u32 invalid_transmission_word_error_count;
++ u32 crc_error_count;
++ u32 primitive_sequence_event_timeout_count;
++ u32 elastic_buffer_overrun_error_count;
++ u32 fcal_arbitration_timeout_count;
++ u32 advertised_receive_b2b_credit;
++ u32 current_receive_b2b_credit;
++ u32 advertised_transmit_b2b_credit;
++ u32 current_transmit_b2b_credit;
++} __attribute__ ((packed)) fsf_bit_error_payload_t;
++
++typedef struct fsf_status_read_buffer {
++ u32 status_type;
++ u32 status_subtype;
++ u32 length;
++ u32 res1;
++ fsf_queue_designator_t queue_designator;
++ u32 d_id;
++ u32 class;
++ u64 fcp_lun;
++ u8 res3[24];
++ u8 payload[FSF_STATUS_READ_PAYLOAD_SIZE];
++} __attribute__ ((packed)) fsf_status_read_buffer_t;
++
++typedef struct fsf_qual_version_error {
++ u32 fsf_version;
++ u32 res1[3];
++} __attribute__ ((packed)) fsf_qual_version_error_t;
++
++typedef struct fsf_qual_sequence_error {
++ u32 exp_req_seq_no;
++ u32 res1[3];
++} __attribute__ ((packed)) fsf_qual_sequence_error_t;
++
++typedef struct fsf_qual_locallink_error {
++ u32 code;
++ u32 res1[3];
++} __attribute__ ((packed)) fsf_qual_locallink_error_t;
++
++typedef union fsf_prot_status_qual {
++ fsf_qual_version_error_t version_error;
++ fsf_qual_sequence_error_t sequence_error;
++ fsf_qual_locallink_error_t locallink_error;
++} __attribute__ ((packed)) fsf_prot_status_qual_t;
++
++typedef struct fsf_qtcb_prefix {
++ u64 req_id;
++ u32 qtcb_version;
++ u32 ulp_info;
++ u32 qtcb_type;
++ u32 req_seq_no;
++ u32 prot_status;
++ fsf_prot_status_qual_t prot_status_qual;
++ u8 res1[20];
++} __attribute__ ((packed)) fsf_qtcb_prefix_t;
++
++typedef union fsf_status_qual {
++#define FSF_STATUS_QUAL_SIZE 16
++ u8 byte[FSF_STATUS_QUAL_SIZE];
++ u16 halfword[FSF_STATUS_QUAL_SIZE / sizeof(u16)];
++ u32 word[FSF_STATUS_QUAL_SIZE / sizeof(u32)];
++ fsf_queue_designator_t fsf_queue_designator;
++} __attribute__ ((packed)) fsf_status_qual_t;
++
++typedef struct fsf_qtcb_header {
++ u64 req_handle;
++ u32 fsf_command;
++ u32 res1;
++ u32 port_handle;
++ u32 lun_handle;
++ u32 res2;
++ u32 fsf_status;
++ fsf_status_qual_t fsf_status_qual;
++ u8 res3[28];
++ u16 log_start;
++ u16 log_length;
++ u8 res4[16];
++} __attribute__ ((packed)) fsf_qtcb_header_t;
++
++typedef u64 fsf_wwn_t;
++
++typedef struct fsf_nport_serv_param {
++ u8 common_serv_param[16];
++ fsf_wwn_t wwpn;
++ fsf_wwn_t wwnn;
++ u8 class1_serv_param[16];
++ u8 class2_serv_param[16];
++ u8 class3_serv_param[16];
++ u8 class4_serv_param[16];
++ u8 vendor_version_level[16];
++ u8 res1[16];
++} __attribute__ ((packed)) fsf_nport_serv_param_t;
++
++typedef struct fsf_plogi {
++ u32 code;
++ fsf_nport_serv_param_t serv_param;
++} __attribute__ ((packed)) fsf_plogi_t;
++
++#define FSF_FCP_CMND_SIZE 288
++#define FSF_FCP_RSP_SIZE 128
++
++typedef struct fsf_qtcb_bottom_io {
++ u32 data_direction;
++ u32 service_class;
++ u8 res1[8];
++ u32 fcp_cmnd_length;
++ u8 res2[12];
++ u8 fcp_cmnd[FSF_FCP_CMND_SIZE];
++ u8 fcp_rsp[FSF_FCP_RSP_SIZE];
++ u8 res3[64];
++} __attribute__ ((packed)) fsf_qtcb_bottom_io_t;
++
++typedef struct fsf_qtcb_bottom_support {
++ u32 op_subtype;
++ u8 res1[12];
++ u32 d_id;
++ u32 option;
++ u64 fcp_lun;
++ u64 res2;
++ u64 req_handle;
++ u32 service_class;
++ u8 res3[3];
++ u8 timeout;
++ u8 res4[184];
++ u32 els1_length;
++ u32 els2_length;
++ u32 req_buf_length;
++ u32 resp_buf_length;
++ u8 els[256];
++} __attribute__ ((packed)) fsf_qtcb_bottom_support_t;
++
++typedef struct fsf_qtcb_bottom_config {
++ u32 lic_version;
++ u32 feature_selection;
++ u32 high_qtcb_version;
++ u32 low_qtcb_version;
++ u32 max_qtcb_size;
++ u32 max_data_transfer_size;
++ u32 supported_features;
++ u8 res1[4];
++ u32 fc_topology;
++ u32 fc_link_speed;
++ u32 adapter_type;
++ u32 peer_d_id;
++ u8 res2[12];
++ u32 s_id;
++ fsf_nport_serv_param_t nport_serv_param;
++ u8 res3[8];
++ u32 adapter_ports;
++ u32 hardware_version;
++ u8 serial_number[32];
++ u8 res4[272];
++} __attribute__ ((packed)) fsf_qtcb_bottom_config_t;
++
++typedef struct fsf_qtcb_bottom_port {
++ u8 res1[8];
++ u32 fc_port_id;
++ u32 port_type;
++ u32 port_state;
++ u32 class_of_service; /* should be 0x00000006 for class 2 and 3 */
++ u8 supported_fc4_types[32]; /* should be 0x00000100 for scsi fcp */
++ u8 active_fc4_types[32];
++ u32 supported_speed; /* 0x0001 for 1 GBit/s or 0x0002 for 2 GBit/s */
++ u32 maximum_frame_size; /* fixed value of 2112 */
++ u64 seconds_since_last_reset;
++ u64 tx_frames;
++ u64 tx_words;
++ u64 rx_frames;
++ u64 rx_words;
++ u64 lip; /* 0 */
++ u64 nos; /* currently 0 */
++ u64 error_frames; /* currently 0 */
++ u64 dumped_frames; /* currently 0 */
++ u64 link_failure;
++ u64 loss_of_sync;
++ u64 loss_of_signal;
++ u64 psp_error_counts;
++ u64 invalid_tx_words;
++ u64 invalid_crcs;
++ u64 input_requests;
++ u64 output_requests;
++ u64 control_requests;
++ u64 input_mb; /* where 1 MByte == 1,000,000 Bytes */
++ u64 output_mb; /* where 1 MByte == 1,000,000 Bytes */
++ u8 res2[256];
++} __attribute__ ((packed)) fsf_qtcb_bottom_port_t;
++
++typedef union fsf_qtcb_bottom {
++ fsf_qtcb_bottom_io_t io;
++ fsf_qtcb_bottom_support_t support;
++ fsf_qtcb_bottom_config_t config;
++ fsf_qtcb_bottom_port_t port;
++} fsf_qtcb_bottom_t;
++
++typedef struct fsf_qtcb {
++ fsf_qtcb_prefix_t prefix;
++ fsf_qtcb_header_t header;
++ fsf_qtcb_bottom_t bottom;
++ u8 log[FSF_QTCB_LOG_SIZE];
++} __attribute__ ((packed)) fsf_qtcb_t;
++
++#endif /* FSF_H */
++
+=== drivers/s390/scsi/zh_ioctl32.h
+==================================================================
+--- drivers/s390/scsi/zh_ioctl32.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/scsi/zh_ioctl32.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,26 @@
++/*
++ * $Id: zh_ioctl32.h,v 1.2.2.1 2004/01/26 17:26:34 mschwide Exp $
++ *
++ * Module providing an interface for HBA API (FC-HBA) implementation
++ * to the zfcp driver.
++ *
++ * (C) Copyright IBM Corp. 2003
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version. See the file COPYING for more
++ * information.
++ *
++ * Authors:
++ * Stefan Voelkel <Stefan.Voelkel at millenux.com>
++ * Andreas Herrmann <aherrman at de.ibm.com>
++ */
++
++#ifndef _ZH_IOCTL32_H_
++#define _ZH_IOCTL32_H_
++
++int zh_register_ioctl_conversion(void);
++int zh_unregister_ioctl_conversion(void);
++
++#endif /* _ZH_IOCTL32_H_ */
+=== drivers/s390/scsi/zh_main.c
+==================================================================
+--- drivers/s390/scsi/zh_main.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/scsi/zh_main.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,2046 @@
++/*
++ * $Id: zh_main.c,v 1.10.2.3 2004/09/17 08:29:41 aherrman Exp $
++ *
++ * Module providing an interface for HBA API (FC-HBA) implementation
++ * to the zfcp driver.
++ *
++ * (C) Copyright IBM Corp. 2003
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version. See the file COPYING for more
++ * information.
++ *
++ * Authors:
++ * Stefan Voelkel <Stefan.Voelkel at millenux.com>
++ * Andreas Herrmann <aherrman at de.ibm.com>
++ */
++
++/*
++ * To automatically create the device node (after module loading) use:
++ *
++ * minor=`cat /proc/misc | awk "\\$2==\\"zfcp_hbaapi\\" {print \\$1}"`
++ * mknod /dev/zfcp_hbaapi c 10 $minor
++ */
++
++#define HBAAPI_REVISION "$Revision: 1.10.2.3 $"
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/init.h>
++
++#include <linux/kmod.h>
++
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++#include <linux/miscdevice.h>
++#include <linux/stringify.h>
++
++#include <asm/uaccess.h>
++#include <asm/current.h>
++#include <asm/atomic.h>
++#include <asm/div64.h>
++#include <asm/semaphore.h>
++
++#include <linux/spinlock.h>
++
++#include "zh.h"
++#include "zfcp.h"
++#include "zfcp_zh.h"
++
++#ifdef CONFIG_S390_SUPPORT
++# include "zh_ioctl32.h"
++#endif
++
++MODULE_AUTHOR("Stefan Voelkel <Stefan.Voelkel at millenux.com>, "
++ "Andreas Herrmann <aherrman at de.ibm.com>, "
++ "IBM Deutschland Entwicklung GmbH");
++MODULE_DESCRIPTION("Interface for HBA API to FCP HBA driver for IBM zSeries, "
++ HBAAPI_REVISION);
++MODULE_LICENSE("GPL");
++
++EXPORT_NO_SYMBOLS;
++
++/*
++ * module and kernel parameters
++ */
++int maxshared = ZH_EVENTS_MAX;
++int maxpolled = ZH_EVENTS_MAX;
++int minor = MISC_DYNAMIC_MINOR;
++
++#ifdef MODULE
++MODULE_PARM(maxshared,"i");
++MODULE_PARM_DESC(maxshared, "Maximum number of events in the shared event"
++ " queue, defaults to "__stringify(ZH_EVENTS_MAX));
++
++MODULE_PARM(maxpolled,"i");
++MODULE_PARM_DESC(maxpolled, "Maximum number of events in the polled event"
++ " queue, defaults to "__stringify(ZH_EVENTS_MAX));
++
++MODULE_PARM(minor, "i");
++MODULE_PARM_DESC(minor, "Minor of the misc device to register, defaults to "
++ "dynamic registration");
++#else
++static int __init zh_maxshared_setup(char *str)
++{
++ maxshared = simple_strtol(str, NULL, 0);
++ return 1;
++}
++__setup("zfcp_hbaapi_maxshared=", zh_maxshared_setup);
++
++static int __init zh_maxpolled_setup(char *str)
++{
++ maxpolled = simple_strtol(str, NULL, 0);
++ return 1;
++}
++__setup("zfcp_hbaapi_maxpolled=", zh_maxpolled_setup);
++
++static int __init zh_minor_setup(char *str)
++{
++ minor = simple_strtol(str, NULL, 0);
++ return 1;
++}
++__setup("zfcp_hbaapi_minor=", zh_minor_setup);
++#endif
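++
++/*
++ * Parameter usage sketch (the module file name is an assumption): as a
++ * module the parameters are passed to insmod, e.g.
++ *
++ * insmod zfcp_hbaapi.o maxshared=64 maxpolled=64
++ *
++ * whereas a driver built into the kernel takes them on the command line:
++ *
++ * zfcp_hbaapi_maxshared=64 zfcp_hbaapi_maxpolled=64
++ */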
++
++/**
++ * struct zh_event_item - An event
++ * @event: the event itself, as it is passed to userspace
++ * @count: reference counter
++ * @list: list handling
++ *
++ * An item in the kernel event queue.
++ */
++struct zh_event_item
++{
++ struct zh_event event;
++ atomic_t count;
++ struct list_head list;
++};
++
++/**
++ * struct zh_config - A config entry
++ * @event: the event itself
++ * @list: list handling
++ *
++ * A non-counted event. Used for configuration and polled events.
++ */
++struct zh_config
++{
++ struct zh_event event;
++ struct list_head list;
++};
++
++/**
++ * struct zh_client - per client data
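++ * @sem: serializes ioctl() and read() calls on this file descriptor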
++ * @registered: 1 if the fd is registered for events, else 0
++ * @lost: 1 if the fd has lost an event, else 0
++ * @*last: pointer to the last delivered event or NULL
++ * @config: list of private config events
++ * @clients: list handling for list of clients
++ *
++ * This structure is attached to filp->private_data and used throughout the
++ * module to save per client data.
++ */
++struct zh_client
++{
++ struct semaphore sem;
++ unsigned int registered:1;
++ unsigned int lost:1;
++ struct list_head *last;
++ struct list_head config;
++ struct list_head clients;
++};
++
++/* ioctl workers
++ * We could inline them since only about two are called from more than one
++ * place. OTOH this is not time-critical code. */
++static int zh_ioc_get_adapterattributes(struct zh_get_adapterattributes *);
++static int zh_ioc_get_portattributes(struct zh_get_portattributes *);
++static int zh_ioc_get_portstatistics(struct zh_get_portstatistics *);
++static int zh_ioc_get_dportattributes(struct zh_get_portattributes *);
++static int zh_ioc_get_config(struct zh_get_config *, struct file *);
++static int zh_ioc_clear_config(struct file *);
++static int zh_ioc_get_event_buffer(struct zh_get_event_buffer *);
++static int zh_ioc_event_start(struct file *);
++static int zh_ioc_event_stop(struct file *);
++static int zh_ioc_event(struct zh_event *, struct file *);
++static int zh_ioc_event_insert(void);
++static int zh_ioc_scsi_inquiry(struct zh_scsi_inquiry *);
++static int zh_ioc_scsi_read_capacity(struct zh_scsi_read_capacity *);
++static int zh_ioc_scsi_report_luns(struct zh_scsi_report_luns *);
++static int zh_ioc_get_rnid(struct zh_get_rnid *);
++static int zh_ioc_send_rnid(struct zh_send_rnid *);
++static int zh_ioc_send_ct(struct zh_send_ct *);
++
++/* callbacks for asynchronous event generation. called from zfcp.o */
++static void zh_cb_adapter_add(struct file *, devno_t, wwn_t, wwn_t);
++static void zh_cb_port_add(struct file *, devno_t, wwn_t, wwn_t, fc_id_t);
++static void zh_cb_unit_add(struct file *, devno_t, wwn_t, fcp_lun_t,
++ unsigned int, unsigned int, unsigned int,
++ unsigned int);
++static void zh_cb_incomming_els(const devno_t, const fc_id_t, const void *);
++static void zh_cb_link_down(const fc_id_t);
++static void zh_cb_link_up(const fc_id_t);
++
++/* implemented file operations for our device file */
++static int zh_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
++static int zh_open(struct inode *, struct file *);
++static ssize_t zh_read(struct file *, char *, size_t, loff_t *);
++static int zh_release(struct inode *, struct file *);
++
++/* auxiliary functions */
++static void zh_map_port_speed(u32 *, int);
++#define ZH_PORT_OPERATING_SPEED 1
++#define ZH_PORT_SUPPORTED_SPEED 0
++
++/*
++ * zh_fops - Device file operations
++ *
++ * Structure describing the possible operations on our device file
++ */
++static struct file_operations zh_fops = {
++ .ioctl = zh_ioctl,
++ .open = zh_open,
++ .release = zh_release,
++ .read = zh_read,
++};
++
++/*
++ * struct zh_misc - misc device description
++ * @minor: our minor
++ * @name: that is what we are called
++ * @fops: file operations
++ *
++ * Passed to misc_register()
++ */
++static struct miscdevice zh_misc = {
++ .name = "zfcp_hbaapi",
++ .fops = &zh_fops
++};
++
++/*
++ * struct zh_callbacks - Callbacks for zfcp events
++ * @adapter_add: called on adapter add events
++ *
++ * This is passed to zfcp_zh_callbacks_register(), to enable
++ * zfcp to call our hooks.
++ */
++static struct zfcp_zh_callbacks zh_callbacks = {
++ .adapter_add = zh_cb_adapter_add,
++ .port_add = zh_cb_port_add,
++ .unit_add = zh_cb_unit_add,
++ .incomming_els = zh_cb_incomming_els,
++ .link_down = zh_cb_link_down,
++ .link_up = zh_cb_link_up
++};
++
++/**
++ * struct zh_shared_events
++ * @lock: spinlock protecting the structure
++ * @wq: wait queue to wait for events
++ * @registered: number of processes to notify on events
++ * @pending: number of events in the queue
++ * @queue: list of events
++ * @clients: anchor for list of clients
++ *
++ * This structure contains all data needed for asynchronous event handling
++ */
++struct zh_shared_events {
++ spinlock_t lock;
++ wait_queue_head_t wq;
++ unsigned short registered;
++ unsigned int pending;
++ struct list_head queue;
++ struct list_head clients;
++};
++
++static struct zh_shared_events zh_shared;
++
++/**
++ * struct zh_polled_events
++ * @lock: spinlock protecting this structure
++ * @pending: number of events pending
++ * @queue: list of events
++ *
++ * Polled events must be in an extra queue according to FC-HBA.
++ */
++struct zh_polled_events
++{
++ spinlock_t lock;
++ zh_count_t pending;
++ struct list_head queue;
++};
++
++static struct zh_polled_events zh_polled;
++
++/**
++ * add_event_to_polled - Add an event to the polled queue
++ * @*c: pointer to an event
++ *
++ * We use the zh_config structure here. It differs from zh_event_item only
++ * in lacking the reference counter. Since a polled event is delivered only
++ * once, no counter is needed here.
++ */
++static void add_event_to_polled(struct zh_config *c)
++{
++ struct zh_config *last;
++ unsigned long flags;
++
++ spin_lock_irqsave(&zh_polled.lock, flags);
++
++ if (zh_polled.pending == maxpolled) {
++ last = list_entry(zh_polled.queue.next, struct zh_config, list);
++ list_del(zh_polled.queue.next);
++ kfree(last);
++ } else {
++ ++zh_polled.pending;
++ }
++
++ list_add_tail(&c->list, &zh_polled.queue);
++
++ spin_unlock_irqrestore(&zh_polled.lock, flags);
++}
++
++/**
++ * add_event_to_shared - Add an event to the list of pending events
++ * @e: The event that should be added
++ * Context: irq/user
++ *
++ * Events will be thrown away if nobody is registered for delivery. If there
++ * are already &maxevents events in the list, the oldest is discarded.
++ */
++static void add_event_to_shared(struct zh_event_item *e)
++{
++ struct zh_event_item *item;
++ struct list_head *go, *first;
++ struct zh_client *c;
++ unsigned long flags;
++
++ /* we can be called from both user and irq context,
++ * so we need the irq-safe lock variants here. */
++ spin_lock_irqsave(&zh_shared.lock, flags);
++
++ /* do not keep events if we have nobody to deliver them to */
++ if (zh_shared.registered == 0) {
++ spin_unlock_irqrestore(&zh_shared.lock, flags);
++ kfree(e);
++ return;
++ }
++
++ /* is the queue full? */
++ if (zh_shared.pending == maxshared) {
++ first = zh_shared.queue.next;
++
++ /* check if we have to flag some clients */
++ list_for_each(go, &zh_shared.clients) {
++ c = list_entry(go, struct zh_client, clients);
++
++ if (NULL == c->last) {
++ ZH_LOG(KERN_INFO, "lost event for client "
++ "with pid %u\n", current->pid);
++ c->lost = 1;
++ } else {
++ if (first == c->last) {
++ c->last = NULL;
++ }
++ }
++ }
++
++ item = list_entry(first, struct zh_event_item, list);
++ list_del(first);
++ kfree(item);
++
++ ZH_LOG(KERN_INFO, "event queue full, deleted item %p\n",
++ item);
++
++ } else {
++ ++zh_shared.pending;
++ }
++
++ /* initialize event, add it to the list */
++ atomic_set(&e->count, zh_shared.registered);
++
++ list_add_tail(&e->list, &zh_shared.queue);
++
++ spin_unlock_irqrestore(&zh_shared.lock, flags);
++
++ /* wake up all processes waiting for events */
++ wake_up_interruptible_all(&zh_shared.wq);
++}
++
++/**
++ * add_event - Enqueue an event
++ * @filp: struct file to add the event to, or NULL
++ * @e: the event for the shared queue (used if @filp == NULL)
++ * @c: the config event for the fd's private queue (used if @filp != NULL)
++ *
++ * Either add the event privately to the fd directly, or to the
++ * global queue of events.
++ * FIXME (stage 2) check whether some polled events have to be added to the
++ * shared queue, too.
++ */
++static inline void add_event(struct file *filp, struct zh_event_item *e, struct
++ zh_config *c)
++{
++ if (NULL == filp) {
++ add_event_to_shared(e);
++ } else {
++ struct zh_client *head = (void*) filp->private_data;
++
++ list_add_tail(&c->list, &head->config);
++ }
++}
++
++/**
++ * zh_open - Implements the device open call
++ * @inode: struct inode
++ * @filp: struct file
++ * Return: 0 on success, else -ENOMEM
++ * Context: user
++ *
++ * Called when the zfcp_hbaapi device file is opened. Initializes
++ * filp->private_data.
++ */
++static int zh_open(struct inode *inode, struct file *filp)
++{
++ struct zh_client *data;
++
++ MOD_INC_USE_COUNT;
++
++ data = kmalloc(sizeof(*data), GFP_KERNEL);
++
++ if (data == NULL) {
++ MOD_DEC_USE_COUNT;
++ return -ENOMEM;
++ }
++
++ sema_init(&data->sem, 1);
++ data->last = NULL;
++ data->registered = 0;
++ data->lost = 0;
++ INIT_LIST_HEAD(&data->config);
++ INIT_LIST_HEAD(&data->clients);
++ filp->private_data = data;
++
++ return 0;
++}
++
++/**
++ * zh_release - Called on file release
++ * @inode: struct inode
++ * @filp: struct file
++ * Return: always 0
++ * Context: user
++ *
++ * Called when all copies of a file descriptor are closed, thus we
++ * can mess around with private_data, and free it.
++ */
++static int zh_release(struct inode *inode, struct file *filp)
++{
++ zh_ioc_event_stop(filp);
++ zh_ioc_clear_config(filp);
++
++ kfree(filp->private_data);
++ filp->private_data = NULL;
++
++ MOD_DEC_USE_COUNT;
++ return 0;
++}
++
++/**
++ * zh_read - The device's read method
++ * Context: user
++ *
++ * Used to read the whole configuration data, e.g. adapters, ports or units,
++ * from zfcp.
++ *
++ * Access is serialized with a semaphore, thus cloned file descriptors may
++ * block: e.g. fd = open(); fork(); parent: ioctl(ZH_IOC_EVENT);
++ * child: read(); The child will block until the parent returns from the
++ * ioctl(), IF they use _exactly the same_ file descriptor. Different file
++ * descriptors do _not_ block each other.
++ */
++static ssize_t zh_read(struct file *filp, char* buf, size_t s, loff_t *off)
++{
++ size_t count, i, ret;
++ struct zh_config *c;
++ struct list_head *go, *safe;
++ struct zh_client *client = (struct zh_client*) filp->private_data;
++
++ if (down_interruptible(&client->sem))
++ return -ERESTARTSYS;
++
++ if (s < sizeof(c->event)) {
++ ret = -ENOSPC;
++ goto up;
++ }
++
++ if (list_empty(&client->config)) {
++ ret = 0;
++ goto up;
++ }
++
++ count = s / sizeof(c->event);
++ i = 0;
++
++ list_for_each_safe(go, safe, &client->config)
++ {
++ c = list_entry(go, struct zh_config, list);
++
++ if (copy_to_user(buf, &c->event, sizeof(c->event))) {
++ ret = -EFAULT;
++ goto up;
++ }
++
++ list_del(go);
++ kfree(c);
++
++ buf += sizeof(c->event);
++
++ if (++i >= count) {
++ break;
++ }
++ }
++
++ ret = i * sizeof(c->event);
++
++up:
++ up(&client->sem);
++
++ return ret;
++}
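++
++/*
++ * Userspace side, illustrative only (device path from the mknod note at
++ * the top of this file; includes and error handling omitted):
++ */
++#if 0
++int fd = open("/dev/zfcp_hbaapi", O_RDONLY);
++struct zh_get_config cfg; /* filled in as defined in zh.h (not shown) */
++struct zh_event ev;
++
++ioctl(fd, ZH_IOC_GET_CONFIG, &cfg);
++while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
++ /* handle one config event */
++}
++close(fd);
++#endif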
++
++/**
++ * zh_ioctl - The device's ioctl() method
++ * @inode: struct inode
++ * @filp: struct file
++ * @cmd: command to execute
++ * @arg: parameter(s) for the command
++ * Return: 0 on success, else -E* code
++ * Context: User
++ *
++ * This is the main interaction method between the vendor lib and the
++ * kernel. Here we only determine what we should do, and then call the
++ * corresponding worker method.
++ *
++ * Also read zh_read()'s comment.
++ */
++static int zh_ioctl(struct inode *inode, struct file *filp,
++ unsigned int cmd, unsigned long arg)
++{
++ int ret;
++ struct zh_client *client = (void*) filp->private_data;
++
++ if (down_interruptible(&client->sem))
++ return -ERESTARTSYS;
++
++ switch (cmd)
++ {
++ case ZH_IOC_GET_ADAPTERATTRIBUTES:
++ ret = zh_ioc_get_adapterattributes(
++ (struct zh_get_adapterattributes *) arg);
++ break;
++ case ZH_IOC_GET_PORTATTRIBUTES:
++ ret = zh_ioc_get_portattributes(
++ (struct zh_get_portattributes *) arg);
++ break;
++ case ZH_IOC_GET_DPORTATTRIBUTES:
++ ret = zh_ioc_get_dportattributes(
++ (struct zh_get_portattributes *) arg);
++ break;
++ case ZH_IOC_GET_PORTSTATISTICS:
++ ret = zh_ioc_get_portstatistics(
++ (struct zh_get_portstatistics *) arg);
++ break;
++ case ZH_IOC_GET_EVENT_BUFFER:
++ ret = zh_ioc_get_event_buffer(
++ (struct zh_get_event_buffer *) arg);
++ break;
++ case ZH_IOC_EVENT_START:
++ ret = zh_ioc_event_start(filp);
++ break;
++ case ZH_IOC_EVENT_STOP:
++ ret = zh_ioc_event_stop(filp);
++ break;
++ case ZH_IOC_EVENT:
++ ret = zh_ioc_event((struct zh_event*) arg, filp);
++ break;
++ case ZH_IOC_EVENT_INSERT: /* DEBUG ONLY */
++ ret = zh_ioc_event_insert();
++ break;
++ case ZH_IOC_SCSI_INQUIRY:
++ ret = zh_ioc_scsi_inquiry((struct zh_scsi_inquiry*) arg);
++ break;
++ case ZH_IOC_SCSI_READ_CAPACITY:
++ ret = zh_ioc_scsi_read_capacity(
++ (struct zh_scsi_read_capacity *) arg);
++ break;
++ case ZH_IOC_SCSI_REPORT_LUNS:
++ ret = zh_ioc_scsi_report_luns(
++ (struct zh_scsi_report_luns *) arg);
++ break;
++ case ZH_IOC_GET_CONFIG:
++ ret = zh_ioc_get_config((struct zh_get_config *) arg, filp);
++ break;
++ case ZH_IOC_CLEAR_CONFIG:
++ ret = zh_ioc_clear_config(filp);
++ break;
++ case ZH_IOC_GET_RNID:
++ ret = zh_ioc_get_rnid((struct zh_get_rnid *) arg);
++ break;
++ case ZH_IOC_SEND_RNID:
++ ret = zh_ioc_send_rnid((struct zh_send_rnid *) arg);
++ break;
++ case ZH_IOC_SEND_CT:
++ ret = zh_ioc_send_ct((struct zh_send_ct *) arg);
++ break;
++ default:
++ ret = -ENOTTY;
++ break;
++ }
++
++ up(&client->sem);
++
++ return ret;
++}
++
++/**
++ * zh_init - Module initialisation
++ * Return: 0 on success, else < 0
++ * Context: user
++ *
++ * Sets owner, registers with zfcp, registers misc device, initializes
++ * global events structure.
++ *
++ * FIXME Register a misc minor number for zfcp_hbaapi at www.lanana.org.
++ */
++static int __init zh_init(void)
++{
++ int ret;
++
++ if (0 >= maxshared) {
++ ZH_LOG(KERN_ERR, "illegal value for maxshared: %d, "
++ "minimum is 1\n", maxshared);
++ return -EINVAL;
++ }
++
++#ifdef CONFIG_ARCH_S390X
++ ret = zh_register_ioctl_conversion();
++ if (ret < 0) {
++ return ret;
++ }
++#endif
++
++ /* register callbacks with zfcp */
++ ret = zfcp_zh_callbacks_register(&zh_callbacks);
++ if (ret < 0) {
++ return ret;
++ }
++
++ SET_MODULE_OWNER(&zh_fops);
++
++ /* register a misc char device */
++ zh_misc.minor = minor;
++ ret = misc_register(&zh_misc);
++ if (ret < 0) {
++ goto failed_misc;
++ }
++
++ /* initialize shared events */
++ spin_lock_init(&zh_shared.lock);
++ init_waitqueue_head(&zh_shared.wq);
++ zh_shared.registered = 0;
++ zh_shared.pending = 0;
++ INIT_LIST_HEAD(&zh_shared.queue);
++ INIT_LIST_HEAD(&zh_shared.clients);
++
++ /* initialize polled events */
++ spin_lock_init(&zh_polled.lock);
++ zh_polled.pending = 0;
++ INIT_LIST_HEAD(&zh_polled.queue);
++
++ ZH_LOG(KERN_NOTICE, "loaded hbaapi.o, version %s, maxshared=%d, "
++ "maxpolled=%d\n", HBAAPI_REVISION, maxshared,
++ maxpolled);
++
++ if (MISC_DYNAMIC_MINOR == minor) {
++ ZH_LOG(KERN_INFO, "registered dynamic minor with misc "
++ "device\n");
++ } else {
++ ZH_LOG(KERN_INFO, "registered minor %d with misc device\n",
++ minor);
++ }
++
++ return 0;
++
++failed_misc:
++ zfcp_zh_callbacks_unregister(&zh_callbacks);
++
++ return ret;
++}
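++
++/*
++ * Sketch of loading the module with the parameters zh_init() validates
++ * and logs (parameter names per the code above; concrete values are
++ * illustrative):
++ *
++ *	insmod hbaapi.o maxshared=2 maxpolled=32
++ */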
++
++/**
++ * zh_exit - Module finalisation
++ * Context: user
++ *
++ * Undoes all work done in zh_init()
++ */
++static void __exit zh_exit(void)
++{
++ struct list_head *go, *save;
++ struct zh_event_item *e;
++ struct zh_config *c;
++
++ zfcp_zh_callbacks_unregister(&zh_callbacks);
++ misc_deregister(&zh_misc);
++
++#ifdef CONFIG_ARCH_S390X
++ /* FIXME return value? log message? */
++ zh_unregister_ioctl_conversion();
++#endif
++
++ if (!list_empty(&zh_shared.queue)) {
++ ZH_LOG(KERN_ERR, "event queue not empty while unloading "
++ "module\n");
++
++ /* the module can only be unloaded when all file descriptors
++ * have been closed. If there are events left in the queue, we
++ * do have an error in our code. Since nobody else can access
++ * these events, we can free them.
++ */
++ list_for_each_safe(go, save, &zh_shared.queue)
++ {
++ e = list_entry(go, struct zh_event_item, list);
++ list_del(go);
++ kfree(e);
++ --zh_shared.pending;
++ }
++ }
++
++ if (zh_shared.pending) {
++ ZH_LOG(KERN_ERR, "number of pending events not zero but %u\n",
++ zh_shared.pending);
++ }
++
++ /* throw away polled events */
++ list_for_each_safe(go, save, &zh_polled.queue) {
++ c = list_entry(go, struct zh_config, list);
++ list_del(go);
++ kfree(c);
++ }
++}
++
++/**
++ * zh_ioc_get_adapterattributes - Retrieve attributes of an adapter
++ * @u_ptr: userspace pointer to copy data from and to
++ * Return: 0 on success, else -E* code
++ * Context: user
++ */
++static int zh_ioc_get_adapterattributes(struct zh_get_adapterattributes *u_ptr)
++{
++ struct zh_get_adapterattributes ioc_data;
++ int ret;
++
++ if (copy_from_user(&ioc_data, u_ptr, sizeof(ioc_data))) {
++ return -EFAULT;
++ }
++
++ ret = zfcp_zh_get_adapter_attributes(DEVID_TO_DEVNO(ioc_data.devid),
++ &ioc_data.attributes);
++ if (0 == ret) {
++ if (copy_to_user(u_ptr, &ioc_data, sizeof(*u_ptr))) {
++ ret = -EFAULT;
++ }
++ }
++
++ return ret;
++}
++
++/**
++ * zh_ioc_get_portattributes - Retrieve attributes of an adapter port
++ * @*u_ptr: userspace pointer to copy data from and to
++ * Return: 0 on success, else -E* code
++ * Context: user
++ */
++static int zh_ioc_get_portattributes(struct zh_get_portattributes *u_ptr)
++{
++ struct zh_get_portattributes ioc_data;
++ int ret;
++
++ if (copy_from_user(&ioc_data, u_ptr, sizeof(ioc_data))) {
++ return -EFAULT;
++ }
++
++ ret = zfcp_zh_get_port_attributes(DEVID_TO_DEVNO(ioc_data.devid),
++ &ioc_data.attributes);
++
++ if ((0 == ret) || (EOPNOTSUPP == ret)) {
++ zh_map_port_speed(&ioc_data.attributes.supported_speed,
++ ZH_PORT_SUPPORTED_SPEED);
++ zh_map_port_speed(&ioc_data.attributes.speed,
++ ZH_PORT_OPERATING_SPEED);
++ if (copy_to_user(u_ptr, &ioc_data, sizeof(*u_ptr))) {
++ ret = -EFAULT;
++ }
++ }
++
++ return ret;
++}
++
++/**
++ * zh_ioc_get_portstatistics - Retrieve statistics of an adapter port
++ * @*u_ptr: userspace pointer to copy data from and to
++ * Return: 0 on success, else -E* code
++ * Context: user
++ */
++static int zh_ioc_get_portstatistics(struct zh_get_portstatistics *u_ptr)
++{
++ struct zh_get_portstatistics ioc_data;
++ int ret;
++
++ if (copy_from_user(&ioc_data, u_ptr, sizeof(ioc_data))) {
++ return -EFAULT;
++ }
++
++ ret = zfcp_zh_get_port_statistics(DEVID_TO_DEVNO(ioc_data.devid),
++ &ioc_data.statistics);
++
++ if ((0 == ret) || (EOPNOTSUPP == ret)) {
++ if (copy_to_user(u_ptr, &ioc_data, sizeof(*u_ptr))) {
++ ret = -EFAULT;
++ }
++ }
++
++ return ret;
++}
++
++/**
++ * zh_ioc_get_dportattributes - Retrieve attributes of a target port
++ * @*u_ptr: userspace pointer to copy data from and to
++ * Return: 0 on success, else -E* code
++ * Context: user
++ */
++static int zh_ioc_get_dportattributes(struct zh_get_portattributes *u_ptr)
++{
++ struct zh_get_portattributes ioc_data;
++ int ret;
++
++ if (copy_from_user(&ioc_data, u_ptr, sizeof(ioc_data))) {
++ return -EFAULT;
++ }
++
++ ret = zfcp_zh_get_dport_attributes(DEVID_TO_DEVNO(ioc_data.devid),
++ ioc_data.wwpn,
++ &ioc_data.attributes);
++
++ if (0 == ret) {
++ if (copy_to_user(u_ptr, &ioc_data, sizeof(*u_ptr))) {
++ ret = -EFAULT;
++ }
++ }
++
++ return ret;
++}
++
++/**
++ * zh_ioc_get_event_buffer - Retrieve events from the polled queue
++ * @*u_ptr: userspace pointer to copy data from and to
++ * Return: number of returned events on success, else -E* code
++ *
++ * Copy events belonging to an adapter to userspace and delete them.
++ */
++static int zh_ioc_get_event_buffer(struct zh_get_event_buffer *u_ptr)
++{
++ int ret;
++ struct zh_get_event_buffer ioc_data;
++ struct zh_config *c;
++ struct list_head *go, *safe;
++ unsigned short i = 0;
++
++ if (copy_from_user(&ioc_data, u_ptr, sizeof(ioc_data))) {
++ return -EFAULT;
++ }
++
++ if (ioc_data.count > ZH_GET_EVENT_BUFFER_COUNT) {
++ ioc_data.count = ZH_GET_EVENT_BUFFER_COUNT;
++ }
++
++ spin_lock_irq(&zh_polled.lock);
++
++ list_for_each_safe(go, safe, &zh_polled.queue)
++ {
++ c = list_entry(go, struct zh_config, list);
++
++ if (i >= ioc_data.count) {
++ break;
++ }
++
++ if (ioc_data.devid == c->event.data.polled.devid){
++ ioc_data.polled[i] = c->event.data.polled;
++
++ list_del(go);
++ kfree(c);
++ --zh_polled.pending;
++
++ ++i;
++ }
++ }
++
++ spin_unlock_irq(&zh_polled.lock);
++
++ if (copy_to_user(u_ptr, &ioc_data, sizeof(*u_ptr))) {
++ ret = -EFAULT;
++ } else {
++ ret = i;
++ }
++
++ return ret;
++}
++
++/**
++ * zh_ioc_event_start - Called to enable event delivery
++ * @*filp: file for which event delivery should be enabled
++ * Return: 0 on success, else -E* code
++ * Context: user
++ *
++ * Mark the fd as a target for events, increase each event's
++ * "to-be-delivered-to" counter by 1.
++ */
++static int zh_ioc_event_start(struct file *filp)
++{
++ struct zh_client *client = (struct zh_client*) filp->private_data;
++
++ /* registration is only allowed once */
++ if (client->registered) {
++ return -EINVAL;
++ }
++
++ spin_lock_irq(&zh_shared.lock);
++
++ /* remember that there is one more fd events have to be delivered to */
++ ++zh_shared.registered;
++
++ client->registered = 1;
++
++ /* set number of the next event, 0 means no events available ATM */
++ if (list_empty(&zh_shared.queue)) {
++ client->last = NULL;
++ } else {
++ client->last = zh_shared.queue.prev;
++ }
++
++ list_add_tail(&client->clients, &zh_shared.clients);
++
++ spin_unlock_irq(&zh_shared.lock);
++
++ return 0;
++}
++
++/**
++ * count_down_event - Refcount event
++ * @list: contained list structure of the event
++ * Context: user
++ * Return: 1 if the event was deleted, else 0
++ * Locks: zh_shared.lock must be held
++ *
++ * Removes an event from the list if it has been delivered to all fds;
++ * otherwise just decrements its reference count.
++ */
++static inline int count_down_event(struct list_head *list)
++{
++ struct zh_event_item *e = list_entry(list, struct zh_event_item, list);
++
++ if (atomic_dec_and_test(&e->count)) {
++ --zh_shared.pending;
++
++ list_del(list);
++ kfree(e);
++
++ return 1;
++ } else {
++ return 0;
++ }
++}
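++
++/*
++ * Example of the resulting lifecycle (assuming add_event_to_shared()
++ * initialises each new event's count to the number of registered fds):
++ * an event enqueued while three fds are registered starts with
++ * count == 3; the first two readers see count_down_event() return 0 and
++ * keep their cursor on the entry, the third sees it return 1 after the
++ * entry is freed and must drop its cursor (client->last = NULL), as
++ * zh_ioc_event() does below.
++ */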
++
++/**
++ * zh_ioc_event_stop - Stop event delivery
++ * @*filp: file for which event delivery should be disabled
++ * Return: 0 on success, else -E* code
++ * Context: user
++ *
++ * Decrease the total number of fds which get events; count down all events
++ * _after_ the last event delivered to this fd.
++ */
++static int zh_ioc_event_stop(struct file *filp)
++{
++ struct list_head *go, *safe;
++ struct zh_client *client = (struct zh_client*) filp->private_data;
++
++ /* deregistration is only allowed once */
++ if (!client->registered) {
++ return -EINVAL;
++ }
++
++ spin_lock_irq(&zh_shared.lock);
++
++ --zh_shared.registered;
++
++ list_del(&client->clients);
++
++ /* count down all not yet delivered events for this fd */
++ list_for_each_safe(go, safe, &zh_shared.queue)
++ {
++ if (NULL == client->last) {
++ count_down_event(go);
++ } else {
++ if (go == client->last) {
++ client->last = NULL;
++ }
++ }
++ }
++
++ spin_unlock_irq(&zh_shared.lock);
++
++ client->registered = 0;
++ client->lost = 0;
++ client->last = NULL;
++
++ return 0;
++}
++
++/**
++ * has_next_event - Condition for wait_event_interruptible
++ * @head: private data of the fd
++ * Return: 1 if there is an event waiting, 0 else
++ * Locks: lock/unlock of zh_shared.lock
++ *
++ * This is used as the condition for the wait_event_interruptible()
++ * call. It avoids a race between wakeup and state change.
++ */
++static inline int has_next_event(struct zh_client *client)
++{
++ int ret = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&zh_shared.lock, flags);
++
++ if (NULL == client->last) {
++ ret = !list_empty(&zh_shared.queue);
++ } else {
++ ret = (zh_shared.queue.prev != client->last);
++ }
++
++ spin_unlock_irqrestore(&zh_shared.lock, flags);
++
++ return ret;
++}
++
++/**
++ * zh_ioc_event - Wait for an event
++ * @*u_ptr: userspace pointer to copy data to
++ * @filp: descriptor receiving events
++ * Return: 0 on success, -E* code else
++ * Context: user
++ *
++ * The heart of the event delivery. Waits for events and delivers the next one
++ */
++static int zh_ioc_event(struct zh_event *u_ptr, struct file *filp)
++{
++ struct zh_event_item *event;
++ struct list_head *entry;
++ int ret;
++ struct zh_client *client = (struct zh_client*) filp->private_data;
++
++ if (!client->registered) {
++ return -EINVAL;
++ }
++
++ /* wait for events */
++ ret = wait_event_interruptible(zh_shared.wq, has_next_event(client));
++ if (-ERESTARTSYS == ret) {
++ /* ERESTARTSYS should never be seen by user programs */
++ return -ERESTART;
++ }
++
++ spin_lock_irq(&zh_shared.lock);
++
++ /* need to check it lock protected */
++ if (client->lost) {
++ client->last = NULL;
++ client->lost = 0;
++ ret = -ENXIO;
++ goto release;
++ }
++
++ if (NULL == client->last) {
++ entry = zh_shared.queue.next;
++ } else {
++ entry = client->last->next;
++ }
++
++ event = list_entry(entry, struct zh_event_item, list);
++
++ if (copy_to_user(u_ptr, &event->event, sizeof(*u_ptr))) {
++ ret = -EFAULT;
++ goto release; /* keep the event in the queue */
++ }
++
++ if (count_down_event(entry)) {
++ client->last = NULL;
++ } else {
++ client->last = entry;
++ }
++
++ ret = 0;
++
++release:
++ spin_unlock_irq(&zh_shared.lock);
++ return ret;
++}
++
++/**
++ * zh_ioc_event_insert - Insert an event into the list
++ * @*u_ptr: userspace pointer to copy data from
++ * Return: 0 on success, else -E* code
++ * Debug: DEBUG ONLY
++ *
++ * Insert a dummy event into the shared event queue; used to determine
++ * whether the event handling code is working.
++ */
++static int zh_ioc_event_insert(void)
++{
++ struct zh_event_item *e;
++
++ e = (struct zh_event_item*) kmalloc(sizeof(*e), GFP_KERNEL);
++ if (e == NULL) {
++ return -ENOMEM;
++ }
++
++ memset(e, 0, sizeof(*e));
++ e->event.event = ZH_EVENT_DUMMY;
++
++ add_event_to_shared(e);
++
++ return 0;
++}
++
++/**
++ * zh_sg_free - Free an allocated scatterlist
++ * @sg: the scatterlist
++ */
++static void zh_sg_free(struct sg_list *sg)
++{
++ int i;
++
++ for (i = sg->count - 1; i >= 0; --i) {
++ free_page((unsigned long) sg->sg[i].address);
++ }
++
++ kfree(sg->sg);
++ memset(sg, 0, sizeof(*sg));
++}
++
++/**
++ * zh_sg_alloc - Allocate a scatterlist
++ * @*sg: scatterlist
++ * Return: 0 on success, else -E* code
++ * Context: User
++ *
++ * Either all pages can be allocated, or none. On error *sg is invalid and
++ * has to be reinitialized.
++ */
++static int zh_sg_alloc(struct sg_list *sg, size_t size)
++{
++ unsigned int i;
++ size_t lastsize, array;
++
++ if (0 == size) {
++ return -EINVAL;
++ }
++
++ sg->count = size / PAGE_SIZE;
++ lastsize = size - (sg->count * PAGE_SIZE);
++ if (0 != lastsize) {
++ ++sg->count;
++ } else {
++ lastsize = PAGE_SIZE;
++ }
++
++ /* allocate the array */
++ array = sizeof(struct scatterlist) * sg->count;
++ sg->sg = kmalloc(array, GFP_KERNEL);
++ if (NULL == sg->sg) {
++ return -ENOMEM;
++ }
++
++ memset(sg->sg, 0, array);
++
++ /* allocate needed pages */
++ for (i = 0; i < sg->count; ++i) {
++ sg->sg[i].address = (void*) get_zeroed_page(GFP_KERNEL);
++ if (NULL == sg->sg[i].address) {
++ sg->count = i;
++ zh_sg_free(sg);
++ return -ENOMEM;
++ }
++
++ sg->sg[i].length = PAGE_SIZE;
++ }
++
++ sg->sg[i-1].length = lastsize;
++
++ return 0;
++}
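++
++/*
++ * Worked example: with PAGE_SIZE == 4096, a request for size == 8292
++ * (two full pages plus 100 bytes) yields count == 3, three zeroed pages
++ * each with length PAGE_SIZE, and the last length then trimmed to 100;
++ * a request for exactly 8192 yields count == 2 with lastsize == PAGE_SIZE.
++ */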
++
++/**
++ * zh_sg_get_size - calculate size in bytes of a sg
++ * @*sg: pointer to a sg structure
++ */
++static inline size_t zh_sg_get_size(struct sg_list *sg)
++{
++ return (((sg->count - 1) * PAGE_SIZE) + sg->sg[sg->count - 1].length);
++}
++
++/**
++ * zh_sg_copy_from_user - copy data from user
++ * @*u_ptr: address in userspace to copy from
++ * @size: bytes to copy
++ * @*sg: describing space to copy to
++ * Return: 0 on success, -E* code else
++ *
++ * This function refuses to copy and returns -ENOSPC if @size exceeds the
++ * capacity of the scatterlist.
++ */
++static int zh_sg_copy_from_user(char *u_ptr, size_t size, struct sg_list *sg)
++{
++ unsigned int i;
++
++ if (size > zh_sg_get_size(sg)) {
++ return -ENOSPC;
++ }
++
++ for (i = 0; i < sg->count - 1; ++i) {
++ if (copy_from_user(sg->sg[i].address, u_ptr, PAGE_SIZE)) {
++ return -EFAULT;
++ }
++
++ u_ptr += PAGE_SIZE;
++ }
++
++ if (copy_from_user(sg->sg[i].address, u_ptr,
++ sg->sg[sg->count - 1].length)) {
++ return -EFAULT;
++ }
++
++ return 0;
++}
++
++/**
++ * zh_sg_copy_from_user_alloc - Allocate a scatterlist and copy data from user
++ */
++static inline int zh_sg_copy_from_user_alloc(char *u_ptr, size_t size,
++ struct sg_list *sg)
++{
++ int ret;
++
++ ret = zh_sg_alloc(sg, size);
++ if (ret < 0) {
++ return ret;
++ }
++
++ return zh_sg_copy_from_user(u_ptr, size, sg);
++}
++
++/**
++ * zh_sg_copy_to_user - Copy sg to userspace
++ * @*u_ptr: address in userspace to copy to
++ * @*size: bytes to copy, contains number of copied bytes on return
++ * @*sg: describing data to copy
++ * Return: copied bytes on success, -E* code else
++ */
++static int zh_sg_copy_to_user(char *u_ptr, size_t *size, struct sg_list *sg)
++{
++ unsigned int i;
++ unsigned int copy;
++ size_t left;
++ char *begin;
++
++ begin = u_ptr;
++ left = *size;
++ *size = 0;
++
++ for (i = 0; (i < sg->count) && (left > 0); ++i) {
++ /* FIXME: What to check here? I think one assumption is
++ (sg[i].address == 0) => (sg[i].offset == 0) which becomes
++ (sg[i].address != 0) ||
++ ((sg[i].address == 0) && sg[i].offset == 0) */
++ if ((NULL == sg->sg[i].address) && (0 != sg->sg[i].offset)) {
++ return -EINVAL;
++ }
++
++ copy = min(PAGE_SIZE, left);
++ copy = min(copy, sg->sg[i].length);
++
++ if (copy_to_user(u_ptr, sg->sg[i].address, copy)) {
++ return -EFAULT;
++ }
++
++ u_ptr += copy;
++ left -= copy;
++ *size += copy;
++ }
++
++ if ((u_ptr - begin) != zh_sg_get_size(sg)) {
++ return -ENOSPC;
++ } else {
++ return 0;
++ }
++}
++
++/**
++ * zh_sg_copy_to_user_truncate - Copy as much from sg to user as fits
++ * @*u_ptr: address in userspace to copy to
++ * @*size: bytes to copy, contains number of copied bytes on return
++ * @*sg: describing data to copy
++ * Return: 0 on success, -E* code else
++ *
++ * This wrapper around zh_sg_copy_to_user() simply ignores -ENOSPC errors,
++ * which is what we want when we use this function.
++ */
++static inline int zh_sg_copy_to_user_truncate(char *u_ptr, size_t *size,
++ struct sg_list *sg)
++{
++	int ret;
++
++ ret = zh_sg_copy_to_user(u_ptr, size, sg);
++
++ if (-ENOSPC == ret) {
++ ret = 0;
++ }
++
++ return ret;
++}
++
++/**
++ * zh_alloc_scsi_cmnd - Allocate and fill a Scsi_Cmnd
++ * @cmd: The scsi command as specified in the SCSI standards
++ * @cmd_size: size of cmd in bytes
++ * @*sg: pointer to scatterlist to retrieve response
++ * Return: The created Scsi_Cmnd on success, else NULL
++ */
++static Scsi_Cmnd* zh_alloc_scsi_cmnd(void *cmd, size_t cmd_size,
++ struct sg_list *sg)
++{
++ Scsi_Cmnd *sc;
++
++ sc = kmalloc(sizeof(*sc), GFP_KERNEL);
++ if (NULL == sc) {
++ return NULL;
++ }
++
++ memset(sc, 0, sizeof(*sc));
++	/* zfcp uses host_scribble, bh_next and scsi_done, don't touch them */
++
++ sc->sc_data_direction = SCSI_DATA_READ;
++
++ sc->use_sg = sg->count;
++ sc->sglist_len = sg->count;
++ sc->buffer = sg->sg;
++ sc->bufflen = zh_sg_get_size(sg);
++ sc->request_buffer = sc->buffer;
++ sc->request_bufflen = sc->bufflen;
++
++ sc->cmd_len = cmd_size;
++
++ memcpy(sc->cmnd, cmd, cmd_size);
++ memcpy(sc->data_cmnd, cmd, cmd_size);
++
++ return sc;
++}
++
++/**
++ * zh_do_scsi_command - worker for sending SCSI commands
++ * @devid: of the adapter to send via
++ * @wwpn: of the port to send the command to
++ * @lun: to send the command to
++ * @cmd: SCSI command to send
++ * @cmd_size: size of the command in bytes
++ * @rsp: scatterlist receiving the response data
++ * @sense: buffer to copy sense data to on SCSI errors
++ * Return: 0 on success, < 0 -E* code, > 0 SCSI error
++ */
++static int zh_do_scsi_command(devid_t devid, wwn_t wwpn, fcp_lun_t lun,
++ void *cmd, size_t cmd_size, struct sg_list *rsp,
++ void *sense)
++{
++ Scsi_Cmnd *sc;
++ int ret;
++
++ sc = zh_alloc_scsi_cmnd(cmd, cmd_size, rsp);
++ if (NULL == sc) {
++ return -ENOMEM;
++ }
++
++ ret = zfcp_zh_send_scsi(DEVID_TO_DEVNO(devid), wwpn, lun, sc);
++
++ /* the scsi stack sets this, if there was a scsi error */
++ if (ret > 0) {
++ memcpy(sense, sc->sense_buffer, SCSI_SENSE_BUFFERSIZE);
++ }
++
++ kfree(sc);
++
++ return ret;
++}
++
++/**
++ * zh_ioc_scsi_report_luns - Send SCSI REPORT LUNS
++ * @*u_ptr: userspace pointer to copy data from and to
++ * Return: 0 on success, < 0 -E* code, > 0 SCSI ERROR
++ * Context: user
++ */
++static int zh_ioc_scsi_report_luns(struct zh_scsi_report_luns *u_ptr)
++{
++ int ret;
++ struct zh_scsi_report_luns ioc_data;
++
++ if (copy_from_user(&ioc_data, u_ptr, sizeof(ioc_data)))
++ return -EFAULT;
++
++ ret = zh_report_luns_helper(&ioc_data);
++
++ if (ret >= 0) {
++ if (copy_to_user(u_ptr, &ioc_data, sizeof(ioc_data))) {
++ return -EFAULT;
++ }
++ if (ret > 0) {
++ ret = 0;
++ }
++ }
++
++ return ret;
++}
++
++/**
++ * zh_report_luns_helper - Send SCSI REPORT LUNS
++ * @ioc_data: ioctl parameter structure (kernel copy)
++ * Return: 0 on success, < 0 -E* code, > 0 SCSI ERROR
++ * Context: user
++ */
++int zh_report_luns_helper(struct zh_scsi_report_luns *ioc_data)
++{
++ int ret;
++ size_t copy;
++ struct sg_list sg;
++ struct scsi_report_luns_cmd cmd = { 0 };
++
++ if (ioc_data->rsp_buffer_size < SCSI_REPORT_LUNS_SIZE_MIN) {
++ return -EINVAL;
++ }
++
++ ret = zfcp_zh_assert_fclun_zero(DEVID_TO_DEVNO(ioc_data->devid),
++ ioc_data->wwpn);
++ if (0 != ret) {
++ return ret;
++ }
++
++ ret = zh_sg_alloc(&sg, ioc_data->rsp_buffer_size);
++ if (ret < 0) {
++ return ret;
++ }
++
++ cmd.op = REPORT_LUNS;
++ cmd.alloc_length = ioc_data->rsp_buffer_size;
++
++ ret = zh_do_scsi_command(ioc_data->devid, ioc_data->wwpn, 0, &cmd,
++ sizeof(cmd), &sg, ioc_data->sense);
++
++ if (ret >= 0) {
++ copy = ioc_data->rsp_buffer_size;
++		ret = zh_sg_copy_to_user_truncate(ioc_data->rsp_buffer, &copy,
++ &sg);
++ }
++
++ zh_sg_free(&sg);
++
++ return ret;
++}
++
++/**
++ * zh_ioc_scsi_read_capacity - send SCSI READ CAPACITY
++ * @*u_ptr: userspace pointer to copy data from and to
++ * Return: 0 on success, < 0 -E* code, > 0 SCSI ERROR
++ * Context: user
++ */
++static int zh_ioc_scsi_read_capacity(struct zh_scsi_read_capacity *u_ptr)
++{
++ int ret;
++ struct sg_list sg;
++ struct zh_scsi_read_capacity ioc_data;
++ struct scsi_read_capacity_cmd cmd = { 0 };
++
++ if (copy_from_user(&ioc_data, u_ptr, sizeof(ioc_data))) {
++ return -EFAULT;
++ }
++
++ ret = zh_sg_alloc(&sg, ZH_SCSI_READ_CAPACITY_SIZE);
++ if (ret < 0) {
++ return ret;
++ }
++
++ cmd.op = READ_CAPACITY;
++
++ ret = zh_do_scsi_command(ioc_data.devid, ioc_data.wwpn,
++ ioc_data.fclun, &cmd, sizeof(cmd), &sg,
++ ioc_data.sense);
++
++ if (ret >= 0) {
++ memcpy(ioc_data.read_capacity, sg.sg[0].address,
++ sg.sg[0].length);
++
++ if (copy_to_user(u_ptr, &ioc_data, sizeof(*u_ptr))) {
++ ret = -EFAULT;
++ }
++ }
++
++ zh_sg_free(&sg);
++
++ return ret;
++}
++
++/**
++ * zh_ioc_scsi_inquiry - send SCSI INQUIRY
++ * @*u_ptr: userspace pointer to copy data from and to
++ * Return: 0 on success, < 0 -E* code, > 0 SCSI ERROR
++ * Context: user
++ */
++static int zh_ioc_scsi_inquiry(struct zh_scsi_inquiry *u_ptr)
++{
++ int ret;
++ struct sg_list sg;
++ struct zh_scsi_inquiry ioc_data;
++ struct scsi_inquiry_cmd cmd = { 0 };
++
++ if (copy_from_user(&ioc_data, u_ptr, sizeof(ioc_data))) {
++ return -EFAULT;
++ }
++
++ ret = zh_sg_alloc(&sg, ZH_SCSI_INQUIRY_SIZE);
++ if (ret < 0) {
++ return ret;
++ }
++
++	/* alloc_length is of type u8, thus can only range up to 255 */
++ cmd.op = INQUIRY;
++ cmd.alloc_length = 255;
++
++ if (ioc_data.evpd) {
++ cmd.evpd = 1;
++ cmd.page_code = ioc_data.page_code;
++ }
++
++ ret = zh_do_scsi_command(ioc_data.devid, ioc_data.wwpn,
++ ioc_data.fclun, &cmd, sizeof(cmd), &sg,
++ ioc_data.sense);
++
++ if (ret >= 0) {
++ memcpy(ioc_data.inquiry, sg.sg[0].address, sg.sg[0].length);
++
++ if (copy_to_user(u_ptr, &ioc_data, sizeof(*u_ptr))) {
++ ret = -EFAULT;
++ }
++ }
++
++ zh_sg_free(&sg);
++
++ return ret;
++}
++
++/**
++ * zh_ioc_get_config - create private events for a specific struct file
++ * @*u_ptr: userspace pointer to copy data from
++ * @*filp: requesting fd
++ * Return: no of created events, else -E* code
++ * Context: user
++ *
++ * With this ioctl, events are generated and attached to this fd only. Used to
++ * enumerate the currently configured adapters/ports/units. Subsequent calls
++ * discard previously created events.
++ */
++static int zh_ioc_get_config(struct zh_get_config *u_ptr, struct file *filp)
++{
++ struct zh_get_config ioc_data;
++ struct zh_client *head = (void*) filp->private_data;
++
++ if (!list_empty(&head->config)) {
++ zh_ioc_clear_config(filp);
++ }
++
++ if (copy_from_user(&ioc_data, u_ptr, sizeof(ioc_data))) {
++ return -EFAULT;
++ }
++
++ return zfcp_zh_get_config(filp, DEVID_TO_DEVNO(ioc_data.devid),
++ ioc_data.wwpn, ioc_data.flags);
++}
++
++/**
++ * zh_ioc_clear_config - remove config events from fd
++ * @filp: fd requesting to clear its config
++ * Return: always 0
++ * Context: user
++ */
++static int zh_ioc_clear_config(struct file *filp)
++{
++ struct zh_config *c;
++ struct list_head *go, *safe;
++ struct zh_client *client = (struct zh_client*) filp->private_data;
++
++ list_for_each_safe(go, safe, &client->config)
++ {
++ c = list_entry(go, struct zh_config, list);
++ list_del(go);
++ kfree(c);
++ }
++
++ INIT_LIST_HEAD(&client->config);
++
++ return 0;
++}
++
++/**
++ * zh_ioc_get_rnid() - get RNID from adapter
++ * @u_ptr: userspace pointer to copy data from and to
++ *
++ * Note: We set the data in zfcp_hbaapi since we cannot access the data
++ * the adapter sends out.
++ * Note: wwpn and wwnn are not set because they are not used in the ZFCP
++ * HBA API library.
++ */
++static int zh_ioc_get_rnid(struct zh_get_rnid *u_ptr)
++{
++ struct zh_get_rnid ioc_data;
++
++ if (copy_from_user(&ioc_data, u_ptr, sizeof(ioc_data))) {
++ return -EFAULT;
++ }
++
++ memset(&ioc_data.payload, 0, sizeof(ioc_data.payload));
++
++ ioc_data.payload.code = ZFCP_LS_RNID;
++ ioc_data.payload.node_id_format = 0xDF;
++ ioc_data.payload.common_id_length =
++ sizeof(struct zfcp_ls_rnid_common_id);
++ ioc_data.payload.specific_id_length =
++ sizeof(struct zfcp_ls_rnid_general_topology_id);
++
++ /* all other fields not set */
++ ioc_data.payload.specific_id.associated_type = 0x000000a;
++ ioc_data.payload.specific_id.physical_port_number = 1;
++ ioc_data.payload.specific_id.nr_attached_nodes = 1;
++
++ if (copy_to_user(u_ptr, &ioc_data, sizeof(*u_ptr))) {
++ return -EFAULT;
++ }
++
++ return 0;
++}
++
++/**
++ * zh_ioc_send_rnid - send an rnid to a port
++ * @*u_ptr: userspace pointer to copy data from and to
++ *
++ * Send a FC-FS ELS RNID to a discovered port.
++ */
++static int zh_ioc_send_rnid(struct zh_send_rnid *u_ptr)
++{
++ int ret;
++ struct sg_list sg_send, sg_receive;
++ struct zh_send_rnid ioc_data;
++ struct zfcp_ls_rnid *rnid;
++
++ if (copy_from_user(&ioc_data, u_ptr, sizeof(ioc_data))) {
++ return -EFAULT;
++ }
++
++ ret = zh_sg_alloc(&sg_send, sizeof(struct zfcp_ls_rnid));
++ if (ret < 0) {
++ return ret;
++ }
++
++ ret = zh_sg_alloc(&sg_receive, sizeof(struct zfcp_ls_rnid_acc));
++ if (ret < 0) {
++ goto free_send;
++ }
++
++ rnid = (void*) sg_send.sg[0].address;
++ rnid->code = ZFCP_LS_RNID;
++ rnid->node_id_format = 0xDF;
++
++ ret = zfcp_zh_send_els(DEVID_TO_DEVNO(ioc_data.devid), ioc_data.wwpn,
++ sg_send.sg, sg_send.count, sg_receive.sg,
++ sg_receive.count);
++ if (0 == ret) {
++ ioc_data.size = sg_receive.sg[0].length;
++ memcpy(&ioc_data.payload, sg_receive.sg[0].address,
++ ioc_data.size);
++
++ if (copy_to_user(u_ptr, &ioc_data, sizeof(*u_ptr))) {
++ ret = -EFAULT;
++ }
++ }
++
++ zh_sg_free(&sg_receive);
++free_send:
++ zh_sg_free(&sg_send);
++
++ return ret;
++}
++
++/**
++ * zh_ioc_send_ct - send a Generic Service command
++ * @u_ptr: userspace pointer to parameter structure
++ * Return: 0 on success, -E* code else
++ *
++ * Send a FC-GS CT IU
++ */
++static int zh_ioc_send_ct(struct zh_send_ct *u_ptr)
++{
++ int ret;
++ struct zh_send_ct ioc_data;
++
++ if (copy_from_user(&ioc_data, u_ptr, sizeof(ioc_data))) {
++ return -EFAULT;
++ }
++
++ ret = zh_send_ct_helper(&ioc_data);
++
++ return ret;
++}
++
++/**
++ * zh_send_ct_helper - send a Generic Service command
++ * @send_ct: userspace pointer to parameter structure
++ * Return: 0 on success, -E* code else
++ *
++ * Send a FC-GS CT IU
++ */
++int zh_send_ct_helper(struct zh_send_ct *send_ct)
++{
++ int ret;
++ struct sg_list req, resp;
++
++ ret = zh_sg_alloc(&req, send_ct->req_length);
++ if (ret < 0) {
++ return ret;
++ }
++
++ if (zh_sg_copy_from_user(send_ct->req, send_ct->req_length, &req)) {
++ ret = -EFAULT;
++ goto free_req;
++ }
++
++ ret = zh_sg_alloc(&resp, send_ct->resp_length);
++ if (ret < 0) {
++ goto free_req;
++ }
++
++ ret = zfcp_zh_send_ct(DEVID_TO_DEVNO(send_ct->devid),
++ req.sg, req.count, resp.sg, resp.count);
++
++ if (0 == ret) {
++ size_t size = send_ct->resp_length;
++ ret = zh_sg_copy_to_user_truncate(send_ct->resp, &size, &resp);
++ }
++
++ zh_sg_free(&resp);
++free_req:
++ zh_sg_free(&req);
++
++ return ret;
++}
++
++/**
++ * prepare_event - prepare event structure
++ * @filp: fd event to attach
++ * @e: countable event
++ * @c: non-countable event
++ * Return: created event on success, else NULL
++ * Context: irq/user
++ *
++ * To be able to generate private and shared events with the same callbacks,
++ * this function creates a private or shared event depending on whether filp
++ * is NULL or not.
++ */
++static inline struct zh_event*
++prepare_event(struct file *filp, struct zh_event_item **e, struct zh_config **c)
++{
++ if (NULL != filp) {
++ *c = (struct zh_config*) kmalloc(sizeof(**c), GFP_ATOMIC);
++ if (NULL == *c) {
++ return NULL;
++ }
++
++ *e = NULL;
++ return &(*c)->event;
++ } else {
++ *e = (struct zh_event_item*) kmalloc(sizeof(**e), GFP_ATOMIC);
++ if (NULL == *e) {
++ return NULL;
++ }
++
++ *c = NULL;
++ return &(*e)->event;
++ }
++}
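++
++/*
++ * Sketch of the two call shapes (cf. zh_cb_adapter_add() below):
++ *
++ *	struct zh_event_item *e;
++ *	struct zh_config *c;
++ *	event = prepare_event(NULL, &e, &c);	// shared: e set, c == NULL
++ *	event = prepare_event(filp, &e, &c);	// private: c set, e == NULL
++ */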
++
++/**
++ * zh_cb_adapter_add - Callback for adapter add events
++ * @filp: Address of struct file (see below)
++ * @devno: of the adapter
++ * @wwnn: of the adapter
++ * @wwpn: of the adapter
++ * Context: irq/user
++ * FIXME: check context
++ *
++ * Called in two situations:
++ * - initial config creation (filp != NULL) (USER-ctx)
++ * - adapter added to running system (filp == NULL) (IRQ-ctx)
++ *
++ * When called with (filp == NULL) the created event is added to the
++ * global list of events, and thus delivered to all registered fd's.
++ * When called with (filp != NULL) the created event is private to the
++ * specific filp, and is appended only there.
++ */
++static void zh_cb_adapter_add(struct file *filp, devno_t devno, wwn_t wwnn,
++ wwn_t wwpn)
++{
++ struct zh_event *event;
++ struct zh_event_item *e = NULL;
++ struct zh_config *c = NULL;
++
++ ZH_LOG(KERN_DEBUG, "wwpn: 0x%llx\n", (long long) wwpn);
++
++ event = prepare_event(filp, &e, &c);
++ if (NULL == event) {
++ return;
++ }
++
++ event->event = ZH_EVENT_ADAPTER_ADD;
++ event->data.adapter_add.devid = ZH_DEVID(0, 0, devno);
++ event->data.adapter_add.wwnn = wwnn;
++ event->data.adapter_add.wwpn = wwpn;
++
++ add_event(filp, e, c);
++}
++
++/**
++ * zh_cb_port_add() - Called on port add events
++ * @filp: Address of struct file (see zh_cb_adapter_add)
++ * @devno: of the adapter
++ * @wwpn: of the port
++ * @wwnn: of the port
++ * @did: of the port
++ * Context: irq/user
++ * FIXME: check context
++ */
++static void zh_cb_port_add(struct file *filp, devno_t devno, wwn_t wwpn,
++ wwn_t wwnn, fc_id_t did)
++{
++ struct zh_event *event;
++ struct zh_event_item *e;
++ struct zh_config *c;
++
++ ZH_LOG(KERN_DEBUG, "wwpn: 0x%llx\n", (long long) wwpn);
++
++ event = prepare_event(filp, &e, &c);
++ if (NULL == event) {
++ return;
++ }
++
++ event->event = ZH_EVENT_PORT_ADD;
++ event->data.port_add.devid = ZH_DEVID(0, 0, devno);
++ event->data.port_add.wwpn = wwpn;
++ event->data.port_add.wwnn = wwnn;
++ event->data.port_add.did = did;
++
++ add_event(filp, e, c);
++}
++
++/**
++ * zh_cb_unit_add() - Called on unit add events
++ * @filp: Address of struct file (see zh_cb_adapter_add)
++ * @devno: of the adapter
++ * @wwpn: of the port
++ * @fclun: of the unit
++ * @host: SCSI host id
++ * @channel: SCSI channel
++ * @target: SCSI target
++ * @lun: SCSI lun
++ * Context: irq/user
++ * FIXME: check context
++ */
++static void zh_cb_unit_add(struct file *filp, devno_t devno, wwn_t wwpn,
++ fcp_lun_t fclun, unsigned int host,
++ unsigned int channel, unsigned int target,
++ unsigned int lun)
++{
++ struct zh_event *event;
++ struct zh_event_item *e;
++ struct zh_config *c;
++
++ ZH_LOG(KERN_DEBUG, "wwpn: 0x%llx lun: 0x%llx\n", (long long) wwpn,
++ (long long) fclun);
++
++ event = prepare_event(filp, &e, &c);
++ if (NULL == event) {
++ return;
++ }
++
++ event->event = ZH_EVENT_UNIT_ADD;
++ event->data.unit_add.devid = ZH_DEVID(0, 0, devno);
++ event->data.unit_add.wwpn = wwpn;
++ event->data.unit_add.fclun = fclun;
++ event->data.unit_add.host = host;
++ event->data.unit_add.channel = channel;
++ event->data.unit_add.id = target;
++ event->data.unit_add.lun = lun;
++
++ add_event(filp, e, c);
++}
++
++/**
++ * zh_cb_incomming_els_rscn - RSCN ELS worker
++ * @devno: of the adapter
++ * @s_id: sid of the sending port
++ * @v: pointer to rscn payload
++ * Context: irq
++ */
++static inline void zh_cb_incomming_els_rscn(const devno_t devno,
++ const fc_id_t s_id, const void *v)
++{
++ int count, i;
++ struct zh_config *c;
++ const struct zh_els_rscn *rscn = v;
++ struct zh_els_rscn_payload *payload = rscn->payload;
++
++ ZH_LOG(KERN_INFO, "incoming RSCN\n");
++
++ count = rscn->payload_length / sizeof(struct zh_els_rscn_payload);
++
++ for (i = 0; i < count; ++i, ++payload) {
++ c = kmalloc(sizeof(*c), GFP_ATOMIC);
++ if (NULL == c) {
++ return;
++ }
++
++ c->event.event = ZH_EVENT_POLLED;
++ c->event.data.polled.event = ZH_EVENT_POLLED_RSCN;
++ c->event.data.polled.devid = ZH_DEVID(0, 0, devno);
++ c->event.data.polled.data.rscn.port_fc_id = s_id;
++ memcpy(&c->event.data.polled.data.rscn.port_page, payload,
++ sizeof(*payload));
++
++ add_event_to_polled(c);
++ }
++}
++
++/**
++ * zh_cb_incomming_els - hook for incoming ELS frames
++ * @devno: of the adapter the ELS came in on
++ * @s_id: s_id of the port the ELS came from
++ * @*v: pointer to the ELS payload
++ * Context: irq
++ */
++static void zh_cb_incomming_els(const devno_t devno, const fc_id_t s_id,
++ const void *v)
++{
++ const u8 *op = v;
++
++ switch (*op)
++ {
++ case ZFCP_LS_RSCN:
++ zh_cb_incomming_els_rscn(devno, s_id, v);
++ break;
++ }
++}
++
++/**
++ * zh_cb_link_down - hook for link down events
++ * @id: s_id this event occurred on
++ * Context: irq
++ *
++ * At the moment this hook is only called for local link down events.
++ */
++static void zh_cb_link_down(const fc_id_t id)
++{
++ struct zh_config *c;
++
++ c = kmalloc(sizeof(*c), GFP_ATOMIC);
++ if (NULL == c) {
++ return;
++ }
++
++ c->event.event = ZH_EVENT_POLLED_LINK_DOWN;
++ c->event.data.polled.data.link.port_fc_id = id;
++
++ add_event_to_polled(c);
++}
++
++/**
++ * zh_cb_link_up - hook for link up events
++ * @id: s_id this event occurred on
++ * Context: irq
++ *
++ * At the moment this hook is only called for local link up events.
++ */
++static void zh_cb_link_up(const fc_id_t id)
++{
++ struct zh_config *c;
++
++ c = kmalloc(sizeof(*c), GFP_ATOMIC);
++ if (NULL == c) {
++ return;
++ }
++
++ c->event.event = ZH_EVENT_POLLED_LINK_UP;
++ c->event.data.polled.data.link.port_fc_id = id;
++
++ add_event_to_polled(c);
++}
++
++/**
++ * zh_map_port_speed - maps port speed between FC-FS/FC-GS4 and FC-HBA
++ * @speed: address of u32 to be mapped (mapped in place)
++ * @flag_operating_speed: ZH_PORT_OPERATING_SPEED or ZH_PORT_SUPPORTED_SPEED
++ *
++ * The values 4 and 8 are swapped between the two encodings; 1<<14 (for the
++ * operating speed) and 1<<15 (for the supported speed) map to 0.
++ */
++static void zh_map_port_speed(u32 *speed, int flag_operating_speed)
++{
++	if (flag_operating_speed == ZH_PORT_OPERATING_SPEED) {
++		switch (*speed) {
++		case 4:
++			*speed = 8;
++			break;
++		case 8:
++			*speed = 4;
++			break;
++		case (1<<14):
++			*speed = 0;
++			break;
++		}
++	} else { /* ZH_PORT_SUPPORTED_SPEED */
++		switch (*speed) {
++		case 4:
++			*speed = 8;
++			break;
++		case 8:
++			*speed = 4;
++			break;
++		case (1<<15):
++			*speed = 0;
++			break;
++		}
++	}
++
++ return;
++}
++
++module_init(zh_init);
++module_exit(zh_exit);
+=== drivers/s390/scsi/zfcp_main.c
+==================================================================
+--- drivers/s390/scsi/zfcp_main.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/scsi/zfcp_main.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,21393 @@
++/*
++ * FCP adapter driver for IBM eServer zSeries
++ *
++ * (C) Copyright IBM Corp. 2002, 2003
++ *
++ * Authors:
++ * Martin Peschke <mpeschke at de.ibm.com>
++ * Raimund Schroeder <raimund.schroeder at de.ibm.com>
++ * Aron Zeh
++ * Wolfgang Taphorn
++ * Stefan Bader <stefan.bader at de.ibm.com>
++ * Andreas Herrmann <aherrman at de.ibm.com>
++ * Stefan Voelkel <Stefan.Voelkel at millenux.com>
++ */
++
++/* this driver's version (do not edit!!! generated and updated by CVS) */
++#define ZFCP_REVISION "$Revision: 1.31.2.25 $"
++
++#define ZFCP_QTCB_VERSION FSF_QTCB_CURRENT_VERSION
++
++#define ZFCP_PRINT_FLAGS
++
++#undef ZFCP_CAUSE_ERRORS
++
++#undef ZFCP_MEMORY_DEBUG
++
++#undef ZFCP_MEM_POOL_ONLY
++
++#define ZFCP_DEBUG_REQUESTS /* fsf_req tracing */
++
++#define ZFCP_DEBUG_COMMANDS /* host_byte tracing */
++
++#define ZFCP_DEBUG_ABORTS /* scsi_cmnd abort tracing */
++
++#define ZFCP_DEBUG_INCOMING_ELS /* incoming ELS tracing */
++
++#undef ZFCP_RESID
++
++#define ZFCP_STAT_REQSIZES
++#define ZFCP_STAT_QUEUES
++
++// current implementation does not work due to proc_sema
++#undef ZFCP_ERP_DEBUG_SINGLE_STEP
++
++#ifdef ZFCP_CAUSE_ERRORS
++struct timer_list zfcp_force_error_timer;
++#endif
++
++/* ATTENTION: value must not be used by hardware */
++#define FSF_QTCB_UNSOLICITED_STATUS 0x6305
++
++#define ZFCP_FAKE_SCSI_COMPLETION_TIME (HZ / 3)
++
++#define ZFCP_SCSI_LOW_MEM_TIMEOUT (100*HZ)
++
++#define ZFCP_SCSI_ER_TIMEOUT (100*HZ)
++
++#define ZFCP_SCSI_RETRY_TIMEOUT (120*HZ)
++
++/********************* QDIO SPECIFIC DEFINES *********************************/
++
++/* allow as much chained SBALs as supported by hardware */
++#define ZFCP_MAX_SBALS_PER_REQ FSF_MAX_SBALS_PER_REQ
++#define ZFCP_MAX_SBALS_PER_CT_REQ FSF_MAX_SBALS_PER_REQ
++#define ZFCP_MAX_SBALS_PER_ELS_REQ FSF_MAX_SBALS_PER_ELS_REQ
++/* DMQ bug workaround: don't use last SBALE */
++#define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
++/* index of last SBALE (with respect to DMQ bug workaround) */
++#define ZFCP_LAST_SBALE_PER_SBAL (ZFCP_MAX_SBALES_PER_SBAL - 1)
++/* max. number of (data buffer) SBALEs in largest SBAL chain */
++#define ZFCP_MAX_SBALES_PER_REQ \
++ (ZFCP_MAX_SBALS_PER_REQ * ZFCP_MAX_SBALES_PER_SBAL \
++ - 2) /* request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
++
++/* FIXME(tune): free space should be one max. SBAL chain plus what? */
++#define ZFCP_QDIO_PCI_INTERVAL (QDIO_MAX_BUFFERS_PER_Q - (ZFCP_MAX_SBALS_PER_REQ + 4))
++
++#define ZFCP_SBAL_TIMEOUT (5 * HZ)
++
++#define ZFCP_STATUS_READ_FAILED_THRESHOLD 3
++
++/* parsing stuff */
++#define ZFCP_PARSE_SPACE_CHARS " \t"
++#define ZFCP_PARSE_RECORD_DELIM_CHARS ";\n"
++#define ZFCP_PARSE_DELIM_CHARS ":"
++#define ZFCP_PARSE_COMMENT_CHARS "#"
++#define ZFCP_PARSE_ADD 1
++#define ZFCP_PARSE_DEL 0
++
++#include <linux/config.h>
++
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/ctype.h>
++#include <linux/mm.h>
++#include <linux/timer.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/version.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/proc_fs.h>
++#include <linux/notifier.h>
++#include <linux/reboot.h>
++#include <linux/time.h>
++
++#include <linux/ioctl.h>
++#include <linux/major.h>
++#include <linux/miscdevice.h>
++
++#include "../../fc4/fc.h"
++
++#include <linux/module.h>
++
++#include <asm/semaphore.h>
++#include <asm/io.h>
++#include <asm/uaccess.h>
++
++#include <asm/ebcdic.h>
++#include <asm/cpcmd.h> /* Debugging only */
++#include <asm/processor.h> /* Debugging only */
++#include <asm/div64.h>
++#include <asm/ebcdic.h>
++
++#include "zfcp.h"
++#include "zfcp_zh.h"
++
++#define ZFCP_MAX_SBALES_PER_CONTROL_FILE \
++ (ZFCP_CFDC_MAX_CONTROL_FILE_SIZE + PAGE_SIZE - 1) >> PAGE_SHIFT
++
++/* Cosmetics */
++#define ZFCP_FSFREQ_CLEANUP_TIMEOUT HZ/10
++
++#define ZFCP_TYPE2_RECOVERY_TIME 8*HZ
++
++#ifdef ZFCP_STAT_REQSIZES
++#define ZFCP_MAX_PROC_SIZE 3 * PAGE_SIZE
++#else
++#define ZFCP_MAX_PROC_SIZE PAGE_SIZE
++#endif
++
++#define ZFCP_64BIT (BITS_PER_LONG == 64)
++#define ZFCP_31BIT (!ZFCP_64BIT)
++
++#define QDIO_SCSI_QFMT 1 /* 1 for FSF */
++
++/* queue polling (values in microseconds) */
++#define ZFCP_MAX_INPUT_THRESHOLD 5000 /* FIXME: tune */
++#define ZFCP_MAX_OUTPUT_THRESHOLD 1000 /* FIXME: tune */
++#define ZFCP_MIN_INPUT_THRESHOLD 1 /* ignored by QDIO layer */
++#define ZFCP_MIN_OUTPUT_THRESHOLD 1 /* ignored by QDIO layer */
++
++#define ZFCP_PARM_FILE "mod_parm"
++#define ZFCP_MAP_FILE "map"
++#define ZFCP_ADD_MAP_FILE "add_map"
++#define ZFCP_STATUS_FILE "status"
++#define ZFCP_MAX_PROC_LINE 1024
++
++#define ZFCP_CFDC_DEV_NAME "zfcp_cfdc"
++#define ZFCP_CFDC_DEV_MAJOR MISC_MAJOR
++#define ZFCP_CFDC_DEV_MINOR MISC_DYNAMIC_MINOR
++
++#define ZFCP_CFDC_IOC_MAGIC 0xDD
++#define ZFCP_CFDC_IOC \
++ _IOWR(ZFCP_CFDC_IOC_MAGIC, 0, zfcp_cfdc_sense_data_t)
++
++#ifdef CONFIG_S390_SUPPORT
++
++#define ZFCP_IOC_DEFAULT(cmd) {cmd, (void*)sys_ioctl}
++
++typedef struct _zfcp_ioctl_entry {
++ unsigned int cmd;
++ ioctl_trans_handler_t handler;
++} zfcp_ioctl_entry_t;
++
++static zfcp_ioctl_entry_t zfcp_cfdc_ioc __initdata =
++ ZFCP_IOC_DEFAULT(ZFCP_CFDC_IOC);
++
++#endif
++
++#define ZFCP_RESET_ERP "reset erp"
++#define ZFCP_SET_OFFLINE "set offline"
++#define ZFCP_SET_ONLINE "set online"
++#define ZFCP_RTV "rtv"
++#define ZFCP_RLS "rls"
++#define ZFCP_PDISC "pdisc"
++#define ZFCP_ADISC "adisc"
++#define ZFCP_STAT_ON "stat on"
++#define ZFCP_STAT_OFF "stat off"
++#define ZFCP_STAT_RESET "stat reset"
++
++#define ZFCP_DID_MASK 0x00ffffff
++
++/* Adapter Identification Parameters */
++#define ZFCP_CONTROL_UNIT_TYPE 0x1731
++#define ZFCP_CONTROL_UNIT_MODEL 0x03
++#define ZFCP_DEVICE_TYPE 0x1732
++#define ZFCP_DEVICE_MODEL 0x03
++#define ZFCP_DEVICE_MODEL_PRIV 0x04
++
++#define ZFCP_FC_SERVICE_CLASS_DEFAULT FSF_CLASS_3
++
++/* timeout for name-server lookup (in seconds) */
++/* FIXME(tune) */
++#define ZFCP_NS_GID_PN_TIMEOUT 10
++#define ZFCP_NS_GA_NXT_TIMEOUT 120
++
++#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 6
++#define ZFCP_EXCHANGE_CONFIG_DATA_SLEEP 50
++
++#define ZFCP_STATUS_READS_RECOM FSF_STATUS_READS_RECOM
++
++/* largest SCSI command we can process */
++/* FCP-2 (FCP_CMND IU) allows up to (255-3+16) */
++#define ZFCP_MAX_SCSI_CMND_LENGTH 255
++/* maximum number of commands in LUN queue */
++#define ZFCP_CMND_PER_LUN 32
++
++/* debug feature entries per adapter */
++#define ZFCP_ERP_DBF_INDEX 1
++#define ZFCP_ERP_DBF_AREAS 2
++#define ZFCP_ERP_DBF_LENGTH 16
++#define ZFCP_ERP_DBF_LEVEL 3
++#define ZFCP_ERP_DBF_NAME "zfcp_erp"
++
++#define ZFCP_REQ_DBF_INDEX 1
++#define ZFCP_REQ_DBF_AREAS 1
++#define ZFCP_REQ_DBF_LENGTH 8
++#define ZFCP_REQ_DBF_LEVEL DEBUG_OFF_LEVEL
++#define ZFCP_REQ_DBF_NAME "zfcp_req"
++
++#define ZFCP_CMD_DBF_INDEX 2
++#define ZFCP_CMD_DBF_AREAS 1
++#define ZFCP_CMD_DBF_LENGTH 8
++#define ZFCP_CMD_DBF_LEVEL 3
++#define ZFCP_CMD_DBF_NAME "zfcp_cmd"
++
++#define ZFCP_ABORT_DBF_INDEX 2
++#define ZFCP_ABORT_DBF_AREAS 1
++#define ZFCP_ABORT_DBF_LENGTH 8
++#define ZFCP_ABORT_DBF_LEVEL 6
++#define ZFCP_ABORT_DBF_NAME "zfcp_abt"
++
++#define ZFCP_IN_ELS_DBF_INDEX 2
++#define ZFCP_IN_ELS_DBF_AREAS 1
++#define ZFCP_IN_ELS_DBF_LENGTH 8
++#define ZFCP_IN_ELS_DBF_LEVEL 6
++#define ZFCP_IN_ELS_DBF_NAME "zfcp_els"
++
++/*
++ * paranoia: some extra checks ensuring driver consistency (and probably
++ * reducing performance);
++ * should be compiled in and enabled per default during development,
++ * should be compiled in and disabled per default in the beta program,
++ * should not be compiled in in the field
++ */
++
++/* do (not) compile in paranoia code (by means of "dead code") */
++#undef ZFCP_PARANOIA_DEAD_CODE
++
++/* enable/disable paranoia checks if compiled in */
++#define ZFCP_PARANOIA_PER_DEFAULT
++
++/* paranoia status (override by means of module parameter allowed) */
++#ifdef ZFCP_PARANOIA_PER_DEFAULT
++unsigned char zfcp_paranoia = 1;
++#else
++unsigned char zfcp_paranoia = 0;
++#endif
++
++/*
++ * decide whether paranoia checks are (1) dead code,
++ * (2) active code + disabled, or (3) active code + enabled
++ */
++#ifdef ZFCP_PARANOIA_DEAD_CODE
++#define ZFCP_PARANOIA if (0)
++#else
++#define ZFCP_PARANOIA if (zfcp_paranoia)
++#endif
++
++/* association between FSF command and FSF QTCB type */
++static u32 fsf_qtcb_type[] = {
++ [ FSF_QTCB_FCP_CMND ] = FSF_IO_COMMAND,
++ [ FSF_QTCB_ABORT_FCP_CMND ] = FSF_SUPPORT_COMMAND,
++ [ FSF_QTCB_OPEN_PORT_WITH_DID ] = FSF_SUPPORT_COMMAND,
++ [ FSF_QTCB_OPEN_LUN ] = FSF_SUPPORT_COMMAND,
++ [ FSF_QTCB_CLOSE_LUN ] = FSF_SUPPORT_COMMAND,
++ [ FSF_QTCB_CLOSE_PORT ] = FSF_SUPPORT_COMMAND,
++ [ FSF_QTCB_CLOSE_PHYSICAL_PORT ] = FSF_SUPPORT_COMMAND,
++ [ FSF_QTCB_SEND_ELS ] = FSF_SUPPORT_COMMAND,
++ [ FSF_QTCB_SEND_GENERIC ] = FSF_SUPPORT_COMMAND,
++ [ FSF_QTCB_EXCHANGE_CONFIG_DATA ] = FSF_CONFIG_COMMAND,
++ [ FSF_QTCB_EXCHANGE_PORT_DATA ] = FSF_PORT_COMMAND,
++ [ FSF_QTCB_DOWNLOAD_CONTROL_FILE ] = FSF_SUPPORT_COMMAND,
++ [ FSF_QTCB_UPLOAD_CONTROL_FILE ] = FSF_SUPPORT_COMMAND
++};
++
++/* accumulated log level (module parameter) */
++static u32 loglevel = ZFCP_LOG_LEVEL_DEFAULTS;
++
++unsigned long debug_addr;
++unsigned long debug_len;
++
++const char zfcp_topologies[5][25] = {
++ {"<error>"},
++ {"point-to-point"},
++ {"fabric"},
++ {"arbitrated loop"},
++ {"fabric (virt. adapter)"}
++};
++
++const char zfcp_act_subtable_type[5][8] = {
++ {"unknown"}, {"OS"}, {"WWPN"}, {"DID"}, {"LUN"}
++};
++
++inline void _zfcp_hex_dump(char *addr, int count)
++{
++ int i;
++ for (i = 0; i < count; i++) {
++ printk("%02x", addr[i]);
++ if ((i % 4) == 3)
++ printk(" ");
++ if ((i % 32) == 31)
++ printk("\n");
++ }
++ if ((i % 32) != 31)
++ printk("\n");
++}
++
++#define ZFCP_HEX_DUMP(level, addr, count) \
++ if (ZFCP_LOG_CHECK(level)) { \
++ _zfcp_hex_dump(addr, count); \
++ }
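++
++/*
++ * Usage sketch (the log level constant is an assumption; any level
++ * accepted by ZFCP_LOG_CHECK() works):
++ *
++ *	ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, (char *) buffer, buffer_len);
++ */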
++
++static int proc_debug=1;
++
++/*
++ * buffer struct used for private_data entry (proc interface)
++ */
++
++typedef struct {
++ int len;
++ char *buf;
++} procbuf_t;
++
++
++/*
++ * not yet optimal but useful:
++ * waits until condition is met or timeout is expired,
++ * condition might be a function call which allows to
++ * execute some additional instructions aside from check
++ * (e.g. get a lock without race if condition is met),
++ * timeout is modified and holds the remaining time,
++ * thus timeout is zero if timeout is expired,
++ * result value zero indicates that condition has not been met
++ */
++#define __ZFCP_WAIT_EVENT_TIMEOUT(timeout, condition) \
++do { \
++ set_current_state(TASK_UNINTERRUPTIBLE); \
++ while (!(condition) && timeout) \
++ timeout = schedule_timeout(timeout); \
++ current->state = TASK_RUNNING; \
++} while (0);
++
++#define ZFCP_WAIT_EVENT_TIMEOUT(waitqueue, timeout, condition) \
++do { \
++ wait_queue_t entry; \
++ init_waitqueue_entry(&entry, current); \
++ add_wait_queue(&waitqueue, &entry); \
++ __ZFCP_WAIT_EVENT_TIMEOUT(timeout, condition) \
++ remove_wait_queue(&waitqueue, &entry); \
++} while (0);
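++
++/*
++ * Usage sketch per the comment above (member names are illustrative
++ * assumptions); timeout holds the remaining jiffies afterwards, zero
++ * meaning it expired:
++ *
++ *	long timeout = ZFCP_SBAL_TIMEOUT;
++ *	ZFCP_WAIT_EVENT_TIMEOUT(queue->wq, timeout,
++ *				atomic_read(&queue->free_count) > 0);
++ *	if (!timeout)
++ *		;	// condition not met within ZFCP_SBAL_TIMEOUT
++ */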
++
++
++/* General type defines */
++
++typedef long long unsigned int llui_t;
++
++/* QDIO request identifier */
++typedef u64 qdio_reqid_t;
++
++
++/* FCP(-2) FCP_CMND IU */
++typedef struct _fcp_cmnd_iu {
++ /* FCP logical unit number */
++ fcp_lun_t fcp_lun;
++ /* command reference number */
++ u8 crn;
++ /* reserved */
++ u8 reserved0:5;
++ /* task attribute */
++ u8 task_attribute:3;
++ /* task management flags */
++ u8 task_management_flags;
++ /* additional FCP_CDB length */
++ u8 add_fcp_cdb_length:6;
++ /* read data */
++ u8 rddata:1;
++ /* write data */
++ u8 wddata:1;
++ /* */
++ u8 fcp_cdb[FCP_CDB_LENGTH];
++ /* variable length fields (additional FCP_CDB, FCP_DL) */
++} __attribute__((packed)) fcp_cmnd_iu_t;
++
++/* data length field may be at variable position in FCP-2 FCP_CMND IU */
++typedef u32 fcp_dl_t;
++
++
++#define RSP_CODE_GOOD 0
++#define RSP_CODE_LENGTH_MISMATCH 1
++#define RSP_CODE_FIELD_INVALID 2
++#define RSP_CODE_RO_MISMATCH 3
++#define RSP_CODE_TASKMAN_UNSUPP 4
++#define RSP_CODE_TASKMAN_FAILED 5
++
++/* see fc-fs */
++#define LS_FAN 0x60000000
++#define LS_RSCN 0x61040000
++
++typedef struct _fcp_rscn_head {
++ u8 command;
++ u8 page_length; /* always 0x04 */
++ u16 payload_len;
++} __attribute__((packed)) fcp_rscn_head_t;
++
++typedef struct _fcp_rscn_element {
++ u8 reserved:2;
++ u8 event_qual:4;
++ u8 addr_format:2;
++ u32 nport_did:24;
++} __attribute__((packed)) fcp_rscn_element_t;
++
++#define ZFCP_PORT_ADDRESS 0x0
++#define ZFCP_AREA_ADDRESS 0x1
++#define ZFCP_DOMAIN_ADDRESS 0x2
++#define ZFCP_FABRIC_ADDRESS 0x3
++
++#define ZFCP_PORTS_RANGE_PORT 0xFFFFFF
++#define ZFCP_PORTS_RANGE_AREA 0xFFFF00
++#define ZFCP_PORTS_RANGE_DOMAIN 0xFF0000
++#define ZFCP_PORTS_RANGE_FABRIC 0x000000
++
++#define ZFCP_NO_PORTS_PER_AREA 0x100
++#define ZFCP_NO_PORTS_PER_DOMAIN 0x10000
++#define ZFCP_NO_PORTS_PER_FABRIC 0x1000000
++
++typedef struct _fcp_fan {
++ u32 command;
++ u32 fport_did;
++ wwn_t fport_wwpn;
++ wwn_t fport_wwname;
++} __attribute__((packed)) fcp_fan_t;
++
++/* see fc-ph */
++typedef struct _fcp_logo {
++ u32 command;
++ u32 nport_did;
++ wwn_t nport_wwpn;
++} __attribute__((packed)) fcp_logo_t;
++
++
++/* FCP(-2) FCP_RSP IU */
++typedef struct _fcp_rsp_iu {
++ /* reserved */
++ u8 reserved0[10];
++ union {
++ struct {
++ /* reserved */
++ u8 reserved1:3;
++ /* */
++ u8 fcp_conf_req:1;
++ /* */
++ u8 fcp_resid_under:1;
++ /* */
++ u8 fcp_resid_over:1;
++ /* */
++ u8 fcp_sns_len_valid:1;
++ /* */
++ u8 fcp_rsp_len_valid:1;
++ } bits;
++ u8 value;
++ } validity;
++ /* */
++ u8 scsi_status;
++ /* */
++ u32 fcp_resid;
++ /* */
++ u32 fcp_sns_len;
++ /* */
++ u32 fcp_rsp_len;
++ /* variable length fields: FCP_RSP_INFO, FCP_SNS_INFO */
++} __attribute__((packed)) fcp_rsp_iu_t;
++
++
++inline char *zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu_t *fcp_rsp_iu)
++{
++ char *fcp_rsp_info_ptr = NULL;
++ fcp_rsp_info_ptr=
++ (unsigned char*)fcp_rsp_iu + (sizeof(fcp_rsp_iu_t));
++
++ return fcp_rsp_info_ptr;
++}
++
++
++inline char *zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu_t *fcp_rsp_iu)
++{
++ char *fcp_sns_info_ptr = NULL;
++ fcp_sns_info_ptr =
++ (unsigned char*)fcp_rsp_iu + (sizeof(fcp_rsp_iu_t));
++ // NOTE:fcp_rsp_info is really only a part of the whole as
++ // defined in FCP-2 documentation
++ if (fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)
++ fcp_sns_info_ptr = (char *)fcp_sns_info_ptr +
++ fcp_rsp_iu->fcp_rsp_len;
++
++ return fcp_sns_info_ptr;
++}
++
++#ifdef ZFCP_STAT_REQSIZES
++typedef struct _zfcp_statistics {
++ struct list_head list;
++ u32 num;
++ u32 hits;
++} zfcp_statistics_t;
++#endif
++
++
++/*********************** LIST DEFINES ****************************************/
++
++#define ZFCP_FIRST_ADAPTER \
++ ZFCP_FIRST_ENTITY(&zfcp_data.adapter_list_head,zfcp_adapter_t)
++
++#define ZFCP_FIRST_PORT(a) \
++ ZFCP_FIRST_ENTITY(&(a)->port_list_head,zfcp_port_t)
++
++#define ZFCP_FIRST_UNIT(p) \
++ ZFCP_FIRST_ENTITY(&(p)->unit_list_head,zfcp_unit_t)
++
++#define ZFCP_FIRST_SCSIREQ(a) \
++ ZFCP_FIRST_ENTITY(&(a)->scsi_req_list_head,zfcp_scsi_req_t)
++
++#define ZFCP_FIRST_FSFREQ(a) \
++ ZFCP_FIRST_ENTITY(&(a)->fsf_req_list_head,zfcp_fsf_req_t)
++
++#define ZFCP_LAST_ADAPTER \
++ ZFCP_LAST_ENTITY(&zfcp_data.adapter_list_head,zfcp_adapter_t)
++
++#define ZFCP_LAST_PORT(a) \
++ ZFCP_LAST_ENTITY(&(a)->port_list_head,zfcp_port_t)
++
++#define ZFCP_LAST_UNIT(p) \
++ ZFCP_LAST_ENTITY(&(p)->unit_list_head,zfcp_unit_t)
++
++#define ZFCP_LAST_SCSIREQ(a) \
++ ZFCP_LAST_ENTITY(&(a)->scsi_req_list_head,zfcp_scsi_req_t)
++
++#define ZFCP_LAST_FSFREQ(a) \
++ ZFCP_LAST_ENTITY(&(a)->fsf_req_list_head,zfcp_fsf_req_t)
++
++#define ZFCP_PREV_ADAPTER(a) \
++ ZFCP_PREV_ENTITY(&zfcp_data.adapter_list_head,(a),zfcp_adapter_t)
++
++#define ZFCP_PREV_PORT(p) \
++ ZFCP_PREV_ENTITY(&(p)->adapter->port_list_head,(p),zfcp_port_t)
++
++#define ZFCP_PREV_UNIT(u) \
++ ZFCP_PREV_ENTITY(&(u)->port->unit_list_head,(u),zfcp_unit_t)
++
++#define ZFCP_PREV_SCSIREQ(s) \
++ ZFCP_PREV_ENTITY(&(s)->adapter->scsi_req_list_head,(s),zfcp_scsi_req_t)
++
++#define ZFCP_PREV_FSFREQ(o) \
++ ZFCP_PREV_ENTITY(&(o)->adapter->fsf_req_list_head,(o), \
++ zfcp_fsf_req_t)
++
++#define ZFCP_NEXT_ADAPTER(a) \
++ ZFCP_NEXT_ENTITY(&zfcp_data.adapter_list_head,(a),zfcp_adapter_t)
++
++#define ZFCP_NEXT_PORT(p) \
++ ZFCP_NEXT_ENTITY(&(p)->adapter->port_list_head,(p),zfcp_port_t)
++
++#define ZFCP_NEXT_UNIT(u) \
++ ZFCP_NEXT_ENTITY(&(u)->port->unit_list_head,(u),zfcp_unit_t)
++
++#define ZFCP_NEXT_SCSIREQ(s) \
++ ZFCP_NEXT_ENTITY(&(s)->adapter->scsi_req_list_head,(s),zfcp_scsi_req_t)
++
++#define ZFCP_NEXT_FSFREQ(o) \
++ ZFCP_NEXT_ENTITY(&(o)->adapter->fsf_req_list_head,(o), \
++ zfcp_fsf_req_t)
++
++#define ZFCP_FOR_EACH_FSFREQ(a,o) \
++ ZFCP_FOR_EACH_ENTITY(&(a)->fsf_req_list_head,(o),zfcp_fsf_req_t)
++
++/*
++ * use these macros if you traverse a list and do not stop after
++ * altering the list;
++ * attention: do not modify the "tmp" (last) argument during iterations;
++ * typical use: removing several elements from somewhere in the middle
++ * of the list;
++ * lock the list by means of the associated rwlock before entering the
++ * loop (i.e. above the macro) and unlock it (the associated rwlock)
++ * after leaving the loop belonging to the macro;
++ * use the write variant of the lock
++ */
++
++#define ZFCP_FOR_NEXT_ENTITY(head,curr,type,tmp) \
++ for (curr = ZFCP_FIRST_ENTITY(head,type), \
++ tmp = ZFCP_NEXT_ENTITY(head,curr,type); \
++ curr; \
++ curr = tmp, \
++ tmp = ZFCP_NEXT_ENTITY(head,curr,type))
++
++#define ZFCP_FOR_NEXT_ADAPTER(a,n) \
++ ZFCP_FOR_NEXT_ENTITY(&zfcp_data.adapter_list_head,(a), \
++ zfcp_adapter_t,(n))
++
++#define ZFCP_FOR_NEXT_PORT(a,p,n) \
++ ZFCP_FOR_NEXT_ENTITY(&(a)->port_list_head,(p),zfcp_port_t,(n))
++
++#define ZFCP_FOR_NEXT_UNIT(p,u,n) \
++ ZFCP_FOR_NEXT_ENTITY(&(p)->unit_list_head,(u),zfcp_unit_t,(n))
++
++#define ZFCP_FOR_NEXT_SCSIREQ(a,s,n) \
++ ZFCP_FOR_NEXT_ENTITY(&(a)->scsi_req_list_head,(s),zfcp_scsi_req_t,(n))
++
++#define ZFCP_FOR_NEXT_FSFREQ(a,o,n) \
++ ZFCP_FOR_NEXT_ENTITY(&(a)->fsf_req_list_head,(o), \
++ zfcp_fsf_req_t,(n))
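++
++/*
++ * Sketch of the pattern the comment above prescribes (the lock member
++ * name and predicate are illustrative assumptions):
++ *
++ *	zfcp_unit_t *unit, *next;
++ *	write_lock(&port->unit_list_lock);
++ *	ZFCP_FOR_NEXT_UNIT(port, unit, next) {
++ *		if (unit_is_stale(unit))
++ *			;	// safe to dequeue unit here
++ *	}
++ *	write_unlock(&port->unit_list_lock);
++ */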
++
++/*
++ * use these macros for loops that do not walk through lists but
++ * change the list at its head;
++ * attention: without changing the head of the list or some other
++ * break condition this will become an endless loop,
++ * and next and previous pointers may become invalid !!!
++ * typical use: removing all elements from a list;
++ * lock the list by means of the associated rwlock before entering the
++ * loop (i.e. above the macro) and unlock it (the associated rwlock)
++ * after leaving the loop belonging to the macro;
++ * use the write variant of the lock
++ */
++#define ZFCP_WHILE_ENTITY(head,curr,type) \
++ while (((curr) = ZFCP_FIRST_ENTITY(head,type)))
++
++#define ZFCP_WHILE_ADAPTER(a) \
++ ZFCP_WHILE_ENTITY(&zfcp_data.adapter_list_head,(a),zfcp_adapter_t)
++
++#define ZFCP_WHILE_PORT(a,p) \
++ ZFCP_WHILE_ENTITY(&(a)->port_list_head,(p),zfcp_port_t)
++
++#define ZFCP_WHILE_UNIT(p,u) \
++ ZFCP_WHILE_ENTITY(&(p)->unit_list_head,(u),zfcp_unit_t)
++
++#define ZFCP_WHILE_SCSIREQ(a,s) \
++ ZFCP_WHILE_ENTITY(&(a)->scsi_req_list_head,(s),zfcp_scsi_req_t)
++
++#define ZFCP_WHILE_FSFREQ(a,o) \
++ ZFCP_WHILE_ENTITY(&(a)->fsf_req_list_head,(o),zfcp_fsf_req_t)
++
++/* prototypes for functions which could be kernel lib functions (but aren't) */
++static size_t strnspn(const char*, const char*, size_t);
++char* strnpbrk(const char*, const char*, size_t);
++char* strnchr(const char*, int, size_t);
++char* strnrchr(const char*, int, size_t);
++
++/* prototypes for functions written against the module interface */
++static int __init zfcp_module_init(void);
++static void __exit zfcp_module_exit(void);
++
++int zfcp_reboot_handler (struct notifier_block*, unsigned long, void*);
++
++/* prototypes for functions written against the SCSI stack HBA driver interface */
++int zfcp_scsi_detect (Scsi_Host_Template*);
++int zfcp_scsi_revoke (Scsi_Device*);
++int zfcp_scsi_release (struct Scsi_Host*);
++int zfcp_scsi_queuecommand (Scsi_Cmnd*, void (*done)(Scsi_Cmnd*));
++int zfcp_scsi_eh_abort_handler (Scsi_Cmnd*);
++int zfcp_scsi_eh_device_reset_handler (Scsi_Cmnd*);
++int zfcp_scsi_eh_bus_reset_handler (Scsi_Cmnd*);
++int zfcp_scsi_eh_host_reset_handler (Scsi_Cmnd*);
++void zfcp_scsi_select_queue_depth(struct Scsi_Host*, Scsi_Device*);
++
++static inline int zfcp_sg_list_alloc(zfcp_sg_list_t*, size_t);
++static inline int zfcp_sg_list_free(zfcp_sg_list_t*);
++static inline int zfcp_sg_list_copy_from_user(zfcp_sg_list_t*, void*, size_t);
++static inline int zfcp_sg_list_copy_to_user(void*, zfcp_sg_list_t*, size_t);
++
++/* prototypes for functions written against the FSF interface */
++static int zfcp_fsf_req_send(zfcp_fsf_req_t*, struct timer_list*);
++static int zfcp_fsf_req_create(zfcp_adapter_t *, u32, int, zfcp_mem_pool_t *,
++ unsigned long *, zfcp_fsf_req_t **);
++static int zfcp_fsf_req_free(zfcp_fsf_req_t*);
++static int zfcp_fsf_exchange_config_data (zfcp_erp_action_t*);
++static int zfcp_fsf_open_port (zfcp_erp_action_t*);
++static int zfcp_fsf_close_port (zfcp_erp_action_t*);
++static int zfcp_fsf_open_unit (zfcp_erp_action_t*);
++static int zfcp_fsf_close_unit (zfcp_erp_action_t*);
++static int zfcp_fsf_close_physical_port (zfcp_erp_action_t*);
++static int zfcp_fsf_send_fcp_command_task (zfcp_unit_t*, Scsi_Cmnd *);
++static zfcp_fsf_req_t* zfcp_fsf_send_fcp_command_task_management
++ (zfcp_adapter_t*, zfcp_unit_t*, u8, int);
++static zfcp_fsf_req_t* zfcp_fsf_abort_fcp_command
++ (unsigned long, zfcp_adapter_t*, zfcp_unit_t*, int);
++static void zfcp_fsf_start_scsi_er_timer(zfcp_adapter_t*);
++static void zfcp_fsf_scsi_er_timeout_handler(unsigned long);
++static int zfcp_fsf_status_read (zfcp_adapter_t*, int);
++static int zfcp_fsf_control_file(
++ zfcp_adapter_t*, zfcp_fsf_req_t**, u32, u32, zfcp_sg_list_t*);
++
++static int zfcp_fsf_exchange_config_data_handler
++ (zfcp_fsf_req_t*);
++static int zfcp_fsf_exchange_port_data_handler (zfcp_fsf_req_t*);
++static int zfcp_fsf_open_port_handler (zfcp_fsf_req_t*);
++static int zfcp_fsf_close_port_handler (zfcp_fsf_req_t*);
++static int zfcp_fsf_close_physical_port_handler (zfcp_fsf_req_t*);
++static int zfcp_fsf_open_unit_handler (zfcp_fsf_req_t*);
++static int zfcp_fsf_close_unit_handler (zfcp_fsf_req_t*);
++static int zfcp_fsf_send_fcp_command_handler (zfcp_fsf_req_t*);
++static int zfcp_fsf_send_fcp_command_task_handler
++ (zfcp_fsf_req_t*);
++static int zfcp_fsf_send_fcp_command_task_management_handler
++ (zfcp_fsf_req_t*);
++static int zfcp_fsf_abort_fcp_command_handler (zfcp_fsf_req_t*);
++static int zfcp_fsf_send_ct_handler (zfcp_fsf_req_t*);
++static int zfcp_fsf_status_read_handler (zfcp_fsf_req_t*);
++void zfcp_fsf_incoming_els (zfcp_fsf_req_t *);
++static int zfcp_fsf_control_file_handler(zfcp_fsf_req_t*);
++
++static inline int
++ zfcp_fsf_req_create_sbal_check
++ (unsigned long*, zfcp_qdio_queue_t*, int);
++
++static int zfcp_fsf_send_els_handler(zfcp_fsf_req_t *);
++
++static inline int zfcp_mem_pool_element_alloc (zfcp_mem_pool_t*, int);
++static inline int zfcp_mem_pool_element_free (zfcp_mem_pool_t*, int);
++static inline void* zfcp_mem_pool_element_get (zfcp_mem_pool_t*, int);
++static inline int zfcp_mem_pool_element_put (zfcp_mem_pool_t*, int);
++
++static inline int zfcp_mem_pool_create (zfcp_mem_pool_t*, int, int,
++					void (*function)(unsigned long),
++					unsigned long);
++static inline int zfcp_mem_pool_destroy (zfcp_mem_pool_t*);
++static inline void* zfcp_mem_pool_find (zfcp_mem_pool_t*);
++static inline int zfcp_mem_pool_return (void*, zfcp_mem_pool_t*);
++
++
++
++static void zfcp_scsi_low_mem_buffer_timeout_handler (unsigned long);
++
++static zfcp_fsf_req_t* zfcp_fsf_req_alloc(zfcp_mem_pool_t *, int, int);
++static int zfcp_fsf_req_cleanup (zfcp_fsf_req_t*);
++static int zfcp_fsf_req_wait_and_cleanup (zfcp_fsf_req_t*, int, u32*);
++static int zfcp_fsf_req_complete (zfcp_fsf_req_t*);
++static int zfcp_fsf_protstatus_eval (zfcp_fsf_req_t*);
++static int zfcp_fsf_fsfstatus_eval (zfcp_fsf_req_t*);
++static int zfcp_fsf_fsfstatus_qual_eval(zfcp_fsf_req_t *);
++static int zfcp_fsf_req_dispatch (zfcp_fsf_req_t*);
++static int zfcp_fsf_req_dismiss (zfcp_fsf_req_t*);
++static int zfcp_fsf_req_dismiss_all (zfcp_adapter_t*);
++
++/* prototypes for FCP related functions */
++static int zfcp_nameserver_enqueue (zfcp_adapter_t*);
++static int zfcp_ns_gid_pn_request(zfcp_erp_action_t*);
++static void zfcp_ns_gid_pn_handler(unsigned long);
++static void zfcp_ns_ga_nxt_handler(unsigned long);
++
++/* prototypes for functions written against the QDIO layer interface */
++qdio_handler_t zfcp_qdio_request_handler;
++qdio_handler_t zfcp_qdio_response_handler;
++
++int zfcp_qdio_handler_error_check
++	(zfcp_adapter_t*, unsigned int, unsigned int, unsigned int);
++
++/* prototypes for functions written against the Dynamic I/O layer interface */
++static int zfcp_dio_oper_handler (int, devreg_t *);
++static void zfcp_dio_not_oper_handler (int, int);
++
++/* prototypes for functions written against the Common I/O layer interface */
++static void zfcp_cio_handler (int, void *, struct pt_regs *);
++
++/* prototypes for other functions */
++static int zfcp_task_management_function (zfcp_unit_t*, u8);
++
++static void zfcp_config_parse_error(
++ unsigned char *, unsigned char *, const char *, ...)
++ __attribute__ ((format (printf, 3, 4)));
++static int zfcp_config_parse_record(
++ unsigned char*, int, zfcp_config_record_t*);
++static int zfcp_config_parse_record_list (unsigned char*, int, int);
++//static int zfcp_config_parse_record_del (zfcp_config_record_t*);
++static int zfcp_config_cleanup (void);
++
++static int zfcp_adapter_enqueue (devno_t, zfcp_adapter_t**);
++static int zfcp_port_enqueue (zfcp_adapter_t*, scsi_id_t, wwn_t, u32,
++ zfcp_port_t**);
++static int zfcp_unit_enqueue (zfcp_port_t*, scsi_lun_t, fcp_lun_t,
++ zfcp_unit_t**);
++
++static int zfcp_adapter_dequeue (zfcp_adapter_t*);
++static int zfcp_port_dequeue (zfcp_port_t*);
++static int zfcp_unit_dequeue (zfcp_unit_t*);
++
++static int zfcp_adapter_detect(zfcp_adapter_t*);
++static int zfcp_adapter_irq_register(zfcp_adapter_t*);
++static int zfcp_adapter_irq_unregister(zfcp_adapter_t*);
++static int zfcp_adapter_scsi_register(zfcp_adapter_t*);
++static int zfcp_adapter_scsi_register_all (void);
++static int zfcp_adapter_shutdown_all (void);
++
++static u32 zfcp_derive_driver_version (void);
++
++static inline int zfcp_qdio_reqid_check (zfcp_adapter_t*, void*);
++
++static inline void zfcp_qdio_sbal_limit(zfcp_fsf_req_t*, int);
++static inline volatile qdio_buffer_element_t* zfcp_qdio_sbale_get
++	(zfcp_qdio_queue_t*, int, int);
++static inline volatile qdio_buffer_element_t* zfcp_qdio_sbale_req
++	(zfcp_fsf_req_t*, int, int);
++static inline volatile qdio_buffer_element_t* zfcp_qdio_sbale_resp
++	(zfcp_fsf_req_t*, int, int);
++static inline volatile qdio_buffer_element_t* zfcp_qdio_sbale_curr
++	(zfcp_fsf_req_t*);
++static inline volatile qdio_buffer_element_t* zfcp_qdio_sbal_chain
++	(zfcp_fsf_req_t*, unsigned long);
++static inline volatile qdio_buffer_element_t* zfcp_qdio_sbale_next
++	(zfcp_fsf_req_t*, unsigned long);
++static inline int zfcp_qdio_sbals_zero (zfcp_qdio_queue_t*, int, int);
++static inline int zfcp_qdio_sbals_wipe (zfcp_fsf_req_t*);
++static inline void zfcp_qdio_sbale_fill
++	(zfcp_fsf_req_t*, unsigned long, void*, int);
++static inline int zfcp_qdio_sbals_from_segment
++	(zfcp_fsf_req_t*, unsigned long, void*, unsigned long);
++static inline int zfcp_qdio_sbals_from_buffer
++	(zfcp_fsf_req_t*, unsigned long, void*, unsigned long, int);
++static inline int zfcp_qdio_sbals_from_sg
++	(zfcp_fsf_req_t*, unsigned long, struct scatterlist*, int, int);
++static inline int zfcp_qdio_sbals_from_scsicmnd
++	(zfcp_fsf_req_t*, unsigned long, struct scsi_cmnd*);
++static inline void zfcp_zero_sbals (qdio_buffer_t**, int, int);
++
++static zfcp_unit_t* zfcp_unit_lookup (zfcp_adapter_t*, int, int, int);
++
++/* prototypes for functions faking callbacks of lower layers */
++inline void zfcp_scsi_process_and_clear_fake_queue(unsigned long);
++inline void zfcp_scsi_insert_into_fake_queue(zfcp_adapter_t *,
++ Scsi_Cmnd *);
++void zfcp_fake_outbound_callback (unsigned long);
++void zfcp_fake_inbound_callback (unsigned long);
++
++/* prototypes for proc-file interfacing stuff */
++unsigned long zfcp_find_forward (char**, unsigned long*,
++				 char**, unsigned long*);
++unsigned long zfcp_find_backward (char**, unsigned long*,
++				  char**, unsigned long*);
++int zfcp_create_root_proc(void);
++int zfcp_delete_root_proc(void);
++int zfcp_create_data_procs(void);
++int zfcp_delete_data_procs(void);
++int zfcp_proc_map_open(struct inode*, struct file*);
++int zfcp_proc_map_close(struct inode*, struct file*);
++int zfcp_open_parm_proc(struct inode*, struct file*);
++int zfcp_close_parm_proc(struct inode*, struct file*);
++int zfcp_open_add_map_proc(struct inode*, struct file*);
++int zfcp_close_add_map_proc(struct inode*, struct file*);
++int zfcp_adapter_proc_open(struct inode*, struct file*);
++int zfcp_adapter_proc_close(struct inode*, struct file*);
++int zfcp_port_proc_open(struct inode*, struct file*);
++int zfcp_port_proc_close(struct inode*, struct file*);
++int zfcp_unit_proc_open(struct inode*, struct file*);
++int zfcp_unit_proc_close(struct inode*, struct file*);
++int zfcp_cfdc_dev_ioctl(struct inode*, struct file*, unsigned int, unsigned long);
++ssize_t zfcp_parm_proc_read (struct file*, char*, size_t, loff_t*);
++ssize_t zfcp_parm_proc_write (struct file*, const char*, size_t, loff_t*);
++ssize_t zfcp_add_map_proc_write (struct file*, const char*, size_t, loff_t*);
++ssize_t zfcp_proc_map_read (struct file*, char*, size_t, loff_t*);
++ssize_t zfcp_adapter_proc_read (struct file*, char*, size_t, loff_t*);
++ssize_t zfcp_adapter_proc_write (struct file*, const char*, size_t, loff_t*);
++ssize_t zfcp_port_proc_read (struct file*, char*, size_t, loff_t*);
++ssize_t zfcp_port_proc_write (struct file*, const char*, size_t, loff_t*);
++ssize_t zfcp_unit_proc_read (struct file*, char*, size_t, loff_t*);
++ssize_t zfcp_unit_proc_write (struct file*, const char*, size_t, loff_t*);
++int zfcp_create_adapter_proc(zfcp_adapter_t*);
++int zfcp_delete_adapter_proc(zfcp_adapter_t*);
++int zfcp_create_port_proc(zfcp_port_t*);
++int zfcp_delete_port_proc(zfcp_port_t*);
++int zfcp_create_unit_proc(zfcp_unit_t*);
++int zfcp_delete_unit_proc(zfcp_unit_t*);
++
++/* prototypes for initialisation functions */
++static int zfcp_dio_register (void);
++
++/* prototypes for extended link services functions */
++static int zfcp_els(zfcp_port_t*, u8);
++static void zfcp_els_handler(unsigned long);
++static int zfcp_test_link(zfcp_port_t*);
++
++/* prototypes for error recovery functions */
++static int zfcp_erp_adapter_reopen (zfcp_adapter_t*, int);
++static int zfcp_erp_port_forced_reopen (zfcp_port_t*, int);
++static int zfcp_erp_port_reopen (zfcp_port_t*, int);
++static int zfcp_erp_unit_reopen (zfcp_unit_t*, int);
++
++static int zfcp_erp_adapter_reopen_internal (zfcp_adapter_t*, int);
++static int zfcp_erp_port_forced_reopen_internal (zfcp_port_t*, int);
++static int zfcp_erp_port_reopen_internal (zfcp_port_t*, int);
++static int zfcp_erp_unit_reopen_internal (zfcp_unit_t*, int);
++
++static int zfcp_erp_port_reopen_all (zfcp_adapter_t*, int);
++static int zfcp_erp_port_reopen_all_internal (zfcp_adapter_t*, int);
++static int zfcp_erp_unit_reopen_all_internal (zfcp_port_t*, int);
++
++static inline int zfcp_erp_adapter_shutdown (zfcp_adapter_t*, int);
++static inline int zfcp_erp_port_shutdown (zfcp_port_t*, int);
++static inline int zfcp_erp_port_shutdown_all (zfcp_adapter_t*, int);
++static inline int zfcp_erp_unit_shutdown (zfcp_unit_t*, int);
++
++static int zfcp_erp_adapter_block (zfcp_adapter_t*, int);
++static int zfcp_erp_adapter_unblock (zfcp_adapter_t*);
++static int zfcp_erp_port_block (zfcp_port_t*, int);
++static int zfcp_erp_port_unblock (zfcp_port_t*);
++static int zfcp_erp_unit_block (zfcp_unit_t*, int);
++static int zfcp_erp_unit_unblock (zfcp_unit_t*);
++
++static int zfcp_erp_thread (void*);
++static int zfcp_erp_thread_setup (zfcp_adapter_t*);
++static void zfcp_erp_thread_setup_task (void*);
++static int zfcp_erp_thread_kill (zfcp_adapter_t*);
++
++static int zfcp_erp_strategy (zfcp_erp_action_t*);
++
++static int zfcp_erp_strategy_do_action (zfcp_erp_action_t*);
++static int zfcp_erp_strategy_memwait (zfcp_erp_action_t*);
++static int zfcp_erp_strategy_check_target (zfcp_erp_action_t*, int);
++static int zfcp_erp_strategy_check_unit (zfcp_unit_t*, int);
++static int zfcp_erp_strategy_check_port (zfcp_port_t*, int);
++static int zfcp_erp_strategy_check_adapter (zfcp_adapter_t*, int);
++static int zfcp_erp_strategy_statechange
++ (int, u32, zfcp_adapter_t*, zfcp_port_t*, zfcp_unit_t*, int);
++static inline int zfcp_erp_strategy_statechange_detected (atomic_t*, u32);
++static int zfcp_erp_strategy_followup_actions
++ (int, zfcp_adapter_t*, zfcp_port_t*, zfcp_unit_t*, int);
++static int zfcp_erp_strategy_check_queues (zfcp_adapter_t*);
++static int zfcp_erp_strategy_check_action(zfcp_erp_action_t *, int);
++
++static int zfcp_erp_adapter_strategy (zfcp_erp_action_t*);
++static int zfcp_erp_adapter_strategy_generic (zfcp_erp_action_t*, int);
++static int zfcp_erp_adapter_strategy_close (zfcp_erp_action_t*);
++static int zfcp_erp_adapter_strategy_close_irq (zfcp_erp_action_t*);
++static int zfcp_erp_adapter_strategy_close_qdio (zfcp_erp_action_t*);
++static int zfcp_erp_adapter_strategy_close_fsf (zfcp_erp_action_t*);
++static int zfcp_erp_adapter_strategy_open (zfcp_erp_action_t*);
++static int zfcp_erp_adapter_strategy_open_irq (zfcp_erp_action_t*);
++static int zfcp_erp_adapter_strategy_open_qdio (zfcp_erp_action_t*);
++static int zfcp_erp_adapter_strategy_open_fsf (zfcp_erp_action_t*);
++static int zfcp_erp_adapter_strategy_open_fsf_xconfig (zfcp_erp_action_t*);
++static int zfcp_erp_adapter_strategy_open_fsf_statusread (zfcp_erp_action_t*);
++
++static int zfcp_erp_port_forced_strategy (zfcp_erp_action_t*);
++static int zfcp_erp_port_forced_strategy_close (zfcp_erp_action_t*);
++
++static int zfcp_erp_port_strategy (zfcp_erp_action_t*);
++static int zfcp_erp_port_strategy_clearstati (zfcp_port_t*);
++static int zfcp_erp_port_strategy_close (zfcp_erp_action_t*);
++static int zfcp_erp_port_strategy_open (zfcp_erp_action_t*);
++static int zfcp_erp_port_strategy_open_nameserver (zfcp_erp_action_t*);
++static int zfcp_erp_port_strategy_open_nameserver_wakeup (zfcp_erp_action_t*);
++static int zfcp_erp_port_strategy_open_common (zfcp_erp_action_t*);
++static int zfcp_erp_port_strategy_open_common_lookup (zfcp_erp_action_t*);
++static int zfcp_erp_port_strategy_open_port (zfcp_erp_action_t*);
++
++static int zfcp_erp_unit_strategy (zfcp_erp_action_t*);
++static int zfcp_erp_unit_strategy_clearstati (zfcp_unit_t*);
++static int zfcp_erp_unit_strategy_close (zfcp_erp_action_t*);
++static int zfcp_erp_unit_strategy_open (zfcp_erp_action_t*);
++
++static void zfcp_erp_modify_adapter_status(zfcp_adapter_t*, u32, int);
++static void zfcp_erp_modify_port_status(zfcp_port_t*, u32, int);
++static void zfcp_erp_modify_unit_status(zfcp_unit_t*, u32, int);
++static void zfcp_erp_adapter_failed(zfcp_adapter_t*);
++static void zfcp_erp_port_failed(zfcp_port_t*);
++static void zfcp_erp_unit_failed(zfcp_unit_t*);
++
++static int zfcp_erp_action_dismiss_adapter (zfcp_adapter_t*);
++static int zfcp_erp_action_dismiss_port(zfcp_port_t*);
++/* zfcp_erp_action_dismiss_unit not needed */
++static int zfcp_erp_action_dismiss (zfcp_erp_action_t*);
++
++static int zfcp_erp_action_enqueue
++ (int, zfcp_adapter_t*, zfcp_port_t*, zfcp_unit_t*);
++static int zfcp_erp_action_dequeue (zfcp_erp_action_t*);
++
++static int zfcp_erp_action_ready (zfcp_erp_action_t*);
++static int zfcp_erp_action_exists (zfcp_erp_action_t*);
++
++static inline void zfcp_erp_action_to_ready (zfcp_erp_action_t*);
++static inline void zfcp_erp_action_to_running (zfcp_erp_action_t*);
++static inline void zfcp_erp_from_one_to_other
++ (struct list_head*, struct list_head*);
++
++static int zfcp_erp_async_handler (zfcp_erp_action_t*, unsigned long);
++static void zfcp_erp_memwait_handler (unsigned long);
++static void zfcp_erp_timeout_handler (unsigned long);
++static int zfcp_erp_timeout_init (zfcp_erp_action_t*);
++
++int zfcp_erp_wait (zfcp_adapter_t*);
++
++
++#ifdef ZFCP_STAT_REQSIZES
++static int zfcp_statistics_clear (zfcp_adapter_t*, struct list_head*);
++static int zfcp_statistics_print
++	(zfcp_adapter_t*, struct list_head*, char*, char*, int, int);
++static void zfcp_statistics_inc (zfcp_adapter_t*, struct list_head*, u32);
++static inline void zfcp_statistics_new
++	(zfcp_adapter_t*, struct list_head*, u32);
++static inline void zfcp_statistics_sort
++	(struct list_head*, struct list_head*, zfcp_statistics_t*);
++#endif
++
++
++/* driver data */
++static struct file_operations zfcp_parm_fops =
++{
++ open: zfcp_open_parm_proc,
++ read: zfcp_parm_proc_read,
++ write: zfcp_parm_proc_write,
++ release: zfcp_close_parm_proc,
++};
++
++static struct file_operations zfcp_map_fops =
++{
++ open: zfcp_proc_map_open,
++ read: zfcp_proc_map_read,
++ release: zfcp_proc_map_close,
++};
++
++static struct file_operations zfcp_add_map_fops =
++{
++ open: zfcp_open_add_map_proc,
++ write: zfcp_add_map_proc_write,
++ release: zfcp_close_add_map_proc,
++};
++
++static struct file_operations zfcp_adapter_fops =
++{
++ open: zfcp_adapter_proc_open,
++ read: zfcp_adapter_proc_read,
++ write: zfcp_adapter_proc_write,
++ release: zfcp_adapter_proc_close,
++};
++
++static struct file_operations zfcp_port_fops =
++{
++ open: zfcp_port_proc_open,
++ read: zfcp_port_proc_read,
++ write: zfcp_port_proc_write,
++ release: zfcp_port_proc_close,
++};
++
++static struct file_operations zfcp_unit_fops =
++{
++ open: zfcp_unit_proc_open,
++ read: zfcp_unit_proc_read,
++ write: zfcp_unit_proc_write,
++ release: zfcp_unit_proc_close,
++};
++
++static struct file_operations zfcp_cfdc_fops =
++{
++ ioctl: zfcp_cfdc_dev_ioctl
++};
++
++static struct miscdevice zfcp_cfdc_misc = {
++ minor: ZFCP_CFDC_DEV_MINOR,
++ name: ZFCP_CFDC_DEV_NAME,
++ fops: &zfcp_cfdc_fops
++};
++
++zfcp_data_t zfcp_data = {
++ { /* Scsi Host Template */
++ name: ZFCP_NAME,
++ proc_name: "dummy",
++ proc_info: NULL, /* we don't need scsi proc info */
++ detect: zfcp_scsi_detect,
++ revoke: zfcp_scsi_revoke,
++ release: zfcp_scsi_release,
++ queuecommand: zfcp_scsi_queuecommand,
++ eh_abort_handler: zfcp_scsi_eh_abort_handler,
++ eh_device_reset_handler:zfcp_scsi_eh_device_reset_handler,
++ eh_bus_reset_handler: zfcp_scsi_eh_bus_reset_handler,
++ eh_host_reset_handler: zfcp_scsi_eh_host_reset_handler,
++ /* FIXME(openfcp): Tune */
++ can_queue: 4096,
++ this_id: 0,
++ /*
++ * FIXME:
++ * one less? can zfcp_create_sbale cope with it?
++ */
++ sg_tablesize: ZFCP_MAX_SBALES_PER_REQ,
++ /* some moderate value for the moment */
++ cmd_per_lun: ZFCP_CMND_PER_LUN,
++ /* no requirement on the addresses of data buffers */
++ unchecked_isa_dma: 0,
++ /* maybe try it later */
++ use_clustering: 1,
++ /* we are straight forward */
++ use_new_eh_code: 1
++ }
++ /* rest initialized with zeros */
++};
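++
++/*
++ * Note: the "name:" initializers above use the GNU C labeled-element
++ * syntax, the pre-C99 spelling of designated initializers (".name ="),
++ * which 2.4-era kernel code and gcc accept interchangeably.
++ */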
++
++
++inline fcp_dl_t *zfcp_get_fcp_dl_ptr(fcp_cmnd_iu_t *fcp_cmd)
++{
++ int additional_length = fcp_cmd->add_fcp_cdb_length << 2;
++	fcp_dl_t *fcp_dl_addr =
++ (fcp_dl_t *)(
++ (unsigned char*)fcp_cmd +
++ sizeof(fcp_cmnd_iu_t) +
++ additional_length);
++	/*
++	 * fcp_dl_addr = start address of fcp_cmnd structure +
++	 * size of fixed part + size of dynamically sized add_fcp_cdb field
++	 * (see the FCP-2 specification)
++	 */
++ return fcp_dl_addr;
++}
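++
++/*
++ * Worked example (illustrative values only): with add_fcp_cdb_length == 4,
++ * the additional CDB occupies 4 << 2 == 16 bytes, so for an IU at address
++ * iu, zfcp_get_fcp_dl_ptr(iu) returns
++ *
++ *	(fcp_dl_t *)((unsigned char *)iu + sizeof(fcp_cmnd_iu_t) + 16)
++ */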
++
++
++inline fcp_dl_t zfcp_get_fcp_dl(fcp_cmnd_iu_t *fcp_cmd)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ ZFCP_LOG_TRACE("enter (fcp_cmd=0x%lx)\n",
++ (unsigned long) fcp_cmd);
++ ZFCP_LOG_TRACE("exit 0x%lx\n",
++ (unsigned long)*zfcp_get_fcp_dl_ptr(fcp_cmd));
++ return *zfcp_get_fcp_dl_ptr(fcp_cmd);
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++inline void zfcp_set_fcp_dl(fcp_cmnd_iu_t *fcp_cmd, fcp_dl_t fcp_dl)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++ ZFCP_LOG_TRACE("enter (fcp_cmd=0x%lx), (fcp_dl=0x%x)\n",
++ (unsigned long)fcp_cmd,
++ fcp_dl);
++	*zfcp_get_fcp_dl_ptr(fcp_cmd) = fcp_dl;
++ ZFCP_LOG_TRACE("exit\n");
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++#ifdef MODULE
++/* declare driver module init/cleanup functions */
++module_init(zfcp_module_init);
++module_exit(zfcp_module_exit);
++
++MODULE_AUTHOR(
++ "Martin Peschke <mpeschke at de.ibm.com>, "
++ "Raimund Schroeder <raimund.schroeder at de.ibm.com>, "
++ "Aron Zeh <arzeh at de.ibm.com>, "
++ "IBM Deutschland Entwicklung GmbH");
++/* what this driver module is about */
++MODULE_DESCRIPTION(
++ "FCP (SCSI over Fibre Channel) HBA driver for IBM eServer zSeries, " ZFCP_REVISION);
++MODULE_LICENSE("GPL");
++/* log level may be provided as a module parameter */
++MODULE_PARM(loglevel, "i");
++/* short explanation of the previous module parameter */
++MODULE_PARM_DESC(loglevel,
++ "log levels, 8 nibbles: "
++ "(unassigned) ERP QDIO DIO Config FSF SCSI Other, "
++ "levels: 0=none 1=normal 2=devel 3=trace");
++#endif /* MODULE */
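++
++/*
++ * Example (hypothetical nibble values): to raise FSF and SCSI logging to
++ * "devel" while keeping the other areas at "normal", the module might be
++ * loaded with
++ *
++ *	insmod zfcp loglevel=0x01111221
++ *
++ * i.e. nibbles (unassigned)=0, ERP=1, QDIO=1, DIO=1, Config=1, FSF=2,
++ * SCSI=2, Other=1, as laid out in the parameter description above.
++ */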
++
++#ifdef ZFCP_PRINT_FLAGS
++static u32 flags_dump=0;
++MODULE_PARM(flags_dump, "i");
++#define ZFCP_LOG_FLAGS(ll, m...) \
++ if (ll<=flags_dump) \
++ _ZFCP_LOG(m)
++#else
++#define ZFCP_LOG_FLAGS(ll, m...)
++#endif
++
++static char *map = NULL;
++#ifdef MODULE
++MODULE_PARM(map, "s");
++MODULE_PARM_DESC(map,
++ "Initial FC to SCSI mapping table");
++
++/* enable/disable paranoia (extra checks to ensure driver consistency) */
++MODULE_PARM(zfcp_paranoia, "b");
++/* short explanation of the previous module parameter */
++MODULE_PARM_DESC(zfcp_paranoia,
++	"extra checks to ensure driver consistency, "
++	"0=disabled, !0=enabled");
++#else
++
++/* zfcp_map boot parameter */
++static int __init zfcp_map_setup(char *str)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++
++	/* skip leading " */
++	map = str + 1;
++	/* overwrite trailing " with a separator */
++	map[strlen(map) - 1] = ZFCP_PARSE_SPACE_CHARS[0];
++ ZFCP_LOG_INFO("map is %s\n", map);
++	return 1;	/* non-zero: option consumed by __setup handler */
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++__setup("zfcp_map=", zfcp_map_setup);
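++
++/*
++ * Illustrative kernel command line (the devno/WWPN/LUN values and record
++ * syntax shown here are only a sketch; see the driver documentation for
++ * the authoritative map format):
++ *
++ *	zfcp_map="0xfc00 0x1:0x5005076300c7c2c5 0x0:0x0000000000000000"
++ *
++ * The handler above strips the surrounding quotes before the string is
++ * handed to the config parser.
++ */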
++
++/* zfcp_loglevel boot_parameter */
++static int __init zfcp_loglevel_setup(char *str)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++
++ loglevel = simple_strtoul(str, NULL, 0);
++ //ZFCP_LOG_NORMAL("loglevel is 0x%x\n", loglevel);
++	return 1;	/* non-zero: option consumed by __setup handler */
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++__setup("zfcp_loglevel=", zfcp_loglevel_setup);
++
++#endif /* MODULE */
++
++#ifdef ZFCP_CAUSE_ERRORS
++void zfcp_force_error(unsigned long data)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ zfcp_adapter_t *adapter;
++
++ ZFCP_LOG_NORMAL("Cause error....\n");
++ adapter = ZFCP_FIRST_ADAPTER;
++	printk("adapter reopen\n");
++	zfcp_erp_adapter_reopen(adapter, 0);
++	printk("adapter close\n");
++ zfcp_erp_adapter_shutdown(adapter, 0);
++ /*
++ zfcp_force_error_timer.function = zfcp_force_error;
++ zfcp_force_error_timer.data = 0;
++ zfcp_force_error_timer.expires = jiffies + 60*HZ;
++ add_timer(&zfcp_force_error_timer);
++ */
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++#endif //ZFCP_CAUSE_ERRORS
++
++
++static inline int zfcp_fsf_req_is_scsi_cmnd(zfcp_fsf_req_t *fsf_req)
++{
++ return ((fsf_req->fsf_command == FSF_QTCB_FCP_CMND) &&
++ !(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT));
++}
++
++
++static inline void zfcp_cmd_dbf_event_fsf(
++ const char *text,
++ zfcp_fsf_req_t *fsf_req,
++ void *add_data,
++ int add_length)
++{
++#ifdef ZFCP_DEBUG_COMMANDS
++ zfcp_adapter_t *adapter = fsf_req->adapter;
++ Scsi_Cmnd *scsi_cmnd;
++ int level = 3;
++ int i;
++ unsigned long flags;
++
++ write_lock_irqsave(&adapter->cmd_dbf_lock, flags);
++ if (zfcp_fsf_req_is_scsi_cmnd(fsf_req)) {
++ scsi_cmnd = fsf_req->data.send_fcp_command_task.scsi_cmnd;
++ debug_text_event(adapter->cmd_dbf, level, "fsferror");
++ debug_text_event(adapter->cmd_dbf, level, text);
++ debug_event(adapter->cmd_dbf, level, &fsf_req, sizeof(unsigned long));
++ debug_event(adapter->cmd_dbf, level, &fsf_req->seq_no, sizeof(u32));
++ debug_event(adapter->cmd_dbf, level, &scsi_cmnd, sizeof(unsigned long));
++ debug_event(adapter->cmd_dbf, level, &scsi_cmnd->cmnd,
++ min(ZFCP_CMD_DBF_LENGTH, (int)scsi_cmnd->cmd_len));
++ for (i = 0; i < add_length; i += ZFCP_CMD_DBF_LENGTH)
++ debug_event(
++ adapter->cmd_dbf,
++ level,
++ (char*)add_data + i,
++ min(ZFCP_CMD_DBF_LENGTH, add_length - i));
++ }
++ write_unlock_irqrestore(&adapter->cmd_dbf_lock, flags);
++#endif
++}
++
++
++static inline void zfcp_cmd_dbf_event_scsi(
++ const char *text,
++ zfcp_adapter_t *adapter,
++ Scsi_Cmnd *scsi_cmnd)
++{
++#ifdef ZFCP_DEBUG_COMMANDS
++ zfcp_fsf_req_t *fsf_req = (zfcp_fsf_req_t*) scsi_cmnd->host_scribble;
++ int level = ((host_byte(scsi_cmnd->result) != 0) ? 1 : 5);
++ unsigned long flags;
++
++ write_lock_irqsave(&adapter->cmd_dbf_lock, flags);
++ debug_text_event(adapter->cmd_dbf, level, "hostbyte");
++ debug_text_event(adapter->cmd_dbf, level, text);
++ debug_event(adapter->cmd_dbf, level, &scsi_cmnd->result, sizeof(u32));
++ debug_event(adapter->cmd_dbf, level, &scsi_cmnd, sizeof(unsigned long));
++ debug_event(adapter->cmd_dbf, level, &scsi_cmnd->cmnd,
++ min(ZFCP_CMD_DBF_LENGTH, (int)scsi_cmnd->cmd_len));
++ if (fsf_req) {
++ debug_event(adapter->cmd_dbf, level, &fsf_req, sizeof(unsigned long));
++ debug_event(adapter->cmd_dbf, level, &fsf_req->seq_no, sizeof(u32));
++ } else {
++ debug_text_event(adapter->cmd_dbf, level, "");
++ debug_text_event(adapter->cmd_dbf, level, "");
++ }
++ write_unlock_irqrestore(&adapter->cmd_dbf_lock, flags);
++#endif
++}
++
++
++static inline void zfcp_in_els_dbf_event(
++ zfcp_adapter_t *adapter,
++ const char *text,
++ fsf_status_read_buffer_t *status_buffer,
++ int length)
++{
++#ifdef ZFCP_DEBUG_INCOMING_ELS
++ int level = 1;
++ int i;
++
++ debug_text_event(adapter->in_els_dbf, level, text);
++ debug_event(adapter->in_els_dbf, level, &status_buffer->d_id, 8);
++ for (i = 0; i < length; i += ZFCP_IN_ELS_DBF_LENGTH)
++ debug_event(
++ adapter->in_els_dbf,
++ level,
++ (char*)status_buffer->payload + i,
++ min(ZFCP_IN_ELS_DBF_LENGTH, length - i));
++#endif
++}
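++
++/*
++ * The loop above records the payload in chunks of at most
++ * ZFCP_IN_ELS_DBF_LENGTH bytes. Assuming, for illustration, a chunk size
++ * of 32 and length == 100, four debug events of 32, 32, 32 and 4 bytes
++ * would be generated; min() clamps only the final chunk.
++ */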
++
++
++/****************************************************************/
++
++
++/*
++ * function: zfcp_module_init
++ *
++ * purpose: driver module initialization routine
++ *
++ * locks: initialises zfcp_data.proc_sema, zfcp_data.adapter_list_lock
++ * zfcp_data.proc_sema is taken and released within this
++ * function
++ *
++ * returns: 0 success
++ * !0 failure
++ */
++static int __init zfcp_module_init(void)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ int retval = 0;
++
++ atomic_set(&zfcp_data.loglevel, loglevel);
++
++#ifdef ZFCP_LOW_MEM_CREDITS
++ atomic_set(&zfcp_data.lowmem_credit, 0);
++#endif
++
++ ZFCP_LOG_DEBUG(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
++ ZFCP_LOG_TRACE("enter\n");
++
++ ZFCP_LOG_TRACE(
++ "Start Address of module: 0x%lx\n",
++ (unsigned long) &zfcp_module_init);
++
++ /* derive driver version number from cvs revision string */
++ zfcp_data.driver_version = zfcp_derive_driver_version();
++ ZFCP_LOG_NORMAL(
++ "driver version 0x%x\n",
++ zfcp_data.driver_version);
++
++ /* initialize adapter list */
++ rwlock_init(&zfcp_data.adapter_list_lock);
++ INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
++
++ /* initialize lock for callback handling */
++ rwlock_init(&zfcp_callback.lock);
++
++ /* initialize map list */
++ INIT_LIST_HEAD(&zfcp_data.map_list_head);
++
++#ifdef CONFIG_S390_SUPPORT
++ retval = register_ioctl32_conversion(
++ zfcp_cfdc_ioc.cmd, zfcp_cfdc_ioc.handler);
++ if (retval != 0) {
++ ZFCP_LOG_INFO(
++			"Cannot register 32-bit support for the ioctl handler\n");
++ goto failed_ioctl32;
++ }
++#endif
++ retval = misc_register(&zfcp_cfdc_misc);
++ if (retval != 0) {
++ ZFCP_LOG_INFO(
++ "Special file of the control file data channel "
++			"could not be registered\n");
++ goto failed_misc_register;
++ } else {
++ ZFCP_LOG_INFO(
++ "Special file of the control file data channel "
++			"was registered with MAJOR/MINOR numbers %d/%d\n",
++ ZFCP_CFDC_DEV_MAJOR,
++ zfcp_cfdc_misc.minor);
++ }
++
++ /* Initialise proc semaphores */
++	sema_init(&zfcp_data.proc_sema, 1);
++ down(&zfcp_data.proc_sema); /* config changes protected by proc_sema */
++
++#ifdef CONFIG_PROC_FS
++ retval = zfcp_create_root_proc();
++ if (retval) {
++ ZFCP_LOG_NORMAL(
++ "Error: Proc fs startup failed\n");
++ goto failed_root_proc;
++ }
++
++ retval = zfcp_create_data_procs();
++ if (retval) {
++ ZFCP_LOG_NORMAL(
++ "Error: Proc fs startup failed\n");
++ goto failed_data_procs;
++ }
++#endif
++
++ /* always succeeds for now */
++ /* FIXME: set priority? */
++ zfcp_data.reboot_notifier.notifier_call = zfcp_reboot_handler;
++ register_reboot_notifier(&zfcp_data.reboot_notifier);
++
++ /*
++ * parse module parameter string for valid configurations and create
++ * entries for configured adapters, remote ports and logical units
++ */
++ if (map) {
++ retval = zfcp_config_parse_record_list(
++ map,
++ strlen(map),
++ ZFCP_PARSE_ADD);
++
++ if (retval < 0)
++ goto failed_parse; /* some entries may have been created */
++ }
++
++ /* save address of data structure managing the driver module */
++ zfcp_data.scsi_host_template.module = THIS_MODULE;
++
++ /*
++ * register driver module with SCSI stack
++ * we do this last to avoid the need to revert this step
++ * if other init stuff goes wrong
++ * (scsi_unregister_module() does not work here!)
++ */
++ retval = scsi_register_module(
++ MODULE_SCSI_HA,
++ &zfcp_data.scsi_host_template);
++ if (retval) {
++ ZFCP_LOG_NORMAL(
++ "error: Registration of the driver module "
++ "with the Linux SCSI stack failed.\n");
++ goto failed_scsi;
++ }
++
++ /* setup dynamic I/O */
++ retval = zfcp_dio_register();
++ if (retval) {
++ ZFCP_LOG_NORMAL(
++ "warning: Dynamic attach/detach facilities for "
++			"the adapter(s) could not be started.\n");
++ retval = 0;
++ }
++
++ up(&zfcp_data.proc_sema); /* release procfs */
++
++#ifdef ZFCP_CAUSE_ERRORS
++ init_timer(&zfcp_force_error_timer);
++ zfcp_force_error_timer.function = zfcp_force_error;
++ zfcp_force_error_timer.data = 0;
++ zfcp_force_error_timer.expires = jiffies + 60*HZ;
++ add_timer(&zfcp_force_error_timer);
++#endif
++
++ /* we did it, skip all cleanups related to failures */
++ goto out;
++
++ failed_scsi:
++ failed_parse:
++ /* FIXME: might there be a race between module unload and shutdown? */
++ unregister_reboot_notifier(&zfcp_data.reboot_notifier);
++ zfcp_adapter_shutdown_all();
++ zfcp_config_cleanup();
++	/*
++	 * FIXME(design):
++	 * We need a way to cancel all proc usage at this point.
++	 * Just holding a semaphore is not sufficient, since that
++	 * semaphore merely makes proc users sleep inside our proc code.
++	 * If we wake them, we do not know when they have actually
++	 * left the proc path. They must have left it before we are
++	 * allowed to delete proc entries. Some kind of handshake is
++	 * needed to ensure that all proc users are really gone, and
++	 * even then we cannot prevent another proc user from entering
++	 * the proc path just before we delete the proc entries.
++	 */
++ zfcp_delete_data_procs();
++
++ failed_data_procs:
++ zfcp_delete_root_proc();
++
++ failed_root_proc:
++ misc_deregister(&zfcp_cfdc_misc);
++ failed_misc_register:
++#ifdef CONFIG_S390_SUPPORT
++ unregister_ioctl32_conversion(zfcp_cfdc_ioc.cmd);
++ failed_ioctl32:
++#endif
++
++ ZFCP_LOG_NORMAL("error: Module could not be loaded.\n");
++
++ out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++__initcall(zfcp_module_init);
++
++/*
++ * function: zfcp_module_exit
++ *
++ * purpose: driver module cleanup routine
++ *
++ * locks: zfcp_data.proc_sema is acquired prior to calling
++ * zfcp_config_cleanup and released afterwards
++ *
++ * returns: void
++ */
++static void __exit zfcp_module_exit(void)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++	int temp_ret = 0;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ /* FIXME: might there be a race between module unload and shutdown? */
++ unregister_reboot_notifier(&zfcp_data.reboot_notifier);
++
++ /* unregister driver module from SCSI stack */
++ scsi_unregister_module(MODULE_SCSI_HA, &zfcp_data.scsi_host_template);
++
++ /* shutdown all adapters (incl. those not registered in SCSI stack) */
++ zfcp_adapter_shutdown_all();
++
++ zfcp_delete_data_procs();
++ /* unregister from Dynamic I/O */
++ temp_ret = s390_device_unregister(&zfcp_data.devreg);
++ ZFCP_LOG_TRACE(
++ "s390_device_unregister returned %i\n",
++ temp_ret);
++ temp_ret = s390_device_unregister(&zfcp_data.devreg_priv);
++ ZFCP_LOG_TRACE(
++ "s390_device_unregister returned %i (privileged subchannel)\n",
++ temp_ret);
++ ZFCP_LOG_TRACE("Before zfcp_config_cleanup\n");
++
++ /* free all resources dynamically allocated */
++
++ /* block proc access to config */
++ down(&zfcp_data.proc_sema);
++	temp_ret = zfcp_config_cleanup();
++ up(&zfcp_data.proc_sema);
++
++ if (temp_ret) {
++ ZFCP_LOG_NORMAL("bug: Could not free all memory "
++ "(debug info %d)\n",
++ temp_ret);
++ }
++
++ zfcp_delete_root_proc();
++
++ misc_deregister(&zfcp_cfdc_misc);
++#ifdef CONFIG_S390_SUPPORT
++ unregister_ioctl32_conversion(zfcp_cfdc_ioc.cmd);
++#endif
++
++#ifdef ZFCP_CAUSE_ERRORS
++ del_timer(&zfcp_force_error_timer);
++#endif
++
++ ZFCP_LOG_TRACE("exit\n");
++ ZFCP_LOG_DEBUG("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n");
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_reboot_handler
++ *
++ * purpose: This function is called automatically by the kernel whenever
++ * a reboot or a shut-down is initiated and zfcp is still
++ * loaded
++ *
++ * locks: zfcp_data.proc_sema is taken prior to shutting down the module
++ * and removing all structures
++ *
++ * returns: NOTIFY_DONE in all cases
++ */
++int zfcp_reboot_handler(struct notifier_block *notifier, unsigned long code, void *ptr)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ int retval = NOTIFY_DONE;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++	/* block proc access to config for the rest of this system's lifetime */
++ down(&zfcp_data.proc_sema);
++ zfcp_adapter_shutdown_all();
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_adapter_shutdown_all
++ *
++ * purpose:	calls zfcp_erp_adapter_shutdown for each adapter in turn
++ *		to stop all I/O, return all outstanding packets and
++ *		relinquish all IRQs
++ * Note: This function waits for completion of all shutdowns
++ *
++ * returns: 0 in all cases
++ */
++static int zfcp_adapter_shutdown_all(void)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ int retval = 0;
++ zfcp_adapter_t *adapter;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ /*
++ * no adapter list lock since list won't change (proc is blocked),
++ * this allows sleeping while iterating the list
++ */
++ ZFCP_FOR_EACH_ADAPTER(adapter)
++ zfcp_erp_adapter_shutdown(adapter, 0);
++	/* start all shutdowns before waiting on any, to allow for concurrency */
++ ZFCP_FOR_EACH_ADAPTER(adapter)
++ zfcp_erp_wait(adapter);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_erp_port_shutdown_all
++ *
++ * purpose: wrapper around zfcp_erp_port_reopen_all setting all the
++ * required parameters to close a port
++ *
++ * returns: 0 in all cases
++ */
++static int zfcp_erp_port_shutdown_all(zfcp_adapter_t *adapter, int clear_mask)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter (adapter=0x%lx)\n",
++ (unsigned long)adapter);
++
++ zfcp_erp_port_reopen_all(adapter,
++ ZFCP_STATUS_COMMON_RUNNING |
++ ZFCP_STATUS_COMMON_ERP_FAILED |
++ clear_mask);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++
++/*
++ * function: zfcp_dio_register
++ *
++ * purpose:	Registers the FCP-adapter specific device characteristics
++ *		with the common I/O layer. oper/not_oper calls will only be
++ *		presented for devices that match the criteria below.
++ *
++ * returns: 0 on success
++ * -error code on failure
++ */
++static int zfcp_dio_register(void)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_DIO
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_DIO
++
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ /* register handler for Dynamic I/O */
++ zfcp_data.devreg.ci.hc.ctype = ZFCP_CONTROL_UNIT_TYPE;
++ zfcp_data.devreg.ci.hc.cmode = ZFCP_CONTROL_UNIT_MODEL;
++ zfcp_data.devreg.ci.hc.dtype = ZFCP_DEVICE_TYPE;
++ zfcp_data.devreg.ci.hc.dmode = ZFCP_DEVICE_MODEL;
++ zfcp_data.devreg.flag = DEVREG_TYPE_DEVCHARS | DEVREG_EXACT_MATCH;
++ zfcp_data.devreg.oper_func = &zfcp_dio_oper_handler;
++
++ retval = s390_device_register(&zfcp_data.devreg);
++ if (retval < 0) {
++ ZFCP_LOG_NORMAL(
++ "bug: The FSF device type could not "
++			"be registered with the S/390 I/O layer "
++			"(debug info %d)\n",
++ retval);
++ }
++
++ zfcp_data.devreg_priv.ci.hc.ctype = ZFCP_CONTROL_UNIT_TYPE;
++ zfcp_data.devreg_priv.ci.hc.cmode = ZFCP_CONTROL_UNIT_MODEL;
++ zfcp_data.devreg_priv.ci.hc.dtype = ZFCP_DEVICE_TYPE;
++ zfcp_data.devreg_priv.ci.hc.dmode = ZFCP_DEVICE_MODEL_PRIV;
++ zfcp_data.devreg_priv.flag = DEVREG_TYPE_DEVCHARS | DEVREG_EXACT_MATCH;
++ zfcp_data.devreg_priv.oper_func = &zfcp_dio_oper_handler;
++
++ retval = s390_device_register(&zfcp_data.devreg_priv);
++ if (retval < 0) {
++ ZFCP_LOG_NORMAL(
++ "bug: The FSF privileged device type could not "
++			"be registered with the S/390 I/O layer "
++			"(debug info %d)\n",
++ retval);
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
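++
++/*
++ * Two registrations are made deliberately: one for the standard device
++ * model and one for the privileged model (ZFCP_DEVICE_MODEL_PRIV); both
++ * funnel into the same zfcp_dio_oper_handler.
++ */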
++
++
++/*
++ * function: zfcp_config_cleanup
++ *
++ * purpose: must only be called after all adapters are properly shut down
++ * Frees all device structs (unit, port, adapter) and removes them
++ * from the lists
++ *
++ * returns:	0 - no error occurred during cleanup
++ *		!0 - one or more errors occurred during cleanup
++ * (retval is not guaranteed to be a valid -E*)
++ *
++ * context: called on failure of module_init and from module_exit
++ *
++ * locks: zfcp_data.proc_sema needs to be held on function entry
++ * adapter->port_list_lock,
++ * port->unit_list_lock are held when walking the
++ * respective lists
++ */
++static int zfcp_config_cleanup(void)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ int retval = 0;
++ zfcp_adapter_t *adapter, *tmp_adapter;
++ zfcp_port_t *port, *tmp_port;
++ zfcp_unit_t *unit, *tmp_unit;
++ unsigned long flags = 0;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ /* Note: no adapter_list_lock is needed as we have the proc_sema */
++ ZFCP_FOR_NEXT_ADAPTER (adapter, tmp_adapter) {
++ write_lock_irqsave(&adapter->port_list_lock, flags);
++ ZFCP_FOR_NEXT_PORT (adapter, port, tmp_port) {
++ write_lock(&port->unit_list_lock);
++ ZFCP_FOR_NEXT_UNIT (port, unit, tmp_unit){
++ retval |= zfcp_unit_dequeue(unit);
++ }
++ write_unlock(&port->unit_list_lock);
++ retval |= zfcp_port_dequeue(port);
++ }
++ write_unlock_irqrestore(&adapter->port_list_lock, flags);
++ retval |= zfcp_adapter_dequeue(adapter);
++ }
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
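++
++/*
++ * Lock nesting above: port_list_lock is the outer lock and is taken with
++ * write_lock_irqsave(); the inner unit_list_lock can be taken with a plain
++ * write_lock() because interrupts are already disabled at that point.
++ */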
++
++
++/*
++ * function: zfcp_derive_driver_version
++ *
++ * purpose:	generates a 32-bit value from the CVS-maintained revision string
++ *
++ * returns: !0 - driver version
++ * format: 0 .. 7 8 .. 15 16 .. 31
++ * (reserved) major . minor
++ * 0 - if no version string could be assembled
++ */
++static u32 zfcp_derive_driver_version(void)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ char *revision = ZFCP_REVISION;
++ u32 version = 0;
++ char *d;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ /* major */
++ for (d = revision; !isdigit(d[0]); d++) {}
++ version |= (simple_strtoul(d, &d, 10) & 0xFF) << 16;
++
++ /* dot */
++ if (d[0] != '.') {
++ ZFCP_LOG_NORMAL(
++ "bug: Revision number generation from string "
++			"unsuccessful. Setting revision number to 0 and "
++ "continuing (debug info %s).\n",
++ revision);
++ version = 0;
++ goto out;
++ }
++ d++;
++
++ /* minor */
++ version |= simple_strtoul(d, NULL, 10) & 0xFFFF;
++
++out:
++ ZFCP_LOG_TRACE("exit (0x%x)\n", version);
++
++ return version;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
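++
++/*
++ * Worked example: assuming ZFCP_REVISION expands to a CVS keyword string
++ * such as "$Revision: 1.42 $", the scan skips to the first digit, parses
++ * major == 1 and minor == 42, and yields
++ *
++ *	version = (1 << 16) | 42 == 0x0001002a
++ */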
++
++
++/*
++ * function: zfcp_buffers_enqueue
++ *
++ * purpose: allocates BUFFER memory to each of the pointers of
++ * the qdio_buffer_t array in the adapter struct
++ *
++ * returns: number of buffers allocated
++ *
++ * comments:	cur_buf is the pointer array and count can be any
++ *		number of required buffers; the page-fitting arithmetic is
++ *		done entirely within this function
++ *
++ * locks: must only be called with zfcp_data.proc_sema taken
++ */
++int zfcp_buffers_enqueue(qdio_buffer_t **cur_buf, int count)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++
++ int buf_pos;
++ int qdio_buffers_per_page;
++ int page_pos = 0;
++ qdio_buffer_t *first_in_page = NULL;
++
++ ZFCP_LOG_TRACE(
++ "enter cur_buf 0x%lx\n",
++ (unsigned long)cur_buf);
++
++ qdio_buffers_per_page = PAGE_SIZE / sizeof(qdio_buffer_t);
++ ZFCP_LOG_TRACE(
++ "Buffers per page %d.\n",
++ qdio_buffers_per_page);
++
++ for (buf_pos = 0; buf_pos < count; buf_pos++) {
++ if (page_pos == 0) {
++ cur_buf[buf_pos] = (qdio_buffer_t*) ZFCP_GET_ZEROED_PAGE(GFP_KERNEL);
++ if (cur_buf[buf_pos] == NULL) {
++ ZFCP_LOG_INFO(
++ "error: Could not allocate "
++ "memory for qdio transfer structures.\n");
++ goto out;
++ }
++ first_in_page = cur_buf[buf_pos];
++		} else {
++			cur_buf[buf_pos] = first_in_page + page_pos;
++		}
++ /* was initialised to zero */
++ page_pos++;
++ page_pos %= qdio_buffers_per_page;
++ } // for (buf_pos = 0; buf_pos < count; buf_pos++)
++ out:
++ ZFCP_LOG_TRACE("exit (%d)\n", buf_pos);
++ return buf_pos;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
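++
++/*
++ * Page-fitting example: assuming PAGE_SIZE == 4096 and
++ * sizeof(qdio_buffer_t) == 256 (16 SBALEs of 16 bytes each), sixteen
++ * buffers share one page; a fresh page is allocated whenever page_pos
++ * wraps to 0, and the remaining slots are carved out of first_in_page
++ * by pointer arithmetic.
++ */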
++
++
++/*
++ * function: zfcp_buffers_dequeue
++ *
++ * purpose: frees BUFFER memory for each of the pointers of
++ * the qdio_buffer_t array in the adapter struct
++ *
++ * returns: sod all
++ *
++ * comments: cur_buf is the pointer array and count can be any
++ * number of buffers in the array that should be freed
++ * starting from buffer 0
++ *
++ * locks: must only be called with zfcp_data.proc_sema taken
++ */
++void zfcp_buffers_dequeue(qdio_buffer_t **cur_buf, int count)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++
++ int buf_pos;
++ int qdio_buffers_per_page;
++
++ ZFCP_LOG_TRACE("enter cur_buf 0x%lx count %d\n",
++ (unsigned long)cur_buf,
++ count);
++
++ qdio_buffers_per_page = PAGE_SIZE / sizeof(qdio_buffer_t);
++ ZFCP_LOG_TRACE(
++ "Buffers per page %d.\n",
++ qdio_buffers_per_page);
++
++ for (buf_pos = 0; buf_pos < count; buf_pos += qdio_buffers_per_page) {
++ ZFCP_FREE_PAGE((unsigned long)cur_buf[buf_pos]);
++ }
++
++ ZFCP_LOG_TRACE("exit\n");
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_allocate_qdio_queues
++ *
++ * purpose: wrapper around zfcp_buffers_enqueue with possible calls
++ * to zfcp_buffers_dequeue in the error case. Deals with
++ * request and response queues
++ *
++ * returns: 0 on success
++ *		-ENOMEM if allocation of buffers failed
++ *		(all buffers are guaranteed to be unallocated in this case)
++ *
++ * comments: called only from adapter_enqueue
++ *
++ * locks: must only be called with zfcp_data.proc_sema taken
++ */
++int zfcp_allocate_qdio_queues(zfcp_adapter_t *adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++
++ int buffer_count;
++	int retval = 0;
++
++ ZFCP_LOG_TRACE("enter (adapter=0x%lx)\n",
++ (unsigned long)adapter);
++
++ buffer_count = zfcp_buffers_enqueue(
++ &(adapter->request_queue.buffer[0]),
++ QDIO_MAX_BUFFERS_PER_Q);
++ if (buffer_count < QDIO_MAX_BUFFERS_PER_Q) {
++ ZFCP_LOG_DEBUG("error: Out of memory allocating "
++			       "request queue, only %d buffers obtained. "
++ "Binning them.\n",
++ buffer_count);
++ zfcp_buffers_dequeue(
++ &(adapter->request_queue.buffer[0]),
++ buffer_count);
++ retval = -ENOMEM;
++ goto out;
++ }
++
++ buffer_count = zfcp_buffers_enqueue(
++ &(adapter->response_queue.buffer[0]),
++ QDIO_MAX_BUFFERS_PER_Q);
++ if (buffer_count < QDIO_MAX_BUFFERS_PER_Q) {
++ ZFCP_LOG_DEBUG("error: Out of memory allocating "
++			       "response queue, only %d buffers obtained. "
++ "Binning them.\n",
++ buffer_count);
++ zfcp_buffers_dequeue(
++ &(adapter->response_queue.buffer[0]),
++ buffer_count);
++ ZFCP_LOG_TRACE("Deallocating request_queue Buffers.\n");
++ zfcp_buffers_dequeue(
++ &(adapter->request_queue.buffer[0]),
++ QDIO_MAX_BUFFERS_PER_Q);
++ retval = -ENOMEM;
++ goto out;
++ }
++ out:
++ ZFCP_LOG_TRACE("exit (%d)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_free_qdio_queues
++ *
++ * purpose: wrapper around zfcp_buffers_dequeue for request and response
++ * queues
++ *
++ * returns: sod all
++ *
++ * comments: called only from adapter_dequeue
++ *
++ * locks: must only be called with zfcp_data.proc_sema taken
++ */
++void zfcp_free_qdio_queues(zfcp_adapter_t *adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++
++ ZFCP_LOG_TRACE("enter (adapter=0x%lx)\n",
++ (unsigned long)adapter);
++
++ ZFCP_LOG_TRACE("Deallocating request_queue Buffers.\n");
++ zfcp_buffers_dequeue(
++ &(adapter->request_queue.buffer[0]),
++ QDIO_MAX_BUFFERS_PER_Q);
++
++ ZFCP_LOG_TRACE("Deallocating response_queue Buffers.\n");
++ zfcp_buffers_dequeue(
++ &(adapter->response_queue.buffer[0]),
++ QDIO_MAX_BUFFERS_PER_Q);
++
++ ZFCP_LOG_TRACE("exit\n");
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_free_low_mem_buffers
++ *
++ * purpose: frees all static memory in the pools previously allocated by
++ *		zfcp_allocate_low_mem_buffers
++ *
++ * returns: sod all
++ *
++ * locks: must only be called with zfcp_data.proc_sema taken
++ */
++static void zfcp_free_low_mem_buffers(zfcp_adapter_t *adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++
++ ZFCP_LOG_TRACE("enter (adapter 0x%lx)\n",
++ (unsigned long)adapter);
++
++ zfcp_mem_pool_destroy(&adapter->pool.fsf_req_status_read);
++ zfcp_mem_pool_destroy(&adapter->pool.data_status_read);
++ zfcp_mem_pool_destroy(&adapter->pool.data_gid_pn);
++ zfcp_mem_pool_destroy(&adapter->pool.fsf_req_erp);
++ zfcp_mem_pool_destroy(&adapter->pool.fsf_req_scsi);
++
++ ZFCP_LOG_TRACE("exit\n");
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_allocate_low_mem_buffers
++ *
++ * purpose:	The central place for static memory buffer pool generation.
++ *		Called only from zfcp_adapter_enqueue in order to allocate
++ *		a combined QTCB/fsf_req buffer for erp actions and fcp/SCSI
++ *		commands.
++ *		It also generates fcp-nameserver request/response buffer pairs
++ *		and unsolicited status read fsf_req buffers by means of
++ *		function calls to the appropriate handlers.
++ *
++ * returns: 0 on success
++ * -ENOMEM on failure (some buffers might be allocated)
++ *
++ * locks: must only be called with zfcp_data.proc_sema taken
++ */
++static int zfcp_allocate_low_mem_buffers(zfcp_adapter_t *adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter (adapter 0x%lx)\n",
++ (unsigned long)adapter);
++
++ retval = zfcp_mem_pool_create(&adapter->pool.fsf_req_erp, 1,
++ sizeof(struct zfcp_fsf_req_pool_buffer),
++ 0, 0);
++ if (retval) {
++ ZFCP_LOG_INFO(
++			"error: ERP request buffer pool allocation failed\n");
++ goto out;
++ }
++
++ retval = zfcp_mem_pool_create(&adapter->pool.data_gid_pn, 1,
++ sizeof(struct zfcp_gid_pn_data), 0, 0);
++
++ if (retval) {
++ ZFCP_LOG_INFO(
++ "error: Nameserver buffer pool allocation failed\n");
++ goto out;
++ }
++
++ retval = zfcp_mem_pool_create(&adapter->pool.fsf_req_status_read,
++ ZFCP_STATUS_READS_RECOM,
++ sizeof(zfcp_fsf_req_t), 0, 0);
++ if (retval) {
++ ZFCP_LOG_INFO(
++ "error: Status read request pool allocation failed\n");
++ goto out;
++ }
++
++ retval = zfcp_mem_pool_create(&adapter->pool.data_status_read,
++ ZFCP_STATUS_READS_RECOM,
++ sizeof(fsf_status_read_buffer_t), 0, 0);
++ if (retval) {
++ ZFCP_LOG_INFO(
++ "error: Status read buffer pool allocation failed\n");
++ goto out;
++ }
++
++ retval = zfcp_mem_pool_create(&adapter->pool.fsf_req_scsi,
++ 1, sizeof(struct zfcp_fsf_req_pool_buffer),
++ zfcp_scsi_low_mem_buffer_timeout_handler,
++ (unsigned long) adapter);
++ if (retval) {
++ ZFCP_LOG_INFO(
++ "error: FCP command buffer pool allocation failed\n");
++ goto out;
++ }
++
++out:
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
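++
++/*
++ * Of the five pools created above, only fsf_req_scsi is given a timeout
++ * callback (zfcp_scsi_low_mem_buffer_timeout_handler); the other four are
++ * plain pre-allocations sized for ERP, nameserver and status-read traffic.
++ */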
++
++
++/*
++ * function: zfcp_adapter_enqueue
++ *
++ * purpose: enqueues an adapter at the end of the adapter list
++ * in the driver data
++ * all adapter internal structures are set up
++ * proc-fs entries are also created
++ *
++ * returns: 0 if a new adapter was successfully enqueued
++ * ZFCP_KNOWN if an adapter with this devno was already present
++ * -ENOMEM if alloc failed
++ *
++ * locks:	proc_sema must be held to serialise changes to the adapter list
++ * zfcp_data.adapter_list_lock is taken and released several times
++ * within the function (must not be held on entry)
++ */
++static int zfcp_adapter_enqueue(devno_t devno, zfcp_adapter_t **adapter_p)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++
++ int retval = 0;
++ zfcp_adapter_t *adapter;
++ unsigned long flags;
++ char dbf_name[20];
++
++ ZFCP_LOG_TRACE(
++		"enter (devno=0x%04x, adapter_p=0x%lx)\n",
++ devno,
++ (unsigned long)adapter_p);
++
++ read_lock_irqsave(&zfcp_data.adapter_list_lock, flags);
++ ZFCP_FOR_EACH_ADAPTER(adapter) {
++ if (adapter->devno == devno) {
++ ZFCP_LOG_TRACE(
++ "Adapter with devno 0x%04x "
++ "already exists.\n",
++ devno);
++ retval = ZFCP_KNOWN;
++ break;
++ }
++ }
++ read_unlock_irqrestore(&zfcp_data.adapter_list_lock, flags);
++ if (retval == ZFCP_KNOWN)
++ goto known_adapter;
++
++ /*
++ * Note: It is safe to release the list_lock, as any list changes
++ * are protected by the proc_sema, which must be held to get here
++ */
++
++ /* try to allocate new adapter data structure (zeroed) */
++ adapter = ZFCP_KMALLOC(sizeof(zfcp_adapter_t), GFP_KERNEL);
++ if (!adapter) {
++ ZFCP_LOG_INFO(
++ "error: Allocation of base adapter "
++ "structure failed\n");
++ retval = -ENOMEM;
++ goto adapter_alloc_failed;
++ }
++
++ retval = zfcp_allocate_qdio_queues(adapter);
++ if (retval)
++ goto queues_alloc_failed;
++
++ retval = zfcp_allocate_low_mem_buffers(adapter);
++ if (retval)
++ goto failed_low_mem_buffers;
++
++ /* initialise list of ports */
++ rwlock_init(&adapter->port_list_lock);
++ INIT_LIST_HEAD(&adapter->port_list_head);
++
++ /* initialize list of fsf requests */
++ rwlock_init(&adapter->fsf_req_list_lock);
++ INIT_LIST_HEAD(&adapter->fsf_req_list_head);
++
++ /* initialize abort lock */
++ rwlock_init(&adapter->abort_lock);
++
++ /* initialise scsi faking structures */
++ rwlock_init(&adapter->fake_list_lock);
++ init_timer(&adapter->fake_scsi_timer);
++
++ /* initialise some erp stuff */
++ init_waitqueue_head(&adapter->erp_thread_wqh);
++ init_waitqueue_head(&adapter->erp_done_wqh);
++
++ /* initialize lock of associated request queue */
++ rwlock_init(&adapter->request_queue.queue_lock);
++
++	/* initialise SCSI ER timer */
++ init_timer(&adapter->scsi_er_timer);
++
++ /* save devno */
++ adapter->devno = devno;
++
++ /* set FC service class used per default */
++ adapter->fc_service_class = ZFCP_FC_SERVICE_CLASS_DEFAULT;
++
++#ifdef ZFCP_DEBUG_REQUESTS
++ /* debug feature area which records fsf request sequence numbers */
++	sprintf(dbf_name, ZFCP_REQ_DBF_NAME "0x%04x", adapter->devno);
++ adapter->req_dbf = debug_register(
++ dbf_name,
++ ZFCP_REQ_DBF_INDEX,
++ ZFCP_REQ_DBF_AREAS,
++ ZFCP_REQ_DBF_LENGTH);
++ if (!adapter->req_dbf) {
++ ZFCP_LOG_INFO(
++ "error: Out of resources. Request debug feature for "
++ "adapter with devno 0x%04x could not be generated.\n",
++ adapter->devno);
++ retval = -ENOMEM;
++ goto failed_req_dbf;
++ }
++ debug_register_view(adapter->req_dbf, &debug_hex_ascii_view);
++ debug_set_level(adapter->req_dbf, ZFCP_REQ_DBF_LEVEL);
++ debug_text_event(adapter->req_dbf, 1, "zzz");
++#endif /* ZFCP_DEBUG_REQUESTS */
++
++#ifdef ZFCP_DEBUG_COMMANDS
++ /* debug feature area which records SCSI command failures (hostbyte) */
++ rwlock_init(&adapter->cmd_dbf_lock);
++ sprintf(dbf_name, ZFCP_CMD_DBF_NAME"0x%04x", adapter->devno);
++ adapter->cmd_dbf = debug_register(
++ dbf_name,
++ ZFCP_CMD_DBF_INDEX,
++ ZFCP_CMD_DBF_AREAS,
++ ZFCP_CMD_DBF_LENGTH);
++ if (!adapter->cmd_dbf) {
++ ZFCP_LOG_INFO(
++ "error: Out of resources. Command debug feature for "
++ "adapter with devno 0x%04x could not be generated.\n",
++ adapter->devno);
++ retval = -ENOMEM;
++ goto failed_cmd_dbf;
++ }
++ debug_register_view(adapter->cmd_dbf, &debug_hex_ascii_view);
++ debug_set_level(adapter->cmd_dbf, ZFCP_CMD_DBF_LEVEL);
++#endif /* ZFCP_DEBUG_COMMANDS */
++
++#ifdef ZFCP_DEBUG_ABORTS
++ /* debug feature area which records SCSI command aborts */
++ sprintf(dbf_name, ZFCP_ABORT_DBF_NAME"0x%04x", adapter->devno);
++ adapter->abort_dbf = debug_register(
++ dbf_name,
++ ZFCP_ABORT_DBF_INDEX,
++ ZFCP_ABORT_DBF_AREAS,
++ ZFCP_ABORT_DBF_LENGTH);
++ if (!adapter->abort_dbf) {
++ ZFCP_LOG_INFO(
++ "error: Out of resources. Abort debug feature for "
++ "adapter with devno 0x%04x could not be generated.\n",
++ adapter->devno);
++ retval = -ENOMEM;
++ goto failed_abort_dbf;
++ }
++ debug_register_view(adapter->abort_dbf, &debug_hex_ascii_view);
++ debug_set_level(adapter->abort_dbf, ZFCP_ABORT_DBF_LEVEL);
++#endif /* ZFCP_DEBUG_ABORTS */
++
++#ifdef ZFCP_DEBUG_INCOMING_ELS
++	/* debug feature area which records incoming ELS */
++ sprintf(dbf_name, ZFCP_IN_ELS_DBF_NAME"0x%04x", adapter->devno);
++ adapter->in_els_dbf = debug_register(
++ dbf_name,
++ ZFCP_IN_ELS_DBF_INDEX,
++ ZFCP_IN_ELS_DBF_AREAS,
++ ZFCP_IN_ELS_DBF_LENGTH);
++ if (!adapter->in_els_dbf) {
++ ZFCP_LOG_INFO(
++ "error: Out of resources. ELS debug feature for "
++ "adapter with devno 0x%04x could not be generated.\n",
++ adapter->devno);
++ retval = -ENOMEM;
++ goto failed_in_els_dbf;
++ }
++ debug_register_view(adapter->in_els_dbf, &debug_hex_ascii_view);
++ debug_set_level(adapter->in_els_dbf, ZFCP_IN_ELS_DBF_LEVEL);
++#endif /* ZFCP_DEBUG_INCOMING_ELS */
++
++ sprintf(dbf_name, ZFCP_ERP_DBF_NAME"0x%04x", adapter->devno);
++ adapter->erp_dbf = debug_register(
++ dbf_name,
++ ZFCP_ERP_DBF_INDEX,
++ ZFCP_ERP_DBF_AREAS,
++ ZFCP_ERP_DBF_LENGTH);
++ if (!adapter->erp_dbf) {
++ ZFCP_LOG_INFO(
++ "error: Out of resources. ERP debug feature for "
++ "adapter with devno 0x%04x could not be generated.\n",
++ adapter->devno);
++ retval = -ENOMEM;
++ goto failed_erp_dbf;
++ }
++ debug_register_view(adapter->erp_dbf, &debug_hex_ascii_view);
++ debug_set_level(adapter->erp_dbf, ZFCP_ERP_DBF_LEVEL);
++
++ /* Init proc structures */
++#ifdef CONFIG_PROC_FS
++ ZFCP_LOG_TRACE("Generating proc entry....\n");
++ retval = zfcp_create_adapter_proc(adapter);
++ if (retval) {
++ ZFCP_LOG_INFO(
++ "error: Out of resources. "
++ "proc-file entries for adapter with "
++ "devno 0x%04x could not be generated\n",
++ adapter->devno);
++ goto proc_failed;
++ }
++ ZFCP_LOG_TRACE("Proc entry created.\n");
++#endif
++
++ retval = zfcp_erp_thread_setup(adapter);
++ if (retval) {
++ ZFCP_LOG_INFO(
++ "error: out of resources. "
++ "error recovery thread for the adapter with "
++ "devno 0x%04x could not be started\n",
++ adapter->devno);
++ goto thread_failed;
++ }
++
++#ifndef ZFCP_PARANOIA_DEAD_CODE
++ /* set magics */
++ adapter->common_magic = ZFCP_MAGIC;
++ adapter->specific_magic = ZFCP_MAGIC_ADAPTER;
++#endif
++
++#ifdef ZFCP_STAT_REQSIZES
++ rwlock_init(&adapter->stat_lock);
++ atomic_set(&adapter->stat_on, 0);
++ atomic_set(&adapter->stat_errors, 0);
++ INIT_LIST_HEAD(&adapter->read_req_head);
++ INIT_LIST_HEAD(&adapter->write_req_head);
++#endif
++
++ /* put allocated adapter at list tail */
++ write_lock_irqsave(&zfcp_data.adapter_list_lock, flags);
++ list_add_tail(&adapter->list, &zfcp_data.adapter_list_head);
++ zfcp_data.adapters++;
++ write_unlock_irqrestore(&zfcp_data.adapter_list_lock, flags);
++
++ sprintf(adapter->name, "0x%04x", adapter->devno);
++ ASCEBC(adapter->name, strlen(adapter->name));
++
++ atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &adapter->status);
++
++ ZFCP_LOG_TRACE(
++ "adapter allocated at 0x%lx, %i adapters in list\n",
++ (unsigned long)adapter,
++ zfcp_data.adapters);
++ goto out;
++
++thread_failed:
++ zfcp_delete_adapter_proc(adapter);
++
++proc_failed:
++ debug_unregister(adapter->erp_dbf);
++
++failed_erp_dbf:
++#ifdef ZFCP_DEBUG_INCOMING_ELS
++ debug_unregister(adapter->in_els_dbf);
++failed_in_els_dbf:
++#endif
++
++#ifdef ZFCP_DEBUG_ABORTS
++ debug_unregister(adapter->abort_dbf);
++failed_abort_dbf:
++#endif
++
++#ifdef ZFCP_DEBUG_COMMANDS
++ debug_unregister(adapter->cmd_dbf);
++failed_cmd_dbf:
++#endif
++
++#ifdef ZFCP_DEBUG_REQUESTS
++ debug_unregister(adapter->req_dbf);
++failed_req_dbf:
++#endif
++
++failed_low_mem_buffers:
++ zfcp_free_low_mem_buffers(adapter);
++ zfcp_free_qdio_queues(adapter);
++
++queues_alloc_failed:
++ ZFCP_LOG_TRACE(
++ "freeing adapter struct 0x%lx\n",
++ (unsigned long) adapter);
++	/* note: sizeof(typeof(adapter)) would only yield the pointer size */
++	ZFCP_KFREE(adapter, sizeof(zfcp_adapter_t));
++
++adapter_alloc_failed:
++ adapter = NULL;
++
++known_adapter:
++out:
++ *adapter_p = adapter;
++ ZFCP_LOG_TRACE("exit (%d)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
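++
++/*
++ * The error path above unwinds in strict reverse order of setup via the
++ * goto ladder: each "failed_*"/"*_failed" label releases exactly the
++ * resources acquired before the failing step, so a failure at any point
++ * leaves no allocation behind.
++ */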
++
++
++/*
++ * function: zfcp_adapter_dequeue
++ *
++ * purpose: dequeues the specified adapter from the list in the driver data
++ *
++ * returns: 0 - zfcp_adapter_t data structure successfully removed
++ * !0 - zfcp_adapter_t data structure could not be removed
++ * (e.g. still used)
++ *
++ * locks: adapter list write lock is assumed to be held by caller
++ * adapter->fsf_req_list_lock is taken and released within this
++ * function and must not be held on entry
++ */
++static int zfcp_adapter_dequeue(zfcp_adapter_t *adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++
++ int retval = 0;
++ unsigned long flags;
++
++ ZFCP_LOG_TRACE("enter (adapter=0x%lx)\n", (unsigned long)adapter);
++
++ /*
++ * sanity check:
++ * I/O interrupt should be disabled, leave if not
++ */
++
++ /* Note: no adapter_list_lock is needed as we have the proc_sema */
++
++ /* sanity check: valid adapter data structure address */
++ if (!adapter) {
++ ZFCP_LOG_NORMAL(
++ "bug: Pointer to an adapter struct is a null "
++ "pointer\n");
++ retval = -EINVAL;
++ goto out;
++ }
++
++ /* sanity check: no remote ports pending */
++ if (adapter->ports) {
++ ZFCP_LOG_NORMAL(
++ "bug: Adapter with devno 0x%04x is still in use, "
++ "%i remote ports are still existing "
++ "(debug info 0x%lx)\n",
++ adapter->devno,
++ adapter->ports,
++ (unsigned long)adapter);
++ retval = -EBUSY;
++ goto out;
++ }
++
++ /* sanity check: no pending FSF requests */
++ read_lock_irqsave(&adapter->fsf_req_list_lock, flags);
++
++ retval = !list_empty(&adapter->fsf_req_list_head);
++
++ read_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
++ if (retval) {
++ ZFCP_LOG_NORMAL(
++ "bug: Adapter with devno 0x%04x is still in use, "
++ "%i requests are still outstanding "
++ "(debug info 0x%lx)\n",
++ adapter->devno,
++ atomic_read(&adapter->fsf_reqs_active),
++ (unsigned long)adapter);
++ retval = -EBUSY;
++ goto out;
++ }
++
++ /* remove specified adapter data structure from list */
++ list_del(&adapter->list);
++
++ /* decrease number of adapters in list */
++ zfcp_data.adapters--;
++
++ ZFCP_LOG_TRACE(
++ "adapter 0x%lx removed from list, "
++ "%i adapters still in list\n",
++ (unsigned long)adapter,
++ zfcp_data.adapters);
++
++ retval = zfcp_erp_thread_kill(adapter);
++
++#ifdef ZFCP_STAT_REQSIZES
++ zfcp_statistics_clear(adapter, &adapter->read_req_head);
++ zfcp_statistics_clear(adapter, &adapter->write_req_head);
++#endif
++
++ zfcp_delete_adapter_proc(adapter);
++ ZFCP_LOG_TRACE("Proc entry removed.\n");
++
++ debug_unregister(adapter->erp_dbf);
++
++#ifdef ZFCP_DEBUG_REQUESTS
++ debug_unregister(adapter->req_dbf);
++#endif
++
++#ifdef ZFCP_DEBUG_COMMANDS
++ debug_unregister(adapter->cmd_dbf);
++#endif
++
++#ifdef ZFCP_DEBUG_ABORTS
++ debug_unregister(adapter->abort_dbf);
++#endif
++
++#ifdef ZFCP_DEBUG_INCOMING_ELS
++ debug_unregister(adapter->in_els_dbf);
++#endif
++
++
++ zfcp_free_low_mem_buffers(adapter);
++ /* free memory of adapter data structure and queues */
++ zfcp_free_qdio_queues(adapter);
++ ZFCP_LOG_TRACE("Freeing adapter structure.\n");
++ ZFCP_KFREE(adapter, sizeof(zfcp_adapter_t));
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_port_enqueue
++ *
++ * purpose: enqueues a remote port at the end of the port list
++ * associated with the specified adapter
++ * all port-internal structures are set up and the proc-fs
++ * entry is allocated as well
++ * some SCSI-stack structures are modified for the port
++ *
++ * returns: 0 if a new port was successfully enqueued
++ * ZFCP_KNOWN if a port with the requested wwpn already exists
++ * -ENOMEM if allocation failed
++ * -EINVAL if at least one of the specified parameters was wrong
++ *
++ * locks: proc_sema must be held to serialise changes to the port list
++ * adapter->port_list_lock is taken and released several times
++ * within this function (must not be held on entry)
++ */
++static int
++ zfcp_port_enqueue(
++ zfcp_adapter_t *adapter,
++ scsi_id_t scsi_id,
++ wwn_t wwpn,
++ u32 status,
++ zfcp_port_t **port_p)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++
++ zfcp_port_t *port = NULL;
++ int check_scsi_id, check_wwpn;
++ unsigned long flags;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter=0x%lx scsi_id=%i wwpn=0x%Lx status=0x%x)\n",
++ (unsigned long)adapter,
++ scsi_id,
++ (llui_t)wwpn,
++ status);
++
++ /* to check that there is not a port with either this
++ * SCSI ID or WWPN already in list
++ */
++ check_scsi_id = !(status & ZFCP_STATUS_PORT_NO_SCSI_ID);
++ check_wwpn = !(status & ZFCP_STATUS_PORT_NO_WWPN);
++
++ if (check_scsi_id && check_wwpn) {
++ read_lock_irqsave(&adapter->port_list_lock, flags);
++ ZFCP_FOR_EACH_PORT(adapter, port) {
++ if ((port->scsi_id != scsi_id) && (port->wwpn != wwpn))
++ continue;
++ if ((port->scsi_id == scsi_id) && (port->wwpn == wwpn)) {
++ ZFCP_LOG_TRACE(
++ "Port with SCSI ID 0x%x and WWPN 0x%016Lx already in list\n",
++ scsi_id, (llui_t)wwpn);
++ retval = ZFCP_KNOWN;
++ read_unlock_irqrestore(&adapter->port_list_lock, flags);
++ goto known_port;
++ }
++ ZFCP_LOG_NORMAL(
++ "user error: new mapping 0x%x:0x%016Lx "
++ "does not match existing mapping 0x%x:0x%016Lx "
++ "(adapter devno 0x%04x)\n",
++ scsi_id,
++ (llui_t)wwpn,
++ port->scsi_id,
++ (llui_t)port->wwpn,
++ port->adapter->devno);
++ retval = -EINVAL;
++ read_unlock_irqrestore(&adapter->port_list_lock, flags);
++ goto match_failed;
++ }
++ read_unlock_irqrestore(&adapter->port_list_lock, flags);
++ }
++
++ /*
++ * Note: It is safe to release the list_lock, as any list changes
++ * are protected by the proc_sema, which must be held to get here
++ */
++
++ /* try to allocate new port data structure (zeroed) */
++ port = ZFCP_KMALLOC(sizeof(zfcp_port_t), GFP_KERNEL);
++ if (!port) {
++ ZFCP_LOG_INFO(
++ "error: Allocation of port struct failed. "
++ "Out of memory.\n");
++ retval = -ENOMEM;
++ goto port_alloc_failed;
++ }
++
++ /* initialize unit list */
++ rwlock_init(&port->unit_list_lock);
++ INIT_LIST_HEAD(&port->unit_list_head);
++
++ /* save pointer to "parent" adapter */
++ port->adapter = adapter;
++
++ /* save SCSI ID */
++ if (check_scsi_id)
++ port->scsi_id = scsi_id;
++
++ /* save WWPN */
++ if (check_wwpn)
++ port->wwpn = wwpn;
++
++ /* save initial status */
++ atomic_set_mask(status, &port->status);
++
++#ifndef ZFCP_PARANOIA_DEAD_CODE
++ /* set magics */
++ port->common_magic = ZFCP_MAGIC;
++ port->specific_magic = ZFCP_MAGIC_PORT;
++#endif
++
++ /* Init proc structures */
++#ifdef CONFIG_PROC_FS
++ ZFCP_LOG_TRACE("Generating proc entry....\n");
++ retval = zfcp_create_port_proc(port);
++ if (retval)
++ goto proc_failed;
++ ZFCP_LOG_TRACE("Proc entry created.\n");
++#endif
++
++ if (check_scsi_id) {
++ /*
++ * update max. SCSI ID of remote ports attached to
++ * "parent" adapter if necessary
++ * (do not care about the adapters own SCSI ID)
++ */
++ if (adapter->max_scsi_id < scsi_id) {
++ adapter->max_scsi_id = scsi_id;
++ ZFCP_LOG_TRACE(
++ "max. SCSI ID of adapter 0x%lx now %i\n",
++ (unsigned long)adapter,
++ scsi_id);
++ }
++ /*
++ * update max. SCSI ID of remote ports attached to
++ * "parent" host (SCSI stack) if necessary
++ */
++ if (adapter->scsi_host &&
++ (adapter->scsi_host->max_id < (scsi_id + 1))) {
++ adapter->scsi_host->max_id = scsi_id + 1;
++ ZFCP_LOG_TRACE(
++ "max. SCSI ID of ports attached "
++ "via host # %d now %i\n",
++ adapter->scsi_host->host_no,
++ adapter->scsi_host->max_id);
++ }
++ }
++
++ /* port is allocated, enqueue it */
++ write_lock_irqsave(&adapter->port_list_lock, flags);
++ list_add_tail(&port->list, &adapter->port_list_head);
++ adapter->ports++;
++ write_unlock_irqrestore(&adapter->port_list_lock, flags);
++
++ atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);
++
++ /* ignore nameserver port */
++ if (port->wwpn != 0)
++ zfcp_callback_do_port_add(NULL, adapter, port);
++
++ ZFCP_LOG_TRACE(
++ "port allocated at 0x%lx, %i ports in list "
++ "of adapter 0x%lx\n",
++ (unsigned long)port,
++ adapter->ports,
++ (unsigned long)adapter);
++ goto out;
++
++proc_failed:
++ ZFCP_KFREE(port, sizeof(zfcp_port_t));
++ ZFCP_LOG_TRACE(
++ "freeing port struct 0x%lx\n",
++ (unsigned long) port);
++
++port_alloc_failed:
++match_failed:
++ port = NULL;
++
++known_port:
++out:
++ *port_p = port;
++ ZFCP_LOG_TRACE("exit (%d)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_port_dequeue
++ *
++ * purpose: dequeues the specified port from the list of the
++ * "parent" adapter
++ *
++ * returns: 0 - zfcp_port_t data structure successfully removed
++ * !0 - zfcp_port_t data structure could not be removed
++ * (e.g. still used)
++ *
++ * locks : port list write lock is assumed to be held by caller
++ */
++static int zfcp_port_dequeue(zfcp_port_t *port)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter (port=0x%lx)\n", (unsigned long)port);
++
++ /* sanity check: valid port data structure address (simple check) */
++ if (!port) {
++ ZFCP_LOG_NORMAL(
++ "bug: Pointer to a port struct is a null "
++ "pointer\n");
++ retval = -EINVAL;
++ goto out;
++ }
++
++ /*
++ * sanity check:
++ * leave if the required list lock is not held;
++ * we cannot tell whether it is held by the calling routine (required!)
++ * protecting this critical area or by someone else (must not occur!),
++ * but a lock held by no one at all is definitely wrong
++ */
++ if (!spin_is_locked(&port->adapter->port_list_lock)) {
++ ZFCP_LOG_NORMAL("bug: Port list lock not held "
++ "(debug info 0x%lx)\n",
++ (unsigned long) port);
++ retval = -EPERM;
++ goto out;
++ }
++
++ /* sanity check: no logical units pending */
++ if (port->units) {
++ ZFCP_LOG_NORMAL(
++ "bug: Port with SCSI-id 0x%x is still in use, "
++ "%i units (LUNs) are still existing "
++ "(debug info 0x%lx)\n",
++ port->scsi_id,
++ port->units,
++ (unsigned long)port);
++ retval = -EBUSY;
++ goto out;
++ }
++
++ /* remove specified port data structure from list */
++ list_del(&port->list);
++
++ /* decrease number of ports in list */
++ port->adapter->ports--;
++
++ ZFCP_LOG_TRACE(
++ "port 0x%lx removed from list of adapter 0x%lx, "
++ "%i ports still in list\n",
++ (unsigned long)port,
++ (unsigned long)port->adapter,
++ port->adapter->ports);
++
++ /* free memory of port data structure */
++ ZFCP_LOG_TRACE("Deleting proc entry......\n");
++ zfcp_delete_port_proc(port);
++ ZFCP_LOG_TRACE("Proc entry removed.\n");
++ ZFCP_KFREE(port, sizeof(zfcp_port_t));
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_scsi_low_mem_buffer_timeout_handler
++ *
++ * purpose: This function needs to be called whenever the SCSI command
++ * in the low memory buffer does not return.
++ * Re-opening the adapter means that the command can be returned
++ * by zfcp (it is guaranteed that it does not return via the
++ * adapter anymore). The buffer can then be used again.
++ *
++ * returns: nothing
++ */
++static void zfcp_scsi_low_mem_buffer_timeout_handler(unsigned long data)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ zfcp_adapter_t *adapter = (zfcp_adapter_t *)data ;
++
++ ZFCP_LOG_TRACE("enter (data=0x%lx)\n",
++ (unsigned long) data);
++ /*DEBUG*/
++ ZFCP_LOG_INFO("*****************************mem_timeout******************************\n");
++ zfcp_erp_adapter_reopen(adapter, 0);
++ ZFCP_LOG_TRACE("exit\n");
++
++ return;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_scsi_er_timeout_handler
++ *
++ * purpose: This function needs to be called whenever a SCSI error recovery
++ * action (abort/reset) does not return.
++ * Re-opening the adapter means that the action can be returned
++ * by zfcp (it is guaranteed that it does not return via the
++ * adapter anymore).
++ *
++ * returns: nothing
++ */
++static void zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ zfcp_adapter_t *adapter = (zfcp_adapter_t *)data;
++
++ ZFCP_LOG_TRACE("enter (data=0x%lx)\n",
++ (unsigned long) data);
++ /*DEBUG*/
++ ZFCP_LOG_INFO("*****************************er_timeout******************************\n");
++ zfcp_erp_adapter_reopen(adapter, 0);
++ ZFCP_LOG_TRACE("exit\n");
++
++ return;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * memory pool implementation
++ * the first four functions (element_alloc, element_free, element_get, element_put)
++ * are for internal use;
++ * the other four functions (create, destroy, find, return) form the external
++ * interface which should be used by exploiters of the memory pool
++ */
++
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++/* associate a buffer with the specified memory pool element */
++static inline int zfcp_mem_pool_element_alloc(
++ zfcp_mem_pool_t *pool,
++ int index)
++{
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (pool=0x%lx, index=%i)\n",
++ (unsigned long)pool,
++ index);
++
++ pool->element[index].buffer = ZFCP_KMALLOC(pool->size, GFP_KERNEL);
++ if (!pool->element[index].buffer) {
++ retval = -ENOMEM;
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++}
++
++
++/* release the buffer associated with the specified memory pool element */
++static inline int zfcp_mem_pool_element_free(
++ zfcp_mem_pool_t *pool,
++ int index)
++{
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (pool=0x%lx, index=%i)\n",
++ (unsigned long)pool,
++ index);
++
++ if (atomic_read(&pool->element[index].use) != 0) {
++ ZFCP_LOG_NORMAL("bug: memory pool is in use\n");
++ retval = -EINVAL;
++ } else if (pool->element[index].buffer)
++ ZFCP_KFREE(pool->element[index].buffer, pool->size);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++}
++
++
++/* try to get hold of buffer associated with the specified memory pool element */
++static inline void *zfcp_mem_pool_element_get(
++ zfcp_mem_pool_t *pool,
++ int index)
++{
++ void *buffer;
++
++ ZFCP_LOG_TRACE(
++ "enter (pool=0x%lx, index=%i)\n",
++ (unsigned long)pool,
++ index);
++
++ ZFCP_LOG_DEBUG("buffer=0x%lx\n",
++ (unsigned long)pool->element[index].buffer);
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_DEBUG,
++ (char*)pool->element,
++ pool->entries * sizeof(zfcp_mem_pool_element_t));
++
++ if (atomic_compare_and_swap(0, 1, &pool->element[index].use))
++ buffer = NULL;
++ else {
++ memset(pool->element[index].buffer, 0, pool->size);
++ buffer = pool->element[index].buffer;
++ }
++
++
++ ZFCP_LOG_TRACE("exit (0x%lx)\n", (unsigned long)buffer);
++
++ return buffer;
++}
++
++
++/* mark buffer associated with the specified memory pool element as available */
++static inline int zfcp_mem_pool_element_put(
++ zfcp_mem_pool_t *pool,
++ int index)
++{
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (pool=0x%lx, index=%i)\n",
++ (unsigned long)pool,
++ index);
++
++ if (atomic_compare_and_swap(1, 0, &pool->element[index].use)) {
++ ZFCP_LOG_NORMAL("bug: memory pool is broken (element not in use)\n");
++ retval = -EINVAL;
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++}
++
++
++/*
++ * creation of a new memory pool including setup of management data structures
++ * as well as allocation of memory pool elements
++ * (this routine does not clean up partially set up pools; instead, the
++ * corresponding destroy routine should be called)
++ */
++static inline int zfcp_mem_pool_create(zfcp_mem_pool_t *pool,
++ int entries, int size,
++ void (*function) (unsigned long),
++ unsigned long data)
++{
++ int retval = 0;
++ int i;
++
++ ZFCP_LOG_TRACE(
++ "enter (pool=0x%lx, entries=%i, size=%i)\n",
++ (unsigned long)pool,
++ entries,
++ size);
++
++ if (pool->element || pool->entries) {
++ ZFCP_LOG_NORMAL("bug: memory pool is broken (pool is in use)\n");
++ retval = -EINVAL;
++ goto out;
++ }
++
++ pool->element = ZFCP_KMALLOC(entries * sizeof(zfcp_mem_pool_element_t),
++ GFP_KERNEL);
++ if (!pool->element) {
++ ZFCP_LOG_NORMAL("warning: memory pool not avalaible\n");
++ retval = -ENOMEM;
++ goto out;
++ }
++ /* ensure that the use flags start out as 0 */
++ memset(pool->element, 0, entries * sizeof(zfcp_mem_pool_element_t));
++ pool->entries = entries;
++ pool->size = size;
++
++ for (i = 0; i < entries; i++) {
++ retval = zfcp_mem_pool_element_alloc(pool, i);
++ if (retval) {
++ ZFCP_LOG_NORMAL("warning: memory pool not avalaible\n");
++ retval = -ENOMEM;
++ goto out;
++ }
++ }
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_DEBUG,
++ (char*)pool->element,
++ entries * sizeof(zfcp_mem_pool_element_t));
++
++ init_timer(&pool->timer);
++ pool->timer.function = function;
++ pool->timer.data = data;
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++}
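++
++/*
++ * The timer initialised above follows the usual 2.4 timer life cycle.
++ * A hedged kernel-side sketch of that cycle (not stand-alone; assumes
++ * <linux/timer.h>; handler and object names are invented):
++ */
++#if 0
++static struct timer_list my_timer;
++
++static void my_handler(unsigned long data); /* runs once, after expiry */
++
++static void arm_example(unsigned long my_object)
++{
++ init_timer(&my_timer); /* prepare the management fields */
++ my_timer.function = my_handler;
++ my_timer.data = my_object; /* handed to my_handler on expiry */
++ my_timer.expires = jiffies + HZ; /* fire in about one second */
++ add_timer(&my_timer); /* arm */
++}
++
++static void disarm_example(void)
++{
++ del_timer(&my_timer); /* disarm if it has not fired yet */
++}
++#endif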
++
++
++/*
++ * give up memory pool with all its memory pool elements as well as
++ * data structures used for management purposes
++ * (this routine is able to handle partially allocated memory pools)
++ */
++static inline int zfcp_mem_pool_destroy(
++ zfcp_mem_pool_t *pool)
++{
++ int retval = 0;
++ int i;
++
++ ZFCP_LOG_TRACE(
++ "enter (pool=0x%lx)\n",
++ (unsigned long)pool);
++
++ for (i = 0; i < pool->entries; i++)
++ retval |= zfcp_mem_pool_element_free(pool, i);
++
++ if (pool->element)
++ ZFCP_KFREE(pool->element,
++ pool->entries * sizeof(zfcp_mem_pool_element_t));
++
++ pool->element = NULL;
++ pool->entries = 0;
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++}
++
++
++/*
++ * try to find next available element in the specified memory pool,
++ * on success get hold of buffer associated with the selected element
++ */
++static inline void* zfcp_mem_pool_find(
++ zfcp_mem_pool_t *pool)
++{
++ void *buffer = NULL;
++ int i;
++
++ ZFCP_LOG_TRACE(
++ "enter (pool=0x%lx)\n",
++ (unsigned long)pool);
++
++ for (i = 0; i < pool->entries; i++) {
++ buffer = zfcp_mem_pool_element_get(pool, i);
++ if (buffer)
++ break;
++ }
++
++ if (buffer && pool->timer.function) {
++ /*
++ * watch low mem buffer
++ * Note: Take care if more than 1 timer is active.
++ * The first expired timer has to delete all other
++ * timers. (See ZFCP_SCSI_LOW_MEM_TIMEOUT and
++ * ZFCP_SCSI_ER_TIMEOUT)
++ */
++ pool->timer.expires = jiffies + ZFCP_SCSI_LOW_MEM_TIMEOUT;
++ add_timer(&pool->timer);
++ }
++
++ ZFCP_LOG_TRACE("exit (0x%lx)\n", (unsigned long)buffer);
++
++ return buffer;
++}
++
++
++/*
++ * make buffer available to memory pool again,
++ * (since buffers are identified by their address rather than by the
++ * memory pool element they belong to, this requires a search for the
++ * right element of the given memory pool)
++ */
++static inline int zfcp_mem_pool_return(void *buffer, zfcp_mem_pool_t *pool)
++{
++ int retval = 0;
++ int i;
++
++ ZFCP_LOG_TRACE(
++ "enter (buffer=0x%lx, pool=0x%lx)\n",
++ (unsigned long)buffer,
++ (unsigned long)pool);
++
++ if (pool->timer.function) {
++ del_timer(&pool->timer);
++ }
++
++ for (i = 0; i < pool->entries; i++) {
++ if (buffer == pool->element[i].buffer) {
++ retval = zfcp_mem_pool_element_put(pool, i);
++ goto out;
++ }
++ }
++
++ if (i == pool->entries) {
++ ZFCP_LOG_NORMAL("bug: memory pool is broken (buffer not found)\n");
++ retval = -EINVAL;
++ }
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++}
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++
++/* end of memory pool implementation */
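++
++/*
++ * A minimal user-space analogue of the pool above, showing the
++ * claim/release scheme: one "use" flag per element, a linear scan in
++ * find, a pointer comparison in return. Illustration only: the real
++ * pool flips the flag with atomic compare-and-swap so that concurrent
++ * callers cannot claim the same element, and it also arms the timer.
++ */
++#if 0
++#include <stdlib.h>
++#include <string.h>
++
++struct element { void *buffer; int use; };
++struct pool { struct element *element; int entries; size_t size; };
++
++static void *pool_find(struct pool *p)
++{
++ int i;
++
++ for (i = 0; i < p->entries; i++)
++ if (!p->element[i].use) { /* claim first free element */
++ p->element[i].use = 1;
++ memset(p->element[i].buffer, 0, p->size);
++ return p->element[i].buffer;
++ }
++ return NULL; /* pool exhausted */
++}
++
++static int pool_return(struct pool *p, void *buffer)
++{
++ int i;
++
++ for (i = 0; i < p->entries; i++)
++ if (p->element[i].buffer == buffer) {
++ p->element[i].use = 0; /* release */
++ return 0;
++ }
++ return -1; /* buffer is not from this pool */
++}
++#endif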
++
++/*
++ * function: zfcp_fsf_req_alloc
++ *
++ * purpose: Obtains an fsf_req and potentially a qtcb (for all but
++ * unsolicited requests) via helper functions
++ * Does some initial fsf request set-up.
++ *
++ * returns: pointer to allocated fsf_req if successful
++ * NULL otherwise
++ *
++ * locks: none
++ *
++ */
++static zfcp_fsf_req_t *zfcp_fsf_req_alloc(zfcp_mem_pool_t *pool, int flags,
++ int kmalloc_flags)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ zfcp_fsf_req_t *fsf_req = NULL;
++
++ if (!(flags & ZFCP_REQ_USE_MEMPOOL)) {
++ fsf_req = ZFCP_KMALLOC(sizeof(struct zfcp_fsf_req_pool_buffer),
++ kmalloc_flags);
++ }
++
++ if (!fsf_req && pool) {
++ fsf_req = zfcp_mem_pool_find(pool);
++ if (fsf_req) {
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_POOL;
++ fsf_req->pool = pool;
++ }
++ }
++
++ if (!fsf_req) {
++ ZFCP_LOG_DEBUG("error: Out of memory. Allocation of FSF "
++ "request structure failed\n");
++ }
++
++ return fsf_req;
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
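++
++/*
++ * In short, the allocation strategy above: try the normal allocator
++ * first (unless the caller insists on the pool), then fall back to the
++ * emergency pool; a pool-supplied request is tagged so that the free
++ * routine below knows where to return it. Condensed (illustration only):
++ *
++ * req = kmalloc(...); normal path
++ * if (!req && pool) {
++ * req = zfcp_mem_pool_find(pool); low-memory path
++ * req->status |= ZFCP_STATUS_FSFREQ_POOL; remembered for req_free
++ * }
++ */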
++
++
++/*
++ * function: zfcp_fsf_req_free
++ *
++ * purpose: Frees the memory of an fsf_req (and potentially a qtcb) or
++ * returns it to the pool via helper functions.
++ *
++ * returns: 0 on success, !0 if returning the buffer to the pool failed
++ *
++ * locks: none
++ */
++static inline int zfcp_fsf_req_free(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx)\n",
++ (unsigned long)fsf_req);
++
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_POOL) {
++ retval = zfcp_mem_pool_return(fsf_req, fsf_req->pool);
++ } else {
++ ZFCP_KFREE(fsf_req, sizeof(struct zfcp_fsf_req_pool_buffer));
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_unit_enqueue
++ *
++ * purpose: enqueues a logical unit at the end of the unit list
++ * associated with the specified port
++ * also sets up unit internal structures
++ *
++ * returns: 0 if a new unit was successfully enqueued
++ * -ENOMEM if the allocation failed
++ * -EINVAL if at least one specified parameter was faulty
++ *
++ * locks: proc_sema must be held to serialise changes to the unit list
++ * port->unit_list_lock is taken and released several times
++ */
++static int
++ zfcp_unit_enqueue(
++ zfcp_port_t *port,
++ scsi_lun_t scsi_lun,
++ fcp_lun_t fcp_lun,
++ zfcp_unit_t **unit_p)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++
++ zfcp_unit_t *unit;
++ int retval = 0;
++ unsigned long flags;
++
++ ZFCP_LOG_TRACE(
++ "enter (port=0x%lx scsi_lun=%i fcp_lun=0x%Lx)\n",
++ (unsigned long)port, scsi_lun, (llui_t)fcp_lun);
++
++ /*
++ * check that there is no unit with either this
++ * SCSI LUN or FCP_LUN already in list
++ * Note: Unlike for the adapter and the port, this is an error
++ */
++ read_lock_irqsave(&port->unit_list_lock, flags);
++ ZFCP_FOR_EACH_UNIT(port, unit) {
++ if (unit->scsi_lun == scsi_lun) {
++ ZFCP_LOG_NORMAL(
++ "Warning: A Unit with SCSI LUN 0x%x already "
++ "exists. Skipping record.\n",
++ scsi_lun);
++ retval = -EINVAL;
++ break;
++ } else if (unit->fcp_lun == fcp_lun) {
++ ZFCP_LOG_NORMAL(
++ "Warning: A Unit with FCP_LUN 0x%016Lx is already present. "
++ "Record was ignored\n",
++ (llui_t)fcp_lun);
++ retval = -EINVAL;
++ break;
++ }
++ }
++ read_unlock_irqrestore(&port->unit_list_lock, flags);
++ if (retval == -EINVAL)
++ goto known_unit;
++
++ /* try to allocate new unit data structure (zeroed) */
++ unit = ZFCP_KMALLOC(sizeof(zfcp_unit_t), GFP_KERNEL);
++ if (!unit) {
++ ZFCP_LOG_INFO("error: Allocation of unit struct failed. "
++ "Out of memory.\n");
++ retval = -ENOMEM;
++ goto unit_alloc_failed;
++ }
++
++ /* save pointer to "parent" port */
++ unit->port = port;
++
++ /* save SCSI LUN */
++ unit->scsi_lun = scsi_lun;
++
++ /* save FCP_LUN */
++ unit->fcp_lun = fcp_lun;
++
++#ifndef ZFCP_PARANOIA_DEAD_CODE
++ /* set magics */
++ unit->common_magic = ZFCP_MAGIC;
++ unit->specific_magic = ZFCP_MAGIC_UNIT;
++#endif
++
++ /* Init proc structures */
++#ifdef CONFIG_PROC_FS
++ ZFCP_LOG_TRACE("Generating proc entry....\n");
++ retval = zfcp_create_unit_proc(unit);
++ if (retval) {
++ ZFCP_LOG_TRACE(
++ "freeing unit struct 0x%lx\n",
++ (unsigned long) unit);
++ goto proc_failed;
++ }
++ ZFCP_LOG_TRACE("Proc entry created.\n");
++#endif
++
++ /*
++ * update max. SCSI LUN of logical units attached to
++ * "parent" remote port if necessary
++ */
++ if (port->max_scsi_lun < scsi_lun) {
++ port->max_scsi_lun = scsi_lun;
++ ZFCP_LOG_TRACE(
++ "max. SCSI LUN of units of remote "
++ "port 0x%lx now %i\n",
++ (unsigned long)port,
++ scsi_lun);
++ }
++
++ /*
++ * update max. SCSI LUN of logical units attached to
++ * "parent" adapter if necessary
++ */
++ if (port->adapter->max_scsi_lun < scsi_lun) {
++ port->adapter->max_scsi_lun = scsi_lun;
++ ZFCP_LOG_TRACE(
++ "max. SCSI LUN of units attached "
++ "via adapter with devno 0x%04x now %i\n",
++ port->adapter->devno,
++ scsi_lun);
++ }
++
++ /*
++ * update max. SCSI LUN of logical units attached to
++ * "parent" host (SCSI stack) if necessary
++ */
++ if (port->adapter->scsi_host &&
++ (port->adapter->scsi_host->max_lun < (scsi_lun + 1))) {
++ port->adapter->scsi_host->max_lun = scsi_lun + 1;
++ ZFCP_LOG_TRACE(
++ "max. SCSI LUN of units attached "
++ "via host # %d now %i\n",
++ port->adapter->scsi_host->host_no,
++ port->adapter->scsi_host->max_lun);
++ }
++
++ /* Unit is new and needs to be added to list */
++ write_lock_irqsave(&port->unit_list_lock, flags);
++ list_add_tail(&unit->list, &port->unit_list_head);
++ port->units++;
++ write_unlock_irqrestore(&port->unit_list_lock, flags);
++
++ /* also add unit to map list to get them in order of addition */
++ list_add_tail(&unit->map_list, &zfcp_data.map_list_head);
++
++ atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
++
++ zfcp_callback_do_unit_add(NULL, port->adapter, port, unit);
++
++ ZFCP_LOG_TRACE(
++ "unit allocated at 0x%lx, %i units in "
++ "list of port 0x%lx\n",
++ (unsigned long)unit,
++ port->units,
++ (unsigned long)port);
++ goto out;
++
++proc_failed:
++ ZFCP_KFREE(unit, sizeof(zfcp_unit_t));
++
++unit_alloc_failed:
++ unit = NULL;
++
++known_unit:
++out:
++ *unit_p = unit;
++ ZFCP_LOG_TRACE("exit (%d)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_unit_dequeue
++ *
++ * purpose: dequeues the specified logical unit from the list of
++ * the "parent" port
++ *
++ * returns: 0 - zfcp_unit_t data structure successfully removed
++ * !0 - zfcp_unit_t data structure could not be removed
++ * (e.g. still used)
++ *
++ * locks : unit list write lock is assumed to be held by caller
++ */
++static int zfcp_unit_dequeue(zfcp_unit_t *unit)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter (unit=0x%lx)\n", (unsigned long)unit);
++
++ /* sanity check: valid unit data structure address (simple check) */
++ if (!unit) {
++ ZFCP_LOG_NORMAL(
++ "bug: Pointer to a unit struct is a null "
++ "pointer\n");
++ retval = -EINVAL;
++ goto out;
++ }
++
++ /*
++ * sanity check:
++ * leave if required list lock is not held,
++ * do not know whether it is held by the calling routine (required!)
++ * protecting this critical area or someone else (must not occur!),
++ * but a lock not held by anyone is definetely wrong
++ */
++ if (!spin_is_locked(&unit->port->unit_list_lock)) {
++ ZFCP_LOG_NORMAL("bug: Unit list lock not held "
++ "(debug info 0x%lx)\n",
++ (unsigned long) unit);
++ retval = -EPERM;
++ goto out;
++ }
++
++ /* remove specified unit data structure from list */
++ list_del(&unit->list);
++
++ /* decrease number of units in list */
++ unit->port->units--;
++
++ ZFCP_LOG_TRACE(
++ "unit 0x%lx removed, %i units still in list of port 0x%lx\n",
++ (unsigned long)unit,
++ unit->port->units,
++ (unsigned long)unit->port);
++
++ ZFCP_LOG_TRACE("Deleting proc entry......\n");
++ zfcp_delete_unit_proc(unit);
++ ZFCP_LOG_TRACE("Proc entry removed.\n");
++
++ /* free memory of unit data structure */
++ ZFCP_KFREE(unit, sizeof(zfcp_unit_t));
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * function: zfcp_create_unit_proc
++ *
++ * purpose: creates proc-dir and status file for the unit passed in
++ *
++ * returns: 0 if all entries could be created properly
++ * -EPERM if at least one entry could not be created
++ * (all entries are guaranteed to be freed in this
++ * case)
++ *
++ * locks: proc_sema must be held on call and throughout the function
++ */
++int zfcp_create_unit_proc(zfcp_unit_t *unit)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++ char unit_scsi_lun[20];
++ int length = 0;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter (unit=0x%lx)\n",
++ (unsigned long)unit);
++
++ length += sprintf(&unit_scsi_lun[length],"lun0x%x", unit->scsi_lun);
++ unit_scsi_lun[length]='\0';
++ unit->proc_dir = proc_mkdir (unit_scsi_lun,
++ unit->port->proc_dir);
++ if (!unit->proc_dir) {
++ ZFCP_LOG_INFO("error: Allocation of proc-fs entry %s for the unit "
++ "with SCSI LUN 0x%x failed. Out of resources.\n",
++ unit_scsi_lun,
++ unit->scsi_lun);
++ retval=-EPERM;
++ goto out;
++ }
++ unit->proc_file=create_proc_entry(ZFCP_STATUS_FILE,
++ S_IFREG|S_IRUGO|S_IWUSR,
++ unit->proc_dir);
++ if (!unit->proc_file) {
++ ZFCP_LOG_INFO("error: Allocation of proc-fs entry %s for the unit "
++ "with SCSI LUN 0x%x failed. Out of resources.\n",
++ ZFCP_STATUS_FILE,
++ unit->scsi_lun);
++ remove_proc_entry (unit_scsi_lun, unit->port->proc_dir);
++ retval=-EPERM;
++ goto out;
++ }
++
++ unit->proc_file->proc_fops = &zfcp_unit_fops;
++ unit->proc_file->data=(void *)unit;
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++out:
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_delete_unit_proc
++ *
++ * purpose: deletes proc-dir and status file for the unit passed in
++ *
++ * returns: 0 in all cases
++ *
++ * locks: proc_sema must be held on call and throughout the function
++ */
++int zfcp_delete_unit_proc(zfcp_unit_t *unit)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++ char unit_scsi_lun[20];
++ int length = 0;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter (unit=0x%lx)\n",
++ (unsigned long)unit);
++
++ remove_proc_entry (ZFCP_STATUS_FILE,
++ unit->proc_dir);
++ length += sprintf(&unit_scsi_lun[length],"lun0x%x", unit->scsi_lun);
++ unit_scsi_lun[length]='\0';
++ remove_proc_entry (unit_scsi_lun, unit->port->proc_dir);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_create_port_proc
++ *
++ * purpose: creates proc-dir and status file for the port passed in
++ *
++ * returns: 0 if all entries could be created properly
++ * -EPERM if at least one entry could not be created
++ * (all entries are guaranteed to be freed in this
++ * case)
++ *
++ * locks: proc_sema must be held on call and throughout the function
++ */
++int zfcp_create_port_proc(zfcp_port_t *port)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++ char port_scsi_id[20];
++ int length = 0;
++ int retval = 0;
++
++ length +=sprintf(&port_scsi_id[length],"id0x%x", port->scsi_id);
++ port_scsi_id[length]='\0';
++ port->proc_dir = proc_mkdir (port_scsi_id,
++ port->adapter->proc_dir);
++ if (!port->proc_dir) {
++ ZFCP_LOG_INFO("error: Allocation of proc-fs entry %s for the port "
++ "with SCSI-id 0x%x failed. Out of resources.\n",
++ port_scsi_id,
++ port->scsi_id);
++ retval=-EPERM;
++ goto out;
++ }
++ ZFCP_LOG_TRACE("enter (port=0x%lx)\n",
++ (unsigned long)port);
++ port->proc_file=create_proc_entry(ZFCP_STATUS_FILE,
++ S_IFREG|S_IRUGO|S_IWUSR,
++ port->proc_dir);
++ if (!port->proc_file) {
++ ZFCP_LOG_INFO("error: Allocation of proc-fs entry %s for the port "
++ "with SCSI-id 0x%x failed. Out of resources.\n",
++ ZFCP_STATUS_FILE,
++ port->scsi_id);
++ remove_proc_entry (port_scsi_id, port->adapter->proc_dir);
++ retval=-EPERM;
++ goto out;
++ }
++
++ port->proc_file->proc_fops = &zfcp_port_fops;
++ port->proc_file->data=(void *)port;
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_delete_port_proc
++ *
++ * purpose: deletes proc-dir and status file for the port passed in
++ *
++ * returns: 0 in all cases
++ *
++ * locks: proc_sema must be held on call and throughout the function
++ */
++int zfcp_delete_port_proc(zfcp_port_t *port)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++ char port_scsi_id[20];
++ int length = 0;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter (port=0x%lx)\n",
++ (unsigned long)port);
++
++ remove_proc_entry (ZFCP_STATUS_FILE, port->proc_dir);
++ length = 0;
++ length +=sprintf(&port_scsi_id[length],"id0x%x", port->scsi_id);
++ port_scsi_id[length]='\0';
++ remove_proc_entry (port_scsi_id, port->adapter->proc_dir);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_create_adapter_proc
++ *
++ * purpose: creates proc-dir and status file for the adapter passed in
++ *
++ * returns: 0 if all entries could be created properly
++ * -EPERM if at least one entry could not be created
++ * (all entries are guaranteed to be freed in this
++ * case)
++ *
++ * locks: proc_sema must be held on call and throughout the function
++ */
++int zfcp_create_adapter_proc(zfcp_adapter_t *adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++ char devno[20];
++ int length = 0;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter (adapter=0x%lx)\n",
++ (unsigned long)adapter);
++
++ length +=sprintf(&devno[length],"devno0x%04x", adapter->devno);
++ devno[length]='\0';
++ adapter->proc_dir = proc_mkdir (devno, zfcp_data.proc_dir);
++ if (!adapter->proc_dir) {
++ ZFCP_LOG_INFO("error: Allocation of proc-fs entry %s for the adapter "
++ "with devno 0x%04x failed. Out of resources.\n",
++ devno,
++ adapter->devno);
++ retval=-EPERM;
++ goto out;
++ }
++ adapter->proc_file=create_proc_entry(ZFCP_STATUS_FILE,
++ S_IFREG|S_IRUGO|S_IWUSR,
++ adapter->proc_dir);
++ if (!adapter->proc_file) {
++ ZFCP_LOG_INFO("error: Allocation of proc-fs entry %s for the adapter "
++ "with devno 0x%04x failed. Out of resources.\n",
++ ZFCP_STATUS_FILE,
++ adapter->devno);
++ remove_proc_entry (devno, zfcp_data.proc_dir);
++ retval=-EPERM;
++ goto out;
++ }
++
++ adapter->proc_file->proc_fops = &zfcp_adapter_fops;
++
++ adapter->proc_file->data=(void *)adapter;
++
++ out:
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_delete_adapter_proc
++ *
++ * purpose: deletes proc-dir and status file for the adapter passed in
++ *
++ * returns: 0 in all cases
++ *
++ * locks: proc_sema must be held on call and throughout the function
++ */
++int zfcp_delete_adapter_proc(zfcp_adapter_t *adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++ char devno[20];
++ int length = 0;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter (adapter=0x%lx)\n",
++ (unsigned long)adapter);
++
++ remove_proc_entry (ZFCP_STATUS_FILE, adapter->proc_dir);
++ length += sprintf(&devno[length],"devno0x%04x", adapter->devno);
++ devno[length]='\0';
++ remove_proc_entry (devno, zfcp_data.proc_dir);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
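++
++/*
++ * Taken together, the create/delete pairs above maintain a three-level
++ * tree under /proc/scsi. With the file name macros left symbolic (their
++ * values are defined elsewhere in the driver), the layout is:
++ *
++ * /proc/scsi/ZFCP_NAME/ZFCP_PARM_FILE
++ * /proc/scsi/ZFCP_NAME/ZFCP_MAP_FILE
++ * /proc/scsi/ZFCP_NAME/ZFCP_ADD_MAP_FILE
++ * /proc/scsi/ZFCP_NAME/devno0x<devno>/ZFCP_STATUS_FILE
++ * /proc/scsi/ZFCP_NAME/devno0x<devno>/id0x<scsi_id>/ZFCP_STATUS_FILE
++ * /proc/scsi/ZFCP_NAME/devno0x<devno>/id0x<scsi_id>/lun0x<scsi_lun>/ZFCP_STATUS_FILE
++ */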
++
++
++/*
++ * function: zfcp_open_parm_proc
++ *
++ * purpose: sets up and fills the contents of the parm proc_entry
++ * during a read access
++ *
++ * retval: 0 if successful
++ * -ENOMEM if at least one buffer could not be allocated
++ * (all buffers will be freed on exit)
++ */
++int zfcp_open_parm_proc(struct inode *inode, struct file *file)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++/*
++ * Note: modified proc fs utilization (instead of using ..._generic):
++ *
++ * - to avoid (SMP) races, allocate buffers for output using
++ * the private_data member in the respective file struct
++ * such that read() just has to copy out of this buffer
++ *
++ */
++ int len = 0;
++ procbuf_t *pbuf;
++ int retval=0;
++
++ ZFCP_LOG_TRACE("enter (inode=0x%lx, file=0x%lx)\n",
++ (unsigned long)inode,
++ (unsigned long) file);
++
++#if 0
++ /* DEBUG: force an abort which then hangs; use of mod_parm dismisses the pending fsf_req */
++ ZFCP_LOG_NORMAL("try to recover forced and hung abort\n");
++ zfcp_erp_adapter_reopen(ZFCP_FIRST_ADAPTER, 0);
++#endif
++
++ pbuf = ZFCP_KMALLOC(sizeof(procbuf_t), GFP_KERNEL);
++ if (pbuf == NULL) {
++ ZFCP_LOG_NORMAL("error: Not enough memory available for "
++ "proc-fs action. Action will be ignored.\n");
++ retval = -ENOMEM;
++ goto out;
++ } else {
++ file->private_data = ( void * ) pbuf;
++ }
++
++ pbuf->buf = ZFCP_KMALLOC(ZFCP_MAX_PROC_SIZE, GFP_KERNEL);
++ if (pbuf->buf == NULL) {
++ ZFCP_LOG_NORMAL("error: Not enough memory available for "
++ "proc-fs action. Action will be ignored.\n");
++ ZFCP_KFREE(pbuf, sizeof(*pbuf));
++ retval = -ENOMEM;
++ goto out;
++ }
++
++ ZFCP_LOG_TRACE("Memory for proc parm output allocated.\n");
++
++ MOD_INC_USE_COUNT;
++
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,"Module Information: \n");
++
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,"Module Version %s running in mode: ",
++ ZFCP_REVISION);
++
++ len += sprintf(pbuf->buf+len,"FULL FEATURED\n");
++
++ len += sprintf(pbuf->buf+len,"Debug proc output enabled: %s\n",
++ proc_debug ? " YES" : " NO");
++
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,
++ "Full log-level is: 0x%08x which means:\n",
++ atomic_read(&zfcp_data.loglevel));
++
++ len += sprintf(pbuf->buf+len,
++ "ERP log-level: %01x\n",
++ (atomic_read(&zfcp_data.loglevel) >> 6*4) & 0xf);
++ len += sprintf(pbuf->buf+len,
++ "QDIO log-level: %01x "
++ "Dynamic IO log-level: %01x\n",
++ (atomic_read(&zfcp_data.loglevel) >> 5*4) & 0xf,
++ (atomic_read(&zfcp_data.loglevel) >> 4*4) & 0xf);
++ len += sprintf(pbuf->buf+len,
++ "Configuration log-level: %01x "
++ "FSF log-level: %01x\n",
++ (atomic_read(&zfcp_data.loglevel) >> 3*4) & 0xf,
++ (atomic_read(&zfcp_data.loglevel) >> 2*4) & 0xf);
++ len += sprintf(pbuf->buf+len,
++ "SCSI log-level: %01x "
++ "Other log-level: %01x\n",
++ (atomic_read(&zfcp_data.loglevel) >> 1*4) & 0xf,
++ atomic_read(&zfcp_data.loglevel) & 0xf);
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,
++ "Registered Adapters: %5d\n",
++ zfcp_data.adapters);
++ len += sprintf(pbuf->buf+len,"\n");
++
++ if (proc_debug != 0) {
++ len += sprintf(pbuf->buf+len,
++ "Data Structure information:\n");
++ len += sprintf(pbuf->buf+len,
++ "Data struct at: 0x%08lx\n",
++ (unsigned long) &zfcp_data);
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,
++ "Adapter list head at: 0x%08lx\n",
++ (unsigned long) &(zfcp_data.adapter_list_head));
++ len += sprintf(pbuf->buf+len,
++ "Next list head: 0x%08lx "
++ "Previous list head: 0x%08lx\n",
++ (unsigned long) zfcp_data.adapter_list_head.next,
++ (unsigned long) zfcp_data.adapter_list_head.prev);
++ len += sprintf(pbuf->buf+len,
++ "List lock: 0x%08lx "
++ "List lock owner PC: 0x%08lx\n",
++ zfcp_data.adapter_list_lock.lock,
++ zfcp_data.adapter_list_lock.owner_pc);
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,
++ "Total memory used(bytes): 0x%08x\n",
++ atomic_read(&zfcp_data.mem_count));
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,
++ "DEVICE REGISTRATION INFO (devreg):\n");
++ len += sprintf(pbuf->buf+len,
++ "Control Unit Type: 0x%04x "
++ "Control Unit Mode: 0x%02x\n",
++ zfcp_data.devreg.ci.hc.ctype,
++ zfcp_data.devreg.ci.hc.cmode);
++ len += sprintf(pbuf->buf+len,
++ "Channel Status: 0x%04x "
++ "Device Status: 0x%02x\n",
++ zfcp_data.devreg.ci.hc.dtype,
++ zfcp_data.devreg.ci.hc.dmode);
++ len += sprintf(pbuf->buf+len,
++ "Flags: 0x%08x\n",
++ zfcp_data.devreg.flag);
++ len += sprintf(pbuf->buf+len,"\n");
++ len += sprintf(pbuf->buf+len,
++ "PRIVILEGED DEVICE REGISTRATION INFO (devreg):\n");
++ len += sprintf(pbuf->buf+len,
++ "Control Unit Type: 0x%04x "
++ "Control Unit Model: 0x%02x\n",
++ zfcp_data.devreg_priv.ci.hc.ctype,
++ zfcp_data.devreg_priv.ci.hc.cmode);
++ len += sprintf(pbuf->buf+len,
++ "Device Type: 0x%04x "
++ "Device Model: 0x%02x\n",
++ zfcp_data.devreg_priv.ci.hc.dtype,
++ zfcp_data.devreg_priv.ci.hc.dmode);
++ len += sprintf(pbuf->buf+len,
++ "Flags: 0x%08x\n",
++ zfcp_data.devreg_priv.flag);
++ len += sprintf(pbuf->buf+len,"\n");
++ } /* if (proc_debug != 0) */
++
++ pbuf->len = len;
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
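++
++/*
++ * The log-level word decoded above packs one 4-bit level per logging
++ * area into a 32-bit value (ERP, QDIO, dynamic IO, configuration, FSF,
++ * SCSI, other; the top nibble is not decoded here). A stand-alone check
++ * of the nibble arithmetic (illustration only, sample value invented):
++ */
++#if 0
++#include <stdio.h>
++
++int main(void)
++{
++ unsigned int loglevel = 0x03210321; /* sample value */
++
++ printf("ERP: %x\n", (loglevel >> 6*4) & 0xf);
++ printf("QDIO: %x\n", (loglevel >> 5*4) & 0xf);
++ printf("dynamic IO: %x\n", (loglevel >> 4*4) & 0xf);
++ printf("configuration: %x\n", (loglevel >> 3*4) & 0xf);
++ printf("FSF: %x\n", (loglevel >> 2*4) & 0xf);
++ printf("SCSI: %x\n", (loglevel >> 1*4) & 0xf);
++ printf("other: %x\n", loglevel & 0xf);
++ return 0;
++}
++#endif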
++
++
++/*
++ * function: zfcp_close_parm_proc
++ *
++ * purpose: releases the memory allocated by zfcp_open_parm_proc
++ *
++ * retval: 0 in all cases
++ *
++ */
++int zfcp_close_parm_proc(struct inode *inode, struct file *file)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int rc=0;
++ procbuf_t *pbuf = (procbuf_t *) file->private_data;
++
++ ZFCP_LOG_TRACE("enter (inode=0x%lx, buffer=0x%lx)\n",
++ (unsigned long)inode,
++ (unsigned long) file);
++
++ if (pbuf) {
++ if (pbuf->buf) {
++ ZFCP_LOG_TRACE("Freeing pbuf->buf\n");
++ ZFCP_KFREE(pbuf->buf, ZFCP_MAX_PROC_SIZE);
++ } else {
++ ZFCP_LOG_DEBUG("No procfile buffer found to be freed\n");
++ }
++ ZFCP_LOG_TRACE("Freeing pbuf\n");
++ ZFCP_KFREE(pbuf, sizeof(*pbuf));
++ } else {
++ ZFCP_LOG_DEBUG("No procfile buffer found to be freed.\n");
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", rc);
++
++ MOD_DEC_USE_COUNT;
++
++ return rc;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * function: zfcp_open_add_map_proc
++ *
++ * purpose: allocates memory for proc_line, initialises the line length
++ *
++ * retval: 0 if successful
++ * -ENOMEM if memory could not be obtained
++ *
++ * locks: grabs the zfcp_data.proc_sema semaphore
++ * it is released upon exit of zfcp_close_add_map_proc
++ */
++int zfcp_open_add_map_proc(struct inode *inode, struct file *buffer)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int retval=0;
++
++ ZFCP_LOG_TRACE("enter (inode=0x%lx, buffer=0x%lx)\n",
++ (unsigned long)inode,
++ (unsigned long) buffer);
++
++ down(&zfcp_data.proc_sema);
++
++ zfcp_data.proc_line = ZFCP_KMALLOC(ZFCP_MAX_PROC_LINE, GFP_KERNEL);
++ if (zfcp_data.proc_line == NULL) {
++ /* release semaphore on memory shortage */
++ up(&zfcp_data.proc_sema);
++
++ ZFCP_LOG_NORMAL("error: Not enough free memory for procfile"
++ " input. Input will be ignored.\n");
++
++ retval = -ENOMEM;
++ goto out;
++ }
++
++ /* this holds the length of the part actually containing data,
++ not the size of the buffer */
++ zfcp_data.proc_line_length=0;
++
++ MOD_INC_USE_COUNT;
++
++ ZFCP_LOG_TRACE("proc_line buffer allocated...\n");
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++
++/*
++ * function: zfcp_close_add_map_proc
++ *
++ * purpose: parses any remaining string in proc_line, then
++ * releases memory for proc_line, then calls
++ * zfcp_adapter_scsi_register_all to tell the SCSI stack about
++ * possible new devices
++ *
++ * retval: 0 in all cases
++ *
++ * locks: upon exit of zfcp_close_add_map_proc, releases the proc_sema
++ */
++int zfcp_close_add_map_proc(struct inode *inode, struct file *buffer)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int retval=0;
++
++ ZFCP_LOG_TRACE("enter (inode=0x%lx, buffer=0x%lx)\n",
++ (unsigned long)inode,
++ (unsigned long) buffer);
++
++ if (zfcp_data.proc_line == NULL)
++ goto out;
++
++ if (zfcp_data.proc_line_length > 0) {
++ ZFCP_LOG_TRACE("Passing leftover line to parser\n");
++ retval=zfcp_config_parse_record_list(
++ zfcp_data.proc_line,
++ zfcp_data.proc_line_length,
++ ZFCP_PARSE_ADD);
++ if(retval<0) {
++ ZFCP_LOG_NORMAL("Warning: One or several mapping "
++ "entries were not added to the "
++ "module configuration.\n");
++ }
++ }
++ ZFCP_KFREE(zfcp_data.proc_line, ZFCP_MAX_PROC_LINE);
++ ZFCP_LOG_TRACE("proc_line buffer released...\n");
++ zfcp_data.proc_line=NULL;
++ zfcp_data.proc_line_length=0;
++
++ zfcp_adapter_scsi_register_all();
++
++ /* release semaphore */
++ up(&zfcp_data.proc_sema);
++
++ MOD_DEC_USE_COUNT;
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++
++/*
++ * function: zfcp_create_root_proc
++ *
++ * purpose: creates the main proc-directory for the zfcp driver
++ *
++ * retval: 0 if successful
++ * -EPERM if the proc-directory could not be created
++ *
++ * locks: the proc_sema is held on entry and throughout this function
++ */
++int zfcp_create_root_proc(void)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ zfcp_data.proc_dir = proc_mkdir (ZFCP_NAME, proc_scsi);
++ if (!zfcp_data.proc_dir) {
++ ZFCP_LOG_INFO("error: Allocation of proc-fs directory %s for the "
++ "zfcp-driver failed.\n",
++ ZFCP_NAME);
++ retval = -EPERM;
++ }
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++
++/*
++ * function: zfcp_create_data_procs
++ *
++ * purpose: creates the module-centric proc-entries
++ *
++ * retval: 0 if successful
++ * -EPERM if the proc-entries could not be created
++ * (all entries are removed on exit)
++ *
++ * locks: the proc_sema is held on entry and throughout this function
++ */
++int zfcp_create_data_procs(void)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter\n");
++ /* parm_file */
++ zfcp_data.parm_proc_file=create_proc_entry(ZFCP_PARM_FILE,
++ S_IFREG|S_IRUGO|S_IWUSR,
++ zfcp_data.proc_dir);
++ if (!zfcp_data.parm_proc_file) {
++ ZFCP_LOG_INFO("error: Allocation of proc-fs entry %s for module "
++ "configuration failed. Out of resources.\n",
++ ZFCP_PARM_FILE);
++ retval = -EPERM;
++ goto out;
++ }
++ zfcp_data.parm_proc_file->proc_fops=&zfcp_parm_fops;
++
++ /* map file */
++ zfcp_data.map_proc_file=create_proc_entry(ZFCP_MAP_FILE,
++ S_IFREG|S_IRUGO,
++ zfcp_data.proc_dir);
++ if (!zfcp_data.map_proc_file) {
++ ZFCP_LOG_INFO("error: Allocation of proc-fs entry %s for module "
++ "configuration failed. Out of resources.\n",
++ ZFCP_MAP_FILE);
++ retval = -EPERM;
++ goto fail_map_proc_file;
++ }
++ zfcp_data.map_proc_file->proc_fops=&zfcp_map_fops;
++
++ /* add_map file */
++ zfcp_data.add_map_proc_file=create_proc_entry(ZFCP_ADD_MAP_FILE,
++ S_IFREG|S_IWUSR,
++ zfcp_data.proc_dir);
++ if (!zfcp_data.add_map_proc_file) {
++ ZFCP_LOG_INFO("error: Allocation of proc-fs entry %s for module "
++ "configuration failed. Out of resources.\n",
++ ZFCP_ADD_MAP_FILE);
++ retval = -EPERM;
++ goto fail_add_map_proc_file;
++ }
++ zfcp_data.add_map_proc_file->proc_fops=&zfcp_add_map_fops;
++ goto out;
++
++ fail_add_map_proc_file:
++ remove_proc_entry (ZFCP_MAP_FILE, zfcp_data.proc_dir);
++ fail_map_proc_file:
++ remove_proc_entry (ZFCP_PARM_FILE, zfcp_data.proc_dir);
++
++ out:
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++
++/*
++ * function: zfcp_delete_root_proc
++ *
++ * purpose: deletes the main proc-directory for the zfcp driver
++ *
++ * retval: 0 in all cases
++ */
++int zfcp_delete_root_proc(void)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ remove_proc_entry (ZFCP_NAME, proc_scsi);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++
++/*
++ * function: zfcp_delete_data_procs
++ *
++ * purpose: deletes the module-specific proc-entries for the zfcp driver
++ *
++ * retval: 0 in all cases
++ */
++int zfcp_delete_data_procs(void)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ remove_proc_entry (ZFCP_MAP_FILE, zfcp_data.proc_dir);
++ remove_proc_entry (ZFCP_ADD_MAP_FILE, zfcp_data.proc_dir);
++ remove_proc_entry (ZFCP_PARM_FILE, zfcp_data.proc_dir);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++
++/*
++ * function: zfcp_parm_proc_read
++ *
++ * purpose: Provides information about module settings as proc-output
++ *
++ * returns: number of characters copied to user-space
++ * - <error-type> otherwise
++ */
++ssize_t zfcp_parm_proc_read(struct file *file,
++ char *user_buf,
++ size_t user_len,
++ loff_t *offset)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ loff_t len;
++ procbuf_t *pbuf = (procbuf_t *) file->private_data;
++
++ ZFCP_LOG_TRACE(
++ "enter (file=0x%lx user_buf=0x%lx "
++ "user_length=%li *offset=0x%lx)\n",
++ (unsigned long)file,
++ (unsigned long)user_buf,
++ user_len,
++ (unsigned long)*offset);
++
++ if (*offset >= pbuf->len) {
++ len = 0;
++ } else {
++ len = min(user_len, (unsigned long)(pbuf->len - *offset));
++ if (copy_to_user(user_buf, &(pbuf->buf[*offset]), len))
++ return -EFAULT;
++ (*offset) += len;
++ }
++
++ ZFCP_LOG_TRACE("Size-offset is %ld, user_len is %ld\n",
++ ((unsigned long)(pbuf->len - *offset)),
++ user_len);
++
++ ZFCP_LOG_TRACE("exit (%Li)\n", len);
++
++ return len;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
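++
++/*
++ * The read routine above is the usual "window into a prebuilt buffer"
++ * proc read: each call copies at most user_len bytes starting at
++ * *offset and advances the offset. A user-space model of the
++ * arithmetic (illustration only):
++ */
++#if 0
++#include <string.h>
++
++static long window_read(char *dst, unsigned long user_len,
++ unsigned long *offset,
++ const char *src, unsigned long src_len)
++{
++ unsigned long len;
++
++ if (*offset >= src_len)
++ return 0; /* end of file */
++ len = src_len - *offset;
++ if (len > user_len)
++ len = user_len; /* clip to the caller's buffer */
++ memcpy(dst, src + *offset, len);
++ *offset += len; /* the next call resumes here */
++ return len;
++}
++#endif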
++
++/*
++ * function: zfcp_parm_proc_write
++ *
++ * purpose: parses write requests to parm procfile
++ *
++ * returns: number of characters passed into function
++ * -<error code> on failure
++ *
++ * known bugs: does not work when small buffers are used
++ */
++
++ssize_t zfcp_parm_proc_write(struct file *file,
++ const char *user_buf,
++ size_t user_len,
++ loff_t *offset)
++
++{
++
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ char *buffer, *tmp = NULL;
++ char *buffer_start = NULL;
++ char *pos;
++ size_t my_count = user_len;
++ u32 value;
++ int retval = user_len;
++
++ ZFCP_LOG_TRACE(
++ "enter (file=0x%lx user_buf=0x%lx "
++ "user_length=%li *offset=0x%lx)\n",
++ (unsigned long)file,
++ (unsigned long)user_buf,
++ user_len,
++ (unsigned long)*offset);
++
++ buffer = ZFCP_KMALLOC(my_count + 1, GFP_KERNEL);
++ if (!buffer) {
++ ZFCP_LOG_NORMAL("error: Not enough free memory for procfile"
++ " input. Input will be ignored.\n");
++ retval = -ENOMEM;
++ goto out;
++ }
++ buffer_start=buffer;
++ ZFCP_LOG_TRACE("buffer allocated...\n");
++
++ if (copy_from_user(buffer, user_buf, my_count)) {
++ retval = -EFAULT;
++ goto free_buffer;
++ }
++
++ buffer[my_count] = '\0';
++
++ ZFCP_LOG_TRACE("user_len= %ld, strlen= %ld, buffer=%s<\n",
++ user_len, strlen("loglevel=0x00000000"), buffer);
++
++ /* look for new loglevel */
++ pos = strstr(buffer, "loglevel=");
++ if (pos) {
++ tmp = pos + strlen("loglevel=");
++ value = simple_strtoul(tmp, &pos, 0);
++ if (pos == tmp) {
++ ZFCP_LOG_INFO(
++ "warning: Log-level could not be changed, syntax faulty."
++ "\nSyntax is loglevel=0xueqdcfso, see device driver "
++ "documentation for details.\n");
++ retval = -EFAULT;
++ } else {
++ ZFCP_LOG_TRACE(
++ "setting new loglevel (old is 0x%x, new is 0x%x)\n",
++ atomic_read(&zfcp_data.loglevel), value);
++ atomic_set(&zfcp_data.loglevel, value);
++ }
++ }
++
++#ifdef ZFCP_LOW_MEM_CREDITS
++ /* look for low mem trigger/credit */
++ pos = strstr(buffer, "lowmem=");
++ if (pos) {
++ tmp = pos + strlen("lowmem=");
++ value = simple_strtoul(tmp, &pos, 0);
++ if (pos == tmp) {
++ ZFCP_LOG_INFO("warning: lowmem credit faulty.");
++ retval = -EFAULT;
++ } else {
++ ZFCP_LOG_INFO("setting lowmem credit to %d\n", value);
++ atomic_set(&zfcp_data.lowmem_credit, value);
++ }
++ }
++#endif
++
++ ZFCP_LOG_TRACE("freeing buffer..\n");
++ ZFCP_KFREE(buffer_start, my_count + 1);
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
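++
++/*
++ * A stand-alone model of the "loglevel=" parsing above, with strtoul
++ * standing in for the kernel's simple_strtoul (illustration only,
++ * sample input invented):
++ */
++#if 0
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++
++int main(void)
++{
++ const char buffer[] = "loglevel=0x00330033\n";
++ const char *pos = strstr(buffer, "loglevel=");
++
++ if (pos) {
++ const char *tmp = pos + strlen("loglevel=");
++ char *end;
++ unsigned long value = strtoul(tmp, &end, 0);
++
++ if (end == tmp) /* no digits after the '=' */
++ printf("faulty syntax\n");
++ else
++ printf("new loglevel 0x%08lx\n", value);
++ }
++ return 0;
++}
++#endif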
++/*
++ * function: zfcp_proc_map_open
++ *
++ * purpose: allocates memory for proc_buffer_map
++ *
++ * retval: 0 if successful
++ * -ENOMEM if memory could not be obtained
++ *
++ * locks: grabs the zfcp_data.proc_sema semaphore
++ * it is released upon exit of zfcp_proc_map_close
++ */
++int zfcp_proc_map_open(struct inode *inode, struct file *buffer)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (inode=0x%lx, buffer=0x%lx)\n",
++ (unsigned long)inode,
++ (unsigned long) buffer);
++
++ /* block access */
++ down(&zfcp_data.proc_sema);
++
++ zfcp_data.proc_buffer_map = ZFCP_KMALLOC(
++ ZFCP_MAX_PROC_SIZE,
++ GFP_KERNEL);
++ if (!zfcp_data.proc_buffer_map) {
++ /* release semaphore on memory shortage */
++ up(&zfcp_data.proc_sema);
++ ZFCP_LOG_NORMAL(
++ "error: Not enough free memory for procfile"
++ " output. No output will be given.\n");
++ retval = -ENOMEM;
++ } else MOD_INC_USE_COUNT;
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_proc_map_close
++ *
++ * purpose: releases memory for proc_buffer_map
++ *
++ * retval: 0 in all cases
++ *
++ * locks: releases the zfcp_data.proc_sema semaphore upon exit
++ */
++int zfcp_proc_map_close(struct inode *inode, struct file *buffer)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int retval=0;
++
++ ZFCP_LOG_TRACE(
++ "enter (inode=0x%lx, buffer=0x%lx)\n",
++ (unsigned long)inode,
++ (unsigned long) buffer);
++
++ if (zfcp_data.proc_buffer_map) {
++ ZFCP_LOG_TRACE("Freeing zfcp_data.proc_buffer_map.\n");
++ ZFCP_KFREE(zfcp_data.proc_buffer_map, ZFCP_MAX_PROC_SIZE);
++ up(&zfcp_data.proc_sema);
++ MOD_DEC_USE_COUNT;
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_proc_map_read
++ *
++ * purpose: Provides a list of all configured devices in identical format
++ * to expected configuration input as proc-output
++ *
++ * returns: number of characters copied to user-space
++ * - <error-type> otherwise
++ *
++ * locks: proc_sema must be held on entry and throughout function
++ */
++ssize_t zfcp_proc_map_read(
++ struct file *file,
++ char *user_buf,
++ size_t user_len,
++ loff_t *offset)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ size_t real_len = 0;
++ size_t print_len = 0;
++ loff_t line_offset = 0;
++ u64 current_unit = 0;
++ zfcp_unit_t *unit;
++ int i = 0;
++ static size_t item_size = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (file=0x%lx user_buf=0x%lx "
++ "user_length=%li, *offset=%Ld)\n",
++ (unsigned long)file,
++ (unsigned long)user_buf,
++ user_len,
++ *offset);
++
++ if (*offset) {
++ /*
++ * current_unit: unit that needs to be printed (might be remainder)
++ * line_offset: bytes of current_unit that have already been printed
++ */
++ current_unit = (*offset);
++ line_offset = do_div(current_unit, item_size);
++ ZFCP_LOG_TRACE(
++ "item_size %ld, current_unit %Ld, line_offset %Ld\n",
++ item_size,
++ (llui_t)current_unit,
++ line_offset);
++ }
++
++ list_for_each_entry(unit, &zfcp_data.map_list_head, map_list) {
++ /* skip all units that have already been completely printed */
++ if (i < current_unit) {
++ i++;
++ continue;
++ }
++ /* a unit to be printed (at least partially) */
++ ZFCP_LOG_TRACE("unit=0x%lx\n", (unsigned long)unit);
++ /* assumption: item_size <= ZFCP_MAX_PROC_SIZE */
++ item_size = sprintf(
++ &zfcp_data.proc_buffer_map[real_len],
++ "0x%04x 0x%08x:0x%016Lx 0x%08x:0x%016Lx\n",
++ unit->port->adapter->devno,
++ unit->port->scsi_id,
++ (llui_t)(unit->port->wwpn),
++ unit->scsi_lun,
++ (llui_t)(unit->fcp_lun));
++ /* re-calculate used bytes in kernel buffer */
++ real_len += item_size;
++ /* re-calculate bytes to be printed */
++ print_len = real_len - line_offset;
++ /* stop if there is not enough user buffer space left */
++ if (print_len > user_len) {
++ /* adjust number of bytes to be printed */
++ print_len = user_len;
++ break;
++ }
++ /* stop if there is not enough kernel buffer space left */
++ if (real_len + item_size > ZFCP_MAX_PROC_SIZE)
++ break;
++ }
++
++ /* print if there is something in buffer */
++ if (print_len) {
++ ZFCP_LOG_TRACE(
++ "Trying to do output (line_offset=%Ld, print_len=%ld, "
++ "real_len=%ld, user_len=%ld).\n",
++ line_offset, print_len, real_len, user_len);
++ if (copy_to_user(
++ user_buf,
++ &zfcp_data.proc_buffer_map[line_offset],
++ print_len)) {
++ ZFCP_LOG_NORMAL(
++ "bug: Copying proc-file output to user space "
++ "failed (debug info %ld)\n",
++ print_len);
++ print_len = -EFAULT;
++ } else /* re-calculate offset in proc-output for next call */
++ (*offset) += print_len;
++ }
++
++ ZFCP_LOG_TRACE("exit (%li)\n", print_len);
++
++ return print_len;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/* why is such a function not provided by the kernel? */
++static size_t strnspn(const char *string, const char *chars, size_t limit)
++{
++ size_t pos = 0;
++ const char *s = string, *c;
++
++ while ((*s != '\0') && (pos < limit)) {
++ c = chars;
++ do {
++ if (*c == '\0')
++ goto out;
++ } while (*c++ != *s);
++ s++;
++ pos++;
++ }
++
++out:
++ return pos;
++}
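++/*
++ * e.g. strnspn("abc123", "abcdef", 6) == 3: the leading run "abc" is
++ * drawn entirely from the given set, '1' is not (illustrative only).
++ */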
++
++
++/* why is such a function not provided by the kernel? */
++char* strnchr(const char *string, int character, size_t count)
++{
++ char *s = (char*) string;
++
++ for (;; s++, count--) {
++ if (!count)
++ return NULL;
++ if (*s == character)
++ return s;
++ if (*s == '\0')
++ return NULL;
++ }
++}
++
++
++/* why is such a function not provided by the kernel? */
++char* strnpbrk(const char *string, const char *chars, size_t count)
++{
++ char *s = (char*) string;
++
++ for (;; s++, count--) {
++ if (!count)
++ return NULL;
++ if (strnspn(s, chars, 1))
++ return s;
++ if (*s == '\0')
++ return NULL;
++ }
++}
++
++
++/*
++ * function: zfcp_find_forward
++ *
++ * purpose: Scans buffer for '\n' up to a maximum length of
++ * *buffer_length. buffer is advanced to just past the first
++ * occurrence of '\n' and *buffer_length is decremented to
++ * reflect the new buffer length.
++ * fragment is set to the original buffer start address and
++ * holds the initial fragment string of length
++ * *fragment_length.
++ *
++ * returns: 0 if found
++ * -1 otherwise
++ */
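++/*
++ * Example (illustrative): for a buffer "ab\ncd" with *buffer_length = 5
++ * the function returns 0 with *fragment pointing at "ab\n..."
++ * (*fragment_length = 3) and *buffer advanced to "cd"
++ * (*buffer_length = 2).
++ */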
++unsigned long zfcp_find_forward(char **buffer,
++ unsigned long *buffer_length,
++ char **fragment,
++ unsigned long *fragment_length)
++{
++
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ unsigned long retval=0;
++
++ ZFCP_LOG_TRACE(
++ "enter (*buffer=0x%lx, *buffer_length=%ld, "
++ "*fragment=0x%lx, *fragment_length=%ld)\n",
++ (unsigned long)*buffer,
++ *buffer_length,
++ (unsigned long)*fragment,
++ *fragment_length);
++
++ *fragment = *buffer;
++ while (*buffer < (*fragment + *buffer_length)) {
++ if (**buffer == '\n')
++ break;
++ (*buffer)++;
++ }
++ if (*buffer >= (*fragment + *buffer_length)) {
++ *fragment_length = *buffer_length;
++ *buffer_length = 0;
++ retval = -1;
++ goto out;
++ }
++ (*buffer)++;
++ *fragment_length = *buffer - *fragment;
++ *buffer_length -= *fragment_length;
++
++ out:
++ ZFCP_LOG_TRACE("exit (%li)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * function: zfcp_find_backward
++ *
++ * purpose: Scans buffer for '\n' backwards to a max length of
++ * *buffer_length. Buffer is left unchanged, but
++ * *buffer_length is decremented to reflect the new
++ * buffer length.
++ * rest points to the part of the string past the last
++ * occurrence of '\n' in the original buffer contents
++ * rest_length is the length of this part
++ *
++ * returns: 0 if found
++ * -1 otherwise
++ */
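++/*
++ * Example (illustrative): for a buffer "ab\ncd" with *buffer_length = 5
++ * the function returns 0 with *rest pointing at "cd" (*rest_length = 2)
++ * and *buffer_length reduced to 3 (everything up to and including the
++ * last '\n').
++ */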
++unsigned long zfcp_find_backward(char **buffer,
++ unsigned long *buffer_length,
++ char **rest,
++ unsigned long *rest_length)
++{
++
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ unsigned long retval=0;
++
++ ZFCP_LOG_TRACE(
++ "enter (*buffer=0x%lx, *buffer_length=%ld, "
++ "*rest=0x%lx, *rest_length=%ld)\n",
++ (unsigned long)*buffer,
++ *buffer_length,
++ (unsigned long)*rest,
++ *rest_length);
++
++ *rest = *buffer + *buffer_length - 1;
++ /*
++ n n+1 n+2 n+3 n+4 n+5 n+6 n+7 n+8
++ ^ ^ ^(*buffer+*buffer_length)
++ *buffer *rest (buffer end)
++ */
++ while (*rest != *buffer) {
++ if (**rest == '\n')
++ break;
++ (*rest)--;
++ }
++ if (*rest <= *buffer) {
++ *rest_length = *buffer_length;
++ *buffer_length = 0;
++ retval = -1;
++ goto out;
++ }
++ (*rest)++;
++ /*
++ n n+1 n+2 n+3 n+4 n+5 n+6 n+7 n+8
++ ^ ^ ^ ^ ^(*buffer+*buffer_length)
++ *buffer '\n' *rest (buffer end)
++ */
++ *rest_length = (*buffer + *buffer_length) - *rest;
++ *buffer_length -= *rest_length;
++
++ out:
++ ZFCP_LOG_TRACE("*rest= 0x%lx\n",
++ (unsigned long)*rest);
++
++ ZFCP_LOG_TRACE("exit (%li)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_add_map_proc_write
++ *
++ * purpose: Breaks the map entries in user_buf down into lines and
++ * hands them to zfcp_config_parse_record_list. Line fragments
++ * split across successive write() calls are recombined via
++ * zfcp_data.proc_line.
++ *
++ * returns: user_len as passed in
++ *
++ * locks: proc_sema must be held on entry and throughout function
++ */
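++/*
++ * Illustration (hypothetical input): a map entry split across two
++ * write() calls, e.g. "0x0123 0x00000001:0x50050763" followed by
++ * "00c20b8e 0x0:0x0\n", is buffered in zfcp_data.proc_line until the
++ * terminating '\n' arrives and is then parsed as one record.
++ */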
++ssize_t zfcp_add_map_proc_write(struct file *file,
++ const char *user_buf,
++ size_t user_len,
++ loff_t *offset)
++{
++
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ char *buffer = NULL;
++ char *buffer_start = NULL; /* buffer is modified, this isn't (see kfree) */
++ char *frag = NULL;
++ size_t frag_length = 0;
++ size_t my_count = user_len;
++ size_t buffer_size = user_len; /* my_count is modified while parsing */
++ int temp_ret = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (file=0x%lx user_buf=0x%lx "
++ "user_length=%li *offset=0x%lx)\n",
++ (unsigned long)file,
++ (unsigned long)user_buf,
++ user_len,
++ (unsigned long)*offset);
++
++
++ buffer = ZFCP_KMALLOC(buffer_size + 1, GFP_KERNEL);
++ if (!buffer) {
++ ZFCP_LOG_NORMAL("error: Not enough free memory for procfile"
++ " input. Input will be ignored.\n");
++ user_len = -ENOMEM;
++ goto out;
++ }
++ buffer_start=buffer;
++ ZFCP_LOG_TRACE("buffer allocated...\n");
++
++ if (copy_from_user(buffer, user_buf, my_count)) {
++ user_len = -EFAULT;
++ goto free_buffer;
++ }
++ buffer[buffer_size] = '\0'; /* buffer is printed via "%s" below */
++
++ if (zfcp_data.proc_line_length > 0) {
++ ZFCP_LOG_TRACE(
++ "Remnants were present...(%ld)\n",
++ zfcp_data.proc_line_length);
++ temp_ret = zfcp_find_forward(
++ &buffer, &my_count, &frag, &frag_length);
++ ZFCP_LOG_TRACE(
++ "fragment = 0x%lx, length= %ld\n",
++ (unsigned long) frag,
++ frag_length);
++
++ if ((zfcp_data.proc_line_length + frag_length) >
++ (ZFCP_MAX_PROC_LINE - 1)) {
++ ZFCP_LOG_INFO(
++ "Maximum line length exceeded while parsing (%ld)\n",
++ zfcp_data.proc_line_length + frag_length);
++ zfcp_data.proc_line_length = 0;
++ user_len= -EINVAL;
++ goto free_buffer;
++ }
++
++ if (frag_length > 0) {
++ memcpy( zfcp_data.proc_line + zfcp_data.proc_line_length,
++ frag,
++ frag_length);
++ zfcp_data.proc_line_length += frag_length;
++ }
++
++ if(temp_ret) {
++ ZFCP_LOG_TRACE("\"\\n\" was not found \n");
++ goto free_buffer;
++ }
++
++ ZFCP_LOG_TRACE(
++ "my_count= %ld, buffer=0x%lx text: \"%s\"\n",
++ my_count,
++ (unsigned long) buffer,
++ buffer);
++
++ /* process line combined from several buffers */
++ if (zfcp_config_parse_record_list(
++ zfcp_data.proc_line,
++ zfcp_data.proc_line_length,
++ ZFCP_PARSE_ADD) < 0) {
++ user_len=-EINVAL;
++ /* Do not try another parse in close_proc */
++ zfcp_data.proc_line_length = 0;
++ ZFCP_LOG_NORMAL("Warning: One or several mapping "
++ "entries were not added to the "
++ "module configuration.\n");
++ }
++ zfcp_data.proc_line_length = 0;
++ } /* if (zfcp_data.proc_line_length > 0) */
++
++ temp_ret = zfcp_find_backward(&buffer, &my_count, &frag, &frag_length);
++ ZFCP_LOG_TRACE(
++ "fragment length = %ld\n",
++ frag_length);
++ if (frag_length > (ZFCP_MAX_PROC_LINE - 1)) {
++ ZFCP_LOG_NORMAL(
++ "warning: Maximum line length exceeded while parsing "
++ "input. Length is already %ld. Some part of the input "
++ "will be ignored.\n",
++ frag_length);
++ zfcp_data.proc_line_length = 0;
++ user_len = -EINVAL;
++ goto free_buffer;
++ }
++
++ if (frag_length > 0) {
++ memcpy(zfcp_data.proc_line, frag, frag_length);
++ zfcp_data.proc_line_length += frag_length;
++ }
++
++ if (temp_ret) {
++ ZFCP_LOG_TRACE("\"\\n\" was not found \n");
++ goto free_buffer;
++ }
++
++ ZFCP_LOG_TRACE(
++ "my_count= %ld, buffer=0x%lx text: \"%s\"\n",
++ my_count,
++ (unsigned long) buffer,
++ buffer);
++ if (zfcp_config_parse_record_list(
++ buffer,
++ my_count,
++ ZFCP_PARSE_ADD) < 0) {
++ user_len=-EINVAL;
++ /* Do not try another parse in close_proc */
++ zfcp_data.proc_line_length = 0;
++ ZFCP_LOG_NORMAL("Warning: One or several mapping "
++ "entries were not added to the "
++ "module configuration.\n");
++ }
++free_buffer:
++ ZFCP_LOG_TRACE("freeing buffer..\n");
++ ZFCP_KFREE(buffer_start, buffer_size + 1); /* must match the allocation size */
++out:
++ ZFCP_LOG_TRACE("exit (%li)\n", user_len);
++ return (user_len);
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * zfcp_adapter_proc_open
++ *
++ * modified proc fs utilization (instead of using ..._generic):
++ *
++ * - to avoid (SMP) races, allocate buffers for output using
++ * the private_data member in the respective file struct
++ * such that read() just has to copy out of this buffer
++ *
++ */
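++/*
++ * Resulting lifecycle of this proc file (sketch):
++ *
++ * open()  - allocate a procbuf_t and render the complete output
++ *           into pbuf->buf once
++ * read()  - copy_to_user() from pbuf->buf at *offset, no re-rendering
++ * close() - free pbuf->buf and pbuf
++ *
++ * Each open() thus works on a private snapshot and concurrent readers
++ * cannot race on a shared buffer.
++ */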
++
++int zfcp_adapter_proc_open(struct inode *inode, struct file *file)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int len = 0;
++ procbuf_t *pbuf;
++ int retval=0;
++ const struct inode *ino = file->f_dentry->d_inode;
++ const struct proc_dir_entry *dp = ino->u.generic_ip;
++ zfcp_adapter_t *adapter = dp->data;
++ int i;
++
++ ZFCP_LOG_TRACE("enter (inode=0x%lx, file=0x%lx)\n",
++ (unsigned long)inode,
++ (unsigned long) file);
++
++#if 0
++ /* DEBUG: force an abort which then hangs; use of the module parameter dismisses the pending fsf_req */
++ ZFCP_LOG_NORMAL("try to recover forced and hung abort\n");
++ zfcp_erp_adapter_reopen(ZFCP_FIRST_ADAPTER, 0);
++#endif
++
++ pbuf = ZFCP_KMALLOC(sizeof(procbuf_t), GFP_KERNEL);
++ if (pbuf == NULL) {
++ ZFCP_LOG_NORMAL("error: Not enough memory available for "
++ "proc-fs action. Action will be ignored.\n");
++ retval = -ENOMEM;
++ goto out;
++ } else {
++ file->private_data = ( void * ) pbuf;
++ }
++
++ pbuf->buf = ZFCP_KMALLOC(ZFCP_MAX_PROC_SIZE, GFP_KERNEL);
++ if (pbuf->buf == NULL) {
++ ZFCP_LOG_NORMAL("error: Not enough memory available for "
++ "proc-fs action. Action will be ignored.\n");
++ ZFCP_KFREE(pbuf, sizeof(*pbuf));
++ retval = -ENOMEM;
++ goto out;
++ }
++
++ ZFCP_LOG_TRACE("Memory for adapter proc output allocated.\n");
++
++ MOD_INC_USE_COUNT;
++
++ len += sprintf(pbuf->buf+len,
++ "\nFCP adapter\n\n");
++
++ len += sprintf(pbuf->buf+len,
++ "FCP driver %s "
++ "(or for cryptography's sake 0x%08x)\n\n",
++ ZFCP_REVISION,
++ zfcp_data.driver_version);
++
++ len += sprintf(pbuf->buf+len,
++ "device number: 0x%04x "
++ "registered on irq: 0x%04x\n",
++ adapter->devno,
++ adapter->irq);
++ len += sprintf(pbuf->buf+len,
++ "WWNN: 0x%016Lx\n",
++ (llui_t)adapter->wwnn);
++ len += sprintf(pbuf->buf+len,
++ "WWPN: 0x%016Lx "
++ "S_ID: 0x%06x\n",
++ (llui_t)adapter->wwpn,
++ adapter->s_id);
++ len += sprintf(pbuf->buf+len,
++ "HW version: 0x%04x "
++ "LIC version: 0x%08x\n",
++ adapter->hydra_version,
++ adapter->fsf_lic_version);
++ len += sprintf(pbuf->buf+len,
++ "FC link speed: %d Gb/s "
++ "FC service class: %d\n",
++ adapter->fc_link_speed,
++ adapter->fc_service_class);
++ len += sprintf(pbuf->buf+len,
++ "Hardware Version: 0x%08x\n"
++ "Serial Number: %17s\n",
++ adapter->hardware_version,
++ adapter->serial_number);
++ len += sprintf(pbuf->buf+len,
++ "FC topology: %s\n",
++ zfcp_topologies[adapter->fc_topology]);
++#if 0
++ if (adapter->fc_topology == FSF_TOPO_P2P)
++ len += sprintf(pbuf->buf+len,
++ "D_ID of peer: 0x%06x\n",
++ adapter->peer_d_id);
++#endif
++ len += sprintf(pbuf->buf+len,
++ "SCSI host number: 0x%08x\n",
++ adapter->scsi_host->host_no);
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,
++ "Attached ports: %10d "
++ "QTCB size (bytes): %10ld\n",
++ adapter->ports,
++ sizeof(fsf_qtcb_t));
++ len += sprintf(pbuf->buf+len,
++ "Max SCSI ID of ports: 0x%08x "
++ "Max SCSI LUN of ports: 0x%08x\n",
++ adapter->max_scsi_id,
++ adapter->max_scsi_lun);
++ len += sprintf(pbuf->buf+len,
++ "FSF req seq. no: 0x%08x "
++ "FSF reqs active: %10d\n",
++ adapter->fsf_req_seq_no,
++ atomic_read(&adapter->fsf_reqs_active));
++ len += sprintf(pbuf->buf+len,
++ "Scatter-gather table-size: %5d "
++ "Max no of queued commands: %10d\n",
++ zfcp_data.scsi_host_template.sg_tablesize,
++ zfcp_data.scsi_host_template.can_queue);
++ len += sprintf(pbuf->buf+len,
++ "Uses clustering: %1d "
++ "Uses New Error-Handling Code: %1d\n",
++ zfcp_data.scsi_host_template.use_clustering,
++ zfcp_data.scsi_host_template.use_new_eh_code);
++ len += sprintf(pbuf->buf+len,
++ "ERP counter: 0x%08x ",
++ atomic_read(&adapter->erp_counter));
++ len += sprintf(pbuf->buf+len,
++ "Adapter Status: 0x%08x\n",
++ atomic_read(&adapter->status));
++ len += sprintf(pbuf->buf+len,
++ "SCSI commands delayed: %10d\n",
++ atomic_read(&adapter->fake_scsi_reqs_active));
++ len += sprintf(pbuf->buf+len,"\n");
++
++ if (proc_debug != 0) {
++ len += sprintf(pbuf->buf+len,
++ "Adapter Structure information:\n");
++ len += sprintf(pbuf->buf+len,
++ "Common Magic: 0x%08x "
++ "Specific Magic: 0x%08x\n",
++ adapter->common_magic,
++ adapter->specific_magic);
++ len += sprintf(pbuf->buf+len,
++ "Adapter struct at: 0x%08lx "
++ "List head at: 0x%08lx\n",
++ (unsigned long) adapter,
++ (unsigned long) &(adapter->list));
++ len += sprintf(pbuf->buf+len,
++ "Next list head: 0x%08lx "
++ "Previous list head: 0x%08lx\n",
++ (unsigned long) adapter->list.next,
++ (unsigned long) adapter->list.prev);
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,
++ "Scsi_Host struct at: 0x%08lx\n",
++ (unsigned long) adapter->scsi_host);
++ len += sprintf(pbuf->buf+len,
++ "Port list head at: 0x%08lx\n",
++ (unsigned long) &(adapter->port_list_head));
++ len += sprintf(pbuf->buf+len,
++ "Next list head: 0x%08lx "
++ "Previous list head: 0x%08lx\n",
++ (unsigned long) adapter->port_list_head.next,
++ (unsigned long) adapter->port_list_head.prev);
++ len += sprintf(pbuf->buf+len,
++ "List lock: 0x%08lx "
++ "List lock owner PC: 0x%08lx\n",
++ adapter->port_list_lock.lock,
++ adapter->port_list_lock.owner_pc);
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,
++ "O-FCP req list head: 0x%08lx\n",
++ (unsigned long) &(adapter->fsf_req_list_head));
++ len += sprintf(pbuf->buf+len,
++ "Next list head: 0x%08lx "
++ "Previous list head: 0x%08lx\n",
++ (unsigned long) adapter->fsf_req_list_head.next,
++ (unsigned long) adapter->fsf_req_list_head.prev);
++ len += sprintf(pbuf->buf+len,
++ "List lock: 0x%08lx "
++ "List lock owner PC: 0x%08lx\n",
++ adapter->fsf_req_list_lock.lock,
++ adapter->fsf_req_list_lock.owner_pc);
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,
++ "Request queue at: 0x%08lx\n",
++ (unsigned long)&(adapter->request_queue));
++ len += sprintf(pbuf->buf+len,
++ "Free index: %03d "
++ "Free count: %03d\n",
++ adapter->request_queue.free_index,
++ atomic_read(&adapter->request_queue.free_count));
++ len += sprintf(pbuf->buf+len,
++ "List lock: 0x%08lx "
++ "List lock owner PC: 0x%08lx\n",
++ adapter->request_queue.queue_lock.lock,
++ adapter->request_queue.queue_lock.owner_pc);
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,
++ "Response queue at: 0x%08lx\n",
++ (unsigned long)&(adapter->response_queue));
++ len += sprintf(pbuf->buf+len,
++ "Free index: %03d "
++ "Free count: %03d\n",
++ adapter->response_queue.free_index,
++ atomic_read(&adapter->response_queue.free_count));
++ len += sprintf(pbuf->buf+len,
++ "List lock: 0x%08lx "
++ "List lock owner PC: 0x%08lx\n",
++ adapter->response_queue.queue_lock.lock,
++ adapter->response_queue.queue_lock.owner_pc);
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,"DEVICE INFORMATION (devinfo):\n");
++ len += sprintf(pbuf->buf+len,"Status: ");
++ switch(adapter->devinfo.status) {
++ case 0:
++ len += sprintf(pbuf->buf+len,
++ "\"OK\"\n");
++ break;
++ case DEVSTAT_NOT_OPER:
++ len += sprintf(pbuf->buf+len,
++ "\"DEVSTAT_NOT_OPER\"\n");
++ break;
++ case DEVSTAT_DEVICE_OWNED:
++ len += sprintf(pbuf->buf+len,
++ "\"DEVSTAT_DEVICE_OWNED\"\n");
++ break;
++ case DEVSTAT_UNKNOWN_DEV:
++ len += sprintf(pbuf->buf+len,
++ "\"DEVSTAT_UNKNOWN_DEV\"\n");
++ break;
++ default:
++ len += sprintf(pbuf->buf+len,
++ "UNSPECIFIED STATE (value is 0x%x)\n",
++ adapter->devinfo.status);
++ break;
++ }
++ len += sprintf(pbuf->buf+len,
++ "Control Unit Type: 0x%04x "
++ "Control Unit Model: 0x%02x\n",
++ adapter->devinfo.sid_data.cu_type,
++ adapter->devinfo.sid_data.cu_model);
++ len += sprintf(pbuf->buf+len,
++ "Device Type: 0x%04x "
++ "Device Model: 0x%02x\n",
++ adapter->devinfo.sid_data.dev_type,
++ adapter->devinfo.sid_data.dev_model);
++ len += sprintf(pbuf->buf+len,
++ "CIWs: ");
++ for(i=0;i<4;i++){
++ len += sprintf(pbuf->buf+len,
++ "0x%08x ",
++ *(unsigned int *)(&adapter->devinfo.sid_data.ciw[i]));
++ }
++ len += sprintf(pbuf->buf+len,"\n ");
++ for(i=4;i<8;i++){
++ len += sprintf(pbuf->buf+len,
++ "0x%08x ",
++ *(unsigned int *)(&adapter->devinfo.sid_data.ciw[i]));
++ }
++ len += sprintf(pbuf->buf+len,"\n");
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,"DEVICE INFORMATION (devstat):\n");
++ len += sprintf(pbuf->buf+len,
++ "Interrupt Parameter: 0x%08lx "
++ "Last path used mask: 0x%02x\n",
++ adapter->devstat.intparm,
++ adapter->devstat.lpum);
++ len += sprintf(pbuf->buf+len,
++ "Channel Status: 0x%02x "
++ "Device Status: 0x%02x\n",
++ adapter->devstat.cstat,
++ adapter->devstat.dstat);
++ len += sprintf(pbuf->buf+len,
++ "Flag: 0x%08x "
++ "CCW address (from irb): 0x%08lx\n",
++ adapter->devstat.flag,
++ (unsigned long)adapter->devstat.cpa);
++ len += sprintf(pbuf->buf+len,
++ "Response count: 0x%08x "
++ "Sense Count: 0x%08x\n",
++ adapter->devstat.rescnt,
++ adapter->devstat.scnt);
++ len += sprintf(pbuf->buf+len,
++ "IRB: ");
++ for(i=0;i<4;i++){
++ len += sprintf(pbuf->buf+len,
++ "0x%08x ",
++ *((unsigned int *)(&adapter->devstat.ii.irb)+i));
++ }
++ len += sprintf(pbuf->buf+len,"\n");
++ len += sprintf(pbuf->buf+len,
++ "Sense Data: ");
++ for(i=0;i<4;i++){
++ len += sprintf(pbuf->buf+len,
++ "0x%08x ",
++ *((unsigned int *)(&adapter->devstat.ii.sense.data)+i));
++ }
++ len += sprintf(pbuf->buf+len,"\n");
++ }
++
++#ifdef ZFCP_STAT_QUEUES
++ len += sprintf(pbuf->buf + len, "\nOutbound queue full: 0x%08x ",
++ atomic_read(&adapter->outbound_queue_full));
++ len += sprintf(pbuf->buf + len, "Outbound requests: 0x%08x\n\n",
++ atomic_read(&adapter->outbound_total));
++#endif
++#ifdef ZFCP_STAT_REQSIZES
++ len += sprintf(pbuf->buf + len, "missed stats 0x%x\n",
++ atomic_read(&adapter->stat_errors));
++ len = zfcp_statistics_print(
++ adapter, &adapter->read_req_head,
++ "rr", pbuf->buf, len, ZFCP_MAX_PROC_SIZE);
++ len = zfcp_statistics_print(
++ adapter, &adapter->write_req_head,
++ "wr", pbuf->buf, len, ZFCP_MAX_PROC_SIZE);
++#endif
++
++ ZFCP_LOG_TRACE("stored %d bytes in proc buffer\n", len);
++
++ pbuf->len = len;
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++int zfcp_adapter_proc_close(struct inode *inode, struct file *file)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int rc=0;
++ procbuf_t *pbuf = (procbuf_t *) file->private_data;
++
++ ZFCP_LOG_TRACE("enter (inode=0x%lx, buffer=0x%lx)\n",
++ (unsigned long)inode,
++ (unsigned long) file);
++
++ if (pbuf) {
++ if (pbuf->buf) {
++ ZFCP_LOG_TRACE("Freeing pbuf->buf\n");
++ ZFCP_KFREE(pbuf->buf, ZFCP_MAX_PROC_SIZE);
++ } else {
++ ZFCP_LOG_DEBUG("No procfile buffer found to be freed\n");
++ }
++ ZFCP_LOG_TRACE("Freeing pbuf\n");
++ ZFCP_KFREE(pbuf, sizeof(*pbuf));
++ } else {
++ ZFCP_LOG_DEBUG("No procfile buffer found to be freed.\n");
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", rc);
++
++ MOD_DEC_USE_COUNT;
++
++ return rc;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * function: zfcp_adapter_proc_read
++ *
++ * returns: number of characters copied to user-space
++ * - <error-type> otherwise
++ */
++ssize_t zfcp_adapter_proc_read(struct file *file,
++ char *user_buf,
++ size_t user_len,
++ loff_t *offset)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ loff_t len;
++ procbuf_t *pbuf = (procbuf_t *) file->private_data;
++
++ ZFCP_LOG_TRACE(
++ "enter (file=0x%lx user_buf=0x%lx "
++ "user_length=%li *offset=0x%lx)\n",
++ (unsigned long)file,
++ (unsigned long)user_buf,
++ user_len,
++ (unsigned long)*offset);
++
++ if (*offset >= pbuf->len)
++ return 0;
++
++ len = min(user_len, (unsigned long)(pbuf->len - *offset));
++ if (copy_to_user(user_buf, &(pbuf->buf[*offset]), len))
++ return -EFAULT;
++ (*offset) += len;
++
++ ZFCP_LOG_TRACE("Size-offset is %ld, user_len is %ld\n",
++ ((unsigned long)(pbuf->len - *offset)),
++ user_len);
++
++ ZFCP_LOG_TRACE("exit (%Li)\n", len);
++
++ return len;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * function: zfcp_adapter_proc_write
++ *
++ * known bugs: does not work when small buffers are used
++ *
++ */
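++/*
++ * Commands accepted by this proc file (matched by prefix, see the
++ * strncmp() chain below): ZFCP_RESET_ERP / ZFCP_SET_ONLINE,
++ * ZFCP_SET_OFFLINE, ZFCP_STAT_RESET, ZFCP_STAT_OFF and ZFCP_STAT_ON
++ * (the latter three only take effect when compiled with
++ * ZFCP_STAT_REQSIZES).
++ */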
++
++ssize_t zfcp_adapter_proc_write(struct file *file,
++ const char *user_buf,
++ size_t user_len,
++ loff_t *offset)
++
++{
++
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ char *buffer = NULL;
++ size_t my_count = user_len;
++ const struct inode *ino = file->f_dentry->d_inode;
++ const struct proc_dir_entry *dp = ino->u.generic_ip;
++ zfcp_adapter_t *adapter = dp->data;
++
++ ZFCP_LOG_TRACE(
++ "enter (file=0x%lx user_buf=0x%lx "
++ "user_length=%li *offset=0x%lx)\n",
++ (unsigned long)file,
++ (unsigned long)user_buf,
++ user_len,
++ (unsigned long)*offset);
++
++ buffer = ZFCP_KMALLOC(my_count + 1, GFP_KERNEL);
++ if (!buffer) {
++ ZFCP_LOG_NORMAL("error: Not enough free memory for procfile"
++ " input. Input will be ignored.\n");
++ user_len = -ENOMEM;
++ goto out;
++ }
++ ZFCP_LOG_TRACE("buffer allocated...\n");
++
++ if (copy_from_user(buffer, user_buf, my_count)) {
++ ZFCP_KFREE(buffer, my_count + 1);
++ user_len = -EFAULT;
++ goto out;
++ }
++
++ buffer[my_count] = '\0'; /* for debugging */
++
++ ZFCP_LOG_TRACE("user_len= %ld, buffer=>%s<\n",
++ user_len, buffer);
++
++ if ((strncmp(ZFCP_RESET_ERP, buffer, strlen(ZFCP_RESET_ERP)) == 0) ||
++ (strncmp(ZFCP_SET_ONLINE, buffer, strlen(ZFCP_SET_ONLINE)) == 0)) {
++ ZFCP_LOG_NORMAL(
++ "user triggered (re)start of all operations on the "
++ "adapter with devno 0x%04x\n",
++ adapter->devno);
++ zfcp_erp_modify_adapter_status(
++ adapter,
++ ZFCP_STATUS_COMMON_RUNNING,
++ ZFCP_SET);
++ zfcp_erp_adapter_reopen(
++ adapter,
++ ZFCP_STATUS_COMMON_ERP_FAILED);
++ zfcp_erp_wait(adapter);
++ user_len = strlen(buffer);
++ } else if (strncmp(ZFCP_SET_OFFLINE, buffer, strlen(ZFCP_SET_OFFLINE)) == 0) {
++ ZFCP_LOG_NORMAL(
++ "user triggered shutdown of all operations on the "
++ "adapter with devno 0x%04x\n",
++ adapter->devno);
++ zfcp_erp_adapter_shutdown(adapter, 0);
++ zfcp_erp_wait(adapter);
++ user_len = strlen(buffer);
++ } else if (strncmp(ZFCP_STAT_RESET, buffer, strlen(ZFCP_STAT_RESET)) == 0) {
++#ifdef ZFCP_STAT_REQSIZES
++ ZFCP_LOG_NORMAL(
++ "user triggered reset of all statisticss for the "
++ "adapter with devno 0x%04x\n",
++ adapter->devno);
++ atomic_compare_and_swap(1, 0, &adapter->stat_on);
++ zfcp_statistics_clear(adapter, &adapter->read_req_head);
++ zfcp_statistics_clear(adapter, &adapter->write_req_head);
++ atomic_set(&adapter->stat_errors, 0);
++ atomic_compare_and_swap(0, 1, &adapter->stat_on);
++#endif
++ user_len = strlen(buffer);
++ } else if (strncmp(ZFCP_STAT_OFF, buffer, strlen(ZFCP_STAT_OFF)) == 0) {
++#ifdef ZFCP_STAT_REQSIZES
++ if (atomic_compare_and_swap(1, 0, &adapter->stat_on)) {
++ ZFCP_LOG_NORMAL(
++ "warning: all statistics for the adapter "
++ "with devno 0x%04x already off\n ",
++ adapter->devno);
++ } else {
++ ZFCP_LOG_NORMAL(
++ "user triggered shutdown of all statistics for the "
++ "adapter with devno 0x%04x\n",
++ adapter->devno);
++ zfcp_statistics_clear(adapter, &adapter->read_req_head);
++ zfcp_statistics_clear(adapter, &adapter->write_req_head);
++ }
++#endif
++ user_len = strlen(buffer);
++ } else if (strncmp(ZFCP_STAT_ON, buffer, strlen(ZFCP_STAT_ON)) == 0) {
++#ifdef ZFCP_STAT_REQSIZES
++ if (atomic_compare_and_swap(0, 1, &adapter->stat_on)) {
++ ZFCP_LOG_NORMAL(
++ "warning: all statistics for the adapter "
++ "with devno 0x%04x already on\n ",
++ adapter->devno);
++ } else {
++ ZFCP_LOG_NORMAL(
++ "user triggered (re)start of all statistics for the "
++ "adapter with devno 0x%04x\n",
++ adapter->devno);
++ }
++#endif
++ user_len = strlen(buffer);
++ } else {
++ ZFCP_LOG_INFO("error: unknown procfs command\n");
++ user_len = -EINVAL;
++ }
++
++ ZFCP_LOG_TRACE("freeing buffer..\n");
++ ZFCP_KFREE(buffer, my_count + 1);
++out:
++ ZFCP_LOG_TRACE("exit (%li)\n", user_len);
++ return (user_len);
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++int zfcp_port_proc_close(struct inode *inode, struct file *file)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int rc=0;
++ procbuf_t *pbuf = (procbuf_t *) file->private_data;
++
++ ZFCP_LOG_TRACE("enter (inode=0x%lx, buffer=0x%lx)\n",
++ (unsigned long)inode,
++ (unsigned long) file);
++
++ if (pbuf) {
++ if (pbuf->buf) {
++ ZFCP_LOG_TRACE("Freeing pbuf->buf\n");
++ ZFCP_KFREE(pbuf->buf, ZFCP_MAX_PROC_SIZE);
++ } else {
++ ZFCP_LOG_DEBUG("No procfile buffer found to be freed\n");
++ }
++ ZFCP_LOG_TRACE("Freeing pbuf\n");
++ ZFCP_KFREE(pbuf, sizeof(*pbuf));
++ } else {
++ ZFCP_LOG_DEBUG("No procfile buffer found to be freed.\n");
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", rc);
++
++ MOD_DEC_USE_COUNT;
++
++ return rc;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * function: zfcp_port_proc_read
++ *
++ * returns: number of characters copied to user-space
++ * - <error-type> otherwise
++ */
++ssize_t zfcp_port_proc_read(struct file *file,
++ char *user_buf,
++ size_t user_len,
++ loff_t *offset)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ loff_t len;
++ procbuf_t *pbuf = (procbuf_t *) file->private_data;
++
++ ZFCP_LOG_TRACE(
++ "enter (file=0x%lx user_buf=0x%lx "
++ "user_length=%li *offset=0x%lx)\n",
++ (unsigned long)file,
++ (unsigned long)user_buf,
++ user_len,
++ (unsigned long)*offset);
++
++ if (*offset >= pbuf->len)
++ return 0;
++
++ len = min(user_len, (unsigned long)(pbuf->len - *offset));
++ if (copy_to_user(user_buf, &(pbuf->buf[*offset]), len))
++ return -EFAULT;
++ (*offset) += len;
++
++ ZFCP_LOG_TRACE("Size-offset is %ld, user_len is %ld\n",
++ ((unsigned long)(pbuf->len - *offset)),
++ user_len);
++
++ ZFCP_LOG_TRACE("exit (%Li)\n", len);
++
++ return len;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * function: zfcp_port_proc_write
++ *
++ * known bugs: does not work when small buffers are used
++ *
++ */
++
++ssize_t zfcp_port_proc_write(struct file *file,
++ const char *user_buf,
++ size_t user_len,
++ loff_t *offset)
++
++{
++
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ char *buffer = NULL;
++ size_t my_count = user_len;
++ const struct inode *ino = file->f_dentry->d_inode;
++ const struct proc_dir_entry *dp = ino->u.generic_ip;
++ zfcp_port_t *port = dp->data;
++ zfcp_adapter_t *adapter = port->adapter;
++
++ ZFCP_LOG_TRACE(
++ "enter (file=0x%lx user_buf=0x%lx "
++ "user_length=%li *offset=0x%lx)\n",
++ (unsigned long)file,
++ (unsigned long)user_buf,
++ user_len,
++ (unsigned long)*offset);
++
++ buffer = ZFCP_KMALLOC(my_count + 1, GFP_KERNEL);
++ if (!buffer) {
++ ZFCP_LOG_NORMAL("error: Not enough free memory for procfile"
++ " input. Input will be ignored.\n");
++ user_len = -ENOMEM;
++ goto out;
++ }
++ ZFCP_LOG_TRACE("buffer allocated...\n");
++
++ if (copy_from_user(buffer, user_buf, my_count)) {
++ ZFCP_KFREE(buffer, my_count + 1);
++ user_len = -EFAULT;
++ goto out;
++ }
++
++ buffer[my_count] = '\0'; /* for debugging */
++
++ ZFCP_LOG_TRACE("user_len= %ld, buffer=>%s<\n",
++ user_len, buffer);
++
++ if ((strncmp(ZFCP_RESET_ERP, buffer, strlen(ZFCP_RESET_ERP)) == 0) ||
++ (strncmp(ZFCP_SET_ONLINE, buffer, strlen(ZFCP_SET_ONLINE)) == 0)) {
++ ZFCP_LOG_NORMAL(
++ "user triggered (re)start of all operations on the "
++ "port with WWPN 0x%016Lx on the adapter with devno "
++ "0x%04x\n",
++ (llui_t)port->wwpn,
++ adapter->devno);
++ zfcp_erp_modify_port_status(
++ port,
++ ZFCP_STATUS_COMMON_RUNNING,
++ ZFCP_SET);
++ zfcp_erp_port_reopen(
++ port,
++ ZFCP_STATUS_COMMON_ERP_FAILED);
++ zfcp_erp_wait(adapter);
++ user_len = strlen(buffer);
++ } else if (strncmp(ZFCP_SET_OFFLINE, buffer, strlen(ZFCP_SET_OFFLINE)) == 0) {
++ ZFCP_LOG_NORMAL(
++ "user triggered shutdown of all operations on the "
++ "port with WWPN 0x%016Lx on the adapter with devno "
++ "0x%04x\n",
++ (llui_t)port->wwpn,
++ adapter->devno);
++ zfcp_erp_port_shutdown(port, 0);
++ zfcp_erp_wait(adapter);
++ user_len = strlen(buffer);
++ } else if (strncmp(ZFCP_RTV, buffer, strlen(ZFCP_RTV)) == 0) {
++ ZFCP_LOG_NORMAL(
++ "Read timeout value (RTV) ELS "
++ "(wwpn=0x%016Lx devno=0x%04x)\n",
++ (llui_t)port->wwpn,
++ adapter->devno);
++ zfcp_els(port, ZFCP_LS_RTV);
++ user_len = strlen(buffer);
++ } else if (strncmp(ZFCP_RLS, buffer, strlen(ZFCP_RLS)) == 0) {
++ ZFCP_LOG_NORMAL(
++ "Read link status (RLS) ELS "
++ "(wwpn=0x%016Lx devno=0x%04x)\n",
++ (llui_t)port->wwpn,
++ adapter->devno);
++ zfcp_els(port, ZFCP_LS_RLS);
++ user_len = strlen(buffer);
++ } else if (strncmp(ZFCP_PDISC, buffer, strlen(ZFCP_PDISC)) == 0) {
++ ZFCP_LOG_NORMAL(
++ "Port discovery (PDISC) ELS "
++ "(wwpn=0x%016Lx devno=0x%04x)\n",
++ (llui_t)port->wwpn,
++ adapter->devno);
++ zfcp_els(port, ZFCP_LS_PDISC);
++ user_len = strlen(buffer);
++ } else if (strncmp(ZFCP_ADISC, buffer, strlen(ZFCP_ADISC)) == 0) {
++ ZFCP_LOG_NORMAL(
++ "Address discovery (ADISC) ELS "
++ "(wwpn=0x%016Lx devno=0x%04x)\n",
++ (llui_t)port->wwpn,
++ adapter->devno);
++ zfcp_els(port, ZFCP_LS_ADISC);
++ user_len = strlen(buffer);
++ } else {
++ ZFCP_LOG_INFO("error: unknown procfs command\n");
++ user_len = -EINVAL;
++ }
++
++ ZFCP_LOG_TRACE("freeing buffer..\n");
++ ZFCP_KFREE(buffer, my_count + 1);
++out:
++ ZFCP_LOG_TRACE("exit (%li)\n", user_len);
++ return (user_len);
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * zfcp_port_proc_open
++ *
++ * modified proc fs utilization (instead of using ..._generic):
++ *
++ * - to avoid (SMP) races, allocate buffers for output using
++ * the private_data member in the respective file struct
++ * such that read() just has to copy out of this buffer
++ *
++ */
++
++int zfcp_port_proc_open(struct inode *inode, struct file *file)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int len = 0;
++ procbuf_t *pbuf;
++ int retval=0;
++ const struct inode *ino = file->f_dentry->d_inode;
++ const struct proc_dir_entry *dp = ino->u.generic_ip;
++ zfcp_port_t *port = dp->data;
++
++ ZFCP_LOG_TRACE("enter (inode=0x%lx, file=0x%lx)\n",
++ (unsigned long)inode,
++ (unsigned long) file);
++
++ pbuf = ZFCP_KMALLOC(sizeof(procbuf_t), GFP_KERNEL);
++ if (pbuf == NULL) {
++ ZFCP_LOG_NORMAL("error: Not enough memory available for "
++ "proc-fs action. Action will be ignored.\n");
++ retval = -ENOMEM;
++ goto out;
++ } else {
++ file->private_data = ( void * ) pbuf;
++ }
++
++ pbuf->buf = ZFCP_KMALLOC(ZFCP_MAX_PROC_SIZE, GFP_KERNEL);
++ if (pbuf->buf == NULL) {
++ ZFCP_LOG_NORMAL("error: Not enough memory available for "
++ "proc-fs action. Action will be ignored.\n");
++ ZFCP_KFREE(pbuf, sizeof(*pbuf));
++ retval = -ENOMEM;
++ goto out;
++ }
++
++ ZFCP_LOG_TRACE("Memory for port proc output allocated.\n");
++
++ MOD_INC_USE_COUNT;
++
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,
++ "Port Information: \n");
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,
++ "WWNN: 0x%016Lx "
++ "WWPN: 0x%016Lx\n",
++ (llui_t)port->wwnn,
++ (llui_t)port->wwpn);
++ len += sprintf(pbuf->buf+len,
++ "SCSI ID: 0x%08x "
++ "Max SCSI LUN: 0x%08x\n",
++ port->scsi_id,
++ port->max_scsi_lun);
++ len += sprintf(pbuf->buf+len,
++ "D_ID: 0x%06x\n",
++ port->d_id);
++ len += sprintf(pbuf->buf+len,
++ "Handle: 0x%08x\n",
++ port->handle);
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,
++ "Attached units: %10d\n",
++ port->units);
++ len += sprintf(pbuf->buf+len,
++ "ERP counter: 0x%08x\n",
++ atomic_read(&port->erp_counter));
++ len += sprintf(pbuf->buf+len,
++ "Port Status: 0x%08x\n",
++ atomic_read(&port->status));
++ len += sprintf(pbuf->buf+len,"\n");
++
++ if (proc_debug != 0) {
++ len += sprintf(pbuf->buf+len,
++ "Port Structure information:\n");
++ len += sprintf(pbuf->buf+len,
++ "Common Magic: 0x%08x "
++ "Specific Magic: 0x%08x\n",
++ port->common_magic,
++ port->specific_magic);
++ len += sprintf(pbuf->buf+len,
++ "Port struct at: 0x%08lx "
++ "List head at: 0x%08lx\n",
++ (unsigned long) port,
++ (unsigned long) &(port->list));
++ len += sprintf(pbuf->buf+len,
++ "Next list head: 0x%08lx "
++ "Previous list head: 0x%08lx\n",
++ (unsigned long) port->list.next,
++ (unsigned long) port->list.prev);
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,
++ "Unit list head at: 0x%08lx\n",
++ (unsigned long) &(port->unit_list_head));
++ len += sprintf(pbuf->buf+len,
++ "Next list head: 0x%08lx "
++ "Previous list head: 0x%08lx\n",
++ (unsigned long) port->unit_list_head.next,
++ (unsigned long) port->unit_list_head.prev);
++ len += sprintf(pbuf->buf+len,
++ "List lock: 0x%08lx "
++ "List lock owner PC: 0x%08lx\n",
++ port->unit_list_lock.lock,
++ port->unit_list_lock.owner_pc);
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,
++ "Parent adapter at: 0x%08lx\n",
++ (unsigned long) port->adapter);
++ }
++
++ ZFCP_LOG_TRACE("stored %d bytes in proc buffer\n", len);
++
++ pbuf->len = len;
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * zfcp_unit_proc_open
++ *
++ * - to avoid (SMP) races, allocate buffers for output using
++ * the private_data member in the respective file struct
++ * such that read() just has to copy out of this buffer
++ *
++ */
++
++int zfcp_unit_proc_open(struct inode *inode, struct file *file)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int len = 0;
++ procbuf_t *pbuf;
++ int retval=0;
++ const struct inode *ino = file->f_dentry->d_inode;
++ const struct proc_dir_entry *dp = ino->u.generic_ip;
++ zfcp_unit_t *unit = dp->data;
++
++ ZFCP_LOG_TRACE("enter (inode=0x%lx, file=0x%lx)\n",
++ (unsigned long)inode,
++ (unsigned long) file);
++
++ pbuf = ZFCP_KMALLOC(sizeof(procbuf_t), GFP_KERNEL);
++ if (pbuf == NULL) {
++ ZFCP_LOG_NORMAL("error: Not enough memory available for "
++ "proc-fs action. Action will be ignored.\n");
++ retval = -ENOMEM;
++ goto out;
++ } else {
++ file->private_data = ( void * ) pbuf;
++ }
++
++ pbuf->buf = ZFCP_KMALLOC(ZFCP_MAX_PROC_SIZE, GFP_KERNEL);
++ if (pbuf->buf == NULL) {
++ ZFCP_LOG_NORMAL("error: Not enough memory available for "
++ "proc-fs action. Action will be ignored.\n");
++ ZFCP_KFREE(pbuf, sizeof(*pbuf));
++ retval = -ENOMEM;
++ goto out;
++ }
++
++ ZFCP_LOG_TRACE("Memory for unit proc output allocated.\n");
++
++ MOD_INC_USE_COUNT;
++
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,
++ "Unit Information: \n");
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,
++ "SCSI LUN: 0x%08x "
++ "FCP_LUN: 0x%016Lx\n",
++ unit->scsi_lun,
++ (llui_t)unit->fcp_lun);
++ len += sprintf(pbuf->buf+len,
++ "Handle: 0x%08x\n",
++ unit->handle);
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,
++ "ERP counter: 0x%08x\n",
++ atomic_read(&unit->erp_counter));
++ len += sprintf(pbuf->buf+len,
++ "Unit Status: 0x%08x\n",
++ atomic_read(&unit->status));
++ len += sprintf(pbuf->buf+len,"\n");
++
++ if (proc_debug != 0) {
++ len += sprintf(pbuf->buf+len,
++ "Unit Structure information:\n");
++ len += sprintf(pbuf->buf+len,
++ "Common Magic: 0x%08x "
++ "Specific Magic: 0x%08x\n",
++ unit->common_magic,
++ unit->specific_magic);
++ len += sprintf(pbuf->buf+len,
++ "Unit struct at: 0x%08lx "
++ "List head at: 0x%08lx\n",
++ (unsigned long) unit,
++ (unsigned long) &(unit->list));
++ len += sprintf(pbuf->buf+len,
++ "Next list head: 0x%08lx "
++ "Previous list head: 0x%08lx\n",
++ (unsigned long) unit->list.next,
++ (unsigned long) unit->list.prev);
++ len += sprintf(pbuf->buf+len,"\n");
++
++ len += sprintf(pbuf->buf+len,
++ "Parent port at: 0x%08lx "
++ "SCSI dev struct at: 0x%08lx\n",
++ (unsigned long) unit->port,
++ (unsigned long) unit->device);
++ }
++
++ ZFCP_LOG_TRACE("stored %d bytes in proc buffer\n", len);
++
++ pbuf->len = len;
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++int zfcp_unit_proc_close(struct inode *inode, struct file *file)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int rc=0;
++ procbuf_t *pbuf = (procbuf_t *) file->private_data;
++
++ ZFCP_LOG_TRACE("enter (inode=0x%lx, buffer=0x%lx)\n",
++ (unsigned long)inode,
++ (unsigned long) file);
++
++ if (pbuf) {
++ if (pbuf->buf) {
++ ZFCP_LOG_TRACE("Freeing pbuf->buf\n");
++ ZFCP_KFREE(pbuf->buf, ZFCP_MAX_PROC_SIZE);
++ } else {
++ ZFCP_LOG_DEBUG("No procfile buffer found to be freed\n");
++ }
++ ZFCP_LOG_TRACE("Freeing pbuf\n");
++ ZFCP_KFREE(pbuf, sizeof(*pbuf));
++ } else {
++ ZFCP_LOG_DEBUG("No procfile buffer found to be freed.\n");
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", rc);
++
++ MOD_DEC_USE_COUNT;
++
++ return rc;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * function: zfcp_unit_proc_read
++ *
++ * returns: number of characters copied to user-space
++ * - <error-type> otherwise
++ */
++ssize_t zfcp_unit_proc_read(struct file *file,
++ char *user_buf,
++ size_t user_len,
++ loff_t *offset)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ loff_t len;
++ procbuf_t *pbuf = (procbuf_t *) file->private_data;
++
++ ZFCP_LOG_TRACE(
++ "enter (file=0x%lx user_buf=0x%lx "
++ "user_length=%li *offset=0x%lx)\n",
++ (unsigned long)file,
++ (unsigned long)user_buf,
++ user_len,
++ (unsigned long)*offset);
++
++ if (*offset >= pbuf->len)
++ return 0;
++
++ len = min(user_len, (unsigned long)(pbuf->len - *offset));
++ if (copy_to_user(user_buf, &(pbuf->buf[*offset]), len))
++ return -EFAULT;
++ (*offset) += len;
++
++ ZFCP_LOG_TRACE("Size-offset is %ld, user_len is %ld\n",
++ ((unsigned long)(pbuf->len - *offset)),
++ user_len);
++
++ ZFCP_LOG_TRACE("exit (%Li)\n", len);
++
++ return len;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * function: zfcp_unit_proc_write
++ *
++ */
++
++ssize_t zfcp_unit_proc_write(struct file *file,
++ const char *user_buf,
++ size_t user_len,
++ loff_t *offset)
++
++{
++
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ char *buffer = NULL;
++ size_t my_count = user_len;
++ const struct inode *ino = file->f_dentry->d_inode;
++ const struct proc_dir_entry *dp = ino->u.generic_ip;
++ zfcp_unit_t *unit = dp->data;
++
++ ZFCP_LOG_TRACE(
++ "enter (file=0x%lx user_buf=0x%lx "
++ "user_length=%li *offset=0x%lx)\n",
++ (unsigned long)file,
++ (unsigned long)user_buf,
++ user_len,
++ (unsigned long)*offset);
++
++ buffer = ZFCP_KMALLOC(my_count + 1, GFP_KERNEL);
++ if (!buffer) {
++ ZFCP_LOG_NORMAL("error: Not enough free memory for procfile"
++ " input. Input will be ignored.\n");
++ user_len = -ENOMEM;
++ goto out;
++ }
++ ZFCP_LOG_TRACE("buffer allocated...\n");
++
++ if (copy_from_user(buffer, user_buf, my_count)) {
++ ZFCP_KFREE(buffer, my_count + 1);
++ user_len = -EFAULT;
++ goto out;
++ }
++
++ buffer[my_count] = '\0'; /* for debugging */
++
++ ZFCP_LOG_TRACE("user_len= %ld, buffer=>%s<\n",
++ user_len, buffer);
++
++ if ((strncmp(ZFCP_RESET_ERP, buffer, strlen(ZFCP_RESET_ERP)) == 0) ||
++ (strncmp(ZFCP_SET_ONLINE, buffer, strlen(ZFCP_SET_ONLINE)) == 0)) {
++ ZFCP_LOG_NORMAL(
++ "user triggered (re)start of all operations on the "
++ "unit with FCP_LUN 0x%016Lx on the port with WWPN 0x%016Lx "
++ "on the adapter with devno 0x%04x\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ zfcp_erp_modify_unit_status(
++ unit,
++ ZFCP_STATUS_COMMON_RUNNING,
++ ZFCP_SET);
++ zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED);
++ zfcp_erp_wait(unit->port->adapter);
++ user_len = strlen(buffer);
++ } else if (strncmp(ZFCP_SET_OFFLINE, buffer, strlen(ZFCP_SET_OFFLINE)) == 0) {
++ ZFCP_LOG_NORMAL(
++ "user triggered shutdown of all operations on the "
++ "unit with FCP_LUN 0x%016Lx on the port with WWPN 0x%016Lx "
++ "on the adapter with devno 0x%04x\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ zfcp_erp_unit_shutdown(unit, 0);
++ zfcp_erp_wait(unit->port->adapter);
++ user_len = strlen(buffer);
++ } else {
++ ZFCP_LOG_INFO("error: unknown procfs command\n");
++ user_len = -EINVAL;
++ }
++
++ ZFCP_LOG_TRACE("freeing buffer..\n");
++ ZFCP_KFREE(buffer, my_count + 1);
++out:
++ ZFCP_LOG_TRACE("exit (%li)\n", user_len);
++ return (user_len);
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * function: zfcp_scsi_detect
++ *
++ * purpose: This routine is called by the SCSI stack mid layer
++ * to query detected host bus adapters.
++ *
++ * returns: number of detected HBAs (0 if no HBAs are detected)
++ */
++int zfcp_scsi_detect(Scsi_Host_Template *shtpnt)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int adapters = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (shtpnt =0x%lx)\n",
++ (unsigned long) shtpnt);
++
++ spin_unlock_irq(&io_request_lock);
++ adapters = zfcp_adapter_scsi_register_all();
++ spin_lock_irq(&io_request_lock);
++
++ ZFCP_LOG_TRACE(
++ "exit (adapters =%d)\n",
++ adapters);
++
++ return adapters;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: for all adapters which are not yet registered with SCSI stack:
++ * wait for finish of erp and register adapter with SCSI stack then
++ *
++ * returns: number of adapters registered with SCSI stack
++ *
++ * FIXME(design): /proc/scsi/zfcp/add-del_map must be locked as long as we
++ * are in such a loop as implemented here.
++ * We need a guarantee that no adapter will (dis)sappear.
++ * Otherwise list corruption may be caused.
++ * (We can't hold the lock all the time due to possible
++ * calls to schedule())
++ */
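++/*
++ * Note on the loop below: adapter_list_lock is dropped before
++ * zfcp_erp_wait() (which may sleep) and re-taken afterwards to fetch
++ * the next list entry; this window is what the FIXME above refers to.
++ */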
++static int zfcp_adapter_scsi_register_all(void)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int retval = 0;
++ unsigned long flags;
++ zfcp_adapter_t *adapter;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ read_lock_irqsave(&zfcp_data.adapter_list_lock, flags);
++ adapter = ZFCP_FIRST_ADAPTER;
++ while (adapter) {
++ read_unlock_irqrestore(&zfcp_data.adapter_list_lock, flags);
++ if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status)) {
++ ZFCP_LOG_DEBUG(
++ "adapter with devno 0x%04x needs "
++ "to be registered with SCSI stack, "
++ "waiting for erp to settle\n",
++ adapter->devno);
++ zfcp_erp_wait(adapter);
++ if (zfcp_adapter_scsi_register(adapter) == 0)
++ retval++;
++ }
++ read_lock_irqsave(&zfcp_data.adapter_list_lock, flags);
++ adapter = ZFCP_NEXT_ADAPTER(adapter);
++ }
++ read_unlock_irqrestore(&zfcp_data.adapter_list_lock, flags);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++void zfcp_scsi_select_queue_depth(struct Scsi_Host *host, Scsi_Device *dev_list)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ zfcp_adapter_t *adapter = (zfcp_adapter_t *)host->hostdata[0];
++ zfcp_port_t *port = NULL;
++ zfcp_unit_t *unit = NULL;
++ unsigned long flags=0;
++
++ ZFCP_LOG_TRACE("enter (host =0x%lx, dev_list=0x%lx)\n",
++ (unsigned long) host,
++ (unsigned long) dev_list);
++
++ read_lock_irqsave(&adapter->port_list_lock, flags);
++ ZFCP_FOR_EACH_PORT(adapter, port) {
++ read_lock(&port->unit_list_lock);
++ ZFCP_FOR_EACH_UNIT(port, unit) {
++ ZFCP_LOG_DEBUG("Determinig if unit 0x%lx"
++ " supports tagging\n",
++ (unsigned long) unit);
++ if (!unit->device)
++ continue;
++
++ if (unit->device->tagged_supported) {
++ ZFCP_LOG_DEBUG("Enabling tagging for "
++ "unit 0x%lx \n",
++ (unsigned long) unit);
++ unit->device->tagged_queue = 1;
++ unit->device->current_tag = 0;
++ unit->device->queue_depth = ZFCP_CMND_PER_LUN;
++ atomic_set_mask(ZFCP_STATUS_UNIT_ASSUMETCQ, &unit->status);
++ } else {
++ ZFCP_LOG_DEBUG("Disabling tagging for "
++ "unit 0x%lx \n",
++ (unsigned long) unit);
++ unit->device->tagged_queue = 0;
++ unit->device->current_tag = 0;
++ unit->device->queue_depth = 1;
++ atomic_clear_mask(ZFCP_STATUS_UNIT_ASSUMETCQ, &unit->status);
++ }
++ }
++ read_unlock(&port->unit_list_lock);
++ }
++ read_unlock_irqrestore(&adapter->port_list_lock, flags);
++
++ ZFCP_LOG_TRACE("exit\n");
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_scsi_revoke
++ *
++ * purpose:
++ *
++ * returns:
++ */
++int zfcp_scsi_revoke(Scsi_Device *sdpnt)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int retval = 0;
++ zfcp_unit_t *unit = (zfcp_unit_t*) sdpnt->hostdata;
++#if 0
++ zfcp_port_t *port = unit->port;
++#endif
++
++ ZFCP_LOG_TRACE("enter (sdpnt=0x%lx)\n", (unsigned long)sdpnt);
++
++ if (!unit) {
++ ZFCP_LOG_INFO(
++ "no unit associated with SCSI device at "
++ "address 0x%lx\n",
++ (unsigned long)sdpnt);
++ goto out;
++ }
++
++#if 0
++ /* Shutdown entire port if we are going to shutdown the last unit. */
++ if (port->units == 1) {
++ zfcp_erp_port_shutdown(port, 0);
++ zfcp_erp_wait(port->adapter);
++ } else {
++ zfcp_erp_unit_shutdown(unit, 0);
++ zfcp_erp_wait(port->adapter);
++ }
++#endif
++ sdpnt->hostdata = NULL;
++ unit->device = NULL;
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_scsi_release
++ *
++ * purpose: called from SCSI stack mid layer to make this driver
++ * cleanup I/O and resources for this adapter
++ *
++ * returns:
++ */
++int zfcp_scsi_release(struct Scsi_Host *shpnt)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter (shpnt=0x%lx)\n", (unsigned long)shpnt);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_scsi_insert_into_fake_queue
++ *
++ * purpose:
++ *
++ *
++ * returns:
++ */
++inline void zfcp_scsi_insert_into_fake_queue(zfcp_adapter_t *adapter, Scsi_Cmnd *new_cmnd)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ unsigned long flags;
++ Scsi_Cmnd *current_cmnd;
++
++ ZFCP_LOG_TRACE("enter (adapter=0x%lx, cmnd=0x%lx)\n",
++ (unsigned long)adapter,
++ (unsigned long)new_cmnd);
++
++ ZFCP_LOG_DEBUG("Faking SCSI command:\n");
++ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
++ (char*)new_cmnd->cmnd,
++ new_cmnd->cmd_len);
++
++ new_cmnd->host_scribble = NULL;
++
++ write_lock_irqsave(&adapter->fake_list_lock,flags);
++ if(adapter->first_fake_cmnd==NULL) {
++ adapter->first_fake_cmnd = new_cmnd;
++ adapter->fake_scsi_timer.function =
++ zfcp_scsi_process_and_clear_fake_queue;
++ adapter->fake_scsi_timer.data =
++ (unsigned long)adapter;
++ adapter->fake_scsi_timer.expires =
++ jiffies + ZFCP_FAKE_SCSI_COMPLETION_TIME;
++ add_timer(&adapter->fake_scsi_timer);
++ } else {
++ for (current_cmnd = adapter->first_fake_cmnd;
++ current_cmnd->host_scribble != NULL;
++ current_cmnd = (Scsi_Cmnd *)(current_cmnd->host_scribble))
++ ; /* walk to the tail of the fake queue */
++ current_cmnd->host_scribble = (char *)new_cmnd;
++ }
++ atomic_inc(&adapter->fake_scsi_reqs_active);
++ write_unlock_irqrestore(&adapter->fake_list_lock,flags);
++
++ ZFCP_LOG_TRACE("exit\n");
++
++ return;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_scsi_process_and_clear_fake_queue
++ *
++ * purpose:
++ *
++ *
++ * returns:
++ */
++inline void zfcp_scsi_process_and_clear_fake_queue(unsigned long data)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ unsigned long flags;
++ Scsi_Cmnd *current_cmnd;
++ Scsi_Cmnd *next_cmnd;
++ zfcp_adapter_t *adapter=(zfcp_adapter_t *)data;
++
++ ZFCP_LOG_TRACE("enter (data=0x%lx)\n", data);
++
++ /*
++ * We need a common lock for scsi_req on command completion
++ * as well as on command abort to avoid race conditions
++ * during completions and aborts taking place at the same time.
++ * It needs to be the outer lock as in the eh_abort_handler.
++ */
++ read_lock_irqsave(&adapter->abort_lock, flags);
++ write_lock(&adapter->fake_list_lock);
++ if(adapter->first_fake_cmnd == NULL) {
++ ZFCP_LOG_DEBUG("Processing of fake-queue called "
++ "for an empty queue.\n");
++ } else {
++ current_cmnd=adapter->first_fake_cmnd;
++ do {
++ next_cmnd=(Scsi_Cmnd *)(current_cmnd->host_scribble);
++ current_cmnd->host_scribble = NULL;
++#if 0
++ zfcp_cmd_dbf_event_scsi("clrfake", adapter, current_cmnd);
++#endif
++ current_cmnd->scsi_done(current_cmnd);
++#ifdef ZFCP_DEBUG_REQUESTS
++ debug_text_event(adapter->req_dbf, 2, "fk_done:");
++ debug_event(adapter->req_dbf, 2, &current_cmnd, sizeof(unsigned long));
++#endif /* ZFCP_DEBUG_REQUESTS */
++ atomic_dec(&adapter->fake_scsi_reqs_active);
++ current_cmnd=next_cmnd;
++ } while (next_cmnd != NULL);
++ /* Set list to empty */
++ adapter->first_fake_cmnd = NULL;
++ }
++ write_unlock(&adapter->fake_list_lock);
++ read_unlock_irqrestore(&adapter->abort_lock, flags);
++
++ ZFCP_LOG_TRACE("exit\n");
++
++ return;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++static void zfcp_scsi_command_fail(
++ zfcp_unit_t *unit,
++ Scsi_Cmnd *scsi_cmnd,
++ int result)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ zfcp_adapter_t *adapter = unit->port->adapter;
++
++#ifdef ZFCP_DEBUG_REQUESTS
++ debug_text_event(adapter->req_dbf, 2, "de_done:");
++ debug_event(adapter->req_dbf, 2, &scsi_cmnd, sizeof(unsigned long));
++#endif /* ZFCP_DEBUG_REQUESTS */
++
++ scsi_cmnd->SCp.ptr = (char*)0;
++ scsi_cmnd->result = result;
++
++ zfcp_cmd_dbf_event_scsi("failing", adapter, scsi_cmnd);
++
++ scsi_cmnd->scsi_done(scsi_cmnd);
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
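++/*
++ * zfcp_scsi_command_fake: SCp.ptr is reused as a jiffies timestamp of
++ * the first deferral; once ZFCP_SCSI_RETRY_TIMEOUT has elapsed the
++ * command is failed with DID_TIME_OUT instead of being re-queued.
++ */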
++static void zfcp_scsi_command_fake(
++ zfcp_unit_t *unit,
++ Scsi_Cmnd *scsi_cmnd)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ if (scsi_cmnd->SCp.ptr) {
++ if (((unsigned long)scsi_cmnd->SCp.ptr + ZFCP_SCSI_RETRY_TIMEOUT)
++ < jiffies) {
++ /* leave it to the SCSI stack error handling (eh) */
++ zfcp_scsi_command_fail(unit, scsi_cmnd, DID_TIME_OUT << 16);
++ return;
++ }
++ } else scsi_cmnd->SCp.ptr = (char*)jiffies;
++ scsi_cmnd->retries--; /* -1 is ok */
++ scsi_cmnd->result |= DID_SOFT_ERROR << 16
++ | SUGGEST_RETRY << 24;
++ zfcp_scsi_insert_into_fake_queue(unit->port->adapter, scsi_cmnd);
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/**
++ * zfcp_scsi_command_async - worker for zfcp_scsi_queuecommand and
++ * zfcp_scsi_command_sync
++ */
++int zfcp_scsi_command_async(
++ zfcp_unit_t *unit,
++ Scsi_Cmnd *scsi_cmnd,
++ void (* done)(Scsi_Cmnd *))
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ scsi_cmnd->scsi_done = done;
++ scsi_cmnd->result = 0;
++
++ if (!unit) {
++ zfcp_scsi_command_fail(unit, scsi_cmnd, DID_NO_CONNECT << 16);
++ goto out;
++ }
++
++ if (atomic_test_mask(
++ ZFCP_STATUS_COMMON_ERP_FAILED,
++ &unit->status)) {
++ zfcp_scsi_command_fail(unit, scsi_cmnd, DID_ERROR << 16);
++ goto out;
++ }
++
++ if (!atomic_test_mask(
++ ZFCP_STATUS_COMMON_RUNNING,
++ &unit->status)) {
++ zfcp_scsi_command_fail(unit, scsi_cmnd, DID_ERROR << 16);
++ goto out;
++ }
++
++ if (!atomic_test_mask(
++ ZFCP_STATUS_COMMON_UNBLOCKED,
++ &unit->status)) {
++ zfcp_scsi_command_fake(unit, scsi_cmnd);
++ goto out;
++ }
++
++ if (zfcp_fsf_send_fcp_command_task(unit, scsi_cmnd) < 0)
++ zfcp_scsi_command_fake(unit, scsi_cmnd);
++
++out:
++ return 0;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++void zfcp_scsi_command_sync_handler(Scsi_Cmnd *scsi_cmnd)
++{
++ struct completion *wait = (struct completion*) scsi_cmnd->bh_next;
++ complete(wait);
++}
++
++
++/**
++ * zfcp_scsi_command_sync - send a SCSI command and wait for completion
++ * returns 0, errors are indicated by scsi_cmnd->result
++ */
++int zfcp_scsi_command_sync(
++ zfcp_unit_t *unit,
++ Scsi_Cmnd *scsi_cmnd)
++{
++ DECLARE_COMPLETION(wait);
++
++ scsi_cmnd->bh_next = (void*) &wait; /* silent re-use */
++ zfcp_scsi_command_async(
++ unit,
++ scsi_cmnd,
++ zfcp_scsi_command_sync_handler);
++ wait_for_completion(&wait);
++
++ return 0;
++}
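++/*
++ * Hypothetical usage sketch (not part of this driver): send a prepared
++ * Scsi_Cmnd synchronously and inspect the result afterwards.
++ *
++ *   zfcp_scsi_command_sync(unit, scsi_cmnd);
++ *   if (scsi_cmnd->result != 0)
++ *           handle_failure(scsi_cmnd);   (hypothetical helper)
++ */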
++
++
++
++/*
++ * function: zfcp_scsi_queuecommand
++ *
++ * purpose: enqueues a SCSI command to the specified target device
++ *
++ * note: The scsi_done midlayer function may be called directly from
++ * within queuecommand provided queuecommand returns with success (0)
++ * If it fails, it is expected that the command could not be sent
++ * and is still available for processing.
++ * As we ensure that queuecommand never fails, we have the choice
++ * to call done directly wherever we please.
++ * Thus, any kind of send errors other than those indicating
++ * 'infinite' retries will be reported directly.
++ * Retry requests are put into a list to be processed under timer
++ * control once in a while to allow for other operations to
++ * complete in the meantime.
++ *
++ * returns: 0 - success, SCSI command enqueued
++ * !0 - failure, note that we never allow this to happen as the
++ * SCSI stack would block indefinitely should a non-zero return
++ * value be reported if there are no outstanding commands
++ * (as in when the queues are down)
++ */
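++/*
++ * Decision summary for an incoming command, as implemented in
++ * zfcp_scsi_command_async() above:
++ *
++ *   no unit               -> fail with DID_NO_CONNECT
++ *   unit ERP_FAILED       -> fail with DID_ERROR
++ *   unit not RUNNING      -> fail with DID_ERROR
++ *   unit not UNBLOCKED    -> fake (deferred retry via timer queue)
++ *   send_fcp_command < 0  -> fake (deferred retry via timer queue)
++ */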
++int zfcp_scsi_queuecommand(
++ Scsi_Cmnd *scsi_cmnd,
++ void (* done)(Scsi_Cmnd *))
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ zfcp_unit_t *unit;
++ zfcp_adapter_t *adapter;
++
++ ZFCP_LOG_TRACE(
++ "enter (scsi_cmnd=0x%lx done=0x%lx)\n",
++ (unsigned long)scsi_cmnd,
++ (unsigned long)done);
++
++ spin_unlock_irq(&io_request_lock);
++
++ /*
++ * figure out adapter
++ * (previously stored there by the driver when
++ * the adapter was registered)
++ */
++ adapter = (zfcp_adapter_t*) scsi_cmnd->host->hostdata[0];
++
++ /*
++ * figure out target device
++ * (stored there by the driver when the first command
++ * is sent to this target device)
++ * ATTENTION: assumes hostdata initialized to NULL by
++ * mid layer (see scsi_scan.c)
++ */
++ if (!scsi_cmnd->device->hostdata) {
++ unit = zfcp_unit_lookup(
++ adapter,
++ scsi_cmnd->device->channel,
++ scsi_cmnd->device->id,
++ scsi_cmnd->device->lun);
++ /* Is specified unit configured? */
++ if (unit) {
++ scsi_cmnd->device->hostdata = unit;
++ unit->device = scsi_cmnd->device;
++ ZFCP_LOG_DEBUG(
++ "logical unit address (0x%lx) saved "
++ "for direct lookup and scsi_stack "
++ "pointer 0x%lx saved in unit structure\n",
++ (unsigned long)unit,
++ (unsigned long)unit->device);
++ }
++ } else unit = (zfcp_unit_t*) scsi_cmnd->device->hostdata;
++
++ zfcp_scsi_command_async(unit, scsi_cmnd, done);
++
++ spin_lock_irq(&io_request_lock);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", 0);
++
++ return 0;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_unit_lookup
++ *
++ * purpose:
++ *
++ * returns:
++ *
++ * context:
++ */
++static zfcp_unit_t* zfcp_unit_lookup(
++ zfcp_adapter_t *adapter,
++ int channel,
++ int id,
++ int lun)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ zfcp_port_t *port;
++ zfcp_unit_t *unit = NULL;
++ unsigned long flags;
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter devno=0x%04x, channel=%i, id=%i, lun=%i)\n",
++ adapter->devno,
++ channel,
++ id,
++ lun);
++
++ read_lock_irqsave(&adapter->port_list_lock, flags);
++ ZFCP_FOR_EACH_PORT(adapter, port) {
++ if ((scsi_id_t)id != port->scsi_id)
++ continue;
++ read_lock(&port->unit_list_lock);
++ ZFCP_FOR_EACH_UNIT(port, unit) {
++ if ((scsi_lun_t)lun == unit->scsi_lun) {
++ ZFCP_LOG_TRACE("found unit\n");
++ break;
++ }
++ }
++ read_unlock(&port->unit_list_lock);
++ if (unit)
++ break;
++ }
++ read_unlock_irqrestore(&adapter->port_list_lock, flags);
++
++ ZFCP_LOG_TRACE("exit (0x%lx)\n", (unsigned long)unit);
++
++ return unit;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:	zfcp_scsi_potential_abort_on_fake
++ *
++ * purpose:	checks whether the given SCSI command is on the adapter's
++ *		list of faked commands and, if so, unlinks it from that
++ *		list (deleting the wake-up timer if the list becomes empty)
++ *
++ * returns:	0 - no fake request aborted
++ *		1 - fake request was aborted
++ *
++ * context:	the adapter->abort_lock is assumed to be held write-locked
++ *		with interrupts disabled; the adapter->fake_list_lock is
++ *		taken and released by this function itself
++ */
++inline int zfcp_scsi_potential_abort_on_fake(zfcp_adapter_t *adapter, Scsi_Cmnd *cmnd)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ Scsi_Cmnd *current_cmnd, *prev_cmnd;
++ unsigned long flags;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter (adapter=0x%lx, cmnd=0x%lx)\n",
++ (unsigned long)adapter,
++ (unsigned long)cmnd);
++
++ write_lock_irqsave(&adapter->fake_list_lock, flags);
++
++ current_cmnd=adapter->first_fake_cmnd;
++
++ if (!current_cmnd)
++ goto out;
++
++ if(current_cmnd==cmnd) {
++ adapter->first_fake_cmnd=(Scsi_Cmnd *)cmnd->host_scribble;
++ cmnd->host_scribble=NULL;
++ if(adapter->first_fake_cmnd==NULL) {
++ /* No need to wake anymore */
++ /* Note: It does not matter if the timer has already
++ * expired, the fake_list_lock takes care of
++ * potential races
++ */
++ del_timer(&adapter->fake_scsi_timer);
++ }
++ atomic_dec(&adapter->fake_scsi_reqs_active);
++ retval=1;
++ goto out;
++ }
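++	/*
++	 * not the list head: walk the chain of faked commands, which are
++	 * singly linked through their host_scribble fields
++	 * (adapter->first_fake_cmnd -> A, A->host_scribble -> B, ...,
++	 * last->host_scribble -> NULL), and unlink cmnd if found
++	 */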
++ do {
++ prev_cmnd = current_cmnd;
++ current_cmnd = (Scsi_Cmnd *)(current_cmnd->host_scribble);
++ if (current_cmnd==cmnd) {
++ prev_cmnd->host_scribble=current_cmnd->host_scribble;
++ current_cmnd->host_scribble=NULL;
++ atomic_dec(&adapter->fake_scsi_reqs_active);
++ retval=1;
++ goto out;
++ }
++	} while (current_cmnd != NULL); /* NULL check avoids dereferencing past the list end */
++
++out:
++ write_unlock_irqrestore(&adapter->fake_list_lock, flags);
++
++ ZFCP_LOG_TRACE("exit (%d)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_scsi_eh_abort_handler
++ *
++ * purpose: tries to abort the specified (timed out) SCSI command
++ *
++ * note: We do not need to care for a SCSI command which completes
++ *	  normally but late while this abort routine runs.
++ *	  We are allowed to return late commands to the SCSI stack.
++ *	  It tracks the state of commands and will handle late commands.
++ *	  (Usually, the normal completion of late commands is ignored with
++ *	  respect to the running abort operation. Grep for 'done_late'
++ *	  in the SCSI stack's sources.)
++ *
++ * returns: SUCCESS - command has been aborted and cleaned up in internal
++ * bookkeeping,
++ * SCSI stack won't be called for aborted command
++ * FAILED - otherwise
++ */
++int zfcp_scsi_eh_abort_handler(Scsi_Cmnd *scpnt)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int retval = SUCCESS;
++ zfcp_fsf_req_t *new_fsf_req, *old_fsf_req;
++ zfcp_adapter_t *adapter = (zfcp_adapter_t*) scpnt->host->hostdata[0];
++ zfcp_unit_t *unit = (zfcp_unit_t*) scpnt->device->hostdata;
++ zfcp_port_t *port = unit->port;
++ unsigned long flags;
++ u32 status = 0;
++#ifdef ZFCP_DEBUG_ABORTS
++	/* the components of an abort_dbf record (fixed size record) */
++ u64 dbf_scsi_cmnd = (unsigned long)scpnt;
++ char dbf_opcode[ZFCP_ABORT_DBF_LENGTH];
++ wwn_t dbf_wwn = port->wwpn;
++ fcp_lun_t dbf_fcp_lun = unit->fcp_lun;
++ u64 dbf_retries = scpnt->retries;
++ u64 dbf_allowed = scpnt->allowed;
++ u64 dbf_timeout = 0;
++ u64 dbf_fsf_req = 0;
++ u64 dbf_fsf_status = 0;
++ u64 dbf_fsf_qual[2] = { 0, 0 };
++ char dbf_result[ZFCP_ABORT_DBF_LENGTH]
++ = { "##undef" };
++
++ memset(dbf_opcode, 0, ZFCP_ABORT_DBF_LENGTH);
++ memcpy( dbf_opcode,
++ scpnt->cmnd,
++ min(scpnt->cmd_len, (unsigned char)ZFCP_ABORT_DBF_LENGTH));
++#endif
++
++ /*TRACE*/
++ ZFCP_LOG_TRACE("enter (scpnt=0x%lx)\n", (unsigned long)scpnt);
++
++ ZFCP_LOG_INFO(
++ "Aborting for adapter=0x%lx, devno=0x%04x, scsi_cmnd=0x%lx\n",
++ (unsigned long)adapter,
++ adapter->devno,
++ (unsigned long)scpnt);
++
++ spin_unlock_irq(&io_request_lock);
++#if 0
++ /* DEBUG */
++ retval=FAILED;
++ goto out;
++#endif
++
++	/*
++	 * Race conditions between normal (late) completion and abort have
++	 * to be avoided.
++	 * All accesses to scsi_req must be atomic as a whole.
++	 * scsi_req is usually part of the fsf_req (for requests which
++	 * are not faked) and thus we block the release of fsf_req
++	 * as long as we need to access scsi_req.
++	 * For faked commands we use the same lock even though they are
++	 * not put into the fsf_req queue; this keeps the implementation
++	 * simpler.
++	 */
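++	/*
++	 * Illustration of the race closed by abort_lock (a sketch, not
++	 * normative): without the lock, the completion path could release
++	 * fsf_req between our read of scpnt->host_scribble and our use of
++	 * the request:
++	 *
++	 *	abort handler			completion path
++	 *	-------------			---------------
++	 *	read host_scribble
++	 *					complete and free fsf_req
++	 *	dereference fsf_req		<- use after free
++	 */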
++ write_lock_irqsave(&adapter->abort_lock, flags);
++
++ /*
++ * Check if we deal with a faked command, which we may just forget
++ * about from now on
++ */
++ if (zfcp_scsi_potential_abort_on_fake(adapter, scpnt)) {
++ write_unlock_irqrestore(&adapter->abort_lock, flags);
++#ifdef ZFCP_DEBUG_ABORTS
++ strncpy(dbf_result, "##faked", ZFCP_ABORT_DBF_LENGTH);
++#endif
++ retval = SUCCESS;
++ goto out;
++ }
++
++ /*
++	 * Check whether the command has just completed and cannot be aborted.
++ * Even if the command has just been completed late, we can access
++ * scpnt since the SCSI stack does not release it at least until
++ * this routine returns. (scpnt is parameter passed to this routine
++ * and must not disappear during abort even on late completion.)
++ */
++ old_fsf_req = (zfcp_fsf_req_t*) scpnt->host_scribble;
++ if (!old_fsf_req) {
++ ZFCP_LOG_DEBUG("late command completion overtook abort\n");
++ /*
++ * That's it.
++ * Do not initiate abort but return SUCCESS.
++ */
++ write_unlock_irqrestore(&adapter->abort_lock, flags);
++ retval = SUCCESS;
++#ifdef ZFCP_DEBUG_ABORTS
++ strncpy(dbf_result, "##late1", ZFCP_ABORT_DBF_LENGTH);
++#endif
++ goto out;
++ }
++#ifdef ZFCP_DEBUG_ABORTS
++ dbf_fsf_req = (unsigned long)old_fsf_req;
++ dbf_timeout = (jiffies - old_fsf_req->data.send_fcp_command_task.start_jiffies) / HZ;
++#endif
++
++ old_fsf_req->data.send_fcp_command_task.scsi_cmnd = NULL;
++ /* mark old request as being aborted */
++ old_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
++ /*
++ * We have to collect all information (e.g. unit) needed by
++ * zfcp_fsf_abort_fcp_command before calling that routine
++ * since that routine is not allowed to access
++ * fsf_req which it is going to abort.
++	 * This is because we need to release the fsf_req_list_lock
++ * before calling zfcp_fsf_abort_fcp_command.
++ * Since this lock will not be held, fsf_req may complete
++ * late and may be released meanwhile.
++ */
++ ZFCP_LOG_DEBUG(
++ "unit=0x%lx, unit_fcp_lun=0x%Lx\n",
++ (unsigned long)unit,
++ (llui_t)unit->fcp_lun);
++
++ /*
++	 * We block (call schedule).
++	 * That's why we must release the lock and enable interrupts
++	 * beforehand.
++ * On the other hand we do not need the lock anymore since
++ * all critical accesses to scsi_req are done.
++ */
++ write_unlock_irqrestore(&adapter->abort_lock, flags);
++ /* call FSF routine which does the abort */
++ new_fsf_req = zfcp_fsf_abort_fcp_command(
++ (unsigned long)old_fsf_req, adapter, unit, 0);
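++	/*
++	 * note: old_fsf_req is passed to zfcp_fsf_abort_fcp_command merely
++	 * as a request identifier (hence the cast to unsigned long); as
++	 * explained above it must not be dereferenced there, since it may
++	 * already have completed and been released
++	 */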
++ ZFCP_LOG_DEBUG(
++ "new_fsf_req=0x%lx\n",
++ (unsigned long) new_fsf_req);
++ if (!new_fsf_req) {
++ retval = FAILED;
++ ZFCP_LOG_DEBUG(
++ "warning: Could not abort SCSI command "
++ "at 0x%lx\n",
++ (unsigned long)scpnt);
++#ifdef ZFCP_DEBUG_ABORTS
++ strncpy(dbf_result, "##nores", ZFCP_ABORT_DBF_LENGTH);
++#endif
++ goto out;
++ }
++
++ /* wait for completion of abort */
++ ZFCP_LOG_DEBUG("Waiting for cleanup....\n");
++#ifdef ZFCP_DEBUG_ABORTS
++ /* FIXME: copying zfcp_fsf_req_wait_and_cleanup code is not really nice */
++ __wait_event(
++ new_fsf_req->completion_wq,
++ new_fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
++ status = new_fsf_req->status;
++ dbf_fsf_status = new_fsf_req->qtcb->header.fsf_status;
++ /*
++	 * Ralph's special debug load provides timestamps in the FSF
++	 * status qualifier. This may be specified later if it proves
++	 * useful for debugging aborts.
++ */
++ dbf_fsf_qual[0] = *(u64*)&new_fsf_req->qtcb->header.fsf_status_qual.word[0];
++ dbf_fsf_qual[1] = *(u64*)&new_fsf_req->qtcb->header.fsf_status_qual.word[2];
++ retval = zfcp_fsf_req_cleanup(new_fsf_req);
++#else
++ retval = zfcp_fsf_req_wait_and_cleanup(
++ new_fsf_req,
++ ZFCP_UNINTERRUPTIBLE,
++ &status);
++#endif
++ ZFCP_LOG_DEBUG(
++ "Waiting for cleanup complete, status=0x%x\n",
++ status);
++ /* status should be valid since signals were not permitted */
++ if (status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
++ retval = SUCCESS;
++#ifdef ZFCP_DEBUG_ABORTS
++ strncpy(dbf_result, "##succ", ZFCP_ABORT_DBF_LENGTH);
++#endif
++ } else if (status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
++ retval = SUCCESS;
++#ifdef ZFCP_DEBUG_ABORTS
++ strncpy(dbf_result, "##late2", ZFCP_ABORT_DBF_LENGTH);
++#endif
++ } else {
++ retval = FAILED;
++#ifdef ZFCP_DEBUG_ABORTS
++ strncpy(dbf_result, "##fail", ZFCP_ABORT_DBF_LENGTH);
++#endif
++ }
++
++out:
++#ifdef ZFCP_DEBUG_ABORTS
++ debug_event(adapter->abort_dbf, 1, &dbf_scsi_cmnd, sizeof(u64));
++ debug_event(adapter->abort_dbf, 1, &dbf_opcode, ZFCP_ABORT_DBF_LENGTH);
++ debug_event(adapter->abort_dbf, 1, &dbf_wwn, sizeof(wwn_t));
++ debug_event(adapter->abort_dbf, 1, &dbf_fcp_lun, sizeof(fcp_lun_t));
++ debug_event(adapter->abort_dbf, 1, &dbf_retries, sizeof(u64));
++ debug_event(adapter->abort_dbf, 1, &dbf_allowed, sizeof(u64));
++ debug_event(adapter->abort_dbf, 1, &dbf_timeout, sizeof(u64));
++ debug_event(adapter->abort_dbf, 1, &dbf_fsf_req, sizeof(u64));
++ debug_event(adapter->abort_dbf, 1, &dbf_fsf_status, sizeof(u64));
++ debug_event(adapter->abort_dbf, 1, &dbf_fsf_qual[0], sizeof(u64));
++ debug_event(adapter->abort_dbf, 1, &dbf_fsf_qual[1], sizeof(u64));
++ debug_text_event(adapter->abort_dbf, 1, dbf_result);
++#endif
++
++ spin_lock_irq(&io_request_lock);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:	zfcp_scsi_eh_device_reset_handler
++ *
++ * purpose:	resets the specified SCSI device (unit); tries a logical
++ *		unit reset first and falls back to a target reset if the
++ *		unit does not support logical unit reset
++ *
++ * returns:	SUCCESS - reset (of either kind) succeeded
++ *		FAILED  - otherwise
++ */
++int zfcp_scsi_eh_device_reset_handler(Scsi_Cmnd *scpnt)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int retval;
++ zfcp_unit_t *unit = (zfcp_unit_t*) scpnt->device->hostdata;
++ /*TRACE*/
++ ZFCP_LOG_TRACE("enter (scpnt=0x%lx)\n", (unsigned long)scpnt);
++
++ spin_unlock_irq(&io_request_lock);
++	/*
++	 * We should not be called to reset a target for which we 'sent'
++	 * faked SCSI commands, since the abort of faked SCSI commands
++	 * should always succeed (it simply deletes the timer).
++	 */
++	if (!unit) {
++		ZFCP_LOG_NORMAL(
++			"bug: Tried to reset a non-existent unit.\n");
++		retval = SUCCESS;
++		goto out;
++	}
++ ZFCP_LOG_NORMAL(
++ "Resetting SCSI device "
++ "(unit with FCP_LUN 0x%016Lx on the port with WWPN 0x%016Lx "
++ "on the adapter with devno 0x%04x)\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++
++ /*
++ * If we do not know whether the unit supports 'logical unit reset'
++ * then try 'logical unit reset' and proceed with 'target reset'
++ * if 'logical unit reset' fails.
++ * If the unit is known not to support 'logical unit reset' then
++ * skip 'logical unit reset' and try 'target reset' immediately.
++ */
++ if (!atomic_test_mask(ZFCP_STATUS_UNIT_NOTSUPPUNITRESET, &unit->status)) {
++ retval = zfcp_task_management_function(unit, LOGICAL_UNIT_RESET);
++ if (retval) {
++ ZFCP_LOG_DEBUG(
++ "logical unit reset failed (unit=0x%lx)\n",
++ (unsigned long)unit);
++ if (retval == -ENOTSUPP)
++ atomic_set_mask(ZFCP_STATUS_UNIT_NOTSUPPUNITRESET,
++ &unit->status);
++ /* fall through and try 'target reset' next */
++ } else {
++ ZFCP_LOG_DEBUG(
++ "logical unit reset succeeded (unit=0x%lx)\n",
++ (unsigned long)unit);
++ /* avoid 'target reset' */
++ retval = SUCCESS;
++ goto out;
++ }
++ }
++ retval = zfcp_task_management_function(unit, TARGET_RESET);
++ if (retval) {
++ ZFCP_LOG_DEBUG(
++ "target reset failed (unit=0x%lx)\n",
++ (unsigned long)unit);
++ retval = FAILED;
++ } else {
++ ZFCP_LOG_DEBUG(
++ "target reset succeeded (unit=0x%lx)\n",
++ (unsigned long)unit);
++ retval = SUCCESS;
++ }
++
++out:
++ spin_lock_irq(&io_request_lock);
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
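++/*
++ * function:	zfcp_task_management_function
++ *
++ * purpose:	sends an FCP task management function (e.g. logical unit
++ *		reset or target reset, selected via tm_flags) to a unit
++ *		and waits for its completion
++ *
++ * returns:	0	 - task management function completed successfully
++ *		-ENOMEM	 - out of resources, no request could be created
++ *		-EIO	 - task management function failed
++ *		-ENOTSUPP - task management function not supported
++ */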
++static int zfcp_task_management_function(zfcp_unit_t *unit, u8 tm_flags)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ zfcp_adapter_t *adapter = unit->port->adapter;
++ int retval;
++ int status;
++ zfcp_fsf_req_t *fsf_req;
++
++ ZFCP_LOG_TRACE(
++ "enter (unit=0x%lx tm_flags=0x%x)\n",
++ (unsigned long)unit,
++ tm_flags);
++
++ /* issue task management function */
++ fsf_req = zfcp_fsf_send_fcp_command_task_management
++ (adapter, unit, tm_flags, 0);
++ if (!fsf_req) {
++ ZFCP_LOG_INFO(
++ "error: Out of resources. Could not create a "
++ "task management (abort, reset, etc) request "
++ "for the unit with FCP_LUN 0x%016Lx connected to "
++ "the port with WWPN 0x%016Lx connected to "
++ "the adapter with devno 0x%04x.\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ adapter->devno);
++ retval = -ENOMEM;
++ goto out;
++ }
++
++ retval = zfcp_fsf_req_wait_and_cleanup(
++ fsf_req,
++ ZFCP_UNINTERRUPTIBLE,
++ &status);
++ /*
++ * check completion status of task management function
++ * (status should always be valid since no signals permitted)
++ */
++ if (status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED)
++ retval = -EIO;
++ else if (status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP)
++ retval = -ENOTSUPP;
++ else retval = 0;
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:	zfcp_scsi_eh_bus_reset_handler
++ *
++ * purpose:	resets the 'SCSI bus' by reopening the adapter through
++ *		error recovery and waiting for its completion
++ *
++ * returns:	SUCCESS (always; the reopen is merely triggered and waited for)
++ */
++int zfcp_scsi_eh_bus_reset_handler(Scsi_Cmnd *scpnt)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int retval = 0;
++ zfcp_unit_t *unit;
++
++ ZFCP_LOG_TRACE("enter (scpnt=0x%lx)\n", (unsigned long)scpnt);
++ spin_unlock_irq(&io_request_lock);
++
++ unit = (zfcp_unit_t *)scpnt->device->hostdata;
++ /*DEBUG*/
++ ZFCP_LOG_NORMAL(
++ "Resetting SCSI bus "
++ "(unit with FCP_LUN 0x%016Lx on the port with WWPN 0x%016Lx "
++ "on the adapter with devno 0x%04x)\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ zfcp_erp_adapter_reopen(unit->port->adapter, 0);
++ zfcp_erp_wait(unit->port->adapter);
++ retval = SUCCESS;
++
++ spin_lock_irq(&io_request_lock);
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:	zfcp_scsi_eh_host_reset_handler
++ *
++ * purpose:	resets the SCSI host (adapter) by reopening it through
++ *		error recovery and waiting for its completion
++ *
++ * returns:	SUCCESS (always)
++ */
++int zfcp_scsi_eh_host_reset_handler(Scsi_Cmnd *scpnt)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int retval = 0;
++ zfcp_unit_t *unit;
++
++ ZFCP_LOG_TRACE("enter (scpnt=0x%lx)\n", (unsigned long)scpnt);
++ spin_unlock_irq(&io_request_lock);
++
++ unit = (zfcp_unit_t *)scpnt->device->hostdata;
++ /*DEBUG*/
++ ZFCP_LOG_NORMAL(
++ "Resetting SCSI host "
++ "(unit with FCP_LUN 0x%016Lx on the port with WWPN 0x%016Lx "
++ "on the adapter with devno 0x%04x)\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ zfcp_erp_adapter_reopen(unit->port->adapter, 0);
++ zfcp_erp_wait(unit->port->adapter);
++	retval = SUCCESS;
++
++ spin_lock_irq(&io_request_lock);
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_adapter_detect
++ *
++ * purpose: checks whether the specified zSeries device is
++ * a supported adapter
++ *
++ * returns: 0 - for supported adapter
++ * !0 - for unsupported devices
++ */
++int zfcp_adapter_detect(zfcp_adapter_t *adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter: (adapter=0x%lx)\n", (unsigned long)adapter);
++ retval = get_dev_info_by_devno(adapter->devno, &adapter->devinfo);
++ if (retval) {
++ ZFCP_LOG_INFO(
++ "warning: Device information for the adapter "
++ "with devno 0x%04x could not be determined. "
++ "The attempt returned %d. It is probable that "
++ "no device with this devno exists.\n",
++ adapter->devno,
++ retval);
++ goto out;
++ }
++
++ if (adapter->devinfo.status == 0){
++ ZFCP_LOG_TRACE(
++ "Adapter returned \"OK\", "
++ "devno is 0x%04x.\n",
++ (unsigned int) adapter->devno);
++ goto ok;
++ }
++ if (adapter->devinfo.status & DEVSTAT_NOT_OPER) {
++ ZFCP_LOG_INFO(
++ "error: Adapter with devno 0x%04x is not "
++ "operational.\n",
++ (unsigned int) adapter->devno);
++ retval = -EBUSY;
++ }
++ if (adapter->devinfo.status & DEVSTAT_DEVICE_OWNED) {
++ ZFCP_LOG_INFO(
++ "error: Adapter with devno 0x%04x is already "
++ "owned by another driver.\n",
++ (unsigned int) adapter->devno);
++ retval = -EACCES;
++ }
++ if (adapter->devinfo.status & DEVSTAT_UNKNOWN_DEV) {
++ ZFCP_LOG_INFO(
++ "error: Adapter with devno 0x%04x is not "
++ "an FCP card.\n",
++ (unsigned int) adapter->devno);
++ retval = -EACCES;
++ }
++ if (adapter->devinfo.status & (~(DEVSTAT_NOT_OPER |
++ DEVSTAT_DEVICE_OWNED |
++ DEVSTAT_UNKNOWN_DEV))){
++ ZFCP_LOG_NORMAL(
++ "bug: Adapter with devno 0x%04x returned an "
++ "unexpected condition during the identification "
++ "phase. (debug info %d)\n",
++ (unsigned int) adapter->devno,
++ adapter->devinfo.status);
++ retval = -ENODEV;
++ }
++ if (retval < 0)
++ goto out;
++ok:
++ if ((adapter->devinfo.sid_data.cu_type != ZFCP_CONTROL_UNIT_TYPE) ||
++ (adapter->devinfo.sid_data.cu_model != ZFCP_CONTROL_UNIT_MODEL) ||
++ (adapter->devinfo.sid_data.dev_type != ZFCP_DEVICE_TYPE) ||
++ ((adapter->devinfo.sid_data.dev_model != ZFCP_DEVICE_MODEL) &&
++ (adapter->devinfo.sid_data.dev_model != ZFCP_DEVICE_MODEL_PRIV))) {
++ ZFCP_LOG_NORMAL(
++ "error: Adapter with devno 0x%04x is not "
++ "an FCP card.\n",
++ (unsigned int) adapter->devno);
++ retval = -ENODEV;
++ }
++
++out:
++ ZFCP_LOG_TRACE(
++		"CU type, CU model, dev type, dev model:"
++		" 0x%x, 0x%x, 0x%x, 0x%x.\n",
++ adapter->devinfo.sid_data.cu_type,
++ adapter->devinfo.sid_data.cu_model,
++ adapter->devinfo.sid_data.dev_type,
++ adapter->devinfo.sid_data.dev_model);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:	zfcp_adapter_irq_register
++ *
++ * purpose:	determines the irq of the adapter by its devno and
++ *		requests that irq from the common I/O layer
++ *
++ * returns:	0	- irq registered
++ *		-ENXIO	- irq could not be determined
++ *		!0	- return value of s390_request_irq_special
++ */
++static int zfcp_adapter_irq_register(zfcp_adapter_t* adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ int retval = 0;
++ signed int tmp_irq; /* adapter->irq is unsigned 16 bit! */
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ /* find out IRQ */
++ tmp_irq = get_irq_by_devno(adapter->devno);
++
++ if (tmp_irq < 0 || tmp_irq > 0x0FFFF) {
++ ZFCP_LOG_NORMAL(
++ "bug: The attempt to identify the irq for the "
++ "adapter with devno 0x%04x failed. All map entries "
++ "containing this devno are ignored. "
++ "(debug info 0x%x)\n",
++ adapter->devno,
++ tmp_irq);
++ retval = -ENXIO;
++ goto out;
++ }
++ ZFCP_LOG_TRACE(
++ "get_irq_by_devno returned irq=0x%x.\n",
++ tmp_irq);
++ adapter->irq = tmp_irq;
++
++ /* request IRQ */
++ retval = s390_request_irq_special(
++ adapter->irq,
++ (void *)zfcp_cio_handler,
++ zfcp_dio_not_oper_handler,
++ 0,
++ zfcp_data.scsi_host_template.name,
++ (void *)&adapter->devstat);
++ if (retval) {
++ ZFCP_LOG_INFO(
++ "error: Could not allocate irq %i to the adapter "
++ "with devno 0x%04x (debug info %i).\n",
++ adapter->irq,
++ adapter->devno,
++ retval);
++ goto out;
++ }
++ atomic_set_mask(ZFCP_STATUS_ADAPTER_IRQOWNER, &adapter->status);
++	ZFCP_LOG_DEBUG("request for irq %i successful\n", adapter->irq);
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:	zfcp_adapter_irq_unregister
++ *
++ * purpose:	frees the adapter's irq if this driver owns it
++ *
++ * returns:	0 (always)
++ */
++static int zfcp_adapter_irq_unregister(zfcp_adapter_t* adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter (adapter=0x%lx)\n", (unsigned long)adapter);
++
++ if(!atomic_test_mask(ZFCP_STATUS_ADAPTER_IRQOWNER, &adapter->status)) {
++ ZFCP_LOG_DEBUG("Adapter with devno 0x%04x does not own "
++ "an irq, skipping over freeing attempt.\n",
++ adapter->devno);
++ goto out;
++ }
++	/* Note: There is no race condition if the irq is given up by some
++	   other agency at this point. The CIO layer will still handle the
++	   subsequent free_irq correctly.
++	 */
++ free_irq(adapter->irq, (void *) &adapter->devstat);
++ atomic_clear_mask(ZFCP_STATUS_ADAPTER_IRQOWNER, &adapter->status);
++ ZFCP_LOG_DEBUG("gave up irq=%i\n", adapter->irq);
++ out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:	zfcp_adapter_scsi_register
++ *
++ * purpose:	registers the adapter as a SCSI host with the midlayer
++ *		and fills in the host's characteristics
++ *
++ * returns:	0	- adapter registered
++ *		-EIO	- registration with the SCSI stack failed
++ */
++static int zfcp_adapter_scsi_register(zfcp_adapter_t* adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter=0x%lx)\n",
++ (unsigned long)adapter);
++
++ /* register adapter as SCSI host with mid layer of SCSI stack */
++ adapter->scsi_host = scsi_register(
++ &zfcp_data.scsi_host_template,
++ sizeof(zfcp_adapter_t*));
++ if (!adapter->scsi_host) {
++ ZFCP_LOG_NORMAL(
++ "error: Not enough free memory. "
++ "Could not register host-adapter with "
++ "devno 0x%04x with the SCSI-stack.\n",
++ adapter->devno);
++ retval = -EIO;
++ goto out;
++ }
++ atomic_set_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status);
++ ZFCP_LOG_DEBUG(
++ "host registered, scsi_host at 0x%lx\n",
++ (unsigned long)adapter->scsi_host);
++
++ /* tell the SCSI stack some characteristics of this adapter */
++ adapter->scsi_host->max_id = adapter->max_scsi_id + 1;
++ adapter->scsi_host->max_lun = adapter->max_scsi_lun + 1;
++ adapter->scsi_host->max_channel = 0;
++ adapter->scsi_host->irq = adapter->irq;
++ adapter->scsi_host->unique_id = adapter->devno;
++ adapter->scsi_host->max_cmd_len = ZFCP_MAX_SCSI_CMND_LENGTH;
++ adapter->scsi_host->loaded_as_module
++ = (zfcp_data.scsi_host_template.module ? 1 : 0);
++ adapter->scsi_host->select_queue_depths
++ = zfcp_scsi_select_queue_depth;
++
++ /*
++ * save a pointer to our own adapter data structure within
++ * hostdata field of SCSI host data structure
++ */
++ adapter->scsi_host->hostdata[0] = (unsigned long)adapter;
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
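++/*
++ * function:	zfcp_initialize_with_0copy
++ *
++ * purpose:	sets up the QDIO queues of an adapter in zero-copy mode
++ *		(one input and one output queue whose SBALs are shared
++ *		with the QDIO layer rather than copied)
++ *
++ * returns:	return value of qdio_initialize
++ */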
++int zfcp_initialize_with_0copy(zfcp_adapter_t *adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_QDIO
++
++ int retval = 0;
++ qdio_initialize_t init_data;
++
++ ZFCP_LOG_TRACE("enter (adapter=0x%lx)\n", (unsigned long)adapter);
++
++ init_data.irq = adapter->irq;
++ init_data.q_format = QDIO_SCSI_QFMT;
++ memcpy(init_data.adapter_name,&adapter->name,8);
++ init_data.qib_param_field_format = 0;
++ init_data.qib_param_field = NULL;
++ init_data.input_slib_elements = NULL;
++ init_data.output_slib_elements = NULL;
++ init_data.min_input_threshold = ZFCP_MIN_INPUT_THRESHOLD;
++ init_data.max_input_threshold = ZFCP_MAX_INPUT_THRESHOLD;
++ init_data.min_output_threshold = ZFCP_MIN_OUTPUT_THRESHOLD;
++ init_data.max_output_threshold = ZFCP_MAX_OUTPUT_THRESHOLD;
++ init_data.no_input_qs = 1;
++ init_data.no_output_qs = 1;
++ init_data.input_handler = zfcp_qdio_response_handler;
++ init_data.output_handler = zfcp_qdio_request_handler;
++ init_data.int_parm = (unsigned long)adapter;
++ init_data.flags = QDIO_INBOUND_0COPY_SBALS|
++ QDIO_OUTBOUND_0COPY_SBALS|
++ QDIO_USE_OUTBOUND_PCIS;
++ init_data.input_sbal_addr_array =
++ (void **)(adapter->response_queue.buffer);
++ init_data.output_sbal_addr_array =
++ (void **)(adapter->request_queue.buffer);
++ ZFCP_LOG_TRACE("Before qdio_initialise\n");
++ retval = qdio_initialize(&init_data);
++ ZFCP_LOG_TRACE("After qdio_initialise\n");
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:	zfcp_fsf_req_dismiss_all
++ *
++ * purpose:	dismisses all pending FSF requests of an adapter and
++ *		waits until the adapter's request list has drained
++ *
++ * returns:	0 (always)
++ *
++ * note: qdio queues shall be down (no ongoing inbound processing)
++ */
++static int zfcp_fsf_req_dismiss_all(zfcp_adapter_t *adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = 0;
++ zfcp_fsf_req_t *fsf_req, *next_fsf_req;
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter=0x%lx)\n",
++ (unsigned long)adapter);
++
++ ZFCP_FOR_NEXT_FSFREQ(adapter, fsf_req, next_fsf_req)
++ zfcp_fsf_req_dismiss(fsf_req);
++ while (!list_empty(&adapter->fsf_req_list_head)) {
++ ZFCP_LOG_DEBUG(
++ "fsf req list of adapter with "
++ "devno 0x%04x not yet empty\n",
++ adapter->devno);
++		/* wait for woken initiators to clean up their requests */
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ schedule_timeout(ZFCP_FSFREQ_CLEANUP_TIMEOUT);
++ }
++
++ /* consistency check */
++ if (atomic_read(&adapter->fsf_reqs_active)) {
++ ZFCP_LOG_NORMAL(
++ "bug: There are still %d FSF requests pending "
++ "on the adapter with devno 0x%04x after "
++ "cleanup.\n",
++ atomic_read(&adapter->fsf_reqs_active),
++ adapter->devno);
++ atomic_set(&adapter->fsf_reqs_active, 0);
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * function:	zfcp_fsf_req_dismiss
++ *
++ * purpose:	marks a single FSF request as dismissed and completes it
++ *
++ * returns:	0 (always)
++ */
++static int zfcp_fsf_req_dismiss(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx)\n",
++ (unsigned long)fsf_req);
++
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
++ zfcp_fsf_req_complete(fsf_req);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_qdio_handler_error_check
++ *
++ * purpose: called by the response handler to determine error condition
++ *
++ * returns: error flag
++ *
++ */
++inline int zfcp_qdio_handler_error_check(
++ zfcp_adapter_t *adapter,
++ unsigned int status,
++ unsigned int qdio_error,
++ unsigned int siga_error)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_QDIO
++
++ int retval=0;
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter=0x%lx, status=%i qdio_error=%i siga_error=%i\n",
++ (unsigned long) adapter,
++ status,
++ qdio_error,
++ siga_error);
++
++ if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_TRACE)){
++ if (status & QDIO_STATUS_INBOUND_INT){
++ ZFCP_LOG_TRACE("status is"
++ " QDIO_STATUS_INBOUND_INT \n");
++ }
++ if (status & QDIO_STATUS_OUTBOUND_INT){
++ ZFCP_LOG_TRACE("status is"
++ " QDIO_STATUS_OUTBOUND_INT \n");
++ }
++	} // if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_TRACE))
++ if (status & QDIO_STATUS_LOOK_FOR_ERROR){
++ retval=-EIO;
++
++ ZFCP_LOG_FLAGS(1,"QDIO_STATUS_LOOK_FOR_ERROR \n");
++
++		ZFCP_LOG_INFO("A qdio problem occurred. The status, qdio_error and "
++ "siga_error are 0x%x, 0x%x and 0x%x\n",
++ status,
++ qdio_error,
++ siga_error);
++
++ if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
++ ZFCP_LOG_FLAGS(2, "QDIO_STATUS_ACTIVATE_CHECK_CONDITION\n");
++ }
++ if (status & QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR){
++ ZFCP_LOG_FLAGS(2, "QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR\n");
++ }
++ if (status & QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR){
++ ZFCP_LOG_FLAGS(2, "QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR\n");
++ }
++
++ if (siga_error & QDIO_SIGA_ERROR_ACCESS_EXCEPTION) {
++ ZFCP_LOG_FLAGS(2, "QDIO_SIGA_ERROR_ACCESS_EXCEPTION\n");
++ }
++
++ if (siga_error & QDIO_SIGA_ERROR_B_BIT_SET) {
++ ZFCP_LOG_FLAGS(2, "QDIO_SIGA_ERROR_B_BIT_SET\n");
++ }
++
++ switch (qdio_error) {
++ case 0:
++ ZFCP_LOG_FLAGS(3, "QDIO_OK");
++ break;
++ case SLSB_P_INPUT_ERROR :
++ ZFCP_LOG_FLAGS(1, "SLSB_P_INPUT_ERROR\n");
++ break;
++ case SLSB_P_OUTPUT_ERROR :
++ ZFCP_LOG_FLAGS(1, "SLSB_P_OUTPUT_ERROR\n");
++ break;
++ default :
++ ZFCP_LOG_NORMAL("bug: Unknown qdio error reported "
++ "(debug info 0x%x)\n",
++ qdio_error);
++ break;
++ }
++		/* restart I/O on the failed adapter from scratch */
++		debug_text_event(adapter->erp_dbf,1,"qdio_err");
++		/*
++		 * Since we have been using this adapter, it is safe to assume
++		 * that it is not failed but recoverable. The card seems to
++		 * report link-up events by self-initiated queue shutdown.
++		 * That is why we need to clear the link-down flag,
++		 * which is set again in case we have missed it by a mile.
++		 */
++ zfcp_erp_adapter_reopen(
++ adapter,
++ ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
++ ZFCP_STATUS_COMMON_ERP_FAILED);
++ } // if(status & QDIO_STATUS_LOOK_FOR_ERROR)
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_qdio_request_handler
++ *
++ * purpose: is called by QDIO layer for completed SBALs in request queue
++ *
++ * returns: (void)
++ */
++void zfcp_qdio_request_handler(
++ int irq,
++ unsigned int status,
++ unsigned int qdio_error,
++ unsigned int siga_error,
++ unsigned int queue_number,
++ int first_element,
++ int elements_processed,
++ unsigned long int_parm)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_QDIO
++
++ zfcp_adapter_t *adapter;
++ zfcp_qdio_queue_t *queue;
++
++ ZFCP_LOG_TRACE(
++ "enter (irq=%i status=%i qdio_error=%i siga_error=%i "
++ "queue_number=%i first_element=%i elements_processed=%i "
++ "int_parm=0x%lx)\n",
++ irq,
++ status,
++ qdio_error,
++ siga_error,
++ queue_number,
++ first_element,
++ elements_processed,
++ int_parm);
++
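++	/*
++	 * int_parm is the value registered as init_data.int_parm in
++	 * zfcp_initialize_with_0copy, i.e. the adapter this queue
++	 * belongs to
++	 */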
++ adapter = (zfcp_adapter_t*)int_parm;
++ queue = &adapter->request_queue;
++
++ ZFCP_LOG_DEBUG("devno=0x%04x, first=%d, count=%d\n",
++ adapter->devno,
++ first_element,
++ elements_processed);
++
++ if (zfcp_qdio_handler_error_check(adapter, status, qdio_error, siga_error))
++ goto out;
++
++ /* cleanup all SBALs being program-owned now */
++ zfcp_zero_sbals(
++ queue->buffer,
++ first_element,
++ elements_processed);
++
++ /* increase free space in outbound queue */
++ atomic_add(elements_processed, &queue->free_count);
++ ZFCP_LOG_DEBUG("free_count=%d\n",
++ atomic_read(&queue->free_count));
++ wake_up(&adapter->request_wq);
++ ZFCP_LOG_DEBUG(
++		"elements_processed=%d, free_count=%d\n",
++ elements_processed,
++ atomic_read(&queue->free_count));
++
++out:
++ ZFCP_LOG_TRACE("exit\n");
++
++ return;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_qdio_response_handler
++ *
++ * purpose: is called by QDIO layer for completed SBALs in response queue
++ *
++ * returns: (void)
++ */
++void zfcp_qdio_response_handler(
++ int irq,
++ unsigned int status,
++ unsigned int qdio_error,
++ unsigned int siga_error,
++ unsigned int queue_number,
++ int first_element,
++ int elements_processed,
++ unsigned long int_parm)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_QDIO
++
++ zfcp_adapter_t *adapter;
++ zfcp_qdio_queue_t *queue;
++ int buffer_index;
++ int i;
++ qdio_buffer_t *buffer;
++ int retval = 0;
++ u8 count;
++ u8 start;
++ volatile qdio_buffer_element_t *buffere=NULL;
++ int buffere_index;
++
++ ZFCP_LOG_TRACE(
++ "enter (irq=0x%x status=0x%x qdio_error=0x%x siga_error=0x%x "
++ "queue_number=%i first_element=%i elements_processed=%i "
++ "int_parm=0x%lx)\n",
++ irq,
++ status,
++ qdio_error,
++ siga_error,
++ queue_number,
++ first_element,
++ elements_processed,
++ int_parm);
++
++ adapter = (zfcp_adapter_t*)int_parm;
++ queue = &adapter->response_queue;
++
++ if (zfcp_qdio_handler_error_check(adapter, status, qdio_error, siga_error))
++ goto out;
++
++ buffere = &(queue->buffer[first_element]->element[0]);
++ ZFCP_LOG_DEBUG("first BUFFERE flags=0x%x \n",
++ buffere->flags);
++ /*
++ * go through all SBALs from input queue currently
++ * returned by QDIO layer
++ */
++
++ for (i = 0; i < elements_processed; i++) {
++
++ buffer_index = first_element + i;
++ buffer_index %= QDIO_MAX_BUFFERS_PER_Q;
++ buffer = queue->buffer[buffer_index];
++
++ /* go through all SBALEs of SBAL */
++ for(buffere_index = 0;
++ buffere_index < QDIO_MAX_ELEMENTS_PER_BUFFER;
++ buffere_index++) {
++
++ /* look for QDIO request identifiers in SB */
++ buffere = &buffer->element[buffere_index];
++ retval = zfcp_qdio_reqid_check(adapter,
++ (void *)buffere->addr);
++
++ if (retval) {
++ ZFCP_LOG_NORMAL(
++ "bug: Inbound packet seems not to have "
++					"been sent at all. It will be ignored. "
++ "(debug info 0x%lx, 0x%lx, %d, %d, 0x%x)\n",
++ (unsigned long)buffere->addr,
++ (unsigned long)&(buffere->addr),
++ first_element,
++ elements_processed,
++ adapter->devno);
++
++ ZFCP_LOG_NORMAL(
++ "Dump of inbound BUFFER %d BUFFERE %d "
++ "at address 0x%lx\n",
++ buffer_index,
++ buffere_index,
++ (unsigned long)buffer);
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_NORMAL,
++ (char*)buffer,
++ SBAL_SIZE);
++ }
++ if (buffere->flags & SBAL_FLAGS_LAST_ENTRY)
++ break;
++		}
++
++ if (!(buffere->flags & SBAL_FLAGS_LAST_ENTRY)) {
++ ZFCP_LOG_NORMAL("bug: End of inbound data not marked!\n");
++ }
++ }
++
++ /*
++ * put range of SBALs back to response queue
++ * (including SBALs which have already been free before)
++ */
++ count = atomic_read(&queue->free_count) + elements_processed;
++ start = queue->free_index;
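++	/*
++	 * free_index/free_count implement a ring over the response queue
++	 * buffers. Worked example, assuming the usual value of 128 for
++	 * QDIO_MAX_BUFFERS_PER_Q: with free_index == 126 and count == 4,
++	 * the buffers at indices 126, 127, 0 and 1 are returned, and on
++	 * success free_index wraps to 2 via the modulo below.
++	 */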
++
++ ZFCP_LOG_TRACE(
++		"Calling do_QDIO: irq=0x%x, flags=0x%x, queue_no=%i, "
++ "index_in_queue=%i, count=%i, buffers=0x%lx\n",
++ irq,
++ QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
++ 0,
++ start,
++ count,
++ (unsigned long)&queue->buffer[start]);
++
++ retval = do_QDIO(
++ irq,
++ QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
++ 0,
++ start,
++ count,
++ NULL);
++ if (retval) {
++ atomic_set(&queue->free_count, count);
++ ZFCP_LOG_DEBUG(
++			"Inbound data regions could not be cleared. "
++ "Transfer queues may be down. "
++ "(info %d, %d, %d)\n",
++ count,
++ start,
++ retval);
++ } else {
++ queue->free_index += count;
++ queue->free_index %= QDIO_MAX_BUFFERS_PER_Q;
++ atomic_set(&queue->free_count, 0);
++ ZFCP_LOG_TRACE(
++ "%i buffers successfully enqueued to response queue "
++ "starting at position %i\n",
++ count,
++ start);
++ }
++
++out:
++ /*
++ ZFCP_LOG_DEBUG("response_queue->free_count=%i,response_queue->free_index=%i\n",
++ atomic_read(&queue->free_count),
++ queue->free_index) ;
++ */
++ ZFCP_LOG_TRACE("exit\n");
++
++ return;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_qdio_reqid_check
++ *
++ * purpose: checks for valid reqids or unsolicited status
++ *
++ * returns: 0 - valid request id or unsolicited status
++ * !0 - otherwise
++ */
++static inline int zfcp_qdio_reqid_check(zfcp_adapter_t *adapter, void *sbale_addr)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_QDIO
++
++ zfcp_fsf_req_t *fsf_req;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (sbale_addr=0x%lx)\n",
++ (unsigned long)sbale_addr);
++
++#ifdef ZFCP_DEBUG_REQUESTS
++ /* Note: seq is entered later */
++ debug_text_event(adapter->req_dbf, 1, "i:a/seq");
++ debug_event(adapter->req_dbf, 1, &sbale_addr, sizeof(unsigned long));
++#endif /* ZFCP_DEBUG_REQUESTS */
++
++ /* invalid (per convention used in this driver) */
++ if (!sbale_addr) {
++ ZFCP_LOG_NORMAL(
++ "bug: Inbound data faulty, contains null-pointer!\n");
++ retval = -EINVAL;
++ goto out;
++ }
++
++ /* valid request id and thus (hopefully :) valid fsf_req address */
++ fsf_req = (zfcp_fsf_req_t*)sbale_addr;
++
++ ZFCP_PARANOIA {
++ if ((fsf_req->common_magic != ZFCP_MAGIC)
++ ||(fsf_req->specific_magic != ZFCP_MAGIC_FSFREQ)) {
++ ZFCP_LOG_NORMAL(
++ "bug: An inbound FSF acknowledgement was "
++ "faulty (debug info 0x%x, 0x%x, 0x%lx)\n",
++ fsf_req->common_magic,
++ fsf_req->specific_magic,
++ (unsigned long)fsf_req);
++ retval = -EINVAL;
++ // panic("void of grace");
++ goto out;
++ }
++
++ if (adapter != fsf_req->adapter) {
++ ZFCP_LOG_NORMAL(
++ "bug: An inbound FSF acknowledgement was not "
++				"correct (debug info 0x%lx, 0x%lx, 0x%lx)\n",
++ (unsigned long)fsf_req,
++ (unsigned long)fsf_req->adapter,
++ (unsigned long)adapter);
++ retval = -EINVAL;
++ goto out;
++ }
++ }
++
++#ifdef ZFCP_DEBUG_REQUESTS
++ /* debug feature stuff (test for QTCB: remember new unsol. status!) */
++ if (fsf_req->qtcb) {
++ debug_event(adapter->req_dbf, 1, &fsf_req->qtcb->prefix.req_seq_no,
++ sizeof(u32));
++ }
++#endif /* ZFCP_DEBUG_REQUESTS */
++
++ ZFCP_LOG_TRACE(
++ "fsf_req at 0x%lx, QTCB at 0x%lx\n",
++ (unsigned long)fsf_req,
++ (unsigned long)fsf_req->qtcb);
++ if (fsf_req->qtcb) {
++ ZFCP_LOG_TRACE("HEX DUMP OF 1ST BUFFERE PAYLOAD (QTCB):\n");
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_TRACE,
++ (char*)fsf_req->qtcb,
++ sizeof(fsf_qtcb_t));
++ }
++
++ /* finish the FSF request */
++ zfcp_fsf_req_complete(fsf_req);
++
++out:
++ ZFCP_LOG_TRACE("exit \n");
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:	zfcp_activate_adapter
++ *
++ * purpose:	looks up the configured adapter that belongs to the devno
++ *		of the given irq and reopens it through error recovery
++ *
++ * returns:	(void)
++ */
++static inline void zfcp_activate_adapter(int irq)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_DIO
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_DIO
++
++ int devno;
++ zfcp_adapter_t *adapter;
++
++ ZFCP_LOG_TRACE("enter (irq=%i)\n", irq);
++
++ devno = get_devno_by_irq(irq);
++ ZFCP_LOG_TRACE("devno is 0x%04x\n",devno);
++
++ /* Find the new adapter and open it */
++ ZFCP_FOR_EACH_ADAPTER(adapter) {
++ if (adapter->devno == devno) {
++ ZFCP_LOG_INFO(
++ "The adapter with devno 0x%04x "
++ "will now be activated.\n",
++ devno);
++ debug_text_event(adapter->erp_dbf,1,"activate");
++ zfcp_erp_modify_adapter_status(
++ adapter,
++ ZFCP_STATUS_COMMON_RUNNING,
++ ZFCP_SET);
++ zfcp_erp_adapter_reopen(
++ adapter,
++ ZFCP_STATUS_COMMON_ERP_FAILED);
++ }
++ }
++ if (!adapter)
++ ZFCP_LOG_DEBUG(
++ "An unconfigured adapter has become "
++			"active, its devno is 0x%04x.\n",
++ devno);
++
++ ZFCP_LOG_TRACE("exit\n");
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:	zfcp_dio_oper_handler
++ *
++ * purpose:	called by the common I/O layer when a device becomes
++ *		operational; activates the corresponding adapter
++ *
++ * returns:	0 (always)
++ */
++static int zfcp_dio_oper_handler(int irq, devreg_t *dreg)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_DIO
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_DIO
++
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (irq=%i, dreg=0x%lx)\n",
++ irq, (unsigned long)dreg);
++
++ zfcp_activate_adapter(irq);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:	zfcp_dio_not_oper_handler
++ *
++ * purpose:	called by the common I/O layer when a device becomes
++ *		unoperational; shuts down the corresponding adapter and,
++ *		for DEVSTAT_REVALIDATE, activates its successor
++ *
++ * returns:	(void)
++ */
++static void zfcp_dio_not_oper_handler(int irq, int status)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_DIO
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_DIO
++
++ zfcp_adapter_t *adapter;
++ int known=0;
++
++ ZFCP_LOG_TRACE(
++ "enter (irq=%i, status=%i)\n",
++ irq, status);
++
++ ZFCP_FOR_EACH_ADAPTER(adapter) {
++ if(atomic_test_mask(ZFCP_STATUS_ADAPTER_IRQOWNER, &adapter->status) &&
++ (adapter->irq==irq)) {
++ known=1;
++ break;
++ }
++ }
++
++ switch (status) {
++	case DEVSTAT_DEVICE_GONE:
++		ZFCP_LOG_FLAGS(1,"DEVSTAT_DEVICE_GONE\n");
++		/* fall through */
++	case DEVSTAT_NOT_OPER:
++ ZFCP_LOG_FLAGS(1,"DEVSTAT_NOT_OPER\n");
++ if (!known) {
++ ZFCP_LOG_DEBUG("An unconfigured or an already "
++ "disabled adapter became "
++ "unoperational on irq 0x%x.\n",
++ irq);
++ goto out;
++ }
++ ZFCP_LOG_INFO("The adapter with devno 0x%04x became "
++ "unoperational.\n",
++ adapter->devno);
++ /* shut-down the adapter and wait for completion */
++ debug_text_event(adapter->erp_dbf,1,"not_oper");
++ zfcp_erp_adapter_shutdown(adapter, ZFCP_STATUS_ADAPTER_IRQOWNER);
++ zfcp_erp_wait(adapter);
++ break;
++ case DEVSTAT_REVALIDATE:
++ ZFCP_LOG_FLAGS(1,"DEVSTAT_REVALIDATE\n");
++ /* The irq should still be that of the old adapter */
++ if(known) {
++ ZFCP_LOG_INFO("The adapter with devno 0x%04x became "
++ "unoperational.\n",
++ adapter->devno);
++ /* shut-down the adapter and wait for completion */
++			/* Note: This adapter is not the real irq owner anymore.
++			 * The ERP strategy nevertheless requires the irq to be
++			 * freed somehow.
++			 */
++ debug_text_event(adapter->erp_dbf,1,"reval");
++ zfcp_erp_adapter_shutdown(adapter, 0);
++ zfcp_erp_wait(adapter);
++ } else {
++ ZFCP_LOG_DEBUG("An unconfigured adapter was the "
++				      "origin of a VM define, its irq is 0x%x.\n",
++ irq);
++ }
++ /* The new adapter already owns the irq and needs to be activated */
++ zfcp_activate_adapter(irq);
++ break;
++ default:
++ ZFCP_LOG_NORMAL("bug: Common I/O layer presented information "
++ "unknown to the zfcp module (debug info "
++ "0x%x, 0x%x)\n",
++ irq,
++ status);
++ }
++ out:
++ ZFCP_LOG_TRACE("exit\n");
++
++ return;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:	zfcp_cio_handler
++ *
++ * purpose:	dummy interrupt handler registered via
++ *		s390_request_irq_special; with QDIO all completions arrive
++ *		through the queue handlers, so this is never expected to
++ *		be called (it merely logs its arguments)
++ *
++ * returns:	(void)
++ */
++static void zfcp_cio_handler(int irq, void *devstat, struct pt_regs *rgs)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_DIO
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_DIO
++
++ ZFCP_LOG_TRACE(
++ "enter (irq=%i, devstat=0%lx, pt_regs=0%lx)\n",
++ irq, (unsigned long)devstat,
++ (unsigned long)rgs);
++ ZFCP_LOG_DEBUG("Normally, this function would never be called. "
++ "(info 0x%x, 0x%lx, 0x%lx)\n",
++ irq,
++ (unsigned long)devstat,
++ (unsigned long)rgs);
++ ZFCP_LOG_TRACE("exit\n");
++
++ return;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_req_complete
++ *
++ * purpose:	updates active counts and timers for openfcp-reqs;
++ *		may clean up the request after req_eval returns
++ *
++ * returns: 0 - success
++ * !0 - failure
++ *
++ * context:
++ */
++static int zfcp_fsf_req_complete(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = 0;
++ int cleanup;
++ zfcp_adapter_t *adapter = fsf_req->adapter;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx)\n",
++ (unsigned long)fsf_req);
++
++ /* do some statistics */
++ atomic_dec(&adapter->fsf_reqs_active);
++
++ if (fsf_req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS) {
++ ZFCP_LOG_DEBUG("Status read response received\n");
++		/* Note: all cleanup handling is done in the call chain of
++		   the function invoked below.
++		*/
++ zfcp_fsf_status_read_handler(fsf_req);
++ goto out;
++ } else zfcp_fsf_protstatus_eval(fsf_req);
++
++	/*
++	 * fsf_req may be deleted as a consequence of waking up waiting
++	 * functions, so the cleanup flag is saved here and used later
++	 */
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_CLEANUP)
++ cleanup = 1;
++ else cleanup = 0;
++
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_COMPLETED;
++
++ /* cleanup request if requested by initiator */
++ if (cleanup) {
++ ZFCP_LOG_TRACE(
++ "removing FSF request 0x%lx\n",
++ (unsigned long)fsf_req);
++ /*
++ * lock must not be held here since it will be
++		 * grabbed by the called routine, too
++ */
++ if (zfcp_fsf_req_cleanup(fsf_req)) {
++ ZFCP_LOG_NORMAL(
++ "bug: Could not remove one FSF "
++ "request. Memory leakage possible. "
++ "(debug info 0x%lx).\n",
++ (unsigned long)fsf_req);
++ }
++ } else {
++		/* notify initiator waiting for the request's completion */
++ ZFCP_LOG_TRACE(
++ "waking initiator of FSF request 0x%lx\n",
++ (unsigned long)fsf_req);
++ wake_up(&fsf_req->completion_wq);
++ }
++
++ out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_protstatus_eval
++ *
++ * purpose: evaluates the QTCB of the finished FSF request
++ * and initiates appropriate actions
++ * (usually calling FSF command specific handlers)
++ *
++ * returns:	0 (always)
++ *
++ * context:
++ *
++ * locks:
++ */
++static int zfcp_fsf_protstatus_eval(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = 0;
++ zfcp_adapter_t *adapter = fsf_req->adapter;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx)\n",
++ (unsigned long)fsf_req);
++
++ ZFCP_LOG_DEBUG(
++ "QTCB is at 0x%lx\n",
++ (unsigned long)fsf_req->qtcb);
++
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
++ ZFCP_LOG_DEBUG(
++ "fsf_req 0x%lx has been dismissed\n",
++ (unsigned long)fsf_req);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
++ ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
++ zfcp_cmd_dbf_event_fsf("dismiss", fsf_req, NULL, 0);
++ goto skip_protstatus;
++ }
++
++ /* log additional information provided by FSF (if any) */
++ if (fsf_req->qtcb->header.log_length) {
++ /* do not trust them ;-) */
++ if (fsf_req->qtcb->header.log_start > sizeof(fsf_qtcb_t)) {
++ ZFCP_LOG_NORMAL(
++ "bug: ULP (FSF logging) log data starts "
++ "beyond end of packet header. Ignored. "
++ "(start=%i, size=%li)\n",
++ fsf_req->qtcb->header.log_start,
++ sizeof(fsf_qtcb_t));
++ goto forget_log;
++ }
++ if ((size_t)(fsf_req->qtcb->header.log_start +
++ fsf_req->qtcb->header.log_length)
++ > sizeof(fsf_qtcb_t)) {
++ ZFCP_LOG_NORMAL(
++ "bug: ULP (FSF logging) log data ends "
++ "beyond end of packet header. Ignored. "
++ "(start=%i, length=%i, size=%li)\n",
++ fsf_req->qtcb->header.log_start,
++ fsf_req->qtcb->header.log_length,
++ sizeof(fsf_qtcb_t));
++ goto forget_log;
++ }
++ ZFCP_LOG_TRACE("ULP log data: \n");
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_TRACE,
++ (char*)fsf_req->qtcb + fsf_req->qtcb->header.log_start,
++ fsf_req->qtcb->header.log_length);
++ }
++forget_log:
++
++ /* evaluate FSF Protocol Status */
++ switch (fsf_req->qtcb->prefix.prot_status) {
++
++ case FSF_PROT_GOOD :
++ ZFCP_LOG_TRACE("FSF_PROT_GOOD\n");
++ break;
++
++ case FSF_PROT_FSF_STATUS_PRESENTED :
++ ZFCP_LOG_TRACE("FSF_PROT_FSF_STATUS_PRESENTED\n");
++ break;
++
++ case FSF_PROT_QTCB_VERSION_ERROR :
++ ZFCP_LOG_FLAGS(0, "FSF_PROT_QTCB_VERSION_ERROR\n");
++ /* DEBUG */
++ ZFCP_LOG_NORMAL(
++ "fsf_req=0x%lx, qtcb=0x%lx\n",
++ (unsigned long)fsf_req,
++ (unsigned long)fsf_req->qtcb);
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_NORMAL,
++ (char*)fsf_req,
++ sizeof(zfcp_fsf_req_t));
++ ZFCP_LOG_NORMAL(
++ "error: The adapter with devno 0x%04x contains "
++ "microcode of version 0x%x, the device driver "
++ "only supports 0x%x. Aborting.\n",
++ adapter->devno,
++ fsf_req->qtcb->prefix.prot_status_qual.version_error.fsf_version,
++ ZFCP_QTCB_VERSION);
++ /* stop operation for this adapter */
++ debug_text_exception(adapter->erp_dbf,0,"prot_ver_err");
++ zfcp_erp_adapter_shutdown(adapter, 0);
++ zfcp_cmd_dbf_event_fsf(
++ "qverserr", fsf_req,
++ &fsf_req->qtcb->prefix.prot_status_qual, sizeof(fsf_prot_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_PROT_SEQ_NUMB_ERROR :
++ ZFCP_LOG_FLAGS(0, "FSF_PROT_SEQ_NUMB_ERROR\n");
++ ZFCP_LOG_NORMAL(
++ "bug: Sequence number mismatch between "
++ "driver (0x%x) and adapter of devno 0x%04x "
++ "(0x%x). Restarting all operations on this "
++ "adapter.\n",
++ fsf_req->qtcb->prefix.req_seq_no,
++ adapter->devno,
++ fsf_req->qtcb->prefix.prot_status_qual.sequence_error.exp_req_seq_no);
++#ifdef ZFCP_DEBUG_REQUESTS
++ debug_text_event(adapter->req_dbf, 1, "exp_seq!");
++ debug_event(adapter->req_dbf, 1, &fsf_req->qtcb->prefix.prot_status_qual.sequence_error.exp_req_seq_no, 4);
++ debug_text_event(adapter->req_dbf, 1, "qtcb_seq!");
++ debug_exception(adapter->req_dbf, 1, &fsf_req->qtcb->prefix.req_seq_no, 4);
++#endif /* ZFCP_DEBUG_REQUESTS */
++ debug_text_exception(adapter->erp_dbf,0,"prot_seq_err");
++ /* restart operation on this adapter */
++ zfcp_erp_adapter_reopen(adapter,0);
++ zfcp_cmd_dbf_event_fsf(
++ "seqnoerr", fsf_req,
++ &fsf_req->qtcb->prefix.prot_status_qual, sizeof(fsf_prot_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_PROT_UNSUPP_QTCB_TYPE :
++			ZFCP_LOG_FLAGS(0, "FSF_PROT_UNSUPP_QTCB_TYPE\n");
++ ZFCP_LOG_NORMAL("error: Packet header type used by the "
++ "device driver is incompatible with "
++ "that used on the adapter with devno "
++ "0x%04x. "
++ "Stopping all operations on this adapter.\n",
++ adapter->devno);
++ ZFCP_LOG_NORMAL(
++ "fsf_req=0x%lx, qtcb=0x%lx\n",
++ (unsigned long)fsf_req,
++ (unsigned long)fsf_req->qtcb);
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_NORMAL,
++ (char*)fsf_req,
++ sizeof(zfcp_fsf_req_t));
++ debug_text_exception(adapter->erp_dbf,0,"prot_unsup_qtcb");
++ zfcp_erp_adapter_shutdown(adapter, 0);
++ zfcp_cmd_dbf_event_fsf(
++ "unsqtcbt", fsf_req,
++ &fsf_req->qtcb->prefix.prot_status_qual, sizeof(fsf_prot_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_PROT_HOST_CONNECTION_INITIALIZING :
++ ZFCP_LOG_FLAGS(1, "FSF_PROT_HOST_CONNECTION_INITIALIZING\n");
++ zfcp_cmd_dbf_event_fsf(
++ "hconinit", fsf_req,
++ &fsf_req->qtcb->prefix.prot_status_qual, sizeof(fsf_prot_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
++ &(adapter->status));
++ debug_text_event(adapter->erp_dbf,4,"prot_con_init");
++ break;
++
++ case FSF_PROT_DUPLICATE_REQUEST_ID :
++			ZFCP_LOG_FLAGS(0, "FSF_PROT_DUPLICATE_REQUEST_ID\n");
++ if (fsf_req->qtcb) {
++ ZFCP_LOG_NORMAL(
++ "bug: The request identifier 0x%Lx "
++ "to the adapter with devno 0x%04x is "
++ "ambiguous. "
++ "Stopping all operations on this adapter.\n",
++ *(llui_t*)(&fsf_req->qtcb->bottom.support.req_handle),
++ adapter->devno);
++ } else {
++ ZFCP_LOG_NORMAL(
++ "bug: The request identifier 0x%lx "
++ "to the adapter with devno 0x%04x is "
++ "ambiguous. "
++ "Stopping all operations on this adapter. "
++ "(bug: got this for an unsolicited "
++ "status read request)\n",
++ (unsigned long)fsf_req,
++ adapter->devno);
++ }
++ debug_text_exception(adapter->erp_dbf,0,"prot_dup_id");
++ zfcp_erp_adapter_shutdown(adapter, 0);
++ zfcp_cmd_dbf_event_fsf(
++ "dupreqid", fsf_req,
++ &fsf_req->qtcb->prefix.prot_status_qual, sizeof(fsf_prot_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_PROT_LINK_DOWN :
++ ZFCP_LOG_FLAGS(1, "FSF_PROT_LINK_DOWN\n");
++ /*
++ * 'test and set' is not atomic here -
++ * it's ok as long as calls to our response queue handler
++ * (and thus execution of this code here) are serialized
++ * by the qdio module
++ */
++ if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
++ &adapter->status)) {
++ switch (fsf_req->qtcb->prefix.prot_status_qual.locallink_error.code) {
++ case FSF_PSQ_LINK_NOLIGHT :
++ ZFCP_LOG_INFO(
++ "The local link to the adapter with "
++						"devno 0x%04x is down "
++ "(no light detected).\n",
++ adapter->devno);
++ break;
++ case FSF_PSQ_LINK_WRAPPLUG :
++ ZFCP_LOG_INFO(
++ "The local link to the adapter with "
++						"devno 0x%04x is down "
++ "(wrap plug detected).\n",
++ adapter->devno);
++ break;
++ case FSF_PSQ_LINK_NOFCP :
++ ZFCP_LOG_INFO(
++ "The local link to the adapter with "
++						"devno 0x%04x is down "
++ "(the adjacent node on the link "
++ "does not support FCP).\n",
++ adapter->devno);
++ break;
++ default :
++ ZFCP_LOG_INFO(
++ "The local link to the adapter with "
++						"devno 0x%04x is down "
++ "(warning: unknown reason code).\n",
++ adapter->devno);
++ break;
++
++ }
++ /*
++ * Due to the 'erp failed' flag the adapter won't
++ * be recovered but will be just set to 'blocked'
++			 * state. All subordinate devices will have states
++			 * 'blocked' and 'erp failed', too.
++ * Thus the adapter is still able to provide
++ * 'link up' status without being flooded with
++ * requests.
++ * (note: even 'close port' is not permitted)
++ */
++ ZFCP_LOG_INFO(
++ "Stopping all operations for the adapter "
++ "with devno 0x%04x.\n",
++ adapter->devno);
++ atomic_set_mask(
++ ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
++ ZFCP_STATUS_COMMON_ERP_FAILED,
++ &adapter->status);
++ zfcp_erp_adapter_reopen(adapter, 0);
++ debug_text_event(adapter->erp_dbf,1,"prot_link_down");
++ }
++ zfcp_cmd_dbf_event_fsf(
++ "linkdown", fsf_req,
++ &fsf_req->qtcb->prefix.prot_status_qual, sizeof(fsf_prot_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ zfcp_callback_do_link_down(adapter);
++ break;
++
++ case FSF_PROT_REEST_QUEUE :
++ ZFCP_LOG_FLAGS(1, "FSF_PROT_REEST_QUEUE\n");
++ debug_text_event(adapter->erp_dbf,1,"prot_reest_queue");
++ ZFCP_LOG_INFO("The local link to the adapter with "
++ "devno 0x%04x was re-plugged. "
++ "Re-starting operations on this adapter.\n",
++ adapter->devno);
++ /* All ports should be marked as ready to run again */
++ zfcp_erp_modify_adapter_status(
++ adapter,
++ ZFCP_STATUS_COMMON_RUNNING,
++ ZFCP_SET);
++ zfcp_erp_adapter_reopen(
++ adapter,
++ ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED
++ | ZFCP_STATUS_COMMON_ERP_FAILED);
++ zfcp_cmd_dbf_event_fsf(
++ "reestque", fsf_req,
++ &fsf_req->qtcb->prefix.prot_status_qual, sizeof(fsf_prot_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_PROT_ERROR_STATE :
++ ZFCP_LOG_FLAGS(0, "FSF_PROT_ERROR_STATE\n");
++ ZFCP_LOG_NORMAL(
++ "error: The adapter with devno 0x%04x "
++ "has entered the error state. "
++ "Restarting all operations on this "
++ "adapter.\n",
++ adapter->devno);
++ debug_text_event(adapter->erp_dbf,0,"prot_err_sta");
++ /* restart operation on this adapter */
++ zfcp_erp_adapter_reopen(adapter,0);
++ zfcp_cmd_dbf_event_fsf(
++ "proterrs", fsf_req,
++ &fsf_req->qtcb->prefix.prot_status_qual, sizeof(fsf_prot_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ default :
++ ZFCP_LOG_NORMAL(
++ "bug: Transfer protocol status information "
++ "provided by the adapter with devno 0x%04x "
++ "is not compatible with the device driver. "
++ "Stopping all operations on this adapter. "
++ "(debug info 0x%x).\n",
++ adapter->devno,
++ fsf_req->qtcb->prefix.prot_status);
++ ZFCP_LOG_NORMAL(
++ "fsf_req=0x%lx, qtcb=0x%lx\n",
++ (unsigned long)fsf_req,
++ (unsigned long)fsf_req->qtcb);
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_NORMAL,
++ (char*)fsf_req,
++ sizeof(zfcp_fsf_req_t));
++ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, (char *)fsf_req->qtcb, sizeof(fsf_qtcb_t));
++ debug_text_event(adapter->erp_dbf,0,"prot_inval:");
++ debug_exception(adapter->erp_dbf,0,
++ &fsf_req->qtcb->prefix.prot_status,
++ sizeof(u32));
++ // panic("it was pity");
++ zfcp_erp_adapter_shutdown(adapter, 0);
++ zfcp_cmd_dbf_event_fsf(
++ "undefps", fsf_req,
++ &fsf_req->qtcb->prefix.prot_status_qual, sizeof(fsf_prot_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ }
++
++ skip_protstatus:
++ /*
++ * always call specific handlers to give them a chance to do
++ * something meaningful even in error cases
++ */
++ zfcp_fsf_fsfstatus_eval(fsf_req);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_fsfstatus_eval
++ *
++ * purpose: evaluates FSF status of completed FSF request
++ * and acts accordingly
++ *
++ * returns:
++ */
++static int zfcp_fsf_fsfstatus_eval(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx)\n",
++ (unsigned long)fsf_req);
++
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
++ goto skip_fsfstatus;
++ }
++
++ /* evaluate FSF Status */
++ switch (fsf_req->qtcb->header.fsf_status) {
++ case FSF_UNKNOWN_COMMAND :
++ ZFCP_LOG_FLAGS(0, "FSF_UNKNOWN_COMMAND\n");
++ ZFCP_LOG_NORMAL("bug: Command issued by the device driver is "
++				"not known by the adapter with devno 0x%04x. "
++ "Stopping all operations on this adapter. "
++ "(debug info 0x%x).\n",
++ fsf_req->adapter->devno,
++ fsf_req->qtcb->header.fsf_command);
++ debug_text_exception(fsf_req->adapter->erp_dbf,0,"fsf_s_unknown");
++ zfcp_erp_adapter_shutdown(fsf_req->adapter, 0);
++ zfcp_cmd_dbf_event_fsf(
++ "unknownc", fsf_req,
++ &fsf_req->qtcb->header.fsf_status_qual, sizeof(fsf_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_FCP_RSP_AVAILABLE :
++ ZFCP_LOG_FLAGS(2, "FSF_FCP_RSP_AVAILABLE\n");
++ ZFCP_LOG_DEBUG("FCP Sense data will be presented to the "
++ "SCSI stack.\n");
++ debug_text_event(fsf_req->adapter->erp_dbf,4,"fsf_s_rsp");
++ break;
++
++ case FSF_ADAPTER_STATUS_AVAILABLE :
++ ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
++ debug_text_event(fsf_req->adapter->erp_dbf,2,"fsf_s_astatus");
++ zfcp_fsf_fsfstatus_qual_eval(fsf_req);
++ break;
++
++ default :
++ break;
++ }
++
++skip_fsfstatus:
++ /*
++ * always call specific handlers to give them a chance to do
++ * something meaningful even in error cases
++ */
++ zfcp_fsf_req_dispatch(fsf_req);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++
++/*
++ * function: zfcp_fsf_fsfstatus_qual_eval
++ *
++ * purpose: evaluates FSF status-qualifier of completed FSF request
++ * and acts accordingly
++ *
++ * returns:
++ */
++static int zfcp_fsf_fsfstatus_qual_eval(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx)\n",
++ (unsigned long)fsf_req);
++
++ switch (fsf_req->qtcb->header.fsf_status_qual.word[0]){
++ case FSF_SQ_FCP_RSP_AVAILABLE :
++ ZFCP_LOG_FLAGS(2, "FSF_SQ_FCP_RSP_AVAILABLE\n");
++ debug_text_event(fsf_req->adapter->erp_dbf,4,"fsf_sq_rsp");
++ break;
++ case FSF_SQ_RETRY_IF_POSSIBLE :
++ ZFCP_LOG_FLAGS(2, "FSF_SQ_RETRY_IF_POSSIBLE\n");
++ /* The SCSI-stack may now issue retries or escalate */
++ debug_text_event(fsf_req->adapter->erp_dbf,2,"fsf_sq_retry");
++ zfcp_cmd_dbf_event_fsf(
++ "sqretry", fsf_req,
++ &fsf_req->qtcb->header.fsf_status_qual, sizeof(fsf_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ case FSF_SQ_COMMAND_ABORTED :
++ ZFCP_LOG_FLAGS(2, "FSF_SQ_COMMAND_ABORTED\n");
++ /* Carry the aborted state on to upper layer */
++ debug_text_event(fsf_req->adapter->erp_dbf,2,"fsf_sq_abort");
++ zfcp_cmd_dbf_event_fsf(
++ "sqabort", fsf_req,
++ &fsf_req->qtcb->header.fsf_status_qual, sizeof(fsf_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ case FSF_SQ_NO_RECOM :
++ ZFCP_LOG_FLAGS(0, "FSF_SQ_NO_RECOM\n");
++ debug_text_exception(fsf_req->adapter->erp_dbf,0,"fsf_sq_no_rec");
++ ZFCP_LOG_NORMAL("bug: No recommendation could be given for a"
++ "problem on the adapter with devno 0x%04x "
++ "Stopping all operations on this adapter. ",
++ fsf_req->adapter->devno);
++ zfcp_erp_adapter_shutdown(fsf_req->adapter, 0);
++ zfcp_cmd_dbf_event_fsf(
++ "sqnrecom", fsf_req,
++ &fsf_req->qtcb->header.fsf_status_qual, sizeof(fsf_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ case FSF_SQ_ULP_PROGRAMMING_ERROR :
++ ZFCP_LOG_FLAGS(0, "FSF_SQ_ULP_PROGRAMMING_ERROR\n");
++ ZFCP_LOG_NORMAL("error: not enough SBALs for data transfer "
++ "(adapter devno=0x%04x)\n",
++ fsf_req->adapter->devno);
++ debug_text_exception(fsf_req->adapter->erp_dbf, 0,
++ "fsf_sq_ulp_err");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE :
++ case FSF_SQ_NO_RETRY_POSSIBLE :
++ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED :
++ /* dealt with in the respective functions */
++ break;
++ default:
++ ZFCP_LOG_NORMAL("bug: Additional status info could "
++ "not be interpreted properly.\n");
++ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
++ (char*)&fsf_req->qtcb->header.fsf_status_qual,
++ 16);
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"fsf_sq_inval:");
++ debug_exception(fsf_req->adapter->erp_dbf,0,
++ &fsf_req->qtcb->header.fsf_status_qual.word[0],
++ sizeof(u32));
++ zfcp_cmd_dbf_event_fsf(
++ "squndef", fsf_req,
++ &fsf_req->qtcb->header.fsf_status_qual, sizeof(fsf_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_req_dispatch
++ *
++ * purpose: calls the appropriate command specific handler
++ *
++ * returns:
++ */
++static int zfcp_fsf_req_dispatch(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ zfcp_erp_action_t *erp_action = fsf_req->erp_action;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx)\n",
++ (unsigned long)fsf_req);
++
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
++ ZFCP_LOG_TRACE(
++ "fsf_req=0x%lx, QTCB=0x%lx\n",
++ (unsigned long)fsf_req,
++ (unsigned long)(fsf_req->qtcb));
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_TRACE,
++ (char *)fsf_req->qtcb,
++ sizeof(fsf_qtcb_t));
++ }
++
++ switch (fsf_req->fsf_command) {
++
++ case FSF_QTCB_FCP_CMND :
++ ZFCP_LOG_FLAGS(3, "FSF_QTCB_FCP_CMND\n");
++ zfcp_fsf_send_fcp_command_handler(fsf_req);
++ break;
++
++ case FSF_QTCB_ABORT_FCP_CMND :
++ ZFCP_LOG_FLAGS(2, "FSF_QTCB_ABORT_FCP_CMND\n");
++ zfcp_fsf_abort_fcp_command_handler(fsf_req);
++ break;
++
++ case FSF_QTCB_SEND_GENERIC :
++ ZFCP_LOG_FLAGS(2, "FSF_QTCB_SEND_GENERIC\n");
++ zfcp_fsf_send_ct_handler(fsf_req);
++ break;
++
++ case FSF_QTCB_OPEN_PORT_WITH_DID :
++ ZFCP_LOG_FLAGS(2, "FSF_QTCB_OPEN_PORT_WITH_DID\n");
++ zfcp_fsf_open_port_handler(fsf_req);
++ break;
++
++ case FSF_QTCB_OPEN_LUN :
++ ZFCP_LOG_FLAGS(2, "FSF_QTCB_OPEN_LUN\n");
++ zfcp_fsf_open_unit_handler(fsf_req);
++ break;
++
++ case FSF_QTCB_CLOSE_LUN :
++ ZFCP_LOG_FLAGS(2, "FSF_QTCB_CLOSE_LUN\n");
++ zfcp_fsf_close_unit_handler(fsf_req);
++ break;
++
++ case FSF_QTCB_CLOSE_PORT :
++ ZFCP_LOG_FLAGS(2, "FSF_QTCB_CLOSE_PORT\n");
++ zfcp_fsf_close_port_handler(fsf_req);
++ break;
++
++ case FSF_QTCB_CLOSE_PHYSICAL_PORT :
++ ZFCP_LOG_FLAGS(2, "FSF_QTCB_CLOSE_PHYSICAL_PORT\n");
++ zfcp_fsf_close_physical_port_handler(fsf_req);
++ break;
++
++ case FSF_QTCB_EXCHANGE_CONFIG_DATA :
++ ZFCP_LOG_FLAGS(2, "FSF_QTCB_EXCHANGE_CONFIG_DATA\n");
++ zfcp_fsf_exchange_config_data_handler(fsf_req);
++ break;
++
++ case FSF_QTCB_EXCHANGE_PORT_DATA :
++ ZFCP_LOG_FLAGS(2, "FSF_QTCB_EXCHANGE_PORT_DATA\n");
++ zfcp_fsf_exchange_port_data_handler(fsf_req);
++ break;
++
++ case FSF_QTCB_SEND_ELS :
++ ZFCP_LOG_FLAGS(2, "FSF_QTCB_SEND_ELS\n");
++ zfcp_fsf_send_els_handler(fsf_req);
++ break;
++
++ case FSF_QTCB_DOWNLOAD_CONTROL_FILE :
++ ZFCP_LOG_FLAGS(2, "FSF_QTCB_DOWNLOAD_CONTROL_FILE\n");
++ zfcp_fsf_control_file_handler(fsf_req);
++ break;
++
++ case FSF_QTCB_UPLOAD_CONTROL_FILE :
++ ZFCP_LOG_FLAGS(2, "FSF_QTCB_UPLOAD_CONTROL_FILE\n");
++ zfcp_fsf_control_file_handler(fsf_req);
++ break;
++
++ default :
++ ZFCP_LOG_FLAGS(2, "FSF_QTCB_UNKNOWN\n");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ ZFCP_LOG_NORMAL(
++ "bug: Command issued by the device driver is "
++ "not supported by the adapter with devno 0x%04x "
++ "(debug info 0x%lx 0x%x).\n",
++ fsf_req->adapter->devno,
++ (unsigned long)fsf_req,
++ fsf_req->fsf_command);
++ if (fsf_req->fsf_command !=
++ fsf_req->qtcb->header.fsf_command)
++ ZFCP_LOG_NORMAL(
++ "bug: Command issued by the device driver differs "
++ "from the command returned by the adapter with devno "
++ "0x%04x (debug info 0x%x, 0x%x).\n",
++ fsf_req->adapter->devno,
++ fsf_req->fsf_command,
++ fsf_req->qtcb->header.fsf_command);
++ }
++
++ if (erp_action)
++ zfcp_erp_async_handler(erp_action, 0);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_status_read
++ *
++ * purpose: initiates a Status Read command at the specified adapter
++ *
++ * returns:
++ */
++static int zfcp_fsf_status_read(
++ zfcp_adapter_t *adapter,
++ int req_flags)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ zfcp_fsf_req_t *fsf_req;
++ fsf_status_read_buffer_t *status_buffer;
++ unsigned long lock_flags;
++ volatile qdio_buffer_element_t *sbale;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter=0x%lx, req_flags=0x%x)\n",
++ (unsigned long)adapter,
++ req_flags);
++
++ /* setup new FSF request */
++ retval = zfcp_fsf_req_create(
++ adapter,
++ FSF_QTCB_UNSOLICITED_STATUS,
++ req_flags | ZFCP_REQ_USE_MEMPOOL,
++ &adapter->pool.fsf_req_status_read,
++ &lock_flags,
++ &fsf_req);
++ if (retval < 0) {
++ ZFCP_LOG_INFO(
++ "error: Out of resources. Could not create an "
++ "unsolicited status buffer for "
++ "the adapter with devno 0x%04x.\n",
++ adapter->devno);
++ goto failed_req_create;
++ }
++
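++ /* SBALE 0 flags this request as an unsolicited status read; SBALE 2
++ * ends the chain and will point at the status buffer fetched from
++ * the pool below */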
++ sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
++ sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS;
++ sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
++ fsf_req->sbale_curr = 2;
++
++ status_buffer = zfcp_mem_pool_find(&adapter->pool.data_status_read);
++ if (!status_buffer) {
++ ZFCP_LOG_NORMAL("bug: could not get some buffer\n");
++ goto failed_buf;
++ }
++ fsf_req->data.status_read.buffer = status_buffer;
++
++ /* insert pointer to respective buffer */
++ sbale = zfcp_qdio_sbale_curr(fsf_req);
++ sbale->addr = (void *)status_buffer;
++ sbale->length = sizeof(fsf_status_read_buffer_t);
++
++ /* start QDIO request for this FSF request */
++ retval = zfcp_fsf_req_send(fsf_req, NULL);
++ if (retval) {
++ ZFCP_LOG_DEBUG(
++ "error: Could not set-up unsolicited status "
++ "environment.\n");
++ goto failed_req_send;
++ }
++
++ ZFCP_LOG_TRACE(
++ "Status Read request initiated "
++ "(adapter devno=0x%04x)\n",
++ adapter->devno);
++#ifdef ZFCP_DEBUG_REQUESTS
++ debug_text_event(adapter->req_dbf, 1, "unso");
++#endif
++
++ goto out;
++
++failed_req_send:
++ zfcp_mem_pool_return(status_buffer, &adapter->pool.data_status_read);
++
++failed_buf:
++ if (zfcp_fsf_req_free(fsf_req)) {
++ ZFCP_LOG_NORMAL(
++ "bug: Could not remove one FSF "
++ "request. Memory leakage possible. "
++ "(debug info 0x%lx).\n",
++ (unsigned long)fsf_req);
++ }
++
++failed_req_create:
++out:
++ write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
++
++ ZFCP_LOG_TRACE("exit (%d)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++static int zfcp_fsf_status_read_port_closed(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ fsf_status_read_buffer_t *status_buffer = fsf_req->data.status_read.buffer;
++ zfcp_adapter_t *adapter = fsf_req->adapter;
++ unsigned long flags;
++ zfcp_port_t *port;
++
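++ /* look up the port whose D_ID matches the one reported in the status
++ * buffer; only the lower 24 bits of d_id form the destination ID */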
++ write_lock_irqsave(&adapter->port_list_lock, flags);
++ ZFCP_FOR_EACH_PORT (adapter, port)
++ if (port->d_id == (status_buffer->d_id & ZFCP_DID_MASK))
++ break;
++ write_unlock_irqrestore(&adapter->port_list_lock, flags);
++
++ if (!port) {
++ ZFCP_LOG_NORMAL(
++ "bug: Re-open port indication received for the "
++ "non-existing port with DID 0x%06x, on the adapter "
++ "with devno 0x%04x. Ignored.\n",
++ status_buffer->d_id & ZFCP_DID_MASK,
++ adapter->devno);
++ goto out;
++ }
++
++ switch (status_buffer->status_subtype) {
++
++ case FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT:
++ ZFCP_LOG_FLAGS(2, "FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT\n");
++ debug_text_event(adapter->erp_dbf, 3, "unsol_pc_phys:");
++ zfcp_erp_port_reopen(port, 0);
++ break;
++
++ case FSF_STATUS_READ_SUB_ERROR_PORT:
++ ZFCP_LOG_FLAGS(1,"FSF_STATUS_READ_SUB_ERROR_PORT\n");
++ debug_text_event(adapter->erp_dbf, 1, "unsol_pc_err:");
++ zfcp_erp_port_shutdown(port, 0);
++ break;
++
++ default:
++ debug_text_event(adapter->erp_dbf, 0, "unsol_unk_sub:");
++ debug_exception(
++ adapter->erp_dbf, 0,
++ &status_buffer->status_subtype, sizeof(u32));
++ ZFCP_LOG_NORMAL(
++ "bug: Undefined status subtype received "
++ "for a re-open indication on the port with "
++ "DID 0x%06x, on the adapter with devno "
++ "0x%04x. Ignored. (debug info 0x%x)\n",
++ status_buffer->d_id,
++ adapter->devno,
++ status_buffer->status_subtype);
++ }
++
++out:
++ return 0;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_status_read_handler
++ *
++ * purpose: is called for a finished (unsolicited) Status Read command
++ *
++ * returns:
++ */
++static int zfcp_fsf_status_read_handler(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = 0;
++ zfcp_adapter_t *adapter = fsf_req->adapter;
++ fsf_status_read_buffer_t *status_buffer = fsf_req->data.status_read.buffer;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx)\n",
++ (unsigned long)fsf_req);
++
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
++ zfcp_mem_pool_return(status_buffer, &adapter->pool.data_status_read);
++ if (zfcp_fsf_req_cleanup(fsf_req)) {
++ ZFCP_LOG_NORMAL("bug: Could not remove one FSF "
++ "request. Memory leakage possible. "
++ "(debug info 0x%lx).\n",
++ (unsigned long)fsf_req);
++ }
++ goto out;
++ }
++
++ switch (status_buffer->status_type) {
++
++ case FSF_STATUS_READ_PORT_CLOSED:
++ ZFCP_LOG_FLAGS(1,"FSF_STATUS_READ_PORT_CLOSED\n");
++ debug_text_event(adapter->erp_dbf,3,"unsol_pclosed:");
++ debug_event(adapter->erp_dbf,3,
++ &status_buffer->d_id,
++ sizeof(u32));
++ zfcp_fsf_status_read_port_closed(fsf_req);
++ break;
++
++ case FSF_STATUS_READ_INCOMING_ELS:
++ ZFCP_LOG_FLAGS(1,"FSF_STATUS_READ_INCOMING_ELS\n");
++ debug_text_event(adapter->erp_dbf,3,"unsol_els:");
++ zfcp_fsf_incoming_els(fsf_req);
++ break;
++
++ case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
++ ZFCP_LOG_FLAGS(1,"FSF_STATUS_READ_BIT_ERROR_THRESHOLD\n");
++ debug_text_event(adapter->erp_dbf,3,"unsol_bit_err:");
++ ZFCP_LOG_NORMAL("Bit error threshold data received:\n");
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_NORMAL,
++ (char*)status_buffer,
++ sizeof(fsf_status_read_buffer_t));
++ break;
++
++ case FSF_STATUS_READ_LINK_DOWN:
++ ZFCP_LOG_FLAGS(1,"FSF_STATUS_READ_LINK_DOWN\n");
++ debug_text_event(adapter->erp_dbf, 0, "unsol_link_down:");
++ ZFCP_LOG_INFO(
++ "Local link to adapter with devno 0x%04x is down\n",
++ adapter->devno);
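++ /* mark the link as unplugged and the adapter as failed; recovery
++ * resumes once the corresponding link-up status arrives (see
++ * FSF_STATUS_READ_LINK_UP below) */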
++ atomic_set_mask(
++ ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
++ &adapter->status);
++ zfcp_erp_adapter_failed(adapter);
++ zfcp_callback_do_link_down(adapter);
++ break;
++
++
++ case FSF_STATUS_READ_CFDC_UPDATED:
++ ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_CFDC_UPDATED\n");
++ debug_text_event(adapter->erp_dbf, 2, "unsol_cfdc_upd:");
++ ZFCP_LOG_NORMAL(
++ "CFDC has been updated on the FCP adapter "
++ "(devno=0x%04x)\n",
++ adapter->devno);
++ break;
++
++ case FSF_STATUS_READ_CFDC_HARDENED:
++ ZFCP_LOG_FLAGS(1, "FSF_STATUS_READ_CFDC_HARDENED\n");
++ debug_text_event(adapter->erp_dbf, 2, "unsol_cfdc_harden:");
++ switch (status_buffer->status_subtype) {
++ case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE:
++ ZFCP_LOG_NORMAL(
++ "CFDC has been saved on the SE "
++ "(devno=0x%04x)\n",
++ adapter->devno);
++ break;
++ case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2:
++ ZFCP_LOG_NORMAL(
++ "CFDC has been copied to the secondary SE "
++ "(devno=0x%04x)\n",
++ adapter->devno);
++ break;
++ default:
++ ZFCP_LOG_NORMAL(
++ "CFDC has been hardened on the FCP adapter "
++ "(devno=0x%04x)\n",
++ adapter->devno);
++ }
++ break;
++
++ case FSF_STATUS_READ_LINK_UP:
++ ZFCP_LOG_FLAGS(1,"FSF_STATUS_READ_LINK_UP\n");
++ debug_text_event(adapter->erp_dbf,2,"unsol_link_up:");
++ ZFCP_LOG_INFO("The local link to the adapter with "
++ "devno 0x%04x was re-plugged. "
++ "Re-starting operations on this adapter..\n",
++ adapter->devno);
++ /* All ports should be marked as ready to run again */
++ zfcp_erp_modify_adapter_status(
++ adapter,
++ ZFCP_STATUS_COMMON_RUNNING,
++ ZFCP_SET);
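++ /* reopen with the link-unplugged and erp-failed bits as clear mask
++ * so that a previously failed adapter is recovered now that the
++ * link is back */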
++ zfcp_erp_adapter_reopen(
++ adapter,
++ ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED
++ | ZFCP_STATUS_COMMON_ERP_FAILED);
++
++ zfcp_callback_do_link_up(adapter);
++ break;
++
++ default:
++ debug_text_event(adapter->erp_dbf,0,"unsol_unknown:");
++ debug_exception(adapter->erp_dbf,0,
++ &status_buffer->status_type,
++ sizeof(u32));
++ ZFCP_LOG_NORMAL("bug: An unsolicited status packet of unknown "
++ "type was received by the zfcp-driver "
++ "(debug info 0x%x)\n",
++ status_buffer->status_type);
++ ZFCP_LOG_DEBUG("Dump of status_read_buffer 0x%lx:\n",
++ (unsigned long)status_buffer);
++ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
++ (char*)status_buffer,
++ sizeof(fsf_status_read_buffer_t));
++ break;
++ }
++
++ zfcp_mem_pool_return(status_buffer, &adapter->pool.data_status_read);
++ if (zfcp_fsf_req_cleanup(fsf_req)) {
++ ZFCP_LOG_NORMAL("bug: Could not remove one FSF "
++ "request. Memory leakage possible. "
++ "(debug info 0x%lx).\n",
++ (unsigned long)fsf_req);
++ }
++ /* recycle the buffer and start a new request; repeat until the
++ * outbound queue is empty or an adapter shutdown is requested */
++
++ /* FIXME(qdio) - we may wait in the req_create for 5s during shutdown, so
++ qdio_cleanup will have to wait at least that long before returning with
++ failure to allow us a proper cleanup under all circumstances
++ */
++ /* FIXME: allocation failure possible? (Is this code needed?) */
++ retval = zfcp_fsf_status_read(adapter, 0);
++ if (retval < 0) {
++ ZFCP_LOG_INFO(
++ "Outbound queue busy. "
++ "Could not create use an "
++ "unsolicited status read request for "
++ "the adapter with devno 0x%04x.\n",
++ adapter->devno);
++ /* temporary fix to avoid status read buffer shortage */
++ adapter->status_read_failed++;
++ if ((ZFCP_STATUS_READS_RECOM - adapter->status_read_failed)
++ < ZFCP_STATUS_READ_FAILED_THRESHOLD) {
++ ZFCP_LOG_INFO(
++ "restart adapter due to status read "
++ "buffer shortage (devno 0x%04x)\n",
++ adapter->devno);
++ zfcp_erp_adapter_reopen(adapter, 0);
++ }
++ }
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++void zfcp_fsf_incoming_els_rscn(
++ zfcp_adapter_t *adapter,
++ fsf_status_read_buffer_t *status_buffer)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++ fcp_rscn_head_t *fcp_rscn_head
++ = (fcp_rscn_head_t *) status_buffer->payload;
++ fcp_rscn_element_t *fcp_rscn_element
++ = (fcp_rscn_element_t *) status_buffer->payload;
++
++ unsigned long flags;
++ zfcp_port_t *port;
++ int i;
++ int known=0;
++ int no_notifications=0;
++ int range_mask=0;
++ int reopen_unknown=0;
++ /* see FC-FS */
++ int no_entries=(fcp_rscn_head->payload_len / 4);
++
++ zfcp_in_els_dbf_event(adapter, "##rscn", status_buffer, fcp_rscn_head->payload_len);
++
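++ /* an RSCN payload consists of 4-byte pages: one header followed by
++ * one page per affected address, hence the loop below starts at 1 */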
++ for (i=1; i < no_entries; i++) {
++ /* skip head and start with 1st element */
++ fcp_rscn_element++;
++ switch (fcp_rscn_element->addr_format) {
++ case ZFCP_PORT_ADDRESS:
++ ZFCP_LOG_FLAGS(1,"ZFCP_PORT_ADDRESS\n");
++ range_mask=ZFCP_PORTS_RANGE_PORT;
++ no_notifications=1;
++ break;
++ case ZFCP_AREA_ADDRESS:
++ ZFCP_LOG_FLAGS(1,"ZFCP_AREA_ADDRESS\n");
++ /* skip head and start with 1st element */
++ range_mask=ZFCP_PORTS_RANGE_AREA;
++ no_notifications = ZFCP_NO_PORTS_PER_AREA;
++ break;
++ case ZFCP_DOMAIN_ADDRESS:
++ ZFCP_LOG_FLAGS(1,"ZFCP_DOMAIN_ADDRESS\n");
++ range_mask=ZFCP_PORTS_RANGE_DOMAIN;
++ no_notifications = ZFCP_NO_PORTS_PER_DOMAIN;
++ break;
++ case ZFCP_FABRIC_ADDRESS:
++ ZFCP_LOG_FLAGS(1,"ZFCP_FABRIC_ADDRESS\n");
++ range_mask=ZFCP_PORTS_RANGE_FABRIC;
++ no_notifications = ZFCP_NO_PORTS_PER_FABRIC;
++ break;
++ }
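++ /* verify each known port within the affected address range via
++ * zfcp_test_link(); if fewer ports are known than the range covers,
++ * the unknown ones are reopened after this loop */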
++ known=0;
++ write_lock_irqsave(&adapter->port_list_lock, flags);
++ ZFCP_FOR_EACH_PORT (adapter, port) {
++ if (!atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status))
++ continue;
++ if(((u32)port->d_id & range_mask)
++ == (u32)(fcp_rscn_element->nport_did & range_mask)) {
++ known++;
++#if 0
++ printk("known=%d, reopen did 0x%x\n",
++ known,
++ fcp_rscn_element->nport_did);
++#endif
++ debug_text_event(adapter->erp_dbf,1,"unsol_els_rscnk:");
++ zfcp_test_link(port);
++ }
++ }
++ write_unlock_irqrestore(&adapter->port_list_lock, flags);
++#if 0
++ printk("known %d, no_notifications %d\n",
++ known, no_notifications);
++#endif
++ if(known<no_notifications) {
++ ZFCP_LOG_DEBUG("At least one unknown port changed state. "
++ "Unknown ports need to be reopened.\n");
++ reopen_unknown=1;
++ }
++ } /* for (i=1; i < no_entries; i++) */
++
++ if(reopen_unknown) {
++ ZFCP_LOG_DEBUG("At least one unknown did "
++ "underwent a state change.\n");
++ write_lock_irqsave(&adapter->port_list_lock, flags);
++ ZFCP_FOR_EACH_PORT (adapter, port) {
++ if (atomic_test_mask(ZFCP_STATUS_PORT_NAMESERVER, &port->status))
++ continue;
++ if (!atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status)) {
++ ZFCP_LOG_INFO("Received state change notification."
++ "Trying to open the port with WWPN "
++ "0x%016Lx. Hope it's there now.\n",
++ (llui_t)port->wwpn);
++ debug_text_event(adapter->erp_dbf,1,"unsol_els_rscnu:");
++ zfcp_erp_port_reopen(port,
++ ZFCP_STATUS_COMMON_ERP_FAILED);
++ }
++ }
++ write_unlock_irqrestore(&adapter->port_list_lock, flags);
++ }
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++void zfcp_fsf_incoming_els_plogi(
++ zfcp_adapter_t *adapter,
++ fsf_status_read_buffer_t *status_buffer)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ logi *els_logi = (logi*) status_buffer->payload;
++ zfcp_port_t *port;
++ unsigned long flags;
++
++ zfcp_in_els_dbf_event(adapter, "##plogi", status_buffer, 28);
++
++ write_lock_irqsave(&adapter->port_list_lock, flags);
++ ZFCP_FOR_EACH_PORT(adapter, port) {
++ if (port->wwpn == (*(wwn_t *)&els_logi->nport_wwn))
++ break;
++ }
++ write_unlock_irqrestore(&adapter->port_list_lock, flags);
++
++ if (!port) {
++ ZFCP_LOG_DEBUG(
++ "Re-open port indication received "
++ "for the non-existing port with D_ID "
++ "0x%06x, on the adapter with devno "
++ "0x%04x. Ignored.\n",
++ status_buffer->d_id,
++ adapter->devno);
++ } else {
++ debug_text_event(adapter->erp_dbf, 1, "unsol_els_plogi:");
++ debug_event(adapter->erp_dbf, 1, &els_logi->nport_wwn, 8);
++ zfcp_erp_port_forced_reopen(port, 0);
++ }
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++void zfcp_fsf_incoming_els_logo(
++ zfcp_adapter_t *adapter,
++ fsf_status_read_buffer_t *status_buffer)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ fcp_logo_t *els_logo = (fcp_logo_t*) status_buffer->payload;
++ zfcp_port_t *port;
++ unsigned long flags;
++
++ zfcp_in_els_dbf_event(adapter, "##logo", status_buffer, 16);
++
++ write_lock_irqsave(&adapter->port_list_lock, flags);
++ ZFCP_FOR_EACH_PORT(adapter, port) {
++ if (port->wwpn == els_logo->nport_wwpn)
++ break;
++ }
++ write_unlock_irqrestore(&adapter->port_list_lock, flags);
++
++ if (!port) {
++ ZFCP_LOG_DEBUG(
++ "Re-open port indication received "
++ "for the non-existing port with D_ID "
++ "0x%06x, on the adapter with devno "
++ "0x%04x. Ignored.\n",
++ status_buffer->d_id,
++ adapter->devno);
++ } else {
++ debug_text_event(adapter->erp_dbf, 1, "unsol_els_logo:");
++ debug_event(adapter->erp_dbf, 1, &els_logo->nport_wwpn, 8);
++ zfcp_erp_port_forced_reopen(port, 0);
++ }
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++void zfcp_fsf_incoming_els_unknown(
++ zfcp_adapter_t *adapter,
++ fsf_status_read_buffer_t *status_buffer)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ zfcp_in_els_dbf_event(adapter, "##undef", status_buffer, 24);
++ ZFCP_LOG_NORMAL(
++ "warning: Unknown incoming ELS (0x%x) received "
++ "for the adapter with devno 0x%04x\n",
++ *(u32*)(status_buffer->payload),
++ adapter->devno);
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++void zfcp_fsf_incoming_els(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ fsf_status_read_buffer_t *status_buffer = fsf_req->data.status_read.buffer;
++ u32 els_type = *(u32*)(status_buffer->payload);
++ zfcp_adapter_t *adapter = fsf_req->adapter;
++
++ if (els_type == LS_PLOGI)
++ zfcp_fsf_incoming_els_plogi(adapter, status_buffer);
++ else if (els_type == LS_LOGO)
++ zfcp_fsf_incoming_els_logo(adapter, status_buffer);
++ else if ((els_type & 0xffff0000) == LS_RSCN)
++ /* we are only concerned with the command, not the length */
++ zfcp_fsf_incoming_els_rscn(adapter, status_buffer);
++ else
++ zfcp_fsf_incoming_els_unknown(adapter, status_buffer);
++
++ zfcp_callback_do_incomming_els(adapter, status_buffer->payload);
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_start_scsi_er_timer
++ *
++ * purpose: sets up the timer to watch over SCSI error recovery
++ * actions and starts it
++ *
++ */
++static void zfcp_fsf_start_scsi_er_timer(zfcp_adapter_t *adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++ ZFCP_LOG_TRACE("enter (adapter=0x%lx\n",
++ (unsigned long)adapter);
++ adapter->scsi_er_timer.function =
++ zfcp_fsf_scsi_er_timeout_handler;
++ adapter->scsi_er_timer.data =
++ (unsigned long)adapter;
++ adapter->scsi_er_timer.expires =
++ jiffies + ZFCP_SCSI_ER_TIMEOUT;
++ add_timer(&adapter->scsi_er_timer);
++
++ ZFCP_LOG_TRACE("exit\n");
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++
++/*
++ * function: zfcp_fsf_abort_fcp_command
++ *
++ * purpose: tells FSF to abort a running SCSI command
++ *
++ * returns: address of initiated FSF request
++ * NULL - request could not be initiated
++ *
++ * FIXME(design) shouldn't this be modified to return an int
++ * also...don't know how though
++ */
++static zfcp_fsf_req_t * zfcp_fsf_abort_fcp_command(
++ unsigned long old_req_id,
++ zfcp_adapter_t *adapter,
++ zfcp_unit_t *unit,
++ int req_flags)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ volatile qdio_buffer_element_t *sbale;
++ zfcp_fsf_req_t *fsf_req = NULL;
++ int retval = 0;
++ unsigned long lock_flags;
++
++ ZFCP_LOG_TRACE(
++ "enter (old_req_id=0x%lx, adapter=0x%lx, "
++ "unit=0x%lx, req_flags=0x%x)\n",
++ old_req_id,
++ (unsigned long)adapter,
++ (unsigned long)unit,
++ req_flags);
++
++ /* setup new FSF request */
++ retval = zfcp_fsf_req_create(
++ adapter,
++ FSF_QTCB_ABORT_FCP_CMND,
++ req_flags,
++ &adapter->pool.fsf_req_scsi,
++ &lock_flags,
++ &fsf_req);
++ if (retval < 0) {
++ ZFCP_LOG_INFO(
++ "error: Out of resources. Could not create an "
++ "abort command request on the device with "
++ "the FCP_LUN 0x%016Lx connected to "
++ "the port with WWPN 0x%016Lx connected to "
++ "the adapter with devno 0x%04x.\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ adapter->devno);
++ goto out;
++ }
++
++ sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
++ sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
++ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
++
++ fsf_req->data.abort_fcp_command.unit = unit;
++
++ /* set handles of unit and its parent port in QTCB */
++ fsf_req->qtcb->header.lun_handle = unit->handle;
++ fsf_req->qtcb->header.port_handle = unit->port->handle;
++
++ /* set handle of request which should be aborted */
++ fsf_req->qtcb->bottom.support.req_handle = (u64)old_req_id;
++
++#if 0
++ /* DEBUG */
++ goto out;
++#endif
++
++ /* start QDIO request for this FSF request */
++
++ zfcp_fsf_start_scsi_er_timer(adapter);
++ retval = zfcp_fsf_req_send(fsf_req, NULL);
++ if (retval) {
++ del_timer(&adapter->scsi_er_timer);
++ ZFCP_LOG_INFO(
++ "error: Could not send an abort command request "
++ "for a command on the adapter with devno 0x%04x, "
++ "port WWPN 0x%016Lx and unit FCP_LUN 0x%016Lx\n",
++ adapter->devno,
++ (llui_t)unit->port->wwpn,
++ (llui_t)unit->fcp_lun);
++ if (zfcp_fsf_req_free(fsf_req)) {
++ ZFCP_LOG_NORMAL(
++ "bug: Could not remove one FSF "
++ "request. Memory leakage possible. "
++ "(debug info 0x%lx).\n",
++ (unsigned long)fsf_req);
++ }
++ fsf_req = NULL;
++ goto out;
++ }
++
++ ZFCP_LOG_DEBUG(
++ "Abort FCP Command request initiated "
++ "(adapter devno=0x%04x, port D_ID=0x%06x, "
++ "unit FCP_LUN=0x%016Lx, old_req_id=0x%lx)\n",
++ adapter->devno,
++ unit->port->d_id,
++ (llui_t)unit->fcp_lun,
++ old_req_id);
++
++out:
++ write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
++
++ ZFCP_LOG_DEBUG("exit (0x%lx)\n", (unsigned long)fsf_req);
++
++ return fsf_req;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_abort_fcp_command_handler
++ *
++ * purpose: is called for finished Abort FCP Command request
++ *
++ * returns:
++ */
++static int zfcp_fsf_abort_fcp_command_handler(
++ zfcp_fsf_req_t *new_fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = -EINVAL;
++ zfcp_unit_t *unit = new_fsf_req->data.abort_fcp_command.unit;
++ unsigned char status_qual = new_fsf_req->qtcb->header.fsf_status_qual.word[0];
++
++ ZFCP_LOG_TRACE(
++ "enter (new_fsf_req=0x%lx)\n",
++ (unsigned long)new_fsf_req);
++
++ del_timer(&new_fsf_req->adapter->scsi_er_timer);
++
++ if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
++ /* do not set ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED */
++ goto skip_fsfstatus;
++ }
++
++ /* evaluate FSF status in QTCB */
++ switch (new_fsf_req->qtcb->header.fsf_status) {
++
++ case FSF_PORT_HANDLE_NOT_VALID :
++ if (status_qual >> 4 != (status_qual & 0xf)) {
++ ZFCP_LOG_FLAGS(2, "FSF_PORT_HANDLE_NOT_VALID\n");
++ debug_text_event(new_fsf_req->adapter->erp_dbf,3,"fsf_s_phand_nv0");
++ /* In this case a command that was sent prior to a port
++ * reopen was aborted (handles are different). This is fine.
++ */
++ } else {
++ ZFCP_LOG_FLAGS(1, "FSF_PORT_HANDLE_NOT_VALID\n");
++ ZFCP_LOG_INFO("Temporary port identifier (handle) 0x%x "
++ "for the port with WWPN 0x%016Lx connected to "
++ "the adapter of devno 0x%04x is "
++ "not valid. This may happen occasionally.\n",
++ unit->port->handle,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ ZFCP_LOG_INFO("status qualifier:\n");
++ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
++ (char*)&new_fsf_req->qtcb->header.fsf_status_qual,
++ 16);
++ /* Let's hope this sorts out the mess */
++ debug_text_event(new_fsf_req->adapter->erp_dbf,1,"fsf_s_phand_nv1");
++ zfcp_erp_adapter_reopen(unit->port->adapter, 0);
++ new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ }
++ break;
++
++ case FSF_LUN_HANDLE_NOT_VALID :
++ if (status_qual >> 4 != (status_qual & 0xf)) {
++ /* 2 */
++ ZFCP_LOG_FLAGS(0, "FSF_LUN_HANDLE_NOT_VALID\n");
++ debug_text_event(new_fsf_req->adapter->erp_dbf,3,"fsf_s_lhand_nv0");
++ /* In this case a command that was sent prior to a unit
++ * reopen was aborted (handles are different). This is fine.
++ */
++ } else {
++ ZFCP_LOG_FLAGS(1, "FSF_LUN_HANDLE_NOT_VALID\n");
++ ZFCP_LOG_INFO("Warning: Temporary LUN identifier (handle) 0x%x "
++ "of the logical unit with FCP_LUN 0x%016Lx at "
++ "the remote port with WWPN 0x%016Lx connected "
++ "to the adapter with devno 0x%04x is "
++ "not valid. This may happen in rare cases."
++ "Trying to re-establish link.\n",
++ unit->handle,
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ ZFCP_LOG_DEBUG("Status qualifier data:\n");
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_DEBUG,
++ (char*)&new_fsf_req->qtcb->header.fsf_status_qual,
++ 16);
++ /* Let's hope this sorts out the mess */
++ debug_text_event(new_fsf_req->adapter->erp_dbf,1,"fsf_s_lhand_nv1");
++ zfcp_erp_port_reopen(unit->port, 0);
++ new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ }
++ break;
++
++ case FSF_FCP_COMMAND_DOES_NOT_EXIST :
++ ZFCP_LOG_FLAGS(2, "FSF_FCP_COMMAND_DOES_NOT_EXIST\n");
++ retval = 0;
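++ /* the command to be aborted has already completed on the adapter,
++ * so report "abort not needed" instead of an error */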
++#ifdef ZFCP_DEBUG_REQUESTS
++ /* debug feature area which records fsf request sequence numbers */
++ debug_text_event(new_fsf_req->adapter->req_dbf, 3, "no_exist");
++ debug_event(new_fsf_req->adapter->req_dbf, 3,
++ &new_fsf_req->qtcb->bottom.support.req_handle,
++ sizeof(unsigned long));
++#endif /* ZFCP_DEBUG_REQUESTS */
++ debug_text_event(new_fsf_req->adapter->erp_dbf,3,"fsf_s_no_exist");
++ new_fsf_req->status
++ |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
++ break;
++
++ case FSF_PORT_BOXED :
++ /* 2 */
++ ZFCP_LOG_FLAGS(0, "FSF_PORT_BOXED\n");
++ ZFCP_LOG_DEBUG("The remote port "
++ "with WWPN 0x%016Lx on the adapter with "
++ "devno 0x%04x needs to be reopened\n",
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ debug_text_event(new_fsf_req->adapter->erp_dbf,2,"fsf_s_pboxed");
++ zfcp_erp_port_reopen(unit->port, 0);
++ new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
++ | ZFCP_STATUS_FSFREQ_RETRY;
++ break;
++
++ case FSF_ADAPTER_STATUS_AVAILABLE :
++ /* 2 */
++ ZFCP_LOG_FLAGS(0, "FSF_ADAPTER_STATUS_AVAILABLE\n");
++ switch (new_fsf_req->qtcb->header.fsf_status_qual.word[0]){
++ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE :
++ ZFCP_LOG_FLAGS(2, "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
++ debug_text_event(new_fsf_req->adapter->erp_dbf,1,"fsf_sq_ltest");
++ /* reopening link to port */
++ zfcp_erp_port_reopen(unit->port, 0);
++ new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED :
++ ZFCP_LOG_FLAGS(2, "FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
++ /* SCSI stack will escalate */
++ debug_text_event(new_fsf_req->adapter->erp_dbf,1,"fsf_sq_ulp");
++ new_fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ default:
++ ZFCP_LOG_NORMAL("bug: Wrong status qualifier 0x%x arrived.\n",
++ new_fsf_req->qtcb->header.fsf_status_qual.word[0]);
++ debug_text_event(new_fsf_req->adapter->erp_dbf,0,"fsf_sq_inval:");
++ debug_exception(new_fsf_req->adapter->erp_dbf,0,
++ &new_fsf_req->qtcb->header.fsf_status_qual.word[0],
++ sizeof(u32));
++ break;
++ }
++ break;
++
++ case FSF_GOOD :
++ /* 3 */
++ ZFCP_LOG_FLAGS(2, "FSF_GOOD\n");
++ retval = 0;
++ new_fsf_req->status
++ |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
++ break;
++
++ default :
++ ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
++ "(debug info 0x%x)\n",
++ new_fsf_req->qtcb->header.fsf_status);
++ debug_text_event(new_fsf_req->adapter->erp_dbf,0,"fsf_s_inval:");
++ debug_exception(new_fsf_req->adapter->erp_dbf,0,
++ &new_fsf_req->qtcb->header.fsf_status,
++ sizeof(u32));
++ break;
++ }
++
++ skip_fsfstatus:
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_nameserver_enqueue
++ *
++ * purpose: creates the port structure used to address the fabric
++ * name server and attaches it to the adapter
++ *
++ * returns:
++ */
++static int zfcp_nameserver_enqueue(zfcp_adapter_t *adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ int retval = 0;
++ zfcp_port_t *port;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ /* generate port structure */
++ retval = zfcp_port_enqueue(
++ adapter,
++ 0,
++ 0,
++ ZFCP_STATUS_PORT_NAMESERVER,
++ &port);
++ if (retval) {
++ ZFCP_LOG_INFO(
++ "error: Could not establish a connection to the "
++ "fabric name server connected to the "
++ "adapter with devno 0x%04x\n",
++ adapter->devno);
++ goto out;
++ }
++ /* address the name server via its well-known directory service D_ID */
++ port->d_id = ZFCP_DID_NAMESERVER;
++ /* enter nameserver port into adapter struct */
++ adapter->nameserver_port=port;
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ *
++ */
++static void zfcp_gid_pn_buffers_free(struct zfcp_gid_pn_data *gid_pn)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ ZFCP_LOG_TRACE("enter\n");
++ if (gid_pn->ct.pool != 0) {
++ zfcp_mem_pool_return(gid_pn, gid_pn->ct.pool);
++ } else {
++ ZFCP_KFREE(gid_pn, sizeof(struct zfcp_gid_pn_data));
++ }
++
++ ZFCP_LOG_TRACE("exit\n");
++ return;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ *
++ */
++static int zfcp_gid_pn_buffers_alloc(struct zfcp_gid_pn_data **gid_pn,
++ zfcp_mem_pool_t *pool)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++#ifdef ZFCP_MEM_POOL_ONLY
++ *gid_pn = NULL;
++#else
++ *gid_pn = ZFCP_KMALLOC(sizeof(struct zfcp_gid_pn_data), GFP_KERNEL);
++#endif
++ if ((*gid_pn == 0) && (pool != 0))
++ *gid_pn = zfcp_mem_pool_find(pool);
++
++ if (*gid_pn == 0)
++ return -ENOMEM;
++
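++ /* wire the embedded scatterlist entries to the request and response
++ * IUs that live in the same gid_pn allocation */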
++ (*gid_pn)->ct.req = &(*gid_pn)->req;
++ (*gid_pn)->ct.resp = &(*gid_pn)->resp;
++ (*gid_pn)->ct.req_count = (*gid_pn)->ct.resp_count = 1;
++ (*gid_pn)->req.address = (char *) &(*gid_pn)->ct_iu_req;
++ (*gid_pn)->resp.address = (char *) &(*gid_pn)->ct_iu_resp;
++ (*gid_pn)->req.length = sizeof(struct ct_iu_ns_req);
++ (*gid_pn)->resp.length = sizeof(struct ct_iu_gid_pn);
++
++ return 0;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ *
++ */
++static int zfcp_ns_gid_pn_request(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ zfcp_adapter_t *adapter = erp_action->adapter;
++ struct zfcp_gid_pn_data *gid_pn = 0;
++ struct ct_iu_ns_req *ct_iu_req;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter (erp_action=0x%lx)\n", (unsigned long)erp_action);
++ if (!adapter->nameserver_port) {
++ ZFCP_LOG_NORMAL("bug: no nameserver available\n");
++ retval = -EINVAL;
++ goto out;
++ }
++
++ retval = zfcp_gid_pn_buffers_alloc(&gid_pn, &adapter->pool.data_gid_pn);
++ if (retval < 0) {
++ ZFCP_LOG_INFO("error: Out of memory. Could not allocate "
++ "buffers for nameserver request GID_PN. "
++ "(adapter: 0x%04x)\n", adapter->devno);
++ goto out;
++ }
++
++ /* setup nameserver request */
++ ct_iu_req = (struct ct_iu_ns_req *) gid_pn->ct.req->address;
++ ct_iu_req->header.revision = ZFCP_CT_REVISION;
++ ct_iu_req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
++ ct_iu_req->header.gs_subtype = ZFCP_CT_NAME_SERVER;
++ ct_iu_req->header.options = ZFCP_CT_SYNCHRONOUS;
++ ct_iu_req->header.cmd_rsp_code = ZFCP_CT_GID_PN;
++ ct_iu_req->header.max_res_size = ZFCP_CT_MAX_SIZE;
++ ct_iu_req->data.wwpn = erp_action->port->wwpn;
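++ /* GID_PN asks the fabric name server to resolve this WWPN into a
++ * D_ID; the reply is evaluated in zfcp_ns_gid_pn_handler() */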
++
++ /* setup parameters for send generic command */
++ gid_pn->ct.port = adapter->nameserver_port;
++ gid_pn->ct.handler = zfcp_ns_gid_pn_handler;
++ gid_pn->ct.handler_data = (unsigned long) erp_action;
++ gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
++ gid_pn->ct.timer = &erp_action->timer;
++ erp_action->data.gid_pn = gid_pn;
++
++ retval = zfcp_fsf_send_ct(&gid_pn->ct,
++ &erp_action->adapter->pool.fsf_req_erp,
++ erp_action);
++ if (retval) {
++ ZFCP_LOG_INFO("error: Could not send nameserver request GID_PN "
++ "via adapter with devno 0x%04x\n",
++ adapter->devno);
++ zfcp_gid_pn_buffers_free(gid_pn);
++ erp_action->data.gid_pn = 0;
++ }
++
++ out:
++ ZFCP_LOG_TRACE("exit (%d)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/**
++ *
++ */
++static void zfcp_ns_gid_pn_handler(unsigned long data)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ zfcp_erp_action_t *erp_action = (zfcp_erp_action_t *) data;
++ zfcp_port_t *port = erp_action->port;
++ struct zfcp_send_ct *ct = &erp_action->data.gid_pn->ct;
++ struct ct_iu_ns_req *ct_iu_req =
++ (struct ct_iu_ns_req *) ct->req->address;
++ struct ct_iu_gid_pn *ct_iu_resp =
++ (struct ct_iu_gid_pn *) ct->resp->address;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ if (ct->status)
++ goto failed;
++
++ if (zfcp_check_ct_response(&ct_iu_resp->header)) {
++ /* FIXME: do we need some specific erp entry points */
++ atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status);
++ goto failed;
++ }
++ /* paranoia */
++ if (ct_iu_req->data.wwpn != port->wwpn) {
++ ZFCP_LOG_NORMAL(
++ "bug: Port WWPN returned by nameserver lookup "
++ "does not correspond to "
++ "the expected value on the adapter with devno 0x%04x. "
++ "(debug info 0x%016Lx, 0x%016Lx)\n",
++ port->adapter->devno,
++ (llui_t)port->wwpn,
++ (llui_t)ct_iu_req->data.wwpn);
++ goto failed;
++ }
++
++ /* looks like a valid d_id */
++ port->d_id = ZFCP_DID_MASK & ct_iu_resp->d_id;
++ atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
++ ZFCP_LOG_DEBUG(
++ "devno 0x%04x: WWPN=0x%016Lx ---> D_ID=0x%06x\n",
++ port->adapter->devno,
++ (llui_t)port->wwpn,
++ port->d_id);
++ goto out;
++
++ failed:
++ ZFCP_LOG_NORMAL(
++ "warning: WWPN 0x%016Lx not found by nameserver lookup "
++ "using the adapter with devno 0x%04x\n",
++ (llui_t)port->wwpn,
++ port->adapter->devno);
++ ZFCP_LOG_DEBUG("CT IUs do not match:\n");
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_DEBUG,
++ (char*)ct_iu_req,
++ sizeof(struct ct_iu_ns_req));
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_DEBUG,
++ (char*)ct_iu_resp,
++ sizeof(struct ct_iu_gid_pn));
++
++ out:
++ zfcp_gid_pn_buffers_free(erp_action->data.gid_pn);
++ erp_action->data.gid_pn = 0;
++ ZFCP_LOG_TRACE("exit\n");
++ return;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/**
++ * FIXME: document
++ * FIXME: check for FS_RJT IU and set appropriate return code
++ */
++int zfcp_ns_ga_nxt_request(zfcp_port_t *port, struct ct_iu_ga_nxt *ct_iu_resp)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ struct ct_iu_ns_req *ct_iu_req;
++ struct zfcp_send_ct *ct;
++ zfcp_adapter_t *adapter = port->adapter;
++ int ret = 0;
++
++ DECLARE_COMPLETION(wait);
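++ /* this lookup runs synchronously: zfcp_ns_ga_nxt_handler() signals
++ * the completion below once the name server has answered */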
++
++ memset(ct_iu_resp, 0, sizeof(*ct_iu_resp));
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ if (!adapter->nameserver_port) {
++ ZFCP_LOG_NORMAL("bug: no nameserver available\n");
++ ret = -EINVAL;
++ goto out;
++ }
++
++ if ((ct_iu_req =
++ ZFCP_KMALLOC(sizeof(struct ct_iu_ns_req), GFP_KERNEL)) == 0) {
++ ZFCP_LOG_INFO("error: Out of memory. Unable to create "
++ "CT request (FC-GS), adapter devno 0x%04x.\n",
++ adapter->devno);
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ if ((ct =
++ ZFCP_KMALLOC(sizeof(struct zfcp_send_ct), GFP_KERNEL)) == 0) {
++ ZFCP_LOG_INFO("error: Out of memory. Unable to create "
++ "CT request (FC-GS), adapter devno 0x%04x.\n",
++ adapter->devno);
++ ret = -ENOMEM;
++ goto free_ct_iu_req;
++ }
++
++ if ((ct->req =
++ ZFCP_KMALLOC(sizeof(struct scatterlist), GFP_KERNEL)) == 0) {
++ ZFCP_LOG_INFO("error: Out of memory. Unable to create "
++ "CT request (FC-GS), adapter devno 0x%04x.\n",
++ adapter->devno);
++ ret = -ENOMEM;
++ goto free_ct;
++ }
++
++ if ((ct->resp =
++ ZFCP_KMALLOC(sizeof(struct scatterlist), GFP_KERNEL)) == 0) {
++ ZFCP_LOG_INFO("error: Out of memory. Unable to create "
++ "CT request (FC-GS), adapter devno 0x%04x.\n",
++ adapter->devno);
++ ret = -ENOMEM;
++ goto free_req;
++ }
++
++ /* setup nameserver request */
++ ct_iu_req->header.revision = ZFCP_CT_REVISION;
++ ct_iu_req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE;
++ ct_iu_req->header.gs_subtype = ZFCP_CT_NAME_SERVER;
++ ct_iu_req->header.options = ZFCP_CT_SYNCHRONOUS;
++ ct_iu_req->header.cmd_rsp_code = ZFCP_CT_GA_NXT;
++ ct_iu_req->header.max_res_size = ZFCP_CT_MAX_SIZE;
++ ct_iu_req->data.d_id = ZFCP_DID_MASK & (port->d_id - 1);
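++ /* GA_NXT returns the attributes of the registered port following
++ * the given D_ID, so querying for (d_id - 1) is meant to yield this
++ * port's own entry */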
++
++ ct->completion = &wait;
++ ct->req->address = (char *) ct_iu_req;
++ ct->resp->address = (char *) ct_iu_resp;
++ ct->req->length = sizeof(*ct_iu_req);
++ ct->resp->length = sizeof(*ct_iu_resp);
++ ct->req_count = ct->resp_count = 1;
++
++ /* setup parameters for send generic command */
++ ct->port = adapter->nameserver_port;
++ ct->handler = zfcp_ns_ga_nxt_handler;
++ ct->handler_data = (unsigned long) ct;
++
++ ct->timeout = ZFCP_NS_GA_NXT_TIMEOUT;
++
++ ret = zfcp_fsf_send_ct(ct, NULL, NULL);
++ if (ret) {
++ ZFCP_LOG_INFO("error: Could not send nameserver request GA_NXT "
++ "via adapter with devno 0x%04x\n",
++ adapter->devno);
++ goto free_resp;
++ }
++ wait_for_completion(&wait);
++ ret = ct->status;
++
++ free_resp:
++ ZFCP_KFREE(ct->resp, sizeof(struct scatterlist));
++ free_req:
++ ZFCP_KFREE(ct->req, sizeof(struct scatterlist));
++ free_ct:
++ ZFCP_KFREE(ct, sizeof(struct zfcp_send_ct));
++ free_ct_iu_req:
++ ZFCP_KFREE(ct_iu_req, sizeof(struct ct_iu_ns_req));
++ out:
++ ZFCP_LOG_TRACE("exit (%d)\n", ret);
++ return ret;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * FIXME: document
++ * FIXME: check for FS_RJT IU and return appropriate status
++ */
++static void zfcp_ns_ga_nxt_handler(unsigned long data)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ struct zfcp_send_ct *ct = (struct zfcp_send_ct *) data;
++ struct ct_iu_ns_req *ct_iu_req =
++ (struct ct_iu_ns_req *) ct->req[0].address;
++ struct ct_iu_ga_nxt *ct_iu_resp =
++ (struct ct_iu_ga_nxt *) ct->resp[0].address;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ if (zfcp_check_ct_response(&ct_iu_resp->header))
++ goto failed;
++
++ goto out;
++
++ failed:
++ ct->status = -EIO;
++ ZFCP_LOG_DEBUG("CT IU headers do not match:\n");
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_DEBUG,
++ (char*)ct_iu_req,
++ sizeof(struct ct_iu_ns_req));
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_DEBUG,
++ (char*)ct_iu_resp,
++ sizeof(struct ct_iu_ga_nxt));
++out:
++ if (ct->completion != NULL) {
++ complete(ct->completion);
++ }
++ ZFCP_LOG_TRACE("exit\n");
++ return;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/* reject CT_IU reason codes acc. to FC-GS-4 */
++static const struct zfcp_rc_entry zfcp_ct_rc[] = {
++ {0x01, "invalid command code"},
++ {0x02, "invalid version level"},
++ {0x03, "logical error"},
++ {0x04, "invalid CT_IU size"},
++ {0x05, "logical busy"},
++ {0x07, "protocol error"},
++ {0x09, "unable to perform command request"},
++ {0x0b, "command not supported"},
++ {0x0d, "server not available"},
++ {0x0e, "session could not be established"},
++ {0xff, "vendor specific error"},
++ {0, NULL},
++};
++
++/* LS_RJT reason codes acc. to FC-FS */
++static const struct zfcp_rc_entry zfcp_ls_rjt_rc[] = {
++ {0x01, "invalid LS_Command code"},
++ {0x03, "logical error"},
++ {0x05, "logical busy"},
++ {0x07, "protocol error"},
++ {0x09, "unable to perform command request"},
++ {0x0b, "command not supported"},
++ {0x0e, "command already in progress"},
++ {0xff, "vendor specific error"},
++ {0, NULL},
++};
++
++/* reject reason codes according to FC-PH/FC-FS */
++static const struct zfcp_rc_entry zfcp_p_rjt_rc[] = {
++ {0x01, "invalid D_ID"},
++ {0x02, "invalid S_ID"},
++ {0x03, "Nx_Port not available, temporary"},
++ {0x04, "Nx_Port not available, permament"},
++ {0x05, "class not supported"},
++ {0x06, "delimiter usage error"},
++ {0x07, "TYPE not supported"},
++ {0x08, "invalid Link_Control"},
++ {0x09, "invalid R_CTL field"},
++ {0x0a, "invalid F_CTL field"},
++ {0x0b, "invalid OX_ID"},
++ {0x0c, "invalid RX_ID"},
++ {0x0d, "invalid SEQ_ID"},
++ {0x0e, "invalid DF_CTL"},
++ {0x0f, "invalid SEQ_CNT"},
++ {0x10, "invalid parameter field"},
++ {0x11, "exchange error"},
++ {0x12, "protocol error"},
++ {0x13, "incorrect length"},
++ {0x14, "unsupported ACK"},
++ {0x15, "class of service not supported by entity at FFFFFE"},
++ {0x16, "login required"},
++ {0x17, "excessive sequences attempted"},
++ {0x18, "unable to establish exchange"},
++ {0x1a, "fabric path not available"},
++ {0x1b, "invalid VC_ID (class 4)"},
++ {0x1c, "invalid CS_CTL field"},
++ {0x1d, "insufficient resources for VC (class 4)"},
++ {0x1f, "invalid class of service"},
++ {0x20, "preemption request rejected"},
++ {0x21, "preemption not enabled"},
++ {0x22, "multicast error"},
++ {0x23, "multicast error terminate"},
++ {0x24, "process login required"},
++ {0xff, "vendor specific reject"},
++ {0, NULL},
++};
++
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++/**
++ * zfcp_rc_description - return description for given reason code
++ * @code: reason code
++ * @rc_table: table of reason codes and descriptions
++ */
++static inline const char *
++zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table)
++{
++ const char *descr = "unknown reason code";
++
++ do {
++ if (code == rc_table->code) {
++ descr = rc_table->description;
++ break;
++ }
++ rc_table++;
++ } while (rc_table->code && rc_table->description);
++
++ return descr;
++}
++
++/**
++ * zfcp_check_ct_response - evaluate reason code for CT_IU
++ * @rjt: response payload to a CT_IU request
++ * Return: 0 for accept CT_IU, 1 for reject CT_IU or invalid response code
++ */
++int
++zfcp_check_ct_response(struct ct_hdr *rjt)
++{
++ if (rjt->cmd_rsp_code == ZFCP_CT_ACCEPT)
++ return 0;
++
++ if (rjt->cmd_rsp_code != ZFCP_CT_REJECT) {
++ ZFCP_LOG_NORMAL("error: invalid Generic Service command/"
++ "response code (0x%04hx)\n",
++ rjt->cmd_rsp_code);
++ return 1;
++ }
++
++ ZFCP_LOG_INFO("Generic Service command rejected\n");
++ ZFCP_LOG_INFO("%s (0x%02x, 0x%02x, 0x%02x)\n",
++ zfcp_rc_description(rjt->reason_code, zfcp_ct_rc),
++ (u32) rjt->reason_code, (u32) rjt->reason_code_expl,
++ (u32) rjt->vendor_unique);
++
++ return 1;
++}
++
++/**
++ * zfcp_print_els_rjt - print reject parameter and description for ELS reject
++ * @rjt_par: reject parameter acc. to FC-PH/FC-FS
++ * @rc_table: table of reason codes and descriptions
++ */
++static inline void
++zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par,
++ const struct zfcp_rc_entry *rc_table)
++{
++ ZFCP_LOG_INFO("%s (%02x %02x %02x %02x)\n",
++ zfcp_rc_description(rjt_par->reason_code, rc_table),
++ (u32) rjt_par->action, (u32) rjt_par->reason_code,
++ (u32) rjt_par->reason_expl, (u32) rjt_par->vendor_unique);
++}
++
++/**
++ * zfcp_fsf_handle_els_rjt - evaluate status qualifier/reason code on ELS reject
++ * @sq: status qualifier word
++ * @rjt_par: reject parameter as described in FC-PH and FC-FS
++ * Return: -EREMOTEIO for LS_RJT, -EREMCHG for invalid D_ID, -EIO otherwise
++ */
++int
++zfcp_handle_els_rjt(u32 sq, struct zfcp_ls_rjt_par *rjt_par)
++{
++ int ret = -EIO;
++
++ if (sq == FSF_IOSTAT_NPORT_RJT) {
++ ZFCP_LOG_INFO("ELS rejected (P_RJT)\n");
++ zfcp_print_els_rjt(rjt_par, zfcp_p_rjt_rc);
++ /* invalid d_id */
++ if (rjt_par->reason_code == 0x01)
++ ret = -EREMCHG;
++ } else if (sq == FSF_IOSTAT_FABRIC_RJT) {
++ ZFCP_LOG_INFO("ELS rejected (F_RJT)\n");
++ zfcp_print_els_rjt(rjt_par, zfcp_p_rjt_rc);
++ /* invalid d_id */
++ if (rjt_par->reason_code == 0x01)
++ ret = -EREMCHG;
++ } else if (sq == FSF_IOSTAT_LS_RJT) {
++ ZFCP_LOG_INFO("ELS rejected (LS_RJT)\n");
++ zfcp_print_els_rjt(rjt_par, zfcp_ls_rjt_rc);
++ ret = -EREMOTEIO;
++ } else
++ ZFCP_LOG_INFO("unexpected SQ: 0x%02x\n", sq);
++
++ return ret;
++}
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++
++
++/*
++ * checks whether req buffer and resp bother fit into one SBALE each
++ */
++static inline int
++zfcp_use_one_sbal(struct scatterlist *req, int req_count,
++ struct scatterlist *resp, int resp_count)
++{
++ return ((req_count == 1) && (resp_count == 1) &&
++ (((unsigned long) req[0].address & PAGE_MASK) ==
++ ((unsigned long) (req[0].address +
++ req[0].length - 1) & PAGE_MASK)) &&
++ (((unsigned long) resp[0].address & PAGE_MASK) ==
++ ((unsigned long) (resp[0].address +
++ resp[0].length - 1) & PAGE_MASK)));
++}
++
++/**
++ * FIXME: doc
++ */
++int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, zfcp_mem_pool_t *pool,
++ zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = 0;
++ volatile qdio_buffer_element_t *sbale;
++ zfcp_port_t *port = ct->port;
++ zfcp_adapter_t *adapter = port->adapter;
++ zfcp_fsf_req_t *fsf_req;
++ unsigned long lock_flags;
++ int bytes;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ /* setup new FSF request */
++ retval = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC,
++ ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
++ pool, &lock_flags, &fsf_req);
++ if (retval < 0) {
++ ZFCP_LOG_INFO("error: Out of resources. "
++ "Could not create a CT request (FC-GS), "
++ "destination port D_ID is 0x%06x "
++ "at the adapter with devno 0x%04x.\n",
++ ct->port->d_id, adapter->devno);
++ goto failed_req;
++ }
++
++ if (erp_action != 0) {
++ erp_action->fsf_req = fsf_req;
++ fsf_req->erp_action = erp_action;
++ }
++
++ sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
++ if (zfcp_use_one_sbal(ct->req, ct->req_count,
++ ct->resp, ct->resp_count)){
++ /* both request buffer and response buffer
++ fit into one sbale each */
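++ /* SBALE 0 carries only the transfer-type flags; SBALEs 2 and 3
++ * point at the request and response buffers */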
++ sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
++ sbale[2].addr = ct->req[0].address;
++ sbale[2].length = ct->req[0].length;
++ sbale[3].addr = ct->resp[0].address;
++ sbale[3].length = ct->resp[0].length;
++ sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
++ } else if (adapter->supported_features &
++ FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
++ /* try to use chained SBALs */
++ bytes = zfcp_qdio_sbals_from_sg(fsf_req,
++ SBAL_FLAGS0_TYPE_WRITE_READ,
++ ct->req, ct->req_count,
++ ZFCP_MAX_SBALS_PER_CT_REQ);
++ if (bytes <= 0) {
++ ZFCP_LOG_INFO("error: Out of resources (outbuf). "
++ "Could not create a CT request (FC-GS), "
++ "destination port D_ID is 0x%06x "
++ "at the adapter with devno 0x%04x.\n",
++ ct->port->d_id, adapter->devno);
++ if (bytes == 0) {
++ retval = -ENOMEM;
++ } else {
++ retval = bytes;
++ }
++ goto failed_send;
++ }
++ fsf_req->qtcb->bottom.support.req_buf_length = bytes;
++ fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
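++ /* position at the last SBALE of the current SBAL so that the
++ * response buffers chained next start in a fresh SBAL */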
++ bytes = zfcp_qdio_sbals_from_sg(fsf_req,
++ SBAL_FLAGS0_TYPE_WRITE_READ,
++ ct->resp, ct->resp_count,
++ ZFCP_MAX_SBALS_PER_CT_REQ);
++ if (bytes <= 0) {
++ ZFCP_LOG_INFO("error: Out of resources (inbuf). "
++ "Could not create a CT request (FC-GS), "
++ "destination port D_ID is 0x%06x "
++ "at the adapter with devno 0x%04x.\n",
++ ct->port->d_id, adapter->devno);
++ if (bytes == 0) {
++ retval = -ENOMEM;
++ } else {
++ retval = bytes;
++ }
++ goto failed_send;
++ }
++ fsf_req->qtcb->bottom.support.resp_buf_length = bytes;
++ } else {
++ /* reject send generic request */
++ ZFCP_LOG_INFO(
++ "error: microcode does not support chained SBALs."
++ "CT request (FC-GS) too big."
++ "Destination port D_ID is 0x%06x "
++ "at the adapter with devno 0x%04x.\n",
++ port->d_id, adapter->devno);
++ retval = -EOPNOTSUPP;
++ goto failed_send;
++ }
++
++ /* settings in QTCB */
++ fsf_req->qtcb->header.port_handle = port->handle;
++ fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class;
++ fsf_req->qtcb->bottom.support.timeout = ct->timeout;
++ fsf_req->data.send_ct = ct;
++
++ /* start QDIO request for this FSF request */
++ retval = zfcp_fsf_req_send(fsf_req, ct->timer);
++ if (retval) {
++ ZFCP_LOG_DEBUG("error: Out of resources. Could not send a "
++ "generic services command via adapter with "
++ "devno 0x%04x, port WWPN 0x%016Lx\n",
++ adapter->devno, (llui_t) port->wwpn);
++ goto failed_send;
++ } else {
++ ZFCP_LOG_DEBUG("Send Generic request initiated "
++ "(adapter devno=0x%04x, port D_ID=0x%06x)\n",
++ adapter->devno, port->d_id);
++ goto out;
++ }
++
++ failed_send:
++ if (zfcp_fsf_req_free(fsf_req)) {
++ ZFCP_LOG_NORMAL("bug: Could not remove one FSF request. Memory "
++ "leakage possible. (debug info 0x%lx).\n",
++ (unsigned long)fsf_req);
++ retval = -EINVAL;
++ }
++ if (erp_action != 0) {
++ erp_action->fsf_req = NULL;
++ }
++ failed_req:
++ out:
++ write_unlock_irqrestore(&adapter->request_queue.queue_lock,
++ lock_flags);
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_send_ct_handler
++ *
++ * purpose: is called for finished Send Generic request
++ *
++ * returns:
++ */
++static int zfcp_fsf_send_ct_handler(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = -EINVAL;
++ zfcp_port_t *port = fsf_req->data.send_ct->port;
++ fsf_qtcb_header_t *header = &fsf_req->qtcb->header;
++ u16 subtable, rule, counter;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx)\n",
++ (unsigned long)fsf_req);
++
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
++ /* do not set ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED */
++ goto skip_fsfstatus;
++ }
++
++ /* evaluate FSF status in QTCB */
++ switch (fsf_req->qtcb->header.fsf_status) {
++
++ case FSF_PORT_HANDLE_NOT_VALID :
++ ZFCP_LOG_FLAGS(1,"FSF_PORT_HANDLE_NOT_VALID\n");
++ ZFCP_LOG_DEBUG("Temporary port identifier (handle) 0x%x "
++ "for the port with WWPN 0x%016Lx connected to "
++ "the adapter of devno 0x%04x is "
++ "not valid. This may happen occasionally.\n",
++ port->handle,
++ (llui_t)port->wwpn,
++ port->adapter->devno);
++ ZFCP_LOG_INFO("status qualifier:\n");
++ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
++ (char*)&fsf_req->qtcb->header.fsf_status_qual,
++ 16);
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_s_phandle_nv");
++ zfcp_erp_adapter_reopen(port->adapter, 0);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_SERVICE_CLASS_NOT_SUPPORTED :
++ ZFCP_LOG_FLAGS(0, "FSF_SERVICE_CLASS_NOT_SUPPORTED\n");
++		if (fsf_req->adapter->fc_service_class <= 3) {
++ ZFCP_LOG_NORMAL("error: The adapter with devno=0x%04x does "
++ "not support fibre-channel class %d.\n",
++ port->adapter->devno,
++ fsf_req->adapter->fc_service_class);
++ } else {
++ ZFCP_LOG_NORMAL( "bug: The fibre channel class at the adapter "
++ "with devno 0x%04x is invalid. "
++ "(debug info %d)\n",
++ port->adapter->devno,
++ fsf_req->adapter->fc_service_class);
++ }
++ /* stop operation for this adapter */
++ debug_text_exception(fsf_req->adapter->erp_dbf,0,"fsf_s_class_nsup");
++ zfcp_erp_adapter_shutdown(port->adapter, 0);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_ACCESS_DENIED :
++ ZFCP_LOG_FLAGS(2, "FSF_ACCESS_DENIED\n");
++ ZFCP_LOG_NORMAL("Access denied, cannot send generic command "
++ "(devno=0x%04x wwpn=0x%016Lx)\n",
++ port->adapter->devno,
++ (llui_t)port->wwpn);
++ for (counter = 0; counter < 2; counter++) {
++ subtable = header->fsf_status_qual.halfword[counter * 2];
++ rule = header->fsf_status_qual.halfword[counter * 2 + 1];
++ switch (subtable) {
++ case FSF_SQ_CFDC_SUBTABLE_OS:
++ case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
++ case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
++ case FSF_SQ_CFDC_SUBTABLE_LUN:
++ ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
++ zfcp_act_subtable_type[subtable], rule);
++ break;
++ }
++ }
++ debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_access");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_GENERIC_COMMAND_REJECTED :
++ ZFCP_LOG_FLAGS(1,"FSF_GENERIC_COMMAND_REJECTED\n");
++ ZFCP_LOG_INFO("warning: The port with WWPN 0x%016Lx connected to "
++ "the adapter of devno 0x%04x has "
++ "rejected a generic services command.\n",
++ (llui_t)port->wwpn,
++ port->adapter->devno);
++ ZFCP_LOG_INFO("status qualifier:\n");
++ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_INFO,
++ (char*)&fsf_req->qtcb->header.fsf_status_qual,
++ 16);
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_s_gcom_rej");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_REQUEST_BUF_NOT_VALID :
++ ZFCP_LOG_FLAGS(1, "FSF_REQUEST_BUF_NOT_VALID\n");
++ ZFCP_LOG_NORMAL(
++ "error: The port with WWPN 0x%016Lx connected to "
++ "the adapter of devno 0x%04x has "
++ "rejected a generic services command "
++ "due to invalid request buffer.\n",
++ (llui_t)port->wwpn,
++ port->adapter->devno);
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_s_reqiv");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_RESPONSE_BUF_NOT_VALID :
++ ZFCP_LOG_FLAGS(1, "FSF_RESPONSE_BUF_NOT_VALID\n");
++ ZFCP_LOG_NORMAL(
++ "error: The port with WWPN 0x%016Lx connected to "
++ "the adapter of devno 0x%04x has "
++ "rejected a generic services command "
++ "due to invalid response buffer.\n",
++ (llui_t)port->wwpn,
++ port->adapter->devno);
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_s_resiv");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_PORT_BOXED :
++ ZFCP_LOG_FLAGS(2, "FSF_PORT_BOXED\n");
++ ZFCP_LOG_DEBUG("The remote port "
++ "with WWPN 0x%016Lx on the adapter with "
++ "devno 0x%04x needs to be reopened\n",
++ (llui_t)port->wwpn,
++ port->adapter->devno);
++ debug_text_event(fsf_req->adapter->erp_dbf,2,"fsf_s_pboxed");
++ zfcp_erp_port_reopen(port, 0);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
++ | ZFCP_STATUS_FSFREQ_RETRY;
++ break;
++
++ case FSF_ADAPTER_STATUS_AVAILABLE :
++ ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
++ switch (fsf_req->qtcb->header.fsf_status_qual.word[0]){
++ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE :
++ ZFCP_LOG_FLAGS(2, "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
++ /* reopening link to port */
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_sq_ltest");
++ zfcp_test_link(port);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED :
++ /* ERP strategy will escalate */
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_sq_ulp");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ default:
++ ZFCP_LOG_NORMAL("bug: Wrong status qualifier 0x%x arrived.\n",
++ fsf_req->qtcb->header.fsf_status_qual.word[0]);
++ break;
++ }
++ break;
++
++ case FSF_GOOD :
++ ZFCP_LOG_FLAGS(2,"FSF_GOOD\n");
++ retval = 0;
++ break;
++
++ default :
++ ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
++ "(debug info 0x%x)\n",
++ fsf_req->qtcb->header.fsf_status);
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"fsf_sq_inval:");
++ debug_exception(fsf_req->adapter->erp_dbf,0,
++ &fsf_req->qtcb->header.fsf_status_qual.word[0],
++ sizeof(u32));
++ break;
++ }
++
++skip_fsfstatus:
++ fsf_req->data.send_ct->status = retval;
++
++ /* callback */
++ if (fsf_req->data.send_ct->handler != 0) {
++ (fsf_req->data.send_ct->handler)
++ (fsf_req->data.send_ct->handler_data);
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
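++
++/*
++ * Completion contract of the handler above: send_ct->status is set
++ * (0 for FSF_GOOD, -EINVAL otherwise) before send_ct->handler is
++ * invoked with send_ct->handler_data. A hypothetical completion
++ * handler might look as follows (sketch only, not compiled; the
++ * handler-argument type and the struct name are assumptions, and
++ * my_ct_done is a made-up name):
++ */
++#if 0
++static void my_ct_done(unsigned long data)
++{
++	struct zfcp_send_ct *ct = (struct zfcp_send_ct *) data;
++
++	if (ct->status == 0) {
++		/* CT accepted, evaluate the response in ct->resp */
++	} else {
++		/* request failed, ct->resp contents are not valid */
++	}
++}
++#endif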
++
++
++/*
++ * function: zfcp_fsf_send_els_handler
++ *
++ * purpose: Handler for the Send ELS FSF requests
++ *
++ * returns:	0 - FSF request processed successfully
++ * -EINVAL - FSF status is not 0
++ */
++static int zfcp_fsf_send_els_handler(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ zfcp_adapter_t *adapter = fsf_req->adapter;
++ zfcp_port_t *port = fsf_req->data.send_els->port;
++ fsf_qtcb_header_t *header = &fsf_req->qtcb->header;
++ fsf_qtcb_bottom_support_t *bottom = &fsf_req->qtcb->bottom.support;
++ struct zfcp_send_els *send_els = fsf_req->data.send_els;
++ u16 subtable, rule, counter;
++ int retval = -EINVAL;
++
++ ZFCP_LOG_TRACE("enter (fsf_req=0x%lx)\n", (unsigned long)fsf_req);
++
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
++ goto skip_fsfstatus;
++
++ switch (header->fsf_status) {
++
++ case FSF_GOOD:
++ ZFCP_LOG_FLAGS(2, "FSF_GOOD\n");
++ ZFCP_LOG_INFO(
++ "The FSF request has been successfully completed "
++ "(devno=0x%04x fsf_req.seq_no=%d)\n",
++ adapter->devno,
++ fsf_req->seq_no);
++ retval = 0;
++ break;
++
++ case FSF_SERVICE_CLASS_NOT_SUPPORTED:
++ ZFCP_LOG_FLAGS(2, "FSF_SERVICE_CLASS_NOT_SUPPORTED\n");
++ if (adapter->fc_service_class <= 3) {
++ ZFCP_LOG_INFO(
++ "error: The adapter with devno=0x%04x does "
++ "not support fibre-channel class %d\n",
++ adapter->devno,
++ adapter->fc_service_class);
++ } else {
++ ZFCP_LOG_INFO(
++ "bug: The fibre channel class at the adapter "
++ "with devno 0x%04x is invalid "
++ "(debug info %d)\n",
++ adapter->devno,
++ adapter->fc_service_class);
++ }
++ debug_text_exception(adapter->erp_dbf, 0, "fsf_s_class_nsup");
++ zfcp_erp_adapter_shutdown(port->adapter, 0);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_ACCESS_DENIED:
++ ZFCP_LOG_FLAGS(2, "FSF_ACCESS_DENIED\n");
++ ZFCP_LOG_NORMAL("Access denied, cannot send ELS "
++ "(devno=0x%04x wwpn=0x%016Lx)\n",
++ adapter->devno,
++ (llui_t)port->wwpn);
++ for (counter = 0; counter < 2; counter++) {
++ subtable = header->fsf_status_qual.halfword[counter * 2];
++ rule = header->fsf_status_qual.halfword[counter * 2 + 1];
++ switch (subtable) {
++ case FSF_SQ_CFDC_SUBTABLE_OS:
++ case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
++ case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
++ case FSF_SQ_CFDC_SUBTABLE_LUN:
++ ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
++ zfcp_act_subtable_type[subtable], rule);
++ break;
++ }
++ }
++ debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_access");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_ELS_COMMAND_REJECTED:
++ ZFCP_LOG_FLAGS(2, "FSF_ELS_COMMAND_REJECTED\n");
++ ZFCP_LOG_INFO(
++ "The ELS command has been rejected because "
++ "a command filter in the FCP channel prohibited "
++ "sending of the ELS to the SAN "
++ "(devno=0x%04x wwpn=0x%016Lx)\n",
++ adapter->devno,
++ (llui_t)port->wwpn);
++ break;
++
++ case FSF_PAYLOAD_SIZE_MISMATCH:
++ ZFCP_LOG_FLAGS(2, "FSF_PAYLOAD_SIZE_MISMATCH\n");
++ ZFCP_LOG_INFO(
++ "ELS request size and ELS response size must be either "
++ "both 0, or both greater than 0 "
++ "(devno=0x%04x req_buf_length=%d resp_buf_length=%d)\n",
++ adapter->devno,
++ bottom->req_buf_length,
++ bottom->resp_buf_length);
++ break;
++
++ case FSF_REQUEST_SIZE_TOO_LARGE:
++ ZFCP_LOG_FLAGS(2, "FSF_REQUEST_SIZE_TOO_LARGE\n");
++ ZFCP_LOG_INFO(
++ "Length of the ELS request buffer, "
++ "specified in QTCB bottom, "
++ "exceeds the size of the buffers "
++ "that have been allocated for ELS request data "
++ "(devno=0x%04x req_buf_length=%d)\n",
++ adapter->devno,
++ bottom->req_buf_length);
++ break;
++
++ case FSF_RESPONSE_SIZE_TOO_LARGE:
++ ZFCP_LOG_FLAGS(2, "FSF_RESPONSE_SIZE_TOO_LARGE\n");
++ ZFCP_LOG_INFO(
++ "Length of the ELS response buffer, "
++ "specified in QTCB bottom, "
++ "exceeds the size of the buffers "
++ "that have been allocated for ELS response data "
++ "(devno=0x%04x resp_buf_length=%d)\n",
++ adapter->devno,
++ bottom->resp_buf_length);
++ break;
++
++ case FSF_ADAPTER_STATUS_AVAILABLE:
++ ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
++ switch (header->fsf_status_qual.word[0]){
++
++ case FSF_SQ_RETRY_IF_POSSIBLE:
++ ZFCP_LOG_FLAGS(2, "FSF_SQ_RETRY_IF_POSSIBLE\n");
++ debug_text_event(adapter->erp_dbf, 1, "fsf_sq_retry");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
++ ZFCP_LOG_FLAGS(2, "FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
++ debug_text_event(adapter->erp_dbf, 1, "fsf_sq_ulp");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ retval =
++ zfcp_handle_els_rjt(header->fsf_status_qual.word[1],
++ (struct zfcp_ls_rjt_par *)
++ &header->fsf_status_qual.word[2]);
++ break;
++
++ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
++ ZFCP_LOG_FLAGS(2, "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
++ debug_text_event(adapter->erp_dbf, 1, "fsf_sq_ltest");
++ if (send_els->ls_code != ZFCP_LS_ADISC)
++ zfcp_test_link(port);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ default:
++ ZFCP_LOG_INFO(
++ "bug: Wrong status qualifier 0x%x arrived.\n",
++ header->fsf_status_qual.word[0]);
++ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
++ (char*)header->fsf_status_qual.word, 16);
++ }
++ break;
++
++ case FSF_UNKNOWN_COMMAND:
++ ZFCP_LOG_FLAGS(2, "FSF_UNKNOWN_COMMAND\n");
++ ZFCP_LOG_INFO(
++ "FSF command 0x%x is not supported by FCP adapter "
++ "(devno=0x%04x)\n",
++ fsf_req->fsf_command,
++ adapter->devno);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ default:
++ ZFCP_LOG_NORMAL(
++ "bug: An unknown FSF Status was presented "
++ "(devno=0x%04x fsf_status=0x%08x)\n",
++ adapter->devno,
++ header->fsf_status);
++ debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_sq_inval");
++ debug_exception(fsf_req->adapter->erp_dbf, 0,
++ &header->fsf_status_qual.word[0], sizeof(u32));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ }
++
++skip_fsfstatus:
++ send_els->status = retval;
++
++ if (send_els->handler != 0)
++ send_els->handler(send_els->handler_data);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/**
++ * zfcp_fsf_send_els - send an ELS (extended link service)
++ * @els: ELS request to send
++ * Returns: 0 on success, -E* code otherwise
++ *
++ * Create an FSF request from an ELS and queue it for sending. Chaining is
++ * used if the buffers do not fit into single SBALEs and the adapter
++ * supports it (checked in that order).
++ */
++int zfcp_fsf_send_els(struct zfcp_send_els *els)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ unsigned long lock_flags;
++ int retval;
++ zfcp_fsf_req_t *fsf_req;
++ zfcp_port_t *port = els->port;
++ zfcp_adapter_t *adapter = port->adapter;
++ volatile struct qdio_buffer_element_t *sbale;
++ int bytes;
++
++ retval = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS,
++ ZFCP_REQ_AUTO_CLEANUP,
++ NULL, &lock_flags, &fsf_req);
++ if (retval < 0) {
++ ZFCP_LOG_INFO("error: Out of resources. "
++ "Could not create an ELS request, "
++ "destination port D_ID is 0x%06x "
++ "at the adapter with devno 0x%04x.\n",
++ port->d_id, adapter->devno);
++ goto failed_req;
++ }
++
++ sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
++ if (zfcp_use_one_sbal(els->req, els->req_count,
++ els->resp, els->resp_count)){
++ /* both request buffer and response buffer
++ fit into one sbale each */
++ sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
++ sbale[2].addr = els->req[0].address;
++ sbale[2].length = els->req[0].length;
++ sbale[3].addr = els->resp[0].address;
++ sbale[3].length = els->resp[0].length;
++ sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
++ } else if (adapter->supported_features &
++ FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
++ /* try to use chained SBALs */
++ bytes = zfcp_qdio_sbals_from_sg(fsf_req,
++ SBAL_FLAGS0_TYPE_WRITE_READ,
++ els->req, els->req_count,
++ ZFCP_MAX_SBALS_PER_ELS_REQ);
++ if (bytes <= 0) {
++ ZFCP_LOG_INFO("error: Out of resources (outbuf). "
++ "Could not create an ELS request, "
++ "destination port D_ID is 0x%06x "
++ "at the adapter with devno 0x%04x.\n",
++ port->d_id, adapter->devno);
++ if (bytes == 0) {
++ retval = -ENOMEM;
++ } else {
++ retval = bytes;
++ }
++ goto failed_send;
++ }
++ fsf_req->qtcb->bottom.support.req_buf_length = bytes;
++ fsf_req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
++ bytes = zfcp_qdio_sbals_from_sg(fsf_req,
++ SBAL_FLAGS0_TYPE_WRITE_READ,
++ els->resp, els->resp_count,
++ ZFCP_MAX_SBALS_PER_ELS_REQ);
++ if (bytes <= 0) {
++ ZFCP_LOG_INFO("error: Out of resources (inbuf). "
++ "Could not create an ELS request, "
++ "destination port D_ID is 0x%06x "
++ "at the adapter with devno 0x%04x.\n",
++ port->d_id, adapter->devno);
++ if (bytes == 0) {
++ retval = -ENOMEM;
++ } else {
++ retval = bytes;
++ }
++ goto failed_send;
++ }
++ fsf_req->qtcb->bottom.support.resp_buf_length = bytes;
++ } else {
++ /* reject request */
++		ZFCP_LOG_INFO("error: microcode does not support chained SBALs. "
++			"ELS request too big. "
++ "Destination port D_ID is 0x%06x "
++ "at the adapter with devno 0x%04x.\n",
++ port->d_id, adapter->devno);
++ retval = -EOPNOTSUPP;
++ goto failed_send;
++ }
++
++ /* settings in QTCB */
++ fsf_req->qtcb->bottom.support.d_id = port->d_id;
++ fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class;
++ fsf_req->qtcb->bottom.support.timeout = ZFCP_ELS_TIMEOUT;
++ fsf_req->data.send_els = els;
++
++ /* start QDIO request for this FSF request */
++ retval = zfcp_fsf_req_send(fsf_req, NULL);
++ if (retval) {
++ ZFCP_LOG_DEBUG("error: Out of resources. Could not send an "
++ "ELS command via adapter with "
++ "devno 0x%04x, port WWPN 0x%016Lx\n",
++ adapter->devno, (llui_t) port->wwpn);
++ goto failed_send;
++ } else {
++ ZFCP_LOG_DEBUG("ELS request initiated "
++ "(adapter devno=0x%04x, port D_ID=0x%06x)\n",
++ adapter->devno, port->d_id);
++ goto out;
++ }
++
++ failed_send:
++ if (zfcp_fsf_req_free(fsf_req)) {
++ ZFCP_LOG_NORMAL("bug: Could not remove one FSF request. Memory "
++ "leakage possible. (debug info 0x%lx).\n",
++ (unsigned long)fsf_req);
++ retval = -EINVAL;
++	}
++ failed_req:
++ out:
++ write_unlock_irqrestore(&adapter->request_queue.queue_lock,
++ lock_flags);
++
++ return retval;
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
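++
++/*
++ * A hypothetical caller of zfcp_fsf_send_els() might look as follows
++ * (sketch only, not compiled; my_send_adisc and my_els_done are
++ * made-up names and the handler-argument type is an assumption). The
++ * struct zfcp_send_els and both scatter-gather lists must stay
++ * allocated until the handler has been called:
++ */
++#if 0
++static void my_els_done(unsigned long data);
++
++static int my_send_adisc(struct zfcp_send_els *els, zfcp_port_t *port,
++			 struct scatterlist *req, struct scatterlist *resp)
++{
++	memset(els, 0, sizeof(*els));
++	els->port = port;
++	els->req = req;		/* one-entry list with the ELS payload */
++	els->req_count = 1;
++	els->resp = resp;	/* one-entry list for the response */
++	els->resp_count = 1;
++	els->ls_code = ZFCP_LS_ADISC;
++	els->handler = my_els_done;
++	els->handler_data = (unsigned long) els;
++
++	return zfcp_fsf_send_els(els);
++}
++#endif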
++
++static inline volatile qdio_buffer_element_t * zfcp_qdio_sbale_get(
++ zfcp_qdio_queue_t *queue,
++ int sbal,
++ int sbale)
++{
++ return &queue->buffer[sbal]->element[sbale];
++}
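++
++/*
++ * SBAL/SBALE addressing used by zfcp_qdio_sbale_get() above and its
++ * wrappers below: queue->buffer is an array of QDIO buffers (SBALs),
++ * each of which holds a fixed number of storage-block-address-list
++ * entries (SBALEs). The sbal argument selects the buffer, the sbale
++ * argument the entry within it; the request and response variants
++ * only differ in which queue of the adapter they index.
++ */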
++
++
++static inline volatile qdio_buffer_element_t * zfcp_qdio_sbale_req(
++ zfcp_fsf_req_t *fsf_req,
++ int sbal,
++ int sbale)
++{
++ return zfcp_qdio_sbale_get(
++ &fsf_req->adapter->request_queue,
++ sbal,
++ sbale);
++}
++
++
++static inline volatile qdio_buffer_element_t * zfcp_qdio_sbale_resp(
++ zfcp_fsf_req_t *fsf_req,
++ int sbal,
++ int sbale)
++{
++ return zfcp_qdio_sbale_get(
++ &fsf_req->adapter->response_queue,
++ sbal,
++ sbale);
++}
++
++
++/* the following routines work on outbound queues */
++static inline volatile qdio_buffer_element_t * zfcp_qdio_sbale_curr(
++ zfcp_fsf_req_t *fsf_req)
++{
++ return zfcp_qdio_sbale_req(
++ fsf_req,
++ fsf_req->sbal_curr,
++ fsf_req->sbale_curr);
++}
++
++
++/* can assume at least one free SBAL in outbound queue when called */
++static inline void zfcp_qdio_sbal_limit(zfcp_fsf_req_t *fsf_req, int max_sbals)
++{
++ int count = atomic_read(&fsf_req->adapter->request_queue.free_count);
++ count = min(count, max_sbals);
++ fsf_req->sbal_last = fsf_req->sbal_first;
++ fsf_req->sbal_last += (count - 1);
++ fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
++}
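++
++/*
++ * Worked example for the wrap-around arithmetic above, assuming
++ * QDIO_MAX_BUFFERS_PER_Q is 128: with sbal_first = 126, at least four
++ * free SBALs and max_sbals >= 4, count becomes 4 and
++ *	sbal_last = (126 + 3) % 128 = 1,
++ * i.e. the request may use SBALs 126, 127, 0 and 1 of the circular
++ * request queue.
++ */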
++
++
++static inline volatile qdio_buffer_element_t * zfcp_qdio_sbal_chain(
++ zfcp_fsf_req_t *fsf_req,
++ unsigned long sbtype)
++{
++ volatile qdio_buffer_element_t *sbale;
++
++ /* set last entry flag in current SBALE of current SBAL */
++ sbale = zfcp_qdio_sbale_curr(fsf_req);
++ sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
++
++ /* don't exceed last allowed SBAL */
++ if (fsf_req->sbal_curr == fsf_req->sbal_last)
++ return NULL;
++
++ /* set chaining flag in first SBALE of current SBAL */
++ sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
++ sbale->flags |= SBAL_FLAGS0_MORE_SBALS;
++
++ /* calculate index of next SBAL */
++ fsf_req->sbal_curr++;
++ fsf_req->sbal_curr %= QDIO_MAX_BUFFERS_PER_Q;
++
++ /* keep this requests number of SBALs up-to-date */
++ fsf_req->sbal_number++;
++
++ /* start at first SBALE of new SBAL */
++ fsf_req->sbale_curr = 0;
++
++ /* set storage-block type for new SBAL */
++ sbale = zfcp_qdio_sbale_curr(fsf_req);
++ sbale->flags |= sbtype;
++
++ return sbale;
++}
++
++
++static inline volatile qdio_buffer_element_t * zfcp_qdio_sbale_next(
++ zfcp_fsf_req_t *fsf_req, unsigned long sbtype)
++{
++ if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
++ return zfcp_qdio_sbal_chain(fsf_req, sbtype);
++
++ fsf_req->sbale_curr++;
++
++ return zfcp_qdio_sbale_curr(fsf_req);
++}
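++
++/*
++ * Taken together, zfcp_qdio_sbale_next() walks through SBALEs 0 ..
++ * ZFCP_LAST_SBALE_PER_SBAL of the current SBAL; only when the last
++ * SBALE has been used does zfcp_qdio_sbal_chain() mark the current
++ * SBAL as chained and continue at SBALE 0 of the next SBAL. A NULL
++ * return means sbal_last would be exceeded, which callers treat as
++ * "out of SBALs".
++ */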
++
++
++static inline int zfcp_qdio_sbals_zero(
++ zfcp_qdio_queue_t *queue,
++ int first,
++ int last)
++{
++ qdio_buffer_t **buf = queue->buffer;
++ int curr = first;
++ int count = 0;
++
++ for(;;) {
++ curr %= QDIO_MAX_BUFFERS_PER_Q;
++ count++;
++ memset(buf[curr], 0, sizeof(qdio_buffer_t));
++ if (curr == last)
++ break;
++ curr++;
++ }
++ return count;
++}
++
++
++static inline int zfcp_qdio_sbals_wipe(
++ zfcp_fsf_req_t *fsf_req)
++{
++ return zfcp_qdio_sbals_zero(
++ &fsf_req->adapter->request_queue,
++ fsf_req->sbal_first,
++ fsf_req->sbal_curr);
++}
++
++
++static inline void zfcp_qdio_sbale_fill(
++ zfcp_fsf_req_t *fsf_req,
++ unsigned long sbtype,
++ void *addr,
++ int length)
++{
++ volatile qdio_buffer_element_t *sbale = zfcp_qdio_sbale_curr(fsf_req);
++
++ sbale->addr = addr;
++ sbale->length = length;
++}
++
++
++static inline int zfcp_qdio_sbals_from_segment(
++ zfcp_fsf_req_t *fsf_req,
++ unsigned long sbtype,
++ void* start_addr,
++ unsigned long total_length)
++{
++ unsigned long remaining, length;
++ void *addr;
++
++ /* split segment up heeding page boundaries */
++ for (addr = start_addr,
++ remaining = total_length;
++ remaining;
++ addr += length,
++ remaining -= length) {
++ /* get next free SBALE for new piece */
++ if (!zfcp_qdio_sbale_next(fsf_req, sbtype)) {
++ /* no SBALE left, clean up and leave */
++ zfcp_qdio_sbals_wipe(fsf_req);
++ return -EINVAL;
++ }
++ /* calculate length of new piece */
++ length = min(remaining,
++ (PAGE_SIZE - ((unsigned long)addr & (PAGE_SIZE - 1))));
++ /* fill current SBALE with calculated piece */
++ zfcp_qdio_sbale_fill(fsf_req, sbtype, addr, length);
++ }
++ return total_length;
++}
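++
++/*
++ * Worked example for the page splitting above, assuming a 4KB
++ * PAGE_SIZE: a segment with start_addr = 0x12e00 and total_length =
++ * 0x300 crosses a page boundary and is therefore mapped as two SBALEs,
++ *	piece 1: addr 0x12e00, length min(0x300, 0x1000 - 0xe00) = 0x200
++ *	piece 2: addr 0x13000, length 0x100
++ * so that no single SBALE spans a page boundary.
++ */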
++
++
++/* for exploiters with a scatter-gather list ready at hand */
++static inline int
++zfcp_qdio_sbals_from_sg(zfcp_fsf_req_t *fsf_req, unsigned long sbtype,
++ struct scatterlist *sg, int sg_count, int max_sbals)
++{
++ int sg_index;
++ struct scatterlist *sg_segment;
++ int bytes, retval;
++ volatile qdio_buffer_element_t *sbale;
++ zfcp_adapter_t *adapter;
++
++ /* figure out last allowed SBAL */
++ zfcp_qdio_sbal_limit(fsf_req, max_sbals);
++
++ /* set storage-block type for current SBAL */
++ sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
++ sbale->flags |= sbtype;
++
++	/* process all segments of the scatter-gather list */
++ for (sg_index = 0, sg_segment = sg, bytes = 0;
++ sg_index < sg_count;
++ sg_index++, sg_segment++) {
++ retval = zfcp_qdio_sbals_from_segment(
++ fsf_req,
++ sbtype,
++ sg_segment->address,
++ sg_segment->length);
++ if (retval < 0)
++ return retval;
++ bytes += retval;
++ }
++ /* assume that no other SBALEs are to follow in the same SBAL */
++ sbale = zfcp_qdio_sbale_curr(fsf_req);
++ sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
++
++#ifdef ZFCP_STAT_REQSIZES
++ adapter = fsf_req->adapter;
++ if (sbtype == SBAL_FLAGS0_TYPE_READ)
++ zfcp_statistics_inc(adapter, &adapter->read_req_head, bytes);
++ else zfcp_statistics_inc(adapter, &adapter->write_req_head, bytes);
++#endif
++ return bytes;
++}
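++
++/*
++ * Return-value contract of zfcp_qdio_sbals_from_sg(): the total number
++ * of mapped bytes on success (0 only for an empty list), or -EINVAL if
++ * the request would need more than max_sbals SBALs (the partially
++ * filled SBALs have been wiped in that case). This is why the send
++ * paths above treat bytes == 0 as -ENOMEM and pass negative values on
++ * as the error code.
++ */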
++
++
++/* for exploiters with just a buffer ready at hand */
++static inline int zfcp_qdio_sbals_from_buffer(
++ zfcp_fsf_req_t *fsf_req,
++ unsigned long sbtype,
++ void *buffer,
++ unsigned long length,
++ int max_sbals)
++{
++ struct scatterlist sg_segment;
++
++ sg_segment.address = buffer;
++ sg_segment.length = length;
++
++ return zfcp_qdio_sbals_from_sg(fsf_req, sbtype, &sg_segment, 1,
++ max_sbals);
++}
++
++
++/* for exploiters with a SCSI command ready at hand */
++static inline int zfcp_qdio_sbals_from_scsicmnd(
++ zfcp_fsf_req_t *fsf_req,
++ unsigned long sbtype,
++ struct scsi_cmnd *scsi_cmnd)
++{
++ if (scsi_cmnd->use_sg)
++ return zfcp_qdio_sbals_from_sg(fsf_req, sbtype,
++ (struct scatterlist *)
++ scsi_cmnd->request_buffer,
++ scsi_cmnd->use_sg,
++ ZFCP_MAX_SBALS_PER_REQ);
++ else
++ return zfcp_qdio_sbals_from_buffer(fsf_req, sbtype,
++ scsi_cmnd->request_buffer,
++ scsi_cmnd->request_bufflen,
++ ZFCP_MAX_SBALS_PER_REQ);
++}
++
++/*
++ * function:	zfcp_fsf_exchange_config_data
++ *
++ * purpose:	submits FSF command "exchange configuration data"
++ *
++ * returns:	0 - request has been initiated successfully
++ *		<0 - request could not be initiated
++ */
++static int zfcp_fsf_exchange_config_data(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ volatile qdio_buffer_element_t *sbale;
++ int retval = 0;
++ unsigned long lock_flags;
++
++ ZFCP_LOG_TRACE("enter (erp_action=0x%lx)\n", (unsigned long)erp_action);
++
++ /* setup new FSF request */
++ retval = zfcp_fsf_req_create(
++ erp_action->adapter,
++ FSF_QTCB_EXCHANGE_CONFIG_DATA,
++ ZFCP_REQ_AUTO_CLEANUP,
++ &erp_action->adapter->pool.fsf_req_erp,
++ &lock_flags,
++ &erp_action->fsf_req);
++ if (retval < 0) {
++ ZFCP_LOG_INFO(
++ "error: Out of resources. Could not create an "
++			"exchange configuration data request for "
++ "the adapter with devno 0x%04x.\n",
++ erp_action->adapter->devno);
++ goto out;
++ }
++
++
++ sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
++ erp_action->fsf_req->sbal_curr, 0);
++ sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
++ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
++
++ erp_action->fsf_req->erp_action = erp_action;
++ erp_action->fsf_req->qtcb->bottom.config.feature_selection =
++ FSF_FEATURE_CFDC;
++
++ /* start QDIO request for this FSF request */
++ retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
++ if (retval) {
++ ZFCP_LOG_INFO(
++ "error: Could not send an exchange configuration data "
++ "command on the adapter with devno 0x%04x\n",
++ erp_action->adapter->devno);
++ if (zfcp_fsf_req_free(erp_action->fsf_req)) {
++ ZFCP_LOG_NORMAL(
++ "bug: Could not remove one FSF "
++ "request. Memory leakage possible. "
++ "(debug info 0x%lx).\n",
++ (unsigned long)erp_action->fsf_req);
++ }
++ erp_action->fsf_req = NULL;
++ goto out;
++ }
++
++ ZFCP_LOG_DEBUG(
++ "Exchange Configuration Data request initiated "
++ "(adapter devno=0x%04x)\n",
++ erp_action->adapter->devno);
++
++out:
++ write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock, lock_flags);
++
++ ZFCP_LOG_TRACE("exit (%d)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/**
++ * zfcp_fsf_exchange_config_evaluate - evaluate exchanged configuration data
++ * @fsf_req: fsf_req which belongs to xchg config data request
++ * @xchg_ok: specifies whether xchg config data was complete (1) or incomplete (0)
++ *
++ * returns: -EIO on error, 0 otherwise
++ */
++static int
++zfcp_fsf_exchange_config_evaluate(zfcp_fsf_req_t *fsf_req, int xchg_ok)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ fsf_qtcb_bottom_config_t *bottom;
++ zfcp_adapter_t *adapter = fsf_req->adapter;
++
++ bottom = &fsf_req->qtcb->bottom.config;
++ ZFCP_LOG_DEBUG(
++ "low/high QTCB version 0x%x/0x%x of FSF\n",
++ bottom->low_qtcb_version, bottom->high_qtcb_version);
++ adapter->fsf_lic_version = bottom->lic_version;
++ adapter->supported_features = bottom->supported_features;
++
++ if (xchg_ok) {
++ adapter->wwnn = bottom->nport_serv_param.wwnn;
++ adapter->wwpn = bottom->nport_serv_param.wwpn;
++ adapter->s_id = bottom->s_id & ZFCP_DID_MASK;
++ adapter->fc_topology = bottom->fc_topology;
++ adapter->fc_link_speed = bottom->fc_link_speed;
++ adapter->hydra_version = bottom->adapter_type;
++ } else {
++ adapter->wwnn = 0;
++ adapter->wwpn = 0;
++ adapter->s_id = 0;
++ adapter->fc_topology = 0;
++ adapter->fc_link_speed = 0;
++ adapter->hydra_version = 0;
++ }
++
++ if (adapter->supported_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
++ adapter->hardware_version = bottom->hardware_version;
++ memcpy(adapter->serial_number, bottom->serial_number, 17);
++ EBCASC(adapter->serial_number, sizeof(adapter->serial_number));
++ }
++
++ ZFCP_LOG_INFO(
++ "The adapter with devno=0x%04x reported "
++ "the following characteristics:\n"
++ "WWNN 0x%016Lx, WWPN 0x%016Lx, S_ID 0x%08x,\n"
++ "adapter version 0x%x, LIC version 0x%x, "
++ "FC link speed %d Gb/s\n",
++ adapter->devno,
++ (llui_t) adapter->wwnn, (llui_t) adapter->wwpn,
++ (unsigned int) adapter->s_id,
++ adapter->hydra_version,
++ adapter->fsf_lic_version,
++ adapter->fc_link_speed);
++ if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) {
++ ZFCP_LOG_NORMAL(
++ "error: the adapter with devno 0x%04x "
++			"only supports control block versions "
++			"newer than this device driver uses "
++			"(consider updating the device driver)\n",
++ adapter->devno);
++ debug_text_event(adapter->erp_dbf, 0, "low_qtcb_ver");
++ zfcp_erp_adapter_shutdown(adapter, 0);
++ return -EIO;
++ }
++ if (ZFCP_QTCB_VERSION > bottom->high_qtcb_version) {
++ ZFCP_LOG_NORMAL(
++ "error: the adapter with devno 0x%04x "
++			"versions than this device driver uses "
++ "versions than this device driver uses"
++ "(consider a microcode upgrade)\n",
++ adapter->devno);
++ debug_text_event(adapter->erp_dbf, 0, "high_qtcb_ver");
++ zfcp_erp_adapter_shutdown(adapter, 0);
++ return -EIO;
++ }
++ return 0;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
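++
++/*
++ * The two version checks above enforce that the driver's QTCB version
++ * lies within the window advertised by the adapter, i.e.
++ *	low_qtcb_version <= ZFCP_QTCB_VERSION <= high_qtcb_version.
++ * Falling below the window calls for a driver update, exceeding it
++ * for a microcode update; in both cases the adapter is shut down.
++ */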
++
++
++/*
++ * function: zfcp_fsf_exchange_config_data_handler
++ *
++ * purpose: is called for finished Exchange Configuration Data command
++ *
++ * returns:	0 on success, -EIO otherwise
++ */
++int zfcp_fsf_exchange_config_data_handler(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = -EIO;
++ fsf_qtcb_bottom_config_t *bottom;
++ zfcp_adapter_t *adapter = fsf_req->adapter;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx)\n",
++ (unsigned long)fsf_req);
++
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
++		/* don't set any value, stay with the old (uninitialized) ones */
++ goto skip_fsfstatus;
++ }
++
++ switch (fsf_req->qtcb->header.fsf_status) {
++
++ case FSF_GOOD :
++ ZFCP_LOG_FLAGS(2,"FSF_GOOD\n");
++ if (zfcp_fsf_exchange_config_evaluate(fsf_req, 1))
++ goto skip_fsfstatus;
++ switch (adapter->fc_topology) {
++ case FSF_TOPO_P2P:
++ ZFCP_LOG_FLAGS(1,"FSF_TOPO_P2P\n");
++ ZFCP_LOG_NORMAL("error: Point-to-point fibre-channel "
++ "configuration detected "
++ "at the adapter with devno "
++ "0x%04x, not supported, shutting down adapter\n",
++ adapter->devno);
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"top-p-to-p");
++ zfcp_erp_adapter_shutdown(adapter, 0);
++ goto skip_fsfstatus;
++ case FSF_TOPO_AL:
++ ZFCP_LOG_FLAGS(1,"FSF_TOPO_AL\n");
++ ZFCP_LOG_NORMAL("error: Arbitrated loop fibre-channel "
++ "topology detected "
++ "at the adapter with devno "
++ "0x%04x, not supported, shutting down adapter\n",
++ adapter->devno);
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"top-al");
++ zfcp_erp_adapter_shutdown(adapter, 0);
++ goto skip_fsfstatus;
++ case FSF_TOPO_FABRIC:
++ ZFCP_LOG_FLAGS(1,"FSF_TOPO_FABRIC\n");
++ ZFCP_LOG_INFO("Switched fabric fibre-channel "
++ "network detected "
++ "at the adapter with devno "
++ "0x%04x\n",
++ adapter->devno);
++ break;
++ default:
++ ZFCP_LOG_NORMAL("bug: The fibre-channel topology "
++ "reported by the exchange "
++ "configuration command for "
++ "the adapter with devno "
++ "0x%04x is not "
++ "of a type known to the zfcp "
++ "driver, shutting down adapter\n",
++ adapter->devno);
++ debug_text_exception(fsf_req->adapter->erp_dbf,0,
++ "unknown-topo");
++ zfcp_erp_adapter_shutdown(adapter, 0);
++ goto skip_fsfstatus;
++ }
++ bottom = &fsf_req->qtcb->bottom.config;
++ if (bottom->max_qtcb_size < sizeof(fsf_qtcb_t)) {
++ ZFCP_LOG_NORMAL("bug: Maximum QTCB size (%d bytes) "
++ "allowed by the adapter with devno "
++ "0x%04x is lower than the minimum "
++ "required by the driver (%ld bytes).\n",
++ bottom->max_qtcb_size,
++ adapter->devno,
++ sizeof(fsf_qtcb_t));
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"qtcb-size");
++ debug_event(fsf_req->adapter->erp_dbf,0,&bottom->max_qtcb_size,
++ sizeof(u32));
++ zfcp_erp_adapter_shutdown(adapter, 0);
++ goto skip_fsfstatus;
++ }
++ atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
++ retval = 0;
++
++ zfcp_callback_do_adapter_add(NULL, adapter);
++
++ break;
++
++ case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
++ debug_text_event(adapter->erp_dbf, 0, "xchg-inco");
++
++ if (zfcp_fsf_exchange_config_evaluate(fsf_req, 0))
++ goto skip_fsfstatus;
++
++ ZFCP_LOG_INFO(
++ "Local link to adapter with devno 0x%04x is down\n",
++ adapter->devno);
++ atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
++ ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
++ &adapter->status);
++ zfcp_erp_adapter_failed(adapter);
++ break;
++
++ default:
++ /* retval is -EIO by default */
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"fsf-stat-ng");
++ debug_event(fsf_req->adapter->erp_dbf,0,
++ &fsf_req->qtcb->header.fsf_status,
++ sizeof(u32));
++ zfcp_erp_adapter_shutdown(adapter, 0);
++ }
++
++ skip_fsfstatus:
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++int zfcp_fsf_exchange_port_data(zfcp_adapter_t *adapter,
++ fsf_qtcb_bottom_port_t *data)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ volatile qdio_buffer_element_t *sbale;
++ int retval = 0;
++ int status;
++ unsigned long lock_flags;
++ zfcp_fsf_req_t *fsf_req;
++
++	if (!(adapter->supported_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) {
++ ZFCP_LOG_INFO("error: exchange port data "
++ "command not supported by adapter 0x%4.4x\n",
++ adapter->devno);
++		retval = -ENOTSUPP;
++ goto out;
++ }
++
++ /* setup new FSF request */
++ retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
++ 0, 0, &lock_flags, &fsf_req);
++ if (retval < 0) {
++ ZFCP_LOG_INFO("error: Out of resources. Could not create an "
++			      "exchange port data request for "
++ "the adapter with devno 0x%4.4x.\n",
++ adapter->devno);
++ write_unlock_irqrestore(&adapter->request_queue.queue_lock,
++ lock_flags);
++ goto out;
++ }
++
++ sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
++ sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
++ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
++
++ fsf_req->data.port_data = data;
++
++ /* start QDIO request for this FSF request */
++ retval = zfcp_fsf_req_send(fsf_req, NULL);
++ if (retval) {
++ ZFCP_LOG_INFO("error: Could not send an exchange port data "
++ "command on the adapter with devno 0x%4.4x\n",
++ adapter->devno);
++ if (zfcp_fsf_req_free(fsf_req)) {
++ ZFCP_LOG_NORMAL("bug: Could not remove one FSF "
++ "request. Memory leakage possible. "
++ "(debug info 0x%lx).\n",
++ (unsigned long)fsf_req);
++ }
++ fsf_req = NULL;
++ write_unlock_irqrestore(&adapter->request_queue.queue_lock,
++ lock_flags);
++ goto out;
++ }
++
++ ZFCP_LOG_DEBUG("Exchange Port Data request initiated "
++		       "(adapter devno=0x%04x)\n", adapter->devno);
++
++
++ write_unlock_irqrestore(&adapter->request_queue.queue_lock,
++ lock_flags);
++
++ /* FIXME: could we wait interruptible here ? */
++ retval = zfcp_fsf_req_wait_and_cleanup(fsf_req, ZFCP_UNINTERRUPTIBLE,
++ &status);
++
++ out:
++ ZFCP_LOG_TRACE("exit (%d)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
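++
++/*
++ * A hypothetical caller of zfcp_fsf_exchange_port_data() might look as
++ * follows (sketch only, not compiled; my_get_port_data is a made-up
++ * name). The call is synchronous: it blocks uninterruptibly in
++ * zfcp_fsf_req_wait_and_cleanup() and, on success, the caller-provided
++ * buffer holds a copy of the adapter's port data:
++ */
++#if 0
++static int my_get_port_data(zfcp_adapter_t *adapter)
++{
++	fsf_qtcb_bottom_port_t data;
++	int retval;
++
++	retval = zfcp_fsf_exchange_port_data(adapter, &data);
++	if (retval == 0) {
++		/* evaluate data, e.g. supported port/link characteristics */
++	}
++	return retval;
++}
++#endif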
++
++static int zfcp_fsf_exchange_port_data_handler(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = -EIO;
++ fsf_qtcb_bottom_port_t *bottom;
++ fsf_qtcb_bottom_port_t *data = fsf_req->data.port_data;
++
++ ZFCP_LOG_TRACE("enter (fsf_req=0x%lx)\n",
++ (unsigned long)fsf_req);
++
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
++		/* don't set any value, stay with the old (uninitialized) ones */
++ goto skip_fsfstatus;
++ }
++
++ /* evaluate FSF status in QTCB */
++ switch (fsf_req->qtcb->header.fsf_status) {
++ case FSF_GOOD :
++ ZFCP_LOG_FLAGS(2,"FSF_GOOD\n");
++ bottom = &fsf_req->qtcb->bottom.port;
++ memcpy(data, bottom, sizeof(fsf_qtcb_bottom_port_t));
++ retval = 0;
++ break;
++ default:
++ /* retval is -EIO by default */
++ debug_text_event(fsf_req->adapter->erp_dbf, 0,
++ "fsf-stat-ng");
++ debug_event(fsf_req->adapter->erp_dbf,0,
++ &fsf_req->qtcb->header.fsf_status,
++ sizeof(u32));
++ }
++
++ skip_fsfstatus:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * function: zfcp_fsf_open_port
++ *
++ * purpose:	submits FSF command "open port with DID"
++ *
++ * returns:	0 - request has been initiated successfully
++ *		<0 - request could not be initiated
++ */
++static int zfcp_fsf_open_port(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ volatile qdio_buffer_element_t *sbale;
++ int retval = 0;
++ unsigned long lock_flags;
++
++ ZFCP_LOG_TRACE("enter (erp_action=0x%lx)\n", (unsigned long)erp_action);
++
++ /* setup new FSF request */
++ retval = zfcp_fsf_req_create(
++ erp_action->adapter,
++ FSF_QTCB_OPEN_PORT_WITH_DID,
++ ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
++ &erp_action->adapter->pool.fsf_req_erp,
++ &lock_flags,
++ &erp_action->fsf_req);
++ if (retval < 0) {
++ ZFCP_LOG_INFO(
++ "error: Out of resources. Could not create an "
++ "open port request for "
++ "the port with WWPN 0x%016Lx connected to "
++ "the adapter with devno 0x%04x.\n",
++ (llui_t)erp_action->port->wwpn,
++ erp_action->adapter->devno);
++ goto out;
++ }
++
++ sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
++ erp_action->fsf_req->sbal_curr, 0);
++ sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
++ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
++
++ erp_action->fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id;
++ atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
++ erp_action->fsf_req->data.open_port.port = erp_action->port;
++ erp_action->fsf_req->erp_action = erp_action;
++
++ /* start QDIO request for this FSF request */
++ retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
++ if (retval) {
++ ZFCP_LOG_INFO(
++ "error: Could not send an "
++ "open port request for "
++ "the port with WWPN 0x%016Lx connected to "
++ "the adapter with devno 0x%04x.\n",
++ (llui_t)erp_action->port->wwpn,
++ erp_action->adapter->devno);
++ if (zfcp_fsf_req_free(erp_action->fsf_req)) {
++ ZFCP_LOG_NORMAL(
++ "bug: Could not remove one FSF "
++ "request. Memory leakage possible. "
++ "(debug info 0x%lx).\n",
++ (unsigned long)erp_action->fsf_req);
++			retval = -EINVAL;
++		}
++ erp_action->fsf_req = NULL;
++ goto out;
++ }
++
++ ZFCP_LOG_DEBUG(
++ "Open Port request initiated "
++ "(adapter devno=0x%04x, port WWPN=0x%016Lx)\n",
++ erp_action->adapter->devno,
++ (llui_t)erp_action->port->wwpn);
++
++out:
++ write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock, lock_flags);
++
++ ZFCP_LOG_TRACE("exit (%d)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_open_port_handler
++ *
++ * purpose: is called for finished Open Port command
++ *
++ * returns:	0 - FSF request processed successfully,
++ *		-EINVAL - otherwise
++ */
++static int zfcp_fsf_open_port_handler(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = -EINVAL;
++ zfcp_port_t *port;
++ fsf_plogi_t *plogi;
++ fsf_qtcb_header_t *header = &fsf_req->qtcb->header;
++ u16 subtable, rule, counter;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx)\n",
++ (unsigned long)fsf_req);
++
++ port = fsf_req->data.open_port.port;
++
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
++ /* don't change port status in our bookkeeping */
++ goto skip_fsfstatus;
++ }
++
++ /* evaluate FSF status in QTCB */
++ switch (fsf_req->qtcb->header.fsf_status) {
++
++ case FSF_PORT_ALREADY_OPEN :
++ ZFCP_LOG_FLAGS(0, "FSF_PORT_ALREADY_OPEN\n");
++ ZFCP_LOG_NORMAL("bug: The remote port with WWPN=0x%016Lx "
++ "connected to the adapter with "
++ "devno=0x%04x is already open.\n",
++ (llui_t)port->wwpn,
++ port->adapter->devno);
++ debug_text_exception(fsf_req->adapter->erp_dbf,0,"fsf_s_popen");
++		/* This is a bug; however, operation should continue normally
++ * if it is simply ignored */
++ break;
++
++ case FSF_ACCESS_DENIED :
++ ZFCP_LOG_FLAGS(2, "FSF_ACCESS_DENIED\n");
++ ZFCP_LOG_NORMAL("Access denied, cannot open port "
++ "(devno=0x%04x wwpn=0x%016Lx)\n",
++ port->adapter->devno,
++ (llui_t)port->wwpn);
++ for (counter = 0; counter < 2; counter++) {
++ subtable = header->fsf_status_qual.halfword[counter * 2];
++ rule = header->fsf_status_qual.halfword[counter * 2 + 1];
++ switch (subtable) {
++ case FSF_SQ_CFDC_SUBTABLE_OS:
++ case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
++ case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
++ case FSF_SQ_CFDC_SUBTABLE_LUN:
++ ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
++ zfcp_act_subtable_type[subtable], rule);
++ break;
++ }
++ }
++ debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_access");
++ zfcp_erp_port_failed(port);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED :
++ ZFCP_LOG_FLAGS(1, "FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED\n");
++ ZFCP_LOG_INFO("error: The FSF adapter is out of resources. "
++ "The remote port with WWPN=0x%016Lx "
++ "connected to the adapter with "
++ "devno=0x%04x could not be opened. "
++ "Disabling it.\n",
++ (llui_t)port->wwpn,
++ port->adapter->devno);
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_s_max_ports");
++ zfcp_erp_port_failed(port);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_ADAPTER_STATUS_AVAILABLE :
++ ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
++ switch (fsf_req->qtcb->header.fsf_status_qual.word[0]){
++ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE :
++ ZFCP_LOG_FLAGS(2, "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_sq_ltest");
++ /* ERP strategy will escalate */
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED :
++ /* ERP strategy will escalate */
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_sq_ulp");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ case FSF_SQ_NO_RETRY_POSSIBLE :
++ ZFCP_LOG_FLAGS(0, "FSF_SQ_NO_RETRY_POSSIBLE\n");
++ ZFCP_LOG_NORMAL("The remote port with WWPN=0x%016Lx "
++ "connected to the adapter with "
++ "devno=0x%04x could not be opened. "
++ "Disabling it.\n",
++ (llui_t)port->wwpn,
++ port->adapter->devno);
++ debug_text_exception(fsf_req->adapter->erp_dbf,0,"fsf_sq_no_retry");
++ zfcp_erp_port_failed(port);
++ zfcp_cmd_dbf_event_fsf("sqnretry", fsf_req,
++ &fsf_req->qtcb->header.fsf_status_qual,
++ sizeof(fsf_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ default:
++ ZFCP_LOG_NORMAL("bug: Wrong status qualifier 0x%x arrived.\n",
++ fsf_req->qtcb->header.fsf_status_qual.word[0]);
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"fsf_sq_inval:");
++ debug_exception(fsf_req->adapter->erp_dbf,0,
++ &fsf_req->qtcb->header.fsf_status_qual.word[0],
++ sizeof(u32));
++ break;
++ }
++ break;
++
++ case FSF_GOOD :
++ ZFCP_LOG_FLAGS(3, "FSF_GOOD\n");
++ /* save port handle assigned by FSF */
++ port->handle = fsf_req->qtcb->header.port_handle;
++ ZFCP_LOG_INFO("The remote port (WWPN=0x%016Lx) via adapter "
++			      "(devno=0x%04x) was opened, its "
++ "port handle is 0x%x\n",
++ (llui_t)port->wwpn,
++ port->adapter->devno,
++ port->handle);
++ /* mark port as open */
++ atomic_set_mask(
++ ZFCP_STATUS_COMMON_OPEN |
++ ZFCP_STATUS_PORT_PHYS_OPEN,
++ &port->status);
++ retval = 0;
++ /* check whether D_ID has changed during open */
++ /*
++ * FIXME: This check is not airtight, as the FCP channel does
++ * not monitor closures of target port connections caused on
++		 * the remote side. Thus, it might miss out on invalidating
++ * locally cached WWPNs (and other N_Port parameters) of gone
++ * target ports. So, our heroic attempt to make things safe
++ * could be undermined by 'open port' response data tagged with
++ * obsolete WWPNs. Another reason to monitor potential
++		 * connection closures ourselves at least (by interpreting
++ * incoming ELS' and unsolicited status). It just crosses my
++ * mind that one should be able to cross-check by means of
++ * another GID_PN straight after a port has been opened.
++ * Alternately, an ADISC/PDISC ELS should suffice, as well.
++ */
++ plogi = (fsf_plogi_t*) fsf_req->qtcb->bottom.support.els;
++ if (!atomic_test_mask(ZFCP_STATUS_PORT_NO_WWPN, &port->status)) {
++ if (fsf_req->qtcb->bottom.support.els1_length <
++ ((((unsigned long)&plogi->serv_param.wwpn) -
++ ((unsigned long)plogi)) +
++ sizeof(fsf_wwn_t))) {
++ ZFCP_LOG_INFO(
++ "warning: insufficient length of PLOGI payload (%i)\n",
++ fsf_req->qtcb->bottom.support.els1_length);
++ debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_short_plogi:");
++ /* skip sanity check and assume wwpn is ok */
++ } else {
++ if (plogi->serv_param.wwpn != port->wwpn) {
++ ZFCP_LOG_INFO(
++ "warning: D_ID of port with WWPN 0x%016Lx changed "
++ "during open\n",
++ (llui_t)port->wwpn);
++ debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_s_did_change:");
++ atomic_clear_mask(
++ ZFCP_STATUS_PORT_DID_DID,
++ &port->status);
++ }
++ }
++ }
++ break;
++
++ default :
++ ZFCP_LOG_NORMAL(
++ "bug: An unknown FSF Status was presented "
++ "(debug info 0x%x)\n",
++ fsf_req->qtcb->header.fsf_status);
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"fsf_s_inval:");
++ debug_exception(fsf_req->adapter->erp_dbf,0,
++ &fsf_req->qtcb->header.fsf_status,
++ sizeof(u32));
++ break;
++ }
++
++skip_fsfstatus:
++
++ atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &port->status);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_close_port
++ *
++ * purpose: submit FSF command "close port"
++ *
++ * returns:	0 - request has been initiated successfully
++ *		<0 - request could not be initiated
++ */
++static int zfcp_fsf_close_port(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ volatile qdio_buffer_element_t *sbale;
++ int retval = 0;
++ unsigned long lock_flags;
++
++ ZFCP_LOG_TRACE("enter (erp_action=0x%lx)\n", (unsigned long)erp_action);
++
++ /* setup new FSF request */
++ retval = zfcp_fsf_req_create(
++ erp_action->adapter,
++ FSF_QTCB_CLOSE_PORT,
++ ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
++ &erp_action->adapter->pool.fsf_req_erp,
++ &lock_flags,
++ &erp_action->fsf_req);
++ if (retval < 0) {
++ ZFCP_LOG_INFO(
++ "error: Out of resources. Could not create a "
++ "close port request for WWPN 0x%016Lx connected to "
++ "the adapter with devno 0x%04x.\n",
++ (llui_t)erp_action->port->wwpn,
++ erp_action->adapter->devno);
++ goto out;
++ }
++
++ sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
++ erp_action->fsf_req->sbal_curr, 0);
++ sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
++ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
++
++ atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
++ erp_action->fsf_req->data.close_port.port = erp_action->port;
++ erp_action->fsf_req->erp_action = erp_action;
++ erp_action->fsf_req->qtcb->header.port_handle = erp_action->port->handle;
++
++ /* start QDIO request for this FSF request */
++ retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
++ if (retval) {
++ ZFCP_LOG_INFO(
++ "error: Could not send a "
++ "close port request for WWPN 0x%016Lx connected to "
++ "the adapter with devno 0x%04x.\n",
++ (llui_t)erp_action->port->wwpn,
++ erp_action->adapter->devno);
++ if (zfcp_fsf_req_free(erp_action->fsf_req)) {
++ ZFCP_LOG_NORMAL(
++ "bug: Could not remove one FSF "
++ "request. Memory leakage possible. "
++ "(debug info 0x%lx).\n",
++ (unsigned long)erp_action->fsf_req);
++			retval = -EINVAL;
++		}
++ erp_action->fsf_req = NULL;
++ goto out;
++ }
++
++ ZFCP_LOG_TRACE(
++ "Close Port request initiated "
++ "(adapter devno=0x%04x, port WWPN=0x%016Lx)\n",
++ erp_action->adapter->devno,
++ (llui_t)erp_action->port->wwpn);
++
++out:
++ write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock, lock_flags);
++
++ ZFCP_LOG_TRACE("exit (%d)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_close_port_handler
++ *
++ * purpose: is called for finished Close Port FSF command
++ *
++ * returns:	0 - FSF request processed successfully,
++ *		-EINVAL - otherwise
++ */
++static int zfcp_fsf_close_port_handler(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = -EINVAL;
++ zfcp_port_t *port;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx)\n",
++ (unsigned long)fsf_req);
++
++ port = fsf_req->data.close_port.port;
++
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
++ /* don't change port status in our bookkeeping */
++ goto skip_fsfstatus;
++ }
++
++ /* evaluate FSF status in QTCB */
++ switch (fsf_req->qtcb->header.fsf_status) {
++
++ case FSF_PORT_HANDLE_NOT_VALID :
++ ZFCP_LOG_FLAGS(1, "FSF_PORT_HANDLE_NOT_VALID\n");
++ ZFCP_LOG_INFO(
++ "Temporary port identifier (handle) 0x%x "
++ "for the port with WWPN 0x%016Lx connected to "
++ "the adapter of devno 0x%04x is "
++ "not valid. This may happen occasionally.\n",
++ port->handle,
++ (llui_t)port->wwpn,
++ port->adapter->devno);
++ ZFCP_LOG_DEBUG("status qualifier:\n");
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_DEBUG,
++ (char*)&fsf_req->qtcb->header.fsf_status_qual,
++ 16);
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_s_phand_nv");
++ zfcp_erp_adapter_reopen(port->adapter, 0);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_ADAPTER_STATUS_AVAILABLE :
++ ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
++ /* Note: FSF has actually closed the port in this case.
++ * The status code is just daft. Fingers crossed for a change
++ */
++		retval = 0;
++ break;
++#if 0
++ switch (fsf_req->qtcb->header.fsf_status_qual.word[0]){
++ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE :
++ ZFCP_LOG_FLAGS(2, "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
++ /* This will now be escalated by ERP */
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_sq_ltest");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED :
++ ZFCP_LOG_FLAGS(2, "FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
++ /* ERP strategy will escalate */
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_sq_ulp");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ default:
++ ZFCP_LOG_NORMAL("bug: Wrong status qualifier 0x%x arrived.\n",
++ fsf_req->qtcb->header.fsf_status_qual.word[0]);
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"fsf_sq_inval:");
++ debug_exception(fsf_req->adapter->erp_dbf,0,
++ &fsf_req->qtcb->header.fsf_status_qual.word[0],
++ sizeof(u32));
++ break;
++ }
++ break;
++#endif
++
++ case FSF_GOOD :
++ ZFCP_LOG_FLAGS(3, "FSF_GOOD\n");
++ ZFCP_LOG_TRACE(
++ "remote port (WWPN=0x%016Lx) via adapter "
++ "(devno=0x%04x) closed, "
++ "port handle 0x%x\n",
++ (llui_t)port->wwpn,
++ port->adapter->devno,
++ port->handle);
++ zfcp_erp_modify_port_status(
++ port,
++ ZFCP_STATUS_COMMON_OPEN,
++ ZFCP_CLEAR);
++ retval = 0;
++ break;
++
++ default :
++ ZFCP_LOG_NORMAL(
++ "bug: An unknown FSF Status was presented "
++ "(debug info 0x%x)\n",
++ fsf_req->qtcb->header.fsf_status);
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"fsf_s_inval:");
++ debug_exception(fsf_req->adapter->erp_dbf,0,
++ &fsf_req->qtcb->header.fsf_status,
++ sizeof(u32));
++ break;
++ }
++
++skip_fsfstatus:
++ atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &port->status);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_close_physical_port
++ *
++ * purpose: submit FSF command "close physical port"
++ *
++ * returns:	0 - request has been initiated successfully
++ *		<0 - request could not be initiated
++ */
++static int zfcp_fsf_close_physical_port(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ volatile qdio_buffer_element_t *sbale;
++ int retval = 0;
++ unsigned long lock_flags;
++
++ ZFCP_LOG_TRACE("enter (erp_action=0x%lx)\n", (unsigned long)erp_action);
++
++ /* setup new FSF request */
++ retval = zfcp_fsf_req_create(
++ erp_action->adapter,
++ FSF_QTCB_CLOSE_PHYSICAL_PORT,
++ ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
++ &erp_action->adapter->pool.fsf_req_erp,
++ &lock_flags,
++ &erp_action->fsf_req);
++ if (retval < 0) {
++ ZFCP_LOG_INFO(
++ "error: Out of resources. Could not create a "
++ "close physical port request for "
++ "the port with WWPN 0x%016Lx connected to "
++ "the adapter with devno 0x%04x.\n",
++ (llui_t)erp_action->port->wwpn,
++ erp_action->adapter->devno);
++ goto out;
++ }
++
++ sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
++ erp_action->fsf_req->sbal_curr, 0);
++ sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
++ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
++
++ /* mark port as being closed */
++ atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, &erp_action->port->status);
++ /* save a pointer to this port */
++ erp_action->fsf_req->data.close_physical_port.port = erp_action->port;
++	/* port to be closed */
++ erp_action->fsf_req->qtcb->header.port_handle = erp_action->port->handle;
++ erp_action->fsf_req->erp_action = erp_action;
++
++ /* start QDIO request for this FSF request */
++ retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
++ if (retval) {
++ ZFCP_LOG_INFO(
++			"error: Could not send a "
++ "close physical port request for "
++ "the port with WWPN 0x%016Lx connected to "
++ "the adapter with devno 0x%04x.\n",
++ (llui_t)erp_action->port->wwpn,
++ erp_action->adapter->devno);
++ if (zfcp_fsf_req_free(erp_action->fsf_req)){
++ ZFCP_LOG_NORMAL(
++ "bug: Could not remove one FSF "
++ "request. Memory leakage possible. "
++ "(debug info 0x%lx).\n",
++ (unsigned long)erp_action->fsf_req);
++			retval = -EINVAL;
++		}
++ erp_action->fsf_req = NULL;
++ goto out;
++ }
++
++ ZFCP_LOG_TRACE(
++ "Close Physical Port request initiated "
++ "(adapter devno=0x%04x, port WWPN=0x%016Lx)\n",
++ erp_action->adapter->devno,
++ (llui_t)erp_action->port->wwpn);
++
++out:
++ write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock, lock_flags);
++
++ ZFCP_LOG_TRACE("exit (%d)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_close_physical_port_handler
++ *
++ * purpose: is called for finished Close Physical Port FSF command
++ *
++ * returns:	0 - FSF request processed successfully,
++ *		-EINVAL - otherwise
++ */
++static int zfcp_fsf_close_physical_port_handler(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = -EINVAL;
++ zfcp_port_t *port;
++ zfcp_unit_t *unit;
++ unsigned long flags;
++ fsf_qtcb_header_t *header = &fsf_req->qtcb->header;
++ u16 subtable, rule, counter;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx)\n",
++ (unsigned long)fsf_req);
++
++ port = fsf_req->data.close_physical_port.port;
++
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
++ /* don't change port status in our bookkeeping */
++ goto skip_fsfstatus;
++ }
++
++ /* evaluate FSF status in QTCB */
++ switch (fsf_req->qtcb->header.fsf_status) {
++
++ case FSF_PORT_HANDLE_NOT_VALID :
++ ZFCP_LOG_FLAGS(1, "FSF_PORT_HANDLE_NOT_VALID\n");
++ ZFCP_LOG_INFO(
++ "Temporary port identifier (handle) 0x%x "
++ "for the port with WWPN 0x%016Lx connected to "
++ "the adapter of devno 0x%04x is "
++ "not valid. This may happen occasionally.\n",
++ port->handle,
++ (llui_t)port->wwpn,
++ port->adapter->devno);
++ ZFCP_LOG_DEBUG("status qualifier:\n");
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_DEBUG,
++ (char*)&fsf_req->qtcb->header.fsf_status_qual,
++ 16);
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_s_phand_nv");
++ zfcp_erp_adapter_reopen(port->adapter, 0);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ // panic("for ralph");
++ break;
++
++ case FSF_ACCESS_DENIED :
++ ZFCP_LOG_FLAGS(2, "FSF_ACCESS_DENIED\n");
++ ZFCP_LOG_NORMAL("Access denied, cannot close physical port "
++ "(devno=0x%04x wwpn=0x%016Lx)\n",
++ port->adapter->devno,
++ (llui_t)port->wwpn);
++ for (counter = 0; counter < 2; counter++) {
++ subtable = header->fsf_status_qual.halfword[counter * 2];
++ rule = header->fsf_status_qual.halfword[counter * 2 + 1];
++ switch (subtable) {
++ case FSF_SQ_CFDC_SUBTABLE_OS:
++ case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
++ case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
++ case FSF_SQ_CFDC_SUBTABLE_LUN:
++ ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
++ zfcp_act_subtable_type[subtable], rule);
++ break;
++ }
++ }
++ debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_access");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_PORT_BOXED :
++ ZFCP_LOG_FLAGS(2, "FSF_PORT_BOXED\n");
++ ZFCP_LOG_DEBUG("The remote port "
++ "with WWPN 0x%016Lx on the adapter with "
++			       "devno 0x%04x needs to be reopened, but an "
++			       "attempt was made to close it physically.\n",
++ (llui_t)port->wwpn,
++ port->adapter->devno);
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_s_pboxed");
++ zfcp_erp_port_reopen(port, 0);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
++ | ZFCP_STATUS_FSFREQ_RETRY;
++ break;
++
++
++ case FSF_ADAPTER_STATUS_AVAILABLE :
++ ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
++ switch (fsf_req->qtcb->header.fsf_status_qual.word[0]){
++ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE :
++ ZFCP_LOG_FLAGS(2, "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_sq_ltest");
++ /* This will now be escalated by ERP */
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED :
++ ZFCP_LOG_FLAGS(2, "FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
++ /* ERP strategy will escalate */
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_sq_ulp");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ default:
++ ZFCP_LOG_NORMAL("bug: Wrong status qualifier 0x%x arrived.\n",
++ fsf_req->qtcb->header.fsf_status_qual.word[0]);
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"fsf_sq_inval:");
++ debug_exception(fsf_req->adapter->erp_dbf,0,
++ &fsf_req->qtcb->header.fsf_status_qual.word[0],
++ sizeof(u32));
++ break;
++ }
++ break;
++
++ case FSF_GOOD :
++ ZFCP_LOG_FLAGS(3, "FSF_GOOD\n");
++ ZFCP_LOG_DEBUG(
++ "Remote port (WWPN=0x%016Lx) via adapter "
++ "(devno=0x%04x) physically closed, "
++ "port handle 0x%x\n",
++ (llui_t)port->wwpn,
++ port->adapter->devno,
++ port->handle);
++ /* can't use generic zfcp_erp_modify_port_status because
++ * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
++ */
++ atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN,
++ &port->status);
++ read_lock_irqsave(&port->unit_list_lock, flags);
++ ZFCP_FOR_EACH_UNIT(port, unit) {
++ atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
++ &unit->status);
++ }
++ read_unlock_irqrestore(&port->unit_list_lock, flags);
++ retval = 0;
++ break;
++
++ default :
++ ZFCP_LOG_NORMAL(
++ "bug: An unknown FSF Status was presented "
++ "(debug info 0x%x)\n",
++ fsf_req->qtcb->header.fsf_status);
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"fsf_s_inval:");
++ debug_exception(fsf_req->adapter->erp_dbf,0,
++ &fsf_req->qtcb->header.fsf_status,
++ sizeof(u32));
++ break;
++ }
++
++skip_fsfstatus:
++ atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, &port->status);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_open_unit
++ *
++ * purpose: creates and sends an FSF Open LUN request for the unit
++ * referred to by the given ERP action
++ *
++ * returns: 0 - request was successfully initiated
++ * <0 - request could not be created or sent
++ *
++ * assumptions: This routine does not check whether the associated
++ * remote port has already been opened. This should be
++ * done by the calling routines. Otherwise FSF may
++ * present an error status.
++ */
++static int zfcp_fsf_open_unit(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ volatile qdio_buffer_element_t *sbale;
++ int retval = 0;
++ unsigned long lock_flags;
++
++ ZFCP_LOG_TRACE("enter (erp_action=0x%lx)\n", (unsigned long)erp_action);
++
++ /* setup new FSF request */
++ retval = zfcp_fsf_req_create(
++ erp_action->adapter,
++ FSF_QTCB_OPEN_LUN,
++ ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
++ &erp_action->adapter->pool.fsf_req_erp,
++ &lock_flags,
++ &erp_action->fsf_req);
++ if (retval < 0) {
++ ZFCP_LOG_INFO(
++ "error: Out of resources. Could not create an "
++ "open unit request for FCP_LUN 0x%016Lx connected to "
++ "the port with WWPN 0x%016Lx connected to "
++ "the adapter with devno 0x%04x.\n",
++ (llui_t)erp_action->unit->fcp_lun,
++ (llui_t)erp_action->unit->port->wwpn,
++ erp_action->adapter->devno);
++ goto out;
++ }
++
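++ /* mark sbale[0] as read type and sbale[1] as the last entry in the chain */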
++ sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
++ erp_action->fsf_req->sbal_curr, 0);
++ sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
++ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
++
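++ /* identify the parent port (by its handle) and the FCP_LUN to open */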
++ erp_action->fsf_req->qtcb->header.port_handle =
++ erp_action->port->handle;
++ erp_action->fsf_req->qtcb->bottom.support.fcp_lun =
++ erp_action->unit->fcp_lun;
++ atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
++ erp_action->fsf_req->data.open_unit.unit = erp_action->unit;
++ erp_action->fsf_req->erp_action = erp_action;
++
++ /* start QDIO request for this FSF request */
++ retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
++ if (retval) {
++ ZFCP_LOG_INFO(
++ "error: Could not send an open unit request "
++ "on the adapter with devno 0x%04x, "
++ "port WWPN 0x%016Lx for unit FCP_LUN 0x%016Lx\n",
++ erp_action->adapter->devno,
++ (llui_t)erp_action->port->wwpn,
++ (llui_t)erp_action->unit->fcp_lun);
++ if (zfcp_fsf_req_free(erp_action->fsf_req)) {
++ ZFCP_LOG_NORMAL(
++ "bug: Could not remove one FSF "
++ "request. Memory leakage possible. "
++ "(debug info 0x%lx).\n",
++ (unsigned long)erp_action->fsf_req);
++ retval = -EINVAL;
++ }
++ erp_action->fsf_req = NULL;
++ goto out;
++ }
++
++ ZFCP_LOG_TRACE(
++ "Open LUN request initiated "
++ "(adapter devno=0x%04x, port WWPN=0x%016Lx, unit FCP_LUN=0x%016Lx)\n",
++ erp_action->adapter->devno,
++ (llui_t)erp_action->port->wwpn,
++ (llui_t)erp_action->unit->fcp_lun);
++
++out:
++ write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock, lock_flags);
++
++ ZFCP_LOG_TRACE("exit (%d)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++
++/*
++ * function: zfcp_fsf_open_unit_handler
++ *
++ * purpose: is called for finished Open LUN command
++ *
++ * returns: 0 - unit successfully opened, -EINVAL - failure
++ */
++static int zfcp_fsf_open_unit_handler(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = -EINVAL;
++ zfcp_adapter_t *adapter;
++ zfcp_unit_t *unit;
++ fsf_qtcb_header_t *header;
++ fsf_queue_designator_t *queue_designator;
++ u16 subtable, rule, counter;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx)\n",
++ (unsigned long)fsf_req);
++
++ adapter = fsf_req->adapter;
++ unit = fsf_req->data.open_unit.unit;
++ header = &fsf_req->qtcb->header;
++ queue_designator = &header->fsf_status_qual.fsf_queue_designator;
++
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
++ /* don't change unit status in our bookkeeping */
++ goto skip_fsfstatus;
++ }
++
++ /* evaluate FSF status in QTCB */
++ switch (fsf_req->qtcb->header.fsf_status) {
++
++ case FSF_PORT_HANDLE_NOT_VALID :
++ ZFCP_LOG_FLAGS(1, "FSF_PORT_HANDLE_NOT_VALID\n");
++ ZFCP_LOG_INFO("Temporary port identifier (handle) 0x%x "
++ "for the port with WWPN 0x%016Lx connected to "
++ "the adapter of devno 0x%04x is "
++ "not valid. This may happen occasionally.\n",
++ unit->port->handle,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ ZFCP_LOG_DEBUG("status qualifier:\n");
++ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
++ (char*)&fsf_req->qtcb->header.fsf_status_qual,
++ 16);
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_s_ph_nv");
++ zfcp_erp_adapter_reopen(unit->port->adapter, 0);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_LUN_ALREADY_OPEN :
++ ZFCP_LOG_FLAGS(0, "FSF_LUN_ALREADY_OPEN\n");
++ ZFCP_LOG_NORMAL("bug: Attempted to open the logical unit "
++ "with FCP_LUN 0x%016Lx at "
++ "the remote port with WWPN 0x%016Lx connected "
++ "to the adapter with devno 0x%04x twice.\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ debug_text_exception(fsf_req->adapter->erp_dbf,0,"fsf_s_uopen");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_ACCESS_DENIED :
++ ZFCP_LOG_FLAGS(2, "FSF_ACCESS_DENIED\n");
++ ZFCP_LOG_NORMAL("Access denied, cannot open unit 0x%016Lx "
++ "on the remote port 0x%016Lx "
++ "on adapter with devno 0x%04x\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ adapter->devno);
++ for (counter = 0; counter < 2; counter++) {
++ subtable = header->fsf_status_qual.halfword[counter * 2];
++ rule = header->fsf_status_qual.halfword[counter * 2 + 1];
++ switch (subtable) {
++ case FSF_SQ_CFDC_SUBTABLE_OS:
++ case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
++ case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
++ case FSF_SQ_CFDC_SUBTABLE_LUN:
++ ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
++ zfcp_act_subtable_type[subtable], rule);
++ break;
++ }
++ }
++ debug_text_event(adapter->erp_dbf, 1, "fsf_s_access");
++ zfcp_erp_unit_failed(unit);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_PORT_BOXED :
++ ZFCP_LOG_FLAGS(2, "FSF_PORT_BOXED\n");
++ ZFCP_LOG_DEBUG("The remote port "
++ "with WWPN 0x%016Lx on the adapter with "
++ "devno 0x%04x needs to be reopened\n",
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ debug_text_event(fsf_req->adapter->erp_dbf,2,"fsf_s_pboxed");
++ zfcp_erp_port_reopen(unit->port, 0);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
++ | ZFCP_STATUS_FSFREQ_RETRY;
++ break;
++
++ case FSF_LUN_SHARING_VIOLATION :
++ ZFCP_LOG_FLAGS(2, "FSF_LUN_SHARING_VIOLATION\n");
++ if (header->fsf_status_qual.word[0] != 0) {
++ ZFCP_LOG_NORMAL("FCP-LUN 0x%Lx at the remote port with "
++ "WWPN 0x%Lx connected to the adapter "
++ "with devno 0x%04x is already in use "
++ "in LPAR%d, CSS%d\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ adapter->devno,
++ queue_designator->hla,
++ queue_designator->cssid);
++ } else {
++ subtable = header->fsf_status_qual.halfword[4];
++ rule = header->fsf_status_qual.halfword[5];
++ switch (subtable) {
++ case FSF_SQ_CFDC_SUBTABLE_OS:
++ case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
++ case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
++ case FSF_SQ_CFDC_SUBTABLE_LUN:
++ ZFCP_LOG_NORMAL("Access to FCP-LUN 0x%Lx at the "
++ "remote port with WWPN 0x%Lx "
++ "connected to the adapter "
++ "with devno 0x%04x "
++ "is denied (%s rule %d)\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ adapter->devno,
++ zfcp_act_subtable_type[subtable],
++ rule);
++ break;
++ }
++ }
++ ZFCP_LOG_DEBUG("Additional sense data is presented:\n");
++ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
++ (char*)&header->fsf_status_qual,
++ sizeof(fsf_status_qual_t));
++ debug_text_event(adapter->erp_dbf,2,"fsf_s_l_sh_vio");
++ zfcp_erp_unit_failed(unit);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED :
++ ZFCP_LOG_FLAGS(1, "FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED\n");
++ ZFCP_LOG_INFO("error: The adapter ran out of resources. "
++ "There is no handle (temporary port identifier) "
++ "available for the unit with "
++ "FCP_LUN 0x%016Lx at the remote port with WWPN 0x%016Lx "
++ "connected to the adapter with devno 0x%04x\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_s_max_units");
++ zfcp_erp_unit_failed(unit);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_ADAPTER_STATUS_AVAILABLE :
++ ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
++ switch (fsf_req->qtcb->header.fsf_status_qual.word[0]){
++ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE :
++ ZFCP_LOG_FLAGS(2, "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
++ /* Re-establish link to port */
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_sq_ltest");
++ zfcp_erp_port_reopen(unit->port, 0);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED :
++ ZFCP_LOG_FLAGS(2, "FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
++ /* ERP strategy will escalate */
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_sq_ulp");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ default:
++ ZFCP_LOG_NORMAL("bug: Wrong status qualifier 0x%x arrived.\n",
++ fsf_req->qtcb->header.fsf_status_qual.word[0]);
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"fsf_sq_inval:");
++ debug_exception(fsf_req->adapter->erp_dbf,0,
++ &fsf_req->qtcb->header.fsf_status_qual.word[0],
++ sizeof(u32));
++ }
++ break;
++
++ case FSF_GOOD :
++ ZFCP_LOG_FLAGS(3, "FSF_GOOD\n");
++ /* save LUN handle assigned by FSF */
++ unit->handle = fsf_req->qtcb->header.lun_handle;
++ ZFCP_LOG_TRACE("unit (FCP_LUN=0x%016Lx) of remote port "
++ "(WWPN=0x%016Lx) via adapter (devno=0x%04x) opened, "
++ "port handle 0x%x \n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno,
++ unit->handle);
++ /* mark unit as open */
++ atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
++ retval = 0;
++ break;
++
++ default :
++ ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
++ "(debug info 0x%x)\n",
++ fsf_req->qtcb->header.fsf_status);
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"fsf_s_inval:");
++ debug_exception(fsf_req->adapter->erp_dbf,0,
++ &fsf_req->qtcb->header.fsf_status,
++ sizeof(u32));
++ break;
++ }
++
++skip_fsfstatus:
++ atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &unit->status);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_close_unit
++ *
++ * purpose: creates and sends an FSF Close LUN request for the unit
++ * referred to by the given ERP action
++ *
++ * returns: 0 - request was successfully initiated
++ * <0 - request could not be created or sent
++ *
++ * assumptions: This routine does not check whether the associated
++ * remote port/lun has already been opened. This should be
++ * done by the calling routines. Otherwise FSF may
++ * present an error status.
++ */
++static int zfcp_fsf_close_unit(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ volatile qdio_buffer_element_t *sbale;
++ int retval = 0;
++ unsigned long lock_flags;
++
++ ZFCP_LOG_TRACE("enter (erp_action=0x%lx)\n", (unsigned long)erp_action);
++
++ /* setup new FSF request */
++ retval = zfcp_fsf_req_create(
++ erp_action->adapter,
++ FSF_QTCB_CLOSE_LUN,
++ ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
++ &erp_action->adapter->pool.fsf_req_erp,
++ &lock_flags,
++ &erp_action->fsf_req);
++ if (retval < 0) {
++ ZFCP_LOG_INFO(
++ "error: Out of resources. Could not create a "
++ "close unit request for FCP_LUN 0x%016Lx connected to "
++ "the port with WWPN 0x%016Lx connected to "
++ "the adapter with devno 0x%04x.\n",
++ (llui_t)erp_action->unit->fcp_lun,
++ (llui_t)erp_action->port->wwpn,
++ erp_action->adapter->devno);
++ goto out;
++ }
++
++ sbale = zfcp_qdio_sbale_req(erp_action->fsf_req,
++ erp_action->fsf_req->sbal_curr, 0);
++ sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
++ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
++
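++ /* address the unit to be closed via its port and LUN handles */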
++ erp_action->fsf_req->qtcb->header.port_handle = erp_action->port->handle;
++ erp_action->fsf_req->qtcb->header.lun_handle = erp_action->unit->handle;
++ atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
++ erp_action->fsf_req->data.close_unit.unit = erp_action->unit;
++ erp_action->fsf_req->erp_action = erp_action;
++
++ /* start QDIO request for this FSF request */
++ retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
++ if (retval) {
++ ZFCP_LOG_INFO(
++ "error: Could not send a "
++ "close unit request for FCP_LUN 0x%016Lx connected to "
++ "the port with WWPN 0x%016Lx connected to "
++ "the adapter with devno 0x%04x.\n",
++ (llui_t)erp_action->unit->fcp_lun,
++ (llui_t)erp_action->port->wwpn,
++ erp_action->adapter->devno);
++ if (zfcp_fsf_req_free(erp_action->fsf_req)){
++ ZFCP_LOG_NORMAL(
++ "bug: Could not remove one FSF "
++ "request. Memory leakage possible. "
++ "(debug info 0x%lx).\n",
++ (unsigned long)erp_action->fsf_req);
++ retval = -EINVAL;
++ }
++ erp_action->fsf_req = NULL;
++ goto out;
++ }
++
++ ZFCP_LOG_TRACE(
++ "Close LUN request initiated "
++ "(adapter devno=0x%04x, port WWPN=0x%016Lx, unit FCP_LUN=0x%016Lx)\n",
++ erp_action->adapter->devno,
++ (llui_t)erp_action->port->wwpn,
++ (llui_t)erp_action->unit->fcp_lun);
++
++out:
++ write_unlock_irqrestore(&erp_action->adapter->request_queue.queue_lock, lock_flags);
++
++ ZFCP_LOG_TRACE("exit (%d)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_close_unit_handler
++ *
++ * purpose: is called for finished Close LUN FSF command
++ *
++ * returns: 0 - unit successfully closed, -EINVAL - failure
++ */
++static int zfcp_fsf_close_unit_handler(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = -EINVAL;
++ zfcp_unit_t *unit;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx)\n",
++ (unsigned long)fsf_req);
++
++ unit = fsf_req->data.close_unit.unit; /* restore unit */
++
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
++ /* don't change unit status in our bookkeeping */
++ goto skip_fsfstatus;
++ }
++
++ /* evaluate FSF status in QTCB */
++ switch (fsf_req->qtcb->header.fsf_status) {
++
++ case FSF_PORT_HANDLE_NOT_VALID :
++ ZFCP_LOG_FLAGS(1, "FSF_PORT_HANDLE_NOT_VALID\n");
++ ZFCP_LOG_INFO(
++ "Temporary port identifier (handle) 0x%x "
++ "for the port with WWPN 0x%016Lx connected to "
++ "the adapter of devno 0x%04x is "
++ "not valid. This may happen in rare "
++ "circumstances\n",
++ unit->port->handle,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ ZFCP_LOG_DEBUG("status qualifier:\n");
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_DEBUG,
++ (char*)&fsf_req->qtcb->header.fsf_status_qual,
++ 16);
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_s_phand_nv");
++ zfcp_erp_adapter_reopen(unit->port->adapter, 0);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_LUN_HANDLE_NOT_VALID :
++ ZFCP_LOG_FLAGS(1, "FSF_LUN_HANDLE_NOT_VALID\n");
++ ZFCP_LOG_INFO(
++ "Temporary LUN identifier (handle) 0x%x "
++ "of the logical unit with FCP_LUN 0x%016Lx at "
++ "the remote port with WWPN 0x%016Lx connected "
++ "to the adapter with devno 0x%04x is "
++ "not valid. This may happen occasionally.\n",
++ unit->handle,
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ ZFCP_LOG_DEBUG("Status qualifier data:\n");
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_DEBUG,
++ (char*)&fsf_req->qtcb->header.fsf_status_qual,
++ 16);
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_s_lhand_nv");
++ zfcp_erp_port_reopen(unit->port, 0);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_PORT_BOXED :
++ ZFCP_LOG_FLAGS(2, "FSF_PORT_BOXED\n");
++ ZFCP_LOG_DEBUG("The remote port "
++ "with WWPN 0x%016Lx on the adapter with "
++ "devno 0x%04x needs to be reopened\n",
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ debug_text_event(fsf_req->adapter->erp_dbf,2,"fsf_s_pboxed");
++ zfcp_erp_port_reopen(unit->port, 0);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
++ | ZFCP_STATUS_FSFREQ_RETRY;
++ break;
++
++ case FSF_ADAPTER_STATUS_AVAILABLE :
++ ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
++ switch (fsf_req->qtcb->header.fsf_status_qual.word[0]){
++ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE :
++ ZFCP_LOG_FLAGS(2, "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
++ /* re-establish link to port */
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_sq_ltest");
++ zfcp_erp_port_reopen(unit->port, 0);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED :
++ ZFCP_LOG_FLAGS(2, "FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
++ /* ERP strategy will escalate */
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_sq_ulp");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ default:
++ ZFCP_LOG_NORMAL("bug: Wrong status qualifier 0x%x arrived.\n",
++ fsf_req->qtcb->header.fsf_status_qual.word[0]);
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"fsf_sq_inval:");
++ debug_exception(fsf_req->adapter->erp_dbf,0,
++ &fsf_req->qtcb->header.fsf_status_qual.word[0],
++ sizeof(u32));
++ break;
++ }
++ break;
++
++ case FSF_GOOD :
++ ZFCP_LOG_FLAGS(3, "FSF_GOOD\n");
++ ZFCP_LOG_TRACE("unit (FCP_LUN=0x%016Lx) of remote port "
++ "(WWPN=0x%016Lx) via adapter (devno=0x%04x) closed, "
++ "port handle 0x%x \n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno,
++ unit->handle);
++ /* mark unit as closed */
++ atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
++ retval = 0;
++ break;
++
++ default :
++ ZFCP_LOG_NORMAL("bug: An unknown FSF Status was presented "
++ "(debug info 0x%x)\n",
++ fsf_req->qtcb->header.fsf_status);
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"fsf_s_inval:");
++ debug_exception(fsf_req->adapter->erp_dbf,0,
++ &fsf_req->qtcb->header.fsf_status,
++ sizeof(u32));
++ break;
++ }
++
++skip_fsfstatus:
++
++ atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &unit->status);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_control_file
++ *
++ * purpose: Initiator of the control file upload/download FSF requests
++ *
++ * returns: 0 - FSF request is successfully created and queued
++ * -EOPNOTSUPP - The FCP adapter does not have Control File support
++ * -EINVAL - Invalid direction specified
++ * -ENOMEM - Insufficient memory
++ * -EPERM - Cannot create FSF request or place it in the QDIO queue
++ */
++static int zfcp_fsf_control_file(
++ zfcp_adapter_t *adapter,
++ zfcp_fsf_req_t **fsf_req_ptr,
++ u32 fsf_command,
++ u32 option,
++ zfcp_sg_list_t *sg_list)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ zfcp_fsf_req_t *fsf_req;
++ fsf_qtcb_bottom_support_t *bottom;
++ unsigned long lock_flags;
++ int req_flags = 0;
++ int direction;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter=0x%lx fsf_req_ptr=0x%lx "
++ "fsf_command=0x%x option=0x%x sg_list=0x%lx)\n",
++ (unsigned long)adapter,
++ (unsigned long)fsf_req_ptr,
++ fsf_command,
++ option,
++ (unsigned long)sg_list);
++
++ if (!(adapter->supported_features & FSF_FEATURE_CFDC)) {
++ ZFCP_LOG_INFO(
++ "Adapter does not support control file "
++ "(devno=0x%04x)\n",
++ adapter->devno);
++ retval = -EOPNOTSUPP;
++ goto no_act_support;
++ }
++
++ switch (fsf_command) {
++
++ case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
++ direction = SBAL_FLAGS0_TYPE_WRITE;
++ if ((option != FSF_CFDC_OPTION_FULL_ACCESS) &&
++ (option != FSF_CFDC_OPTION_RESTRICTED_ACCESS))
++ req_flags |= ZFCP_WAIT_FOR_SBAL;
++ break;
++
++ case FSF_QTCB_UPLOAD_CONTROL_FILE:
++ direction = SBAL_FLAGS0_TYPE_READ;
++ break;
++
++ default:
++ ZFCP_LOG_INFO("Invalid FSF command code 0x%08x\n", fsf_command);
++ goto invalid_command;
++ }
++
++ retval = zfcp_fsf_req_create(
++ adapter, fsf_command, req_flags, NULL, &lock_flags, &fsf_req);
++ if (retval < 0) {
++ ZFCP_LOG_INFO(
++ "error: Could not create FSF request (devno=0x%04x)\n",
++ adapter->devno);
++ retval = -EPERM;
++ goto out;
++ }
++
++ bottom = &fsf_req->qtcb->bottom.support;
++ bottom->op_subtype = FSF_CFDC_OPERATION_SUBTYPE;
++ bottom->option = option;
++
++ if (sg_list->count > 0) {
++ int bytes = zfcp_qdio_sbals_from_sg(
++ fsf_req, direction, sg_list->sg,
++ ZFCP_MAX_SBALES_PER_CONTROL_FILE,
++ ZFCP_MAX_SBALS_PER_REQ);
++
++ if (bytes != ZFCP_CFDC_MAX_CONTROL_FILE_SIZE) {
++ ZFCP_LOG_INFO(
++ "error: Could not create sufficient number "
++ "of SBALS (devno=0x%04x)\n",
++ adapter->devno);
++ retval = -ENOMEM;
++ goto sbals_failed;
++ }
++ } else {
++ volatile qdio_buffer_element_t *sbale =
++ zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
++ sbale[0].flags |= direction;
++ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
++ }
++
++ retval = zfcp_fsf_req_send(fsf_req, NULL);
++ if (retval < 0) {
++ ZFCP_LOG_INFO(
++ "error: Could not send FSF request (devno=0x%04x)\n",
++ adapter->devno);
++ retval = -EPERM;
++ goto queue_failed;
++ }
++
++ ZFCP_LOG_NORMAL(
++ "Control file %s FSF request initiated (devno=0x%04x)\n",
++ fsf_command == FSF_QTCB_DOWNLOAD_CONTROL_FILE ?
++ "download" : "upload",
++ adapter->devno);
++
++ *fsf_req_ptr = fsf_req;
++
++ goto out;
++
++sbals_failed:
++queue_failed:
++ if (zfcp_fsf_req_free(fsf_req)) {
++ ZFCP_LOG_INFO(
++ "bug: Could not remove the FSF request "
++ "(devno=0x%04x fsf_req=0x%lx)\n",
++ adapter->devno,
++ (unsigned long)fsf_req);
++ }
++
++out:
++ write_unlock_irqrestore(
++ &adapter->request_queue.queue_lock, lock_flags);
++
++no_act_support:
++invalid_command:
++ ZFCP_LOG_TRACE("exit (%d)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
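++/*
++ * usage sketch (hypothetical, for illustration only): a caller that has
++ * described a control file by a zfcp_sg_list_t could initiate a download
++ * like this and then wait for the returned request to complete:
++ *
++ * zfcp_fsf_req_t *fsf_req;
++ * int rc = zfcp_fsf_control_file(
++ * adapter, &fsf_req,
++ * FSF_QTCB_DOWNLOAD_CONTROL_FILE,
++ * FSF_CFDC_OPTION_FULL_ACCESS,
++ * &sg_list);
++ */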
++
++/*
++ * function: zfcp_fsf_control_file_handler
++ *
++ * purpose: Handler of the control file upload/download FSF requests
++ *
++ * returns: 0 - FSF request successfully processed
++ * -EAGAIN - Operation has to be repeated because of a temporary problem
++ * -EACCES - There is no permission to execute the operation
++ * -EPERM - The control file is not in a valid format
++ * -EIO - There is a problem with the FCP adapter
++ * -EINVAL - Invalid operation
++ * -EFAULT - User space memory I/O operation fault
++ */
++static int zfcp_fsf_control_file_handler(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ zfcp_adapter_t *adapter = fsf_req->adapter;
++ fsf_qtcb_header_t *header = &fsf_req->qtcb->header;
++ fsf_qtcb_bottom_support_t *bottom = &fsf_req->qtcb->bottom.support;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter (fsf_req=0x%lx)\n", (unsigned long)fsf_req);
++
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
++ retval = -EINVAL;
++ goto skip_fsfstatus;
++ }
++
++ switch (header->fsf_status) {
++
++ case FSF_GOOD:
++ ZFCP_LOG_FLAGS(2, "FSF_GOOD\n");
++ ZFCP_LOG_NORMAL(
++ "The FSF request has been successfully completed "
++ "(devno=0x%04x fsf_req.seq_no=%d)\n",
++ adapter->devno,
++ fsf_req->seq_no);
++ break;
++
++ case FSF_OPERATION_PARTIALLY_SUCCESSFUL:
++ ZFCP_LOG_FLAGS(2, "FSF_OPERATION_PARTIALLY_SUCCESSFUL\n");
++ if (bottom->op_subtype == FSF_CFDC_OPERATION_SUBTYPE) {
++ switch (header->fsf_status_qual.word[0]) {
++
++ case FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE:
++ ZFCP_LOG_NORMAL(
++ "CDFC could not be saved "
++ "on the SE (devno=0x%04x)\n",
++ adapter->devno);
++ break;
++
++ case FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE2:
++ ZFCP_LOG_NORMAL(
++ "CDFC could not be copied "
++ "to the secondary SE (devno=0x%04x)\n",
++ adapter->devno);
++ break;
++
++ default:
++ ZFCP_LOG_NORMAL(
++ "CDFC could not be hardened "
++ "on the FCP adapter (devno=0x%04x)\n",
++ adapter->devno);
++ }
++ }
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ retval = -EAGAIN;
++ break;
++
++ case FSF_AUTHORIZATION_FAILURE:
++ ZFCP_LOG_FLAGS(2, "FSF_AUTHORIZATION_FAILURE\n");
++ ZFCP_LOG_NORMAL(
++ "Subchannel does not accept privileged commands "
++ "(devno=0x%04x)\n",
++ adapter->devno);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ retval = -EACCES;
++ break;
++
++ case FSF_CFDC_ERROR_DETECTED:
++ ZFCP_LOG_FLAGS(2, "FSF_CFDC_ERROR_DETECTED\n");
++ ZFCP_LOG_NORMAL(
++ "Error at position %d in the CDFC, "
++ "CDFC is discarded by the FCP adapter (devno=0x%04x)\n",
++ header->fsf_status_qual.word[0],
++ adapter->devno);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ retval = -EPERM;
++ break;
++
++ case FSF_CONTROL_FILE_UPDATE_ERROR:
++ ZFCP_LOG_FLAGS(2, "FSF_CONTROL_FILE_UPDATE_ERROR\n");
++ ZFCP_LOG_NORMAL(
++ "FCP adapter cannot harden the control file, "
++ "file is discarded (devno=0x%04x)\n",
++ adapter->devno);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ retval = -EIO;
++ break;
++
++ case FSF_CONTROL_FILE_TOO_LARGE:
++ ZFCP_LOG_FLAGS(2, "FSF_CONTROL_FILE_TOO_LARGE\n");
++ ZFCP_LOG_NORMAL(
++ "Control file is too large, file is discarded "
++ "by the FCP adapter (devno=0x%04x)\n",
++ adapter->devno);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ retval = -EIO;
++ break;
++
++ case FSF_ACCESS_CONFLICT_DETECTED:
++ ZFCP_LOG_FLAGS(2, "FSF_ACCESS_CONFLICT_DETECTED\n");
++ if (bottom->op_subtype == FSF_CFDC_OPERATION_SUBTYPE)
++ ZFCP_LOG_NORMAL(
++ "CDFC has been discarded, because activation "
++ "would impact %d active connection(s) "
++ "(devno=0x%04x)\n",
++ header->fsf_status_qual.word[0],
++ adapter->devno);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ retval = -EIO;
++ break;
++
++ case FSF_CONFLICTS_OVERRULED:
++ ZFCP_LOG_FLAGS(2, "FSF_CONFLICTS_OVERRULED\n");
++ if (bottom->op_subtype == FSF_CFDC_OPERATION_SUBTYPE)
++ ZFCP_LOG_NORMAL(
++ "CDFC has been activated, but activation "
++ "has impacted %d active connection(s) "
++ "(devno=0x%04x)\n",
++ header->fsf_status_qual.word[0],
++ adapter->devno);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ retval = -EIO;
++ break;
++
++ case FSF_UNKNOWN_COMMAND:
++ ZFCP_LOG_FLAGS(2, "FSF_UNKNOWN_COMMAND\n");
++ ZFCP_LOG_NORMAL(
++ "FSF command 0x%x is not supported by FCP adapter "
++ "(devno=0x%04x)\n",
++ fsf_req->fsf_command,
++ adapter->devno);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ retval = -EINVAL;
++ break;
++
++ case FSF_UNKNOWN_OP_SUBTYPE:
++ ZFCP_LOG_FLAGS(2, "FSF_UNKNOWN_OP_SUBTYPE\n");
++ ZFCP_LOG_NORMAL(
++ "Invalid operation subtype 0x%x has been specified "
++ "in QTCB bottom (devno=0x%04x)\n",
++ bottom->op_subtype,
++ adapter->devno);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ retval = -EINVAL;
++ break;
++
++ case FSF_INVALID_COMMAND_OPTION:
++ ZFCP_LOG_FLAGS(2, "FSF_INVALID_COMMAND_OPTION\n");
++ ZFCP_LOG_NORMAL(
++ "Invalid option 0x%x has been specified in QTCB bottom "
++ "(devno=0x%04x)\n",
++ bottom->option,
++ adapter->devno);
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ retval = -EINVAL;
++ break;
++
++ default:
++ ZFCP_LOG_NORMAL(
++ "bug: An unknown FSF Status was presented "
++ "(devno=0x%04x fsf_status=0x%08x)\n",
++ adapter->devno,
++ header->fsf_status);
++ debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_sq_inval");
++ debug_exception(fsf_req->adapter->erp_dbf, 0,
++ &header->fsf_status_qual.word[0], sizeof(u32));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ retval = -EINVAL;
++ break;
++ }
++
++skip_fsfstatus:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++#ifdef ZFCP_RESID
++/*
++ * function: zfcp_scsi_truncate_command
++ *
++ * purpose: recalculates the block count field of a SCSI CDB after
++ * the data length of the command has been truncated
++ *
++ * returns: 0 - block count field was rewritten
++ * 1 - command opcode is not handled
++ */
++inline int zfcp_scsi_truncate_command(unsigned char *command_struct,
++ unsigned long original_byte_length,
++ unsigned long new_byte_length)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int retval=0;
++ unsigned long factor, new_block_size;
++ u8 *len_6;
++ u16 *len_10;
++ u32 *len_12;
++ /* trace */
++ ZFCP_LOG_NORMAL(
++ "enter command_struct = 0x%lx, "
++ "original_byte_length = %ld "
++ "new_byte_length = %ld\n",
++ (unsigned long)command_struct,
++ original_byte_length,
++ new_byte_length);
++
++ /*trace*/
++ ZFCP_LOG_NORMAL("original SCSI command:\n");
++ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
++ command_struct,
++ 12);
++
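++ /* the location and width of the block-count field depend on the
++ * CDB format (6-, 10- and 12-byte command variants) */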
++ switch(command_struct[0]) {
++ case WRITE_6:
++ case READ_6:
++ len_6 = &command_struct[4];
++ factor = (unsigned long)(original_byte_length
++ / *len_6);
++ new_block_size = new_byte_length / factor;
++ if(new_byte_length % factor) {
++ ZFCP_LOG_NORMAL("bug: Recalculation of command size "
++ "failed. "
++ "(debug info %d, %ld, %d, %ld, %ld)\n",
++ 6,
++ original_byte_length,
++ *len_6,
++ new_byte_length,
++ factor);
++ goto error;
++ }
++ /* trace */
++ ZFCP_LOG_NORMAL("*len_6=%d, factor= %ld, new_byte_length= %ld\n",
++ *len_6, factor, new_byte_length);
++ *len_6=(u8)new_block_size;
++ /* trace */
++ ZFCP_LOG_NORMAL("new *len_6=%d\n",
++ *len_6);
++ break;
++ case WRITE_10:
++ case READ_10:
++ case WRITE_VERIFY:
++ len_10= (u16 *)&command_struct[7];
++ factor = (unsigned long)(original_byte_length
++ / *len_10);
++ new_block_size = new_byte_length / factor;
++ if(new_byte_length % factor) {
++ ZFCP_LOG_NORMAL("bug: Recalculation of command size "
++ "failed. "
++ "(debug info %d, %ld, %d, %ld, %ld)\n",
++ 10,
++ original_byte_length,
++ *len_10,
++ new_byte_length,
++ factor);
++ goto error;
++ }
++ /* TRACE */
++ ZFCP_LOG_NORMAL("*len_10 = %d, factor = %ld, new_byte_length = %ld\n",
++ *len_10, factor, new_byte_length);
++ *len_10=(u16)new_block_size;
++ /* trace */
++ ZFCP_LOG_NORMAL("new *len_10=%d\n",
++ *len_10);
++ break;
++ case WRITE_12:
++ case READ_12:
++ case WRITE_VERIFY_12:
++ len_12= (u32 *)&command_struct[7];
++ factor = (unsigned long)(original_byte_length
++ / *len_12);
++ new_block_size = new_byte_length / factor;
++ if(new_byte_length % factor) {
++ ZFCP_LOG_NORMAL("bug: Recalculation of command size "
++ "failed. "
++ "(debug info %d, %ld, %d, %ld, %ld)\n",
++ 12,
++ original_byte_length,
++ *len_12,
++ new_byte_length,
++ factor);
++ goto error;
++ }
++ /* TRACE */
++ ZFCP_LOG_NORMAL("*len_12 = %d, factor = %ld, new_byte_length = %ld\n",
++ *len_12, factor, new_byte_length);
++ *len_12=(u32)new_block_size;
++ /* trace */
++ ZFCP_LOG_NORMAL("new *len_12=%d\n",
++ *len_12);
++ break;
++ default:
++ /* INFO */
++ ZFCP_LOG_NORMAL("Command to be truncated is not in the list of "
++ "known objects.\n");
++ goto error;
++ break;
++ }
++ goto out;
++
++ error:
++ retval=1;
++ out:
++ /*trace*/
++ ZFCP_LOG_NORMAL("truncated SCSI command:\n");
++ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
++ command_struct,
++ 12);
++
++ /* TRACE */
++ ZFCP_LOG_NORMAL("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++#endif /* ZFCP_RESID */
++
++/*
++ * function: zfcp_fsf_send_fcp_command_task
++ *
++ * purpose: creates and sends an FSF FCP command request for the
++ * given SCSI command
++ *
++ * returns: 0 - request was successfully initiated
++ * <0 - request could not be created or sent
++ *
++ * note: we do not employ linked commands (not supported by HBA anyway)
++ */
++static int
++ zfcp_fsf_send_fcp_command_task(
++ zfcp_unit_t *unit,
++ Scsi_Cmnd *scsi_cmnd)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ zfcp_fsf_req_t *fsf_req = NULL;
++ fcp_cmnd_iu_t *fcp_cmnd_iu;
++ zfcp_adapter_t *adapter = unit->port->adapter;
++ unsigned int sbtype;
++ unsigned long lock_flags;
++ int real_bytes = 0;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter devno=0x%04x, unit=0x%lx)\n",
++ adapter->devno,
++ (unsigned long)unit);
++
++ /* setup new FSF request */
++ retval = zfcp_fsf_req_create(
++ adapter,
++ FSF_QTCB_FCP_CMND,
++ ZFCP_REQ_AUTO_CLEANUP,
++ &adapter->pool.fsf_req_scsi,
++ &lock_flags,
++ &fsf_req);
++ if (retval < 0) {
++ ZFCP_LOG_DEBUG(
++ "error: Out of resources. Could not create an "
++ "FCP command request for FCP_LUN 0x%016Lx connected to "
++ "the port with WWPN 0x%016Lx connected to "
++ "the adapter with devno 0x%04x.\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ adapter->devno);
++ goto failed_req_create;
++ }
++
++ /*
++ * associate FSF request with SCSI request
++ * (need this for lookup on abort)
++ */
++ scsi_cmnd->host_scribble = (char*) fsf_req;
++
++ /*
++ * associate SCSI command with FSF request
++ * (need this for lookup on normal command completion)
++ */
++ fsf_req->data.send_fcp_command_task.scsi_cmnd = scsi_cmnd;
++#ifdef ZFCP_DEBUG_REQUESTS
++ debug_text_event(adapter->req_dbf, 3, "fsf/sc");
++ debug_event(adapter->req_dbf, 3, &fsf_req, sizeof(unsigned long));
++ debug_event(adapter->req_dbf, 3, &scsi_cmnd, sizeof(unsigned long));
++#endif /* ZFCP_DEBUG_REQUESTS */
++#ifdef ZFCP_DEBUG_ABORTS
++ fsf_req->data.send_fcp_command_task.start_jiffies = jiffies;
++#endif
++
++ fsf_req->data.send_fcp_command_task.unit = unit;
++ ZFCP_LOG_DEBUG("unit=0x%lx, unit_fcp_lun=0x%Lx\n",
++ (unsigned long)unit,
++ (llui_t)unit->fcp_lun);
++
++ /* set handles of unit and its parent port in QTCB */
++ fsf_req->qtcb->header.lun_handle = unit->handle;
++ fsf_req->qtcb->header.port_handle = unit->port->handle;
++
++ /* FSF does not define the structure of the FCP_CMND IU */
++ fcp_cmnd_iu = (fcp_cmnd_iu_t*)
++ &(fsf_req->qtcb->bottom.io.fcp_cmnd);
++
++ /*
++ * set depending on data direction:
++ * data direction bits in SBALE (SB Type)
++ * data direction bits in QTCB
++ * data direction bits in FCP_CMND IU
++ */
++ switch (scsi_cmnd->sc_data_direction) {
++ case SCSI_DATA_NONE:
++ ZFCP_LOG_FLAGS(3, "SCSI_DATA_NONE\n");
++ fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
++ /*
++ * FIXME(qdio):
++ * what is the correct type for commands
++ * without 'real' data buffers?
++ */
++ sbtype = SBAL_FLAGS0_TYPE_READ;
++ break;
++ case SCSI_DATA_READ:
++ ZFCP_LOG_FLAGS(3, "SCSI_DATA_READ\n");
++ fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
++ sbtype = SBAL_FLAGS0_TYPE_READ;
++ fcp_cmnd_iu->rddata = 1;
++ break;
++ case SCSI_DATA_WRITE:
++ ZFCP_LOG_FLAGS(3, "SCSI_DATA_WRITE\n");
++ fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
++ sbtype = SBAL_FLAGS0_TYPE_WRITE;
++ fcp_cmnd_iu->wddata = 1;
++ break;
++ case SCSI_DATA_UNKNOWN:
++ ZFCP_LOG_FLAGS(0, "SCSI_DATA_UNKNOWN not supported\n");
++ default:
++ /*
++ * dummy, catch this condition earlier
++ * in zfcp_scsi_queuecommand
++ */
++ goto failed_scsi_cmnd;
++ }
++
++ /* set FC service class in QTCB (3 by default) */
++ fsf_req->qtcb->bottom.io.service_class = adapter->fc_service_class;
++
++ /* set FCP_LUN in FCP_CMND IU in QTCB */
++ fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
++
++ /* set task attributes in FCP_CMND IU in QTCB */
++ if ((scsi_cmnd->device && scsi_cmnd->device->tagged_queue) ||
++ atomic_test_mask(ZFCP_STATUS_UNIT_ASSUMETCQ, &unit->status)) {
++ fcp_cmnd_iu->task_attribute = SIMPLE_Q;
++ ZFCP_LOG_TRACE("setting SIMPLE_Q task attribute\n");
++ } else {
++ fcp_cmnd_iu->task_attribute = UNTAGGED;
++ ZFCP_LOG_TRACE("setting UNTAGGED task attribute\n");
++ }
++
++ /* set additional length of FCP_CDB in FCP_CMND IU in QTCB, if needed */
++ if (scsi_cmnd->cmd_len > FCP_CDB_LENGTH) {
++ fcp_cmnd_iu->add_fcp_cdb_length
++ = (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;
++ ZFCP_LOG_TRACE("SCSI CDB length is 0x%x, "
++ "additional FCP_CDB length is 0x%x "
++ "(shifted right 2 bits)\n",
++ scsi_cmnd->cmd_len,
++ fcp_cmnd_iu->add_fcp_cdb_length);
++ }
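++ /*
++ * example: assuming the usual 16-byte fixed FCP_CDB field, a
++ * 20-byte SCSI CDB yields add_fcp_cdb_length = (20 - 16) >> 2 = 1,
++ * i.e. one additional 4-byte word
++ */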
++ /*
++ * copy SCSI CDB (including additional length, if any) to
++ * FCP_CDB in FCP_CMND IU in QTCB
++ */
++ memcpy( fcp_cmnd_iu->fcp_cdb,
++ scsi_cmnd->cmnd,
++ scsi_cmnd->cmd_len);
++
++ /* FCP CMND IU length in QTCB */
++ fsf_req->qtcb->bottom.io.fcp_cmnd_length
++ = sizeof(fcp_cmnd_iu_t) +
++ fcp_cmnd_iu->add_fcp_cdb_length +
++ sizeof(fcp_dl_t);
++
++ /* generate SBALEs from data buffer */
++ real_bytes = zfcp_qdio_sbals_from_scsicmnd(
++ fsf_req,
++ sbtype,
++ scsi_cmnd);
++ if (real_bytes < 0) {
++ if (fsf_req->sbal_number < ZFCP_MAX_SBALS_PER_REQ) {
++ ZFCP_LOG_DEBUG(
++ "Data did not fit into available buffer(s), "
++ "waiting for more...\n");
++ retval = -EIO;
++ } else {
++ ZFCP_LOG_NORMAL(
++ "error: Too large SCSI data buffer. "
++ "Shutting down unit "
++ "(devno=0x%04x, WWPN=0x%016Lx, FCP_LUN=0x%016Lx)\n",
++ unit->port->adapter->devno,
++ (llui_t)unit->port->wwpn,
++ (llui_t)unit->fcp_lun);
++ zfcp_erp_unit_shutdown(unit, 0);
++ retval = -EINVAL;
++ }
++ goto no_fit;
++ }
++
++ /* set length of FCP data length in FCP_CMND IU in QTCB */
++ zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
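++ /* (the FCP_DL field is located right behind the variable-length CDB) */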
++
++ ZFCP_LOG_DEBUG("Sending SCSI command:\n");
++ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
++ (char *)scsi_cmnd->cmnd,
++ scsi_cmnd->cmd_len);
++
++ /*
++ * start QDIO request for this FSF request
++ */
++ {
++ int i, pos;
++ ZFCP_LOG_DEBUG(
++ "opcode=0x%x, sbal_first=%d, "
++ "sbal_curr=%d, sbal_last=%d, "
++ "sbal_number=%d, sbale_curr=%d\n",
++ scsi_cmnd->cmnd[0],
++ fsf_req->sbal_first,
++ fsf_req->sbal_curr,
++ fsf_req->sbal_last,
++ fsf_req->sbal_number,
++ fsf_req->sbale_curr);
++ for (i = 0; i < fsf_req->sbal_number; i++) {
++ pos = (fsf_req->sbal_first + i) % QDIO_MAX_BUFFERS_PER_Q;
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_DEBUG,
++ (char*)adapter->request_queue.buffer[pos],
++ sizeof(qdio_buffer_t));
++ }
++ }
++ retval = zfcp_fsf_req_send(fsf_req, NULL);
++ if (retval < 0) {
++ ZFCP_LOG_INFO(
++ "error: Could not send an FCP command request "
++ "for a command on the adapter with devno 0x%04x, "
++ "port WWPN 0x%016Lx and unit FCP_LUN 0x%016Lx\n",
++ adapter->devno,
++ (llui_t)unit->port->wwpn,
++ (llui_t)unit->fcp_lun);
++ goto send_failed;
++ }
++
++ ZFCP_LOG_TRACE(
++ "Send FCP Command initiated "
++ "(adapter devno=0x%04x, port WWPN=0x%016Lx, unit FCP_LUN=0x%016Lx)\n",
++ adapter->devno,
++ (llui_t)unit->port->wwpn,
++ (llui_t)unit->fcp_lun);
++ goto success;
++
++send_failed:
++no_fit:
++failed_scsi_cmnd:
++ /* dequeue new FSF request previously enqueued */
++#ifdef ZFCP_DEBUG_REQUESTS
++ debug_text_event(adapter->req_dbf, 3, "fail_sc");
++ debug_event(adapter->req_dbf, 3, &scsi_cmnd, sizeof(unsigned long));
++#endif /* ZFCP_DEBUG_REQUESTS */
++
++ if (zfcp_fsf_req_free(fsf_req)) {
++ ZFCP_LOG_INFO(
++ "error: Could not remove an FSF request from "
++ "the otubound (send) list (debug info 0x%lx)\n",
++ (unsigned long)fsf_req);
++ }
++ fsf_req = NULL;
++
++success:
++failed_req_create:
++ write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
++
++ ZFCP_LOG_TRACE("exit (%d)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_send_fcp_command_task_management
++ *
++ * purpose: creates and sends an FSF FCP command request carrying a
++ * task management function for the given unit
++ *
++ * returns: pointer to the created FSF request, or
++ * NULL - request could not be created or sent
++ *
++ * FIXME(design): shouldn't this be modified to return an int
++ * as well? It is unclear how, though.
++ */
++static zfcp_fsf_req_t*
++ zfcp_fsf_send_fcp_command_task_management(
++ zfcp_adapter_t *adapter,
++ zfcp_unit_t *unit,
++ u8 tm_flags,
++ int req_flags)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ zfcp_fsf_req_t *fsf_req = NULL;
++ int retval = 0;
++ fcp_cmnd_iu_t *fcp_cmnd_iu;
++ unsigned long lock_flags;
++
++ volatile qdio_buffer_element_t *sbale;
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter devno=0x%04x, unit=0x%lx, tm_flags=0x%x, "
++ "req_flags=0x%x)\n",
++ adapter->devno,
++ (unsigned long)unit,
++ tm_flags,
++ req_flags);
++
++ /* setup new FSF request */
++ retval = zfcp_fsf_req_create(
++ adapter,
++ FSF_QTCB_FCP_CMND,
++ req_flags,
++ &adapter->pool.fsf_req_scsi,
++ &lock_flags,
++ &fsf_req);
++ if (retval < 0) {
++ ZFCP_LOG_INFO("error: Out of resources. Could not create an "
++ "FCP command (task management) request for "
++ "the adapter with devno 0x%04x, port with "
++ "WWPN 0x%016Lx and FCP_LUN 0x%016Lx.\n",
++ adapter->devno,
++ (llui_t)unit->port->wwpn,
++ (llui_t)unit->fcp_lun);
++ goto out;
++ }
++
++ /* Used to decide on proper handler in the return path,
++ * could be either zfcp_fsf_send_fcp_command_task_handler or
++ * zfcp_fsf_send_fcp_command_task_management_handler */
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
++ /*
++ * hold a pointer to the unit being target of this
++ * task management request
++ */
++ fsf_req->data.send_fcp_command_task_management.unit = unit;
++
++ /* set FSF related fields in QTCB */
++ fsf_req->qtcb->header.lun_handle = unit->handle;
++ fsf_req->qtcb->header.port_handle = unit->port->handle;
++ fsf_req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
++ fsf_req->qtcb->bottom.io.service_class
++ = adapter->fc_service_class;
++ fsf_req->qtcb->bottom.io.fcp_cmnd_length
++ = sizeof(fcp_cmnd_iu_t) + sizeof(fcp_dl_t);
++
++ sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
++ sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
++ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
++
++ /* set FCP related fields in FCP_CMND IU in QTCB */
++ fcp_cmnd_iu = (fcp_cmnd_iu_t*)
++ &(fsf_req->qtcb->bottom.io.fcp_cmnd);
++ fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
++ fcp_cmnd_iu->task_management_flags = tm_flags;
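++ /* tm_flags selects the task management function to be performed,
++ * e.g. a logical unit reset or a target reset */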
++
++ /* start QDIO request for this FSF request */
++ zfcp_fsf_start_scsi_er_timer(adapter);
++ retval = zfcp_fsf_req_send(fsf_req, NULL);
++ if (retval) {
++ del_timer(&adapter->scsi_er_timer);
++ ZFCP_LOG_INFO(
++ "error: Could not send an FCP-command (task management) "
++ "on the adapter with devno 0x%04x, "
++ "port WWPN 0x%016Lx for unit FCP_LUN 0x%016Lx\n",
++ adapter->devno,
++ (llui_t)unit->port->wwpn,
++ (llui_t)unit->fcp_lun);
++ if (zfcp_fsf_req_free(fsf_req)){
++ ZFCP_LOG_NORMAL(
++ "bug: Could not remove one FSF "
++ "request. Memory leakage possible. "
++ "(debug info 0x%lx).\n",
++ (unsigned long)fsf_req);
++ retval = -EINVAL;
++ }
++ fsf_req = NULL;
++ goto out;
++ }
++
++ ZFCP_LOG_TRACE(
++ "Send FCP Command (task management function) initiated "
++ "(adapter devno=0x%04x, port WWPN=0x%016Lx, unit FCP_LUN=0x%016Lx, "
++ "tm_flags=0x%x)\n",
++ adapter->devno,
++ (llui_t)unit->port->wwpn,
++ (llui_t)unit->fcp_lun,
++ tm_flags);
++
++out:
++ write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
++
++ ZFCP_LOG_TRACE("exit (0x%lx)\n", (unsigned long)fsf_req);
++
++ return fsf_req;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_send_fcp_command_handler
++ *
++ * purpose: is called for finished Send FCP Command
++ *
++ * returns: return value of the command-type specific handler
++ */
++static int
++ zfcp_fsf_send_fcp_command_handler(
++ zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = -EINVAL;
++ zfcp_unit_t *unit;
++ fsf_qtcb_header_t *header = &fsf_req->qtcb->header;
++ u16 subtable, rule, counter;
++
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
++ unit = fsf_req->data.send_fcp_command_task_management.unit;
++ else
++ unit = fsf_req->data.send_fcp_command_task.unit;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx)\n",
++ (unsigned long)fsf_req);
++
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
++ /* go directly to calls of special handlers */
++ goto skip_fsfstatus;
++ }
++
++ /* evaluate FSF status in QTCB */
++ switch (fsf_req->qtcb->header.fsf_status) {
++
++ case FSF_PORT_HANDLE_NOT_VALID:
++ ZFCP_LOG_FLAGS(1, "FSF_PORT_HANDLE_NOT_VALID\n");
++ ZFCP_LOG_INFO("Temporary port identifier (handle) 0x%x "
++ "for the port with WWPN 0x%016Lx connected to "
++ "the adapter of devno 0x%04x is not valid.\n",
++ unit->port->handle,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
++ (char*)&fsf_req->qtcb->header.fsf_status_qual,
++ 16);
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_s_phand_nv");
++ zfcp_erp_adapter_reopen(unit->port->adapter, 0);
++ zfcp_cmd_dbf_event_fsf(
++ "porthinv", fsf_req,
++ &fsf_req->qtcb->header.fsf_status_qual, sizeof(fsf_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_LUN_HANDLE_NOT_VALID:
++ ZFCP_LOG_FLAGS(1, "FSF_LUN_HANDLE_NOT_VALID\n");
++ ZFCP_LOG_INFO("Temporary LUN identifier (handle) 0x%x "
++ "of the logical unit with FCP_LUN 0x%016Lx at "
++ "the remote port with WWPN 0x%016Lx connected "
++ "to the adapter with devno 0x%04x is "
++ "not valid. This may happen occasionally.\n",
++ unit->handle,
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ ZFCP_LOG_NORMAL("Status qualifier data:\n");
++ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
++ (char*)&fsf_req->qtcb->header.fsf_status_qual,
++ 16);
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_s_uhand_nv");
++ zfcp_erp_port_reopen(unit->port, 0);
++ zfcp_cmd_dbf_event_fsf(
++ "lunhinv", fsf_req,
++ &fsf_req->qtcb->header.fsf_status_qual, sizeof(fsf_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_HANDLE_MISMATCH:
++ ZFCP_LOG_FLAGS(0, "FSF_HANDLE_MISMATCH\n");
++ ZFCP_LOG_NORMAL("bug: The port handle (temporary port "
++ "identifier) 0x%x has changed unexpectedly. "
++ "This was detected upon receiveing the response "
++ "of a command send to the unit with FCP_LUN "
++ "0x%016Lx at the remote port with WWPN 0x%016Lx "
++ "connected to the adapter with devno 0x%04x.\n",
++ unit->port->handle,
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ ZFCP_LOG_NORMAL("status qualifier:\n");
++ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
++ (char*)&fsf_req->qtcb->header.fsf_status_qual,
++ 16);
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_s_hand_mis");
++ zfcp_erp_adapter_reopen(unit->port->adapter, 0);
++ zfcp_cmd_dbf_event_fsf(
++ "handmism", fsf_req,
++ &fsf_req->qtcb->header.fsf_status_qual, sizeof(fsf_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_SERVICE_CLASS_NOT_SUPPORTED :
++ ZFCP_LOG_FLAGS(0, "FSF_SERVICE_CLASS_NOT_SUPPORTED\n");
++ if (fsf_req->adapter->fc_service_class <= 3) {
++ ZFCP_LOG_NORMAL("error: The adapter with devno=0x%04x does "
++ "not support fibre-channel class %d.\n",
++ unit->port->adapter->devno,
++ fsf_req->adapter->fc_service_class);
++ } else {
++ ZFCP_LOG_NORMAL(
++ "bug: The fibre channel class at the adapter "
++ "with devno 0x%04x is invalid. "
++ "(debug info %d)\n",
++ unit->port->adapter->devno,
++ fsf_req->adapter->fc_service_class);
++ }
++ /* stop operation for this adapter */
++ debug_text_exception(fsf_req->adapter->erp_dbf,0,"fsf_s_class_nsup");
++ zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
++ zfcp_cmd_dbf_event_fsf(
++ "unsclass", fsf_req,
++ &fsf_req->qtcb->header.fsf_status_qual, sizeof(fsf_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_FCPLUN_NOT_VALID:
++ ZFCP_LOG_FLAGS(0, "FSF_FCPLUN_NOT_VALID\n");
++ ZFCP_LOG_NORMAL("bug: The FCP_LUN 0x%016Lx behind the remote port "
++ "of WWPN 0x%016Lx via the adapter with "
++ "devno 0x%04x does not have the correct unit "
++ "handle (temporary unit identifier) 0x%x\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno,
++ unit->handle);
++ ZFCP_LOG_DEBUG("status qualifier:\n");
++ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
++ (char*)&fsf_req->qtcb->header.fsf_status_qual,
++ 16);
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_s_fcp_lun_nv");
++ zfcp_erp_port_reopen(unit->port, 0);
++ zfcp_cmd_dbf_event_fsf(
++ "fluninv", fsf_req,
++ &fsf_req->qtcb->header.fsf_status_qual, sizeof(fsf_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_ACCESS_DENIED :
++ ZFCP_LOG_FLAGS(2, "FSF_ACCESS_DENIED\n");
++ ZFCP_LOG_NORMAL("Access denied, cannot send FCP command "
++ "(devno=0x%04x wwpn=0x%016Lx lun=0x%016Lx)\n",
++ unit->port->adapter->devno,
++ (llui_t)unit->port->wwpn,
++ (llui_t)unit->fcp_lun);
++ for (counter = 0; counter < 2; counter++) {
++ subtable = header->fsf_status_qual.halfword[counter * 2];
++ rule = header->fsf_status_qual.halfword[counter * 2 + 1];
++ switch (subtable) {
++ case FSF_SQ_CFDC_SUBTABLE_OS:
++ case FSF_SQ_CFDC_SUBTABLE_PORT_WWPN:
++ case FSF_SQ_CFDC_SUBTABLE_PORT_DID:
++ case FSF_SQ_CFDC_SUBTABLE_LUN:
++ ZFCP_LOG_INFO("Access denied (%s rule %d)\n",
++ zfcp_act_subtable_type[subtable], rule);
++ break;
++ }
++ }
++ debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_access");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_DIRECTION_INDICATOR_NOT_VALID:
++ ZFCP_LOG_FLAGS(0, "FSF_DIRECTION_INDICATOR_NOT_VALID\n");
++ ZFCP_LOG_INFO("bug: Invalid data direction given for the unit "
++ "with FCP_LUN 0x%016Lx at the remote port with "
++ "WWPN 0x%016Lx via the adapter with devno 0x%04x "
++ "(debug info %d)\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno,
++ fsf_req->qtcb->bottom.io.data_direction);
++ /* stop operation for this adapter */
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"fsf_s_dir_ind_nv");
++ zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
++ zfcp_cmd_dbf_event_fsf(
++ "dirinv", fsf_req,
++ &fsf_req->qtcb->header.fsf_status_qual, sizeof(fsf_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ /* FIXME: this should be obsolete, isn't it? */
++ case FSF_INBOUND_DATA_LENGTH_NOT_VALID:
++ ZFCP_LOG_FLAGS(0, "FSF_INBOUND_DATA_LENGTH_NOT_VALID\n");
++ ZFCP_LOG_NORMAL("bug: An invalid inbound data length field "
++ "was found in a command for the unit with "
++ "FCP_LUN 0x%016Lx of the remote port "
++ "with WWPN 0x%016Lx via the adapter with "
++ "devno 0x%04x\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ /* stop operation for this adapter */
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"fsf_s_in_dl_nv");
++ zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
++ zfcp_cmd_dbf_event_fsf(
++ "odleninv", fsf_req,
++ &fsf_req->qtcb->header.fsf_status_qual, sizeof(fsf_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ /* FIXME: this should be obsolete, isn't it? */
++ case FSF_OUTBOUND_DATA_LENGTH_NOT_VALID:
++ ZFCP_LOG_FLAGS(0, "FSF_OUTBOUND_DATA_LENGTH_NOT_VALID\n");
++ ZFCP_LOG_NORMAL("bug: An invalid outbound data length field "
++ "was found in a command for the unit with "
++ "FCP_LUN 0x%016Lx of the remote port "
++ "with WWPN 0x%016Lx via the adapter with "
++ "devno 0x%04x\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ /* stop operation for this adapter */
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"fsf_s_out_dl_nv");
++ zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
++ zfcp_cmd_dbf_event_fsf(
++ "idleninv", fsf_req,
++ &fsf_req->qtcb->header.fsf_status_qual, sizeof(fsf_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_CMND_LENGTH_NOT_VALID:
++ ZFCP_LOG_FLAGS(0, "FSF_CMND_LENGTH_NOT_VALID\n");
++ ZFCP_LOG_NORMAL("bug: An invalid control-data-block length field "
++ "was found in a command for the unit with "
++ "FCP_LUN 0x%016Lx of the remote port "
++ "with WWPN 0x%016Lx via the adapter with "
++ "devno 0x%04x (debug info %d)\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno,
++ fsf_req->qtcb->bottom.io.fcp_cmnd_length);
++ /* stop operation for this adapter */
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"fsf_s_cmd_len_nv");
++ zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
++ zfcp_cmd_dbf_event_fsf(
++ "cleninv", fsf_req,
++ &fsf_req->qtcb->header.fsf_status_qual, sizeof(fsf_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++
++ case FSF_PORT_BOXED :
++ ZFCP_LOG_FLAGS(2, "FSF_PORT_BOXED\n");
++ ZFCP_LOG_DEBUG("The remote port "
++ "with WWPN 0x%016Lx on the adapter with "
++ "devno 0x%04x needs to be reopened\n",
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ debug_text_event(fsf_req->adapter->erp_dbf,2,"fsf_s_pboxed");
++ zfcp_erp_port_reopen(unit->port, 0);
++ zfcp_cmd_dbf_event_fsf(
++ "portbox", fsf_req,
++ &fsf_req->qtcb->header.fsf_status_qual, sizeof(fsf_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
++ | ZFCP_STATUS_FSFREQ_RETRY;
++ break;
++
++ case FSF_LUN_BOXED :
++ ZFCP_LOG_FLAGS(0, "FSF_LUN_BOXED\n");
++ ZFCP_LOG_NORMAL(
++ "The remote unit needs to be reopened "
++ "(devno=0x%04x wwpn=0x%016Lx lun=0x%016Lx)\n",
++ unit->port->adapter->devno,
++ (llui_t)unit->port->wwpn,
++ (llui_t)unit->fcp_lun);
++ debug_text_event(fsf_req->adapter->erp_dbf, 1, "fsf_s_lboxed");
++ zfcp_erp_unit_reopen(unit, 0);
++ zfcp_cmd_dbf_event_fsf(
++ "unitbox",
++ fsf_req,
++ &fsf_req->qtcb->header.fsf_status_qual,
++ sizeof(fsf_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR
++ | ZFCP_STATUS_FSFREQ_RETRY;
++ break;
++
++ case FSF_ADAPTER_STATUS_AVAILABLE :
++ ZFCP_LOG_FLAGS(2, "FSF_ADAPTER_STATUS_AVAILABLE\n");
++ switch (fsf_req->qtcb->header.fsf_status_qual.word[0]){
++ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE :
++ ZFCP_LOG_FLAGS(2, "FSF_SQ_INVOKE_LINK_TEST_PROCEDURE\n");
++ /* re-establish link to port */
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_sq_ltest");
++ zfcp_erp_port_reopen(unit->port, 0);
++ zfcp_cmd_dbf_event_fsf(
++ "sqltest", fsf_req,
++ &fsf_req->qtcb->header.fsf_status_qual, sizeof(fsf_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED :
++ ZFCP_LOG_FLAGS(3, "FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED\n");
++ /* FIXME(hw) need proper specs for proper action */
++ /* let scsi stack deal with retries and escalation */
++ debug_text_event(fsf_req->adapter->erp_dbf,1,"fsf_sq_ulp");
++ zfcp_cmd_dbf_event_fsf(
++ "sqdeperp", fsf_req,
++ &fsf_req->qtcb->header.fsf_status_qual, sizeof(fsf_status_qual_t));
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
++ break;
++ default:
++ /* FIXME: shall we consider this a successful transfer? */
++ ZFCP_LOG_NORMAL("bug: Wrong status qualifier 0x%x arrived.\n",
++ fsf_req->qtcb->header.fsf_status_qual.word[0]);
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"fsf_sq_inval:");
++ debug_exception(fsf_req->adapter->erp_dbf,0,
++ &fsf_req->qtcb->header.fsf_status_qual.word[0],
++ sizeof(u32));
++ break;
++ }
++ break;
++
++ case FSF_GOOD:
++ ZFCP_LOG_FLAGS(3, "FSF_GOOD\n");
++ break;
++
++ case FSF_FCP_RSP_AVAILABLE:
++ ZFCP_LOG_FLAGS(2, "FSF_FCP_RSP_AVAILABLE\n");
++ break;
++
++ default :
++ debug_text_event(fsf_req->adapter->erp_dbf,0,"fsf_s_inval:");
++ debug_exception(fsf_req->adapter->erp_dbf,0,
++ &fsf_req->qtcb->header.fsf_status,
++ sizeof(u32));
++ break;
++ }
++
++skip_fsfstatus:
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) {
++ retval = zfcp_fsf_send_fcp_command_task_management_handler(
++ fsf_req);
++ } else {
++ retval = zfcp_fsf_send_fcp_command_task_handler(
++ fsf_req);
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_send_fcp_command_task_handler
++ *
++ * purpose: evaluates FCP_RSP IU
++ *
++ * returns:
++ */
++static int zfcp_fsf_send_fcp_command_task_handler(
++ zfcp_fsf_req_t *fsf_req)
++
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = 0;
++
++ Scsi_Cmnd *scpnt;
++ fcp_rsp_iu_t *fcp_rsp_iu =
++ (fcp_rsp_iu_t*)
++ &(fsf_req->qtcb->bottom.io.fcp_rsp);
++ fcp_cmnd_iu_t *fcp_cmnd_iu =
++ (fcp_cmnd_iu_t*)
++ &(fsf_req->qtcb->bottom.io.fcp_cmnd);
++ u32 sns_len;
++ char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
++ unsigned long flags;
++ zfcp_unit_t *unit = fsf_req->data.send_fcp_command_task.unit;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx)\n",
++ (unsigned long)fsf_req);
++
++ read_lock_irqsave(&fsf_req->adapter->abort_lock, flags);
++ scpnt = fsf_req->data.send_fcp_command_task.scsi_cmnd;
++ if (!scpnt) {
++ ZFCP_LOG_DEBUG("Command with fsf_req 0x%lx is not associated to "
++ "a scsi command anymore. Aborted?\n",
++ (unsigned long)fsf_req);
++ goto out;
++ }
++
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTED) {
++ /* FIXME: (design) mid-layer should handle DID_ABORT like
++ * DID_SOFT_ERROR by retrying the request for devices
++ * that allow retries.
++ */
++ ZFCP_LOG_DEBUG("Setting DID_SOFT_ERROR and SUGGEST_RETRY\n");
++ scpnt->result |= DID_SOFT_ERROR << 16 |
++ SUGGEST_RETRY << 24;
++ goto skip_fsfstatus;
++ }
++
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
++ ZFCP_LOG_DEBUG("Setting DID_ERROR\n");
++ scpnt->result |= DID_ERROR << 16;
++ goto skip_fsfstatus;
++ }
++
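++ /*
++ * the SCSI result word is assembled as: driver_byte << 24 |
++ * host_byte << 16 | msg_byte << 8 | status_byte
++ */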
++ /* set message byte of result in SCSI command */
++ scpnt->result |= COMMAND_COMPLETE << 8;
++
++ /*
++ * copy SCSI status code of FCP_STATUS of FCP_RSP IU to status byte
++ * of result in SCSI command
++ */
++ scpnt->result |= fcp_rsp_iu->scsi_status;
++ if(fcp_rsp_iu->scsi_status) {
++ /* DEBUG */
++ ZFCP_LOG_DEBUG("status for SCSI Command:\n");
++ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
++ scpnt->cmnd,
++ scpnt->cmd_len);
++ ZFCP_LOG_DEBUG("SCSI status code 0x%x\n",
++ fcp_rsp_iu->scsi_status);
++ ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
++ (void *)fcp_rsp_iu,
++ sizeof(fcp_rsp_iu_t));
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_DEBUG,
++ zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu),
++ fcp_rsp_iu->fcp_sns_len);
++ }
++
++ /* check FCP_RSP_INFO */
++ if (fcp_rsp_iu->validity.bits.fcp_rsp_len_valid) {
++ ZFCP_LOG_DEBUG("rsp_len is valid\n");
++ switch (fcp_rsp_info[3]) {
++ case RSP_CODE_GOOD:
++ ZFCP_LOG_FLAGS(3, "RSP_CODE_GOOD\n");
++ /* ok, continue */
++ ZFCP_LOG_TRACE(
++ "no failure or Task Management "
++ "Function complete\n");
++ scpnt->result |= DID_OK << 16;
++ break;
++ case RSP_CODE_LENGTH_MISMATCH:
++ ZFCP_LOG_FLAGS(0, "RSP_CODE_LENGTH_MISMATCH\n");
++ /* hardware bug */
++ ZFCP_LOG_NORMAL(
++ "bug: FCP response code indicates "
++ "that the fibre-channel protocol data "
++ "length differs from the burst "
++ "length. The problem occurred on the unit "
++ "with FCP_LUN 0x%016Lx connected to the "
++ "port with WWPN 0x%016Lx at the adapter with "
++ "devno 0x%04x\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ /* dump SCSI CDB as prepared by zfcp */
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_DEBUG,
++ (char*)&fsf_req->qtcb->
++ bottom.io.fcp_cmnd,
++ FSF_FCP_CMND_SIZE);
++ zfcp_cmd_dbf_event_fsf("clenmism", fsf_req, NULL, 0);
++ scpnt->result |= DID_ERROR << 16;
++ goto skip_fsfstatus;
++ case RSP_CODE_FIELD_INVALID:
++ ZFCP_LOG_FLAGS(0, "RSP_CODE_FIELD_INVALID\n");
++ /* driver or hardware bug */
++ ZFCP_LOG_NORMAL(
++ "bug: FCP response code indicates "
++ "that the fibre-channel protocol data "
++ "fields were incorrectly set up. "
++ "The problem occurred on the unit "
++ "with FCP_LUN 0x%016Lx connected to the "
++ "port with WWPN 0x%016Lx at the adapter with "
++ "devno 0x%04x\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ /* dump SCSI CDB as prepared by zfcp */
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_DEBUG,
++ (char*)&fsf_req->qtcb->
++ bottom.io.fcp_cmnd,
++ FSF_FCP_CMND_SIZE);
++ zfcp_cmd_dbf_event_fsf("codeinv", fsf_req, NULL, 0);
++ scpnt->result |= DID_ERROR << 16;
++ goto skip_fsfstatus;
++ case RSP_CODE_RO_MISMATCH:
++ ZFCP_LOG_FLAGS(0, "RSP_CODE_RO_MISMATCH\n");
++ /* hardware bug */
++ ZFCP_LOG_NORMAL(
++ "bug: The FCP response code indicates "
++ "that conflicting values for the "
++ "fibre-channel payload offset from the "
++ "header were found. "
++ "The problem occurred on the unit "
++ "with FCP_LUN 0x%016Lx connected to the "
++ "port with WWPN 0x%016Lx at the adapter with "
++ "devno 0x%04x\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ /* dump SCSI CDB as prepared by zfcp */
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_DEBUG,
++ (char*)&fsf_req->qtcb->
++ bottom.io.fcp_cmnd,
++ FSF_FCP_CMND_SIZE);
++ zfcp_cmd_dbf_event_fsf("codemism", fsf_req, NULL, 0);
++ scpnt->result |= DID_ERROR << 16;
++ goto skip_fsfstatus;
++ default :
++ ZFCP_LOG_NORMAL(
++ "bug: An invalid FCP response "
++ "code was detected for a command. "
++ "The problem occurred on the unit "
++ "with FCP_LUN 0x%016Lx connected to the "
++ "port with WWPN 0x%016Lx at the adapter with "
++ "devno 0x%04x (debug info 0x%x)\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno,
++ fcp_rsp_info[3]);
++ /* dump SCSI CDB as prepared by zfcp */
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_DEBUG,
++ (char*)&fsf_req->qtcb->
++ bottom.io.fcp_cmnd,
++ FSF_FCP_CMND_SIZE);
++ zfcp_cmd_dbf_event_fsf("undeffcp", fsf_req, NULL, 0);
++ scpnt->result |= DID_ERROR << 16;
++ }
++ }
++
++ /* check for sense data */
++ if (fcp_rsp_iu->validity.bits.fcp_sns_len_valid) {
++ sns_len = FSF_FCP_RSP_SIZE -
++ sizeof(fcp_rsp_iu_t) +
++ fcp_rsp_iu->fcp_rsp_len;
++ ZFCP_LOG_TRACE(
++ "room for %i bytes sense data in QTCB\n",
++ sns_len);
++ sns_len = min(sns_len, (u32)SCSI_SENSE_BUFFERSIZE);
++ ZFCP_LOG_TRACE(
++ "room for %i bytes sense data in SCSI command\n",
++ SCSI_SENSE_BUFFERSIZE);
++ sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len);
++ ZFCP_LOG_TRACE("scpnt->result=0x%x, command was:\n",
++ scpnt->result);
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_TRACE,
++ (void *)&scpnt->cmnd,
++ scpnt->cmd_len);
++
++ ZFCP_LOG_TRACE(
++ "%i bytes sense data provided by FCP\n",
++ fcp_rsp_iu->fcp_sns_len);
++ memcpy( &scpnt->sense_buffer,
++ zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu),
++ sns_len);
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_TRACE,
++ (void *)&scpnt->sense_buffer,
++ sns_len);
++ }
++
++ /* check for overrun */
++ if (fcp_rsp_iu->validity.bits.fcp_resid_over) {
++ ZFCP_LOG_INFO(
++ "A data overrun was detected for a command. "
++ "This happened for a command to the unit "
++ "with FCP_LUN 0x%016Lx connected to the "
++ "port with WWPN 0x%016Lx at the adapter with "
++ "devno 0x%04x. The response data length is "
++ "%d, the original length was %d.\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno,
++ fcp_rsp_iu->fcp_resid,
++ zfcp_get_fcp_dl(fcp_cmnd_iu));
++ }
++
++ /* check for underrun */
++ if (fcp_rsp_iu->validity.bits.fcp_resid_under) {
++ ZFCP_LOG_DEBUG(
++ "A data underrun was detected for a command. "
++ "This happened for a command to the unit "
++ "with FCP_LUN 0x%016Lx connected to the "
++ "port with WWPN 0x%016Lx at the adapter with "
++ "devno 0x%04x. The response data length is "
++ "%d, the original length was %d.\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno,
++ fcp_rsp_iu->fcp_resid,
++ zfcp_get_fcp_dl(fcp_cmnd_iu));
++ /*
++ * It may not have been possible to send all data, and an
++ * underrun on send may already be recorded in scpnt->resid,
++ * hence the addition (rather than an assignment) in the
++ * statement below.
++ */
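++ /*
++ * Illustrative (hypothetical) numbers for the addition below:
++ * if 0x200 bytes could not be sent, and the target later
++ * reports fcp_resid == 0x100, the total residual is 0x300:
++ *
++ * scpnt->resid = 0x200; // recorded earlier on the send path
++ * scpnt->resid += 0x100; // fcp_resid from the FCP_RSP IU
++ * // scpnt->resid == 0x300
++ */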
++ scpnt->resid += fcp_rsp_iu->fcp_resid;
++ ZFCP_LOG_TRACE("scpnt->resid=0x%x\n",
++ scpnt->resid);
++ }
++
++skip_fsfstatus:
++#if 0
++ /*
++ * This nasty chop at the problem no longer works, since we
++ * no longer adjust the retry count in order to have a number
++ * of retries that avoids I/O errors.
++ * The manipulation of the retry count has been removed
++ * in favour of safe tape device handling. We must not
++ * send SCSI commands more than once to a device if no
++ * retries are permitted by the high-level driver. Generally
++ * speaking, it was a mess to change retry counts. So it is
++ * fine that this sort of workaround is gone.
++ * We then had to face a certain number of immediate retries
++ * in case of busy and queue-full conditions (see below),
++ * which is not acceptable
++ * for the latter. Queue-full conditions are used
++ * by devices to indicate to a host that the host can rely
++ * on the completion (or timeout) of at least one outstanding
++ * command as a suggested trigger for command retries.
++ * Busy conditions require a different trigger since
++ * no commands are outstanding for that initiator from the
++ * device's perspective.
++ * The drawback of mapping a queue full condition to a
++ * busy condition is the chance of wasting all retries prior
++ * to the time when the device indicates that a command
++ * rejected due to a queue full condition should be re-driven.
++ * This case would lead to unnecessary I/O errors that
++ * have to be considered fatal if for example ext3's
++ * journaling would be torpedoed by such an avoidable
++ * I/O error.
++ * So, what issues are there with not mapping a queue-full
++ * condition to a busy condition?
++ * Due to the 'exclusive LUN'
++ * policy enforced by the zSeries FCP channel, this
++ * Linux instance is the only initiator with regard to
++ * this adapter. It is safe to rely on the information
++ * 'don't disturb me now ... and btw. no other commands
++ * pending for you' (= queue full) sent by the LU,
++ * since no other Linux can use this LUN via this adapter
++ * at the same time. If the FCP channel introduces a
++ * potential race by not preventing Linux A from giving up
++ * a LU with commands pending while Linux B grabs this LU
++ * and sends commands - thus defeating the 'exclusive LUN'
++ * policy - then this issue has to be considered a hardware
++ * problem. It should be tracked as such if it really occurs,
++ * even though the FCP channel spec. asks implementers to
++ * wait for the completion of all requests sent to a LU
++ * prior to closing this LU connection.
++ * This spec. statement in conjunction with
++ * the 'exclusive LUN' policy is not a consistent design.
++ * Another issue is how resource constraints for SCSI commands
++ * might be handled by the FCP channel (just guessing for now).
++ * If the FCP channel would always map resource constraints,
++ * e.g. no free FC exchange ID due to I/O stress caused by
++ * other sharing Linux instances, to faked queue-full
++ * conditions then this would be a misinterpretation and
++ * violation of SCSI standards.
++ * If there are SCSI stack races as indicated below
++ * then they need to be fixed just there.
++ * Provided all issues above are not applicable or will
++ * be fixed appropriately, removing the following hack
++ * is the right thing to do.
++ */
++
++ /*
++ * Note: This is a rather nasty chop at the problem. We cannot
++ * risk adding to the mlqueue however as this will block the
++ * device. If it is the last outstanding command for this host
++ * it will remain blocked indefinitely. This would be quite possible
++ * on the zSeries FCP adapter.
++ * Also, there exists a race with scsi_insert_special relying on
++ * scsi_request_fn to recalculate some command data which may not
++ * happen when q->plugged is true in scsi_request_fn
++ */
++ if (status_byte(scpnt->result) == QUEUE_FULL) {
++ ZFCP_LOG_DEBUG("Changing QUEUE_FULL to BUSY....\n");
++ scpnt->result &= ~(QUEUE_FULL << 1);
++ scpnt->result |= (BUSY << 1);
++ }
++#endif
++
++ ZFCP_LOG_DEBUG("scpnt->result=0x%x\n",
++ scpnt->result);
++
++ zfcp_cmd_dbf_event_scsi("response", fsf_req->adapter, scpnt);
++
++ /* cleanup pointer (need this especially for abort) */
++ scpnt->host_scribble = NULL;
++ scpnt->SCp.ptr = (char*)0;
++
++ /*
++ * NOTE:
++ * according to the outcome of a discussion on linux-scsi we
++ * don't need to grab the io_request_lock here since we use
++ * the new eh
++ */
++ /* always call back */
++#ifdef ZFCP_DEBUG_REQUESTS
++ debug_text_event(fsf_req->adapter->req_dbf, 2, "ok_done:");
++ debug_event(fsf_req->adapter->req_dbf, 2, &scpnt, sizeof(unsigned long));
++ debug_event(fsf_req->adapter->req_dbf, 2, &scpnt->scsi_done,
++ sizeof(unsigned long));
++ debug_event(fsf_req->adapter->req_dbf, 2, &fsf_req, sizeof(unsigned long));
++#endif /* ZFCP_DEBUG_REQUESTS */
++ (scpnt->scsi_done)(scpnt);
++ /*
++ * We must hold this lock until scsi_done has been called.
++ * Otherwise we may call scsi_done after an abort concerning
++ * this command has completed.
++ * Note: scsi_done must not block!
++ */
++out:
++ read_unlock_irqrestore(&fsf_req->adapter->abort_lock, flags);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_send_fcp_command_task_management_handler
++ *
++ * purpose: evaluates FCP_RSP IU
++ *
++ * returns:
++ */
++static int zfcp_fsf_send_fcp_command_task_management_handler(
++ zfcp_fsf_req_t *fsf_req)
++
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = 0;
++ fcp_rsp_iu_t *fcp_rsp_iu =
++ (fcp_rsp_iu_t*)
++ &(fsf_req->qtcb->bottom.io.fcp_rsp);
++ char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
++ zfcp_unit_t *unit = fsf_req->data.send_fcp_command_task_management.unit;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx)\n",
++ (unsigned long)fsf_req);
++
++ del_timer(&fsf_req->adapter->scsi_er_timer);
++ if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
++ goto skip_fsfstatus;
++ }
++
++ /* check FCP_RSP_INFO */
++ switch (fcp_rsp_info[3]) {
++ case RSP_CODE_GOOD:
++ ZFCP_LOG_FLAGS(3, "RSP_CODE_GOOD\n");
++ /* ok, continue */
++ ZFCP_LOG_DEBUG(
++ "no failure or Task Management "
++ "Function complete\n");
++ break;
++ case RSP_CODE_TASKMAN_UNSUPP:
++ ZFCP_LOG_FLAGS(0, "RSP_CODE_TASKMAN_UNSUPP\n");
++ ZFCP_LOG_NORMAL(
++ "bug: A requested task management function "
++ "is not supported on the target device. "
++ "The corresponding device is the unit with "
++ "FCP_LUN 0x%016Lx at the port "
++ "with WWPN 0x%016Lx at the adapter with devno "
++ "0x%04x\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ fsf_req->status
++ |= ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP;
++ break;
++ case RSP_CODE_TASKMAN_FAILED:
++ ZFCP_LOG_FLAGS(0, "RSP_CODE_TASKMAN_FAILED\n");
++ ZFCP_LOG_NORMAL(
++ "bug: A requested task management function "
++ "failed to complete successfully. "
++ "The corresponding device is the unit with "
++ "FCP_LUN 0x%016Lx at the port "
++ "with WWPN 0x%016Lx at the adapter with devno "
++ "0x%04x\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ fsf_req->status
++ |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
++ break;
++ default :
++ ZFCP_LOG_NORMAL(
++ "bug: An invalid FCP response "
++ "code was detected for a command. "
++ "The problem occurred on the unit "
++ "with FCP_LUN 0x%016Lx connected to the "
++ "port with WWPN 0x%016Lx at the adapter with "
++ "devno 0x%04x (debug info 0x%x)\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno,
++ fcp_rsp_info[3]);
++ fsf_req->status
++ |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
++ }
++
++skip_fsfstatus:
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_req_wait_and_cleanup
++ *
++ * purpose:
++ *
++ * FIXME(design): signal seems to be <0 !!!
++ * returns: 0 - request completed (*status is valid), cleanup succeeded
++ * <0 - request completed (*status is valid), cleanup failed
++ * >0 - signal which interrupted waiting (*status is not valid),
++ * request not completed, no cleanup
++ *
++ * *status is a copy of status of completed fsf_req
++ */
++static int zfcp_fsf_req_wait_and_cleanup(
++ zfcp_fsf_req_t *fsf_req,
++ int interruptible,
++ u32 *status)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = 0;
++ int signal = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx, "
++ "interruptible=%d, *status=0x%x)\n",
++ (unsigned long)fsf_req,
++ interruptible,
++ *status);
++
++ if (interruptible) {
++ __wait_event_interruptible(
++ fsf_req->completion_wq,
++ fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED,
++ signal);
++ if (signal) {
++ ZFCP_LOG_DEBUG(
++ "Caught signal %i while waiting for the "
++ "completion of the request at 0x%lx\n",
++ signal,
++ (unsigned long)fsf_req);
++ retval = signal;
++ goto out;
++ }
++ } else {
++ __wait_event(
++ fsf_req->completion_wq,
++ fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
++ }
++
++ *status = fsf_req->status;
++
++ /* cleanup request */
++ retval = zfcp_fsf_req_cleanup(fsf_req);
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++static inline int zfcp_fsf_req_create_sbal_check(
++ unsigned long *flags,
++ zfcp_qdio_queue_t *queue,
++ int needed)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ write_lock_irqsave(&queue->queue_lock, *flags);
++ if (atomic_read(&queue->free_count) >= needed)
++ return 1;
++ write_unlock_irqrestore(&queue->queue_lock, *flags);
++ return 0;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
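++/*
++ * Note the asymmetric locking of the helper above: on success the queue
++ * lock remains held (the caller proceeds under it), on failure it is
++ * dropped again. A minimal sketch of the resulting caller contract:
++ *
++ * if (zfcp_fsf_req_create_sbal_check(&flags, queue, 1)) {
++ * // queue->queue_lock is held (write, irqsave) here
++ * ...
++ * write_unlock_irqrestore(&queue->queue_lock, flags);
++ * }
++ */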
++
++/*
++ * set qtcb pointer in fsf_req and initialize QTCB
++ */
++static inline void zfcp_fsf_req_qtcb_init(zfcp_fsf_req_t *fsf_req, u32 fsf_cmd)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++ if (fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS) {
++ struct zfcp_fsf_req_pool_buffer *data =
++ (struct zfcp_fsf_req_pool_buffer *) fsf_req;
++ fsf_req->qtcb = &data->qtcb;
++ }
++
++ if (fsf_req->qtcb) {
++ ZFCP_LOG_TRACE("fsf_req->qtcb=0x%lx\n",
++ (unsigned long ) fsf_req->qtcb);
++ fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req;
++ fsf_req->qtcb->prefix.ulp_info = zfcp_data.driver_version;
++ fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
++ fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION;
++ fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req;
++ fsf_req->qtcb->header.fsf_command = fsf_cmd;
++ /* Request Sequence Number is set later when the request is
++ actually sent. */
++ }
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * try to get needed SBALs in request queue
++ * (get queue lock on success)
++ */
++static int zfcp_fsf_req_sbal_get(zfcp_adapter_t *adapter, int req_flags,
++ unsigned long *lock_flags)
++{
++ int condition;
++ unsigned long timeout = ZFCP_SBAL_TIMEOUT;
++ zfcp_qdio_queue_t *req_queue = &adapter->request_queue;
++
++ if (req_flags & ZFCP_WAIT_FOR_SBAL) {
++ ZFCP_WAIT_EVENT_TIMEOUT(adapter->request_wq, timeout,
++ (condition =
++ (zfcp_fsf_req_create_sbal_check)
++ (lock_flags, req_queue, 1)));
++ if (!condition)
++ return -EIO;
++ } else if (!zfcp_fsf_req_create_sbal_check(lock_flags, req_queue, 1))
++ return -EIO;
++
++ return 0;
++}
++
++
++/*
++ * function: zfcp_fsf_req_create
++ *
++ * purpose: create an FSF request at the specified adapter and
++ * setup common fields
++ *
++ * returns: -ENOMEM if there was insufficient memory for a request
++ * -EIO if no qdio buffers could be allocated to the request
++ * -EINVAL/-EPERM on bug conditions in req_dequeue
++ * 0 on success
++ *
++ * note: The created request is returned by reference.
++ *
++ * locks: lock of concerned request queue must not be held,
++ * but is held on completion (write, irqsave)
++ */
++static int zfcp_fsf_req_create(zfcp_adapter_t *adapter, u32 fsf_cmd,
++ int req_flags, zfcp_mem_pool_t *mem_pool,
++ unsigned long *lock_flags,
++ zfcp_fsf_req_t **fsf_req_p)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++ zfcp_fsf_req_t *fsf_req = NULL;
++ int retval=0;
++ zfcp_qdio_queue_t *req_queue = &adapter->request_queue;
++ volatile qdio_buffer_element_t *sbale;
++
++ ZFCP_LOG_TRACE("enter (adapter=0x%lx fsf_cmd=0x%x *lock_flags=0x%lx "
++ "req_flags=0x%x)\n", (unsigned long)adapter,
++ fsf_cmd, *lock_flags, req_flags);
++
++ atomic_inc(&adapter->reqs_in_progress);
++
++ /* allocate new FSF request */
++ fsf_req = zfcp_fsf_req_alloc(mem_pool, req_flags, GFP_ATOMIC);
++ if (!fsf_req) {
++ ZFCP_LOG_DEBUG(
++ "error: Could not put an FSF request into "
++ "the outbound (send) queue.\n");
++ retval=-ENOMEM;
++ goto failed_fsf_req;
++ }
++
++ zfcp_fsf_req_qtcb_init(fsf_req, fsf_cmd);
++
++ /* initialize waitqueue which may be used to wait on
++ this request completion */
++ init_waitqueue_head(&fsf_req->completion_wq);
++
++ retval = zfcp_fsf_req_sbal_get(adapter, req_flags, lock_flags);
++ if(retval < 0)
++ goto failed_sbals;
++
++ /*
++ * We hold queue_lock here. Check if QDIOUP is set and let request fail
++ * if it is not set (see also *_open_qdio and *_close_qdio).
++ */
++
++ if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
++ write_unlock_irqrestore(&req_queue->queue_lock, *lock_flags);
++ retval = -EIO;
++ goto failed_sbals;
++ }
++
++#ifndef ZFCP_PARANOIA_DEAD_CODE
++ /* set magics */
++ fsf_req->common_magic = ZFCP_MAGIC;
++ fsf_req->specific_magic = ZFCP_MAGIC_FSFREQ;
++#endif
++ fsf_req->adapter = adapter;
++ fsf_req->fsf_command = fsf_cmd;
++ fsf_req->sbal_number = 1;
++ fsf_req->sbal_first = req_queue->free_index;
++ fsf_req->sbal_curr = req_queue->free_index;
++ fsf_req->sbale_curr = 1;
++
++ if (req_flags & ZFCP_REQ_AUTO_CLEANUP)
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
++
++ sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
++
++ /* setup common SBALE fields */
++ sbale[0].addr = fsf_req;
++ sbale[0].flags |= SBAL_FLAGS0_COMMAND;
++ if (fsf_req->qtcb != 0) {
++ sbale[1].addr = (void *)fsf_req->qtcb;
++ sbale[1].length = sizeof(fsf_qtcb_t);
++ }
++
++ ZFCP_LOG_TRACE("got %i free BUFFERs starting at index %i\n",
++ fsf_req->sbal_number, fsf_req->sbal_first);
++
++ goto success;
++
++ failed_sbals:
++#ifdef ZFCP_STAT_QUEUES
++ atomic_inc(&adapter->outbound_queue_full);
++#endif
++ /* dequeue new FSF request previously enqueued */
++ zfcp_fsf_req_free(fsf_req);
++ fsf_req = NULL;
++
++ failed_fsf_req:
++ //failed_running:
++ write_lock_irqsave(&req_queue->queue_lock, *lock_flags);
++
++ success:
++ *fsf_req_p = fsf_req;
++ ZFCP_LOG_TRACE("exit (%d)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
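++/*
++ * A minimal usage sketch (illustrative only, not driver code) of the
++ * create/send/wait protocol implemented above. Error paths and timer
++ * handling of real callers are omitted; fsf_cmd, the flags and the
++ * NULL memory pool are placeholders:
++ *
++ * unsigned long lock_flags;
++ * zfcp_fsf_req_t *fsf_req;
++ * u32 status;
++ * int ret;
++ *
++ * ret = zfcp_fsf_req_create(adapter, fsf_cmd, ZFCP_WAIT_FOR_SBAL,
++ * NULL, &lock_flags, &fsf_req);
++ * // the request queue lock is held (write, irqsave) on return
++ * if (ret == 0)
++ * ret = zfcp_fsf_req_send(fsf_req, NULL);
++ * write_unlock_irqrestore(&adapter->request_queue.queue_lock,
++ * lock_flags);
++ * if (ret == 0)
++ * ret = zfcp_fsf_req_wait_and_cleanup(fsf_req, 0, &status);
++ */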
++
++
++static inline int zfcp_qdio_determine_pci(zfcp_qdio_queue_t *req_queue,
++ zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_QDIO
++ int new_distance_from_int;
++ int pci_pos;
++ volatile qdio_buffer_element_t *sbale;
++
++ ZFCP_LOG_TRACE("enter (0x%lx, 0x%lx)\n",
++ (unsigned long)req_queue,
++ (unsigned long)fsf_req);
++
++ new_distance_from_int = req_queue->distance_from_int +
++ fsf_req->sbal_number;
++
++ if (new_distance_from_int >= ZFCP_QDIO_PCI_INTERVAL) {
++ new_distance_from_int %= ZFCP_QDIO_PCI_INTERVAL;
++ pci_pos = fsf_req->sbal_first;
++ pci_pos += fsf_req->sbal_number;
++ pci_pos -= new_distance_from_int;
++ pci_pos -= 1;
++ pci_pos %= QDIO_MAX_BUFFERS_PER_Q;
++ sbale = zfcp_qdio_sbale_req(fsf_req, pci_pos, 0);
++ sbale->flags |= SBAL_FLAGS0_PCI;
++ ZFCP_LOG_DEBUG(
++ "Setting PCI flag at pos %d (0x%lx)\n",
++ pci_pos,
++ (unsigned long)sbale);
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_TRACE,
++ (char*)sbale,
++ sizeof(qdio_buffer_t));
++ }
++
++ ZFCP_LOG_TRACE("exit (%d)\n", new_distance_from_int);
++ return new_distance_from_int;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
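++/*
++ * Worked example for the arithmetic above, using hypothetical values
++ * (ZFCP_QDIO_PCI_INTERVAL is assumed to be 4 purely for illustration):
++ * with distance_from_int=2, sbal_first=10 and sbal_number=3 we get
++ * new_distance_from_int = 5 >= 4, which wraps to 5 % 4 = 1, and
++ * pci_pos = (10 + 3 - 1 - 1) % QDIO_MAX_BUFFERS_PER_Q = 11. That is,
++ * the SBAL completing the interval (the 4th SBAL since the last
++ * PCI-flagged one) gets the PCI flag set.
++ */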
++
++
++
++/*
++ * function: zfcp_fsf_req_send
++ *
++ * purpose: start transfer of FSF request via QDIO
++ *
++ * returns: 0 - request transfer successfully started
++ * !0 - start of request transfer failed
++ */
++static int zfcp_fsf_req_send(zfcp_fsf_req_t *fsf_req, struct timer_list *timer)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval = 0;
++ zfcp_adapter_t *adapter = fsf_req->adapter;
++ zfcp_qdio_queue_t *req_queue = &adapter->request_queue;
++ volatile qdio_buffer_element_t* sbale;
++ int inc_seq_no = 1;
++ int new_distance_from_int;
++ unsigned long flags;
++ int test_count;
++
++ u8 sbal_index = fsf_req->sbal_first;
++
++ ZFCP_LOG_TRACE(
++ "enter (fsf_req=0x%lx timer=0x%lx)\n",
++ (unsigned long)fsf_req,
++ (unsigned long)timer);
++
++ /* FIXME(debug): remove it later */
++ sbale = zfcp_qdio_sbale_req(fsf_req, sbal_index, 0);
++ ZFCP_LOG_DEBUG(
++ "SBALE0 flags=0x%x\n",
++ sbale[0].flags);
++ ZFCP_LOG_TRACE("HEX DUMP OF SBALE1 PAYLOAD:\n");
++ ZFCP_HEX_DUMP(
++ ZFCP_LOG_LEVEL_TRACE,
++ (char*)sbale[1].addr,
++ sbale[1].length);
++
++ test_count = (fsf_req->sbal_curr - fsf_req->sbal_first) + 1;
++ test_count += QDIO_MAX_BUFFERS_PER_Q; /* avoid modulo of a negative value */
++ test_count %= QDIO_MAX_BUFFERS_PER_Q;
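++ /*
++ * Example of the wrap-around handling above, assuming
++ * QDIO_MAX_BUFFERS_PER_Q is 128: sbal_first=126 and sbal_curr=1
++ * yield (1 - 126) + 1 = -124, then -124 + 128 = 4 and
++ * 4 % 128 = 4 - the request indeed spans the four SBALs
++ * 126, 127, 0 and 1.
++ */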
++ if (fsf_req->sbal_number != test_count)
++ ZFCP_LOG_NORMAL(
++ "error: inconsistent SBAL count in request "
++ "(%d, %d, %d, %d, %d)\n",
++ fsf_req->sbal_first,
++ fsf_req->sbal_curr,
++ fsf_req->sbal_last,
++ fsf_req->sbal_number,
++ test_count);
++
++ /* set sequence counter in QTCB */
++ if (fsf_req->qtcb) {
++ fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
++ fsf_req->seq_no = adapter->fsf_req_seq_no;
++ ZFCP_LOG_TRACE(
++ "FSF request 0x%lx of adapter 0x%lx gets "
++ "FSF sequence counter value of %i\n",
++ (unsigned long)fsf_req,
++ (unsigned long)adapter,
++ fsf_req->qtcb->prefix.req_seq_no);
++ } else
++ inc_seq_no = 0;
++
++ /* put allocated FSF request at list tail */
++ write_lock_irqsave(&adapter->fsf_req_list_lock, flags);
++ list_add_tail(&fsf_req->list,
++ &adapter->fsf_req_list_head);
++ write_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
++
++ /* figure out expiration time of timeout and start timeout */
++ if (timer) {
++ timer->expires += jiffies;
++ add_timer(timer);
++ }
++
++ ZFCP_LOG_TRACE(
++ "request queue of adapter with devno=0x%04x: "
++ "next free SBAL is %i, %i free SBALs\n",
++ adapter->devno,
++ req_queue->free_index,
++ atomic_read(&req_queue->free_count));
++
++ ZFCP_LOG_DEBUG(
++ "Calling do QDIO irq=0x%x, flags=0x%x, queue_no=%i, "
++ "index_in_queue=%i, count=%i, buffers=0x%lx\n",
++ adapter->irq,
++ QDIO_FLAG_SYNC_OUTPUT,
++ 0,
++ fsf_req->sbal_first,
++ fsf_req->sbal_number,
++ (unsigned long)&req_queue->buffer[sbal_index]);
++
++ /*
++ * adjust the number of free SBALs in request queue as well as
++ * position of first one
++ */
++ atomic_sub(fsf_req->sbal_number, &req_queue->free_count);
++ ZFCP_LOG_TRACE("free_count=%d\n",
++ atomic_read(&req_queue->free_count));
++ req_queue->free_index += fsf_req->sbal_number; /* increase */
++ req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap if needed */
++ new_distance_from_int = zfcp_qdio_determine_pci(req_queue, fsf_req);
++ retval = do_QDIO(
++ adapter->irq,
++ QDIO_FLAG_SYNC_OUTPUT,
++ 0,
++ fsf_req->sbal_first,
++ fsf_req->sbal_number,
++ NULL);
++
++ if (retval) {
++ /* Queues are down..... */
++ retval=-EIO;
++ /* FIXME(potential race): timer might be expired (absolutely unlikely) */
++ if (timer)
++ del_timer_sync(timer);
++ write_lock_irqsave(&adapter->fsf_req_list_lock, flags);
++ list_del(&fsf_req->list);
++ write_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
++ /*
++ * adjust the number of free SBALs in request queue as well as
++ * position of first one
++ */
++ zfcp_zero_sbals(
++ req_queue->buffer,
++ fsf_req->sbal_first,
++ fsf_req->sbal_number);
++ atomic_add(fsf_req->sbal_number, &req_queue->free_count);
++ req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q;
++ req_queue->free_index -= fsf_req->sbal_number;
++ req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q;
++
++ ZFCP_LOG_DEBUG(
++ "error: do_QDIO failed. Buffers could not be enqueued "
++ "to request queue.\n");
++ } else {
++ req_queue->distance_from_int = new_distance_from_int;
++#ifdef ZFCP_DEBUG_REQUESTS
++ debug_text_event(adapter->req_dbf, 1, "o:a/seq");
++ debug_event(adapter->req_dbf, 1, &fsf_req,
++ sizeof(unsigned long));
++ if (inc_seq_no)
++ debug_event(adapter->req_dbf, 1,
++ &adapter->fsf_req_seq_no, sizeof(u32));
++ else
++ debug_text_event(adapter->req_dbf, 1, "nocb");
++ debug_event(adapter->req_dbf, 4, &fsf_req->fsf_command,
++ sizeof(fsf_req->fsf_command));
++ if (fsf_req->qtcb)
++ debug_event(adapter->req_dbf, 5, &fsf_req->qtcb,
++ sizeof(unsigned long));
++ if (fsf_req && (fsf_req->status & ZFCP_STATUS_FSFREQ_POOL))
++ debug_text_event(adapter->req_dbf, 5, "fsfa_pl");
++#endif /* ZFCP_DEBUG_REQUESTS */
++ /*
++ * increase FSF sequence counter -
++ * this must only be done for requests successfully enqueued
++ * to QDIO; rejected requests may be cleaned up by the calling
++ * routines, which would otherwise result in missing sequence
++ * counter values
++ */
++ /* Don't increase for unsolicited status */
++ if (inc_seq_no) {
++ adapter->fsf_req_seq_no++;
++ ZFCP_LOG_TRACE(
++ "FSF sequence counter value of adapter 0x%lx "
++ "increased to %i\n",
++ (unsigned long)adapter,
++ adapter->fsf_req_seq_no);
++ }
++ /* count FSF requests pending */
++ atomic_inc(&adapter->fsf_reqs_active);
++#ifdef ZFCP_STAT_QUEUES
++ atomic_inc(&adapter->outbound_total);
++#endif
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_fsf_req_cleanup
++ *
++ * purpose: cleans up an FSF request and removes it from the specified list
++ *
++ * returns:
++ *
++ * assumption: no pending SB in SBALEs other than QTCB
++ */
++static int zfcp_fsf_req_cleanup(zfcp_fsf_req_t *fsf_req)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval;
++ zfcp_adapter_t *adapter = fsf_req->adapter;
++ unsigned long flags;
++
++ ZFCP_LOG_TRACE("enter (fsf_req=0x%lx)\n", (unsigned long)fsf_req);
++
++ write_lock_irqsave(&adapter->fsf_req_list_lock, flags);
++ list_del(&fsf_req->list);
++ write_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
++ retval = zfcp_fsf_req_free(fsf_req);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * function: zfcp_zero_sbals
++ *
++ * purpose: zeros specified range of SBALs
++ *
++ * returns:
++ */
++static inline void zfcp_zero_sbals(qdio_buffer_t *buf[], int first, int clean_count)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_QDIO
++
++ int cur_pos;
++ int index;
++
++
++ ZFCP_LOG_TRACE(
++ "enter (buf=0x%lx, first=%i, clean_count=%i)\n",
++ (unsigned long)buf, first, clean_count);
++
++ for (cur_pos = first; cur_pos < (first + clean_count); cur_pos++){
++ index = cur_pos % QDIO_MAX_BUFFERS_PER_Q;
++ memset(buf[index], 0, sizeof(qdio_buffer_t));
++ ZFCP_LOG_TRACE(
++ "zeroing BUFFER %d at address 0x%lx\n",
++ index,
++ (unsigned long) buf[index]);
++ }
++
++ ZFCP_LOG_TRACE("exit\n");
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++static void zfcp_config_parse_error(
++ unsigned char *s, /* complete mapping string */
++ unsigned char *err_pos, /* position of error in mapping string */
++ const char *err_msg, /* error message */
++ ...) /* additional arguments to be integrated into error message */
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++
++ int buf_l;
++ va_list args;
++ unsigned char *pos;
++ unsigned char c;
++
++ ZFCP_LOG_TRACE(
++ "enter (s=0x%lx, err_pos=0x%lx, err_msg=0x%lx)\n",
++ (unsigned long)s,
++ (unsigned long)err_pos,
++ (unsigned long)err_msg);
++
++ /* integrate additional arguments into error message */
++ va_start(args, err_msg);
++ buf_l = vsprintf(zfcp_data.perrbuf, err_msg, args);
++ va_end(args);
++ if (buf_l > ZFCP_PARSE_ERR_BUF_SIZE) {
++ ZFCP_LOG_NORMAL("Buffer overflow while parsing error message\n");
++ /* truncate error message */
++ zfcp_data.perrbuf[ZFCP_PARSE_ERR_BUF_SIZE - 1] = '\0';
++ buf_l = ZFCP_PARSE_ERR_BUF_SIZE;
++ }
++
++ /* calculate and print substring of mapping followed by error info */
++ pos = min((s + strlen(s) - 1), (err_pos + 1));
++ c = *pos;
++ *pos = '\0';
++ ZFCP_LOG_NORMAL("\"%s\" <- %s\n", s, zfcp_data.perrbuf);
++ *pos = c;
++
++ ZFCP_LOG_TRACE("exit\n");
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/* these macros implement the logic of the following 3 functions */
++#define ZFCP_PARSE_CHECK(condition, err_msg...) \
++ if (condition) { \
++ zfcp_config_parse_error(s, s + s_l - ts_l, err_msg); \
++ retval = -EINVAL; \
++ goto out; \
++ }
++
++#define ZFCP_PARSE_CHECKEND \
++ ZFCP_PARSE_CHECK(!ts_l, "syntax error: unexpected end of record")
++
++#define ZFCP_PARSE_TRUNCATE \
++ ts += count; ts_l -= count;
++
++#define ZFCP_PARSE_SKIP_CHARS(characters, min, max) \
++ count = strnspn(ts, characters, ts_l); \
++ ZFCP_PARSE_CHECK((size_t)count < (size_t)min, "syntax error: missing \"%c\" or equivalent character", *characters) \
++ ZFCP_PARSE_CHECK((size_t)count > (size_t)max, "syntax error: extraneous \"%c\" or equivalent character", *characters) \
++ ZFCP_PARSE_TRUNCATE
++
++#define ZFCP_PARSE_SKIP_COMMENT \
++ count = strnspn(ts, ZFCP_PARSE_COMMENT_CHARS, ts_l); \
++ if (count) { \
++ char *tmp; \
++ ZFCP_PARSE_TRUNCATE \
++ tmp = strnpbrk(ts, ZFCP_PARSE_RECORD_DELIM_CHARS, ts_l); \
++ if (tmp) \
++ count = (unsigned long)tmp - (unsigned long)ts; \
++ else count = ts_l; \
++ ZFCP_PARSE_TRUNCATE \
++ }
++
++#define ZFCP_PARSE_NUMBER(func, value, add_cond, msg...) \
++ value = func(ts, &endp, 0); \
++ count = (unsigned long)endp - (unsigned long)ts; \
++ ZFCP_PARSE_CHECK(!count || (add_cond), msg) \
++ ZFCP_PARSE_TRUNCATE
++
++#define ZFCP_PARSE_UL(value, cond, msg...) \
++ ZFCP_PARSE_NUMBER(simple_strtoul, value, cond, msg)
++
++#define ZFCP_PARSE_ULL(value, cond, msg...) \
++ ZFCP_PARSE_NUMBER(simple_strtoull, value, cond, msg)
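++/*
++ * Note that these macros deliberately rely on local variables and a
++ * label of the enclosing function; each user is assumed to provide a
++ * scaffold along these lines (names as used by the parse functions
++ * below):
++ *
++ * int retval, count;
++ * char *endp; // for ZFCP_PARSE_NUMBER
++ * unsigned char *ts = s; // current parse position
++ * int ts_l = s_l; // remaining length
++ * ...
++ * out: // jump target of ZFCP_PARSE_CHECK
++ */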
++
++
++static int zfcp_config_parse_record_list(unsigned char *s, int s_l, int flags)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++
++ int retval;
++ int count;
++ zfcp_config_record_t rec;
++ int ts_l = s_l;
++ unsigned char *ts = s;
++
++ ZFCP_LOG_TRACE(
++ "enter (s=0x%lx, s_l=%i, flags=%i)\n",
++ (unsigned long)s, s_l, flags);
++
++ while (ts_l) {
++ /* parse single line */
++ count = zfcp_config_parse_record(ts, ts_l, &rec);
++ if (count < 0) {
++ retval = count;
++ goto out;
++ }
++ ZFCP_PARSE_TRUNCATE;
++
++ /* create configuration according to parsed line */
++ if (rec.valid) {
++ if (flags & ZFCP_PARSE_ADD) {
++ retval = zfcp_config_parse_record_add(&rec);
++ } else {
++ /* FIXME (implement) switch in when record_del works again */
++#if 0
++ retval = zfcp_config_parse_record_del(&rec);
++#endif
++ ZFCP_LOG_TRACE("DEL\n");
++ retval = -1;
++ }
++ if (retval < 0)
++ goto out;
++ } /* else we parsed an empty line or a comment */
++ if (ts_l > 0) {
++ /* skip expected 'new line' */
++ ZFCP_PARSE_SKIP_CHARS(ZFCP_PARSE_RECORD_DELIM_CHARS, 1, ts_l);
++ }
++ }
++ retval = s_l;
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++static int zfcp_config_parse_record(
++ unsigned char *s,
++ int s_l,
++ zfcp_config_record_t *rec)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++
++ int retval;
++ int count = 0;
++ char *endp;
++ unsigned char *ts = s;
++ int ts_l = s_l;
++
++ ZFCP_LOG_TRACE(
++ "enter (s=0x%lx, s_l=%i, rec=0x%lx)\n",
++ (unsigned long)s,
++ s_l,
++ (unsigned long)rec);
++
++ rec->valid = 0;
++
++ /* skip any leading spaces + tabs */
++ ZFCP_PARSE_SKIP_CHARS(ZFCP_PARSE_SPACE_CHARS, 0, -1UL);
++
++ /* allow for comments */
++ ZFCP_PARSE_SKIP_COMMENT;
++
++ /* allow 'empty' line */
++ if (strnspn(ts, ZFCP_PARSE_RECORD_DELIM_CHARS, 1))
++ goto calculate;
++
++ /* parse device number of host */
++ ZFCP_PARSE_UL(rec->devno, rec->devno > 0xFFFF, "no valid device number");
++ ZFCP_LOG_TRACE("devno \"0x%lx\"\n", rec->devno);
++ ZFCP_PARSE_CHECKEND;
++
++ /* skip delimiting spaces + tabs (at least 1 character is mandatory) */
++ ZFCP_PARSE_SKIP_CHARS(ZFCP_PARSE_SPACE_CHARS, 1, -1UL);
++ ZFCP_PARSE_CHECKEND;
++
++ /* parse scsi id of remote port */
++ ZFCP_PARSE_UL(rec->scsi_id, 0, "no valid SCSI ID");
++ ZFCP_LOG_TRACE("SCSI ID \"0x%lx\"\n", rec->scsi_id);
++ ZFCP_PARSE_CHECKEND;
++
++ /* skip delimiting character */
++ ZFCP_PARSE_SKIP_CHARS(ZFCP_PARSE_DELIM_CHARS, 1, 1);
++ ZFCP_PARSE_CHECKEND;
++
++ /* parse wwpn of remote port */
++ ZFCP_PARSE_ULL(rec->wwpn, 0, "no valid WWPN");
++ ZFCP_LOG_TRACE("WWPN \"0x%016Lx\"\n", rec->wwpn);
++ ZFCP_PARSE_CHECKEND;
++
++ /* skip delimiting spaces + tabs (at least 1 character is mandatory) */
++ ZFCP_PARSE_SKIP_CHARS(ZFCP_PARSE_SPACE_CHARS, 1, -1UL);
++ ZFCP_PARSE_CHECKEND;
++
++ /* parse scsi lun of logical unit */
++ ZFCP_PARSE_UL(rec->scsi_lun, 0, "no valid SCSI LUN");
++ ZFCP_LOG_TRACE("SCSI LUN \"0x%lx\"\n", rec->scsi_lun);
++ ZFCP_PARSE_CHECKEND;
++
++ /* skip delimiting character */
++ ZFCP_PARSE_SKIP_CHARS(ZFCP_PARSE_DELIM_CHARS, 1, 1);
++ ZFCP_PARSE_CHECKEND;
++
++ /* parse fcp_lun of logical unit */
++ ZFCP_PARSE_ULL(rec->fcp_lun, 0, "no valid FCP_LUN");
++ ZFCP_LOG_TRACE("FCP_LUN \"0x%016Lx\"\n", rec->fcp_lun);
++
++ /* skip any ending spaces + tabs */
++ ZFCP_PARSE_SKIP_CHARS(ZFCP_PARSE_SPACE_CHARS, 0, -1UL);
++
++ /* allow for comments */
++ ZFCP_PARSE_SKIP_COMMENT;
++
++ /* this is something valid */
++ rec->valid = 1;
++
++calculate:
++ /* length of string which has been parsed */
++ retval = s_l - ts_l;
++
++out:
++ ZFCP_LOG_TRACE("exit %d\n",
++ retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
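++/*
++ * For illustration, a record accepted by the parser above would look
++ * like this (hypothetical values; the exact delimiter set depends on
++ * ZFCP_PARSE_DELIM_CHARS):
++ *
++ * 0x5922 0x1:0x5005076300c18154 0x0:0x5719000000000000
++ *
++ * i.e. <devno> <scsi_id>:<wwpn> <scsi_lun>:<fcp_lun>, optionally
++ * surrounded by whitespace and comments.
++ */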
++
++
++#define ZFCP_PRINT_FAILED_RECORD(rec, log_func) \
++ log_func( \
++ "warning: unable to add record: " \
++ "0x%04lx %li:0x%016Lx %li:0x%016Lx\n", \
++ rec->devno, \
++ rec->scsi_id, rec->wwpn, \
++ rec->scsi_lun, rec->fcp_lun);
++
++
++/*
++ * function: zfcp_config_parse_record_add
++ *
++ * purpose: Allocates the required adapter, port and unit structs
++ * and puts them into their respective lists
++ *
++ * returns: 0 on success
++ * -E* on failure (depends on called routines)
++ */
++int zfcp_config_parse_record_add(zfcp_config_record_t *rec)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ int retval;
++ zfcp_adapter_t *adapter;
++ zfcp_port_t *port;
++ zfcp_unit_t *unit;
++
++ ZFCP_LOG_TRACE("enter (rec=0x%lx)\n", (unsigned long)rec);
++
++ /* don't allow SCSI ID 0 for any port since it is reserved for adapters */
++ if (rec->scsi_id == 0) {
++ ZFCP_LOG_NORMAL(
++ "warning: SCSI ID 0 is not allowed for ports as it is "
++ "reserved for adapters\n");
++ retval = -EINVAL;
++ goto failed_record;
++ }
++ /* check for adapter and configure it if needed */
++ retval = zfcp_adapter_enqueue(rec->devno, &adapter);
++ if (retval < 0)
++ goto failed_record;
++
++ /*
++ * no explicit adapter reopen necessary,
++ * will be escalated by unit reopen if required
++ */
++
++ retval = zfcp_port_enqueue(
++ adapter,
++ rec->scsi_id,
++ rec->wwpn,
++ 0,
++ &port);
++ if (retval < 0)
++ goto failed_record;
++
++ /*
++ * no explicit port reopen necessary,
++ * will be escalated by unit reopen if required
++ */
++
++ retval = zfcp_unit_enqueue(
++ port,
++ rec->scsi_lun,
++ rec->fcp_lun,
++ &unit);
++ if (retval < 0)
++ goto failed_record;
++
++ zfcp_erp_unit_reopen(unit, 0);
++
++ /* processed record successfully */
++ goto out;
++
++failed_record:
++ ZFCP_PRINT_FAILED_RECORD(rec, ZFCP_LOG_NORMAL);
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++#if 0
++/* FIXME(design): rewrite necessary */
++static int zfcp_config_parse_record_del(zfcp_config_record_t *rec)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_CONFIG
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_CONFIG
++
++ int retval = 0;
++ unsigned long flags;
++ zfcp_adapter_t *adapter;
++ zfcp_port_t *port;
++ zfcp_unit_t *unit;
++
++ ZFCP_LOG_TRACE(
++ "enter (rec=0x%lx)\n",
++ (unsigned long)rec);
++
++ /* check for adapter */
++ write_lock_irqsave(&zfcp_data.adapter_list_lock, flags);
++ ZFCP_FOR_EACH_ADAPTER(adapter) {
++ if (adapter->devno == rec->devno)
++ break;
++ }
++ if (!adapter) {
++ ZFCP_LOG_NORMAL(
++ "warning: Could not delete a record. "
++ "The adapter with devno 0x%04lx does not exist.\n",
++ rec->devno);
++ ZFCP_PRINT_FAILED_RECORD(rec, ZFCP_LOG_DEBUG);
++ goto unlock_adapter;
++ }
++
++ /* check for remote port */
++ write_lock(&adapter->port_list_lock);
++ ZFCP_FOR_EACH_PORT(adapter, port) {
++ if (port->scsi_id == rec->scsi_id)
++ break;
++ }
++ if (!port) {
++ ZFCP_LOG_NORMAL(
++ "warning: Could not delete a record. "
++ "The port with SCSI ID %li does not exist.\n",
++ rec->scsi_id);
++ ZFCP_PRINT_FAILED_RECORD(rec, ZFCP_LOG_DEBUG);
++ goto unlock_port;
++ }
++ if (port->wwpn != rec->wwpn) {
++ ZFCP_LOG_NORMAL(
++ "error: The port WWPN 0x%016Lx "
++ "does not match the already configured WWPN 0x%016Lx\n",
++ rec->wwpn,
++ (llui_t)port->wwpn);
++ ZFCP_PRINT_FAILED_RECORD(rec, ZFCP_LOG_INFO);
++ goto unlock_port;
++ }
++
++ /* check for logical unit */
++ write_lock(&port->unit_list_lock);
++ ZFCP_FOR_EACH_UNIT(port, unit) {
++ if (unit->scsi_lun == rec->scsi_lun)
++ break;
++ }
++ if (!unit) {
++ ZFCP_LOG_NORMAL(
++ "warning: Could not delete a record. "
++ "The unit with SCSI LUN %li does not exist.\n",
++ rec->scsi_lun);
++ ZFCP_PRINT_FAILED_RECORD(rec, ZFCP_LOG_DEBUG);
++ goto unlock_unit;
++ }
++ if (unit->fcp_lun != rec->fcp_lun) {
++ ZFCP_LOG_NORMAL(
++ "error: The record for the FCP_LUN 0x%016Lx "
++ "does not match that of the already "
++ "configured FCP_LUN 0x%016Lx\n",
++ rec->fcp_lun,
++ (llui_t)unit->fcp_lun);
++ ZFCP_PRINT_FAILED_RECORD(rec, ZFCP_LOG_INFO);
++ goto unlock_unit;
++ }
++
++ /* FIXME: do more work here: CLOSE UNIT */
++ retval = zfcp_unit_dequeue(unit);
++ if (retval == -EBUSY) {
++ ZFCP_LOG_NORMAL("warning: Attempt to remove active unit with "
++ "FCP_LUN 0x%016Lx, at the port with WWPN 0x%016Lx of the "
++ "adapter with devno 0x%04x was ignored. Unit "
++ "is still in use.\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ ZFCP_PRINT_FAILED_RECORD(rec, ZFCP_LOG_INFO);
++ goto unlock_unit;
++ }
++
++ /* FIXME: do more work here: CLOSE PORT */
++ retval = zfcp_port_dequeue(port);
++ if (retval == -EBUSY) {
++ retval = 0;
++ goto unlock_unit;
++ }
++
++ /* FIXME: do more work here: shutdown adapter */
++ retval = zfcp_adapter_dequeue(adapter);
++ if (retval == -EBUSY)
++ retval = 0;
++
++unlock_unit:
++ write_unlock(&port->unit_list_lock);
++unlock_port:
++ write_unlock(&adapter->port_list_lock);
++unlock_adapter:
++ write_unlock_irqrestore(&zfcp_data.adapter_list_lock, flags);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++#endif
++
++
++
++/*
++ * function:
++ *
++ * purpose: called if an adapter failed,
++ * initiates adapter recovery which is done
++ * asynchronously
++ *
++ * returns: 0 - initiated action successfully
++ * <0 - failed to initiate action
++ */
++static int zfcp_erp_adapter_reopen_internal(
++ zfcp_adapter_t *adapter,
++ int clear_mask)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter=0x%lx clear_mask=0x%x)\n",
++ (unsigned long)adapter,
++ clear_mask);
++
++ debug_text_event(adapter->erp_dbf,5,"a_ro");
++ ZFCP_LOG_DEBUG(
++ "Reopen on the adapter with devno 0x%04x\n",
++ adapter->devno);
++
++ zfcp_erp_adapter_block(adapter, clear_mask);
++
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) {
++ ZFCP_LOG_DEBUG(
++ "skipped reopen on the failed adapter with devno 0x%04x\n",
++ adapter->devno);
++ debug_text_event(adapter->erp_dbf,5,"a_ro_f");
++ /* ensure propagation of failed status to new devices */
++ zfcp_erp_adapter_failed(adapter);
++ retval = -EIO;
++ goto out;
++ }
++ retval = zfcp_erp_action_enqueue(
++ ZFCP_ERP_ACTION_REOPEN_ADAPTER,
++ adapter,
++ NULL,
++ NULL);
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: Wrapper for zfcp_erp_adapter_reopen_internal
++ * used to ensure the correct locking
++ *
++ * returns: 0 - initiated action successfully
++ * <0 - failed to initiate action
++ */
++static int zfcp_erp_adapter_reopen(
++ zfcp_adapter_t *adapter,
++ int clear_mask)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++ unsigned long flags;
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter=0x%lx clear_mask=0x%x)\n",
++ (unsigned long)adapter,
++ clear_mask);
++
++ write_lock_irqsave(&adapter->erp_lock, flags);
++ retval = zfcp_erp_adapter_reopen_internal(adapter, clear_mask);
++ write_unlock_irqrestore(&adapter->erp_lock, flags);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ */
++static inline int zfcp_erp_adapter_shutdown(zfcp_adapter_t* adapter, int clear_mask)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter=0x%lx clear_mask=0x%x)\n",
++ (unsigned long)adapter,
++ clear_mask);
++
++ retval=zfcp_erp_adapter_reopen(
++ adapter,
++ ZFCP_STATUS_COMMON_RUNNING |
++ ZFCP_STATUS_COMMON_ERP_FAILED |
++ clear_mask);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ */
++static inline int zfcp_erp_port_shutdown(zfcp_port_t* port, int clear_mask)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++
++ ZFCP_LOG_TRACE(
++ "enter (port=0x%lx clear_mask=0x%x)\n",
++ (unsigned long)port,
++ clear_mask);
++
++ retval = zfcp_erp_port_reopen(
++ port,
++ ZFCP_STATUS_COMMON_RUNNING |
++ ZFCP_STATUS_COMMON_ERP_FAILED |
++ clear_mask);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ */
++static inline int zfcp_erp_unit_shutdown(zfcp_unit_t* unit, int clear_mask)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++
++ ZFCP_LOG_TRACE(
++ "enter (unit=0x%lx clear_mask=0x%x)\n",
++ (unsigned long)unit,
++ clear_mask);
++
++ retval = zfcp_erp_unit_reopen(
++ unit,
++ ZFCP_STATUS_COMMON_RUNNING |
++ ZFCP_STATUS_COMMON_ERP_FAILED |
++ clear_mask);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_els
++ *
++ * purpose: Originator of the ELS commands
++ *
++ * returns: 0 - Operation completed successfully
++ * -EINVAL - Unknown IOCTL command or invalid sense data record
++ * -ENOMEM - Insufficient memory
++ * -EPERM - Cannot create or queue FSF request
++ */
++static int zfcp_els(zfcp_port_t *port, u8 ls_code)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ struct zfcp_send_els *send_els;
++ struct zfcp_ls_rls *rls;
++ struct zfcp_ls_pdisc *pdisc;
++ struct zfcp_ls_adisc *adisc;
++ void *page = NULL;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (port=0x%lx ls_code=0x%02x)\n",
++ (unsigned long)port, ls_code);
++
++ send_els = (struct zfcp_send_els*)ZFCP_KMALLOC(
++ sizeof(struct zfcp_send_els), GFP_ATOMIC);
++ if (send_els == NULL)
++ goto nomem;
++
++ send_els->req = (struct scatterlist*)ZFCP_KMALLOC(
++ sizeof(struct scatterlist), GFP_ATOMIC);
++ if (send_els->req == NULL)
++ goto nomem;
++ send_els->req_count = 1;
++
++ send_els->resp = (struct scatterlist*)ZFCP_KMALLOC(
++ sizeof(struct scatterlist), GFP_ATOMIC);
++ if (send_els->resp == NULL)
++ goto nomem;
++ send_els->resp_count = 1;
++
++ page = (void*)ZFCP_GET_ZEROED_PAGE(GFP_ATOMIC);
++ if (page == NULL)
++ goto nomem;
++ send_els->req->address = (char*)page;
++ send_els->resp->address = (char*)(page + (PAGE_SIZE >> 1));
++
++ send_els->port = port;
++ send_els->ls_code = ls_code;
++ send_els->handler = zfcp_els_handler;
++ send_els->handler_data = (unsigned long)send_els;
++
++ *(u32*)page = 0;
++ *(u8*)page = ls_code;
++
++ switch (ls_code) {
++
++ case ZFCP_LS_RTV:
++ send_els->req->length = sizeof(struct zfcp_ls_rtv);
++ send_els->resp->length = sizeof(struct zfcp_ls_rtv_acc);
++ ZFCP_LOG_NORMAL(
++ "RTV request from sid 0x%06x to did 0x%06x\n",
++ port->adapter->s_id, port->d_id);
++ break;
++
++ case ZFCP_LS_RLS:
++ send_els->req->length = sizeof(struct zfcp_ls_rls);
++ send_els->resp->length = sizeof(struct zfcp_ls_rls_acc);
++ rls = (struct zfcp_ls_rls*)send_els->req->address;
++ rls->port_id = port->adapter->s_id;
++ ZFCP_LOG_NORMAL(
++ "RLS request from sid 0x%06x to did 0x%06x "
++ "payload(port_id=0x%06x)\n",
++ port->adapter->s_id, port->d_id, rls->port_id);
++ break;
++
++ case ZFCP_LS_PDISC:
++ send_els->req->length = sizeof(struct zfcp_ls_pdisc);
++ send_els->resp->length = sizeof(struct zfcp_ls_pdisc_acc);
++ pdisc = (struct zfcp_ls_pdisc*)send_els->req->address;
++ pdisc->wwpn = port->adapter->wwpn;
++ pdisc->wwnn = port->adapter->wwnn;
++ ZFCP_LOG_NORMAL(
++ "PDISC request from sid 0x%06x to did 0x%06x "
++ "payload(wwpn=0x%016Lx wwnn=0x%016Lx)\n",
++ port->adapter->s_id, port->d_id,
++ (unsigned long long)pdisc->wwpn,
++ (unsigned long long)pdisc->wwnn);
++ break;
++
++ case ZFCP_LS_ADISC:
++ send_els->req->length = sizeof(struct zfcp_ls_adisc);
++ send_els->resp->length = sizeof(struct zfcp_ls_adisc_acc);
++ adisc = (struct zfcp_ls_adisc*)send_els->req->address;
++ adisc->hard_nport_id = port->adapter->s_id;
++ adisc->wwpn = port->adapter->wwpn;
++ adisc->wwnn = port->adapter->wwnn;
++ adisc->nport_id = port->adapter->s_id;
++ ZFCP_LOG_NORMAL(
++ "ADISC request from sid 0x%06x to did 0x%06x "
++ "payload(wwpn=0x%016Lx wwnn=0x%016Lx "
++ "hard_nport_id=0x%06x nport_id=0x%06x)\n",
++ port->adapter->s_id, port->d_id,
++ (unsigned long long)adisc->wwpn,
++ (unsigned long long)adisc->wwnn,
++ adisc->hard_nport_id, adisc->nport_id);
++ break;
++
++ default:
++ ZFCP_LOG_NORMAL(
++ "ELS command code 0x%02x is not supported\n", ls_code);
++ retval = -EINVAL;
++ goto invalid_ls_code;
++ }
++
++ retval = zfcp_fsf_send_els(send_els);
++ if (retval != 0) {
++ ZFCP_LOG_NORMAL(
++ "ELS request could not be processed "
++ "(sid=0x%06x did=0x%06x)\n",
++ port->adapter->s_id, port->d_id);
++ retval = -EPERM;
++ }
++
++ goto out;
++
++nomem:
++ ZFCP_LOG_INFO("Out of memory!\n");
++ retval = -ENOMEM;
++
++invalid_ls_code:
++ if (page != NULL)
++ ZFCP_FREE_PAGE((unsigned long)page);
++ if (send_els != NULL) {
++ if (send_els->req != NULL)
++ ZFCP_KFREE(send_els->req, sizeof(struct scatterlist));
++ if (send_els->resp != NULL)
++ ZFCP_KFREE(send_els->resp, sizeof(struct scatterlist));
++ ZFCP_KFREE(send_els, sizeof(struct zfcp_send_els));
++ }
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/**
++ * zfcp_els_handler - handler for ELS commands
++ * @data: pointer to struct zfcp_send_els
++ * If the ELS failed (LS_RJT or timed out), a forced reopen of the port is triggered.
++ */
++static void zfcp_els_handler(unsigned long data)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ struct zfcp_send_els *send_els = (struct zfcp_send_els*)data;
++ zfcp_port_t *port = send_els->port;
++ zfcp_adapter_t *adapter = port->adapter;
++ u8 req_code = *(u8*)send_els->req->address;
++ struct zfcp_ls_rtv_acc *rtv;
++ struct zfcp_ls_rls_acc *rls;
++ struct zfcp_ls_pdisc_acc *pdisc;
++ struct zfcp_ls_adisc_acc *adisc;
++
++ ZFCP_LOG_TRACE("enter (data=0x%lx)\n", data);
++
++ /* request rejected or timed out */
++ if (send_els->status != 0) {
++ ZFCP_LOG_NORMAL("ELS request failed, force physical port "
++ "reopen (wwpn=0x%016Lx devno=0x%04x)\n",
++ (unsigned long long)port->wwpn, adapter->devno);
++ debug_text_event(adapter->erp_dbf, 3, "forcreop");
++ if (zfcp_erp_port_forced_reopen(port, 0))
++ ZFCP_LOG_NORMAL(
++ "Cannot reopen a remote port "
++ "(wwpn=0x%016Lx devno=0x%04x)\n",
++ (unsigned long long)port->wwpn,
++ adapter->devno);
++ goto out;
++ }
++
++ switch (req_code) {
++
++ case ZFCP_LS_RTV:
++ rtv = (struct zfcp_ls_rtv_acc*)send_els->resp->address;
++ ZFCP_LOG_NORMAL(
++ "RTV response from did 0x%06x to sid 0x%06x "
++ "with payload(R_A_TOV=%ds E_D_TOV=%d%cs)\n",
++ port->d_id, port->adapter->s_id,
++ rtv->r_a_tov, rtv->e_d_tov,
++ rtv->qualifier & ZFCP_LS_RTV_E_D_TOV_FLAG ?
++ 'n' : 'm');
++ break;
++
++ case ZFCP_LS_RLS:
++ rls = (struct zfcp_ls_rls_acc*)send_els->resp->address;
++ ZFCP_LOG_NORMAL(
++ "RLS response from did 0x%06x to sid 0x%06x "
++ "with payload(link_failure_count=%u "
++ "loss_of_sync_count=%u "
++ "loss_of_signal_count=%u "
++ "primitive_sequence_protocol_error=%u "
++ "invalid_transmission_word=%u "
++ "invalid_crc_count=%u)\n",
++ port->d_id, port->adapter->s_id,
++ rls->link_failure_count,
++ rls->loss_of_sync_count,
++ rls->loss_of_signal_count,
++ rls->prim_seq_prot_error,
++ rls->invalid_transmition_word,
++ rls->invalid_crc_count);
++ break;
++
++ case ZFCP_LS_PDISC:
++ pdisc = (struct zfcp_ls_pdisc_acc*)send_els->resp->address;
++ ZFCP_LOG_NORMAL(
++ "PDISC response from did 0x%06x to sid 0x%06x "
++ "with payload(wwpn=0x%016Lx wwnn=0x%016Lx "
++ "vendor='%-16s')\n",
++ port->d_id, port->adapter->s_id,
++ (unsigned long long)pdisc->wwpn,
++ (unsigned long long)pdisc->wwnn,
++ pdisc->vendor_version);
++ break;
++
++ case ZFCP_LS_ADISC:
++ adisc = (struct zfcp_ls_adisc_acc*)send_els->resp->address;
++ ZFCP_LOG_NORMAL(
++ "ADISC response from did 0x%06x to sid 0x%06x "
++ "with payload(wwpn=0x%016Lx wwnn=0x%016Lx "
++ "hard_nport_id=0x%06x nport_id=0x%06x)\n",
++ port->d_id, port->adapter->s_id,
++ (unsigned long long)adisc->wwpn,
++ (unsigned long long)adisc->wwnn,
++ adisc->hard_nport_id, adisc->nport_id);
++ if (port->wwpn != adisc->wwpn) {
++ ZFCP_LOG_NORMAL("d_id assignment changed, reopening "
++ "port (devno=0x%04x, wwpn=0x%016Lx, "
++ "adisc_resp_wwpn=0x%016Lx)\n",
++ adapter->devno,
++ (unsigned long long) port->wwpn,
++ (unsigned long long) adisc->wwpn);
++ if (zfcp_erp_port_reopen(port, 0))
++ ZFCP_LOG_NORMAL("failed reopen of port "
++ "(devno=0x%04x, "
++ "wwpn=0x%016Lx)\n",
++ adapter->devno,
++ (long long) port->wwpn);
++ } else
++ if (port->wwnn == 0)
++ port->wwnn = adisc->wwnn;
++
++ break;
++ }
++
++ out:
++ ZFCP_FREE_PAGE((unsigned long)send_els->req->address);
++ ZFCP_KFREE(send_els->req, sizeof(struct scatterlist));
++ ZFCP_KFREE(send_els->resp, sizeof(struct scatterlist));
++ ZFCP_KFREE(send_els, sizeof(struct zfcp_send_els));
++
++ ZFCP_LOG_TRACE("exit\n");
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_test_link
++ *
++ * purpose: Test the status of a link to a remote port using the ELS command ADISC
++ *
++ * returns: 0 - Link is OK
++ * -EPERM - Port forced reopen failed
++ */
++static int zfcp_test_link(zfcp_port_t *port)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_FSF
++
++ int retval;
++
++ ZFCP_LOG_TRACE("enter (port=0x%lx)\n", (unsigned long)port);
++
++ retval = zfcp_els(port, ZFCP_LS_ADISC);
++ if (retval != 0) {
++ ZFCP_LOG_NORMAL(
++ "Port needs to be reopened "
++ "(wwpn=0x%016Lx devno=0x%04x)\n",
++ (unsigned long long)port->wwpn,
++ port->adapter->devno);
++ retval = zfcp_erp_port_forced_reopen(port, 0);
++ if (retval != 0) {
++ ZFCP_LOG_NORMAL(
++ "Cannot reopen a remote port "
++ "(wwpn=0x%016Lx devno=0x%04x)\n",
++ (unsigned long long)port->wwpn,
++ port->adapter->devno);
++ retval = -EPERM;
++ }
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: called if a port failed to be opened normally;
++ * initiates Forced Reopen recovery which is done
++ * asynchronously
++ *
++ * returns: 0 - initiated action successfully
++ * <0 - failed to initiate action
++ */
++static int zfcp_erp_port_forced_reopen_internal(
++ zfcp_port_t *port,
++ int clear_mask)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++ zfcp_adapter_t *adapter = port->adapter;
++
++ ZFCP_LOG_TRACE(
++ "enter (port=0x%lx clear_mask=0x%x)\n",
++ (unsigned long)port,
++ clear_mask);
++
++ debug_text_event(adapter->erp_dbf,5,"pf_ro");
++ debug_event(adapter->erp_dbf,5,&port->wwpn,
++ sizeof(wwn_t));
++
++ ZFCP_LOG_DEBUG(
++ "Forced reopen of the port with WWPN 0x%016Lx "
++ "on the adapter with devno 0x%04x\n",
++ (llui_t)port->wwpn,
++ adapter->devno);
++
++ zfcp_erp_port_block(port, clear_mask);
++
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
++ ZFCP_LOG_DEBUG(
++ "skipped forced reopen on the failed port "
++ "with WWPN 0x%016Lx on the adapter with devno 0x%04x\n",
++ (llui_t)port->wwpn,
++ adapter->devno);
++ debug_text_event(adapter->erp_dbf,5,"pf_ro_f");
++ debug_event(adapter->erp_dbf,5,&port->wwpn,
++ sizeof(wwn_t));
++ retval = -EIO;
++ goto out;
++ }
++
++ retval = zfcp_erp_action_enqueue(
++ ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
++ adapter,
++ port,
++ NULL);
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: Wrapper for zfcp_erp_port_forced_reopen_internal
++ * used to ensure the correct locking
++ *
++ * returns: 0 - initiated action succesfully
++ * <0 - failed to initiate action
++ */
++static int zfcp_erp_port_forced_reopen(
++ zfcp_port_t *port,
++ int clear_mask)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++ unsigned long flags;
++ zfcp_adapter_t *adapter = port->adapter;
++
++ ZFCP_LOG_TRACE(
++		"enter (port=0x%lx clear_mask=0x%x)\n",
++ (unsigned long)port,
++ clear_mask);
++
++ write_lock_irqsave(&adapter->erp_lock, flags);
++ retval = zfcp_erp_port_forced_reopen_internal(port, clear_mask);
++ write_unlock_irqrestore(&adapter->erp_lock, flags);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:	called if a port is to be opened;
++ *		initiates Reopen recovery, which is done
++ *		asynchronously
++ *
++ * returns:	0	- initiated action successfully
++ * <0 - failed to initiate action
++ */
++static int zfcp_erp_port_reopen_internal(
++ zfcp_port_t *port,
++ int clear_mask)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++ zfcp_adapter_t *adapter = port->adapter;
++
++ ZFCP_LOG_TRACE(
++ "enter (port=0x%lx clear_mask=0x%x)\n",
++ (unsigned long)port,
++ clear_mask);
++
++ debug_text_event(adapter->erp_dbf, 5, "p_ro");
++ debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof(wwn_t));
++
++ ZFCP_LOG_DEBUG(
++ "Reopen of the port with WWPN 0x%016Lx "
++ "on the adapter with devno 0x%04x\n",
++ (llui_t)port->wwpn,
++ adapter->devno);
++
++ zfcp_erp_port_block(port, clear_mask);
++
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
++ ZFCP_LOG_DEBUG(
++ "skipped reopen on the failed port with WWPN 0x%016Lx "
++ "on the adapter with devno 0x%04x\n",
++ (llui_t)port->wwpn,
++ adapter->devno);
++ debug_text_event(adapter->erp_dbf, 5, "p_ro_f");
++ debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof(wwn_t));
++ /* ensure propagation of failed status to new devices */
++ zfcp_erp_port_failed(port);
++ retval = -EIO;
++ goto out;
++ }
++
++ retval = zfcp_erp_action_enqueue(
++ ZFCP_ERP_ACTION_REOPEN_PORT,
++ adapter,
++ port,
++ NULL);
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:	Wrapper for zfcp_erp_port_reopen_internal,
++ *		used to ensure the correct locking
++ *
++ * returns:	0	- initiated action successfully
++ * <0 - failed to initiate action
++ */
++static int zfcp_erp_port_reopen(
++ zfcp_port_t *port,
++ int clear_mask)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++ unsigned long flags;
++ zfcp_adapter_t *adapter = port->adapter;
++
++ ZFCP_LOG_TRACE(
++ "enter (port=0x%lx clear_mask=0x%x)\n",
++ (unsigned long)port,
++ clear_mask);
++
++ write_lock_irqsave(&adapter->erp_lock, flags);
++ retval = zfcp_erp_port_reopen_internal(port, clear_mask);
++ write_unlock_irqrestore(&adapter->erp_lock, flags);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:	called if a unit is to be opened;
++ *		initiates Reopen recovery, which is done
++ *		asynchronously
++ *
++ * returns:	0	- initiated action successfully
++ * <0 - failed to initiate action
++ */
++static int zfcp_erp_unit_reopen_internal(
++ zfcp_unit_t *unit,
++ int clear_mask)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++ zfcp_adapter_t *adapter = unit->port->adapter;
++
++ ZFCP_LOG_TRACE(
++ "enter (unit=0x%lx clear_mask=0x%x)\n",
++ (unsigned long)unit,
++ clear_mask);
++
++ debug_text_event(adapter->erp_dbf,5,"u_ro");
++ debug_event(adapter->erp_dbf,5,&unit->fcp_lun,
++ sizeof(fcp_lun_t));
++ ZFCP_LOG_DEBUG(
++ "Reopen of the unit with FCP_LUN 0x%016Lx on the "
++ "port with WWPN 0x%016Lx "
++ "on the adapter with devno 0x%04x\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ adapter->devno);
++
++ zfcp_erp_unit_block(unit, clear_mask);
++
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status)) {
++ ZFCP_LOG_DEBUG(
++ "skipped reopen on the failed unit with FCP_LUN 0x%016Lx on the "
++ "port with WWPN 0x%016Lx "
++ "on the adapter with devno 0x%04x\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ adapter->devno);
++ debug_text_event(adapter->erp_dbf,5,"u_ro_f");
++ debug_event(adapter->erp_dbf,5,&unit->fcp_lun,
++ sizeof(fcp_lun_t));
++ retval = -EIO;
++ goto out;
++ }
++
++ retval = zfcp_erp_action_enqueue(
++ ZFCP_ERP_ACTION_REOPEN_UNIT,
++ unit->port->adapter,
++ unit->port,
++ unit);
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:	Wrapper for zfcp_erp_unit_reopen_internal,
++ *		used to ensure the correct locking
++ *
++ * returns:	0	- initiated action successfully
++ * <0 - failed to initiate action
++ */
++static int zfcp_erp_unit_reopen(
++ zfcp_unit_t *unit,
++ int clear_mask)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++ unsigned long flags;
++ zfcp_adapter_t *adapter = unit->port->adapter;
++
++ ZFCP_LOG_TRACE(
++ "enter (unit=0x%lx clear_mask=0x%x)\n",
++ (unsigned long)unit,
++ clear_mask);
++
++ write_lock_irqsave(&adapter->erp_lock, flags);
++ retval = zfcp_erp_unit_reopen_internal(unit, clear_mask);
++ write_unlock_irqrestore(&adapter->erp_lock, flags);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * function:
++ *
++ * purpose: disable I/O,
++ * return any open requests and clean them up,
++ * aim: no pending and incoming I/O
++ *
++ * returns:
++ */
++static int zfcp_erp_adapter_block(zfcp_adapter_t *adapter, int clear_mask)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter=0x%lx clear_mask=0x%x)\n",
++ (unsigned long)adapter,
++ clear_mask);
++
++ debug_text_event(adapter->erp_dbf,6,"a_bl");
++
++ zfcp_erp_modify_adapter_status(
++ adapter,
++ ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
++ ZFCP_CLEAR);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: enable I/O
++ *
++ * returns:
++ */
++static int zfcp_erp_adapter_unblock(zfcp_adapter_t *adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ debug_text_event(adapter->erp_dbf,6,"a_ubl");
++ atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: disable I/O,
++ * return any open requests and clean them up,
++ * aim: no pending and incoming I/O
++ *
++ * returns:
++ */
++static int zfcp_erp_port_block(zfcp_port_t *port, int clear_mask)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ zfcp_adapter_t *adapter=port->adapter;
++
++ ZFCP_LOG_TRACE(
++ "enter (port=0x%lx clear_mask=0x%x)\n",
++ (unsigned long)port,
++ clear_mask);
++
++ debug_text_event(adapter->erp_dbf,6,"p_bl");
++ debug_event(adapter->erp_dbf,6,&port->wwpn,
++ sizeof(wwn_t));
++
++ zfcp_erp_modify_port_status(
++ port,
++ ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
++ ZFCP_CLEAR);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: enable I/O
++ *
++ * returns:
++ */
++static int zfcp_erp_port_unblock(zfcp_port_t *port)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ zfcp_adapter_t *adapter=port->adapter;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ debug_text_event(adapter->erp_dbf,6,"p_ubl");
++ debug_event(adapter->erp_dbf,6,&port->wwpn,
++ sizeof(wwn_t));
++ atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: disable I/O,
++ * return any open requests and clean them up,
++ * aim: no pending and incoming I/O
++ *
++ * returns:
++ */
++static int zfcp_erp_unit_block(zfcp_unit_t *unit, int clear_mask)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ zfcp_adapter_t *adapter=unit->port->adapter;
++
++ ZFCP_LOG_TRACE(
++		"enter (unit=0x%lx clear_mask=0x%x)\n",
++ (unsigned long)unit,
++ clear_mask);
++
++ debug_text_event(adapter->erp_dbf,6,"u_bl");
++ debug_event(adapter->erp_dbf,6,&unit->fcp_lun,
++ sizeof(fcp_lun_t));
++
++ zfcp_erp_modify_unit_status(
++ unit,
++ ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
++ ZFCP_CLEAR);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: enable I/O
++ *
++ * returns:
++ */
++static int zfcp_erp_unit_unblock(zfcp_unit_t *unit)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ zfcp_adapter_t *adapter=unit->port->adapter;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ debug_text_event(adapter->erp_dbf,6,"u_ubl");
++ debug_event(adapter->erp_dbf,6,&unit->fcp_lun,
++ sizeof(fcp_lun_t));
++ atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:	zfcp_erp_action_ready
++ *
++ * purpose:	moves the erp_action to the 'ready' queue and wakes up
++ *		the erp thread of the adapter
++ *
++ * returns:	0
++ */
++static int zfcp_erp_action_ready(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ ZFCP_LOG_TRACE(
++ "enter (erp_action=0x%lx)\n",
++ (unsigned long)erp_action);
++
++ debug_text_event(adapter->erp_dbf, 4, "a_ar");
++ debug_event(adapter->erp_dbf, 4, &erp_action->action, sizeof(int));
++
++ zfcp_erp_action_to_ready(erp_action);
++ ZFCP_LOG_DEBUG(
++ "Waking erp_thread of adapter with devno 0x%04x\n",
++ adapter->devno);
++ up(&adapter->erp_ready_sem);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
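++/*
++ * Queue handoff sketch for the helper above: an action moves from the
++ * 'running' to the 'ready' list via zfcp_erp_action_to_ready() and the
++ * erp thread is woken through erp_ready_sem; zfcp_erp_strategy() later
++ * moves the action back to the 'running' list before processing it.
++ */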
++
++/*
++ * function:	zfcp_erp_action_exists
++ *
++ * purpose:	checks whether an erp_action is in its adapter's ready
++ *		or running list
++ *
++ * returns: <0 erp_action not found in any list
++ * ZFCP_ERP_ACTION_READY erp_action is in ready list
++ * ZFCP_ERP_ACTION_RUNNING erp_action is in running list
++ *
++ * locks: erp_lock must be held
++ */
++static int zfcp_erp_action_exists(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = -EINVAL;
++ struct list_head *entry;
++ zfcp_erp_action_t *entry_erp_action;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ ZFCP_LOG_TRACE(
++ "enter (erp_action=0x%lx)\n",
++ (unsigned long)erp_action);
++
++ /* search in running list */
++ list_for_each(entry, &adapter->erp_running_head) {
++ entry_erp_action = list_entry(entry, zfcp_erp_action_t, list);
++ if (entry_erp_action == erp_action) {
++ retval = ZFCP_ERP_ACTION_RUNNING;
++ goto out;
++ }
++ }
++ /* search in ready list */
++ list_for_each(entry, &adapter->erp_ready_head) {
++ entry_erp_action = list_entry(entry, zfcp_erp_action_t, list);
++ if (entry_erp_action == erp_action) {
++ retval = ZFCP_ERP_ACTION_READY;
++ goto out;
++ }
++ }
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * purpose: checks current status of action (timed out, dismissed, ...)
++ * and does appropriate preparations (dismiss fsf request, ...)
++ *
++ * locks: called under erp_lock (disabled interrupts)
++ *
++ * returns: 0
++ */
++static int
++zfcp_erp_strategy_check_fsfreq(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ zfcp_fsf_req_t *fsf_req;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ if (erp_action->fsf_req) {
++ /* take lock to ensure that request is not being deleted meanwhile */
++ write_lock(&adapter->fsf_req_list_lock);
++ /* check whether fsf req does still exist */
++ list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list)
++ if (fsf_req == erp_action->fsf_req)
++ break;
++ if (fsf_req == erp_action->fsf_req) {
++ /* fsf_req still exists */
++ debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
++ debug_event(adapter->erp_dbf, 3, &fsf_req,
++ sizeof (unsigned long));
++ /* dismiss fsf_req of timed out or dismissed erp_action */
++ if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED |
++ ZFCP_STATUS_ERP_TIMEDOUT)) {
++ debug_text_event(adapter->erp_dbf, 3,
++ "a_ca_disreq");
++ fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
++ }
++ if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
++ ZFCP_LOG_NORMAL("error: erp step timed out "
++				"(action=%d, fsf_req=%p)\n",
++ erp_action->action,
++ erp_action->fsf_req);
++ }
++ /*
++ * If fsf_req is neither dismissed nor completed
++ * then keep it running asynchronously and don't mess
++ * with the association of erp_action and fsf_req.
++ */
++ if (fsf_req->status & (ZFCP_STATUS_FSFREQ_COMPLETED |
++ ZFCP_STATUS_FSFREQ_DISMISSED)) {
++ /* forget about association between fsf_req
++ and erp_action */
++ fsf_req->erp_action = NULL;
++ erp_action->fsf_req = NULL;
++ }
++ } else {
++ debug_text_event(adapter->erp_dbf, 3, "a_ca_gonereq");
++ /*
++ * even if this fsf_req has gone, forget about
++ * association between erp_action and fsf_req
++ */
++ erp_action->fsf_req = NULL;
++ }
++ write_unlock(&adapter->fsf_req_list_lock);
++ } else
++ debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq");
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * purpose:	generic handler for asynchronous events related to
++ *		erp_actions (normal completion, time-out, dismissing,
++ *		retry after low memory condition)
++ *
++ * note:	deleting the timer is not always required (e.g. in case
++ *		of a time-out), but a second deletion does no harm;
++ *		we keep it here for simplicity
++ *
++ * returns: 0 - there was an action to handle
++ * !0 - otherwise
++ */
++static int
++zfcp_erp_async_handler_nolock(zfcp_erp_action_t *erp_action,
++ unsigned long set_mask)
++{
++ int retval;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
++ debug_text_event(adapter->erp_dbf, 2, "a_asyh_ex");
++ debug_event(adapter->erp_dbf, 2, &erp_action->action,
++ sizeof (int));
++ if (!(set_mask & ZFCP_STATUS_ERP_TIMEDOUT))
++ del_timer_sync(&erp_action->timer);
++ erp_action->status |= set_mask;
++ zfcp_erp_action_ready(erp_action);
++ retval = 0;
++ } else {
++ /* action is ready or gone - nothing to do */
++ debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone");
++ debug_event(adapter->erp_dbf, 3, &erp_action->action,
++ sizeof (int));
++ retval = 1;
++ }
++
++ return retval;
++}
++
++/*
++ * purpose:	generic handler for asynchronous events related to
++ *		erp_actions (normal completion, time-out, dismissing,
++ *		retry after low memory condition)
++ *
++ * note:	deleting the timer is not always required (e.g. in case
++ *		of a time-out), but a second deletion does no harm;
++ *		we keep it here for simplicity
++ *
++ * returns: 0 - there was an action to handle
++ * !0 - otherwise
++ */
++static int
++zfcp_erp_async_handler(zfcp_erp_action_t *erp_action,
++ unsigned long set_mask)
++{
++ zfcp_adapter_t *adapter = erp_action->adapter;
++ unsigned long flags;
++ int retval;
++
++ write_lock_irqsave(&adapter->erp_lock, flags);
++ retval = zfcp_erp_async_handler_nolock(erp_action, set_mask);
++ write_unlock_irqrestore(&adapter->erp_lock, flags);
++
++ return retval;
++}
++
++/*
++ * purpose:	is called for an erp_action that slept waiting for
++ *		memory to become available;
++ *		triggers continuation of this action
++ */
++static void
++zfcp_erp_memwait_handler(unsigned long data)
++{
++ zfcp_erp_action_t *erp_action = (zfcp_erp_action_t *) data;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ debug_text_event(adapter->erp_dbf, 2, "a_mwh");
++ debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
++
++ zfcp_erp_async_handler(erp_action, 0);
++}
++
++/*
++ * purpose: is called if an asynchronous erp step timed out,
++ * action gets an appropriate flag and will be processed
++ * accordingly
++ */
++static void
++zfcp_erp_timeout_handler(unsigned long data)
++{
++ zfcp_erp_action_t *erp_action = (zfcp_erp_action_t *) data;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ debug_text_event(adapter->erp_dbf, 2, "a_th");
++ debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
++
++ zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT);
++}
++
++/*
++ * purpose:	is called for an erp_action which needs to be ended
++ *		even though it is not done yet;
++ *		this is usually required if a higher-level action is
++ *		initiated; the action gets an appropriate flag and will
++ *		be processed accordingly
++ *
++ * locks: erp_lock held (thus we need to call another handler variant)
++ */
++static int
++zfcp_erp_action_dismiss(zfcp_erp_action_t *erp_action)
++{
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ debug_text_event(adapter->erp_dbf, 2, "a_adis");
++ debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
++
++ zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED);
++
++ return 0;
++}
++
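++/*
++ * Overview sketch of the asynchronous entry points above; all of them
++ * funnel into zfcp_erp_async_handler{,_nolock} with a different mask:
++ *
++ *	zfcp_erp_memwait_handler	set_mask 0 (plain retry)
++ *	zfcp_erp_timeout_handler	ZFCP_STATUS_ERP_TIMEDOUT
++ *	zfcp_erp_action_dismiss		ZFCP_STATUS_ERP_DISMISSED (erp_lock held)
++ */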
++
++static int zfcp_erp_thread_setup(zfcp_adapter_t *adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ struct tq_struct *erp_task = NULL;
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter=0x%lx)\n",
++ (unsigned long)adapter);
++
++ erp_task = ZFCP_KMALLOC(sizeof(struct tq_struct), GFP_KERNEL);
++ if (!erp_task) {
++ ZFCP_LOG_INFO(
++ "error: Not enough memory for the error handler "
++ "of the adapter with devno 0x%04x, leaving.\n",
++ adapter->devno);
++ retval = -ENOMEM;
++ goto out;
++ }
++
++ atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_DONE, &adapter->status);
++
++ rwlock_init(&adapter->erp_lock);
++ INIT_LIST_HEAD(&adapter->erp_ready_head);
++ INIT_LIST_HEAD(&adapter->erp_running_head);
++ sema_init(&adapter->erp_ready_sem, 0);
++#ifdef ZFCP_ERP_DEBUG_SINGLE_STEP
++ sema_init(&adapter->erp_continue_sem, 0);
++#endif /* ZFCP_ERP_DEBUG_SINGLE_STEP */
++
++ INIT_TQUEUE(erp_task,
++ zfcp_erp_thread_setup_task,
++ adapter);
++ schedule_task(erp_task);
++
++ __wait_event_interruptible(
++ adapter->erp_thread_wqh,
++ atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_DONE, &adapter->status),
++ retval);
++
++ if (retval) {
++ ZFCP_LOG_INFO(
++ "error: The error recovery procedure thread creation "
++ "for the adapter with devno 0x%04x was aborted. An "
++			"OS signal was received.\n",
++ adapter->devno);
++ if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP,
++ &adapter->status)) {
++ zfcp_erp_thread_kill(adapter);
++ }
++ }
++
++ ZFCP_KFREE(erp_task, sizeof(*erp_task));
++
++ debug_text_event(adapter->erp_dbf, 5, "a_thset_ok");
++out:
++ ZFCP_LOG_TRACE("exit %d\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++static void zfcp_erp_thread_setup_task(void *data)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ zfcp_adapter_t *adapter = (zfcp_adapter_t *)data;
++ int retval;
++
++ ZFCP_LOG_TRACE(
++ "enter (data=0x%lx)\n",
++ (unsigned long)data);
++
++ retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD);
++ if (retval < 0) {
++ ZFCP_LOG_INFO(
++ "error: Out of resources. Could not create an "
++ "error recovery procedure thread "
++ "for the adapter with devno 0x%04x\n",
++ adapter->devno);
++ atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_DONE, &adapter->status);
++ wake_up_interruptible(&adapter->erp_thread_wqh);
++ } else {
++ ZFCP_LOG_DEBUG(
++ "created erp thread "
++ "for the adapter with devno 0x%04x\n",
++ adapter->devno);
++ debug_text_event(adapter->erp_dbf,5,"a_thset_t_ok");
++ }
++
++ ZFCP_LOG_TRACE("exit\n");
++
++ return;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ *
++ * context: process (i.e. proc-fs or rmmod/insmod)
++ *
++ * note: The caller of this routine ensures that the specified
++ * adapter has been shut down and that this operation
++ * has been completed. Thus, there are no pending erp_actions
++ * which would need to be handled here.
++ */
++static int zfcp_erp_thread_kill(zfcp_adapter_t *adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter=0x%lx)\n",
++ (unsigned long)adapter);
++
++ ZFCP_LOG_DEBUG(
++ "Killing erp_thread for the adapter with devno 0x%04x\n",
++ adapter->devno);
++ atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
++ up(&adapter->erp_ready_sem);
++ wait_event(
++ adapter->erp_thread_wqh,
++ !atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status));
++ atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
++
++ debug_text_event(adapter->erp_dbf,5,"a_thki_ok");
++
++ ZFCP_LOG_DEBUG(
++ "Killed erp_thread for the adapter with devno 0x%04x\n",
++ adapter->devno);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: should be run as a thread,
++ * goes through list of error recovery actions of associated adapter
++ * and delegates single action to execution
++ *
++ * returns:
++ */
++/* FIXME(design): static or not? */
++static int zfcp_erp_thread(void *data)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ zfcp_adapter_t *adapter = (zfcp_adapter_t*)data;
++ struct list_head *next;
++ zfcp_erp_action_t *erp_action;
++ unsigned long flags;
++
++ ZFCP_LOG_TRACE(
++ "enter (data=0x%lx)\n",
++ (unsigned long)data);
++
++ __sti();
++ daemonize();
++ /* disable all signals */
++	siginitsetinv(&current->blocked, 0);
++
++ sprintf(current->comm, "zfcp_erp_0x%04x", adapter->devno);
++
++ atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
++ ZFCP_LOG_DEBUG(
++ "erp thread for adapter with devno 0x%04x is up.\n",
++ adapter->devno);
++ debug_text_event(adapter->erp_dbf, 5, "a_th_run");
++ atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_DONE, &adapter->status);
++ wake_up_interruptible(&adapter->erp_thread_wqh);
++
++ /* (nearly) infinite loop */
++ for (;;) {
++ /* sleep as long as there is no action in 'ready' queue */
++ down_interruptible(&adapter->erp_ready_sem);
++#ifdef ZFCP_ERP_DEBUG_SINGLE_STEP
++ down(&adapter->erp_continue_sem);
++#endif /* ZFCP_ERP_DEBUG_SINGLE_STEP */
++ ZFCP_LOG_TRACE(
++ "erp thread woken on adapter with devno 0x%04x\n",
++ adapter->devno);
++
++ /* killing this thread */
++ if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status)) {
++ debug_text_event(adapter->erp_dbf,5,"a_th_kill");
++ ZFCP_LOG_DEBUG(
++ "Recognized kill flag for the erp_thread of "
++ "the adapter with devno 0x%04x\n",
++ adapter->devno);
++ read_lock_irqsave(&adapter->erp_lock, flags);
++ retval = !list_empty(&adapter->erp_ready_head) ||
++ !list_empty(&adapter->erp_running_head);
++ read_unlock_irqrestore(&adapter->erp_lock, flags);
++ if (retval) {
++ debug_text_exception(adapter->erp_dbf, 1, "a_th_bkill");
++ ZFCP_LOG_NORMAL(
++ "bug: error recovery thread is "
++ "shutting down although there are "
++ "error recovery actions pending at "
++ "adapter with devno 0x%04x\n",
++ adapter->devno);
++ /* don't exit erp to avoid potential system crash */
++ } else break;
++ }
++
++ ZFCP_PARANOIA {
++ /* there should be something in 'ready' queue */
++ /*
++			 * need the lock since list_empty checks for an entry
++			 * at the list head, and the list head is subject to
++			 * modification when another action is put on this
++			 * queue (only the list tail won't be modified then)
++ */
++ read_lock_irqsave(&adapter->erp_lock, flags);
++ retval = list_empty(&adapter->erp_ready_head);
++ read_unlock_irqrestore(&adapter->erp_lock, flags);
++ if (retval) {
++ debug_text_exception(adapter->erp_dbf, 1, "a_th_empt");
++ ZFCP_LOG_NORMAL(
++ "bug: Error recovery procedure thread "
++ "woken for empty action list on the "
++ "adapter with devno 0x%04x.\n",
++ adapter->devno);
++ /* sleep until next try */
++ continue;
++ }
++ }
++
++ /*
++		 * get next action to be executed; FIFO -> head.prev
++		 * (no lock needed since there is an action at the list tail
++		 * and the list tail won't be modified concurrently; only the
++		 * list head would be modified if another action is put on
++		 * this queue)
++ */
++ next = adapter->erp_ready_head.prev;
++ erp_action = list_entry(next, zfcp_erp_action_t, list);
++ /* process action (incl. [re]moving it from 'ready' queue) */
++ retval = zfcp_erp_strategy(erp_action);
++ }
++
++ atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
++ ZFCP_LOG_DEBUG(
++ "erp thread for adapter with devno 0x%04x is down.\n",
++ adapter->devno);
++
++ wake_up(&adapter->erp_thread_wqh);
++
++ debug_text_event(adapter->erp_dbf, 5, "a_th_stop");
++
++ ZFCP_LOG_TRACE("exit %d\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
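++/*
++ * Thread lifecycle sketch, as implemented above: zfcp_erp_thread_setup()
++ * schedules a task which calls kernel_thread(); the new thread raises
++ * ..._ERP_THREAD_UP and ..._ERP_THREAD_DONE and wakes erp_thread_wqh;
++ * zfcp_erp_thread_kill() sets ..._ERP_THREAD_KILL, posts erp_ready_sem
++ * and waits on erp_thread_wqh until ..._ERP_THREAD_UP is cleared.
++ */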
++
++/*
++ * function:
++ *
++ * purpose: drives single error recovery action and schedules higher and
++ * subordinate actions, if necessary
++ *
++ * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
++ * ZFCP_ERP_SUCCEEDED - action finished successfully (action dequeued)
++ * ZFCP_ERP_FAILED - action finished unsuccessfully (action dequeued)
++ * ZFCP_ERP_EXIT - action finished (action dequeued), target offline
++ * ZFCP_ERP_DISMISSED - action canceled (action dequeued)
++ */
++static int zfcp_erp_strategy(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++ zfcp_port_t *port = erp_action->port;
++ zfcp_unit_t *unit = erp_action->unit;
++ int action = erp_action->action;
++ u32 status = erp_action->status;
++ unsigned long flags;
++
++ ZFCP_LOG_TRACE(
++ "enter (erp_action=0x%lx)\n",
++ (unsigned long)erp_action);
++
++ write_lock_irqsave(&adapter->erp_lock, flags);
++ /* dequeue dismissed action and leave, if required */
++ retval = zfcp_erp_strategy_check_action(erp_action, retval);
++ if (retval == ZFCP_ERP_DISMISSED) {
++ debug_text_event(adapter->erp_dbf, 4, "a_st_dis1");
++ goto unlock;
++ }
++
++ /*
++ * move action to 'running' queue before processing it
++ * (to avoid a race condition regarding moving the
++ * action to the 'running' queue and back)
++ */
++ zfcp_erp_action_to_running(erp_action);
++
++ write_unlock_irqrestore(&adapter->erp_lock, flags);
++ retval = zfcp_erp_strategy_do_action(erp_action);
++ write_lock_irqsave(&adapter->erp_lock, flags);
++
++ /*
++ * check for dismissed status again to avoid follow-up actions,
++ * failing of targets and so on for dismissed actions
++ */
++ retval = zfcp_erp_strategy_check_action(erp_action, retval);
++
++ switch (retval) {
++ case ZFCP_ERP_DISMISSED:
++ /* leave since this action has ridden to its ancestors */
++ debug_text_event(adapter->erp_dbf, 6, "a_st_dis2");
++ goto unlock;
++ case ZFCP_ERP_NOMEM :
++ /* no memory to continue immediately, let it sleep */
++ debug_text_event(adapter->erp_dbf, 2, "a_st_memw");
++ retval = zfcp_erp_strategy_memwait(erp_action);
++ goto unlock;
++ case ZFCP_ERP_CONTINUES :
++ /* leave since this action runs asynchronously */
++ debug_text_event(adapter->erp_dbf, 6, "a_st_cont");
++ goto unlock;
++ }
++
++ /* ok, finished action (whatever its result is) */
++
++ /* check for unrecoverable targets */
++ retval = zfcp_erp_strategy_check_target(erp_action, retval);
++
++ /* action must be dequeued (here to allow for further ones) */
++ zfcp_erp_action_dequeue(erp_action);
++
++ /*
++ * put this target through the erp mill again if someone has
++ * requested to change the status of a target being online
++ * to offline or the other way around
++ * (old retval is preserved if nothing has to be done here)
++ */
++ retval = zfcp_erp_strategy_statechange(
++ action, status, adapter, port, unit, retval);
++
++ /*
++ * leave if target is in permanent error state or if
++ * action is repeated in order to process state change
++ */
++ if (retval == ZFCP_ERP_EXIT) {
++ debug_text_event(adapter->erp_dbf, 2, "a_st_exit");
++ goto unlock;
++ }
++
++ /* trigger follow up actions */
++ zfcp_erp_strategy_followup_actions(
++ action, adapter, port, unit, retval);
++
++unlock:
++ write_unlock_irqrestore(&adapter->erp_lock, flags);
++
++ /*
++	 * two final checks in case the erp queues are about to become empty
++	 * (skip them if the last action evaluated was dismissed,
++	 * since that clearly indicates that there is more to come):
++	 * - close the name server port if it is still open (enqueues another, final action)
++	 * - otherwise, wake up whoever wants to be woken when we are done with erp
++ */
++ if (retval != ZFCP_ERP_DISMISSED)
++ zfcp_erp_strategy_check_queues(adapter);
++
++ debug_text_event(adapter->erp_dbf, 6, "a_st_done");
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns: ZFCP_ERP_DISMISSED - if action has been dismissed
++ * retval - otherwise
++ */
++static int
++zfcp_erp_strategy_check_action(zfcp_erp_action_t *erp_action, int retval)
++{
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ zfcp_erp_strategy_check_fsfreq(erp_action);
++
++ debug_event(adapter->erp_dbf, 5, &erp_action->action, sizeof (int));
++ if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
++ debug_text_event(adapter->erp_dbf, 3, "a_stcd_dis");
++ zfcp_erp_action_dequeue(erp_action);
++ retval = ZFCP_ERP_DISMISSED;
++ } else
++ debug_text_event(adapter->erp_dbf, 5, "a_stcd_nodis");
++
++ return retval;
++}
++
++/*
++ * function:	zfcp_erp_strategy_do_action
++ *
++ * purpose:	dispatches the erp_action to the strategy routine
++ *		matching its action code
++ *
++ * returns:	result of the strategy routine
++ */
++static int zfcp_erp_strategy_do_action(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = ZFCP_ERP_FAILED;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ ZFCP_LOG_TRACE(
++ "enter (erp_action=0x%lx)\n",
++ (unsigned long)erp_action);
++ /*
++	 * no lock in subsequent strategy routines
++	 * (this allows these routines to call schedule, e.g.
++ * kmalloc with such flags or qdio_initialize & friends)
++ */
++
++ if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
++ /* DEBUG */
++ //unsigned long timeout = 1000 * HZ;
++
++ debug_text_event(adapter->erp_dbf, 3, "a_stda_tim");
++ debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof(int));
++
++ /* DEBUG */
++ //__ZFCP_WAIT_EVENT_TIMEOUT(timeout, 0);
++ }
++	/* Note: in case of timeout, the separate strategies will fail
++ anyhow. No need for a special action. Even worse, a nameserver
++ failure would not wake up waiting ports without the call.
++ */
++ /* try to execute/continue action as far as possible */
++ switch (erp_action->action) {
++ case ZFCP_ERP_ACTION_REOPEN_ADAPTER :
++ retval = zfcp_erp_adapter_strategy(erp_action);
++ break;
++ case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED :
++ retval = zfcp_erp_port_forced_strategy(erp_action);
++ break;
++ case ZFCP_ERP_ACTION_REOPEN_PORT :
++ retval = zfcp_erp_port_strategy(erp_action);
++ break;
++ case ZFCP_ERP_ACTION_REOPEN_UNIT :
++ retval = zfcp_erp_unit_strategy(erp_action);
++ break;
++ default :
++ debug_text_exception(adapter->erp_dbf, 1, "a_stda_bug");
++ debug_event(adapter->erp_dbf, 1, &erp_action->action, sizeof(int));
++ ZFCP_LOG_NORMAL("bug: Unknown error recovery procedure "
++ "action requested on the adapter with "
++ "devno 0x%04x (debug info %d)\n",
++ erp_action->adapter->devno,
++ erp_action->action);
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: triggers retry of this action after a certain amount of time
++ * by means of timer provided by erp_action
++ *
++ * returns: ZFCP_ERP_CONTINUES - erp_action sleeps in erp running queue
++ */
++static int zfcp_erp_strategy_memwait(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = ZFCP_ERP_CONTINUES;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ ZFCP_LOG_TRACE(
++ "enter (erp_action=0x%lx)\n",
++ (unsigned long)erp_action);
++
++ debug_text_event(adapter->erp_dbf, 6, "a_mwinit");
++ debug_event(adapter->erp_dbf, 6, &erp_action->action, sizeof(int));
++ init_timer(&erp_action->timer);
++ erp_action->timer.function = zfcp_erp_memwait_handler;
++ erp_action->timer.data = (unsigned long)erp_action;
++ erp_action->timer.expires = jiffies + ZFCP_ERP_MEMWAIT_TIMEOUT;
++ add_timer(&erp_action->timer);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_erp_adapter_failed
++ *
++ * purpose: sets the adapter and all underlying devices to ERP_FAILED
++ *
++ */
++static void zfcp_erp_adapter_failed(zfcp_adapter_t *adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter=0x%lx)\n",
++ (unsigned long)adapter);
++
++ zfcp_erp_modify_adapter_status(adapter,
++ ZFCP_STATUS_COMMON_ERP_FAILED,
++ ZFCP_SET);
++ ZFCP_LOG_NORMAL(
++ "Adapter recovery failed on the "
++ "adapter with devno 0x%04x.\n",
++ adapter->devno);
++ debug_text_event(adapter->erp_dbf, 2, "a_afail");
++
++
++ ZFCP_LOG_TRACE("exit\n");
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_erp_port_failed
++ *
++ * purpose: sets the port and all underlying devices to ERP_FAILED
++ *
++ */
++static void zfcp_erp_port_failed(zfcp_port_t *port)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++
++ ZFCP_LOG_TRACE("enter (port=0x%lx)\n",
++ (unsigned long)port);
++
++ zfcp_erp_modify_port_status(port,
++ ZFCP_STATUS_COMMON_ERP_FAILED,
++ ZFCP_SET);
++ ZFCP_LOG_NORMAL("Port recovery failed on the "
++ "port with WWPN 0x%016Lx at the "
++ "adapter with devno 0x%04x.\n",
++ (llui_t)port->wwpn,
++ port->adapter->devno);
++ debug_text_event(port->adapter->erp_dbf, 2, "p_pfail");
++ debug_event(port->adapter->erp_dbf, 2, &port->wwpn, sizeof(wwn_t));
++
++ ZFCP_LOG_TRACE("exit\n");
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_erp_unit_failed
++ *
++ * purpose: sets the unit to ERP_FAILED
++ *
++ */
++static void zfcp_erp_unit_failed(zfcp_unit_t *unit)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++
++ ZFCP_LOG_TRACE(
++ "enter (unit=0x%lx)\n",
++ (unsigned long)unit);
++
++ zfcp_erp_modify_unit_status(unit,
++ ZFCP_STATUS_COMMON_ERP_FAILED,
++ ZFCP_SET);
++ ZFCP_LOG_NORMAL(
++ "Unit recovery failed on the unit with FCP_LUN 0x%016Lx "
++ "connected to the port with WWPN 0x%016Lx at the "
++ "adapter with devno 0x%04x.\n",
++ (llui_t)unit->fcp_lun,
++ (llui_t)unit->port->wwpn,
++ unit->port->adapter->devno);
++ debug_text_event(unit->port->adapter->erp_dbf, 2, "u_ufail");
++ debug_event(unit->port->adapter->erp_dbf, 2,
++ &unit->fcp_lun, sizeof(fcp_lun_t));
++
++ ZFCP_LOG_TRACE("exit\n");
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_erp_strategy_check_target
++ *
++ * purpose:	increments the erp action count on the device currently in
++ *		recovery if the action failed, or resets the count in case of
++ *		success; if a maximum count is exceeded the device is marked
++ *		as ERP_FAILED; the 'blocked' state of a successfully
++ *		recovered target is reset
++ *
++ * returns: ZFCP_ERP_CONTINUES - action continues (not considered)
++ * ZFCP_ERP_SUCCEEDED - action finished successfully
++ * ZFCP_ERP_EXIT - action failed and will not continue
++ */
++static int zfcp_erp_strategy_check_target(
++ zfcp_erp_action_t *erp_action,
++ int result)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ zfcp_adapter_t *adapter = erp_action->adapter;
++ zfcp_port_t *port = erp_action->port;
++ zfcp_unit_t *unit = erp_action->unit;
++
++ ZFCP_LOG_TRACE(
++ "enter (erp_action=0x%lx, result=%d)\n",
++ (unsigned long)erp_action,
++ result);
++
++ debug_text_event(adapter->erp_dbf, 5, "a_stct_norm");
++ debug_event(adapter->erp_dbf, 5, &erp_action->action, sizeof(int));
++ debug_event(adapter->erp_dbf, 5, &result, sizeof(int));
++
++ switch (erp_action->action) {
++
++ case ZFCP_ERP_ACTION_REOPEN_UNIT :
++ result = zfcp_erp_strategy_check_unit(unit, result);
++ break;
++
++ case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED :
++ case ZFCP_ERP_ACTION_REOPEN_PORT :
++ result = zfcp_erp_strategy_check_port(port, result);
++ break;
++
++ case ZFCP_ERP_ACTION_REOPEN_ADAPTER :
++ result = zfcp_erp_strategy_check_adapter(adapter, result);
++ break;
++ }
++
++ ZFCP_LOG_TRACE("exit (%d)\n", result);
++
++ return result;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:	zfcp_erp_strategy_statechange
++ *
++ * purpose:	puts the target through recovery again if its desired
++ *		online/offline state changed while the action was running
++ *
++ * returns:	ZFCP_ERP_EXIT if recovery is re-triggered, retval otherwise
++ */
++static int zfcp_erp_strategy_statechange(
++ int action,
++ u32 status,
++ zfcp_adapter_t *adapter,
++ zfcp_port_t *port,
++ zfcp_unit_t *unit,
++ int retval)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ ZFCP_LOG_TRACE(
++ "enter (action=%d status=0x%x adapter=0x%lx port=0x%lx "
++ "unit=0x%lx retval=0x%x)\n",
++ action,
++ status,
++ (unsigned long)adapter,
++ (unsigned long)port,
++ (unsigned long)unit,
++ retval);
++ debug_text_event(adapter->erp_dbf, 5, "a_stsc");
++ debug_event(adapter->erp_dbf, 5, &action, sizeof(int));
++
++ switch (action) {
++
++ case ZFCP_ERP_ACTION_REOPEN_ADAPTER :
++ if (zfcp_erp_strategy_statechange_detected(&adapter->status, status)) {
++ zfcp_erp_adapter_reopen_internal(
++ adapter,
++ ZFCP_STATUS_COMMON_ERP_FAILED);
++ retval = ZFCP_ERP_EXIT;
++ }
++ break;
++
++ case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED :
++ case ZFCP_ERP_ACTION_REOPEN_PORT :
++ if (zfcp_erp_strategy_statechange_detected(&port->status, status)) {
++ zfcp_erp_port_reopen_internal(
++ port,
++ ZFCP_STATUS_COMMON_ERP_FAILED);
++ retval = ZFCP_ERP_EXIT;
++ }
++ break;
++
++ case ZFCP_ERP_ACTION_REOPEN_UNIT :
++ if (zfcp_erp_strategy_statechange_detected(&unit->status, status)) {
++ zfcp_erp_unit_reopen_internal(
++ unit,
++ ZFCP_STATUS_COMMON_ERP_FAILED);
++ retval = ZFCP_ERP_EXIT;
++ }
++ break;
++
++ default :
++ debug_text_exception(adapter->erp_dbf, 1, "a_stsc_bug");
++ debug_event(adapter->erp_dbf, 1, &action, sizeof(int));
++ ZFCP_LOG_NORMAL(
++ "bug: Unknown error recovery procedure "
++ "action requested on the adapter with "
++ "devno 0x%04x (debug info %d)\n",
++ adapter->devno,
++ action);
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ */
++static inline int zfcp_erp_strategy_statechange_detected(atomic_t *target_status, u32 erp_status)
++{
++ return
++ /* take it online */
++ (atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, target_status) &&
++ (ZFCP_STATUS_ERP_CLOSE_ONLY & erp_status)) ||
++ /* take it offline */
++ (!atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, target_status) &&
++ !(ZFCP_STATUS_ERP_CLOSE_ONLY & erp_status));
++}
++
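++/*
++ * Worked truth table for the predicate above, assuming COMMON_RUNNING
++ * reflects the desired online state of the target and ERP_CLOSE_ONLY
++ * marks an action that merely closed it:
++ *
++ *	RUNNING	CLOSE_ONLY	state change detected?
++ *	yes	yes		yes (target wanted online meanwhile)
++ *	yes	no		no
++ *	no	yes		no
++ *	no	no		yes (target wanted offline meanwhile)
++ */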
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ */
++static int zfcp_erp_strategy_check_unit(zfcp_unit_t *unit, int result)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ ZFCP_LOG_TRACE(
++ "enter (unit=0x%lx result=%d)\n",
++ (unsigned long)unit,
++ result);
++
++ debug_text_event(unit->port->adapter->erp_dbf, 5, "u_stct");
++ debug_event(unit->port->adapter->erp_dbf, 5, &unit->fcp_lun, sizeof(fcp_lun_t));
++
++ switch (result) {
++ case ZFCP_ERP_SUCCEEDED :
++ atomic_set(&unit->erp_counter, 0);
++ zfcp_erp_unit_unblock(unit);
++ break;
++ case ZFCP_ERP_FAILED :
++ atomic_inc(&unit->erp_counter);
++ if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS)
++ zfcp_erp_unit_failed(unit);
++ break;
++ case ZFCP_ERP_EXIT :
++ /* nothing */
++ break;
++ }
++
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &unit->status)) {
++ zfcp_erp_unit_block(unit, 0); /* for ZFCP_ERP_SUCCEEDED */
++ result = ZFCP_ERP_EXIT;
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", result);
++
++ return result;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ */
++static int zfcp_erp_strategy_check_port(zfcp_port_t *port, int result)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ ZFCP_LOG_TRACE(
++		"enter (port=0x%lx result=%d)\n",
++ (unsigned long)port,
++ result);
++
++ debug_text_event(port->adapter->erp_dbf, 5, "p_stct");
++ debug_event(port->adapter->erp_dbf, 5, &port->wwpn, sizeof(wwn_t));
++
++ switch (result) {
++ case ZFCP_ERP_SUCCEEDED :
++ atomic_set(&port->erp_counter, 0);
++ zfcp_erp_port_unblock(port);
++ break;
++ case ZFCP_ERP_FAILED :
++ atomic_inc(&port->erp_counter);
++ if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS)
++ zfcp_erp_port_failed(port);
++ break;
++ case ZFCP_ERP_EXIT :
++ /* nothing */
++ break;
++ }
++
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &port->status)) {
++ zfcp_erp_port_block(port, 0); /* for ZFCP_ERP_SUCCEEDED */
++ result = ZFCP_ERP_EXIT;
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", result);
++
++ return result;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ */
++static int zfcp_erp_strategy_check_adapter(zfcp_adapter_t *adapter, int result)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter=0x%lx result=%d)\n",
++ (unsigned long)adapter,
++ result);
++
++ debug_text_event(adapter->erp_dbf, 5, "a_stct");
++
++ switch (result) {
++ case ZFCP_ERP_SUCCEEDED :
++ atomic_set(&adapter->erp_counter, 0);
++ zfcp_erp_adapter_unblock(adapter);
++ break;
++ case ZFCP_ERP_FAILED :
++ atomic_inc(&adapter->erp_counter);
++ if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS)
++ zfcp_erp_adapter_failed(adapter);
++ break;
++ case ZFCP_ERP_EXIT :
++ /* nothing */
++ break;
++ }
++
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) {
++ zfcp_erp_adapter_block(adapter, 0); /* for ZFCP_ERP_SUCCEEDED */
++ result = ZFCP_ERP_EXIT;
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", result);
++
++ return result;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: remaining things in good cases,
++ * escalation in bad cases
++ *
++ * returns:
++ */
++static int zfcp_erp_strategy_followup_actions(
++ int action,
++ zfcp_adapter_t *adapter,
++ zfcp_port_t *port,
++ zfcp_unit_t *unit,
++ int status)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ ZFCP_LOG_TRACE(
++ "enter (action=%d adapter=0x%lx port=0x%lx "
++ "unit=0x%lx status=0x%x)\n",
++ action,
++ (unsigned long)adapter,
++ (unsigned long)port,
++ (unsigned long)unit,
++ status);
++ debug_text_event(adapter->erp_dbf, 5, "a_stfol");
++ debug_event(adapter->erp_dbf, 5, &action, sizeof(int));
++
++ /* initiate follow-up actions depending on success of finished action */
++ switch (action) {
++
++ case ZFCP_ERP_ACTION_REOPEN_ADAPTER :
++ if (status == ZFCP_ERP_SUCCEEDED)
++ zfcp_erp_port_reopen_all_internal(adapter, 0);
++ else zfcp_erp_adapter_reopen_internal(adapter, 0);
++ break;
++
++ case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED :
++ if (status == ZFCP_ERP_SUCCEEDED)
++ zfcp_erp_port_reopen_internal(port, 0);
++ else zfcp_erp_adapter_reopen_internal(adapter, 0);
++ break;
++
++ case ZFCP_ERP_ACTION_REOPEN_PORT :
++ if (status == ZFCP_ERP_SUCCEEDED)
++ zfcp_erp_unit_reopen_all_internal(port, 0);
++ else zfcp_erp_port_forced_reopen_internal(port, 0);
++ break;
++
++ case ZFCP_ERP_ACTION_REOPEN_UNIT :
++ if (status == ZFCP_ERP_SUCCEEDED)
++ ;/* no further action */
++ else zfcp_erp_port_reopen_internal(unit->port, 0);
++ break;
++
++ default :
++ debug_text_exception(adapter->erp_dbf, 1, "a_stda_bug");
++ debug_event(adapter->erp_dbf, 1, &action, sizeof(int));
++ ZFCP_LOG_NORMAL(
++ "bug: Unknown error recovery procedure "
++ "action requested on the adapter with "
++ "devno 0x%04x (debug info %d)\n",
++ adapter->devno,
++ action);
++ }
++
++ ZFCP_LOG_TRACE("exit\n");
++
++ return 0;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
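++/*
++ * Escalation ladder implied by the switch above; on failure each action
++ * retries at the next-wider scope, on success it walks back down:
++ *
++ *	unit reopen		fails -> port reopen
++ *	port reopen		fails -> forced port reopen
++ *	forced port reopen	fails -> adapter reopen
++ *	adapter reopen		fails -> adapter reopen (retry)
++ */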
++
++/*
++ * function:	zfcp_erp_strategy_check_queues
++ *
++ * purpose:	clears the ERP_PENDING flag and wakes up waiters on
++ *		erp_done_wqh once both erp queues (ready and running)
++ *		are empty
++ */
++static int zfcp_erp_strategy_check_queues(zfcp_adapter_t *adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ unsigned long flags;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ read_lock_irqsave(&adapter->erp_lock, flags);
++ if (list_empty(&adapter->erp_ready_head) &&
++ list_empty(&adapter->erp_running_head)) {
++ debug_text_event(adapter->erp_dbf, 4, "a_cq_wake");
++ atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
++ &adapter->status);
++ wake_up(&adapter->erp_done_wqh);
++ } else debug_text_event(adapter->erp_dbf, 5, "a_cq_notempty");
++ read_unlock_irqrestore(&adapter->erp_lock, flags);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:	zfcp_erp_wait
++ *
++ * purpose:	waits until error recovery for the given adapter is done,
++ *		i.e. until ZFCP_STATUS_ADAPTER_ERP_PENDING is cleared
++ *
++ * returns:	0
++ */
++int zfcp_erp_wait(zfcp_adapter_t *adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ wait_event(
++ adapter->erp_done_wqh,
++ !atomic_test_mask(
++ ZFCP_STATUS_ADAPTER_ERP_PENDING,
++ &adapter->status));
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_erp_modify_adapter_status
++ *
++ * purpose:
++ *
++ */
++static void zfcp_erp_modify_adapter_status(zfcp_adapter_t *adapter,
++ u32 mask,
++ int set_or_clear)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ zfcp_port_t *port;
++ unsigned long flags;
++ u32 common_mask=mask & ZFCP_COMMON_FLAGS;
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter=0x%lx)\n",
++ (unsigned long)adapter);
++
++ if(set_or_clear==ZFCP_SET) {
++ atomic_set_mask(mask, &adapter->status);
++ debug_text_event(adapter->erp_dbf, 3, "a_mod_as_s");
++ } else {
++ atomic_clear_mask(mask, &adapter->status);
++ if(mask & ZFCP_STATUS_COMMON_ERP_FAILED) {
++ atomic_set(&adapter->erp_counter, 0);
++ }
++ debug_text_event(adapter->erp_dbf, 3, "a_mod_as_c");
++ }
++ debug_event(adapter->erp_dbf, 3, &mask, sizeof(u32));
++
++ if(!common_mask) goto out;
++ /* Deal with all underlying devices, only pass common_mask */
++ read_lock_irqsave(&adapter->port_list_lock, flags);
++ ZFCP_FOR_EACH_PORT(adapter, port) {
++ zfcp_erp_modify_port_status(port,
++ common_mask,
++ set_or_clear);
++ }
++ read_unlock_irqrestore(&adapter->port_list_lock, flags);
++ out:
++ ZFCP_LOG_TRACE("exit\n");
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_erp_modify_port_status
++ *
++ * purpose: sets the port and all underlying devices to ERP_FAILED
++ *
++ */
++static void zfcp_erp_modify_port_status(zfcp_port_t *port,
++ u32 mask,
++ int set_or_clear)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ zfcp_unit_t *unit;
++ unsigned long flags;
++ u32 common_mask=mask & ZFCP_COMMON_FLAGS;
++
++ ZFCP_LOG_TRACE(
++ "enter (port=0x%lx)\n",
++ (unsigned long)port);
++
++ if(set_or_clear==ZFCP_SET) {
++ atomic_set_mask(mask, &port->status);
++ debug_text_event(port->adapter->erp_dbf, 3,
++ "p_mod_ps_s");
++ } else {
++ atomic_clear_mask(mask, &port->status);
++ if(mask & ZFCP_STATUS_COMMON_ERP_FAILED) {
++ atomic_set(&port->erp_counter, 0);
++ }
++ debug_text_event(port->adapter->erp_dbf, 3,
++ "p_mod_ps_c");
++ }
++ debug_event(port->adapter->erp_dbf, 3, &port->wwpn, sizeof(wwn_t));
++ debug_event(port->adapter->erp_dbf, 3, &mask, sizeof(u32));
++
++ if(!common_mask) goto out;
++ /* Modify status of all underlying devices, only pass common mask */
++ read_lock_irqsave(&port->unit_list_lock, flags);
++ ZFCP_FOR_EACH_UNIT(port, unit) {
++ zfcp_erp_modify_unit_status(unit,
++ common_mask,
++ set_or_clear);
++ }
++ read_unlock_irqrestore(&port->unit_list_lock, flags);
++ out:
++ ZFCP_LOG_TRACE("exit\n");
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_erp_modify_unit_status
++ *
++ * purpose: sets the unit to ERP_FAILED
++ *
++ */
++static void zfcp_erp_modify_unit_status(zfcp_unit_t *unit,
++ u32 mask,
++ int set_or_clear)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ ZFCP_LOG_TRACE(
++ "enter (unit=0x%lx)\n",
++ (unsigned long)unit);
++
++ if(set_or_clear==ZFCP_SET) {
++ atomic_set_mask(mask, &unit->status);
++ debug_text_event(unit->port->adapter->erp_dbf, 3, "u_mod_us_s");
++ } else {
++ atomic_clear_mask(mask, &unit->status);
++ if(mask & ZFCP_STATUS_COMMON_ERP_FAILED) {
++ atomic_set(&unit->erp_counter, 0);
++ }
++ debug_text_event(unit->port->adapter->erp_dbf, 3, "u_mod_us_c");
++ }
++ debug_event(unit->port->adapter->erp_dbf, 3, &unit->fcp_lun, sizeof(fcp_lun_t));
++ debug_event(unit->port->adapter->erp_dbf, 3, &mask, sizeof(u32));
++
++ ZFCP_LOG_TRACE("exit\n");
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
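++/*
++ * Propagation sketch for the three modifiers above: only bits within
++ * ZFCP_COMMON_FLAGS cascade downwards, so e.g.
++ *
++ *	zfcp_erp_modify_adapter_status(adapter,
++ *		ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
++ *
++ * marks the adapter itself, every port, and every unit as failed,
++ * while adapter-only bits stay at adapter level.
++ */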
++
++/*
++ * function:
++ *
++ * purpose:	Wrapper for zfcp_erp_port_reopen_all_internal,
++ *		used to ensure the correct locking
++ *
++ * returns:	0	- initiated action successfully
++ * <0 - failed to initiate action
++ */
++static int zfcp_erp_port_reopen_all(
++ zfcp_adapter_t *adapter,
++ int clear_mask)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++ unsigned long flags;
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter=0x%lx clear_mask=0x%x)\n",
++ (unsigned long)adapter,
++ clear_mask);
++
++ write_lock_irqsave(&adapter->erp_lock, flags);
++ retval = zfcp_erp_port_reopen_all_internal(adapter, clear_mask);
++ write_unlock_irqrestore(&adapter->erp_lock, flags);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ */
++static int zfcp_erp_port_reopen_all_internal(
++ zfcp_adapter_t *adapter,
++ int clear_mask)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ unsigned long flags;
++ zfcp_port_t *port;
++
++ ZFCP_LOG_TRACE(
++ "enter (adapter=0x%lx clear_mask=0x%x)\n",
++ (unsigned long)adapter,
++ clear_mask);
++
++ read_lock_irqsave(&adapter->port_list_lock, flags);
++ ZFCP_FOR_EACH_PORT(adapter, port) {
++ if (!atomic_test_mask(ZFCP_STATUS_PORT_NAMESERVER, &port->status))
++ zfcp_erp_port_reopen_internal(port, clear_mask);
++ }
++ read_unlock_irqrestore(&adapter->port_list_lock, flags);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ */
++static int zfcp_erp_unit_reopen_all_internal(
++ zfcp_port_t *port,
++ int clear_mask)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ unsigned long flags;
++ zfcp_unit_t *unit;
++
++ ZFCP_LOG_TRACE(
++ "enter (port=0x%lx clear_mask=0x%x)\n",
++ (unsigned long)port,
++ clear_mask);
++
++ read_lock_irqsave(&port->unit_list_lock, flags);
++ ZFCP_FOR_EACH_UNIT(port, unit)
++ zfcp_erp_unit_reopen_internal(unit, clear_mask);
++ read_unlock_irqrestore(&port->unit_list_lock, flags);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: this routine executes the 'Reopen Adapter' action
++ * (the entire action is processed synchronously, since
++ * there are no actions which might be run concurrently
++ *		by definition)
++ *
++ * returns: ZFCP_ERP_SUCCEEDED - action finished successfully
++ * ZFCP_ERP_FAILED - action finished unsuccessfully
++ */
++static int zfcp_erp_adapter_strategy(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++ unsigned long timeout;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ retval = zfcp_erp_adapter_strategy_close(erp_action);
++ if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
++ retval = ZFCP_ERP_EXIT;
++ else retval = zfcp_erp_adapter_strategy_open(erp_action);
++
++ debug_text_event(adapter->erp_dbf, 3, "a_ast/ret");
++ debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof(int));
++ debug_event(adapter->erp_dbf, 3, &retval, sizeof(int));
++
++ if(retval==ZFCP_ERP_FAILED) {
++ /*INFO*/
++ ZFCP_LOG_NORMAL("Waiting to allow the adapter with devno "
++ "0x%04x to recover itself\n",
++ adapter->devno);
++ /*
++ * SUGGESTION: substitute by
++ * timeout = ZFCP_TYPE2_RECOVERY_TIME;
++ * __ZFCP_WAIT_EVENT_TIMEOUT(timeout, 0);
++ */
++ timeout=ZFCP_TYPE2_RECOVERY_TIME;
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ do {
++ timeout=schedule_timeout(timeout);
++ } while (timeout);
++ /* FIXME: why no current->state = TASK_RUNNING ? */
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns: ZFCP_ERP_SUCCEEDED - action finished successfully
++ * ZFCP_ERP_FAILED - action finished unsuccessfully
++ */
++static int zfcp_erp_adapter_strategy_close(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->adapter->status);
++ retval = zfcp_erp_adapter_strategy_generic(erp_action, 1);
++ atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->adapter->status);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns: ZFCP_ERP_SUCCEEDED - action finished successfully
++ * ZFCP_ERP_FAILED - action finished unsuccessfully
++ */
++static int zfcp_erp_adapter_strategy_open(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->adapter->status);
++ retval = zfcp_erp_adapter_strategy_generic(erp_action, 0);
++ atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->adapter->status);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_erp_adapter_strategy_generic
++ *
++ * purpose: common part of the 'Reopen Adapter' action: tears the
++ * adapter down (close) or brings it up by setting up the
++ * irq, QDIO and FSF in turn (open)
++ *
++ * returns:
++ */
++static int zfcp_erp_adapter_strategy_generic(zfcp_erp_action_t *erp_action, int close)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = ZFCP_ERP_SUCCEEDED;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ if (close)
++ goto close_only;
++
++ retval = zfcp_erp_adapter_strategy_open_irq(erp_action);
++ if (retval != ZFCP_ERP_SUCCEEDED) {
++ ZFCP_LOG_INFO(
++ "error: Could not set up the irq for the "
++ "adapter with devno 0x%04x.\n",
++ adapter->devno);
++ goto failed_irq;
++ }
++ ZFCP_LOG_DEBUG(
++ "got irq %d (adapter devno=0x%04x)\n",
++ adapter->irq,
++ adapter->devno);
++
++ /* setup QDIO for this adapter */
++ retval = zfcp_erp_adapter_strategy_open_qdio(erp_action);
++ if (retval != ZFCP_ERP_SUCCEEDED) {
++ ZFCP_LOG_INFO(
++ "error: Could not start QDIO (data transfer mechanism) "
++ "adapter with devno 0x%04x.\n",
++ adapter->devno);
++ goto failed_qdio;
++ }
++ ZFCP_LOG_DEBUG("QDIO started (adapter devno=0x%04x)\n", adapter->devno);
++
++ /* setup FSF for this adapter */
++ retval = zfcp_erp_adapter_strategy_open_fsf(erp_action);
++ if (retval != ZFCP_ERP_SUCCEEDED) {
++ ZFCP_LOG_INFO(
++ "error: Could not communicate with the adapter with "
++ "devno 0x%04x. Card may be busy.\n",
++ adapter->devno);
++ goto failed_openfcp;
++ }
++ ZFCP_LOG_DEBUG("FSF started (adapter devno=0x%04x)\n", adapter->devno);
++
++ /* Success */
++ atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &erp_action->adapter->status);
++ goto out;
++
++close_only:
++ atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
++ &erp_action->adapter->status);
++failed_openfcp:
++ zfcp_erp_adapter_strategy_close_qdio(erp_action);
++ zfcp_erp_adapter_strategy_close_fsf(erp_action);
++
++failed_qdio:
++ zfcp_erp_adapter_strategy_close_irq(erp_action);
++
++failed_irq:
++
++ /* NOP */
++out:
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: gets irq associated with devno and requests irq
++ *
++ * returns:
++ */
++static int zfcp_erp_adapter_strategy_open_irq(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = ZFCP_ERP_FAILED;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++#if 0
++ retval = zfcp_adapter_detect(adapter);
++ if (retval == -ENOMEM) {
++ retval = ZFCP_ERP_NOMEM;
++ goto out;
++ }
++ if (retval != 0) {
++ retval = ZFCP_ERP_FAILED;
++ goto out;
++ }
++
++ retval = zfcp_adapter_detect(adapter);
++ if (retval == -ENOMEM) {
++ retval = ZFCP_ERP_NOMEM;
++ goto out;
++ }
++ if (retval != 0) {
++ retval = ZFCP_ERP_FAILED;
++ goto out;
++ }
++ retval = ZFCP_ERP_SUCCEEDED;
++#endif
++
++ if (zfcp_adapter_detect(adapter) == 0)
++ if (zfcp_adapter_irq_register(adapter) == 0)
++ retval = ZFCP_ERP_SUCCEEDED;
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: releases owned irq
++ *
++ * returns:
++ */
++static int zfcp_erp_adapter_strategy_close_irq(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = ZFCP_ERP_SUCCEEDED;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ zfcp_adapter_irq_unregister(erp_action->adapter);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_erp_adapter_strategy_open_qdio
++ *
++ * purpose: sets up QDIO operation for the specified adapter
++ *
++ * returns: ZFCP_ERP_SUCCEEDED - successful setup
++ * ZFCP_ERP_FAILED - failed setup
++ */
++int zfcp_erp_adapter_strategy_open_qdio(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++ int i;
++ volatile qdio_buffer_element_t *buffere;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
++ ZFCP_LOG_NORMAL(
++ "bug: QDIO (data transfer mechanism) start-up on "
++ "adapter with devno 0x%04x attempted twice. "
++ "Second attempt ignored.",
++ adapter->devno);
++ goto failed_sanity;
++ }
++
++ if (zfcp_initialize_with_0copy(adapter) != 0) {
++ ZFCP_LOG_INFO(
++ "error: Could not establish queues for QDIO (data "
++ "transfer mechanism) operation on adapter with devno "
++ "0x%04x.\n",
++ adapter->devno);
++ goto failed_qdio_initialize;
++ }
++ ZFCP_LOG_DEBUG("queues established\n");
++
++ /* activate QDIO request and response queue */
++ if (qdio_activate(adapter->irq, 0) != 0) {
++ ZFCP_LOG_INFO(
++ "error: Could not activate queues for QDIO (data "
++ "transfer mechanism) operation on adapter with devno "
++ "0x%04x.\n",
++ adapter->devno);
++ goto failed_qdio_activate;
++ }
++ ZFCP_LOG_DEBUG("queues activated\n");
++
++ /*
++ * put buffers into response queue: only the first element of
++ * each SBAL is used and marked as the last entry
++ */
++ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
++ buffere = &(adapter->response_queue.buffer[i]->element[0]);
++ buffere->length = 0;
++ buffere->flags = SBAL_FLAGS_LAST_ENTRY;
++ buffere->addr = 0;
++ }
++
++ ZFCP_LOG_TRACE(
++ "Calling do QDIO irq=0x%x,flags=0x%x, queue_no=%i, "
++ "index_in_queue=%i, count=%i\n",
++ adapter->irq,
++ QDIO_FLAG_SYNC_INPUT,
++ 0,
++ 0,
++ QDIO_MAX_BUFFERS_PER_Q);
++
++ retval = do_QDIO(
++ adapter->irq,
++ QDIO_FLAG_SYNC_INPUT,
++ 0,
++ 0,
++ QDIO_MAX_BUFFERS_PER_Q,
++ NULL);
++
++ if (retval) {
++ ZFCP_LOG_NORMAL(
++ "bug: QDIO (data transfer mechanism) inobund transfer "
++ "structures could not be set-up (debug info %d)\n",
++ retval);
++ goto failed_do_qdio;
++ } else {
++ adapter->response_queue.free_index = 0;
++ atomic_set(&adapter->response_queue.free_count, 0);
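++  /*
++   * note: zero free buffers from the driver's point of view,
++   * since all QDIO_MAX_BUFFERS_PER_Q response buffers have just
++   * been handed to the adapter by do_QDIO above
++   */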
++ ZFCP_LOG_DEBUG(
++ "%i buffers successfully enqueued to response queue\n",
++ QDIO_MAX_BUFFERS_PER_Q);
++ }
++
++ /* set index of first available SBAL / number of available SBALs */
++ adapter->request_queue.free_index = 0;
++ atomic_set(&adapter->request_queue.free_count, QDIO_MAX_BUFFERS_PER_Q);
++ adapter->request_queue.distance_from_int = 0;
++
++ /* initialize waitqueue used to wait for free SBALs in request queue */
++ init_waitqueue_head(&adapter->request_wq);
++
++ /* ok, we did it - skip all cleanups for different failures */
++ atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
++ retval = ZFCP_ERP_SUCCEEDED;
++
++ goto out;
++
++failed_do_qdio:
++ /* NOP */
++
++failed_qdio_activate:
++ debug_text_event(adapter->erp_dbf, 3, "qdio_down1a");
++ while (qdio_cleanup(adapter->irq,
++ QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS) {
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ schedule_timeout(HZ);
++ }
++ debug_text_event(adapter->erp_dbf, 3, "qdio_down1b");
++
++ /*
++ * First we had to stop QDIO operation.
++ * Now it is safe to take the following actions.
++ */
++
++failed_qdio_initialize:
++failed_sanity:
++ retval = ZFCP_ERP_FAILED;
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_erp_adapter_strategy_close_qdio
++ *
++ * purpose: cleans up QDIO operation for the specified adapter
++ *
++ * returns: ZFCP_ERP_SUCCEEDED - successful cleanup
++ * ZFCP_ERP_FAILED - failed cleanup
++ */
++int zfcp_erp_adapter_strategy_close_qdio(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = ZFCP_ERP_SUCCEEDED;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
++ ZFCP_LOG_DEBUG(
++ "Termination of QDIO (data transfer operation) "
++ "attempted for an inactive qdio on the "
++ "adapter with devno 0x%04x....ignored.\n",
++ adapter->devno);
++ retval = ZFCP_ERP_FAILED;
++ goto out;
++ }
++
++ /*
++ * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that
++ * do_QDIO won't be called while qdio_shutdown is in progress.
++ */
++
++ write_lock_irq(&adapter->request_queue.queue_lock);
++ atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
++ write_unlock_irq(&adapter->request_queue.queue_lock);
++
++ debug_text_event(adapter->erp_dbf, 3, "qdio_down2a");
++ while (qdio_cleanup(adapter->irq,
++ QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS) {
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ schedule_timeout(HZ);
++ }
++ debug_text_event(adapter->erp_dbf, 3, "qdio_down2b");
++
++ /*
++ * First we had to stop QDIO operation.
++ * Now it is safe to take the following actions.
++ */
++
++ zfcp_zero_sbals(
++ adapter->request_queue.buffer,
++ 0,
++ QDIO_MAX_BUFFERS_PER_Q);
++ adapter->response_queue.free_index = 0;
++ atomic_set(&adapter->response_queue.free_count, 0);
++ adapter->request_queue.free_index = 0;
++ atomic_set(&adapter->request_queue.free_count, 0);
++ adapter->request_queue.distance_from_int = 0;
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
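++
++
++/*
++ * Illustrative sketch (hypothetical request-path counterpart, not
++ * part of the driver; start_sbal and sbal_count are placeholders):
++ * a sender is expected to test the QDIOUP flag under the same
++ * queue_lock before calling do_QDIO(), so clearing the flag under
++ * the write lock above guarantees that no new do_QDIO() call can
++ * race with qdio_cleanup().
++ */
++#if 0
++ read_lock_irq(&adapter->request_queue.queue_lock);
++ if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
++  read_unlock_irq(&adapter->request_queue.queue_lock);
++  return -EIO; /* adapter is going down, don't touch the queue */
++ }
++ retval = do_QDIO(adapter->irq, QDIO_FLAG_SYNC_OUTPUT, 0,
++    start_sbal, sbal_count, NULL);
++ read_unlock_irq(&adapter->request_queue.queue_lock);
++#endif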
++
++
++/*
++ * function: zfcp_erp_adapter_strategy_open_fsf
++ *
++ * purpose: initializes FSF operation for the specified adapter
++ *
++ * returns: ZFCP_ERP_SUCCEEDED - successful initialization of FSF operation
++ * ZFCP_ERP_FAILED - failed to initialize FSF operation
++ */
++static int zfcp_erp_adapter_strategy_open_fsf(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ /* do 'exchange configuration data' */
++ retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action);
++ if (retval == ZFCP_ERP_FAILED)
++ goto out;
++
++ /* start the desired number of Status Reads */
++ retval = zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ */
++static int zfcp_erp_adapter_strategy_open_fsf_xconfig(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = ZFCP_ERP_SUCCEEDED;
++ int retries;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
++ retries = ZFCP_EXCHANGE_CONFIG_DATA_RETRIES;
++
++ do {
++ atomic_clear_mask(
++ ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
++ &adapter->status);
++ ZFCP_LOG_DEBUG("Doing exchange config data\n");
++ zfcp_erp_action_to_running(erp_action);
++ zfcp_erp_timeout_init(erp_action);
++ if (zfcp_fsf_exchange_config_data(erp_action)) {
++ retval = ZFCP_ERP_FAILED;
++ debug_text_event(adapter->erp_dbf, 5, "a_fstx_xf");
++ ZFCP_LOG_INFO(
++ "error: Out of resources. Could not "
++ "start exchange of configuration data "
++ "between the adapter with devno "
++ "0x%04x and the device driver.\n",
++ adapter->devno);
++ break;
++ }
++ debug_text_event(adapter->erp_dbf, 6, "a_fstx_xok");
++ ZFCP_LOG_DEBUG("Xchange underway\n");
++
++ /*
++ * Why this works:
++ * Both the normal completion handler as well as the timeout
++ * handler will do an 'up' when the 'exchange config data'
++ * request completes or times out. Thus, the signal to go on
++ * won't be lost utilizing this semaphore.
++ * Furthermore, this 'adapter_reopen' action is
++ * guaranteed to be the only action being there (highest action
++ * which prevents other actions from being created).
++ * Resulting from that, the wake signal recognized here
++ * _must_ be the one belonging to the 'exchange config
++ * data' request.
++ */
++ down(&adapter->erp_ready_sem);
++ if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
++ ZFCP_LOG_INFO(
++ "error: Exchange of configuration data between "
++ "the adapter with devno 0x%04x and the device "
++ "driver timed out\n",
++ adapter->devno);
++ break;
++ }
++ if (atomic_test_mask(
++ ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
++ &adapter->status)) {
++ ZFCP_LOG_DEBUG(
++ "Host connection still initialising... "
++ "waiting and retrying....\n");
++ /* sleep a little bit before retry */
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule_timeout(ZFCP_EXCHANGE_CONFIG_DATA_SLEEP);
++ }
++ } while ((retries--) &&
++ atomic_test_mask(
++ ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
++ &adapter->status));
++
++ if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status)) {
++ ZFCP_LOG_INFO(
++ "error: Exchange of configuration data between "
++ "the adapter with devno 0x%04x and the device "
++ "driver failed.\n",
++ adapter->devno);
++ retval = ZFCP_ERP_FAILED;
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
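++
++
++/*
++ * Illustrative sketch (handler names are hypothetical, the real
++ * handlers do more work): both completion paths referred to in the
++ * 'Why this works' comment above signal the same semaphore, so the
++ * down() in the retry loop cannot miss the event regardless of
++ * which path wins.
++ */
++#if 0
++static void example_xconfig_done(zfcp_erp_action_t *erp_action)
++{
++ /* normal completion of 'exchange config data' */
++ up(&erp_action->adapter->erp_ready_sem);
++}
++
++static void example_xconfig_timeout(unsigned long data)
++{
++ zfcp_erp_action_t *erp_action = (zfcp_erp_action_t *)data;
++
++ erp_action->status |= ZFCP_STATUS_ERP_TIMEDOUT;
++ up(&erp_action->adapter->erp_ready_sem);
++}
++#endif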
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ */
++static int zfcp_erp_adapter_strategy_open_fsf_statusread(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = ZFCP_ERP_SUCCEEDED;
++ int temp_ret;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++ int i;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ adapter->status_read_failed = 0;
++ for (i = 0; i < ZFCP_STATUS_READS_RECOM; i++) {
++ ZFCP_LOG_TRACE("issuing status read request #%d...\n", i);
++ temp_ret = zfcp_fsf_status_read(
++ adapter,
++ ZFCP_WAIT_FOR_SBAL);
++ if (temp_ret < 0) {
++ ZFCP_LOG_INFO(
++ "error: Out of resources. Could not "
++ "set-up the infrastructure for "
++ "unsolicited status presentation "
++ "for the adapter with devno "
++ "0x%04x.\n",
++ adapter->devno);
++ retval = ZFCP_ERP_FAILED;
++ i--;
++ break;
++ }
++ }
++ ZFCP_LOG_DEBUG("started %i status reads\n", i);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_erp_adapter_strategy_close_fsf
++ *
++ * purpose: cleans up FSF operation for the specified adapter
++ *
++ * returns: ZFCP_ERP_SUCCEEDED - FSF operation successfully cleaned up
++ * ZFCP_ERP_FAILED - failed to clean up FSF operation
++ */
++static int zfcp_erp_adapter_strategy_close_fsf(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = ZFCP_ERP_SUCCEEDED;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++ ZFCP_LOG_TRACE("enter (adapter=0x%lx)\n", (unsigned long)adapter);
++
++ /*
++ * wake waiting initiators of requests,
++ * return SCSI commands (with error status),
++ * clean up all requests (synchronously)
++ */
++ zfcp_fsf_req_dismiss_all(adapter);
++ /* reset FSF request sequence number */
++ adapter->fsf_req_seq_no = 0;
++ /* all ports and units are closed */
++ zfcp_erp_modify_adapter_status(
++ adapter,
++ ZFCP_STATUS_COMMON_OPEN,
++ ZFCP_CLEAR);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: this routine executes the 'Reopen Physical Port' action
++ *
++ * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
++ * ZFCP_ERP_SUCCEEDED - action finished successfully
++ * ZFCP_ERP_FAILED - action finished unsuccessfully
++ */
++static int zfcp_erp_port_forced_strategy(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = ZFCP_ERP_FAILED;
++ zfcp_port_t *port = erp_action->port;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ switch (erp_action->step) {
++
++ /* FIXME: the ULP spec. begs for waiting for outstanding commands */
++ case ZFCP_ERP_STEP_UNINITIALIZED :
++ zfcp_erp_port_strategy_clearstati(port);
++ /*
++ * it would be sufficient to test only the normal open flag
++ * since the phys. open flag cannot be set if the normal
++ * open flag is unset - however, this is for readability ...
++ */
++ if (atomic_test_mask(
++ (ZFCP_STATUS_PORT_PHYS_OPEN |
++ ZFCP_STATUS_COMMON_OPEN),
++ &port->status)) {
++ ZFCP_LOG_DEBUG(
++ "Port WWPN=0x%016Lx is open -> trying close physical\n",
++ (llui_t)port->wwpn);
++ retval = zfcp_erp_port_forced_strategy_close(erp_action);
++ } else retval = ZFCP_ERP_FAILED;
++ break;
++
++ case ZFCP_ERP_STEP_PHYS_PORT_CLOSING :
++ if (atomic_test_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status)) {
++ ZFCP_LOG_DEBUG(
++ "failed to close physical port WWPN=0x%016Lx\n",
++ (llui_t)port->wwpn);
++ retval = ZFCP_ERP_FAILED;
++ } else retval = ZFCP_ERP_SUCCEEDED;
++ break;
++ }
++
++ debug_text_event(adapter->erp_dbf, 3, "p_pfst/ret");
++ debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof(wwn_t));
++ debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof(int));
++ debug_event(adapter->erp_dbf, 3, &retval, sizeof(int));
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: this routine executes the 'Reopen Port' action
++ *
++ * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
++ * ZFCP_ERP_SUCCEEDED - action finished successfully
++ * ZFCP_ERP_FAILED - action finished unsuccessfully
++ */
++static int zfcp_erp_port_strategy(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = ZFCP_ERP_FAILED;
++ zfcp_port_t *port = erp_action->port;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ switch (erp_action->step) {
++
++ /* FIXME: the ULP spec. begs for waiting for outstanding commands */
++ case ZFCP_ERP_STEP_UNINITIALIZED :
++ zfcp_erp_port_strategy_clearstati(port);
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) {
++ ZFCP_LOG_DEBUG(
++ "port WWPN=0x%016Lx is open -> trying close\n",
++ (llui_t)port->wwpn);
++ retval = zfcp_erp_port_strategy_close(erp_action);
++ goto out;
++ } /* else it's already closed, open it */
++ break;
++
++ case ZFCP_ERP_STEP_PORT_CLOSING :
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) {
++ ZFCP_LOG_DEBUG(
++ "failed to close port WWPN=0x%016Lx\n",
++ (llui_t)port->wwpn);
++ retval = ZFCP_ERP_FAILED;
++ goto out;
++ } /* else it's closed now, open it */
++ break;
++ }
++ if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
++ retval = ZFCP_ERP_EXIT;
++ else retval = zfcp_erp_port_strategy_open(erp_action);
++
++out:
++ debug_text_event(adapter->erp_dbf, 3, "p_pst/ret");
++ debug_event(adapter->erp_dbf, 3, &port->wwpn, sizeof(wwn_t));
++ debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof(int));
++ debug_event(adapter->erp_dbf, 3, &retval, sizeof(int));
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ */
++static int zfcp_erp_port_strategy_open(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ if (atomic_test_mask(ZFCP_STATUS_PORT_NAMESERVER, &erp_action->port->status))
++ retval = zfcp_erp_port_strategy_open_nameserver(erp_action);
++ else retval = zfcp_erp_port_strategy_open_common(erp_action);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ *
++ * FIXME(design): currently only prepared for fabric (nameserver!)
++ */
++static int zfcp_erp_port_strategy_open_common(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++ zfcp_port_t *port = erp_action->port;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ switch (erp_action->step) {
++
++ case ZFCP_ERP_STEP_UNINITIALIZED :
++ case ZFCP_ERP_STEP_PHYS_PORT_CLOSING :
++ case ZFCP_ERP_STEP_PORT_CLOSING :
++ if (!(adapter->nameserver_port)) {
++ retval = zfcp_nameserver_enqueue(adapter);
++ if (retval == -ENOMEM) {
++ retval = ZFCP_ERP_NOMEM;
++ break;
++ }
++ if (retval != 0) {
++ ZFCP_LOG_NORMAL(
++ "error: nameserver port not available "
++ "(adapter with devno 0x%04x)\n",
++ adapter->devno);
++ retval = ZFCP_ERP_FAILED;
++ break;
++ }
++ }
++ if (!atomic_test_mask(
++ ZFCP_STATUS_COMMON_UNBLOCKED,
++ &adapter->nameserver_port->status)) {
++ ZFCP_LOG_DEBUG(
++ "nameserver port is not open -> open nameserver port\n");
++ /* nameserver port may live again */
++ atomic_set_mask(
++ ZFCP_STATUS_COMMON_RUNNING,
++ &adapter->nameserver_port->status);
++ if (zfcp_erp_port_reopen(adapter->nameserver_port, 0) >= 0) {
++ erp_action->step = ZFCP_ERP_STEP_NAMESERVER_OPEN;
++ retval = ZFCP_ERP_CONTINUES;
++ } else retval = ZFCP_ERP_FAILED;
++ break;
++ } /* else nameserver port is already open, fall through */
++
++ case ZFCP_ERP_STEP_NAMESERVER_OPEN :
++ if (!atomic_test_mask(ZFCP_STATUS_COMMON_OPEN,
++ &adapter->nameserver_port->status)) {
++ ZFCP_LOG_DEBUG("failed to open nameserver port\n");
++ retval = ZFCP_ERP_FAILED;
++ } else {
++ ZFCP_LOG_DEBUG(
++ "nameserver port is open -> "
++ "ask nameserver for current D_ID of port with WWPN 0x%016Lx\n",
++ (llui_t)port->wwpn);
++ retval = zfcp_erp_port_strategy_open_common_lookup(erp_action);
++ }
++ break;
++
++ case ZFCP_ERP_STEP_NAMESERVER_LOOKUP :
++ if (!atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status)) {
++ if (atomic_test_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status)) {
++ ZFCP_LOG_DEBUG(
++ "failed to look up the D_ID of the port WWPN=0x%016Lx "
++ "(misconfigured WWPN?)\n",
++ (llui_t)port->wwpn);
++ zfcp_erp_port_failed(port);
++ retval = ZFCP_ERP_EXIT;
++ } else {
++ ZFCP_LOG_DEBUG(
++ "failed to look up the D_ID of the port WWPN=0x%016Lx\n",
++ (llui_t)port->wwpn);
++ retval = ZFCP_ERP_FAILED;
++ }
++ } else {
++ ZFCP_LOG_DEBUG(
++ "port WWPN=0x%016Lx has D_ID=0x%06x -> trying open\n",
++ (llui_t)port->wwpn,
++ port->d_id);
++ retval = zfcp_erp_port_strategy_open_port(erp_action);
++ }
++ break;
++
++ case ZFCP_ERP_STEP_PORT_OPENING :
++ if (atomic_test_mask(
++ (ZFCP_STATUS_COMMON_OPEN |
++ ZFCP_STATUS_PORT_DID_DID),/* D_ID might have changed during open */
++ &port->status)) {
++ ZFCP_LOG_DEBUG(
++ "port WWPN=0x%016Lx is open ",
++ (llui_t)port->wwpn);
++ retval = ZFCP_ERP_SUCCEEDED;
++ } else {
++ ZFCP_LOG_DEBUG(
++ "failed to open port WWPN=0x%016Lx\n",
++ (llui_t)port->wwpn);
++ retval = ZFCP_ERP_FAILED;
++ }
++ break;
++
++ default :
++ ZFCP_LOG_NORMAL(
++ "bug: unkown erp step 0x%x\n",
++ erp_action->step);
++ retval = ZFCP_ERP_FAILED;
++ }
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ */
++static int zfcp_erp_port_strategy_open_nameserver(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++ zfcp_port_t *port = erp_action->port;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ switch (erp_action->step) {
++
++ case ZFCP_ERP_STEP_UNINITIALIZED :
++ case ZFCP_ERP_STEP_PHYS_PORT_CLOSING :
++ case ZFCP_ERP_STEP_PORT_CLOSING :
++ ZFCP_LOG_DEBUG(
++ "port WWPN=0x%016Lx has D_ID=0x%06x -> trying open\n",
++ (llui_t)port->wwpn,
++ port->d_id);
++ retval = zfcp_erp_port_strategy_open_port(erp_action);
++ break;
++
++ case ZFCP_ERP_STEP_PORT_OPENING :
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &port->status)) {
++ ZFCP_LOG_DEBUG("nameserver port is open\n");
++ retval = ZFCP_ERP_SUCCEEDED;
++ } else {
++ ZFCP_LOG_DEBUG("failed to open nameserver port\n");
++ retval = ZFCP_ERP_FAILED;
++ }
++ /* this is needed anyway (don't care about the retval of the wakeup) */
++ ZFCP_LOG_DEBUG("continue other open port operations\n");
++ zfcp_erp_port_strategy_open_nameserver_wakeup(erp_action);
++ break;
++
++ default :
++ ZFCP_LOG_NORMAL(
++ "bug: unkown erp step 0x%x\n",
++ erp_action->step);
++ retval = ZFCP_ERP_FAILED;
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: makes the erp thread continue with reopen (physical) port
++ * actions which have been paused until the name server port
++ * is opened (or failed)
++ *
++ * returns: 0 (a kind of void retval, its not used)
++ */
++static int zfcp_erp_port_strategy_open_nameserver_wakeup(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ unsigned long flags;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++ struct list_head *entry, *temp_entry;
++ zfcp_erp_action_t *tmp_erp_action;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ write_lock_irqsave(&adapter->erp_lock, flags);
++ list_for_each_safe(entry, temp_entry, &adapter->erp_running_head) {
++ tmp_erp_action = list_entry(entry, zfcp_erp_action_t, list);
++ debug_text_event(adapter->erp_dbf, 3, "p_pstnsw_n");
++ debug_event(adapter->erp_dbf, 3, &tmp_erp_action->port->wwpn, sizeof(wwn_t));
++ if (tmp_erp_action->step == ZFCP_ERP_STEP_NAMESERVER_OPEN) {
++ debug_text_event(adapter->erp_dbf, 3, "p_pstnsw_w");
++ debug_event(adapter->erp_dbf, 3, &tmp_erp_action->port->wwpn, sizeof(wwn_t));
++ if (atomic_test_mask(
++ ZFCP_STATUS_COMMON_ERP_FAILED,
++ &adapter->nameserver_port->status))
++ zfcp_erp_port_failed(tmp_erp_action->port);
++ zfcp_erp_action_ready(tmp_erp_action);
++ }
++ }
++ write_unlock_irqrestore(&adapter->erp_lock, flags);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
++ * ZFCP_ERP_FAILED - action finished unsuccessfully
++ */
++static int zfcp_erp_port_forced_strategy_close(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++ zfcp_port_t *port = erp_action->port;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ zfcp_erp_timeout_init(erp_action);
++ retval = zfcp_fsf_close_physical_port(erp_action);
++ if (retval == -ENOMEM) {
++ debug_text_event(adapter->erp_dbf, 5, "o_pfstc_nomem");
++ debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof(wwn_t));
++ retval = ZFCP_ERP_NOMEM;
++ goto out;
++ }
++ erp_action->step = ZFCP_ERP_STEP_PHYS_PORT_CLOSING;
++ if (retval != 0) {
++ debug_text_event(adapter->erp_dbf, 5, "o_pfstc_cpf");
++ debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof(wwn_t));
++ /* could not send 'open', fail */
++ retval = ZFCP_ERP_FAILED;
++ goto out;
++ }
++ debug_text_event(adapter->erp_dbf, 6, "o_pfstc_cpok");
++ debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof(wwn_t));
++ retval = ZFCP_ERP_CONTINUES;
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ */
++static int zfcp_erp_port_strategy_clearstati(zfcp_port_t *port)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ zfcp_adapter_t *adapter = port->adapter;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ debug_text_event(adapter->erp_dbf, 5, "p_pstclst");
++ debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof(wwn_t));
++
++ atomic_clear_mask(
++ ZFCP_STATUS_COMMON_OPENING |
++ ZFCP_STATUS_COMMON_CLOSING |
++ ZFCP_STATUS_PORT_DID_DID |
++ ZFCP_STATUS_PORT_PHYS_CLOSING |
++ ZFCP_STATUS_PORT_INVALID_WWPN,
++ &port->status);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
++ * ZFCP_ERP_FAILED - action finished unsuccessfully
++ */
++static int zfcp_erp_port_strategy_close(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++ zfcp_port_t *port = erp_action->port;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ zfcp_erp_timeout_init(erp_action);
++ retval = zfcp_fsf_close_port(erp_action);
++ if (retval == -ENOMEM) {
++ debug_text_event(adapter->erp_dbf, 5, "p_pstc_nomem");
++ debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof(wwn_t));
++ retval = ZFCP_ERP_NOMEM;
++ goto out;
++ }
++ erp_action->step = ZFCP_ERP_STEP_PORT_CLOSING;
++ if (retval != 0) {
++ debug_text_event(adapter->erp_dbf, 5, "p_pstc_cpf");
++ debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof(wwn_t));
++ /* could not send 'close', fail */
++ retval = ZFCP_ERP_FAILED;
++ goto out;
++ }
++ debug_text_event(adapter->erp_dbf, 6, "p_pstc_cpok");
++ debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof(wwn_t));
++ retval = ZFCP_ERP_CONTINUES;
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
++ * ZFCP_ERP_FAILED - action finished unsuccessfully
++ */
++static int zfcp_erp_port_strategy_open_port(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++ zfcp_port_t *port = erp_action->port;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ zfcp_erp_timeout_init(erp_action);
++ retval = zfcp_fsf_open_port(erp_action);
++ if (retval == -ENOMEM) {
++ debug_text_event(adapter->erp_dbf, 5, "p_psto_nomem");
++ debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof(wwn_t));
++ retval = ZFCP_ERP_NOMEM;
++ goto out;
++ }
++ erp_action->step = ZFCP_ERP_STEP_PORT_OPENING;
++ if (retval != 0) {
++ debug_text_event(adapter->erp_dbf, 5, "p_psto_opf");
++ debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof(wwn_t));
++ /* could not send 'open', fail */
++ retval = ZFCP_ERP_FAILED;
++ goto out;
++ }
++ debug_text_event(adapter->erp_dbf, 6, "p_psto_opok");
++ debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof(wwn_t));
++ retval = ZFCP_ERP_CONTINUES;
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
++ * ZFCP_ERP_FAILED - action finished unsuccessfully
++ */
++static int zfcp_erp_port_strategy_open_common_lookup(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++ zfcp_port_t *port = erp_action->port;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ zfcp_erp_timeout_init(erp_action);
++ retval = zfcp_ns_gid_pn_request(erp_action);
++ if (retval == -ENOMEM) {
++ debug_text_event(adapter->erp_dbf, 5, "p_pstn_nomem");
++ debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof(wwn_t));
++ retval = ZFCP_ERP_NOMEM;
++ goto out;
++ }
++ erp_action->step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
++ if (retval != 0) {
++ debug_text_event(adapter->erp_dbf, 5, "p_pstn_ref");
++ debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof(wwn_t));
++ /* could not send nameserver request, fail */
++ retval = ZFCP_ERP_FAILED;
++ goto out;
++ }
++ debug_text_event(adapter->erp_dbf, 6, "p_pstn_reok");
++ debug_event(adapter->erp_dbf, 6, &port->wwpn, sizeof(wwn_t));
++ retval = ZFCP_ERP_CONTINUES;
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: this routine executes the 'Reopen Unit' action
++ * currently no retries
++ *
++ * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
++ * ZFCP_ERP_SUCCEEDED - action finished successfully
++ * ZFCP_ERP_FAILED - action finished unsuccessfully
++ */
++static int zfcp_erp_unit_strategy(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = ZFCP_ERP_FAILED;
++ zfcp_unit_t *unit = erp_action->unit;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ switch (erp_action->step) {
++
++ /* FIXME: the ULP spec. begs for waiting for outstanding commands */
++ case ZFCP_ERP_STEP_UNINITIALIZED :
++ zfcp_erp_unit_strategy_clearstati(unit);
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) {
++ ZFCP_LOG_DEBUG(
++ "unit FCP_LUN=0x%016Lx is open -> trying close\n",
++ (llui_t)unit->fcp_lun);
++ retval = zfcp_erp_unit_strategy_close(erp_action);
++ break;
++ } /* else it's already closed, fall through */
++
++ case ZFCP_ERP_STEP_UNIT_CLOSING :
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) {
++ ZFCP_LOG_DEBUG(
++ "failed to close unit FCP_LUN=0x%016Lx\n",
++ (llui_t)unit->fcp_lun);
++ retval = ZFCP_ERP_FAILED;
++ } else {
++ if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
++ retval = ZFCP_ERP_EXIT;
++ else {
++ ZFCP_LOG_DEBUG(
++ "unit FCP_LUN=0x%016Lx is not open -> trying open\n",
++ (llui_t)unit->fcp_lun);
++ retval = zfcp_erp_unit_strategy_open(erp_action);
++ }
++ }
++ break;
++
++ case ZFCP_ERP_STEP_UNIT_OPENING :
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status)) {
++ ZFCP_LOG_DEBUG(
++ "unit FCP_LUN=0x%016Lx is open\n",
++ (llui_t)unit->fcp_lun);
++ retval = ZFCP_ERP_SUCCEEDED;
++ } else {
++ ZFCP_LOG_DEBUG(
++ "failed to open unit FCP_LUN=0x%016Lx\n",
++ (llui_t)unit->fcp_lun);
++ retval = ZFCP_ERP_FAILED;
++ }
++ break;
++ }
++
++ debug_text_event(adapter->erp_dbf, 3, "u_ust/ret");
++ debug_event(adapter->erp_dbf, 3, &unit->fcp_lun, sizeof(fcp_lun_t));
++ debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof(int));
++ debug_event(adapter->erp_dbf, 3, &retval, sizeof(int));
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ */
++static int zfcp_erp_unit_strategy_clearstati(zfcp_unit_t *unit)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ zfcp_adapter_t *adapter = unit->port->adapter;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ debug_text_event(adapter->erp_dbf,5,"u_ustclst");
++ debug_event(adapter->erp_dbf,5,&unit->fcp_lun,
++ sizeof(fcp_lun_t));
++
++ atomic_clear_mask(
++ ZFCP_STATUS_COMMON_OPENING |
++ ZFCP_STATUS_COMMON_CLOSING,
++ &unit->status);
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
++ * ZFCP_ERP_FAILED - action finished unsuccessfully
++ */
++static int zfcp_erp_unit_strategy_close(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++ zfcp_unit_t *unit = erp_action->unit;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ zfcp_erp_timeout_init(erp_action);
++ retval = zfcp_fsf_close_unit(erp_action);
++ if (retval == -ENOMEM) {
++ debug_text_event(adapter->erp_dbf, 5, "u_ustc_nomem");
++ debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof(fcp_lun_t));
++ retval = ZFCP_ERP_NOMEM;
++ goto out;
++ }
++ erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING;
++ if (retval != 0) {
++ debug_text_event(adapter->erp_dbf, 5, "u_ustc_cuf");
++ debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof(fcp_lun_t));
++ /* could not send 'close', fail */
++ retval = ZFCP_ERP_FAILED;
++ goto out;
++ }
++ debug_text_event(adapter->erp_dbf, 6, "u_ustc_cuok");
++ debug_event(adapter->erp_dbf, 6, &unit->fcp_lun, sizeof(fcp_lun_t));
++ retval = ZFCP_ERP_CONTINUES;
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns: ZFCP_ERP_CONTINUES - action continues (asynchronously)
++ * ZFCP_ERP_FAILED - action finished unsuccessfully
++ */
++static int zfcp_erp_unit_strategy_open(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++ zfcp_unit_t *unit = erp_action->unit;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ zfcp_erp_timeout_init(erp_action);
++ retval = zfcp_fsf_open_unit(erp_action);
++ if (retval == -ENOMEM) {
++ debug_text_event(adapter->erp_dbf, 5, "u_usto_nomem");
++ debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof(fcp_lun_t));
++ retval = ZFCP_ERP_NOMEM;
++ goto out;
++ }
++ erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING;
++ if (retval != 0) {
++ debug_text_event(adapter->erp_dbf, 5, "u_usto_ouf");
++ debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof(fcp_lun_t));
++ /* could not send 'open', fail */
++ retval = ZFCP_ERP_FAILED;
++ goto out;
++ }
++ debug_text_event(adapter->erp_dbf, 6, "u_usto_ouok");
++ debug_event(adapter->erp_dbf, 6, &unit->fcp_lun, sizeof(fcp_lun_t));
++ retval = ZFCP_ERP_CONTINUES;
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ */
++static /*inline*/ int zfcp_erp_timeout_init(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ debug_text_event(adapter->erp_dbf, 6, "a_timinit");
++ debug_event(adapter->erp_dbf, 6, &erp_action->action, sizeof(int));
++ init_timer(&erp_action->timer);
++ erp_action->timer.function = zfcp_erp_timeout_handler;
++ erp_action->timer.data = (unsigned long)erp_action;
++ /* jiffies will be added in zfcp_fsf_req_send */
++ erp_action->timer.expires = ZFCP_ERP_FSFREQ_TIMEOUT;
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: enqueue the specified error recovery action, if needed
++ *
++ * returns:
++ */
++static int zfcp_erp_action_enqueue(
++ int action,
++ zfcp_adapter_t *adapter,
++ zfcp_port_t *port,
++ zfcp_unit_t *unit)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 1;
++ zfcp_erp_action_t *erp_action = NULL;
++ int stronger_action = 0;
++ u32 status = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (action=%d adapter=0x%lx "
++ "port=0x%lx unit=0x%lx)\n",
++ action,
++ (unsigned long)adapter,
++ (unsigned long)port,
++ (unsigned long)unit);
++
++ /*
++ * We need some rules here which check whether we really need
++ * this action or whether we should just drop it.
++ * E.g. if there is an unfinished 'Reopen Port' request then we drop a
++ * 'Reopen Unit' request for an associated unit since we can't
++ * satisfy this request now. A 'Reopen Port' action will trigger
++ * 'Reopen Unit' actions when it completes.
++ * Thus, there are only actions in the queue which can immediately be
++ * executed. This makes the processing of the action queue more
++ * efficient.
++ */
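++
++ /*
++  * Summary of the checks implemented below (weakest action first):
++  *
++  *  requested action       dropped if           escalated to
++  *  --------------------   ------------------   ---------------------
++  *  REOPEN_UNIT            unit ERP_INUSE       REOPEN_PORT, if the
++  *                                              port is blocked
++  *  REOPEN_PORT(_FORCED)   port ERP_INUSE       REOPEN_ADAPTER, if
++  *                                              the adapter is blocked
++  *  REOPEN_ADAPTER         adapter ERP_INUSE    -
++  */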
++
++ debug_text_event(adapter->erp_dbf, 4, "a_actenq");
++ debug_event(adapter->erp_dbf, 4, &action, sizeof(int));
++ /* check whether we really need this */
++ switch (action) {
++ case ZFCP_ERP_ACTION_REOPEN_UNIT :
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) {
++ debug_text_event(adapter->erp_dbf, 4, "u_actenq_drp");
++ debug_event(adapter->erp_dbf, 4, &unit->fcp_lun, sizeof(fcp_lun_t));
++ ZFCP_LOG_DEBUG(
++ "drop: erp action %i on unit "
++ "FCP_LUN=0x%016Lx "
++ "(erp action %i already exists)\n",
++ action,
++ (llui_t)unit->fcp_lun,
++ unit->erp_action.action);
++ goto out;
++ }
++ if (!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) {
++ stronger_action = ZFCP_ERP_ACTION_REOPEN_PORT;
++ unit = NULL;
++ }
++ /* fall through !!! */
++
++ case ZFCP_ERP_ACTION_REOPEN_PORT :
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)) {
++ debug_text_event(adapter->erp_dbf, 4, "p_actenq_drp");
++ debug_event(adapter->erp_dbf, 4, &port->wwpn, sizeof(wwn_t));
++ ZFCP_LOG_DEBUG(
++ "drop: erp action %i on port "
++ "WWPN=0x%016Lx "
++ "(erp action %i already exists)\n",
++ action,
++ (llui_t)port->wwpn,
++ port->erp_action.action);
++ goto out;
++ }
++ /* fall through !!! */
++
++ case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED :
++ /*
++ * FIXME(erpmod):
++ * Can't override normal port_reopen in the usual
++ * manner. Trying adapter_reopen as a workaround
++ * won't do it. The old code indifferently overwrote
++ * the normal port_reopen action. That was not a valid
++ * approach. The old implementation left the erp-action
++ * list corrupted which had held the former normal
++ * port_reopen action, since the latter had not been
++ * removed from the list, just added in another place.
++ * The hapless endeavour continued with the immediate
++ * dismissal of the shiny new port_forced_reopen,
++ * a doom that was meant to be met by the just wiped out
++ * normal port_reopen action. Not to mention the stray
++ * fsf_req that might still have felt attached to the
++ * converted action. Lots of accidents .... who knows
++ * what else was about to go awry in the wake of them.
++ * As a (permanent) hack we are going to use a flag to
++ * get this action scheduled as a retry after completion
++ * of the normal port_reopen.
++ * But until then, just gracefully exit here (and warn).
++ */
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status)) {
++ if (port->erp_action.action == ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) {
++ debug_text_event(adapter->erp_dbf, 4, "pf_actenq_drp");
++ debug_event(adapter->erp_dbf, 4, &port->wwpn, sizeof(wwn_t));
++ ZFCP_LOG_DEBUG(
++ "drop: erp action %i on port "
++ "WWPN=0x%016Lx "
++ "(erp action %i already exists)\n",
++ action,
++ (llui_t)port->wwpn,
++ port->erp_action.action);
++ } else {
++ debug_text_event(adapter->erp_dbf, 0, "pf_actenq_drpcp");
++ debug_event(adapter->erp_dbf, 0, &port->wwpn, sizeof(wwn_t));
++ ZFCP_LOG_NORMAL(
++ "drop: erp action %i on port "
++ "WWPN=0x%016Lx "
++ "(erp action %i already exists)\n",
++ action,
++ (llui_t)port->wwpn,
++ port->erp_action.action);
++
++ }
++ goto out;
++ }
++ if (!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) {
++ stronger_action = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
++ port = NULL;
++ }
++ /* fall through !!! */
++
++ case ZFCP_ERP_ACTION_REOPEN_ADAPTER :
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status)) {
++ debug_text_event(adapter->erp_dbf, 4, "a_actenq_drp");
++ ZFCP_LOG_DEBUG(
++ "drop: erp action %i on adapter "
++ "devno=0x%04x "
++ "(erp action %i already exists)\n",
++ action,
++ adapter->devno,
++ adapter->erp_action.action);
++ goto out;
++ }
++ break;
++
++ default :
++ debug_text_exception(adapter->erp_dbf, 1, "a_actenq_bug");
++ debug_event(adapter->erp_dbf, 1, &action, sizeof(int));
++ ZFCP_LOG_NORMAL(
++ "bug: Unknown error recovery procedure "
++ "action requested on the adapter with "
++ "devno 0x%04x "
++ "(debug info %d)\n",
++ adapter->devno,
++ action);
++ goto out;
++ }
++
++ /* check whether we need something stronger first */
++ if (stronger_action) {
++ debug_text_event(adapter->erp_dbf, 4, "a_actenq_str");
++ debug_event(adapter->erp_dbf, 4, &stronger_action, sizeof(int));
++ ZFCP_LOG_DEBUG(
++ "shortcut: need erp action %i before "
++ "erp action %i (adapter devno=0x%04x)\n",
++ stronger_action,
++ action,
++ adapter->devno);
++ action = stronger_action;
++ }
++
++ /* mark adapter to have some error recovery pending */
++ atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
++
++ /* setup error recovery action */
++ switch (action) {
++
++ case ZFCP_ERP_ACTION_REOPEN_UNIT :
++ atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
++ erp_action = &unit->erp_action;
++ if (!atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status))
++ status = ZFCP_STATUS_ERP_CLOSE_ONLY;
++ break;
++
++ case ZFCP_ERP_ACTION_REOPEN_PORT :
++ case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED :
++ zfcp_erp_action_dismiss_port(port);
++ atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
++ erp_action = &port->erp_action;
++ if (!atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status))
++ status = ZFCP_STATUS_ERP_CLOSE_ONLY;
++ break;
++
++ case ZFCP_ERP_ACTION_REOPEN_ADAPTER :
++ zfcp_erp_action_dismiss_adapter(adapter);
++ atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
++ erp_action = &adapter->erp_action;
++ if (!atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, &adapter->status))
++ status = ZFCP_STATUS_ERP_CLOSE_ONLY;
++ break;
++ }
++
++ memset(erp_action, 0, sizeof(zfcp_erp_action_t));
++ erp_action->adapter = adapter;
++ erp_action->port = port;
++ erp_action->unit = unit;
++ erp_action->action = action;
++ erp_action->status = status;
++
++ /* finally put it into 'ready' queue and kick erp thread */
++ list_add(&erp_action->list, &adapter->erp_ready_head);
++ ZFCP_LOG_DEBUG(
++ "waking erp_thread of the adapter with devno=0x%04x\n",
++ adapter->devno);
++ up(&adapter->erp_ready_sem);
++ retval = 0;
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ */
++static int zfcp_erp_action_dequeue(
++ zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ ZFCP_LOG_TRACE(
++ "enter (erp_action=0x%lx)\n",
++ (unsigned long)erp_action);
++
++ debug_text_event(adapter->erp_dbf, 4, "a_actdeq");
++ debug_event(adapter->erp_dbf, 4, &erp_action->action, sizeof(int));
++ list_del(&erp_action->list);
++ switch (erp_action->action) {
++ case ZFCP_ERP_ACTION_REOPEN_UNIT :
++ atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &erp_action->unit->status);
++ break;
++ case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED :
++ case ZFCP_ERP_ACTION_REOPEN_PORT :
++ atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &erp_action->port->status);
++ break;
++ case ZFCP_ERP_ACTION_REOPEN_ADAPTER :
++ atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &erp_action->adapter->status);
++ break;
++ default :
++ /* bug */
++ break;
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ */
++static int zfcp_erp_action_dismiss_adapter(zfcp_adapter_t *adapter)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ zfcp_port_t *port;
++ unsigned long flags;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ debug_text_event(adapter->erp_dbf, 5, "a_actab");
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status))
++ /* that's really all in this case */
++ zfcp_erp_action_dismiss(&adapter->erp_action);
++ else {
++ /* have a deeper look */
++ read_lock_irqsave(&adapter->port_list_lock, flags);
++ ZFCP_FOR_EACH_PORT(adapter, port) {
++ zfcp_erp_action_dismiss_port(port);
++ }
++ read_unlock_irqrestore(&adapter->port_list_lock, flags);
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose:
++ *
++ * returns:
++ */
++static int zfcp_erp_action_dismiss_port(zfcp_port_t *port)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ int retval = 0;
++ zfcp_unit_t *unit;
++ zfcp_adapter_t *adapter = port->adapter;
++ unsigned long flags;
++
++ ZFCP_LOG_TRACE("enter\n");
++
++ debug_text_event(adapter->erp_dbf, 5, "p_actab");
++ debug_event(adapter->erp_dbf, 5, &port->wwpn, sizeof(wwn_t));
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status))
++ /* that's really all in this case */
++ zfcp_erp_action_dismiss(&port->erp_action);
++ else {
++ /* have a deeper look */
++ read_lock_irqsave(&port->unit_list_lock, flags);
++ ZFCP_FOR_EACH_UNIT(port, unit) {
++ if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) {
++ zfcp_erp_action_dismiss(&unit->erp_action);
++ }
++ }
++ read_unlock_irqrestore(&port->unit_list_lock, flags);
++ }
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: moves erp_action to 'erp running list'
++ *
++ * returns:
++ */
++static inline void zfcp_erp_action_to_running(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ ZFCP_LOG_TRACE(
++ "enter (erp_action=0x%lx\n",
++ (unsigned long)erp_action);
++
++ debug_text_event(adapter->erp_dbf, 6, "a_toru");
++ debug_event(adapter->erp_dbf, 6, &erp_action->action, sizeof(int));
++ zfcp_erp_from_one_to_other(
++ &erp_action->list,
++ &erp_action->adapter->erp_running_head);
++
++ ZFCP_LOG_TRACE("exit\n");
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: moves erp_action to 'erp ready list'
++ *
++ * returns:
++ */
++static inline void zfcp_erp_action_to_ready(zfcp_erp_action_t *erp_action)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ zfcp_adapter_t *adapter = erp_action->adapter;
++
++ ZFCP_LOG_TRACE(
++ "enter (erp_action=0x%lx\n",
++ (unsigned long)erp_action);
++
++ debug_text_event(adapter->erp_dbf, 6, "a_tore");
++ debug_event(adapter->erp_dbf, 6, &erp_action->action, sizeof(int));
++ zfcp_erp_from_one_to_other(
++ &erp_action->list,
++ &erp_action->adapter->erp_ready_head);
++
++ ZFCP_LOG_TRACE("exit\n");
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function:
++ *
++ * purpose: moves a request from one erp_action list to the other
++ *
++ * returns:
++ */
++static inline void zfcp_erp_from_one_to_other(
++ struct list_head *entry,
++ struct list_head *head)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_ERP
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_ERP
++
++ ZFCP_LOG_TRACE(
++ "enter entry=0x%lx, head=0x%lx\n",
++ (unsigned long)entry,
++ (unsigned long)head);
++
++ list_del(entry);
++ list_add(entry, head);
++
++ ZFCP_LOG_TRACE("exit\n");
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++#ifdef ZFCP_STAT_REQSIZES
++
++static int zfcp_statistics_clear(
++ zfcp_adapter_t *adapter,
++ struct list_head *head)
++{
++ int retval = 0;
++ unsigned long flags;
++ struct list_head *entry, *next_entry;
++ zfcp_statistics_t *stat;
++
++ write_lock_irqsave(&adapter->stat_lock, flags);
++ list_for_each_safe(entry, next_entry, head) {
++ stat = list_entry(entry, zfcp_statistics_t, list);
++ list_del(entry);
++ kfree(stat);
++ }
++ write_unlock_irqrestore(&adapter->stat_lock, flags);
++
++ return retval;
++}
++
++
++static inline void zfcp_statistics_new(
++ zfcp_adapter_t *adapter,
++ struct list_head *head,
++ u32 num)
++{
++ zfcp_statistics_t *stat;
++
++ stat = ZFCP_KMALLOC(sizeof(zfcp_statistics_t), GFP_ATOMIC);
++ if (stat) {
++ stat->num = num;
++ stat->hits = 1;
++ list_add_tail(&stat->list, head);
++ } else atomic_inc(&adapter->stat_errors);
++}
++
++/**
++ * list_for_some_prev - iterate over a list backwards, starting
++ * somewhere in the middle of the list
++ * @pos: the &list_t to use as a loop counter.
++ * @middle: the &list_t whose predecessor the iteration starts at
++ * @head: the head for your list.
++ */
++#define list_for_some_prev(pos, middle, head) \
++ for (pos = (middle)->prev, prefetch(pos->prev); pos != (head); \
++ pos = pos->prev, prefetch(pos->prev))
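++
++/*
++ * Illustrative usage (hypothetical, not part of the driver;
++ * 'known_stat' and 'head' are placeholders): walk backwards from a
++ * known entry towards the head of a statistics list.
++ */
++#if 0
++ struct list_head *pos;
++ zfcp_statistics_t *stat;
++
++ list_for_some_prev(pos, &known_stat->list, head) {
++  stat = list_entry(pos, zfcp_statistics_t, list);
++  /* entries visited here precede known_stat in the list */
++ }
++#endif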
++
++/*
++ * Sort list if necessary to find frequently used entries quicker.
++ * Since counters are only ever increased by one, sorting can be
++ * implemented quite efficiently: it usually amounts to swapping the
++ * given entry with its predecessor, if anything at all. In rare cases
++ * (= if there is a series of predecessors with identical counter values
++ * which are in turn less than the value held by the current entry)
++ * searching for the position where we want to move the current entry to
++ * takes more than one hop back through the list. As to the overall
++ * performance of our statistics this is not a big deal.
++ * As a side-effect, we provide statistics sorted by hits to the user.
++ */
++static inline void zfcp_statistics_sort(
++ struct list_head *head,
++ struct list_head *entry,
++ zfcp_statistics_t *stat)
++{
++ zfcp_statistics_t *stat_sort = NULL;
++ struct list_head *entry_sort = NULL;
++
++ list_for_some_prev(entry_sort, entry, head) {
++ stat_sort = list_entry(entry_sort, zfcp_statistics_t, list);
++ if (stat_sort->hits >= stat->hits)
++ break;
++ }
++ if (stat_sort &&
++ entry->prev != entry_sort)
++ list_move(entry, entry_sort);
++}
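++
++/*
++ * Worked example for zfcp_statistics_sort(): with hit counters
++ *
++ * head -> 9 -> 5 -> 5 -> 5 -> 6 (current entry, just bumped from 5)
++ *
++ * the backwards walk skips the three 5s, stops at the 9 (>= 6) and
++ * moves the current entry directly behind it:
++ *
++ * head -> 9 -> 6 -> 5 -> 5 -> 5
++ *
++ * This multi-hop case only occurs for runs of equal counters; the
++ * common case is a single swap with the predecessor, or no move.
++ */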
++
++
++static void zfcp_statistics_inc(
++ zfcp_adapter_t *adapter,
++ struct list_head *head,
++ u32 num)
++{
++ unsigned long flags;
++ zfcp_statistics_t *stat;
++ struct list_head *entry;
++
++ if (atomic_read(&adapter->stat_on) == 0)
++ return;
++
++ write_lock_irqsave(&adapter->stat_lock, flags);
++ list_for_each(entry, head) {
++ stat = list_entry(entry, zfcp_statistics_t, list);
++ if (stat->num == num) {
++ stat->hits++;
++ zfcp_statistics_sort(head, entry, stat);
++ goto unlock;
++ }
++ }
++ /* hits is initialized to 1 */
++ zfcp_statistics_new(adapter, head, num);
++unlock:
++ write_unlock_irqrestore(&adapter->stat_lock, flags);
++}
++
++
++static int zfcp_statistics_print(
++ zfcp_adapter_t *adapter,
++ struct list_head *head,
++ char *prefix,
++ char *buf,
++ int len,
++ int max)
++{
++ unsigned long flags;
++ zfcp_statistics_t *stat;
++ struct list_head *entry;
++
++ write_lock_irqsave(&adapter->stat_lock, flags);
++ list_for_each(entry, head) {
++ if (len > max - 26)
++ break;
++ stat = list_entry(entry, zfcp_statistics_t, list);
++ len += sprintf(buf + len, "%s 0x%08x: 0x%08x\n",
++ prefix, stat->num, stat->hits);
++ }
++ write_unlock_irqrestore(&adapter->stat_lock, flags);
++
++ return len;
++}
++
++#endif /* ZFCP_STAT_REQSIZES */
++
++/*
++ * function: zfcp_cfdc_dev_ioctl
++ *
++ * purpose: Handle control file upload/download transaction via IOCTL interface
++ *
++ * returns: 0 - Operation completed successfully
++ * -ENOTTY - Unknown IOCTL command
++ * -EINVAL - Invalid sense data record
++ * -ENXIO - The FCP adapter is not available
++ * -EOPNOTSUPP - The FCP adapter does not have Control File support
++ * -ENOMEM - Insufficient memory
++ * -EFAULT - User space memory I/O operation fault
++ * -EPERM - Cannot create or queue FSF request or create SBALs
++ */
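++/*
++ * Illustrative user-space sketch (hypothetical; file descriptor
++ * setup and error handling omitted, the devno is a placeholder):
++ * issuing a control file download through this IOCTL interface.
++ */
++#if 0
++ zfcp_cfdc_sense_data_t sense_data;
++
++ memset(&sense_data, 0, sizeof(sense_data));
++ sense_data.signature = ZFCP_CFDC_SIGNATURE;
++ sense_data.devno = 0x1234; /* placeholder device number */
++ sense_data.command = ZFCP_CFDC_CMND_DOWNLOAD_NORMAL;
++ /* fill sense_data.control_file with the control file data first */
++ if (ioctl(fd, ZFCP_CFDC_IOC, &sense_data) != 0)
++  perror("zfcp cfdc transaction");
++#endif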
++int zfcp_cfdc_dev_ioctl(
++ struct inode *inode,
++ struct file *file,
++ unsigned int command,
++ unsigned long buffer)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ zfcp_cfdc_sense_data_t *sense_data, *sense_data_user;
++ zfcp_adapter_t *adapter;
++ zfcp_fsf_req_t *fsf_req = NULL;
++ zfcp_sg_list_t *sg_list = NULL;
++ u32 fsf_command, option;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (inode=0x%lx file=0x%lx command=0x%x buffer=0x%lx)\n",
++ (unsigned long)inode, (unsigned long)file, command, buffer);
++
++ ZFCP_LOG_NORMAL(
++ "Control file data channel transaction opened\n");
++
++ sense_data = (zfcp_cfdc_sense_data_t*)ZFCP_KMALLOC(
++ sizeof(zfcp_cfdc_sense_data_t), GFP_KERNEL);
++ if (sense_data == NULL) {
++ ZFCP_LOG_NORMAL(
++ "Not enough memory for the sense data record\n");
++ retval = -ENOMEM;
++ goto out;
++ }
++
++ sg_list = (zfcp_sg_list_t*)ZFCP_KMALLOC(
++ sizeof(zfcp_sg_list_t), GFP_KERNEL);
++ if (sg_list == NULL) {
++ ZFCP_LOG_NORMAL(
++ "Not enough memory for the scatter-gather list\n");
++ retval = -ENOMEM;
++ goto out;
++ }
++
++ if (command != ZFCP_CFDC_IOC) {
++ ZFCP_LOG_NORMAL(
++ "IOC request code 0x%x is not valid\n",
++ command);
++ retval = -ENOTTY;
++ goto out;
++ }
++
++ if ((sense_data_user = (zfcp_cfdc_sense_data_t*)buffer) == NULL) {
++ ZFCP_LOG_NORMAL(
++ "Sense data record is required\n");
++ retval = -EINVAL;
++ goto out;
++ }
++
++ retval = copy_from_user(
++ sense_data, sense_data_user, sizeof(zfcp_cfdc_sense_data_t));
++ if (retval) {
++ ZFCP_LOG_NORMAL(
++ "Cannot copy sense data record from user space memory\n");
++ retval = -EFAULT;
++ goto out;
++ }
++
++ if (sense_data->signature != ZFCP_CFDC_SIGNATURE) {
++ ZFCP_LOG_NORMAL(
++ "No valid sense data request signature 0x%08x found\n",
++ ZFCP_CFDC_SIGNATURE);
++ retval = -EINVAL;
++ goto out;
++ }
++
++ switch (sense_data->command) {
++
++ case ZFCP_CFDC_CMND_DOWNLOAD_NORMAL:
++ fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
++ option = FSF_CFDC_OPTION_NORMAL_MODE;
++ break;
++
++ case ZFCP_CFDC_CMND_DOWNLOAD_FORCE:
++ fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
++ option = FSF_CFDC_OPTION_FORCE;
++ break;
++
++ case ZFCP_CFDC_CMND_FULL_ACCESS:
++ fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
++ option = FSF_CFDC_OPTION_FULL_ACCESS;
++ break;
++
++ case ZFCP_CFDC_CMND_RESTRICTED_ACCESS:
++ fsf_command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
++ option = FSF_CFDC_OPTION_RESTRICTED_ACCESS;
++ break;
++
++ case ZFCP_CFDC_CMND_UPLOAD:
++ fsf_command = FSF_QTCB_UPLOAD_CONTROL_FILE;
++ option = 0;
++ break;
++
++ default:
++ ZFCP_LOG_NORMAL(
++ "Command code 0x%08x is not valid\n",
++ sense_data->command);
++ retval = -EINVAL;
++ goto out;
++ }
++
++ retval = zfcp_adapter_enqueue(sense_data->devno, &adapter);
++ if (retval != 0) {
++ if (retval != ZFCP_KNOWN) {
++ ZFCP_LOG_NORMAL(
++ "Cannot enqueue the FCP adapter (devno=0x%04x)\n",
++ sense_data->devno);
++ retval = -ENXIO;
++ goto out;
++ }
++ } else {
++ retval = zfcp_erp_adapter_reopen(adapter, 0);
++ if (retval < 0) {
++ ZFCP_LOG_NORMAL(
++ "Cannot reopen the FCP adapter (devno=0x%04x)\n",
++ adapter->devno);
++ retval = -ENXIO;
++ goto out;
++ }
++ zfcp_erp_wait(adapter);
++ }
++
++ if (!atomic_test_mask(ZFCP_STATUS_COMMON_RUNNING, &adapter->status) ||
++ atomic_test_mask(ZFCP_STATUS_COMMON_ERP_FAILED, &adapter->status)) {
++ ZFCP_LOG_NORMAL(
++ "The FCP adapter is not available (devno=0x%04x)\n",
++ adapter->devno);
++ retval = -ENXIO;
++ goto out;
++ }
++
++ if (adapter->devinfo.sid_data.dev_model != ZFCP_DEVICE_MODEL_PRIV) {
++ ZFCP_LOG_NORMAL(
++ "Control file upload/download is accepted "
++ "only on privileged subchannels (devno=0x%04x)\n",
++ adapter->devno);
++ retval = -ENXIO;
++ goto out;
++ }
++
++ if (sense_data->command & ZFCP_CFDC_WITH_CONTROL_FILE) {
++ retval = zfcp_sg_list_alloc(sg_list, ZFCP_CFDC_MAX_CONTROL_FILE_SIZE);
++ if (retval) {
++ ZFCP_LOG_NORMAL(
++ "Not enough memory for the scatter-gather list\n");
++ retval = -ENOMEM;
++ goto out;
++ }
++ }
++
++ if ((sense_data->command & ZFCP_CFDC_DOWNLOAD) &&
++ (sense_data->command & ZFCP_CFDC_WITH_CONTROL_FILE)) {
++ retval = zfcp_sg_list_copy_from_user(
++ sg_list, &sense_data_user->control_file,
++ ZFCP_CFDC_MAX_CONTROL_FILE_SIZE);
++ if (retval) {
++ ZFCP_LOG_NORMAL(
++ "Cannot copy control file from user space memory\n");
++ retval = -EFAULT;
++ goto out;
++ }
++ }
++
++ retval = zfcp_fsf_control_file(
++ adapter, &fsf_req, fsf_command, option, sg_list);
++ if (retval == -EOPNOTSUPP) {
++ ZFCP_LOG_NORMAL(
++ "Specified adapter does not support control file\n");
++ goto out;
++ } else if (retval != 0) {
++ ZFCP_LOG_NORMAL(
++ "Cannot create or queue FSF request or create SBALs\n");
++ retval = -EPERM;
++ goto out;
++ }
++
++ wait_event(fsf_req->completion_wq,
++ fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
++
++ if ((fsf_req->qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
++ (fsf_req->qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
++ retval = -ENXIO;
++ goto out;
++ }
++
++ sense_data->fsf_status = fsf_req->qtcb->header.fsf_status;
++ memcpy(&sense_data->fsf_status_qual,
++ &fsf_req->qtcb->header.fsf_status_qual,
++ sizeof(fsf_status_qual_t));
++ memcpy(&sense_data->payloads, &fsf_req->qtcb->bottom.support.els, 256);
++
++ retval = copy_to_user(
++ sense_data_user, sense_data, sizeof(zfcp_cfdc_sense_data_t));
++ if (retval) {
++ ZFCP_LOG_NORMAL(
++ "Cannot copy sense data record to user space memory\n");
++ retval = -EFAULT;
++ goto out;
++ }
++
++ if (sense_data->command & ZFCP_CFDC_UPLOAD) {
++ retval = zfcp_sg_list_copy_to_user(
++ &sense_data_user->control_file, sg_list,
++ ZFCP_CFDC_MAX_CONTROL_FILE_SIZE);
++ if (retval) {
++ ZFCP_LOG_NORMAL(
++ "Cannot copy control file to user space memory\n");
++ retval = -EFAULT;
++ goto out;
++ }
++ }
++
++out:
++ if (fsf_req) {
++ retval = zfcp_fsf_req_cleanup(fsf_req);
++ if (retval) {
++ ZFCP_LOG_NORMAL(
++ "bug: Could not remove the FSF request "
++ "(devno=0x%04x fsf_req=0x%lx)\n",
++ adapter->devno,
++ (unsigned long)fsf_req);
++ retval = -EPERM;
++ }
++ }
++
++ if (sg_list != NULL) {
++ zfcp_sg_list_free(sg_list);
++ ZFCP_KFREE(sg_list, sizeof(zfcp_sg_list_t));
++ }
++
++ if (sense_data != NULL)
++ ZFCP_KFREE(sense_data, sizeof(zfcp_cfdc_sense_data_t));
++
++ ZFCP_LOG_NORMAL(
++ "Control file data channel transaction closed\n");
++
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
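++
++#if 0
++/*
++ * Illustrative user-space sketch, not part of the driver: one
++ * plausible way to drive the transaction handled above. The device
++ * node path is hypothetical; ZFCP_CFDC_IOC, ZFCP_CFDC_SIGNATURE and
++ * zfcp_cfdc_sense_data_t are taken from this driver's headers.
++ */
++#include <fcntl.h>
++#include <string.h>
++#include <unistd.h>
++#include <sys/ioctl.h>
++
++static int cfdc_upload(u32 devno, zfcp_cfdc_sense_data_t *sense_data)
++{
++ int fd, retval;
++
++ fd = open("/dev/zfcp_cfdc", O_RDWR); /* path hypothetical */
++ if (fd < 0)
++ return -1;
++ memset(sense_data, 0, sizeof(*sense_data));
++ sense_data->signature = ZFCP_CFDC_SIGNATURE; /* checked above */
++ sense_data->devno = devno;
++ sense_data->command = ZFCP_CFDC_CMND_UPLOAD;
++ retval = ioctl(fd, ZFCP_CFDC_IOC, (unsigned long) sense_data);
++ close(fd);
++ return retval;
++}
++#endif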
++
++
++/*
++ * function: zfcp_sg_list_alloc
++ *
++ * purpose: Create a scatter-gather list of the specified size
++ *
++ * returns: 0 - Scatter gather list is created
++ * -ENOMEM - Insufficient memory (*list_ptr is then set to NULL)
++ */
++static inline int zfcp_sg_list_alloc(zfcp_sg_list_t *sg_list, size_t size)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ struct scatterlist *sg;
++ int i;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (sg_list=0x%lx size=%ld)\n",
++ (unsigned long)sg_list, size);
++
++ sg_list->count = size >> PAGE_SHIFT;
++ if (size & ~PAGE_MASK)
++ sg_list->count++;
++ sg_list->sg = (struct scatterlist*)ZFCP_KMALLOC(
++ sg_list->count * sizeof(struct scatterlist), GFP_KERNEL);
++ if (sg_list->sg == NULL) {
++ ZFCP_LOG_INFO("Out of memory!\n");
++ retval = -ENOMEM;
++ goto out;
++ }
++
++ for (i = 0, sg = sg_list->sg; i < sg_list->count; i++, sg++) {
++ if (i < sg_list->count - 1) {
++ sg->length = PAGE_SIZE;
++ sg->address = (char*)ZFCP_GET_ZEROED_PAGE(GFP_KERNEL);
++ } else {
++ sg->length = size & ~PAGE_MASK;
++ sg->address = (char*)ZFCP_KMALLOC(sg->length, GFP_KERNEL);
++ }
++ if (sg->address == NULL) {
++ while (sg-- != sg_list->sg)
++ ZFCP_FREE_PAGE((unsigned long)sg->address);
++ ZFCP_KFREE(sg_list->sg,
++ sg_list->count * sizeof(struct scatterlist));
++ ZFCP_LOG_INFO("Out of memory!\n");
++ retval = -ENOMEM;
++ goto out;
++ }
++ }
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
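++
++/*
++ * Worked example for the sizing above (assuming PAGE_SIZE is 4096):
++ * size = ZFCP_CFDC_MAX_CONTROL_FILE_SIZE = 127 * 1024 = 130048 bytes
++ * yields count = 31 + 1 = 32 elements: 31 zeroed pages plus one
++ * trailing 3072 byte buffer (130048 & ~PAGE_MASK).
++ */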
++
++
++/*
++ * function: zfcp_sg_list_free
++ *
++ * purpose: Destroy a scatter-gather list and release memory
++ *
++ * returns: Always 0
++ */
++static inline int zfcp_sg_list_free(zfcp_sg_list_t *sg_list)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ struct scatterlist *sg;
++ int i;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE("enter (sg_list=0x%lx)\n", (unsigned long)sg_list);
++
++ if ((sg_list->sg == NULL) || (sg_list->count == 0))
++ goto out;
++
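++ /* FIXME(?): the trailing element was obtained via ZFCP_KMALLOC in
++  * zfcp_sg_list_alloc() but is released via ZFCP_FREE_PAGE here,
++  * which also skews the mem_count accounting */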
++ for (i = 0, sg = sg_list->sg; i < sg_list->count; i++, sg++)
++ ZFCP_FREE_PAGE((unsigned long)sg->address);
++ ZFCP_KFREE(sg_list->sg, sg_list->count * sizeof(struct scatterlist));
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_sg_list_copy_from_user
++ *
++ * purpose: Copy data from user space memory to the scatter-gather list
++ *
++ * returns: 0 - The data has been copied from user
++ * -EFAULT - Memory I/O operation fault
++ */
++static inline int zfcp_sg_list_copy_from_user(
++ zfcp_sg_list_t *sg_list, void *buffer, size_t size)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ struct scatterlist *sg;
++ unsigned int length;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (sg_list=0x%lx buffer=0x%lx size=%ld)\n",
++ (unsigned long)sg_list, (unsigned long)buffer, size);
++
++ for (sg = sg_list->sg; size > 0; sg++) {
++ length = min((unsigned int)size, sg->length);
++ if (copy_from_user(sg->address, buffer, length)) {
++ ZFCP_LOG_INFO("Memory error (copy_from_user)\n");
++ retval = -EFAULT;
++ goto out;
++ }
++ buffer += length;
++ size -= length;
++ }
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++/*
++ * function: zfcp_sg_list_copy_to_user
++ *
++ * purpose: Copy data from the scatter-gather list to user space memory
++ *
++ * returns: 0 - The data has been copied to user
++ * -EFAULT - Memory I/O operation fault
++ */
++static inline int zfcp_sg_list_copy_to_user(
++ void *buffer, zfcp_sg_list_t *sg_list, size_t size)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_SCSI
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_SCSI
++
++ struct scatterlist *sg;
++ unsigned int length;
++ int retval = 0;
++
++ ZFCP_LOG_TRACE(
++ "enter (buffer=0x%lx sg_list=0x%lx size=%ld)\n",
++ (unsigned long)buffer, (unsigned long)sg_list, size);
++
++ for (sg = sg_list->sg; size > 0; sg++) {
++ length = min((unsigned int)size, sg->length);
++ if (copy_to_user(buffer, sg->address, length)) {
++ ZFCP_LOG_INFO("Memory error (copy_to_user)\n");
++ retval = -EFAULT;
++ goto out;
++ }
++ buffer += length;
++ size -= length;
++ }
++
++out:
++ ZFCP_LOG_TRACE("exit (%i)\n", retval);
++
++ return retval;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++EXPORT_SYMBOL(zfcp_data);
++
++/*
++ * Overrides for Emacs so that we get a uniform tabbing style.
++ * Emacs will notice this stuff at the end of the file and automatically
++ * adjust the settings for this buffer only. This must remain at the end
++ * of the file.
++ * ---------------------------------------------------------------------------
++ * Local variables:
++ * c-indent-level: 4
++ * c-brace-imaginary-offset: 0
++ * c-brace-offset: -4
++ * c-argdecl-indent: 4
++ * c-label-offset: -4
++ * c-continued-statement-offset: 4
++ * c-continued-brace-offset: 0
++ * indent-tabs-mode: nil
++ * tab-width: 8
++ * End:
++ */
+=== drivers/s390/scsi/zh.h
+==================================================================
+--- drivers/s390/scsi/zh.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/scsi/zh.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,512 @@
++/*
++ * $Id: zh.h,v 1.7.2.2 2004/03/24 11:18:00 aherrman Exp $
++ *
++ * Module providing an interface for HBA API (FC-HBA) implementation
++ * to the zfcp driver.
++ *
++ * (C) Copyright IBM Corp. 2003
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version. See the file COPYING for more
++ * information.
++ *
++ * Authors:
++ * Stefan Voelkel <Stefan.Voelkel at millenux.com>
++ * Andreas Herrmann <aherrman at de.ibm.com>
++ */
++
++/*
++ * We expect wwn to be a 64 bit type and not to be an array of 8 characters.
++ * Conversion of wwn thus has to be done in vendor/os specific library.
++ */
++
++#ifndef _ZH_H_
++#define _ZH_H_
++
++#include <linux/ioctl.h>
++#include <asm/string.h>
++
++#include "zfcp_zh.h"
++
++typedef u64 devid_t;
++
++#define DEVID_TO_DEVNO(devid) ((devno_t) ((devid) & 0xffff))
++#define ZH_DEVID(scsid, cssid, devno) \
++ ((devid_t) (((scsid) << 24) | ((cssid) << 16) | (devno)))
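++
++/*
++ * Worked example (hypothetical values): ZH_DEVID(0, 0, 0x4711) packs
++ * to the devid 0x0000000000004711, and DEVID_TO_DEVNO() recovers the
++ * device number 0x4711 from it.
++ */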
++
++/* Maximum number of events in the queues (shared and polled) */
++#define ZH_EVENTS_MAX 20
++#define ZH_GET_EVENT_BUFFER_COUNT 10
++
++/* SPC-2 defines the additional_length field in the sense data format as a byte,
++ * thus only 255 bytes of additional data may be returned. Size of header in
++ * sense data format is 8 bytes.
++ */
++#define ZH_SCSI_SENSE_BUFFERSIZE 263
++
++typedef unsigned short zh_count_t;
++
++/**
++ * struct sg_list - scatter-gather list wrapper
++ * @*sg: pointer to the real scatterlist
++ * @count: number of allocated pages
++ *
++ * Even though this struct is small, it groups logically dependent
++ * information and reduces parameter list length.
++ */
++struct sg_list
++{
++ struct scatterlist *sg;
++ unsigned int count;
++};
++
++/**
++ * struct scsi_inquiry_cmd
++ * FIXME: to be documented
++ * The following are defined by the SCSI-2 specification.
++ */
++struct scsi_inquiry_cmd
++{
++ u8 op;
++ u8 reserved1:6;
++ u8 cmdt:1;
++ u8 evpd:1;
++ u8 page_code;
++ u8 reserved2;
++ u8 alloc_length;
++ u8 control;
++} __attribute__((packed));
++
++/**
++ * struct scsi_read_capacity_cmd
++ * FIXME: to be documented
++ * the 10 byte version of the command, see SBC-2
++ */
++struct scsi_read_capacity_cmd
++{
++ u8 op;
++ u8 reserved1:7;
++ u8 reladdr:1;
++ u32 lba;
++ u16 reserved2;
++ u8 reserved3:7;
++ u8 pmi:1;
++ u8 control;
++} __attribute__((packed));
++
++/**
++ * struct scsi_report_luns_cmd
++ * FIXME: to be documented
++ */
++struct scsi_report_luns_cmd
++{
++ u8 op;
++ u8 reserved1[5];
++ u32 alloc_length;
++ u8 reserved2;
++ u8 control;
++} __attribute__((packed));
++
++/* minimum size of the response data for REPORT LUNS */
++#define SCSI_REPORT_LUNS_SIZE_MIN 8
++
++#ifndef REPORT_LUNS
++#define REPORT_LUNS 0xA0
++#endif
++
++/**
++ * struct zh_get_config
++ * @devid: of the adapter to generate config events for
++ * @wwpn: of the port to generate config events for
++ * @size: must be set to sizeof(struct zh_event)
++ *
++ * The size member is used to assert that the event structures in user
++ * and kernel space are of the same size.
++ * If devid is 0, adapter_add events are generated for all configured adapters.
++ * If devid != 0 and wwpn is zero, port_add events are generated for all
++ * ports of the specified adapter.
++ * If devid != 0 and wwpn != 0, unit_add events are generated for all units
++ * configured on the specified port.
++ */
++struct zh_get_config
++{
++ devid_t devid;
++ wwn_t wwpn;
++ unsigned int flags:2;
++};
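++
++/*
++ * Illustrative sketch (hypothetical values): requesting config events
++ * for all units of one port via the ZH_IOC_GET_CONFIG ioctl defined
++ * below.
++ *
++ *	struct zh_get_config config = {
++ *		.devid = ZH_DEVID(0, 0, 0x4711),
++ *		.wwpn = 0x5005076300c7a8d6ULL,
++ *	};
++ *	ioctl(fd, ZH_IOC_GET_CONFIG, &config);
++ */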
++
++/**
++ * struct zh_get_adapterattributes
++ * @devid: unique id for adapter device
++ * @attributes: adapter attributes
++ *
++ * structure passed by ioctl ZH_IOC_GET_ADAPTERATTRIBUTES
++ */
++struct zh_get_adapterattributes
++{
++ devid_t devid;
++ struct zfcp_adapter_attributes attributes;
++};
++
++/**
++ * struct zh_get_portattributes
++ * @devid: unique id for adapter device
++ * @wwpn: wwn of discovered or local port (optional)
++ * @attributes: port attributes
++ *
++ * structure passed by ioctls ZH_IOC_GET_PORTATTRIBUTES and
++ * ZH_IOC_GET_DPORTATTRIBUTES
++ */
++struct zh_get_portattributes
++{
++ devid_t devid;
++ wwn_t wwpn;
++ struct zfcp_port_attributes attributes;
++};
++
++/**
++ * struct zh_get_portstatistics
++ * @devid: unique id for adapter device
++ * @portwwn: wwn local port (optional)
++ * @stat: port statistics
++ *
++ * structure passed by ioctl ZH_IOC_GET_PORTSTATISTICS
++ */
++struct zh_get_portstatistics
++{
++ devid_t devid;
++ wwn_t portwwn;
++ struct zfcp_port_statistics statistics;
++};
++
++/**
++ * struct zh_event_polled_link
++ * @event: subtype of link event, see @zh_event_polled_link_e
++ * @port_fc_id: port FC id where this event occurred
++ *
++ * The standard defines an array of 3 u32 as reserved
++ * in its structure. We do not need it here since no data
++ * is passed with this member from kernel to userspace.
++ */
++struct zh_event_polled_link
++{
++ u32 port_fc_id;
++};
++
++/**
++ * struct zh_event_polled_rscn
++ * @port_fc_id: port FC id where this event occurred
++ * @port_page: affected port_id pages
++ *
++ * The standard defines an array of 2 u32 as reserved
++ * in its structure. We do not need it here since no data
++ * is passed with this member from kernel to userspace.
++ */
++struct zh_event_polled_rscn
++{
++ u32 port_fc_id;
++ u32 port_page;
++};
++
++/**
++ * struct zh_event_polled_pty
++ * @pty_data: proprietary data
++ */
++struct zh_event_polled_pty
++{
++ u32 pty_data[4];
++};
++
++/**
++ * struct zh_event_polled
++ * @event: type of event that occurred
++ * @data: union of different events
++ * @link: link event, @see zh_event_polled_link
++ * @rscn: rscn event, @see zh_event_polled_rscn
++ * @pty: pty event, @see zh_event_polled_pty
++ */
++struct zh_event_polled
++{
++ u32 event;
++ devid_t devid;
++ union
++ {
++ struct zh_event_polled_link link;
++ struct zh_event_polled_rscn rscn;
++ struct zh_event_polled_pty pty;
++ } data;
++};
++
++/**
++ * struct zh_get_event_buffer
++ * @devid: of the adapter to poll events from
++ * @count: number of events in @polled
++ * @polled: array of events
++ */
++struct zh_get_event_buffer
++{
++ devid_t devid;
++ unsigned int count;
++ struct zh_event_polled polled[ZH_GET_EVENT_BUFFER_COUNT];
++};
++
++/**
++ * struct zh_event_adapter_add
++ * @devid: unique id of adapter device
++ * @wwnn: wwn of adapter node
++ * @wwpn: wwn of adapter port
++ *
++ * structure used for adapter add events
++ */
++struct zh_event_adapter_add
++{
++ devid_t devid;
++ wwn_t wwnn;
++ wwn_t wwpn;
++};
++
++/**
++ * struct zh_event_port_add
++ * @devid: of the adapter the port was added to
++ * @wwpn: of the added port
++ * @wwnn: of the added port
++ * @did: of the added port
++ */
++struct zh_event_port_add
++{
++ devid_t devid;
++ wwn_t wwpn;
++ wwn_t wwnn;
++ fc_id_t did;
++};
++
++/**
++ * struct zh_event_unit_add
++ * @devid: of the adapter the unit was added to
++ * @wwpn: of the port the unit was added to
++ * @fclun: of the added unit
++ * @host: SCSI host id
++ * @channel: SCSI channel
++ * @id: SCSI id
++ * @lun: SCSI lun
++ */
++struct zh_event_unit_add
++{
++ devid_t devid;
++ wwn_t wwpn;
++ fcp_lun_t fclun;
++ unsigned int host;
++ unsigned int channel;
++ unsigned int id;
++ unsigned int lun;
++};
++
++/**
++ * struct zh_event
++ * @event: type of event
++ * @data: union of event specific structures
++ *
++ * structure passed by ioctl ZH_IOC_EVENT (and others?)
++ */
++struct zh_event
++{
++ u8 event;
++ union
++ {
++ struct zh_event_polled polled;
++ struct zh_event_adapter_add adapter_add;
++ struct zh_event_port_add port_add;
++ struct zh_event_unit_add unit_add;
++ } data;
++};
++
++/* SPC-2 defines the additional_length field of the inquiry reply as a byte, thus
++ * only 255 bytes of additional data may be returned. Size of header for
++ * standard INQUIRY data is 5 bytes.
++ */
++#define ZH_SCSI_INQUIRY_SIZE 260
++
++/**
++ * struct zh_scsi_inquiry - data needed for an INQUIRY
++ * @devid: of the adapter
++ * @wwpn: of the port
++ * @fclun: of the unit to send the command to
++ * @evpd: request EVPD?
++ * @page_code: of the EVPD to request
++ * @inquiry: payload of the response
++ * @sense: buffer for sense data
++ */
++struct zh_scsi_inquiry
++{
++ devid_t devid;
++ wwn_t wwpn;
++ u64 fclun;
++ u8 evpd;
++ u32 page_code;
++ u8 inquiry[ZH_SCSI_INQUIRY_SIZE];
++ u8 sense[ZH_SCSI_SENSE_BUFFERSIZE];
++};
++
++/* SBC-2 defines the READ CAPACITY data */
++#define ZH_SCSI_READ_CAPACITY_SIZE 8
++
++/**
++ * struct zh_scsi_read_capacity - data needed for a READ_CAPACITY
++ * @devid: of the adapter
++ * @wwpn: of the port
++ * @fclun: of the unit to send the command to
++ * @read_capacity: payload of the response
++ * @sense: buffer for sense data
++ */
++struct zh_scsi_read_capacity
++{
++ devid_t devid;
++ wwn_t wwpn;
++ u64 fclun;
++ u8 read_capacity[ZH_SCSI_READ_CAPACITY_SIZE];
++ u8 sense[ZH_SCSI_SENSE_BUFFERSIZE];
++};
++
++/**
++ * struct zh_scsi_report_luns - data needed for a REPORT_LUNS
++ * @devid: of the adapter
++ * @wwpn: of the port
++ * @*rsp_buffer: pointer to response buffer
++ * @rsp_buffer_size: of the response buffer
++ * @sense: buffer for sense data
++ */
++struct zh_scsi_report_luns
++{
++ devid_t devid;
++ wwn_t wwpn;
++ void *rsp_buffer;
++ u32 rsp_buffer_size;
++ u8 sense[ZH_SCSI_SENSE_BUFFERSIZE];
++} __attribute__((packed));
++
++/**
++ * struct zh_els_rscn_payload - RSCN ELS payload as of FC-FS
++ * @qualifier: event qualifier
++ * @address_format: format of the address
++ * @domain:
++ * @area:
++ * @sequence:
++ */
++struct zh_els_rscn_payload
++{
++ u8 reserved1:2;
++ u8 qualifier:4;
++ u8 address_format:2;
++ u8 domain;
++ u8 area;
++ u8 sequence;
++} __attribute__((packed));
++
++/**
++ * struct zh_els_rscn - RSCN ELS as of FC-FS
++ * FIXME: to be documented
++ */
++struct zh_els_rscn
++{
++ u8 op;
++ u8 page_length;
++ u16 payload_length;
++ struct zh_els_rscn_payload payload[0];
++} __attribute__((packed));
++
++/**
++ * struct zh_get_rnid - retrieve RNID from adapter
++ * @devid: to send the rnid via
++ * @payload: payload for RNID ELS
++ */
++struct zh_get_rnid
++{
++ devid_t devid;
++ struct zfcp_ls_rnid_acc payload;
++};
++
++/**
++ * struct zh_send_rnid - send out an RNID ELS
++ * @devid: to send the rnid via
++ * @wwpn: to send it to
++ * @size: of the buffer
++ * @payload: payload buffer
++ */
++struct zh_send_rnid
++{
++ devid_t devid;
++ wwn_t wwpn;
++ u32 size;
++ struct zfcp_ls_rnid_acc payload;
++};
++
++/**
++ * struct zh_send_ct - data needed to send out a Generic Service command
++ * @devid: id of HBA via which to send CT
++ * @req_length: size of the request buffer
++ * @req: request buffer
++ * @resp_length: size of response buffer
++ * @resp: response buffer
++ */
++struct zh_send_ct
++{
++ devid_t devid;
++ u32 req_length;
++ void *req;
++ u32 resp_length;
++ void *resp;
++} __attribute__((packed));
++
++/* IOCTLs */
++#define ZH_IOC_MAGIC 0xDD
++
++#define ZH_IOC_GET_ADAPTERATTRIBUTES \
++ _IOWR(ZH_IOC_MAGIC, 1, struct zh_get_adapterattributes)
++#define ZH_IOC_GET_PORTATTRIBUTES \
++ _IOWR(ZH_IOC_MAGIC, 2, struct zh_get_portattributes)
++#define ZH_IOC_GET_PORTSTATISTICS \
++ _IOWR(ZH_IOC_MAGIC, 3, struct zh_get_portstatistics)
++#define ZH_IOC_GET_DPORTATTRIBUTES \
++ _IOWR(ZH_IOC_MAGIC, 4, struct zh_get_portattributes)
++#define ZH_IOC_GET_RNID _IOWR(ZH_IOC_MAGIC, 5, struct zh_get_rnid)
++#define ZH_IOC_SEND_RNID _IOWR(ZH_IOC_MAGIC, 6, struct zh_send_rnid)
++#define ZH_IOC_SEND_CT _IOWR(ZH_IOC_MAGIC, 7, struct zh_send_ct)
++#define ZH_IOC_SCSI_INQUIRY _IOW(ZH_IOC_MAGIC, 8, struct zh_scsi_inquiry)
++#define ZH_IOC_SCSI_READ_CAPACITY \
++ _IOW(ZH_IOC_MAGIC, 9, struct zh_scsi_read_capacity)
++#define ZH_IOC_SCSI_REPORT_LUNS \
++ _IOW(ZH_IOC_MAGIC, 10, struct zh_scsi_report_luns)
++#define ZH_IOC_GET_EVENT_BUFFER \
++ _IOWR(ZH_IOC_MAGIC, 11, struct zh_get_event_buffer)
++#define ZH_IOC_GET_CONFIG _IOW(ZH_IOC_MAGIC, 12, struct zh_get_config)
++#define ZH_IOC_CLEAR_CONFIG _IO(ZH_IOC_MAGIC, 13)
++#define ZH_IOC_EVENT_START _IO(ZH_IOC_MAGIC, 14)
++#define ZH_IOC_EVENT_STOP _IO(ZH_IOC_MAGIC, 15)
++#define ZH_IOC_EVENT _IOR(ZH_IOC_MAGIC, 16, struct zh_event)
++#define ZH_IOC_EVENT_INSERT _IO(ZH_IOC_MAGIC, 17)
++
++enum zh_event_e {
++ ZH_EVENT_DUMMY,
++ ZH_EVENT_ADAPTER_ADD,
++ ZH_EVENT_ADAPTER_DEL,
++ ZH_EVENT_PORT_ADD,
++ ZH_EVENT_UNIT_ADD,
++ ZH_EVENT_POLLED
++};
++
++enum zh_event_polled_e {
++ ZH_EVENT_POLLED_LINK_UP,
++ ZH_EVENT_POLLED_LINK_DOWN,
++ ZH_EVENT_POLLED_RSCN,
++ ZH_EVENT_POLLED_PTY
++};
++
++#define ZH_LOG(level, fmt, args...) \
++printk(level"%s:%d, "fmt, __FUNCTION__, __LINE__, ##args)
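++
++/*
++ * Example (hypothetical call site): ZH_LOG(KERN_INFO, "wwpn=0x%Lx\n",
++ * wwpn) expands to a printk that prefixes the message with the current
++ * function name and line number, e.g. "zh_ioctl:123, wwpn=...".
++ */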
++
++int zh_send_ct_helper(struct zh_send_ct *);
++int zh_report_luns_helper(struct zh_scsi_report_luns *);
++
++#endif /* _ZH_H_ */
+=== drivers/s390/scsi/zfcp.h
+==================================================================
+--- drivers/s390/scsi/zfcp.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/scsi/zfcp.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,1298 @@
++/*
++ * $Id$
++ *
++ * FCP adapter driver for IBM eServer zSeries
++ *
++ * (C) Copyright IBM Corp. 2002, 2003
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version. See the file COPYING for more
++ * information.
++ *
++ * Authors:
++ * Martin Peschke <mpeschke at de.ibm.com>
++ * Raimund Schroeder <raimund.schroeder at de.ibm.com>
++ * Aron Zeh
++ * Wolfgang Taphorn
++ * Stefan Bader <stefan.bader at de.ibm.com>
++ * Andreas Herrmann <aherrman at de.ibm.com>
++ * Stefan Voelkel <Stefan.Voelkel at millenux.com>
++ */
++
++#ifndef _ZFCP_H_
++#define _ZFCP_H_
++
++#define ZFCP_LOW_MEM_CREDITS
++#define ZFCP_STAT_REQSIZES
++#define ZFCP_STAT_QUEUES
++
++#define ZFCP_PARSE_ERR_BUF_SIZE 100
++
++#include <linux/config.h>
++#include <linux/notifier.h>
++#include <linux/types.h>
++#include <linux/list.h>
++#include <linux/completion.h>
++
++#include <asm/types.h>
++#include <asm/irq.h>
++#include <asm/s390io.h>
++#include <asm/s390dyn.h> /* devreg_t */
++#include <asm/debug.h> /* debug_info_t */
++#include <asm/qdio.h> /* qdio_buffer_t */
++#ifdef CONFIG_S390_SUPPORT
++#include <asm/ioctl32.h>
++#endif
++
++#include <linux/blk.h>
++#include <../drivers/scsi/scsi.h>
++#include <../drivers/scsi/hosts.h>
++
++#include "zfcp_fsf.h"
++
++/* 32 bit for SCSI ID and LUN as long as the SCSI stack uses this type */
++typedef u32 scsi_id_t;
++typedef u32 scsi_lun_t;
++
++typedef u16 devno_t;
++typedef u16 irq_t;
++
++typedef u64 wwn_t;
++typedef u32 fc_id_t;
++typedef u64 fcp_lun_t;
++
++
++struct _zfcp_adapter;
++struct _zfcp_fsf_req;
++
++/*
++ * very simple implementation of an emergency pool:
++ * a pool consists of a fixed number of equal elements,
++ * for each purpose a different pool should be created
++ */
++typedef struct _zfcp_mem_pool_element {
++ atomic_t use;
++ void *buffer;
++} zfcp_mem_pool_element_t;
++
++typedef struct _zfcp_mem_pool {
++ int entries;
++ int size;
++ zfcp_mem_pool_element_t *element;
++ struct timer_list timer;
++} zfcp_mem_pool_t;
++
++typedef struct _zfcp_adapter_mem_pool {
++ zfcp_mem_pool_t fsf_req_status_read;
++ zfcp_mem_pool_t data_status_read;
++ zfcp_mem_pool_t data_gid_pn;
++ zfcp_mem_pool_t fsf_req_erp;
++ zfcp_mem_pool_t fsf_req_scsi;
++} zfcp_adapter_mem_pool_t;
++
++typedef void zfcp_fsf_req_handler_t(struct _zfcp_fsf_req*);
++
++typedef struct {
++} zfcp_exchange_config_data_t;
++
++typedef struct {
++ struct _zfcp_port *port;
++} zfcp_open_port_t;
++
++typedef struct {
++ struct _zfcp_port *port;
++} zfcp_close_port_t;
++
++typedef struct {
++ struct _zfcp_unit *unit;
++} zfcp_open_unit_t;
++
++typedef struct {
++ struct _zfcp_unit *unit;
++} zfcp_close_unit_t;
++
++typedef struct {
++ struct _zfcp_port *port;
++} zfcp_close_physical_port_t;
++
++typedef struct {
++ struct _zfcp_unit *unit;
++ Scsi_Cmnd *scsi_cmnd;
++ unsigned long start_jiffies;
++} zfcp_send_fcp_command_task_t;
++
++
++typedef struct {
++ struct _zfcp_unit *unit;
++} zfcp_send_fcp_command_task_management_t;
++
++typedef struct {
++ struct _zfcp_fsf_req_t *fsf_req;
++ struct _zfcp_unit *unit;
++} zfcp_abort_fcp_command_t;
++
++/*
++ * FC-GS-2 stuff
++ */
++#define ZFCP_CT_REVISION 0x01
++#define ZFCP_CT_DIRECTORY_SERVICE 0xFC
++#define ZFCP_CT_NAME_SERVER 0x02
++#define ZFCP_CT_SYNCHRONOUS 0x00
++#define ZFCP_CT_GID_PN 0x0121
++#define ZFCP_CT_GA_NXT 0x0100
++#define ZFCP_CT_MAX_SIZE 0x1020
++#define ZFCP_CT_ACCEPT 0x8002
++#define ZFCP_CT_REJECT 0x8001
++
++/*
++ * FC-FS stuff
++ */
++#define R_A_TOV 10 /* seconds */
++#define ZFCP_ELS_TIMEOUT (2 * R_A_TOV)
++
++#define ZFCP_LS_RJT 0x01
++#define ZFCP_LS_ACC 0x02
++#define ZFCP_LS_RTV 0x0E
++#define ZFCP_LS_RLS 0x0F
++#define ZFCP_LS_PDISC 0x50
++#define ZFCP_LS_ADISC 0x52
++#define ZFCP_LS_RSCN 0x61
++#define ZFCP_LS_RNID 0x78
++#define ZFCP_LS_RLIR 0x7A
++#define ZFCP_LS_RTV_E_D_TOV_FLAG 0x04000000
++
++/* LS_ACC Reason Codes */
++#define ZFCP_LS_RJT_INVALID_COMMAND_CODE 0x01
++#define ZFCP_LS_RJT_LOGICAL_ERROR 0x03
++#define ZFCP_LS_RJT_LOGICAL_BUSY 0x05
++#define ZFCP_LS_RJT_PROTOCOL_ERROR 0x07
++#define ZFCP_LS_RJT_UNABLE_TO_PERFORM 0x09
++#define ZFCP_LS_RJT_COMMAND_NOT_SUPPORTED 0x0B
++#define ZFCP_LS_RJT_VENDOR_UNIQUE_ERROR 0xFF
++
++struct zfcp_ls_rjt_par {
++ u8 action;
++ u8 reason_code;
++ u8 reason_expl;
++ u8 vendor_unique;
++} __attribute__ ((packed));
++
++struct zfcp_ls_rtv {
++ u8 code;
++ u8 field[3];
++} __attribute__ ((packed));
++
++struct zfcp_ls_rtv_acc {
++ u8 code;
++ u8 field[3];
++ u32 r_a_tov;
++ u32 e_d_tov;
++ u32 qualifier;
++} __attribute__ ((packed));
++
++struct zfcp_ls_rls {
++ u8 code;
++ u8 field[3];
++ fc_id_t port_id;
++} __attribute__ ((packed));
++
++struct zfcp_ls_rls_acc {
++ u8 code;
++ u8 field[3];
++ u32 link_failure_count;
++ u32 loss_of_sync_count;
++ u32 loss_of_signal_count;
++ u32 prim_seq_prot_error;
++ u32 invalid_transmition_word;
++ u32 invalid_crc_count;
++} __attribute__ ((packed));
++
++struct zfcp_ls_pdisc {
++ u8 code;
++ u8 field[3];
++ u8 common_svc_parm[16];
++ wwn_t wwpn;
++ wwn_t wwnn;
++ struct {
++ u8 class1[16];
++ u8 class2[16];
++ u8 class3[16];
++ } svc_parm;
++ u8 reserved[16];
++ u8 vendor_version[16];
++} __attribute__ ((packed));
++
++struct zfcp_ls_pdisc_acc {
++ u8 code;
++ u8 field[3];
++ u8 common_svc_parm[16];
++ wwn_t wwpn;
++ wwn_t wwnn;
++ struct {
++ u8 class1[16];
++ u8 class2[16];
++ u8 class3[16];
++ } svc_parm;
++ u8 reserved[16];
++ u8 vendor_version[16];
++} __attribute__ ((packed));
++
++struct zfcp_ls_adisc {
++ u8 code;
++ u8 field[3];
++ fc_id_t hard_nport_id;
++ wwn_t wwpn;
++ wwn_t wwnn;
++ fc_id_t nport_id;
++} __attribute__ ((packed));
++
++struct zfcp_ls_adisc_acc {
++ u8 code;
++ u8 field[3];
++ fc_id_t hard_nport_id;
++ wwn_t wwpn;
++ wwn_t wwnn;
++ fc_id_t nport_id;
++} __attribute__ ((packed));
++
++struct zfcp_ls_rnid {
++ u8 code;
++ u8 field[3];
++ u8 node_id_format;
++ u8 reserved[3];
++} __attribute__((packed));
++
++/* common identification data */
++struct zfcp_ls_rnid_common_id {
++ u64 n_port_name;
++ u64 node_name;
++} __attribute__((packed));
++
++/* general topology specific identification data */
++struct zfcp_ls_rnid_general_topology_id {
++ u8 vendor_unique[16];
++ u32 associated_type;
++ u32 physical_port_number;
++ u32 nr_attached_nodes;
++ u8 node_management;
++ u8 ip_version;
++ u16 port_number;
++ u8 ip_address[16];
++ u8 reserved[2];
++ u16 vendor_specific;
++} __attribute__((packed));
++
++struct zfcp_ls_rnid_acc {
++ u8 code;
++ u8 field[3];
++ u8 node_id_format;
++ u8 common_id_length;
++ u8 reserved;
++ u8 specific_id_length;
++ struct zfcp_ls_rnid_common_id
++ common_id;
++ struct zfcp_ls_rnid_general_topology_id
++ specific_id;
++} __attribute__((packed));
++
++struct zfcp_rc_entry {
++ u8 code;
++ const char *description;
++};
++
++
++/*
++ * FC-GS-4 stuff
++ */
++#define ZFCP_CT_TIMEOUT (3 * R_A_TOV)
++
++
++/*
++ * header for CT_IU
++ */
++struct ct_hdr {
++ u8 revision; // 0x01
++ u8 in_id[3]; // 0x00
++ u8 gs_type; // 0xFC Directory Service
++ u8 gs_subtype; // 0x02 Name Server
++ u8 options; // 0x00 single bidirectional exchange
++ u8 reserved0;
++ u16 cmd_rsp_code; // 0x0121 GID_PN, or 0x0100 GA_NXT
++ u16 max_res_size; // <= (4096 - 16) / 4
++ u8 reserved1;
++ u8 reason_code;
++ u8 reason_code_expl;
++ u8 vendor_unique;
++} __attribute__ ((packed));
++
++/*
++ * nameserver request CT_IU -- for requests where
++ * a port identifier or a port name is required
++ */
++struct ct_iu_ns_req {
++ struct ct_hdr header;
++ union {
++ wwn_t wwpn; /* e.g. for GID_PN */
++ fc_id_t d_id; /* e.g. for GA_NXT */
++ } data;
++} __attribute__ ((packed));
++
++/* FS_ACC IU and data unit for GID_PN nameserver request */
++struct ct_iu_gid_pn {
++ struct ct_hdr header;
++ fc_id_t d_id;
++} __attribute__ ((packed));
++
++/* data unit for GA_NXT nameserver request */
++struct ns_ga_nxt {
++ u8 port_type;
++ u8 port_id[3];
++ u64 port_wwn;
++ u8 port_symbolic_name_length;
++ u8 port_symbolic_name[255];
++ u64 node_wwn;
++ u8 node_symbolic_name_length;
++ u8 node_symbolic_name[255];
++ u64 initial_process_associator;
++ u8 node_ip[16];
++ u32 cos;
++ u8 fc4_types[32];
++ u8 port_ip[16];
++ u64 fabric_wwn;
++ u8 reserved;
++ u8 hard_address[3];
++} __attribute__ ((packed));
++
++/* FS_ACC IU and data unit for GA_NXT nameserver request */
++struct ct_iu_ga_nxt {
++ struct ct_hdr header;
++ struct ns_ga_nxt du;
++} __attribute__ ((packed));
++
++
++typedef void (*zfcp_send_ct_handler_t)(unsigned long);
++
++/* used to pass parameters to zfcp_send_ct() */
++struct zfcp_send_ct {
++ struct _zfcp_port *port;
++ struct scatterlist *req;
++ struct scatterlist *resp;
++ unsigned int req_count;
++ unsigned int resp_count;
++ zfcp_send_ct_handler_t handler;
++ unsigned long handler_data;
++ struct _zfcp_mem_pool *pool;
++ int timeout;
++ struct timer_list *timer;
++ struct completion *completion;
++ int status;
++};
++
++/* used for name server requests in error recovery */
++struct zfcp_gid_pn_data {
++ struct zfcp_send_ct ct;
++ struct scatterlist req;
++ struct scatterlist resp;
++ struct ct_iu_ns_req ct_iu_req;
++ struct ct_iu_gid_pn ct_iu_resp;
++};
++
++typedef void (*zfcp_send_els_handler_t)(unsigned long);
++
++/* used to pass parameters to zfcp_send_els() */
++struct zfcp_send_els {
++ struct _zfcp_port *port;
++ struct scatterlist *req;
++ struct scatterlist *resp;
++ unsigned int req_count;
++ unsigned int resp_count;
++ zfcp_send_els_handler_t handler;
++ unsigned long handler_data;
++ struct completion *completion;
++ int ls_code;
++ int status;
++};
++
++typedef struct {
++ fsf_status_read_buffer_t *buffer;
++} zfcp_status_read_t;
++
++/* request specific data */
++typedef union _zfcp_req_data {
++ zfcp_exchange_config_data_t exchange_config_data;
++ zfcp_open_port_t open_port;
++ zfcp_close_port_t close_port;
++ zfcp_open_unit_t open_unit;
++ zfcp_close_unit_t close_unit;
++ zfcp_close_physical_port_t close_physical_port;
++ zfcp_send_fcp_command_task_t send_fcp_command_task;
++ zfcp_send_fcp_command_task_management_t
++ send_fcp_command_task_management;
++ zfcp_abort_fcp_command_t abort_fcp_command;
++ struct zfcp_send_ct *send_ct;
++ struct zfcp_send_els *send_els;
++ zfcp_status_read_t status_read;
++ fsf_qtcb_bottom_port_t *port_data;
++} zfcp_req_data_t;
++
++/* FSF request */
++typedef struct _zfcp_fsf_req {
++ /* driver wide common magic */
++ u32 common_magic;
++ /* data structure specific magic */
++ u32 specific_magic;
++ /* list of FSF requests */
++ struct list_head list;
++ /* adapter this request belongs to */
++ struct _zfcp_adapter *adapter;
++ /* number of SBALs that can be used */
++ u8 sbal_number;
++ /* first SBAL for this request */
++ u8 sbal_first;
++ /* last possible SBAL for this request */
++ u8 sbal_last;
++ /* current SBAL during creation of request */
++ u8 sbal_curr;
++ /* current SBALE during creation of request */
++ u8 sbale_curr;
++
++ /* can be used by routine to wait for request completion */
++ wait_queue_head_t completion_wq;
++ /* status of this request */
++ volatile u32 status;
++ /* copy of FSF Command (avoid touching SBAL when it is QDIO owned) */
++ u32 fsf_command;
++ /* address of QTCB*/
++ fsf_qtcb_t *qtcb;
++ /* Sequence number used with this request */
++ u32 seq_no;
++ /* Information fields corresponding to the various types of request */
++ zfcp_req_data_t data;
++ /* used if this request is issued on behalf of erp */
++ struct _zfcp_erp_action *erp_action;
++ /* used if this request is allocated from the emergency pool */
++ struct _zfcp_mem_pool *pool;
++} zfcp_fsf_req_t;
++
++typedef struct _zfcp_erp_action {
++ struct list_head list;
++ /* requested action */
++ int action;
++ /* thing which should be recovered */
++ struct _zfcp_adapter *adapter;
++ struct _zfcp_port *port;
++ struct _zfcp_unit *unit;
++ /* status of recovery */
++ volatile u32 status;
++ /* step which is currently taken */
++ u32 step;
++ /* fsf_req which is currently pending for this action */
++ struct _zfcp_fsf_req *fsf_req;
++ struct timer_list timer;
++ /* retry counter, ... ? */
++ union {
++ /* used for nameserver requests (GID_PN) in error recovery */
++ struct zfcp_gid_pn_data *gid_pn;
++ } data;
++} zfcp_erp_action_t;
++
++/* logical unit */
++typedef struct _zfcp_unit {
++ /* driver wide common magic */
++ u32 common_magic;
++ /* data structure specific magic */
++ u32 specific_magic;
++ /* list of logical units */
++ struct list_head list;
++ /* remote port this logical unit belongs to */
++ struct _zfcp_port *port;
++ /* status of this logical unit */
++ atomic_t status;
++ /* own SCSI LUN */
++ scsi_lun_t scsi_lun;
++ /* own FCP_LUN */
++ fcp_lun_t fcp_lun;
++ /* handle assigned by FSF */
++ u32 handle;
++ /* save scsi device struct pointer locally */
++ Scsi_Device *device;
++ /* used for proc_fs support */
++ char *proc_buffer;
++ struct proc_dir_entry *proc_file;
++ struct proc_dir_entry *proc_dir;
++ /* error recovery action pending for this unit (if any) */
++ struct _zfcp_erp_action erp_action;
++ atomic_t erp_counter;
++ /* list of units in order of configuration via mapping */
++ struct list_head map_list;
++} zfcp_unit_t;
++
++/* remote port */
++typedef struct _zfcp_port {
++ /* driver wide common magic */
++ u32 common_magic;
++ /* data structure specific magic */
++ u32 specific_magic;
++ /* list of remote ports */
++ struct list_head list;
++ /* adapter this remote port accessed */
++ struct _zfcp_adapter *adapter;
++ /* head of logical unit list */
++ struct list_head unit_list_head;
++ /* lock for critical operations on list of logical units */
++ rwlock_t unit_list_lock;
++ /* number of logical units in list */
++ u32 units;
++ /* status of this remote port */
++ atomic_t status;
++ /* own SCSI ID */
++ scsi_id_t scsi_id;
++ /* WWNN of node this remote port belongs to (if known) */
++ wwn_t wwnn;
++ /* own WWPN */
++ wwn_t wwpn;
++ /* D_ID */
++ fc_id_t d_id;
++ /* largest SCSI LUN of units attached to this port */
++ scsi_lun_t max_scsi_lun;
++ /* handle assigned by FSF */
++ u32 handle;
++ /* used for proc_fs support */
++ char *proc_buffer;
++ struct proc_dir_entry *proc_file;
++ struct proc_dir_entry *proc_dir;
++ /* error recovery action pending for this port (if any) */
++ struct _zfcp_erp_action erp_action;
++ atomic_t erp_counter;
++} zfcp_port_t;
++
++
++/* QDIO request/response queue */
++typedef struct _zfcp_qdio_queue {
++ /* SBALs */
++ qdio_buffer_t *buffer[QDIO_MAX_BUFFERS_PER_Q];
++ /* index of next free buffer in queue (only valid if free_count>0) */
++ u8 free_index;
++ /* number of free buffers in queue */
++ atomic_t free_count;
++ /* lock for critical operations on queue */
++ rwlock_t queue_lock;
++ /* outbound queue only, SBALs since PCI indication */
++ int distance_from_int;
++} zfcp_qdio_queue_t;
++
++
++/* Control file data channel sense data record */
++typedef struct _zfcp_cfdc_sense_data {
++ /* Request signature */
++ u32 signature;
++ /* FCP adapter device number */
++ u32 devno;
++ /* Command code */
++ u32 command;
++ /* FSF request status */
++ u32 fsf_status;
++ /* FSF request status qualifier */
++ u32 fsf_status_qual[4];
++ /* Access conflicts list */
++ u8 payloads[256];
++ /* Access control table */
++ u8 control_file[0];
++} zfcp_cfdc_sense_data_t;
++
++#define ZFCP_CFDC_SIGNATURE 0xCFDCACDF
++
++#define ZFCP_CFDC_CMND_DOWNLOAD_NORMAL 0x00010001
++#define ZFCP_CFDC_CMND_DOWNLOAD_FORCE 0x00010101
++#define ZFCP_CFDC_CMND_FULL_ACCESS 0x00000201
++#define ZFCP_CFDC_CMND_RESTRICTED_ACCESS 0x00000401
++#define ZFCP_CFDC_CMND_UPLOAD 0x00010002
++
++#define ZFCP_CFDC_DOWNLOAD 0x00000001
++#define ZFCP_CFDC_UPLOAD 0x00000002
++#define ZFCP_CFDC_WITH_CONTROL_FILE 0x00010000
++
++#define ZFCP_CFDC_MAX_CONTROL_FILE_SIZE (127 * 1024)
++
++
++#define ZFCP_PARSE_ERR_BUF_SIZE 100
++
++/* adapter */
++typedef struct _zfcp_adapter {
++/* elements protected by zfcp_data.adapter_list_lock */
++ /* driver wide common magic */
++ u32 common_magic;
++ /* data structure specific magic */
++ u32 specific_magic;
++ struct list_head list;
++ /* WWNN */
++ wwn_t wwnn;
++ /* WWPN */
++ wwn_t wwpn;
++ /* N_Port ID */
++ fc_id_t s_id;
++ /* irq (subchannel) */
++ irq_t irq;
++ /* device number */
++ devno_t devno;
++ /* default FC service class */
++ u8 fc_service_class;
++ /* topology which this adapter is attached to */
++ u32 fc_topology;
++ /* FC interface speed */
++ u32 fc_link_speed;
++ /* Hydra version */
++ u32 hydra_version;
++ /* Licensed Internal Code version of FSF in adapter */
++ u32 fsf_lic_version;
++ /* supported features of FCP channel */
++ u32 supported_features;
++ /* hardware version of FCP channel */
++ u32 hardware_version;
++ /* serial number of hardware */
++ u8 serial_number[32];
++ /* SCSI host structure of the mid layer of the SCSI stack */
++ struct Scsi_Host *scsi_host;
++ /* Start of packets in flight list */
++ Scsi_Cmnd *first_fake_cmnd;
++ /* lock for the above */
++ rwlock_t fake_list_lock;
++ /* starts processing of faked commands */
++ struct timer_list fake_scsi_timer;
++ atomic_t fake_scsi_reqs_active;
++ /* name */
++ unsigned char name[9];
++ /* elements protected by port_list_lock */
++ /* head of remote port list */
++ struct list_head port_list_head;
++ /* lock for critical operations on list of remote ports */
++ rwlock_t port_list_lock;
++ /* number of remote ports currently configured */
++ u32 ports;
++ /* largest SCSI ID of ports attached to this adapter */
++ scsi_id_t max_scsi_id;
++ /* largest SCSI LUN of units of ports attached to this adapter */
++ scsi_lun_t max_scsi_lun;
++ /* elements protected by fsf_req_list_lock */
++ /* head of FSF request list */
++ struct list_head fsf_req_list_head;
++ /* lock for critical operations on list of FSF requests */
++ rwlock_t fsf_req_list_lock;
++ /* number of existing FSF requests pending */
++ atomic_t fsf_reqs_active;
++ /* elements partially protected by request_queue.lock */
++ /* request queue */
++ struct _zfcp_qdio_queue request_queue;
++ /* FSF command sequence number */
++ u32 fsf_req_seq_no;
++ /* can be used to wait for available SBALs in request queue */
++ wait_queue_head_t request_wq;
++ /* elements partially protected by response_queue.lock */
++ /* response queue */
++ struct _zfcp_qdio_queue response_queue;
++ devstat_t devstat;
++ s390_dev_info_t devinfo;
++ rwlock_t abort_lock;
++ /* number of status reads failed */
++ u16 status_read_failed;
++ /* elements whose various bits are protected by several locks */
++ /* status of this adapter */
++ atomic_t status;
++ /* for proc_info */
++ char *proc_buffer;
++ /* and here for the extra proc_dir */
++ struct proc_dir_entry *proc_dir;
++ struct proc_dir_entry *proc_file;
++ /* nameserver available via this adapter */
++ struct _zfcp_port *nameserver_port;
++ /* error recovery for this adapter and associated devices */
++ struct list_head erp_ready_head;
++ struct list_head erp_running_head;
++ rwlock_t erp_lock;
++ struct semaphore erp_ready_sem;
++ wait_queue_head_t erp_thread_wqh;
++ wait_queue_head_t erp_done_wqh;
++ /* error recovery action pending for this adapter (if any) */
++ struct _zfcp_erp_action erp_action;
++ atomic_t erp_counter;
++ debug_info_t *erp_dbf;
++ debug_info_t *abort_dbf;
++ debug_info_t *req_dbf;
++ debug_info_t *in_els_dbf;
++ debug_info_t *cmd_dbf;
++ rwlock_t cmd_dbf_lock;
++ zfcp_adapter_mem_pool_t pool;
++ /* SCSI error recovery watch */
++ struct timer_list scsi_er_timer;
++ /* Used to handle buffer positioning when reopening queues */
++ atomic_t reqs_in_progress;
++#ifdef ZFCP_ERP_DEBUG_SINGLE_STEP
++ struct semaphore erp_continue_sem;
++#endif /* ZFCP_ERP_DEBUG_SINGLE_STEP */
++#ifdef ZFCP_STAT_REQSIZES
++ struct list_head read_req_head;
++ struct list_head write_req_head;
++ rwlock_t stat_lock;
++ atomic_t stat_errors;
++ atomic_t stat_on;
++#endif
++#ifdef ZFCP_STAT_QUEUES
++ atomic_t outbound_queue_full;
++ atomic_t outbound_total;
++#endif
++} zfcp_adapter_t;
++
++/* driver data */
++typedef struct _zfcp_data {
++ /* SCSI stack data structure storing information about this driver */
++ Scsi_Host_Template scsi_host_template;
++ /* head of adapter list */
++ struct list_head adapter_list_head;
++ /* lock for critical operations on list of adapters */
++ rwlock_t adapter_list_lock;
++ /* number of adapters in list */
++ u16 adapters;
++ /* data used for dynamic I/O */
++ devreg_t devreg;
++ devreg_t devreg_priv;
++ /* driver version number derived from cvs revision */
++ u32 driver_version;
++ /* serialises proc-fs/configuration changes */
++ struct semaphore proc_sema;
++ /* for proc_info */
++ char *proc_buffer_parm;
++ char *proc_buffer_map;
++ char *proc_line;
++ unsigned long proc_line_length;
++ /* and here for the extra proc_dir */
++ struct proc_dir_entry *proc_dir;
++ struct proc_dir_entry *parm_proc_file;
++ struct proc_dir_entry *map_proc_file;
++ struct proc_dir_entry *add_map_proc_file;
++ /* buffer for parse error messages (don't want to put it on stack) */
++ unsigned char perrbuf[ZFCP_PARSE_ERR_BUF_SIZE];
++ atomic_t mem_count;
++ struct notifier_block reboot_notifier;
++ atomic_t loglevel;
++#ifdef ZFCP_LOW_MEM_CREDITS
++ atomic_t lowmem_credit;
++#endif
++ /* no extra lock here, we have the proc_sema */
++ struct list_head map_list_head;
++} zfcp_data_t;
++
++/* struct used by memory pools for fsf_requests */
++struct zfcp_fsf_req_pool_buffer {
++ struct _zfcp_fsf_req fsf_req;
++ struct fsf_qtcb qtcb;
++};
++
++/* record generated from parsed conf. lines */
++typedef struct _zfcp_config_record {
++ int valid;
++ unsigned long devno;
++ unsigned long scsi_id;
++ unsigned long long wwpn;
++ unsigned long scsi_lun;
++ unsigned long long fcp_lun;
++} zfcp_config_record_t;
++
++/* for use by zfcp_sg_list_...() */
++typedef struct _zfcp_sg_list {
++ struct scatterlist *sg;
++ unsigned int count;
++} zfcp_sg_list_t;
++
++extern zfcp_data_t zfcp_data;
++
++#ifdef ZFCP_LOW_MEM_CREDITS
++/* only subtract i from v if v is not equal to no_sub; returns 0 if the subtraction was done, 1 otherwise */
++static __inline__ int atomic_test_and_sub(int no_sub, int i, atomic_t *v)
++{
++ int old_val, new_val;
++ do {
++ old_val = atomic_read(v);
++ if (old_val == no_sub)
++ return 1;
++ new_val = old_val - i;
++ } while (atomic_compare_and_swap(old_val, new_val, v));
++ return 0;
++}
++
++/* only decrement v if v is not equal to no_dec; returns 0 if the decrement was done, 1 otherwise */
++static __inline__ int atomic_test_and_dec(int no_dec, atomic_t *v)
++{
++ return atomic_test_and_sub(no_dec, 1, v);
++}
++#endif
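++
++/*
++ * Note on the credit scheme above: with lowmem_credit set to n > 0
++ * (presumably for testing), the next n ZFCP_KMALLOC() calls below
++ * fail with NULL, simulating memory pressure; at 0, allocations
++ * proceed normally.
++ */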
++
++#ifndef atomic_test_mask
++#define atomic_test_mask(mask, target) \
++ ((atomic_read(target) & mask) == mask)
++#endif
++
++/*
++ * Macros used for logging etc.
++ */
++
++#define ZFCP_NAME "zfcp"
++
++/*
++ * Logging may be applied on certain kinds of driver operations
++ * independently. Besides different log levels are supported for
++ * each of these areas.
++ */
++
++/* independent areas being subject of logging */
++#define ZFCP_LOG_AREA_OTHER 0
++#define ZFCP_LOG_AREA_SCSI 1
++#define ZFCP_LOG_AREA_FSF 2
++#define ZFCP_LOG_AREA_CONFIG 3
++#define ZFCP_LOG_AREA_DIO 4
++#define ZFCP_LOG_AREA_QDIO 5
++#define ZFCP_LOG_AREA_ERP 6
++
++/* values for log level - keep it simple for now */
++#define ZFCP_LOG_LEVEL_NORMAL 0
++#define ZFCP_LOG_LEVEL_INFO 1
++#define ZFCP_LOG_LEVEL_DEBUG 2
++#define ZFCP_LOG_LEVEL_TRACE 3
++
++/* default log levels for different log areas */
++#define ZFCP_LOG_LEVEL_DEFAULT_OTHER ZFCP_LOG_LEVEL_NORMAL
++#define ZFCP_LOG_LEVEL_DEFAULT_SCSI ZFCP_LOG_LEVEL_NORMAL
++#define ZFCP_LOG_LEVEL_DEFAULT_FSF ZFCP_LOG_LEVEL_NORMAL
++#define ZFCP_LOG_LEVEL_DEFAULT_CONFIG ZFCP_LOG_LEVEL_NORMAL
++#define ZFCP_LOG_LEVEL_DEFAULT_DIO ZFCP_LOG_LEVEL_NORMAL
++#define ZFCP_LOG_LEVEL_DEFAULT_QDIO ZFCP_LOG_LEVEL_NORMAL
++#define ZFCP_LOG_LEVEL_DEFAULT_ERP ZFCP_LOG_LEVEL_NORMAL
++
++/*
++ * this allows the preprocessor to remove excluded logs from the code
++ * (this is the last log level compiled in; higher log levels are removed)
++ */
++#define ZFCP_LOG_LEVEL_LIMIT ZFCP_LOG_LEVEL_DEBUG
++
++/* nibbles of "loglevel" are used for particular purposes */
++#define ZFCP_LOG_VALUE(zfcp_lognibble) \
++ ((atomic_read(&zfcp_data.loglevel) >> (zfcp_lognibble<<2)) & 0xF)
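++
++/*
++ * Worked example: with a loglevel word of 0x30, the SCSI nibble
++ * (area 1, bits 4-7) holds 3, so ZFCP_LOG_VALUE(ZFCP_LOG_AREA_SCSI)
++ * yields ZFCP_LOG_LEVEL_TRACE while all other areas remain at
++ * ZFCP_LOG_LEVEL_NORMAL (0).
++ */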
++
++#define ZFCP_LOG_VALUE_OTHER ZFCP_LOG_VALUE(ZFCP_LOG_AREA_OTHER)
++#define ZFCP_LOG_VALUE_SCSI ZFCP_LOG_VALUE(ZFCP_LOG_AREA_SCSI)
++#define ZFCP_LOG_VALUE_FSF ZFCP_LOG_VALUE(ZFCP_LOG_AREA_FSF)
++#define ZFCP_LOG_VALUE_CONFIG ZFCP_LOG_VALUE(ZFCP_LOG_AREA_CONFIG)
++#define ZFCP_LOG_VALUE_DIO ZFCP_LOG_VALUE(ZFCP_LOG_AREA_DIO)
++#define ZFCP_LOG_VALUE_QDIO ZFCP_LOG_VALUE(ZFCP_LOG_AREA_QDIO)
++#define ZFCP_LOG_VALUE_ERP ZFCP_LOG_VALUE(ZFCP_LOG_AREA_ERP)
++
++/* all log level defaults put together into log level word */
++#define ZFCP_LOG_LEVEL_DEFAULTS \
++ ((ZFCP_LOG_LEVEL_DEFAULT_OTHER << (ZFCP_LOG_AREA_OTHER<<2)) | \
++ (ZFCP_LOG_LEVEL_DEFAULT_SCSI << (ZFCP_LOG_AREA_SCSI<<2)) | \
++ (ZFCP_LOG_LEVEL_DEFAULT_FSF << (ZFCP_LOG_AREA_FSF<<2)) | \
++ (ZFCP_LOG_LEVEL_DEFAULT_CONFIG << (ZFCP_LOG_AREA_CONFIG<<2)) | \
++ (ZFCP_LOG_LEVEL_DEFAULT_DIO << (ZFCP_LOG_AREA_DIO<<2)) | \
++ (ZFCP_LOG_LEVEL_DEFAULT_QDIO << (ZFCP_LOG_AREA_QDIO<<2)) | \
++ (ZFCP_LOG_LEVEL_DEFAULT_ERP << (ZFCP_LOG_AREA_ERP<<2)))
++
++/* that's the prefix placed at the beginning of each driver message */
++#define ZFCP_LOG_PREFIX ZFCP_NAME": "
++
++/* log area specific log prefixes */
++#define ZFCP_LOG_AREA_PREFIX_OTHER ""
++#define ZFCP_LOG_AREA_PREFIX_SCSI "SCSI: "
++#define ZFCP_LOG_AREA_PREFIX_FSF "FSF: "
++#define ZFCP_LOG_AREA_PREFIX_CONFIG "config: "
++#define ZFCP_LOG_AREA_PREFIX_DIO "dynamic I/O: "
++#define ZFCP_LOG_AREA_PREFIX_QDIO "QDIO: "
++#define ZFCP_LOG_AREA_PREFIX_ERP "ERP: "
++
++/* check whether we have the right level for logging */
++#define ZFCP_LOG_CHECK(ll) ((ZFCP_LOG_VALUE(ZFCP_LOG_AREA)) >= (ll))
++
++/* As we have two printks it is possible for them to be separated by another
++ * message. This holds true even for printks from within this module.
++ * In any case there should only be a small readability hit, however.
++ */
++#define _ZFCP_LOG(m...) \
++ { \
++ printk( "%s%s: ", \
++ ZFCP_LOG_PREFIX ZFCP_LOG_AREA_PREFIX, \
++ __FUNCTION__); \
++ printk(m); \
++ }
++
++#define ZFCP_LOG(ll, m...) \
++ if (ZFCP_LOG_CHECK(ll)) \
++ _ZFCP_LOG(m)
++
++#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_NORMAL
++#define ZFCP_LOG_NORMAL(m...)
++#else /* ZFCP_LOG_LEVEL_LIMIT >= ZFCP_LOG_LEVEL_NORMAL */
++#define ZFCP_LOG_NORMAL(m...) ZFCP_LOG(ZFCP_LOG_LEVEL_NORMAL, m)
++#endif
++
++#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_INFO
++#define ZFCP_LOG_INFO(m...)
++#else /* ZFCP_LOG_LEVEL_LIMIT >= ZFCP_LOG_LEVEL_INFO */
++#define ZFCP_LOG_INFO(m...) ZFCP_LOG(ZFCP_LOG_LEVEL_INFO, m)
++#endif
++
++#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_DEBUG
++#define ZFCP_LOG_DEBUG(m...)
++#else /* ZFCP_LOG_LEVEL_LIMIT >= ZFCP_LOG_LEVEL_DEBUG */
++#define ZFCP_LOG_DEBUG(m...) ZFCP_LOG(ZFCP_LOG_LEVEL_DEBUG, m)
++#endif
++
++#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_TRACE
++#define ZFCP_LOG_TRACE(m...)
++#else /* ZFCP_LOG_LEVEL_LIMIT >= ZFCP_LOG_LEVEL_TRACE */
++#define ZFCP_LOG_TRACE(m...) ZFCP_LOG(ZFCP_LOG_LEVEL_TRACE, m)
++#endif
++
++/**************** memory management wrappers ************************/
++
++#define ZFCP_KMALLOC(params...) zfcp_kmalloc(params, __FUNCTION__)
++#define ZFCP_KFREE(params...) zfcp_kfree(params, __FUNCTION__)
++#define ZFCP_GET_ZEROED_PAGE(params...) zfcp_get_zeroed_page(params, __FUNCTION__)
++#define ZFCP_FREE_PAGE(params...) zfcp_free_page(params, __FUNCTION__)
++
++static inline void *zfcp_kmalloc(size_t size, int type, char *origin)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ void *ret = NULL;
++#if 0
++ if (error_counter>=10000) {
++ if(error_counter==10000) {
++ printk("********LOW MEMORY********\n");
++ }
++ error_counter=10001;
++ goto out;
++ }
++#endif
++
++#ifdef ZFCP_LOW_MEM_CREDITS
++ if (!atomic_test_and_dec(0, &zfcp_data.lowmem_credit))
++ return NULL;
++#endif
++
++ ret = kmalloc(size, type);
++ if (ret) {
++ atomic_add(size, &zfcp_data.mem_count);
++ memset(ret, 0, size);
++ }
++#ifdef ZFCP_MEMORY_DEBUG
++ /* FIXME(design): shouldn't this rather be a dbf entry? */
++ ZFCP_LOG_NORMAL(
++ "origin: %s, addr=0x%lx, size=%li, type=%d\n",
++ origin,
++ (unsigned long)ret,
++ size,
++ type);
++#endif
++ /* out: */
++ return ret;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++static inline unsigned long zfcp_get_zeroed_page(int flags, char *origin)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ unsigned long ret = 0;
++#if 0
++ if (error_counter>=10000) {
++ if(error_counter==10000) {
++ printk("********LOW MEMORY********\n");
++ }
++ error_counter=10001;
++ goto out;
++ }
++#endif
++ ret = get_zeroed_page(flags);
++ if (ret) {
++ atomic_add(PAGE_SIZE, &zfcp_data.mem_count);
++ }
++
++#ifdef ZFCP_MEMORY_DEBUG
++ /* FIXME(design): shouldn't this rather be a dbf entry? */
++ ZFCP_LOG_NORMAL(
++ "origin=%s, addr=0x%lx, type=%d\n",
++ origin,
++ ret,
++ flags);
++#endif
++ /* out :*/
++ return ret;
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++/*
++ * Note:
++ * 'kfree' may free a different amount of storage than specified here by
++ * 'size' since 'kfree' has its own means to figure this number out.
++ * Thus, an arbitrary value assigned to 'size' (usage error) will
++ * mess up our storage accounting even in cases of no memory leaks.
++ */
++static inline void zfcp_kfree(void *addr, size_t size, char *origin)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ atomic_sub(size, &zfcp_data.mem_count);
++#ifdef ZFCP_MEMORY_DEBUG
++ /* FIXME(design): shouldn't this rather be a dbf entry? */
++ ZFCP_LOG_NORMAL(
++ "origin: %s, addr=0x%lx, count=%ld \n",
++ origin,
++ (unsigned long)addr,
++ size);
++#endif
++ kfree(addr);
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++
++static inline void zfcp_free_page(unsigned long addr, char *origin)
++{
++#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
++#define ZFCP_LOG_AREA_PREFIX ZFCP_LOG_AREA_PREFIX_OTHER
++
++ atomic_sub(PAGE_SIZE, &zfcp_data.mem_count);
++#ifdef ZFCP_MEMORY_DEBUG
++ ZFCP_LOG_NORMAL("origin: %s, addr=0x%lx\n",
++ origin,
++ addr);
++#endif
++ free_page(addr);
++
++#undef ZFCP_LOG_AREA
++#undef ZFCP_LOG_AREA_PREFIX
++}
++
++int zfcp_config_parse_record_add (zfcp_config_record_t*);
++
++#define ZFCP_FIRST_ENTITY(head,type) \
++ ( \
++ list_empty(head) ? \
++ NULL : \
++ list_entry((head)->next,type,list) \
++ )
++
++#define ZFCP_LAST_ENTITY(head,type) \
++ ( \
++ list_empty(head) ? \
++ NULL : \
++ list_entry((head)->prev,type,list) \
++ )
++
++#define ZFCP_PREV_ENTITY(head,curr,type) \
++ ( \
++ (curr == ZFCP_FIRST_ENTITY(head,type)) ? \
++ NULL : \
++ list_entry(curr->list.prev,type,list) \
++ )
++
++#define ZFCP_NEXT_ENTITY(head,curr,type) \
++ ( \
++ (curr == ZFCP_LAST_ENTITY(head,type)) ? \
++ NULL : \
++ list_entry(curr->list.next,type,list) \
++ )
++
++#define ZFCP_FOR_EACH_ENTITY(head,curr,type) \
++ for (curr = ZFCP_FIRST_ENTITY(head,type); \
++ curr; \
++ curr = ZFCP_NEXT_ENTITY(head,curr,type))
++
++/*
++ * Use these macros if you traverse a list and stop iterating after
++ * altering it, since changing the list will most likely invalidate
++ * the next/previous pointers. Typical uses are examining some list
++ * elements, or removing a single element from somewhere in the middle
++ * of the list.
++ * - Lock the list by means of the associated rwlock before entering
++ *   the loop (and thus above the macro).
++ * - Unlock the list (the associated rwlock) after leaving the loop
++ *   belonging to the macro.
++ * - Use the read variant of the lock if you only look something up
++ *   without changing the list.
++ * - Use the write variant of the lock if you change the list (in the
++ *   last iteration!).
++ * Attention: "upgrading" a read lock to a write lock is not supported!
++ */
++
++#define ZFCP_FOR_EACH_ADAPTER(a) \
++ ZFCP_FOR_EACH_ENTITY(&zfcp_data.adapter_list_head,(a),zfcp_adapter_t)
++
++#define ZFCP_FOR_EACH_PORT(a,p) \
++ ZFCP_FOR_EACH_ENTITY(&(a)->port_list_head,(p),zfcp_port_t)
++
++#define ZFCP_FOR_EACH_UNIT(p,u) \
++ ZFCP_FOR_EACH_ENTITY(&(p)->unit_list_head,(u),zfcp_unit_t)
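++
++/*
++ * Illustrative sketch (hypothetical variable names): walking all units
++ * of all ports of one adapter under the read variants of the list
++ * locks, per the locking rules above.
++ *
++ *	read_lock_irqsave(&adapter->port_list_lock, flags);
++ *	ZFCP_FOR_EACH_PORT(adapter, port) {
++ *		read_lock(&port->unit_list_lock);
++ *		ZFCP_FOR_EACH_UNIT(port, unit)
++ *			...;
++ *		read_unlock(&port->unit_list_lock);
++ *	}
++ *	read_unlock_irqrestore(&adapter->port_list_lock, flags);
++ */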
++
++
++/* Note: the leftmost status byte is common among adapter, port
++ * and unit
++ */
++#define ZFCP_COMMON_FLAGS 0xff000000
++#define ZFCP_SPECIFIC_FLAGS 0x00ffffff
++
++/* common status bits */
++#define ZFCP_STATUS_COMMON_TO_BE_REMOVED 0x80000000
++#define ZFCP_STATUS_COMMON_RUNNING 0x40000000
++#define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000
++#define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000
++#define ZFCP_STATUS_COMMON_OPENING 0x08000000
++#define ZFCP_STATUS_COMMON_OPEN 0x04000000
++#define ZFCP_STATUS_COMMON_CLOSING 0x02000000
++#define ZFCP_STATUS_COMMON_ERP_INUSE 0x01000000
++
++/* status of adapter */
++#define ZFCP_STATUS_ADAPTER_IRQOWNER 0x00000001
++#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002
++#define ZFCP_STATUS_ADAPTER_REGISTERED 0x00000004
++#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
++#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
++#define ZFCP_STATUS_ADAPTER_ERP_THREAD_UP 0x00000020
++#define ZFCP_STATUS_ADAPTER_ERP_THREAD_DONE 0x00000040
++#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080
++#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
++#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
++
++#define ZFCP_STATUS_ADAPTER_SCSI_UP \
++ (ZFCP_STATUS_COMMON_UNBLOCKED | \
++ ZFCP_STATUS_ADAPTER_REGISTERED)
++
++#define ZFCP_DID_NAMESERVER 0xFFFFFC
++
++/* status of remote port */
++#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
++#define ZFCP_STATUS_PORT_DID_DID 0x00000002
++#define ZFCP_STATUS_PORT_PHYS_CLOSING 0x00000004
++#define ZFCP_STATUS_PORT_NO_WWPN 0x00000008
++#define ZFCP_STATUS_PORT_NO_SCSI_ID 0x00000010
++#define ZFCP_STATUS_PORT_INVALID_WWPN 0x00000020
++
++#define ZFCP_STATUS_PORT_NAMESERVER \
++ (ZFCP_STATUS_PORT_NO_WWPN | \
++ ZFCP_STATUS_PORT_NO_SCSI_ID)
++
++/* status of logical unit */
++#define ZFCP_STATUS_UNIT_NOTSUPPUNITRESET 0x00000001
++#define ZFCP_STATUS_UNIT_ASSUMETCQ 0x00000002
++
++/* no common part here */
++/* status of FSF request */
++#define ZFCP_STATUS_FSFREQ_NOT_INIT 0x00000000
++#define ZFCP_STATUS_FSFREQ_POOL 0x00000001
++#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
++#define ZFCP_STATUS_FSFREQ_COMPLETED 0x00000004
++#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008
++#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
++#define ZFCP_STATUS_FSFREQ_ABORTING 0x00000020
++#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
++#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080
++#define ZFCP_STATUS_FSFREQ_ABORTED 0x00000100
++#define ZFCP_STATUS_FSFREQ_TMFUNCFAILED 0x00000200
++#define ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP 0x00000400
++#define ZFCP_STATUS_FSFREQ_RETRY 0x00000800
++#define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000
++#define ZFCP_STATUS_FSFREQ_POOLBUF 0x00002000
++
++#define ZFCP_KNOWN 0x00000001
++#define ZFCP_REQ_AUTO_CLEANUP 0x00000002
++#define ZFCP_WAIT_FOR_SBAL 0x00000004
++#define ZFCP_REQ_USE_MEMPOOL 0x00000008
++
++/* Mask parameters */
++#define ZFCP_SET 0x00000100
++#define ZFCP_CLEAR 0x00000200
++
++#define ZFCP_INTERRUPTIBLE 1
++#define ZFCP_UNINTERRUPTIBLE 0
++
++#define ZFCP_MAX_ERPS 3
++
++#define ZFCP_ERP_FSFREQ_TIMEOUT (100 * HZ)
++#define ZFCP_ERP_MEMWAIT_TIMEOUT HZ
++
++#define ZFCP_STATUS_ERP_TIMEDOUT 0x10000000
++#define ZFCP_STATUS_ERP_CLOSE_ONLY 0x01000000
++#define ZFCP_STATUS_ERP_DISMISSING 0x00100000
++#define ZFCP_STATUS_ERP_DISMISSED 0x00200000
++
++#define ZFCP_ERP_STEP_UNINITIALIZED 0x00000000
++#define ZFCP_ERP_STEP_FSF_XCONFIG 0x00000001
++#define ZFCP_ERP_STEP_PHYS_PORT_CLOSING 0x00000010
++#define ZFCP_ERP_STEP_PORT_CLOSING 0x00000100
++#define ZFCP_ERP_STEP_NAMESERVER_OPEN 0x00000200
++#define ZFCP_ERP_STEP_NAMESERVER_LOOKUP 0x00000400
++#define ZFCP_ERP_STEP_PORT_OPENING 0x00000800
++#define ZFCP_ERP_STEP_UNIT_CLOSING 0x00001000
++#define ZFCP_ERP_STEP_UNIT_OPENING 0x00002000
++
++/* ordered! */
++#define ZFCP_ERP_ACTION_REOPEN_ADAPTER 0x4
++#define ZFCP_ERP_ACTION_REOPEN_PORT_FORCED 0x3
++#define ZFCP_ERP_ACTION_REOPEN_PORT 0x2
++#define ZFCP_ERP_ACTION_REOPEN_UNIT 0x1
++
++#define ZFCP_ERP_ACTION_RUNNING 0x1
++#define ZFCP_ERP_ACTION_READY 0x2
++
++#define ZFCP_ERP_SUCCEEDED 0x0
++#define ZFCP_ERP_FAILED 0x1
++#define ZFCP_ERP_CONTINUES 0x2
++#define ZFCP_ERP_EXIT 0x3
++#define ZFCP_ERP_DISMISSED 0x4
++#define ZFCP_ERP_NOMEM 0x5
++
++/* task attribute values in FCP-2 FCP_CMND IU */
++#define SIMPLE_Q 0
++#define HEAD_OF_Q 1
++#define ORDERED_Q 2
++#define ACA_Q 4
++#define UNTAGGED 5
++
++/* task management flags in FCP-2 FCP_CMND IU */
++#define CLEAR_ACA 0x40
++#define TARGET_RESET 0x20
++#define LOGICAL_UNIT_RESET 0x10
++#define CLEAR_TASK_SET 0x04
++#define ABORT_TASK_SET 0x02
++
++#define FCP_CDB_LENGTH 16
++
++
++/* some magic values which may be used to authenticate data structures */
++#define ZFCP_MAGIC 0xFCFCFCFC
++#define ZFCP_MAGIC_ADAPTER 0xAAAAAAAA
++#define ZFCP_MAGIC_PORT 0xBBBBBBBB
++#define ZFCP_MAGIC_UNIT 0xCCCCCCCC
++#define ZFCP_MAGIC_FSFREQ 0xEEEEEEEE
++
++/* function prototypes */
++int zfcp_erp_wait(zfcp_adapter_t*);
++int zfcp_fsf_exchange_port_data(zfcp_adapter_t*, fsf_qtcb_bottom_port_t*);
++int zfcp_fsf_send_els(struct zfcp_send_els *);
++int zfcp_config_parse_record_add(zfcp_config_record_t*);
++int zfcp_scsi_command_sync(zfcp_unit_t *, Scsi_Cmnd *);
++int zfcp_ns_ga_nxt_request(zfcp_port_t *, struct ct_iu_ga_nxt *);
++int zfcp_fsf_send_ct(struct zfcp_send_ct *, zfcp_mem_pool_t *,
++ zfcp_erp_action_t *);
++extern int zfcp_check_ct_response(struct ct_hdr *);
++extern int zfcp_handle_els_rjt(u32, struct zfcp_ls_rjt_par *);
++
++#endif /* _ZFCP_H_ */
+=== drivers/s390/scsi/zfcp_zh.h
+==================================================================
+--- drivers/s390/scsi/zfcp_zh.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/scsi/zfcp_zh.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,171 @@
++/*
++ * $Id: zfcp_zh.h,v 1.3.2.1 2004/01/26 17:26:34 mschwide Exp $
++ *
++ * Module providing an interface for HBA API (FC-HBA) implementation
++ * to the zfcp driver.
++ *
++ * (C) Copyright IBM Corp. 2002, 2003
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version. See the file COPYING for more
++ * information.
++ *
++ * Authors:
++ * Stefan Voelkel <Stefan.Voelkel at millenux.com>
++ * Andreas Herrmann <aherrman at de.ibm.com>
++ */
++
++
++#ifndef _ZFCP_ZH_H_
++#define _ZFCP_ZH_H_
++
++#include "zfcp.h"
++#include <scsi/scsi_ioctl.h>
++#include <asm/scatterlist.h>
++
++/*
++ * Besides a bunch of standard error codes we use some newly defined error
++ * codes.
++ */
++#define ENOADA 200 /* no such adapter */
++#define ENOPOR 201 /* no such port */
++#define ENOUNI 202 /* no such unit */
++
++/*
++ * flags for get_config
++ */
++#define ZH_GET_CONFIG_ADAPTERS 0
++#define ZH_GET_CONFIG_PORTS 1
++#define ZH_GET_CONFIG_UNITS 2
++
++/**
++ * struct zfcp_zh_callbacks
++ * @adapter_add: callback for adapter add events
++ *
++ * structure containing all the callbacks
++ */
++struct zfcp_zh_callbacks
++{
++ void (*adapter_add) (struct file *, devno_t, wwn_t, wwn_t);
++ void (*port_add) (struct file *, devno_t, wwn_t, wwn_t, fc_id_t);
++ void (*unit_add) (struct file *, devno_t, wwn_t, fcp_lun_t,
++ unsigned int, unsigned int,
++ unsigned int, unsigned int);
++ void (*incomming_els) (const devno_t, const fc_id_t, const void *);
++ void (*link_down) (const fc_id_t);
++ void (*link_up) (const fc_id_t);
++};
++
++/**
++ * struct zfcp_callbacks
++ * @lock: rw-lock
++ * @callbacks: relevant callbacks into zh module
++ *
++ * callbacks and the corresponding lock
++ */
++struct zfcp_callbacks
++{
++ rwlock_t lock;
++ struct zfcp_zh_callbacks *callbacks;
++};
++
++extern struct zfcp_callbacks zfcp_callback;
++
++/**
++ * struct zfcp_adapter_attributes
++ */
++struct zfcp_adapter_attributes
++{
++ char manufacturer[64];
++ char serial_number[64];
++ char model[256];
++ char model_description[256];
++ wwn_t node_wwn;
++ char node_symbolic_name[256];
++ char hardware_version[256];
++ char driver_version[256];
++ char option_rom_version[256];
++ char firmware_version[256];
++ u32 vendor_specific_id;
++ u32 number_of_ports;
++ char driver_name[256];
++};
++
++/**
++ * struct zfcp_port_attributes
++ */
++struct zfcp_port_attributes {
++ wwn_t wwnn;
++ wwn_t wwpn;
++ wwn_t fabric_name;
++ u32 fcid;
++ u32 type;
++ u32 state;
++ u32 supported_class_of_service;
++ u32 supported_speed;
++ u32 speed;
++ u32 max_frame_size;
++ u32 discovered_ports;
++ u8 supported_fc4_types[32];
++ u8 active_fc4_types[32];
++ char symbolic_name[256];
++};
++
++/**
++ * struct zfcp_port_statistics
++ */
++struct zfcp_port_statistics {
++ u64 last_reset;
++ u64 tx_frames;
++ u64 tx_words;
++ u64 rx_frames;
++ u64 rx_words;
++ u64 lip;
++ u64 nos;
++ u64 error_frames;
++ u64 dumped_frames;
++ u64 link_failure;
++ u64 loss_of_sync;
++ u64 loss_of_signal;
++ u64 prim_seq_prot_error;
++ u64 invalid_tx_words;
++ u64 invalid_crc;
++ u64 input_requests;
++ u64 output_requests;
++ u64 control_requests;
++ u64 input_megabytes;
++ u64 output_megabytes;
++};
++
++/*
++ * functions needed by zfcp_hbaapi in zfcp
++ */
++int zfcp_zh_callbacks_register(struct zfcp_zh_callbacks *);
++int zfcp_zh_callbacks_unregister(struct zfcp_zh_callbacks *);
++int zfcp_zh_get_config(struct file *, devno_t, wwn_t, unsigned int);
++int zfcp_zh_get_adapter_attributes(devno_t, struct zfcp_adapter_attributes *);
++int zfcp_zh_get_port_attributes(devno_t, struct zfcp_port_attributes *);
++int zfcp_zh_get_port_statistics(devno_t, struct zfcp_port_statistics *);
++int zfcp_zh_get_dport_attributes(devno_t, wwn_t, struct zfcp_port_attributes *);
++int zfcp_zh_send_ct(devno_t, struct scatterlist *, unsigned int,
++ struct scatterlist *, unsigned int);
++int zfcp_zh_send_els(devno_t, wwn_t, struct scatterlist*, unsigned int,
++ struct scatterlist*, unsigned int);
++int zfcp_zh_send_scsi(devno_t, wwn_t, fcp_lun_t, Scsi_Cmnd *);
++int zfcp_zh_assert_fclun_zero(devno_t, wwn_t); /* needed for ReportLUNs */
++
++/*
++ * functions needed for callbacks into zfcp_hbaapi
++ */
++void zfcp_callback_do_adapter_add(struct file *, const zfcp_adapter_t *);
++void zfcp_callback_do_port_add(struct file *, const zfcp_adapter_t *,
++ const zfcp_port_t *);
++void zfcp_callback_do_unit_add(struct file *, const zfcp_adapter_t *,
++ const zfcp_port_t *, const zfcp_unit_t *);
++void zfcp_callback_do_incomming_els(const zfcp_adapter_t *, const void *);
++void zfcp_callback_do_link_down(const zfcp_adapter_t *);
++void zfcp_callback_do_link_up(const zfcp_adapter_t *);
++
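++/*
++ * Registration sketch for a consumer of this interface (hypothetical
++ * module that only cares about link events; other callbacks are left
++ * NULL, assuming the core checks them before calling):
++ *
++ *   static void my_link_down(const fc_id_t fcid) { ... }
++ *   static void my_link_up(const fc_id_t fcid) { ... }
++ *
++ *   static struct zfcp_zh_callbacks my_callbacks = {
++ *           .link_down = my_link_down,
++ *           .link_up = my_link_up,
++ *   };
++ *
++ *   rc = zfcp_zh_callbacks_register(&my_callbacks);
++ *   ...
++ *   zfcp_zh_callbacks_unregister(&my_callbacks);
++ */
++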
++#endif /* _ZFCP_ZH_H_ */
+=== drivers/s390/scsi/Makefile
+==================================================================
+--- drivers/s390/scsi/Makefile (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/scsi/Makefile (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,25 @@
++#
++# Makefile for the S/390 specific device drivers
++#
++
++O_TARGET := s390-scsi.o
++
++obj-$(CONFIG_ZFCP) += zfcp.o
++obj-$(CONFIG_ZFCP_HBAAPI) += zfcp_hbaapi.o
++export-objs += zfcp_main.o zfcp_zh.o
++
++zfcp-objs := zfcp_main.o zfcp_zh.o
++hbaapi-objs := zh_main.o
++
++ifeq ($(CONFIG_S390_SUPPORT),y)
++ hbaapi-objs += zh_ioctl32.o
++endif
++
++include $(TOPDIR)/Rules.make
++
++zfcp.o: $(zfcp-objs)
++ $(LD) -r -o $@ $(zfcp-objs)
++
++zfcp_hbaapi.o: $(hbaapi-objs)
++ $(LD) -r -o $@ $(hbaapi-objs)
++
+=== drivers/s390/cmf.c
+==================================================================
+--- drivers/s390/cmf.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/cmf.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,701 @@
++/*
++ * linux/drivers/s390/cmf.c ($Revision: 1.5.6.2 $)
++ *
++ * Linux on zSeries Channel Measurement Facility support
++ *
++ * Copyright 2000,2003 IBM Corporation
++ *
++ * Author: Arnd Bergmann <arndb at de.ibm.com>
++ *
++ * original idea from Natarajan Krishnaswami <nkrishna at us.ibm.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++
++#include <asm/cmb.h>
++#include <asm/div64.h>
++#include <asm/irq.h>
++
++/**
++ * enum cmb_format - types of supported measurement block formats
++ *
++ * @CMF_BASIC: traditional channel measurement blocks supported
++ * by all machines that we run on
++ * @CMF_EXTENDED: improved format that was introduced with the z990
++ * machine
++ * @CMF_AUTODETECT: default: use extended format when running on a z990
++ * or later machine, otherwise fall back to basic format
++ **/
++enum cmb_format {
++ CMF_BASIC,
++ CMF_EXTENDED,
++ CMF_AUTODETECT = -1,
++};
++/**
++ * format - actual format for all measurement blocks
++ *
++ * The format module parameter can be set to a value of 0 (zero)
++ * or 1, indicating basic or extended format as described for
++ * enum cmb_format.
++ */
++static int format = CMF_AUTODETECT;
++MODULE_PARM(format, "i");
++
++/**
++ * struct cmb_operations - functions to use depending on cmb_format
++ *
++ * all these functions operate on a struct cmf_device. There is only
++ * one instance of struct cmb_operations because all cmf_device
++ * objects are guaranteed to be of the same type.
++ *
++ * @alloc: allocate memory for a channel measurement block,
++ * either with the help of a special pool or with kmalloc
++ * @free: free memory allocated with @alloc
++ * @set: enable or disable measurement
++ * @readall: read a measurement block in a common format
++ * @reset: clear the data in the associated measurement block and
++ * reset its time stamp
++ */
++struct cmb_operations {
++ int (*alloc) (struct cmf_device*);
++ void(*free) (struct cmf_device*);
++ int (*set) (struct cmf_device*, u32);
++ int (*readall)(struct cmf_device*, struct cmbdata *);
++ void (*reset) (struct cmf_device*);
++};
++static struct cmb_operations *cmbops;
++
++/* our user interface is designed in terms of nanoseconds,
++ * while the hardware measures total times in its own
++ * unit. */
++static inline u64 time_to_nsec(u32 value)
++{
++ return ((u64)value) * 128000ull;
++}
++
++/*
++ * Users are usually interested in average times,
++ * not accumulated time.
++ * This also helps us with atomicity problems
++ * when reading single values.
++ */
++static inline u64 time_to_avg_nsec(u32 value, u32 count)
++{
++ u64 ret;
++
++ /* no samples yet, avoid division by 0 */
++ if (count == 0)
++ return 0;
++
++ /* value comes in units of 128 µsec */
++ ret = time_to_nsec(value);
++ do_div(ret, count);
++
++ return ret;
++}
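++
++/*
++ * Worked example: value = 100 (100 * 128000 = 12,800,000 ns
++ * accumulated) with count = 4 samples yields an average of
++ * 12,800,000 / 4 = 3,200,000 ns per sample.
++ */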
++
++/* activate or deactivate the channel monitor. When area is NULL,
++ * the monitor is deactivated. The channel monitor needs to
++ * be active in order to measure subchannels, which also need
++ * to be enabled. */
++static inline void
++cmf_activate(void *area, unsigned int onoff)
++{
++ register void * __gpr2 asm("2");
++ register long __gpr1 asm("1");
++
++ __gpr2 = area;
++ __gpr1 = onoff ? 2 : 0;
++ /* activate channel measurement */
++ asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
++}
++
++static int
++set_schib(int sch, u32 mme, int mbfc, unsigned long address)
++{
++ int ret;
++ int retry;
++ schib_t *schib;
++ unsigned long mba, mbi;
++
++ /* address can be either a block address or a block index */
++ mba = mbi = 0;
++ (mbfc ? mba : mbi) = address;
++
++ /* msch can silently fail, so do it again if necessary */
++ for (retry = 0; retry < 3; retry++) {
++ /* prepare schib */
++ schib = &ioinfo[sch]->schib;
++ stsch(sch, schib);
++ schib->pmcw.mme = mme;
++ schib->pmcw.mbfc = mbfc;
++ schib->pmcw.mbi = mbi;
++ if (mbfc)
++ schib->mba = mba;
++
++ /* try to submit it */
++ switch(ret = msch_err(sch, schib)) {
++ case 0:
++ break;
++ case 1:
++ case 2: /* in I/O or status pending */
++ ret = -EBUSY;
++ break;
++ case 3: /* subchannel is no longer valid */
++ ret = -ENODEV;
++ break;
++ default: /* msch caught an exception */
++ ret = -EINVAL;
++ break;
++ }
++ stsch(sch, schib); /* restore the schib */
++
++ if (ret)
++ break;
++
++ /* check if it worked */
++ if (schib->pmcw.mme == mme &&
++ schib->pmcw.mbfc == mbfc &&
++ schib->pmcw.mbi == mbi &&
++ (!mbfc || schib->mba == mba))
++ return 0;
++
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++/**
++ * struct cmb_area - container for global cmb data
++ *
++ * @mem: pointer to CMBs (only in basic measurement mode)
++ * @list: contains a linked list of all subchannels
++ * @lock: protect concurrent access to @mem and @list
++ */
++struct cmb_area {
++ struct cmb *mem;
++ struct list_head list;
++ spinlock_t lock;
++};
++static struct cmb_area cmb_area = {
++ .lock = SPIN_LOCK_UNLOCKED,
++ .list = LIST_HEAD_INIT(cmb_area.list),
++};
++
++
++/* ****** old style CMB handling ********/
++
++/** int maxchannels
++ *
++ * Basic channel measurement blocks are allocated in one contiguous
++ * block of memory, which cannot be moved as long as any channel
++ * is active. Therefore, a maximum number of subchannels needs to
++ * be defined somewhere. This is a module parameter, defaulting to
++ * a reasonable value of 1024, or 32 KB of memory.
++ * Current kernels don't allow kmalloc with more than 128 KB, so the
++ * maximum is 4096.
++ */
++static int maxchannels = 1024;
++MODULE_PARM(maxchannels,"i");
++
++/**
++ * struct cmb - basic channel measurement block
++ *
++ * cmb as used by the hardware; the fields are described in
++ * z/Architecture Principles of Operation, chapter 17.
++ * The area has to be a contiguous array and may not be reallocated or
++ * freed.
++ * Only one cmb area can be present in the system.
++ */
++struct cmb {
++ u16 ssch_rsch_count;
++ u16 sample_count;
++ u32 device_connect_time;
++ u32 function_pending_time;
++ u32 device_disconnect_time;
++ u32 control_unit_queuing_time;
++ u32 device_active_only_time;
++ u32 reserved[2];
++};
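++
++/* 2 * u16 + 7 * u32 = 32 bytes per basic block; the default of 1024
++ * channels thus needs 32 KB, and the 128 KB kmalloc limit is what
++ * caps maxchannels at 4096 */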
++
++/* insert a single device into the cmb_area list
++ * called with cmb_area.lock held from alloc_cmb
++ */
++static inline int
++alloc_cmb_single (struct cmf_device *cdev)
++{
++ struct cmb *cmb;
++ struct cmf_device *node;
++ int ret;
++
++ spin_lock_irq(cdev->ccwlock);
++ if (!list_empty(&cdev->cmb_list)) {
++ ret = -EBUSY;
++ goto out;
++ }
++
++ /* find first unused cmb in cmb_area.mem.
++ * this is a little tricky: cmb_area.list
++ * remains sorted by ->cmb pointers */
++ cmb = cmb_area.mem;
++ list_for_each_entry(node, &cmb_area.list, cmb_list) {
++ if ((struct cmb*)node->cmb > cmb)
++ break;
++ cmb++;
++ }
++ if (cmb - cmb_area.mem >= maxchannels) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ /* insert new cmb */
++ list_add_tail(&cdev->cmb_list, &node->cmb_list);
++ cdev->cmb = cmb;
++ ret = 0;
++out:
++ spin_unlock_irq(cdev->ccwlock);
++ return ret;
++}
++
++static int
++alloc_cmb (struct cmf_device *cdev)
++{
++ int ret;
++ struct cmb *mem;
++ ssize_t size;
++
++ spin_lock(&cmb_area.lock);
++
++ if (!cmb_area.mem) {
++ /* there is no user yet, so we need a new area */
++ size = sizeof(struct cmb) * maxchannels;
++ BUG_ON(!list_empty(&cmb_area.list));
++
++ spin_unlock(&cmb_area.lock);
++ mem = kmalloc(size, GFP_KERNEL | GFP_DMA);
++ spin_lock(&cmb_area.lock);
++
++ if (cmb_area.mem) {
++ /* ok, another thread was faster */
++ kfree(mem);
++ } else if (!mem) {
++ /* no luck */
++ ret = -ENOMEM;
++ goto out;
++ } else {
++ /* everything ok */
++ memset(mem, 0, size);
++ cmb_area.mem = mem;
++ cmf_activate(cmb_area.mem, 1);
++ }
++ }
++
++ /* do the actual allocation */
++ ret = alloc_cmb_single(cdev);
++out:
++ spin_unlock(&cmb_area.lock);
++
++ return ret;
++}
++
++static void
++free_cmb(struct cmf_device *cdev)
++{
++ spin_lock(&cmb_area.lock);
++ spin_lock_irq(cdev->ccwlock);
++
++ if (list_empty(&cdev->cmb_list)) {
++ /* already freed */
++ goto out;
++ }
++
++ cdev->cmb = NULL;
++ list_del_init(&cdev->cmb_list);
++
++ if (list_empty(&cmb_area.list)) {
++ cmf_activate(NULL, 0);
++ kfree(cmb_area.mem);
++ cmb_area.mem = NULL;
++ }
++out:
++ spin_unlock_irq(cdev->ccwlock);
++ spin_unlock(&cmb_area.lock);
++}
++
++static int
++set_cmb(struct cmf_device *cdev, u32 mme)
++{
++ u16 offset;
++
++ if (!cdev->cmb)
++ return -EINVAL;
++
++ offset = mme ? (struct cmb *)cdev->cmb - cmb_area.mem : 0;
++
++ return set_schib(cdev->irq, mme, 0, offset);
++}
++
++static int
++readall_cmb (struct cmf_device *cdev, struct cmbdata *data)
++{
++ /* yes, we have to put it on the stack
++ * because the cmb must only be accessed
++ * atomically, e.g. with mvc */
++ struct cmb cmb;
++ unsigned long flags;
++ u64 time;
++
++ spin_lock_irqsave(cdev->ccwlock, flags);
++ if (!cdev->cmb) {
++ spin_unlock_irqrestore(cdev->ccwlock, flags);
++ return -ENODEV;
++ }
++
++ cmb = *(struct cmb*)cdev->cmb;
++ time = get_clock() - cdev->cmb_start_time;
++ spin_unlock_irqrestore(cdev->ccwlock, flags);
++
++ *data = (struct cmbdata) {
++ /* we only know values before device_busy_time */
++ .size = offsetof(struct cmbdata, device_busy_time),
++
++ /* convert to nanoseconds */
++ .elapsed_time = (time * 1000) >> 12,
++
++ /* copy data to new structure */
++ .ssch_rsch_count = cmb.ssch_rsch_count,
++ .sample_count = cmb.sample_count,
++
++ /* time fields are converted to nanoseconds while copying */
++ .device_connect_time
++ = time_to_nsec(cmb.device_connect_time),
++ .function_pending_time
++ = time_to_nsec(cmb.function_pending_time),
++ .device_disconnect_time
++ = time_to_nsec(cmb.device_disconnect_time),
++ .control_unit_queuing_time
++ = time_to_nsec(cmb.control_unit_queuing_time),
++ .device_active_only_time
++ = time_to_nsec(cmb.device_active_only_time),
++ };
++
++ return 0;
++}
++
++static void
++reset_cmb(struct cmf_device *cdev)
++{
++ struct cmb *cmb;
++ spin_lock_irq(cdev->ccwlock);
++ cmb = cdev->cmb;
++ if (cmb)
++ memset (cmb, 0, sizeof (*cmb));
++ cdev->cmb_start_time = get_clock();
++ spin_unlock_irq(cdev->ccwlock);
++}
++
++static struct cmb_operations cmbops_basic = {
++ .alloc = alloc_cmb,
++ .free = free_cmb,
++ .set = set_cmb,
++ .readall= readall_cmb,
++ .reset = reset_cmb,
++};
++
++/* ******** extended cmb handling ********/
++
++/**
++ * struct cmbe - extended channel measurement block
++ *
++ * cmb as used by the hardware, may be in any 64 bit physical location,
++ * the fields are described in z/Architecture Principles of Operation,
++ * third edition, chapter 17.
++ */
++struct cmbe {
++ u32 ssch_rsch_count;
++ u32 sample_count;
++ u32 device_connect_time;
++ u32 function_pending_time;
++ u32 device_disconnect_time;
++ u32 control_unit_queuing_time;
++ u32 device_active_only_time;
++ u32 device_busy_time;
++ u32 initial_command_response_time;
++ u32 reserved[7];
++};
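++
++/* 16 * u32 = 64 bytes per extended block; unlike struct cmb, these
++ * are allocated individually per device (see alloc_cmbe below), not
++ * carved out of one contiguous area */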
++
++static int
++alloc_cmbe (struct cmf_device *cdev)
++{
++ struct cmbe *cmbe;
++ cmbe = kmalloc (sizeof (*cmbe), GFP_KERNEL /* | GFP_DMA ? */);
++ if (!cmbe)
++ return -ENOMEM;
++
++ spin_lock_irq(cdev->ccwlock);
++ if (cdev->cmb)
++ kfree(cmbe);
++ else
++ cdev->cmb = cmbe;
++ spin_unlock_irq(cdev->ccwlock);
++
++ /* activate global measurement if this is the first channel */
++ spin_lock(&cmb_area.lock);
++ if (list_empty(&cmb_area.list))
++ cmf_activate(NULL, 1);
++ list_add_tail(&cdev->cmb_list, &cmb_area.list);
++ spin_unlock(&cmb_area.lock);
++
++ return 0;
++}
++
++static void
++free_cmbe (struct cmf_device *cdev)
++{
++ spin_lock_irq(cdev->ccwlock);
++ if (cdev->cmb)
++ kfree(cdev->cmb);
++ cdev->cmb = NULL;
++ spin_unlock_irq(cdev->ccwlock);
++
++ /* deactivate global measurement if this is the last channel */
++ spin_lock(&cmb_area.lock);
++ list_del_init(&cdev->cmb_list);
++ if (list_empty(&cmb_area.list))
++ cmf_activate(NULL, 0);
++ spin_unlock(&cmb_area.lock);
++}
++
++static int
++set_cmbe(struct cmf_device *cdev, u32 mme)
++{
++ unsigned long mba;
++
++ if (!cdev->cmb)
++ return -EINVAL;
++
++ mba = mme ? (unsigned long)cdev->cmb : 0;
++
++ return set_schib(cdev->irq, mme, 1, mba);
++}
++
++static int
++readall_cmbe (struct cmf_device *cdev, struct cmbdata *data)
++{
++ /* yes, we have to put it on the stack
++ * because the cmb must only be accessed
++ * atomically, e.g. with mvc */
++ struct cmbe cmb;
++ unsigned long flags;
++ u64 time;
++
++ spin_lock_irqsave(cdev->ccwlock, flags);
++ if (!cdev->cmb) {
++ spin_unlock_irqrestore(cdev->ccwlock, flags);
++ return -ENODEV;
++ }
++
++ cmb = *(struct cmbe*)cdev->cmb;
++ time = get_clock() - cdev->cmb_start_time;
++ spin_unlock_irqrestore(cdev->ccwlock, flags);
++
++ *data = (struct cmbdata) {
++ /* we only know values before device_busy_time */
++ .size = offsetof(struct cmbdata, device_busy_time),
++
++ /* convert to nanoseconds */
++ .elapsed_time = (time * 1000) >> 12,
++
++ /* copy data to new structure */
++ .ssch_rsch_count = cmb.ssch_rsch_count,
++ .sample_count = cmb.sample_count,
++
++ /* time fields are converted to nanoseconds while copying */
++ .device_connect_time
++ = time_to_nsec(cmb.device_connect_time),
++ .function_pending_time
++ = time_to_nsec(cmb.function_pending_time),
++ .device_disconnect_time
++ = time_to_nsec(cmb.device_disconnect_time),
++ .control_unit_queuing_time
++ = time_to_nsec(cmb.control_unit_queuing_time),
++ .device_active_only_time
++ = time_to_nsec(cmb.device_active_only_time),
++ .device_busy_time
++ = time_to_nsec(cmb.device_busy_time),
++ .initial_command_response_time
++ = time_to_nsec(cmb.initial_command_response_time),
++ };
++
++ return 0;
++}
++
++static void
++reset_cmbe(struct cmf_device *cdev)
++{
++ struct cmbe *cmb;
++ spin_lock_irq(cdev->ccwlock);
++ cmb = cdev->cmb;
++ if (cmb)
++ memset (cmb, 0, sizeof (*cmb));
++ cdev->cmb_start_time = get_clock();
++ spin_unlock_irq(cdev->ccwlock);
++}
++
++static struct cmb_operations cmbops_extended = {
++ .alloc = alloc_cmbe,
++ .free = free_cmbe,
++ .set = set_cmbe,
++ .readall= readall_cmbe,
++ .reset = reset_cmbe,
++};
++
++/******* external interface to kernel *******/
++
++/**
++ * enable_cmf, disable_cmf, set_cmf, cmf_readall, cmf_reset:
++ * simple wrappers around the cmb_operations.
++ */
++int
++enable_cmf(struct cmf_device *cdev)
++{
++ return cmbops->alloc(cdev);
++}
++
++void
++disable_cmf(struct cmf_device *cdev)
++{
++ cmbops->free(cdev);
++}
++
++int
++set_cmf(struct cmf_device *cdev, u32 mme)
++{
++ return cmbops->set(cdev, mme);
++}
++
++int
++cmf_readall(struct cmf_device *cdev, struct cmbdata *data)
++{
++ return cmbops->readall(cdev, data);
++}
++
++void
++cmf_reset(struct cmf_device *cdev)
++{
++ cmbops->reset(cdev);
++}
++
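++/*
++ * Typical in-kernel use of the wrappers above -- a sketch, assuming a
++ * valid cdev and ignoring most error handling:
++ *
++ *   struct cmbdata data;
++ *
++ *   if (enable_cmf(cdev) == 0) {
++ *           set_cmf(cdev, 1);        (mme=1: switch measurement on)
++ *           ... let I/O run for a while ...
++ *           if (cmf_readall(cdev, &data) == 0)
++ *                   ... evaluate data.ssch_rsch_count etc. ...
++ *           set_cmf(cdev, 0);
++ *           disable_cmf(cdev);
++ *   }
++ */
++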
++/* set up maxchannels and format parameter when the module is built-in */
++#ifndef MODULE
++static int __init
++setup_cmf_maxchannels(char *arg)
++{
++ int c;
++
++ /* __setup handlers receive only the text after the
++ * "cmf_maxchannels=" prefix */
++ c = simple_strtoul(arg, 0, 0);
++ if (c <= 0 || c > 65536) {
++ printk(KERN_WARNING "Invalid parameter %s, using "
++ "default (%d)\n", arg, maxchannels);
++ } else {
++ maxchannels = c;
++ }
++
++ return 0;
++}
++__setup("cmf_maxchannels=", setup_cmf_maxchannels);
++
++static int __init
++setup_cmf_format(char *arg)
++{
++ /* __setup handlers receive only the text after the
++ * "cmf_format=" prefix */
++ format = simple_strtoul(arg, 0, 0);
++ if (format < CMF_BASIC || format > CMF_EXTENDED) {
++ printk(KERN_WARNING "Invalid parameter %s\n", arg);
++ format = -1;
++ }
++
++ return 0;
++}
++__setup("cmf_format=", setup_cmf_format);
++#endif
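++
++/* boot-time example for the built-in case: "cmf_format=0
++ * cmf_maxchannels=2048" selects the basic format (0, per enum
++ * cmb_format) with room for 2048 devices */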
++
++static int __init
++init_cmf(void)
++{
++ char *format_string;
++ char *detect_string = "parameter";
++
++ /* We cannot really autoprobe this. If the user did not give a parameter,
++ see if we are running on z990 or up, otherwise fall back to basic mode. */
++
++ if (format == CMF_AUTODETECT) {
++ if (!MACHINE_NEW_STIDP) {
++ format = CMF_BASIC;
++ } else {
++ format = CMF_EXTENDED;
++ }
++ detect_string = "autodetected";
++ } else {
++ detect_string = "parameter";
++ }
++
++ switch (format) {
++ case CMF_BASIC:
++ format_string = "basic";
++ cmbops = &cmbops_basic;
++ if (maxchannels > 4096 || maxchannels < 1) {
++ printk(KERN_ERR "Basic channel measurement facility"
++ " can only use 1 to 4096 devices\n"
++ KERN_ERR "when the cmf driver is built"
++ " as a loadable module\n");
++ return 1;
++ }
++ break;
++ case CMF_EXTENDED:
++ format_string = "extended";
++ cmbops = &cmbops_extended;
++ break;
++ default:
++ printk(KERN_ERR "Invalid format %d for channel "
++ "measurement facility\n", format);
++ return 1;
++ }
++
++ printk(KERN_INFO "Channel measurement facility using %s format (%s)\n",
++ format_string, detect_string);
++ return 0;
++}
++
++module_init(init_cmf);
++
++
++MODULE_AUTHOR("Arnd Bergmann <arndb at de.ibm.com>");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("channel measurement facility base driver\n"
++ "Copyright 2003 IBM Corporation\n");
++
++EXPORT_SYMBOL_GPL(enable_cmf);
++EXPORT_SYMBOL_GPL(disable_cmf);
++EXPORT_SYMBOL_GPL(set_cmf);
++EXPORT_SYMBOL_GPL(cmf_readall);
++EXPORT_SYMBOL_GPL(cmf_reset);
+=== drivers/s390/net/smsgiucv.c
+==================================================================
+--- drivers/s390/net/smsgiucv.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/net/smsgiucv.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,167 @@
++/*
++ * IUCV special message driver
++ *
++ * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Martin Schwidefsky (schwidefsky at de.ibm.com)
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/slab.h>
++#include <asm/cpcmd.h>
++#include <asm/ebcdic.h>
++
++#include "iucv.h"
++
++struct smsg_callback {
++ struct list_head list;
++ char *prefix;
++ int len;
++ void (*callback)(char *str);
++};
++
++MODULE_AUTHOR
++ ("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky at de.ibm.com)");
++MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver");
++
++static iucv_handle_t smsg_handle;
++static unsigned short smsg_pathid;
++static spinlock_t smsg_list_lock = SPIN_LOCK_UNLOCKED;
++static struct list_head smsg_list = LIST_HEAD_INIT(smsg_list);
++
++static void
++smsg_connection_complete(iucv_ConnectionComplete *eib, void *pgm_data)
++{
++}
++
++
++static void
++smsg_message_pending(iucv_MessagePending *eib, void *pgm_data)
++{
++ struct smsg_callback *cb;
++ unsigned char *msg;
++ unsigned short len;
++ int rc;
++
++ len = eib->ln1msg2.ipbfln1f;
++ msg = kmalloc(len + 1, GFP_ATOMIC|GFP_DMA);
++ if (!msg) {
++ iucv_reject(eib->ippathid, eib->ipmsgid, eib->iptrgcls);
++ return;
++ }
++ rc = iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls,
++ msg, len, 0, 0, 0);
++ if (rc == 0) {
++ msg[len] = 0;
++ EBCASC(msg, len);
++ spin_lock(&smsg_list_lock);
++ list_for_each_entry(cb, &smsg_list, list)
++ if (strncmp(msg + 8, cb->prefix, cb->len) == 0) {
++ cb->callback(msg + 8);
++ break;
++ }
++ spin_unlock(&smsg_list_lock);
++ }
++ kfree(msg);
++}
++
++static iucv_interrupt_ops_t smsg_ops = {
++ .ConnectionComplete = smsg_connection_complete,
++ .MessagePending = smsg_message_pending,
++};
++
++int
++smsg_register_callback(char *prefix, void (*callback)(char *str))
++{
++ struct smsg_callback *cb;
++
++ cb = kmalloc(sizeof(struct smsg_callback), GFP_KERNEL);
++ if (!cb)
++ return -ENOMEM;
++ cb->prefix = prefix;
++ cb->len = strlen(prefix);
++ cb->callback = callback;
++ spin_lock(&smsg_list_lock);
++ list_add_tail(&cb->list, &smsg_list);
++ spin_unlock(&smsg_list_lock);
++ return 0;
++}
++
++void
++smsg_unregister_callback(char *prefix, void (*callback)(char *str))
++{
++ struct smsg_callback *cb, *tmp;
++
++ spin_lock(&smsg_list_lock);
++ cb = 0;
++ list_for_each_entry(tmp, &smsg_list, list)
++ if (tmp->callback == callback &&
++ strcmp(tmp->prefix, prefix) == 0) {
++ cb = tmp;
++ list_del(&cb->list);
++ break;
++ }
++ spin_unlock(&smsg_list_lock);
++ kfree(cb);
++}
++
++static void __exit
++smsg_exit(void)
++{
++ if (smsg_handle > 0) {
++ cpcmd("SET SMSG OFF", 0, 0);
++ iucv_sever(smsg_pathid, 0);
++ iucv_unregister_program(smsg_handle);
++ }
++ return;
++}
++
++static int __init
++smsg_init(void)
++{
++ static unsigned char pgmmask[24] = {
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
++ };
++ int rc;
++
++ smsg_handle = iucv_register_program("SMSGIUCV ", "*MSG ",
++ pgmmask, &smsg_ops, 0);
++ if (!smsg_handle) {
++ printk(KERN_ERR "SMSGIUCV: failed to register to iucv");
++ return -EIO; /* better errno ? */
++ }
++ rc = iucv_connect (&smsg_pathid, 1, 0, "*MSG ", 0, 0, 0, 0,
++ smsg_handle, 0);
++ if (rc) {
++ printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG");
++ iucv_unregister_program(smsg_handle);
++ smsg_handle = 0;
++ return -EIO;
++ }
++ cpcmd("SET SMSG IUCV", 0, 0);
++ return 0;
++}
++
++module_init(smsg_init);
++module_exit(smsg_exit);
++MODULE_LICENSE("GPL");
++
++EXPORT_SYMBOL(smsg_register_callback);
++EXPORT_SYMBOL(smsg_unregister_callback);
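++
++/*
++ * Consumer sketch (hypothetical): the prefix string must stay
++ * allocated while registered, since only the pointer is stored; the
++ * handler gets the message text that follows the 8-byte sender id,
++ * starting with the prefix:
++ *
++ *   static void my_handler(char *str) { ... }
++ *   ...
++ *   smsg_register_callback("MYPFX", my_handler);
++ *   ...
++ *   smsg_unregister_callback("MYPFX", my_handler);
++ */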
+=== drivers/s390/net/qeth.c
+==================================================================
+--- drivers/s390/net/qeth.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/net/qeth.c (/trunk/2.4.27) (revision 52)
+@@ -1,6 +1,6 @@
+ /*
+ *
+- * linux/drivers/s390/net/qeth.c ($Revision: 1.337 $)
++ * linux/drivers/s390/net/qeth.c ($Revision: 1.337.4.32 $)
+ *
+ * Linux on zSeries OSA Express and HiperSockets support
+ *
+@@ -28,9 +28,6 @@
+ */
+
+ /*
+- * The driver supports in general all QDIO driven network devices on the
+- * Hydra card.
+- *
+ * For all devices, three channels must be available to the driver. One
+ * channel is the read channel, one is the write channel and the third
+ * one is the channel used to control QDIO.
+@@ -149,6 +146,7 @@
+ #include <linux/trdevice.h>
+ #include <linux/etherdevice.h>
+ #include <linux/reboot.h>
++#include <linux/mii.h>
+
+ #include <linux/if_vlan.h>
+ #include <asm/chandev.h>
+@@ -156,8 +154,10 @@
+ #include <asm/irq.h>
+ #include <asm/s390dyn.h>
+ #include <asm/debug.h>
++#include <asm/processor.h>
+
+ #include <asm/qdio.h>
++#include <asm/qeth.h>
+
+ #include "qeth_mpc.h"
+ #include "qeth.h"
+@@ -171,7 +171,7 @@
+ static int global_stay_in_mem=0;
+
+ /****************** MODULE STUFF **********************************/
+-#define VERSION_QETH_C "$Revision: 1.337 $"
++#define VERSION_QETH_C "$Revision: 1.337.4.32 $"
+ static const char *version="qeth S/390 OSA-Express driver (" \
+ VERSION_QETH_C "/" VERSION_QETH_H "/" VERSION_QETH_MPC_H
+ QETH_VERSION_IPV6 QETH_VERSION_VLAN ")";
+@@ -183,10 +183,8 @@
+
+ /******************** HERE WE GO ***********************************/
+
+-#define PROCFILE_SLEEP_SEM_MAX_VALUE 0
+-#define PROCFILE_IOCTL_SEM_MAX_VALUE 3
+-static struct semaphore qeth_procfile_ioctl_lock;
+-static struct semaphore qeth_procfile_ioctl_sem;
++
++
+ static qeth_card_t *firstcard=NULL;
+
+ static sparebufs_t sparebufs[MAX_SPARE_BUFFERS];
+@@ -224,9 +222,12 @@
+ /* thought I could get along without forward declarations...
+ * just laziness here */
+ static int qeth_reinit_thread(void*);
+-static void qeth_schedule_recovery(qeth_card_t *card);
++static inline void qeth_schedule_recovery(qeth_card_t *card);
++static int qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
++ unsigned short type, void *daddr, void *saddr,
++ unsigned len);
+
+-inline static int QETH_IP_VERSION(struct sk_buff *skb)
++static inline int QETH_IP_VERSION(struct sk_buff *skb)
+ {
+ switch (skb->protocol) {
+ case ETH_P_IPV6: return 6;
+@@ -243,6 +244,21 @@
+ return b;
+ }
+
++/*
++ * This is our local skb_unshare, only with pskb_copy instead of skb_copy.
++ * We place our headers where Ethernet MAC was, so we do not need
++ * full skb_copy.
++ */
++static inline struct sk_buff *qeth_pskb_unshare(struct sk_buff *skb, int pri)
++{
++ struct sk_buff *nskb;
++ if (!skb_cloned(skb))
++ return skb;
++ nskb = skb_copy(skb, pri);
++ kfree_skb(skb); /* free our shared copy */
++ return nskb;
++}
++
+ static inline unsigned int qeth_get_millis(void)
+ {
+ __u64 time;
+@@ -291,7 +307,8 @@
+ set_task_state(current,TASK_RUNNING);
+ }
+
+-static void qeth_get_mac_for_ipm(__u32 ipm,char *mac,struct net_device *dev) {
++static inline void qeth_get_mac_for_ipm(__u32 ipm,char *mac,
++ struct net_device *dev) {
+ if (dev->type==ARPHRD_IEEE802_TR)
+ ip_tr_mc_map(ipm,mac);
+ else
+@@ -403,7 +420,7 @@
+ #define QETH_GET_ADDR(x) ((__u32)x)
+ #endif /* CONFIG_ARCH_S390X */
+
+-static int qeth_does_card_exist(qeth_card_t *card)
++static inline int qeth_does_card_exist(qeth_card_t *card)
+ {
+ qeth_card_t *c=firstcard;
+ int rc=0;
+@@ -696,6 +713,12 @@
+ QETH_DBF_TEXT2(0,trace,dbf_text);
+ QETH_DBF_TEXT2(0,setup,dbf_text);
+
++ if ((card->options.layer2 == DO_LAYER2) &&
++ (!atomic_read(&card->mac_registered))) {
++ QETH_DBF_TEXT2(0,trace,"nomacaddr");
++ return -EPERM;
++ }
++
+ qeth_save_dev_flag_state(card);
+
+ netif_start_queue(dev);
+@@ -718,7 +741,9 @@
+
+ static int qeth_is_multicast_skb_at_all(struct sk_buff *skb,int version)
+ {
+- int i;
++ int i=RTN_UNSPEC;
++ qeth_card_t *card = (qeth_card_t *)skb->dev->priv;
++
+ if (skb->dst && skb->dst->neighbour) {
+ i=skb->dst->neighbour->type;
+ return ((i==RTN_BROADCAST)||
+@@ -731,13 +756,38 @@
+ } else if (version==6) {
+ return (skb->nh.raw[24]==0xff)?RTN_MULTICAST:0;
+ }
+- return 0;
++
++ if (!memcmp(skb->data,skb->dev->broadcast,6)) {
++ i=RTN_BROADCAST;
++ } else {
++ __u16 hdr_mac;
++ hdr_mac=*((__u16*)skb->data);
++ /* tr multicast? */
++ switch (card->link_type) {
++ case QETH_MPC_LINK_TYPE_HSTR:
++ case QETH_MPC_LINK_TYPE_LANE_TR:
++ if ( (hdr_mac==QETH_TR_MC_MAC_NC) ||
++ (hdr_mac==QETH_TR_MC_MAC_C) )
++ i = RTN_MULTICAST;
++ break;
++ /* eth or so multicast? */
++ default:
++ if ( (hdr_mac==QETH_ETH_MC_MAC_V4) ||
++ (hdr_mac==QETH_ETH_MC_MAC_V6) )
++ i = RTN_MULTICAST;
++ }
++ }
++ return ((i==RTN_BROADCAST)||
++ (i==RTN_MULTICAST)||
++ (i==RTN_ANYCAST))?i:0;
+ }
+
+ static int qeth_get_prioqueue(qeth_card_t *card,struct sk_buff *skb,
+ int multicast,int version)
+ {
+- if (!version) return QETH_DEFAULT_QUEUE;
++ if (!version &&
++ (card->type==QETH_CARD_TYPE_OSAE))
++ return QETH_DEFAULT_QUEUE;
+ switch (card->no_queues) {
+ case 1:
+ return 0;
+@@ -901,6 +951,7 @@
+ sprintf(dbf_text,"CBOT%04x",card->irq0);
+ QETH_DBF_TEXT1(0,trace,dbf_text);
+ qeth_set_dev_flag_norunning(card);
++ netif_carrier_off(card->dev);
+ problem=0;
+ goto out;
+ }
+@@ -910,6 +961,7 @@
+ atomic_set(&card->is_startlaned,1);
+ problem=PROBLEM_CARD_HAS_STARTLANED;
+ }
++ netif_carrier_on(card->dev);
+ goto out;
+ }
+ if ( *(PDU_ENCAPSULATION(buffer))==
+@@ -1046,7 +1098,7 @@
+ return retval;
+ }
+
+-static int qeth_get_spare_buf(void)
++static inline int qeth_get_spare_buf(void)
+ {
+ int i=0;
+ char dbf_text[15];
+@@ -1131,8 +1183,8 @@
+ }
+ }
+
+-static void qeth_queue_input_buffer(qeth_card_t *card,int bufno,
+- unsigned int under_int)
++static inline void qeth_queue_input_buffer(qeth_card_t *card,int bufno,
++ unsigned int under_int)
+ {
+ int count=0,start=0,stop=0,pos;
+ int result;
+@@ -1283,10 +1335,11 @@
+ return skb;
+ }
+
+-static struct sk_buff *qeth_get_next_skb(qeth_card_t *card,
+- int *element_ptr,int *pos_in_el_ptr,
+- void **hdr_ptr,
+- qdio_buffer_t *buffer)
++static inline struct sk_buff *qeth_get_next_skb(qeth_card_t *card,
++ int *element_ptr,
++ int *pos_in_el_ptr,
++ void **hdr_ptr,
++ qdio_buffer_t *buffer)
+ {
+ int length;
+ char *data_ptr;
+@@ -1357,7 +1410,9 @@
+ }
+
+ *hdr_ptr=SBALE_ADDR(element)+pos_in_el;
+-
++ if (card->options.layer2 == DO_LAYER2)
++ length=*(__u16*)((char*)(*hdr_ptr)+QETH_HEADER2_LEN_POS);
++ else
+ length=*(__u16*)((char*)(*hdr_ptr)+QETH_HEADER_LEN_POS);
+
+ #ifdef QETH_DBF_LIKE_HELL
+@@ -1394,10 +1449,6 @@
+ skb=qeth_get_skb(length+QETH_FAKE_LL_LEN);
+ if (!skb) goto nomem;
+ skb_pull(skb,QETH_FAKE_LL_LEN);
+- if (!skb) {
+- dev_kfree_skb_irq(skb);
+- goto nomem;
+- }
+ } else {
+ skb=qeth_get_skb(length);
+ if (!skb) goto nomem;
+@@ -1472,8 +1523,8 @@
+ return NULL;
+ }
+
+-static void qeth_transform_outbound_addrs(qeth_card_t *card,
+- qdio_buffer_t *buffer)
++static inline void qeth_transform_outbound_addrs(qeth_card_t *card,
++ qdio_buffer_t *buffer)
+ {
+ int i;
+ void *ptr;
+@@ -1485,7 +1536,8 @@
+ }
+ }
+ }
+-static void qeth_get_linux_addrs_for_buffer(qeth_card_t *card,int buffer_no)
++static inline void qeth_get_linux_addrs_for_buffer(qeth_card_t *card,
++ int buffer_no)
+ {
+ int i;
+ void *ptr;
+@@ -1501,7 +1553,7 @@
+ }
+ }
+
+-static void qeth_read_in_buffer(qeth_card_t *card,int buffer_no)
++static inline void qeth_read_in_buffer(qeth_card_t *card,int buffer_no)
+ {
+ struct sk_buff *skb;
+ void *hdr_ptr;
+@@ -1511,8 +1563,10 @@
+ unsigned short cast_type;
+ #ifdef QETH_VLAN
+ __u16 *vlan_tag;
++ __u16 l2_vlan_tag=0;
+ #endif
+ int i;
++ __u32 l2_cast_type=0;
+ int max_elements;
+ char dbf_text[15];
+ struct net_device *dev;
+@@ -1559,12 +1613,41 @@
+ if (skb) {
+ skb->dev=dev;
+
+-#ifdef QETH_IPV6
++ /* QDIO header type 2 -> layer 2 layout */
++ if ( (*(__u8 *)(hdr_ptr))==2) {
++ l2_cast_type = (*(__u8*)(hdr_ptr+3));
++ /* unicast is probably most of the traffic,
++ * so we don't use the default branch down at
++ * the bottom for this */
++ if (l2_cast_type &
++ QETH_QDIO_HEADER2_FLAG_UNICAST_FRAME)
++ skb->pkt_type = PACKET_HOST;
++ else if (l2_cast_type &
++ QETH_QDIO_HEADER2_FLAG_MULTICAST_FRAME)
++ skb->pkt_type = PACKET_MULTICAST;
++ else if (l2_cast_type &
++ QETH_QDIO_HEADER2_FLAG_BROADCAST_FRAME)
++ skb->pkt_type = PACKET_BROADCAST;
++ else /* default: unicast */
++ skb->pkt_type = PACKET_HOST;
++ version=0;
++
++#ifdef QETH_VLAN
++ if (l2_cast_type==
++ QETH_QDIO_HEADER2_FLAG_VLAN_FRAME)
++ l2_vlan_tag=
++ *(__u16*)(((__u8*)hdr_ptr)+10);
++#endif
++ skb->protocol=card->type_trans(skb,dev);
++ if(card->options.checksum_type==NO_CHECKSUMMING)
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ else
++ skb->ip_summed = CHECKSUM_NONE;
++ goto jump_layer2;
++ }
+ if ( (*(__u16 *)(hdr_ptr))&(QETH_HEADER_PASSTHRU) ) {
+ skb->protocol=card->type_trans(skb,dev);
+- } else
+-#endif /* QETH_IPV6 */
+- {
++ } else {
+ version=((*(__u16 *)(hdr_ptr))&
+ (QETH_HEADER_IPV6))?6:4;
+ skb->protocol=htons((version==4)?ETH_P_IP:
+@@ -1597,7 +1680,27 @@
+ sprintf(dbf_text,"castun%2x",cast_type);
+ QETH_DBF_TEXT2(1,trace,dbf_text);
+ }
+-
++#ifdef QETH_VLAN
++ if (*(__u8*)(hdr_ptr+11)&
++ QETH_EXT_HEADER_VLAN_FRAME) {
++ vlan_tag=(__u16 *)skb_push(skb,
++ VLAN_HLEN);
++ /*
++ if (*(__u8*)(hdr_ptr+11) &
++ QETH_EXT_HEADER_INCLUDE_VLAN_TAG) {
++ *vlan_tag = *(__u16*)(hdr_ptr+28);
++ *(vlan_tag+1)= *(__u16*)(hdr_ptr+30);
++ } else {
++ */
++ *vlan_tag = *(__u16*)(hdr_ptr+12);
++ *(vlan_tag+1) = skb->protocol;
++ /*
++ }
++ */
++ skb->protocol=
++ __constant_htons(ETH_P_8021Q);
++ }
++#endif
+ if (card->options.fake_ll==FAKE_LL) {
+ skb->mac.raw=skb->data-QETH_FAKE_LL_LEN;
+ if (skb->pkt_type==PACKET_MULTICAST) {
+@@ -1643,7 +1746,7 @@
+ } else {
+ /* clear source MAC for security reasons */
+ memset(skb->mac.raw+
+- QETH_FAKE_LL_DEST_MAC_POS,0,
++ QETH_FAKE_LL_SRC_MAC_POS,0,
+ QETH_FAKE_LL_ADDR_LEN);
+ }
+ memcpy(skb->mac.raw+
+@@ -1654,58 +1757,51 @@
+ skb->mac.raw=skb->data;
+ }
+
+- skb->ip_summed=card->options.checksum_type;
+ if (card->options.checksum_type==HW_CHECKSUMMING) {
+ /* do we have a checksummed packet? */
+- if (*(__u8*)(hdr_ptr+11)&
+- QETH_EXT_HEADER_CSUM_TRANSP_REQ) {
+- /* skb->ip_summed is set already */
+
+- /* vlan is not an issue here, it's still in
++ /* we only check for TCP/UDP checksums when the
++ * pseudo header was also checked sucessfully -- for
++ * the rest of the packets, it's not clear, whether
++ * the upper layer csum is alright. And they
++ * shouldn't occur too often anyway in real life */
++ if ( (*(__u8*)(hdr_ptr+11)&
++ (QETH_EXT_HEADER_CSUM_HDR_REQ|
++ QETH_EXT_HEADER_CSUM_TRANSP_REQ)) ==
++ (QETH_EXT_HEADER_CSUM_HDR_REQ|
++ QETH_EXT_HEADER_CSUM_TRANSP_REQ) ) {
++ /* csum does not need to be set
++ * inbound anyway
++ *
++ * vlan is not an issue here, it's still in
+ * the QDIO header, not pushed in the
+- * skb yet */
++ * skb yet *
+ int ip_len=(skb->data[0]&0x0f)<<2;
+
+ if (*(__u8*)(hdr_ptr+11)&
+ QETH_EXT_HEADER_CSUM_TRANSP_FRAME_TYPE) {
+- /* get the UDP checksum */
++ * get the UDP checksum *
+ skb->csum=*(__u16*)
+ (&skb->data[ip_len+
+ QETH_UDP_CSUM_OFFSET]);
+ } else {
+- /* get the TCP checksum */
++ * get the TCP checksum *
+ skb->csum=*(__u16*)
+ (&skb->data[ip_len+
+ QETH_TCP_CSUM_OFFSET]);
+ }
++ */
++ skb->ip_summed=CHECKSUM_UNNECESSARY;
+ } else {
+ /* make the stack check it */
+- skb->ip_summed=SW_CHECKSUMMING;
++ skb->ip_summed=CHECKSUM_NONE;
+ }
++ } else {
++ skb->ip_summed=card->options.checksum_type;
+ }
+
+-#ifdef QETH_VLAN
+- if (*(__u8*)(hdr_ptr+11)&
+- QETH_EXT_HEADER_VLAN_FRAME) {
+- vlan_tag=(__u16 *)skb_push(skb,
+- VLAN_HLEN);
+- /*
+- if (*(__u8*)(hdr_ptr+11) &
+- QETH_EXT_HEADER_INCLUDE_VLAN_TAG) {
+- *vlan_tag = *(__u16*)(hdr_ptr+28);
+- *(vlan_tag+1)= *(__u16*)(hdr_ptr+30);
+- } else {
+- */
+- *vlan_tag = *(__u16*)(hdr_ptr+12);
+- *(vlan_tag+1) = skb->protocol;
+- /*
+- }
+- */
+- skb->protocol=
+- __constant_htons(ETH_P_8021Q);
+- }
+-#endif
+ }
++jump_layer2:
+
+ #ifdef QETH_PERFORMANCE_STATS
+ card->perf_stats.inbound_time+=
+@@ -1718,7 +1814,19 @@
+ QETH_DBF_TEXT6(0,trace,dbf_text);
+ #endif /* QETH_DBF_LIKE_HELL */
+
++#ifdef QETH_VLAN
++ if (l2_vlan_tag) {
++ /* the VLAN information has been in the QDIO
++ * header, therefore remove the tag for the
++ * hw acceleration function */
++ skb_pull(skb,VLAN_HLEN);
++ vlan_hwaccel_rx(skb,card->vlangrp,l2_vlan_tag);
++ } else {
++ netif_rx(skb);
++ }
++#else
+ netif_rx(skb);
++#endif /* QETH_VLAN */
+
+ card->stats->rx_packets++;
+ card->stats->rx_bytes+=skb->len;
+@@ -1735,39 +1843,95 @@
+ buffer_no]);
+ }
+
+-static void qeth_fill_header(qeth_hdr_t *hdr,struct sk_buff *skb,
+- int version,int multicast)
++static inline void qeth_fill_header(qeth_hdr_t *hdr,struct sk_buff *skb,
++ int version,int multicast)
+ {
+ #ifdef QETH_DBF_LIKE_HELL
+ char dbf_text[15];
+ #endif /* QETH_DBF_LIKE_HELL */
+-#ifdef QETH_VLAN
+ qeth_card_t *card;
+-#endif
++ int is_l2=0;
+
++ card = (qeth_card_t *)skb->dev->priv;
++
++ if (card->options.layer2 == DO_LAYER2) {
++ is_l2=1;
++ } else {
+ hdr->id=1;
+ hdr->ext_flags=0;
+
++ /* as skb->len includes the header now */
++ hdr->length=skb->len-QETH_HEADER_SIZE;
+ #ifdef QETH_VLAN
+ /* before we're going to overwrite
+- this location with next hop ip
+- */
+- card = (qeth_card_t *)skb->dev->priv;
++ * this location with next hop ip.
++ * v6 uses passthrough, v4 sets the tag in the QDIO header */
+ if ((card->vlangrp != NULL) &&
+- (version == 4) &&
+- vlan_tx_tag_present(skb))
+- {
+- hdr->ext_flags = QETH_EXT_HEADER_VLAN_FRAME;
++ vlan_tx_tag_present(skb)) {
++ if (version == 4) {
++ hdr->ext_flags = QETH_EXT_HEADER_VLAN_FRAME;
++ } else {
++ hdr->ext_flags =
++ QETH_EXT_HEADER_INCLUDE_VLAN_TAG;
++ }
+ hdr->vlan_id = vlan_tx_tag_get(skb);
+ }
+ #endif
++ }
+
+- hdr->length=skb->len-QETH_HEADER_SIZE; /* as skb->len includes
+- the header now */
++ if (is_l2) {
++ /* set byte 0 to "0x02" and byte 3 to casting flags */
++ if (multicast==RTN_MULTICAST) {
++ *(__u32*)hdr=(2<<24)+
++ QETH_QDIO_HEADER2_FLAG_MULTICAST_FRAME;
++ } else if (multicast==RTN_BROADCAST) {
++ *(__u32*)hdr=(2<<24)+
++ QETH_QDIO_HEADER2_FLAG_BROADCAST_FRAME;
++ } else {
++ /* do it on our own :-( */
++ if (!memcmp(skb->data+QETH_HEADER_SIZE,
++ skb->dev->broadcast,6)) { /* broadcast? */
++ *(__u32*)hdr=(2<<24)+
++ QETH_QDIO_HEADER2_FLAG_BROADCAST_FRAME;
++ } else {
++ __u16 hdr_mac;
++ hdr_mac=*((__u16*)skb->data);
++ /* tr multicast? */
++ switch (card->link_type) {
++ case QETH_MPC_LINK_TYPE_HSTR:
++ case QETH_MPC_LINK_TYPE_LANE_TR:
++ if ( (hdr_mac==QETH_TR_MC_MAC_NC) ||
++ (hdr_mac==QETH_TR_MC_MAC_C) )
++ *(__u32*)hdr=(2<<24)+
++ QETH_QDIO_HEADER2_FLAG_MULTICAST_FRAME;
++ else
++ *(__u32*)hdr=(2<<24)+
++ QETH_QDIO_HEADER2_FLAG_UNICAST_FRAME;
++ break;
++ /* eth or so multicast? */
++ default:
++ if ( (hdr_mac==QETH_ETH_MC_MAC_V4) ||
++ (hdr_mac==QETH_ETH_MC_MAC_V6) )
++ *(__u32*)hdr=(2<<24)+
++ QETH_QDIO_HEADER2_FLAG_MULTICAST_FRAME;
++ else
++ *(__u32*)hdr=(2<<24)+
++ QETH_QDIO_HEADER2_FLAG_UNICAST_FRAME;
++ }
++ }
++ }
+
+- /* yes, I know this is doubled code, but a small little bit
+- faster maybe */
+- if (version==4) { /* IPv4 */
++ *(__u16*)(((__u8*)hdr)+6)=skb->len-QETH_HEADER_SIZE;
++ #ifdef QETH_VLAN
++ /* VSWITCH relies on the VLAN information to be present
++ * in the QDIO header */
++ if ((card->vlangrp != NULL) &&
++ vlan_tx_tag_present(skb)) {
++ *(__u32*)hdr|=QETH_QDIO_HEADER2_FLAG_VLAN_FRAME;
++ *(__u16*)(((__u8*)hdr)+10)=vlan_tx_tag_get(skb);
++ }
++ #endif
++ } else if (version==4) { /* IPv4 */
+ if (multicast==RTN_MULTICAST) {
+ hdr->flags=QETH_CAST_MULTICAST;
+ } else if (multicast==RTN_BROADCAST) {
+@@ -1820,7 +1984,13 @@
+ skb->dev->broadcast,6)) { /* broadcast? */
+ hdr->flags=QETH_CAST_BROADCAST|QETH_HEADER_PASSTHRU;
+ } else {
+- hdr->flags=QETH_CAST_UNICAST|QETH_HEADER_PASSTHRU;
++ if (multicast==RTN_MULTICAST) {
++ hdr->flags=QETH_CAST_MULTICAST|
++ QETH_HEADER_PASSTHRU;
++ } else {
++ hdr->flags=QETH_CAST_UNICAST|
++ QETH_HEADER_PASSTHRU;
++ }
+ }
+ }
+ #ifdef QETH_DBF_LIKE_HELL
+@@ -1836,7 +2006,7 @@
+ #endif /* QETH_DBF_LIKE_HELL */
+ }
+
+-static int inline qeth_fill_buffer(qdio_buffer_t *buffer,char *dataptr,
++static inline int qeth_fill_buffer(qdio_buffer_t *buffer,char *dataptr,
+ int length,int element)
+ {
+ int length_here;
+@@ -1891,8 +2061,8 @@
+ return element;
+ }
+
+-static void qeth_flush_packed_packets(qeth_card_t *card,int queue,
+- int under_int)
++static inline void qeth_flush_packed_packets(qeth_card_t *card,int queue,
++ int under_int)
+ {
+ qdio_buffer_t *buffer;
+ int result;
+@@ -1951,8 +2121,23 @@
+ * adapter honors it or not */
+ switch (card->send_state[queue]) {
+ case SEND_STATE_DONT_PACK:
++ /* only request a PCI, if the fill level of the queue
++ * is close to the high watermark, so that we don't
++ * lose initiative during packing */
+ if (atomic_read(&card->outbound_used_buffers[queue])
+ <HIGH_WATERMARK_PACK-WATERMARK_FUZZ) break;
++
++ last_pci=atomic_read(&card->last_pci_pos[queue]);
++ /* compensate queues that wrapped around */
++ if (position_for_do_qdio<last_pci)
++ last_pci-=QDIO_MAX_BUFFERS_PER_Q;
++
++ /* reduce the number of PCIs in cases where we are always
++ * a little below the high watermark for packing -- request
++ * PCIs less frequently */
++ if (position_for_do_qdio-last_pci<
++ HIGH_WATERMARK_PACK-WATERMARK_FUZZ) break;
++
+ /* set the PCI bit */
+ card->outbound_ringbuffer[queue]->
+ buffer[position_for_do_qdio].element[0].flags|=0x40;
+@@ -1966,13 +2151,13 @@
+ * last_pci is the position of the last pci we've set
+ * position_for_do_qdio is the position we will send out now
+ * outbound_used_buffers is the number of buffers used (means
+- * all buffers hydra has, inclusive position_for_do_qdio)
++ * all buffers OSA has, inclusive position_for_do_qdio)
+ *
+ * we have to request a pci, if we have got the buffer of the
+ * last_pci position back.
+ *
+ * position_for_do_qdio-outbound_used_buffers is the newest
+- * buffer that we got back from hydra
++ * buffer that we got back from OSA
+ *
+ * if this is greater or equal than the last_pci position,
+ * we should request a pci, as no pci request is
+@@ -2055,8 +2240,8 @@
+ return ERROR_LINK_FAILURE; /* should never happen */
+ }
+
+-static void qeth_free_buffer(qeth_card_t *card,int queue,int bufno,
+- int qdio_error,int siga_error)
++static inline void qeth_free_buffer(qeth_card_t *card,int queue,int bufno,
++ int qdio_error,int siga_error)
+ {
+ struct sk_buff *skb;
+ int error;
+@@ -2139,7 +2324,8 @@
+ case ERROR_LINK_FAILURE:
+ case ERROR_KICK_THAT_PUPPY:
+ QETH_DBF_TEXT4(0,trace,"endeglnd");
+- dst_link_failure(skb);
++ card->stats->tx_dropped++;
++ card->stats->tx_errors++;
+ atomic_dec(&skb->users);
+ dev_kfree_skb_irq(skb);
+ break;
+@@ -2164,7 +2350,7 @@
+ card->send_retries[queue][bufno]=0;
+ }
+
+-static void qeth_free_all_skbs(qeth_card_t *card)
++static inline void qeth_free_all_skbs(qeth_card_t *card)
+ {
+ int q,b;
+
+@@ -2199,7 +2385,7 @@
+ }
+ #ifdef QETH_VLAN
+
+-void qeth_insert_ipv6_vlan_tag(struct sk_buff *__skb)
++static inline void qeth_insert_ipv6_vlan_tag(struct sk_buff *__skb)
+ {
+
+ /* Move the mac addresses to the beginning of the new header.
+@@ -2230,9 +2416,9 @@
+
+
+
+-static void qeth_send_packet_fast(qeth_card_t *card,struct sk_buff *skb,
+- struct net_device *dev,
+- int queue,int version,int multicast)
++static inline void qeth_send_packet_fast(qeth_card_t *card,struct sk_buff *skb,
++ struct net_device *dev,
++ int queue,int version,int multicast)
+ {
+ qeth_ringbuffer_element_t *mybuffer;
+ int position;
+@@ -2250,8 +2436,8 @@
+ if ((version)&&(!card->realloc_message)) {
+ card->realloc_message=1;
+ PRINT_WARN("%s: not enough headroom in skb. " \
+- "Try increasing the " \
+- "add_hhlen parameter by %i.\n",
++ "Increasing the " \
++ "add_hhlen parameter by %i may help.\n",
+ card->dev_name,
+ QETH_HEADER_SIZE-skb_headroom(skb));
+ }
+@@ -2277,9 +2463,11 @@
+ skb=nskb;
+ }
+ #ifdef QETH_VLAN
++ /* ATT: this assumes L2 wants the VLAN tag in the payload,
++ * otherwise remove (version==0) */
+ if ( (card->vlangrp != NULL) &&
+ vlan_tx_tag_present(skb) &&
+- (version==6)) {
++ ((version==6)||(version==0))) {
+ qeth_insert_ipv6_vlan_tag(skb);
+ }
+ #endif
+@@ -2327,9 +2515,11 @@
+
+ /* no checks, if all elements are used, as then we would not be here (at most
+ 127 buffers are enqueued) */
+-static void qeth_send_packet_packed(qeth_card_t *card,struct sk_buff *skb,
+- struct net_device *dev,
+- int queue,int version,int multicast)
++static inline void qeth_send_packet_packed(qeth_card_t *card,
++ struct sk_buff *skb,
++ struct net_device *dev,
++ int queue,int version,
++ int multicast)
+ {
+ qeth_ringbuffer_element_t *mybuffer;
+ int elements_needed;
+@@ -2373,9 +2563,11 @@
+ skb=nskb;
+ }
+ #ifdef QETH_VLAN
++ /* ATT: this assumes L2 wants the VLAN tag in the payload,
++ * otherwise remove (version==0) */
+ if ( (card->vlangrp != NULL) &&
+ vlan_tx_tag_present(skb) &&
+- (version==6)) {
++ ((version==6)||(version==0))) {
+ qeth_insert_ipv6_vlan_tag(skb);
+ }
+
+@@ -2490,16 +2682,25 @@
+ return old_val;
+ }
+
+-static int qeth_do_send_packet(qeth_card_t *card,struct sk_buff *skb,
+- struct net_device *dev)
++static inline int qeth_do_send_packet(qeth_card_t *card,struct sk_buff *skb,
++ struct net_device *dev)
+ {
+ int queue,result=0;
+ int multicast,version;
+ char dbf_text[15];
+ char dbf_text2[15]="stchupXX";
+
++ if (card->options.layer2 == DO_LAYER2)
++ version=0;
++ else
+ version=QETH_IP_VERSION(skb);
+ multicast=qeth_is_multicast_skb_at_all(skb,version);
++ if ((multicast == RTN_BROADCAST) && (card->broadcast_capable == 0)) {
++ card->stats->tx_dropped++;
++ card->stats->tx_errors++;
++ dev_kfree_skb_irq(skb);
++ return 0;
++ }
+ queue=qeth_get_prioqueue(card,skb,multicast,version);
+
+ #ifdef QETH_DBF_LIKE_HELL
+@@ -2610,21 +2811,37 @@
+ qeth_card_t *card;
+ char dbf_text[15];
+ int result;
++ unsigned long stackptr;
+
+ card=(qeth_card_t*)(dev->priv);
+
+- if (skb==NULL)
+- return 0;
++#ifdef CONFIG_ARCH_S390X
++ asm volatile ("lgr %0,15" : "=d" (stackptr));
++#else /* CONFIG_ARCH_S390X */
++ asm volatile ("lr %0,15" : "=d" (stackptr));
++#endif /* CONFIG_ARCH_S390X */
++ /* prevent stack overflows */
++ /* normal and async stacks are both 8k on s390 and 16k on s390x,
++ * so it doesn't matter whether we're in an interrupt */
++ if ( (stackptr & STACK_PTR_MASK)<
++ (sizeof(struct task_struct) + WORST_CASE_STACK_USAGE) ) {
++ PRINT_ERR("delaying packet transmission " \
++ "due to potential stack overflow\n");
++ sprintf(dbf_text,"STOF%4x",card->irq0);
++ QETH_DBF_TEXT1(1,trace,dbf_text);
++ PRINT_ERR("Backtrace follows:\n");
++ show_trace((unsigned long *)stackptr);
++ return -EBUSY;
++ }
+
+ #ifdef QETH_DBF_LIKE_HELL
+ QETH_DBF_HEX4(0,data,skb->data,__max(QETH_DBF_DATA_LEN,skb->len));
+ #endif /* QETH_DBF_LIKE_HELL */
+
+- netif_stop_queue(dev);
+-
+ if (!card) {
+ QETH_DBF_TEXT2(0,trace,"XMNSNOCD");
+- dst_link_failure(skb);
++		/* card is NULL here, so its statistics can't be updated */
+ dev_kfree_skb_irq(skb);
+ return 0;
+ }
+@@ -2637,11 +2854,31 @@
+ card->stats->tx_carrier_errors++;
+ sprintf(dbf_text,"XMNS%4x",card->irq0);
+ QETH_DBF_TEXT2(0,trace,dbf_text);
+- dst_link_failure(skb);
++ card->stats->tx_dropped++;
++ card->stats->tx_errors++;
+ dev_kfree_skb_irq(skb);
+ return 0;
+ }
+
++ if (dev->hard_header == qeth_fake_header) {
++ /*
++		 * in theory, if we run with QETH_IPV6 undefined, we should
++ * always unshare, because we do skb_push, then overwrite
++ * that place with OSA header in qeth_send_packet_fast().
++ * But it is only visible to one application - tcpdump.
++ * Nobody else cares if (fake) MAC header gets smashed.
++ * So, we only do it if fake_ll is in effect.
++ */
++ if ((skb = qeth_pskb_unshare(skb, GFP_ATOMIC)) == NULL) {
++ card->stats->tx_dropped++;
++			/* skb is NULL here; freeing it would oops */
++ return 0;
++ }
++ skb_pull(skb, QETH_FAKE_LL_LEN);
++ }
++
++ netif_stop_queue(dev);
++
+ result=qeth_do_send_packet(card,skb,dev);
+
+ if (!result)
+@@ -2650,6 +2887,36 @@
+ return result;
+ }
+
++/*
++ * This function is needed to tell af_packet.c to process headers.
++ * It is not called from there, but only from the transmit path,
++ * when we do not need any actual header.
++ *
++ * N.B. Why do we insist on kludging here instead of fixing tcpdump?
++ * Because tcpdump is shared among gazillions of platforms, and
++ * there is a) no reliable way to identify qeth or its packets
++ * in pcap-linux.c (sll->sll_halen is the only hope); b) no easy
++ * way to pass this information from libpcap to tcpdump proper.
++ *
++ * XXX This fails with TR: traffic flows ok, but tcpdump remains confused.
++ */
++int qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
++ unsigned short type, void *daddr, void *saddr,
++ unsigned len)
++{
++ unsigned char *hdr;
++
++ hdr = skb_push(skb, QETH_FAKE_LL_LEN);
++ memcpy(hdr, "FAKELLFAKELL", ETH_ALEN*2);
++ if (type != ETH_P_802_3)
++ *(u16 *)(hdr + ETH_ALEN*2) = htons(type);
++ else
++ *(u16 *)(hdr + ETH_ALEN*2) = htons(len);
++
++ /* XXX Maybe dev->hard_header_len here? Then skb_pull by same size. */
++ return QETH_FAKE_LL_LEN;
++}
++
+ static struct net_device_stats* qeth_get_stats(struct net_device *dev)
+ {
+ qeth_card_t *card;
+@@ -2803,23 +3070,19 @@
+ return retval;
+ }
+
+-static void qeth_wakeup_procfile(void)
++static void qeth_snmp_notify(void)
+ {
+- QETH_DBF_TEXT5(0,trace,"procwkup");
+- if (atomic_read(&qeth_procfile_ioctl_sem.count)<
+- PROCFILE_SLEEP_SEM_MAX_VALUE)
+- up(&qeth_procfile_ioctl_sem);
+-}
+-
+-
+-static int qeth_sleepon_procfile(void)
+-{
+- QETH_DBF_TEXT5(0,trace,"procslp");
+- if (down_interruptible(&qeth_procfile_ioctl_sem)) {
+- up(&qeth_procfile_ioctl_sem);
+- return -ERESTARTSYS;
++	/* notify all registered processes */
++ struct list_head *l;
++ struct qeth_notify_list *n_entry;
++
++ QETH_DBF_TEXT5(0,trace,"snmpnoti");
++ spin_lock(¬ify_lock);
++ list_for_each(l, ¬ify_list) {
++ n_entry = list_entry(l, struct qeth_notify_list, list);
++ send_sig(n_entry->signum, n_entry->task, 1);
+ }
+- return 0;
++ spin_unlock(¬ify_lock);
+ }
+
+ static char* qeth_send_control_data(qeth_card_t *card,unsigned char *buffer,
+@@ -2889,17 +3152,19 @@
+ QETH_DBF_TEXT2(0,trace,"scd:doio");
+ sprintf(dbf_text,"%4x",(__s16)result);
+ QETH_DBF_TEXT2(0,trace,dbf_text);
++ /* re-enable qeth_send_control_data again */
++ atomic_set(&card->write_busy,0);
+ return NULL;
+ }
+
+ if (intparam==IPA_IOCTL_STATE) {
+ if (qeth_sleepon_ioctl(card,QETH_IPA_TIMEOUT)) {
+- QETH_DBF_TEXT2(0,trace,"scd:ioctime");
++ QETH_DBF_TEXT2(0,trace,"scd:ioct");
+ /* re-enable qeth_send_control_data again */
+ atomic_set(&card->write_busy,0);
+ return NULL;
+ }
+- rec_buf=card->ipa_buf;
++ rec_buf=card->dma_stuff->recbuf;
+ sprintf(dbf_text,"scro%4x",card->irq0);
+ } else {
+ if (qeth_sleepon(card,(setip)?QETH_IPA_TIMEOUT:
+@@ -2929,6 +3194,7 @@
+ ipa_cmd_t *reply;
+ int ipa_cmd;
+ int result;
++ unsigned char prot;
+
+ /* don't muck around with ipv6 if there's no use to do so */
+ if ( (cmd->prot_version==6) &&
+@@ -2938,7 +3204,12 @@
+
+ memcpy(card->send_buf,IPA_PDU_HEADER,
+ IPA_PDU_HEADER_SIZE);
+-
++ if (card->options.layer2 == DO_LAYER2) {
++ prot=QETH_IPA_CMD_PROT_LAYER2;
++ } else {
++ prot=QETH_IPA_CMD_PROT_TCPIP;
++ }
++ memcpy(QETH_IPA_CMD_PROT_TYPE(card->send_buf),&prot,1);
+ memcpy(QETH_IPA_CMD_DEST_ADDR(card->send_buf),
+ &card->token.ulp_connection_r,QETH_MPC_TOKEN_LENGTH);
+
+@@ -2963,6 +3234,15 @@
+ if ((ipa_cmd==IPA_CMD_SETADAPTERPARMS)&&(result==0)) {
+ result=reply->data.setadapterparms.return_code;
+ }
++ if ( (ipa_cmd==IPA_CMD_SETASSPARMS) &&
++ (result==0) &&
++ (reply->data.setassparms.assist_no==
++ IPA_INBOUND_CHECKSUM) &&
++ (reply->data.setassparms.command_code==
++ IPA_CMD_ASS_START) ) {
++ card->csum_enable_mask=
++ reply->data.setassparms.data.flags_32bit;
++ }
+ }
+ return result;
+ }
+@@ -2976,7 +3256,11 @@
+ cmd->seq_no=card->seqno.ipa++;
+ cmd->adapter_type=qeth_get_adapter_type_for_ipa(card->link_type);
+ cmd->rel_adapter_no=(__u8)card->options.portno;
++ if (card->options.layer2 == DO_LAYER2) {
++ cmd->prim_version_no=2;
++ } else {
+ cmd->prim_version_no=1;
++ }
+ cmd->param_count=1;
+ cmd->prot_version=ip_vers;
+ cmd->ipa_supported=0;
+@@ -3072,6 +3356,7 @@
+ int ipa_cmd;
+ int result;
+ __u16 s1,s2;
++ unsigned char prot;
+
+ /* don't muck around with ipv6 if there's no use to do so */
+ if ( (cmd->prot_version==6) &&
+@@ -3081,6 +3366,12 @@
+
+ memcpy(card->send_buf,IPA_PDU_HEADER,
+ IPA_PDU_HEADER_SIZE);
++ if (card->options.layer2 == DO_LAYER2) {
++ prot=QETH_IPA_CMD_PROT_LAYER2;
++ } else {
++ prot=QETH_IPA_CMD_PROT_TCPIP;
++ }
++ memcpy(QETH_IPA_CMD_PROT_TYPE(card->send_buf),&prot,1);
+ memcpy(QETH_IPA_CMD_DEST_ADDR(card->send_buf),
+ &card->token.ulp_connection_r,QETH_MPC_TOKEN_LENGTH);
+ memcpy(card->send_buf+IPA_PDU_HEADER_SIZE,
+@@ -3183,17 +3474,29 @@
+
+ static int qeth_ioctl_handle_arp_data(qeth_card_t *card, arp_cmd_t *reply)
+ {
++ if ( (reply->data.setassparms.command_code==
++ IPA_CMD_ASS_ARP_SET_NO_ENTRIES) ||
++ (reply->data.setassparms.command_code==
++ IPA_CMD_ASS_ARP_ADD_ENTRY) ||
++ (reply->data.setassparms.command_code==
++ IPA_CMD_ASS_ARP_REMOVE_ENTRY) ) {
++ if (reply->data.setassparms.return_code) {
++ return ARP_RETURNCODE_ERROR;
++ } else {
++ return ARP_RETURNCODE_LASTREPLY;
++ }
++ }
++ /* check for corrupt data */
++ if (reply->data.setassparms.seq_no <= 0)
++ return ARP_RETURNCODE_ERROR;
++
+ if (reply->data.setassparms.seq_no == 1) {
+ if (card->ioctl_buffersize <=
+ (sizeof(__u16) + sizeof(int) + reply->data.
+- setassparms.number_of_replies * ARP_DATA_SIZE)) {
++ setassparms.number_of_replies * ARP_DATA_SIZE))
+ card->ioctl_returncode = ARP_RETURNCODE_ERROR;
+- } else {
++ else
+ card->ioctl_returncode = ARP_RETURNCODE_SUCCESS;
+- card->number_of_entries = 0;
+- card->ioctl_buffer_pointer = card->ioctl_data_buffer+
+- sizeof(__u16) + sizeof(int);
+- }
+ }
+
+ if (card->ioctl_returncode != ARP_RETURNCODE_ERROR &&
+@@ -3229,6 +3532,21 @@
+ }
+ return card->ioctl_returncode;
+ }
++static int qeth_is_arp_command(int cmd)
++{
++ switch (cmd) {
++ case IPA_CMD_ASS_ARP_SET_NO_ENTRIES:
++ case IPA_CMD_ASS_ARP_QUERY_CACHE:
++ case IPA_CMD_ASS_ARP_ADD_ENTRY:
++ case IPA_CMD_ASS_ARP_REMOVE_ENTRY:
++ case IPA_CMD_ASS_ARP_FLUSH_CACHE:
++ case IPA_CMD_ASS_ARP_QUERY_INFO:
++ case IPA_CMD_ASS_ARP_QUERY_STATS:
++ return 1;
++ default:
++ return 0;
++ }
++}
+
+ static int qeth_look_for_arp_data(qeth_card_t *card)
+ {
+@@ -3245,9 +3563,7 @@
+ result=ARP_FLUSH;
+ } else if ( (reply->command == IPA_CMD_SETASSPARMS) &&
+ (reply->data.setassparms.assist_no == IPA_ARP_PROCESSING) &&
+- (reply->data.setassparms.command_code ==
+- IPA_CMD_ASS_ARP_QUERY_INFO) &&
+- (card->ioctl_returncode == ARP_RETURNCODE_SUCCESS)) {
++ (qeth_is_arp_command(reply->data.setassparms.command_code)) ) {
+ result = qeth_ioctl_handle_arp_data(card,reply);
+ } else if ( (reply->command == IPA_CMD_SETADAPTERPARMS) &&
+ (reply->data.setadapterparms.command_code ==
+@@ -3286,8 +3602,13 @@
+ cmd->data.setassparms.return_code=0;
+ cmd->data.setassparms.seq_no=0;
+
++ /* initialize ioctl pointers and buffer sizes */
+ card->ioctl_buffersize = data_size;
+ card->ioctl_data_buffer = (char *) vmalloc(data_size);
++	if (card->ioctl_data_buffer)
++		memset(card->ioctl_data_buffer, 0, data_size);
++ card->ioctl_buffer_pointer = card->ioctl_data_buffer +
++ sizeof(__u16) + sizeof(int); /* flags and # of entries */
++ card->number_of_entries = 0;
+ if (!card->ioctl_data_buffer) {
+ kfree(cmd);
+ return IPA_REPLY_FAILED;
+@@ -3305,8 +3626,9 @@
+ result = IPA_REPLY_SUCCESS;
+ memcpy(((char *)(card->ioctl_data_buffer)) + sizeof(__u16),
+ &(card->number_of_entries),sizeof(int));
+- copy_to_user(req->ifr_ifru.ifru_data,
+- card->ioctl_data_buffer,data_size);
++ if (copy_to_user(req->ifr_ifru.ifru_data,
++ card->ioctl_data_buffer,data_size))
++			result = -EFAULT;
+ }
+ card->ioctl_buffer_pointer = NULL;
+ vfree(card->ioctl_data_buffer);
+@@ -3373,16 +3695,14 @@
+ result = IPA_REPLY_FAILED;
+ goto snmp_out;
+ }
+- if (result == ARP_RETURNCODE_ERROR ) {
+- copy_to_user(req->ifr_ifru.ifru_data+SNMP_REQUEST_DATA_OFFSET,
+- card->ioctl_data_buffer,card->ioctl_buffersize);
++ if (result == ARP_RETURNCODE_ERROR )
+ result = IPA_REPLY_FAILED;
+- }
+- else {
+- copy_to_user(req->ifr_ifru.ifru_data+SNMP_REQUEST_DATA_OFFSET,
+- card->ioctl_data_buffer,card->ioctl_buffersize);
++ else
+ result = IPA_REPLY_SUCCESS;
+- }
++
++ if (copy_to_user(req->ifr_ifru.ifru_data + SNMP_REQUEST_DATA_OFFSET,
++ card->ioctl_data_buffer, card->ioctl_buffersize))
++ result = -EFAULT;
+ snmp_out:
+ card->number_of_entries = 0;
+ card->ioctl_buffersize = 0;
+@@ -3556,6 +3876,43 @@
+ ((ipacmd==IPA_CMD_SETIPM)?IPA_SETIP_FLAG:0));
+ }
+
++static int qeth_send_setdelmac(qeth_card_t *card,__u8 *mac, int ipacmd)
++{
++ ipa_cmd_t cmd;
++
++ qeth_fill_ipa_cmd(card,&cmd,ipacmd,4);
++ cmd.data.setdelmac.mac_length = 6;
++ memcpy(&cmd.data.setdelmac.mac,mac,6);
++ return qeth_send_ipa_cmd(card,&cmd,0,IPA_CMD_STATE);
++}
++#ifdef QETH_VLAN
++static void qeth_send_setdel_vlan(qeth_card_t *card,int i,int ipacmd)
++{
++ int result;
++ ipa_cmd_t cmd;
++ char dbf_text[15];
++
++ qeth_fill_ipa_cmd(card,&cmd,ipacmd,4);
++ cmd.data.setdelvlan.vlan_id = i;
++ result=qeth_send_ipa_cmd(card,&cmd,0,IPA_CMD_STATE);
++
++ if (result) {
++ PRINT_ERR("Could not %s VLAN %i on %s: 0x%x. Continuing\n",
++ (ipacmd==IPA_CMD_SETVLAN)?"set":"delete",i,
++ card->dev_name,result);
++ if (ipacmd==IPA_CMD_SETVLAN) {
++ sprintf(dbf_text,"STVLF%3x",i);
++ QETH_DBF_TEXT2(0,trace,dbf_text);
++ } else {
++ sprintf(dbf_text,"DLVLF%3x",i);
++ QETH_DBF_TEXT2(0,trace,dbf_text);
++ }
++ sprintf(dbf_text,"%4x%4x",card->irq0,result);
++ QETH_DBF_TEXT2(0,trace,dbf_text);
++ }
++}
++#endif
++
+ #define PRINT_SETIP_ERROR(x) \
+ if (result) \
+ PRINT_ERR("setip%c: return code 0x%x (%s)\n",x,result, \
+@@ -3571,6 +3928,11 @@
+ (result==0xe00e)?"unsupported arp assist cmd": \
+ (result==0xe00f)?"arp assist not enabled": \
+ (result==0xe080)?"startlan disabled": \
++ (result==0xf012)?"unicast IP address invalid": \
++ (result==0xf013)?"multicast router limit reached": \
++ (result==0xf014)?"stop assist not supported": \
++ (result==0xf015)?"multicast assist not set": \
++ (result==0xf080)?"VM: startlan disabled": \
+ (result==-1)?"IPA communication timeout": \
+ "unknown return code")
+
+@@ -3612,7 +3974,8 @@
+ QETH_DBF_TEXT2(0,trace,dbf_text);
+ }
+
+- if (((result==-1)||(result==0xe080))&&(retries--)) {
++ if ( ((result==-1)||(result==0xe080)||(result==0xf080))&&
++ (retries--) ) {
+ sprintf(dbf_text,"sipr%4x",card->irq0);
+ QETH_DBF_TEXT2(0,trace,dbf_text);
+ if (ip_vers==4) {
+@@ -3648,6 +4011,29 @@
+ int retries;
+ char dbf_text[15];
+
++ if (card->options.layer2 == DO_LAYER2) {
++ result=qeth_send_setdelmac(card,mac,IPA_CMD_SETGMAC);
++ if (result) {
++ QETH_DBF_TEXT2(0,trace,"SETMCFLD");
++ QETH_DBF_HEX2(0,trace,mac,QETH_DBF_TRACE_LEN);
++ sprintf(dbf_text,"%4x%4x",card->irq0,result);
++ QETH_DBF_TEXT2(0,trace,dbf_text);
++ if (result==0x2005) {
++ PRINT_WARN("Group MAC " \
++ "%02x:%02x:%02x:%02x:%02x:%02x already " \
++				"exists on %s!\n",
++ mac[0],mac[1],mac[2],mac[3],mac[4],mac[5],
++ card->dev_name);
++ result = 0;
++ } else
++ PRINT_ERR("Could not set group MAC " \
++ "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
++ mac[0],mac[1],mac[2],mac[3],mac[4],mac[5],
++ card->dev_name,result);
++ }
++ return result;
++ }
+ retries=(use_retries)?QETH_SETIP_RETRIES:1;
+ if (qeth_is_ipa_covered_by_ipato_entries(ip_vers,ip,card)) {
+ sprintf(dbf_text,"imto%4x",card->irq0);
+@@ -3694,6 +4080,23 @@
+ static inline int qeth_send_delipm(qeth_card_t *card,__u8 *ip,
+ __u8 *mac,short ip_vers)
+ {
++ int result;
++ char dbf_text[15];
++
++ if (card->options.layer2 == DO_LAYER2) {
++ result=qeth_send_setdelmac(card,mac,IPA_CMD_DELGMAC);
++ if (result) {
++ QETH_DBF_TEXT2(0,trace,"DELMCFLD");
++ QETH_DBF_HEX2(0,trace,mac,QETH_DBF_TRACE_LEN);
++ sprintf(dbf_text,"%4x%4x",card->irq0,result);
++ QETH_DBF_TEXT2(0,trace,dbf_text);
++ PRINT_ERR("Could not delete group MAC " \
++ "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n",
++ mac[0],mac[1],mac[2],mac[3],mac[4],mac[5],
++ card->dev_name,result);
++ }
++ return result;
++ } else
+ return qeth_send_setdelipm(card,ip,mac,IPA_CMD_DELIPM,ip_vers);
+ }
+
+@@ -3771,8 +4174,8 @@
+ le is last entry */
+ char dbf_text[15];
+ int result;
+- __u8 netmask[16]={0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+- 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
++ __u8 netmask[16]={0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00};
+ qeth_vipa_entry_t *priv_add_list=NULL;
+ qeth_vipa_entry_t *priv_del_list=NULL;
+
+@@ -3790,6 +4193,7 @@
+ GFP_KERNEL);
+ if (ne) {
+ ne->version=e->version;
++ ne->flag=e->flag;
+ memcpy(ne->ip,e->ip,16);
+ ne->next=priv_add_list;
+ priv_add_list=ne;
+@@ -3821,6 +4225,7 @@
+ GFP_KERNEL);
+ if (ne) {
+ ne->version=e->version;
++ ne->flag=e->flag;
+ memcpy(ne->ip,e->ip,16);
+ ne->next=priv_del_list;
+ priv_del_list=ne;
+@@ -3852,7 +4257,7 @@
+ sprintf(dbf_text,"%4x%4x",card->irq0,result);
+ QETH_DBF_TEXT2(0,trace,dbf_text);
+ if (priv_add_list->version==4) {
+- PRINT_ERR("going to leave vipa/rxip %08x" \
++ PRINT_ERR("going to leave vipa/rxip x%08x " \
+ "unset...\n",
+ *((__u32*)&priv_add_list->ip[0]));
+ sprintf(dbf_text,"%08x",
+@@ -4214,7 +4619,8 @@
+ sprintf(dbf_text,"stim%4x",card->irq0);
+ QETH_DBF_TEXT3(0,trace,dbf_text);
+
+- if (qeth_is_supported(IPA_MULTICASTING)) {
++ if (qeth_is_supported(IPA_MULTICASTING) ||
++ (card->options.layer2 == DO_LAYER2)) {
+ addr=card->ip_mc_current_state.ipm_ifa;
+ while (addr) {
+ if (!qeth_is_ipma_in_list(addr,card->
+@@ -4404,20 +4810,127 @@
+ }
+ #endif /* QETH_IPV6 */
+
++/* FIXME: new values for 10gig */
++static int mdio_read(struct net_device *dev, int phy_id, int regnum)
++{
++ int ret_val=0;
++ qeth_card_t *card = (qeth_card_t*)dev->priv;
++
++ switch(regnum){
++ case MII_BMCR: /* Basic mode control register */
++ /* XXX Get real values from card */
++ ret_val = BMCR_FULLDPLX;
++
++ if ( ( card->link_type ==
++ QETH_MPC_LINK_TYPE_GIGABIT_ETHERNET ) ||
++ ( card->link_type ==
++ QETH_MPC_LINK_TYPE_10GIG_ETHERNET ) )
++ ret_val |= BMCR_SPEED1000;
++ else
++ ret_val |= BMCR_SPEED100;
++ break;
++
++ case MII_BMSR: /* Basic mode status register */
++ /* XXX Get real values from card */
++ ret_val = BMSR_ERCAP | BMSR_ANEGCOMPLETE |
++ BMSR_LSTATUS | BMSR_10HALF | BMSR_10FULL |
++ BMSR_100HALF | BMSR_100FULL | BMSR_100BASE4;
++ if ( ( card->link_type ==
++ QETH_MPC_LINK_TYPE_GIGABIT_ETHERNET ) ||
++ ( card->link_type ==
++ QETH_MPC_LINK_TYPE_10GIG_ETHERNET ) )
++ ret_val |= BMSR_EXTSTATUS;
++
++ break;
++
++ case MII_EXTSTATUS: /* Extended status register */
++ if ( ( card->link_type ==
++ QETH_MPC_LINK_TYPE_GIGABIT_ETHERNET ) ||
++ ( card->link_type ==
++ QETH_MPC_LINK_TYPE_10GIG_ETHERNET ) )
++ ret_val = EXTSTATUS_1000TFULL |
++ EXTSTATUS_1000THALF |
++ EXTSTATUS_1000XFULL |
++ EXTSTATUS_1000XHALF;
++ else
++ ret_val = 0;
++ break;
++
++ case MII_PHYSID1: /* PHYS ID 1 */
++ ret_val = ( dev->dev_addr[0] << 16 ) |
++ ( dev->dev_addr[1] << 8) | dev->dev_addr[2];
++ ret_val = (ret_val >> 5) & 0xFFFF;
++ break;
++
++ case MII_PHYSID2: /* PHYS ID 2 */
++ ret_val = (dev->dev_addr[2] << 10) & 0xFFFF;
++ break;
++
++ case MII_ADVERTISE: /* Advertisement control reg */
++ ret_val = ADVERTISE_ALL;
++ break;
++
++ case MII_LPA: /* Link partner ability reg */
++ ret_val = LPA_10HALF | LPA_10FULL | LPA_100HALF |
++ LPA_100FULL | LPA_100BASE4 | LPA_LPACK;
++ break;
++
++ case MII_EXPANSION: /* Expansion register */
++ ret_val = 0;
++ break;
++
++ default:
++ ret_val = 0;
++ break;
++ }
++
++ return ret_val;
++}
++
++static void mdio_write(struct net_device *dev,int phy_id,int regnum,int value)
++{
++	/* writing MII registers is not yet implemented */
++
++ switch(regnum){
++ case MII_BMCR: /* Basic mode control register */
++ case MII_BMSR: /* Basic mode status register */
++ case MII_ADVERTISE: /* Advertisement control reg */
++ case MII_LPA: /* Link partner ability reg */
++ case MII_EXPANSION: /* Expansion register */
++ case MII_DCOUNTER: /* Disconnect counter */
++ case MII_FCSCOUNTER: /* False carrier counter */
++ case MII_NWAYTEST: /* N-way auto-neg test reg */
++ case MII_RERRCOUNTER: /* Receive error counter */
++ case MII_SREVISION: /* Silicon revision */
++ case MII_LBRERROR: /* Lpback, rx, bypass error */
++ case MII_PHYADDR: /* PHY address */
++ case MII_TPISTATUS: /* TPI status for 10mbps */
++ case MII_NCONFIG: /* Network interface config */
++ case MII_PHYSID1: /* PHYS ID 1 */
++ case MII_PHYSID2: /* PHYS ID 2 */
++ case MII_RESV1: /* Reserved... */
++ case MII_RESV2: /* Reserved... */
++ default:
++ break;
++ }
++}
++
+ #define QETH_STANDARD_RETVALS \
+ ret_val=-EIO; \
+ if (result==IPA_REPLY_SUCCESS) ret_val=0; \
++ if (result==-EFAULT) ret_val=-EFAULT; \
+ if (result==IPA_REPLY_FAILED) ret_val=-EIO; \
+ if (result==IPA_REPLY_OPNOTSUPP) ret_val=-EOPNOTSUPP
+
+ static int qeth_do_ioctl(struct net_device *dev,struct ifreq *rq,int cmd)
+ {
+- char *data;
+ int result,i,ret_val;
+ int version=4;
+ qeth_card_t *card;
+ char dbf_text[15];
+- char buff[100];
++ char data[100];
++ struct mii_ioctl_data* mii_data =
++ (struct mii_ioctl_data*)&(rq->ifr_ifru.ifru_data);
+
+ card=(qeth_card_t*)dev->priv;
+
+@@ -4427,20 +4940,20 @@
+ QETH_DBF_TEXT2(0,trace,dbf_text);
+ QETH_DBF_HEX2(0,trace,&rq,sizeof(void*));
+
+- if ((cmd<SIOCDEVPRIVATE) || (cmd>SIOCDEVPRIVATE+5))
+- return -EOPNOTSUPP;
+- copy_from_user(buff,rq->ifr_ifru.ifru_data,sizeof(buff));
+- data=buff;
+-
+ if ( (!atomic_read(&card->is_registered))||
+ (!atomic_read(&card->is_hardsetup))||
+ (atomic_read(&card->is_gone)) ) return -ENODEV;
+
+ if (atomic_read(&card->shutdown_phase)) return -ENODEV;
+
+- my_spin_lock(&card->ioctl_lock);
++ if (down_interruptible ( &card->ioctl_sem ) )
++ return -ERESTARTSYS;
+
+- if (atomic_read(&card->shutdown_phase)) return -ENODEV;
++ if (atomic_read(&card->shutdown_phase)) {
++ ret_val=-ENODEV;
++ goto out;
++ }
++
+ if ( (!atomic_read(&card->is_registered))||
+ (!atomic_read(&card->is_hardsetup))||
+ (atomic_read(&card->is_gone)) ) {
+@@ -4449,71 +4962,154 @@
+ }
+
+ switch (cmd) {
+- case SIOCDEVPRIVATE+0:
+- if (!capable(CAP_NET_ADMIN)) {
++ case SIOCDEVPRIVATE+0:
++ case SIOC_QETH_ARP_SET_NO_ENTRIES:
+ ret_val=-EPERM;
++ if (!capable(CAP_NET_ADMIN) ||
++ (card->options.layer2 == DO_LAYER2)) {
+ break;
+ }
+- result=qeth_send_setassparms(card,version,IPA_ARP_PROCESSING,
++ result=qeth_send_setassparms(card,version,
++ IPA_ARP_PROCESSING,
+ IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
+ rq->ifr_ifru.ifru_ivalue,4);
+ QETH_STANDARD_RETVALS;
+ if (result==3) ret_val=-EINVAL;
+ break;
+ case SIOCDEVPRIVATE+1:
+- if (!capable(CAP_NET_ADMIN)) {
++ case SIOC_QETH_ARP_QUERY_INFO:
++ if (!capable(CAP_NET_ADMIN) ||
++ (card->options.layer2 == DO_LAYER2)) {
+ ret_val=-EPERM;
+ break;
+ }
+- result = qeth_queryarp(card,rq,version,IPA_ARP_PROCESSING,
++
++ if (copy_from_user(data,rq->ifr_ifru.ifru_data,
++ sizeof(data))) {
++ ret_val = -EFAULT;
++ break;
++ }
++
++ result = qeth_queryarp(card,rq,version,
++ IPA_ARP_PROCESSING,
+ IPA_CMD_ASS_ARP_QUERY_INFO,data,4);
+
+ QETH_STANDARD_RETVALS;
+ break;
+ case SIOCDEVPRIVATE+2:
+- if (!capable(CAP_NET_ADMIN)) {
++ case SIOC_QETH_ARP_ADD_ENTRY:
++ if (!capable(CAP_NET_ADMIN) ||
++ (card->options.layer2 == DO_LAYER2)) {
+ ret_val=-EPERM;
+ break;
+ }
++
++ if (copy_from_user(data,rq->ifr_ifru.ifru_data,
++ sizeof(data))) {
++ ret_val = -EFAULT;
++ break;
++ }
++
+ for (i=12;i<24;i++) if (data[i]) version=6;
+- result=qeth_send_setassparms(card,version,IPA_ARP_PROCESSING,
++ result=qeth_send_setassparms(card,version,
++ IPA_ARP_PROCESSING,
+ IPA_CMD_ASS_ARP_ADD_ENTRY,
+ (long)data,56);
+ QETH_STANDARD_RETVALS;
+ break;
+ case SIOCDEVPRIVATE+3:
+- if (!capable(CAP_NET_ADMIN)) {
++ case SIOC_QETH_ARP_REMOVE_ENTRY:
++ if (!capable(CAP_NET_ADMIN) ||
++ (card->options.layer2 == DO_LAYER2)) {
+ ret_val=-EPERM;
+ break;
+ }
+- for (i=4;i<12;i++) if (data[i]) version=6;
+- result=qeth_send_setassparms(card,version,IPA_ARP_PROCESSING,
++
++ if (copy_from_user(data,rq->ifr_ifru.ifru_data,
++ sizeof(data))) {
++ ret_val = -EFAULT;
++ break;
++ }
++
++ for (i=12;i<24;i++) if (data[i]) version=6;
++ result=qeth_send_setassparms(card,version,
++ IPA_ARP_PROCESSING,
+ IPA_CMD_ASS_ARP_REMOVE_ENTRY,
+ (long)data,16);
+ QETH_STANDARD_RETVALS;
+ break;
+ case SIOCDEVPRIVATE+4:
+- if (!capable(CAP_NET_ADMIN)) {
++ case SIOC_QETH_ARP_FLUSH_CACHE:
++ if (!capable(CAP_NET_ADMIN) ||
++ (card->options.layer2 == DO_LAYER2)) {
+ ret_val=-EPERM;
+ break;
+ }
+- result=qeth_send_setassparms(card,version,IPA_ARP_PROCESSING,
++
++ result=qeth_send_setassparms(card,version,
++ IPA_ARP_PROCESSING,
+ IPA_CMD_ASS_ARP_FLUSH_CACHE,
+ 0,0);
+ QETH_STANDARD_RETVALS;
+ break;
+ case SIOCDEVPRIVATE+5:
+- result=qeth_send_snmp_control(card,rq,IPA_CMD_SETADAPTERPARMS,
++ case SIOC_QETH_ADP_SET_SNMP_CONTROL:
++ if (copy_from_user(data,rq->ifr_ifru.ifru_data,
++ sizeof(data))) {
++ ret_val = -EFAULT;
++ break;
++ }
++
++ result=qeth_send_snmp_control(card,rq,
++ IPA_CMD_SETADAPTERPARMS,
+ IPA_SETADP_SET_SNMP_CONTROL,
+ data,4);
+ QETH_STANDARD_RETVALS;
+ break;
++ case SIOCDEVPRIVATE+6:
++ case SIOC_QETH_GET_CARD_TYPE:
++ if (!card->is_guest_lan &&
++ (card->type == QETH_CARD_TYPE_OSAE))
++ ret_val = 1;
++ else
++ ret_val = 0;
++ break;
++ case SIOCGMIIPHY:
++		mii_data->phy_id = 0; /* for now we report a single
++					 PHY with ID 0 */
++ ret_val = 0;
++ break;
+
++ case SIOCGMIIREG:
++ if(mii_data->phy_id != 0) {
++ ret_val = -EINVAL;
++ break;
++ }
++ mii_data->val_out = mdio_read(dev, mii_data->phy_id,
++ mii_data->reg_num);
++ ret_val = 0;
++ break;
++
++ case SIOCSMIIREG:
++ if (!capable(CAP_NET_ADMIN)) {
++ ret_val=-EPERM;
++ break;
++ }
++ if(mii_data->phy_id != 0) {
++ ret_val = -EINVAL;
++ break;
++ }
++ mdio_write(dev, mii_data->phy_id,
++ mii_data->reg_num, mii_data->val_in );
++ ret_val = 0;
++ break;
++
+ default:
+- return -EOPNOTSUPP;
++ ret_val=-EOPNOTSUPP;
++ goto out;
+ }
+ out:
+- my_spin_unlock(&card->ioctl_lock);
++ up (&card->ioctl_sem);
+
+ sprintf(dbf_text,"ret=%4x",ret_val);
+ QETH_DBF_TEXT2(0,trace,dbf_text);
+@@ -4710,7 +5306,8 @@
+ }
+ #ifdef QETH_VLAN
+ QETH_DBF_TEXT4(0,trace,"tovipm6s");
+- if ( (qeth_is_supported(IPA_FULL_VLAN)) &&
++ if ( ((card->options.layer2 == DO_LAYER2) ||
++ (qeth_is_supported(IPA_FULL_VLAN))) &&
+ (atomic_read(&card->is_open)) ) {
+ card_group = (struct vlan_group *) card->vlangrp;
+ if (card_group) for (i=0;i<VLAN_GROUP_ARRAY_LEN;i++) {
+@@ -4785,6 +5382,53 @@
+ }
+ #endif /* QETH_IPV6 */
+
++#ifdef QETH_VLAN
++/* ATT: not a very readable order: bytes count up from index 0, but within
++   each byte the bits count from the msb down (0x80 is bit 0) */
++static void qeth_set_bit(__u8 *ptr,int i)
++{
++ ptr[i/8]|=0x80>>(i%8);
++}
++
++static int qeth_get_bit(__u8 *ptr,int i)
++{
++ return (ptr[i/8]&(0x80>>(i%8)))?1:0;
++}
++
++static void qeth_takeover_vlans(qeth_card_t *card)
++{
++ int i;
++
++ /* copy new to current */
++ memcpy(&card->vlans_current[0],
++ &card->vlans_new[0],
++ VLAN_GROUP_ARRAY_LEN/(8*sizeof(__u8)));
++
++ /* clear new vector */
++ memset(&card->vlans_new[0],0,VLAN_GROUP_ARRAY_LEN/(8*sizeof(__u8)));
++
++ for (i=0;i<VLAN_GROUP_ARRAY_LEN;i++) {
++ if ( (card->vlangrp) &&
++ (card->vlangrp->vlan_devices[i]) )
++ qeth_set_bit(&card->vlans_new[0],i);
++ }
++}
++
++static void qeth_set_vlans(qeth_card_t *card)
++{
++ int i;
++
++ for (i=0;i<VLAN_GROUP_ARRAY_LEN;i++) {
++ if ( (qeth_get_bit(&card->vlans_current[0],i)) &&
++ (!qeth_get_bit(&card->vlans_new[0],i)) )
++ qeth_send_setdel_vlan(card,i,IPA_CMD_DELVLAN);
++ if ( (!qeth_get_bit(&card->vlans_current[0],i)) &&
++ (qeth_get_bit(&card->vlans_new[0],i)) )
++ qeth_send_setdel_vlan(card,i,IPA_CMD_SETVLAN);
++ }
++}
++#endif
++
+ static void qeth_clear_ifa4_list(struct in_ifaddr **ifa_list)
+ {
+ struct in_ifaddr *ifa;
+@@ -4962,7 +5606,8 @@
+
+ #ifdef QETH_VLAN
+ QETH_DBF_TEXT4(0,trace,"to-vipms");
+- if ( (qeth_is_supported(IPA_FULL_VLAN)) &&
++ if ( ((card->options.layer2 == DO_LAYER2) ||
++ (qeth_is_supported(IPA_FULL_VLAN))) &&
+ (atomic_read(&card->is_open)) ) {
+ card_group = (struct vlan_group *) card->vlangrp;
+ if (card_group) for (i=0;i<VLAN_GROUP_ARRAY_LEN;i++) {
+@@ -5192,7 +5837,51 @@
+ }
+ }
+ }
++static int qeth_do_layer2_mac_stuff(qeth_card_t *card)
++{
++ int result;
++ char dbf_text[15];
+
++ sprintf(dbf_text,"dols%4x",card->irq0);
++ QETH_DBF_TEXT4(0,trace,dbf_text);
++
++ if (atomic_read(&card->mac_registered))
++ return 0;
++ /* get mac addr */
++ /* we read the mac regardless of the return code */
++ result=qeth_send_setadapterparms_change_addr(card,
++ IPA_SETADP_ALTER_MAC_ADDRESS,
++ CHANGE_ADDR_READ_MAC,card->dev->dev_addr,
++ OSA_ADDR_LEN);
++ if (result) {
++ PRINT_WARN("couldn't get Layer 2 MAC address on " \
++ "irq 0x%x: x%x\n",card->irq0,result);
++ QETH_DBF_TEXT1(0,trace,"L2NMCADD");
++ sprintf(dbf_text,"%4x%4x",card->irq0,result);
++ QETH_DBF_TEXT1(1,trace,dbf_text);
++ return result;
++ } else {
++ QETH_DBF_HEX2(0,setup,card->dev->dev_addr,
++ __max(OSA_ADDR_LEN,QETH_DBF_SETUP_LEN));
++ QETH_DBF_HEX3(0,trace,card->dev->dev_addr,
++ __max(OSA_ADDR_LEN,QETH_DBF_TRACE_LEN));
++ }
++ result=qeth_send_setdelmac(card,&card->dev->dev_addr[0],
++ IPA_CMD_SETVMAC);
++ if (result) {
++ PRINT_WARN("couldn't register MAC address on " \
++ "irq 0x%x: x%x\n",card->irq0,result);
++ QETH_DBF_TEXT1(0,trace,"NOREGMAC");
++ sprintf(dbf_text,"%4x%4x",card->irq0,result);
++ QETH_DBF_TEXT1(1,trace,dbf_text);
++ atomic_set(&card->mac_registered,0);
++ } else {
++ atomic_set(&card->mac_registered,1);
++ }
++ return 0; /* it's ok if SETVMAC fails -- we'll track that in
++ mac_registered */
++}
++
+ static int qeth_softsetup_card(qeth_card_t *card,int wait_for_lock)
+ {
+ int result;
+@@ -5240,13 +5929,17 @@
+ "failure -- please check the " \
+ "network, plug in the cable or " \
+ "enable the OSA port":
++ (result==0xf080)?
++ "startlan disabled (VM: LAN " \
++ "is offline for functions " \
++					"requiring LAN access).":
+ "unknown return code");
+ sprintf(dbf_text,"stln%4x",result);
+ QETH_DBF_TEXT2(0,trace,dbf_text);
+ atomic_set(&card->is_softsetup,0);
+ atomic_set(&card->is_startlaned,0);
+ /* do not return an error */
+- if (result==0xe080) {
++ if ((result==0xe080)||(result==0xf080)) {
+ result=0;
+ }
+ goto out;
+@@ -5255,6 +5948,20 @@
+ }
+ netif_wake_queue(card->dev);
+
++ if (card->options.layer2 == DO_LAYER2) {
++ card->dev->features |=
++ NETIF_F_HW_VLAN_TX |
++ NETIF_F_HW_VLAN_RX;
++ card->dev->flags|=IFF_MULTICAST|IFF_BROADCAST;
++ card->broadcast_capable=1;
++ result=qeth_do_layer2_mac_stuff(card);
++ if (result) {
++ atomic_set(&card->is_softsetup,0);
++ return result;
++ }
++ atomic_set(&card->is_softsetup,1);
++ goto layer2_1;
++ } else
+ qeth_do_setadapterparms_stuff(card);
+
+ if (!qeth_is_supported(IPA_ARP_PROCESSING)) {
+@@ -5372,7 +6079,8 @@
+ QETH_DBF_TEXT2(0,trace,dbf_text);
+ atomic_set(&card->is_softsetup,0);
+ /* do not return an error */
+- if (result==0xe080) {
++ if ((result==0xe080)||
++ (result==0xf080)) {
+ result=0;
+ }
+ goto out;
+@@ -5390,9 +6098,10 @@
+ goto out;
+ }
+
+- sprintf(dbf_text,"%4x%4x",card->ipa6_supported,
+- card->ipa6_enabled);
++ sprintf(dbf_text,"%8x",card->ipa6_supported);
+ QETH_DBF_TEXT2(0,trace,dbf_text);
++ sprintf(dbf_text,"%8x",card->ipa6_enabled);
++ QETH_DBF_TEXT2(0,trace,dbf_text);
+ QETH_DBF_TEXT2(0,trace,"enaipv46");
+ result=qeth_send_setassparms_simple_with_data(
+ card,IPA_IPv6,IPA_CMD_ASS_START,3);
+@@ -5461,8 +6170,19 @@
+ goto go_on_filt;
+ }
+ card->dev->flags|=IFF_BROADCAST;
+- card->broadcast_capable=1;
++ card->broadcast_capable=BROADCAST_WITH_ECHO;
+
++ result=qeth_send_setassparms_simple_with_data(
++ card,IPA_FILTERING,IPA_CMD_ASS_ENABLE,1);
++ sprintf(dbf_text,"Flt3%4x",result);
++ QETH_DBF_TEXT2(0,trace,dbf_text);
++ QETH_DBF_TEXT2(0,setup,dbf_text);
++ if (!result) {
++ PRINT_INFO("Broadcast packets will not be " \
++ "echoed back on %s.\n",
++ card->dev_name);
++ card->broadcast_capable=BROADCAST_WITHOUT_ECHO;
++ }
+ }
+ go_on_filt:
+ if (card->options.checksum_type==HW_CHECKSUMMING) {
+@@ -5494,7 +6214,7 @@
+ result=qeth_send_setassparms_simple_with_data(
+ card,IPA_INBOUND_CHECKSUM,
+ IPA_CMD_ASS_ENABLE,
+- IPA_CHECKSUM_ENABLE_MASK);
++ card->csum_enable_mask);
+ if (result) {
+ PRINT_WARN("Could not enable inbound " \
+ "checksumming on %s: " \
+@@ -5571,7 +6291,14 @@
+
+ #ifdef QETH_IPV6
+ if (atomic_read(&card->enable_routing_attempts6)) {
+- if (card->options.routing_type6) {
++ /* for OSAs that can't do v6 multicast routing, we don't try */
++ if ( (card->type==QETH_CARD_TYPE_OSAE) &&
++ ( (card->options.routing_type6&ROUTER_MASK) ==
++ MULTICAST_ROUTER) &&
++ (!qeth_is_supported6(IPA_OSA_MC_ROUTER_AVAIL)) ) {
++ atomic_set(&card->enable_routing_attempts6,0);
++ atomic_set(&card->rt6fld,0);
++ } else if (card->options.routing_type6) {
+ sprintf(dbf_text,"strtg6%2x",
+ card->options.routing_type6);
+ QETH_DBF_TEXT2(0,trace,dbf_text);
+@@ -5625,13 +6352,22 @@
+ }
+ #endif /* QETH_IPV6 */
+
+- QETH_DBF_TEXT2(0,trace,"delvipa");
+- qeth_set_vipas(card,0);
++ if (card->options.layer2 == DONT_LAYER2) {
++ QETH_DBF_TEXT2(0,trace,"delvipa");
++ qeth_set_vipas(card,0);
++ }
++layer2_1:
+ QETH_DBF_TEXT2(0,trace,"toip/ms");
+ qeth_takeover_ip_ipms(card);
+ #ifdef QETH_IPV6
+ qeth_takeover_ip_ipms6(card);
+ #endif /* QETH_IPV6 */
++ if (card->options.layer2 == DO_LAYER2) {
++#ifdef QETH_VLAN
++ qeth_takeover_vlans(card);
++#endif
++ goto layer2_2;
++ }
+ QETH_DBF_TEXT2(0,trace,"setvipa");
+ qeth_set_vipas(card,1);
+
+@@ -5644,7 +6380,12 @@
+ atomic_set(&card->is_softsetup,0);
+ goto out;
+ }
+-
++layer2_2:
++#ifdef QETH_VLAN
++ if (card->options.layer2 == DO_LAYER2) {
++ qeth_set_vlans(card);
++ }
++#endif
+ result=qeth_setipms(card,use_setip_retries);
+ if (result) { /* by now, qeth_setipms does not return errors */
+ PRINT_WARN("couldn't set up multicast IPs on %s: 0x%x\n",
+@@ -5753,9 +6494,12 @@
+ sprintf(dbf_text,"PROB%4x",i);
+ QETH_DBF_TEXT2(0,trace,dbf_text);
+
+- PRINT_WARN("recovery was scheduled on irq 0x%x (%s) with " \
+- "problem 0x%x\n",
+- card->irq0,card->dev_name,i);
++ if (i!=PROBLEM_TX_TIMEOUT) {
++ PRINT_WARN("recovery was scheduled on irq 0x%x (%s) with " \
++ "problem 0x%x\n",
++ card->irq0,card->dev_name,i);
++ }
++
+ switch (i) {
+ case PROBLEM_RECEIVED_IDX_TERMINATE:
+ if (atomic_read(&card->in_recovery))
+@@ -5802,7 +6546,7 @@
+ }
+ }
+
+-static void qeth_schedule_recovery(qeth_card_t *card)
++static inline void qeth_schedule_recovery(qeth_card_t *card)
+ {
+ if (card) {
+ INIT_LIST_HEAD(&card->tqueue.list);
+@@ -5858,7 +6602,7 @@
+ return;
+ }
+ sbalf15=(card->inbound_qdio_buffers[(first_element+count-1)&
+- QDIO_MAX_BUFFERS_PER_Q].
++ (QDIO_MAX_BUFFERS_PER_Q-1)].
+ element[15].flags)&&0xff;
+ PRINT_STUPID("inbound qdio transfer error on irq 0x%04x. " \
+ "qdio_error=0x%x (more than one: %c), " \
+@@ -5920,6 +6664,9 @@
+ card=(qeth_card_t *)card_ptr;
+
+ if (status&QDIO_STATUS_LOOK_FOR_ERROR) {
++ sbalf15=(card->outbound_ringbuffer[queue]->buffer[
++ (first_element+count-1)&
++ (QDIO_MAX_BUFFERS_PER_Q-1)].element[15].flags)&0xff;
+ if (status&QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
+ problem=PROBLEM_ACTIVATE_CHECK_CONDITION;
+ PRINT_WARN("activate queues on irq 0x%x: " \
+@@ -5937,9 +6684,29 @@
+ qeth_schedule_recovery(card);
+ goto out;
+ }
+- sbalf15=(card->outbound_ringbuffer[queue]->buffer[
+- (first_element+count-1)&
+- QDIO_MAX_BUFFERS_PER_Q].element[15].flags)&0xff;
++ if ( (siga_error==0x01) ||
++ (siga_error==(0x02|QDIO_SIGA_ERROR_B_BIT_SET)) ||
++ (siga_error==0x03) ) {
++ sprintf(dbf_text,"BS%4x%2x",card->irq0,queue);
++ QETH_DBF_TEXT2(0,trace,dbf_text);
++ QETH_DBF_TEXT2(0,qerr,dbf_text);
++ QETH_DBF_TEXT2(1,setup,dbf_text);
++ sprintf(dbf_text,"%2x%2x%2x%2x",
++ first_element+count-1,
++ siga_error,qdio_error,sbalf15);
++ QETH_DBF_TEXT2(1,trace,dbf_text);
++ QETH_DBF_TEXT2(1,qerr,dbf_text);
++ PRINT_ERR("Outbound queue x%x on irq x%x (%s); " \
++ "errs: siga: x%x, qdio: x%x, flags15: " \
++ "x%x. The device will be taken down.\n",
++ queue,card->irq0,card->dev_name,
++ siga_error,qdio_error,sbalf15);
++ netif_stop_queue(card->dev);
++ qeth_set_dev_flag_norunning(card);
++ atomic_set(&card->problem,PROBLEM_BAD_SIGA_RESULT);
++ qeth_schedule_recovery(card);
++ goto out;
++ }
+ PRINT_STUPID("outbound qdio transfer error on irq %04x, " \
+ "queue=%i. qdio_error=0x%x (more than one: %c)," \
+ " siga_error=0x%x (more than one: %c), " \
+@@ -5988,7 +6755,7 @@
+ (&card->outbound_used_buffers[queue])<=
+ LOW_WATERMARK_PACK);
+ /* first_element is the last buffer that we got back
+- * from hydra */
++ * from OSA */
+ if (switch_state||last_pci_hit) {
+ *((__u16*)(&dbf_text2[6]))=card->irq0;
+ QETH_DBF_HEX3(0,trace,dbf_text2,QETH_DBF_TRACE_LEN);
+@@ -6010,6 +6777,14 @@
+ if (switch_state)
+ card->send_state[queue]=
+ SEND_STATE_DONT_PACK;
++
++ /* reset the last_pci position to avoid
++			 * races when we get close to packing again
++			 * immediately (in order to never lose
++			 * a PCI) */
++ atomic_set(&card->last_pci_pos[queue],
++ (-2*QDIO_MAX_BUFFERS_PER_Q));
++
+ netif_wake_queue(card->dev);
+ atomic_set(&card->outbound_ringbuffer_lock[
+ queue],QETH_LOCK_UNLOCKED);
+@@ -6137,8 +6912,9 @@
+ goto wakeup_out;
+ }
+
+- if (!IS_IPA(card->dma_stuff->recbuf)||
+- IS_IPA_REPLY(card->dma_stuff->recbuf)) {
++ if ( (!IS_IPA(card->dma_stuff->recbuf))||
++ (IS_IPA(card->dma_stuff->recbuf)&&
++ IS_IPA_REPLY(card->dma_stuff->recbuf)) ) {
+ /* setup or unknown data */
+ result = qeth_look_for_arp_data(card);
+ switch (result) {
+@@ -6387,10 +7163,21 @@
+ static void qeth_softshutdown(qeth_card_t *card)
+ {
+ char dbf_text[15];
++ int result;
+
+ sprintf(dbf_text,"ssht%4x",card->irq0);
+ QETH_DBF_TEXT3(0,trace,dbf_text);
+-
++ if (card->options.layer2 == DO_LAYER2) {
++ result=qeth_send_setdelmac(card,&card->dev->dev_addr[0],
++ IPA_CMD_DELVMAC);
++ if (result) {
++ PRINT_WARN("couldn't de-register MAC address on " \
++ "irq 0x%x: x%x\n",card->irq0,result);
++ QETH_DBF_TEXT1(0,trace,"NODRGMAC");
++ sprintf(dbf_text,"%4x%4x",card->irq0,result);
++ QETH_DBF_TEXT1(1,trace,dbf_text);
++ }
++ }
+ qeth_send_stoplan(card);
+ }
+
+@@ -6669,6 +7456,7 @@
+ atomic_set(&card->is_registered,0);
+ }
+
++ if (card->options.layer2 == DONT_LAYER2)
+ qeth_put_unique_id(card);
+
+ QETH_DBF_TEXT2(0,trace,"clrcard");
+@@ -6716,15 +7504,6 @@
+ qeth_start_softsetup_thread(card);
+ }
+
+-static int qeth_set_mac_address(struct net_device *dev,void *addr)
+-{
+- char dbf_text[15];
+-
+- sprintf(dbf_text,"stmc%4x",((qeth_card_t *)dev->priv)->irq0);
+- QETH_DBF_TEXT2(0,trace,dbf_text);
+- return -EOPNOTSUPP;
+-}
+-
+ static int qeth_neigh_setup(struct net_device *dev,struct neigh_parms *np)
+ {
+ char dbf_text[15];
+@@ -6870,6 +7649,10 @@
+ card->portname_required=
+ ((!QETH_IDX_NO_PORTNAME_REQUIRED(card->dma_stuff->recbuf))&&
+ (card->type==QETH_CARD_TYPE_OSAE));
++ /* however, as the portname indication of OSA is wrong, we have to
++ * do this: */
++ card->portname_required=(card->type==QETH_CARD_TYPE_OSAE);
++
+
+ memcpy(&temp,QETH_IDX_ACT_FUNC_LEVEL(card->dma_stuff->recbuf),2);
+ if (temp!=qeth_peer_func_level(card->func_level)) {
+@@ -7111,11 +7894,18 @@
+ __u16 len;
+ __u8 link_type;
+ int result;
++ char prot_type;
+ char dbf_text[15];
+
+ memcpy(card->send_buf,ULP_ENABLE,ULP_ENABLE_SIZE);
+
+ *(QETH_ULP_ENABLE_LINKNUM(card->send_buf))=(__u8)card->options.portno;
++	/* is it layer2 or tcpip? */
++ if (card->options.layer2 == DO_LAYER2)
++ prot_type = QETH_ULP_ENABLE_PROT_LAYER2;
++ else
++ prot_type = QETH_ULP_ENABLE_PROT_TCPIP;
++ memcpy(QETH_ULP_ENABLE_PROT_TYPE(card->send_buf),&prot_type,1);
+
+ memcpy(QETH_ULP_ENABLE_DEST_ADDR(card->send_buf),
+ &card->token.cm_connection_r,QETH_MPC_TOKEN_LENGTH);
+@@ -7220,12 +8010,17 @@
+ static int qeth_qdio_establish(qeth_card_t *card)
+ {
+ int result;
+- char adapter_area[15];
++ char *adapter_area;
+ char dbf_text[15];
+ void **input_array,**output_array,**ptr;
+ int i,j;
+ qdio_initialize_t init_data;
+
++ adapter_area=vmalloc(QDIO_MAX_BUFFERS_PER_Q*sizeof(char));
++ if (!adapter_area) return -ENOMEM;
++
++ memset(adapter_area,0,QDIO_MAX_BUFFERS_PER_Q*sizeof(char));
++
+ adapter_area[0]=_ascebc['P'];
+ adapter_area[1]=_ascebc['C'];
+ adapter_area[2]=_ascebc['I'];
+@@ -7235,7 +8030,10 @@
+ *((unsigned int*)(&adapter_area[12]))=PCI_TIMER_VALUE;
+
+ input_array=vmalloc(QDIO_MAX_BUFFERS_PER_Q*sizeof(void*));
+- if (!input_array) return -ENOMEM;
++ if (!input_array) {
++ vfree(adapter_area);
++ return -ENOMEM;
++ }
+ ptr=input_array;
+ for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) {
+ *ptr=(void*)virt_to_phys
+@@ -7247,6 +8045,7 @@
+ card->no_queues);
+ if (!output_array) {
+ vfree(input_array);
++ vfree(adapter_area);
+ return -ENOMEM;
+ }
+ ptr=output_array;
+@@ -7285,6 +8084,7 @@
+
+ vfree(input_array);
+ vfree(output_array);
++ vfree(adapter_area);
+
+ sprintf(dbf_text,"qde=%4i",result);
+ QETH_DBF_TEXT3(0,trace,dbf_text);
+@@ -7349,19 +8149,17 @@
+ for (;tmp&&(!result);tmp=tmp->next) {
+ if (atomic_read(&tmp->shutdown_phase))
+ continue;
+- if (dev==tmp->dev) {
++ if (dev==tmp->dev)
+ result=QETH_VERIFY_IS_REAL_DEV;
+- }
+ #ifdef QETH_VLAN
+ /* check all vlan devices */
+ vlan_grp = (struct vlan_group *) tmp->vlangrp;
+ if (vlan_grp) {
+ for (i=0;i<VLAN_GROUP_ARRAY_LEN;i++) {
+- if (vlan_grp->vlan_devices[i]==dev) {
++ if (vlan_grp->vlan_devices[i]==dev)
+ result=QETH_VERIFY_IS_VLAN_DEV;
+ }
+ }
+- }
+
+ #endif
+ }
+@@ -7388,7 +8186,111 @@
+ return result;
+ }
+
++static int qeth_set_mac_address(struct net_device *dev,void *p)
++{
++ char dbf_text[15];
++ struct sockaddr *addr=p;
++ qeth_card_t *card;
++ int result;
++#ifdef QETH_VLAN
++ if (qeth_verify_dev(dev)!=QETH_VERIFY_IS_REAL_DEV) {
++ QETH_DBF_TEXT2(0,trace,"setmcINV");
++ return -EOPNOTSUPP;
++ }
++#endif
++ card=(qeth_card_t *)dev->priv;
++ if (card->options.layer2 != DO_LAYER2) {
++ QETH_DBF_TEXT2(0,trace,"setmc_l3");
++ PRINT_WARN("Setting MAC address on %s is not supported "
++ "in Layer 3 mode\n",dev->name);
++ return -EOPNOTSUPP;
++ }
++
++ sprintf(dbf_text,"stmc%4x",card->irq0);
++ QETH_DBF_TEXT2(0,trace,dbf_text);
++ QETH_DBF_TEXT2(0,setup,dbf_text);
++ QETH_DBF_HEX2(0,trace,addr->sa_data,6);
++ QETH_DBF_HEX2(0,setup,addr->sa_data,6);
++
++ if (atomic_read(&card->mac_registered)) {
++ result=qeth_send_setdelmac(card,&card->dev->dev_addr[0],
++ IPA_CMD_DELVMAC);
++ if (result) {
++ PRINT_WARN("couldn't de-register MAC address on " \
++ "irq 0x%x: x%x\n",card->irq0,result);
++ QETH_DBF_TEXT1(0,trace,"STMCFLDd");
++ sprintf(dbf_text,"%4x%4x",card->irq0,result);
++ QETH_DBF_TEXT1(1,trace,dbf_text);
++ return -EIO;
++ }
++ atomic_set(&card->mac_registered,0);
++ }
++
++ result=qeth_send_setdelmac(card,addr->sa_data,IPA_CMD_SETVMAC);
++ if (result) {
++ PRINT_WARN("couldn't register MAC address on " \
++ "irq 0x%x: x%x\n",card->irq0,result);
++ QETH_DBF_TEXT1(0,trace,"STMCFLDr");
++ sprintf(dbf_text,"%4x%4x",card->irq0,result);
++ QETH_DBF_TEXT1(1,trace,dbf_text);
++ return -EIO;
++ }
++ atomic_set(&card->mac_registered, 1);
++ memcpy(dev->dev_addr,addr->sa_data,ETH_ALEN);
++ PRINT_INFO("MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
++ "successfully registered on device %s\n",
++ dev->dev_addr[0],dev->dev_addr[1],dev->dev_addr[2],
++ dev->dev_addr[3],dev->dev_addr[4],dev->dev_addr[5],
++ dev->name);
++ return 0;
++}
++
++static void qeth_correct_routing_status(qeth_card_t *card)
++{
++ if (card->type==QETH_CARD_TYPE_IQD) {
++ /* if it's not a mc router, it's no router */
++ if ( (card->options.routing_type4 == PRIMARY_ROUTER) ||
++ (card->options.routing_type4 == SECONDARY_ROUTER)
+ #ifdef QETH_IPV6
++ ||
++ (card->options.routing_type6 == PRIMARY_ROUTER) ||
++ (card->options.routing_type6 == SECONDARY_ROUTER)
++#endif /* QETH_IPV6 */
++ ) {
++ PRINT_WARN("routing not applicable, reset " \
++ "routing status.\n");
++ card->options.routing_type4=NO_ROUTER;
++#ifdef QETH_IPV6
++ card->options.routing_type6=NO_ROUTER;
++#endif /* QETH_IPV6 */
++ }
++ card->options.do_prio_queueing=NO_PRIO_QUEUEING;
++ } else {
++ /* if it's a mc router, it's no router */
++ if ( (((!qeth_is_supported(IPA_OSA_MC_ROUTER_AVAIL))&&
++ card->options.routing_type4 == MULTICAST_ROUTER)) ||
++ (card->options.routing_type4 == PRIMARY_CONNECTOR) ||
++ (card->options.routing_type4 == SECONDARY_CONNECTOR)
++#ifdef QETH_IPV6
++ ||
++ (((!qeth_is_supported(IPA_OSA_MC_ROUTER_AVAIL))&&
++ card->options.routing_type6 == MULTICAST_ROUTER)) ||
++ (card->options.routing_type6 == PRIMARY_CONNECTOR) ||
++ (card->options.routing_type6 == SECONDARY_CONNECTOR)
++#endif /* QETH_IPV6 */
++ ) {
++ PRINT_WARN("routing not applicable, reset " \
++ "routing status. (Did you mean " \
++ "primary_router or secondary_router?)\n");
++ card->options.routing_type4=NO_ROUTER;
++#ifdef QETH_IPV6
++ card->options.routing_type6=NO_ROUTER;
++#endif /* QETH_IPV6 */
++ }
++ }
++}
++
++#ifdef QETH_IPV6
+ extern struct neigh_table arp_tbl;
+ int (*qeth_old_arp_constructor)(struct neighbour *);
+ static struct neigh_ops arp_direct_ops_template =
+@@ -7411,13 +8313,31 @@
+ char dbf_text[15];
+ struct net_device *dev = neigh->dev;
+ struct in_device *in_dev = in_dev_get(dev);
++ int is_qeth_device;
++ qeth_card_t *card;
++ int tweak_neighbour=1;
+
+ if (in_dev == NULL)
+ return -EINVAL;
+
+ QETH_DBF_TEXT4(0,trace,"arpconst");
+- if (!qeth_verify_dev(dev)) {
+
++ is_qeth_device=qeth_verify_dev(dev);
++	/* so if we're dealing with a qeth device, we've got to check
++	 * whether we're in layer 2 mode. if so, we don't fiddle
++	 * with the neighbours */
++ if (is_qeth_device) {
++ if (is_qeth_device==QETH_VERIFY_IS_VLAN_DEV)
++ card=(qeth_card_t *)VLAN_DEV_INFO(dev)->
++ real_dev->priv;
++ else
++ card=(qeth_card_t*)dev->priv;
++ if (card->options.layer2 == DO_LAYER2)
++ tweak_neighbour=0;
++ } else
++ tweak_neighbour=0;
++
++ if (!tweak_neighbour) {
+ in_dev_put(in_dev);
+ return qeth_old_arp_constructor(neigh);
+ }
+@@ -7490,10 +8410,14 @@
+ card->hard_header_cache=qeth_get_hard_header_cache(card->link_type);
+ card->header_cache_update=
+ qeth_get_header_cache_update(card->link_type);
+- card->type_trans=qeth_get_type_trans(card->link_type);
+ }
+ #endif /* QETH_IPV6 */
+
++static void qeth_tt_init_card(qeth_card_t *card)
++{
++ card->type_trans=qeth_get_type_trans(card->link_type);
++}
++
+ #ifdef QETH_VLAN
+ static void qeth_vlan_rx_register(struct net_device *dev,
+ struct vlan_group *grp)
+@@ -7503,6 +8427,7 @@
+ spin_lock_irq(&card->vlan_lock);
+ card->vlangrp = grp;
+ spin_unlock_irq(&card->vlan_lock);
++ qeth_start_softsetup_thread(card);
+ }
+ static void qeth_vlan_rx_kill_vid(struct net_device *dev,
+ unsigned short vid)
+@@ -7513,6 +8438,7 @@
+ if (card->vlangrp)
+ card->vlangrp->vlan_devices[vid] = NULL;
+ spin_unlock_irq(&card->vlan_lock);
++ qeth_start_softsetup_thread(card);
+ }
+
+ #endif /*QETH_VLAN*/
+@@ -7557,39 +8483,43 @@
+
+ dev->rebuild_header=
+ #ifdef QETH_IPV6
+- (!(qeth_get_additional_dev_flags(card->type)&IFF_NOARP))?
++ (!(qeth_get_additional_dev_flags(card)&IFF_NOARP))?
+ (qeth_get_rebuild_header(card->link_type)?
+ qeth_rebuild_header:NULL):
+ #endif /* QETH_IPV6 */
+- NULL;
++ (card->options.layer2 == DO_LAYER2) ?
++ qeth_get_rebuild_header(card->link_type) : NULL;
+ dev->hard_header=
+ #ifdef QETH_IPV6
+- (!(qeth_get_additional_dev_flags(card->type)&IFF_NOARP))?
++ (!(qeth_get_additional_dev_flags(card)&IFF_NOARP))?
+ (qeth_get_hard_header(card->link_type)?
+ qeth_hard_header:NULL):
++#else /* QETH_IPV6 */
++ (card->options.fake_ll==FAKE_LL)?qeth_fake_header:
+ #endif /* QETH_IPV6 */
+- NULL;
++ (card->options.layer2 == DO_LAYER2) ?
++ qeth_get_hard_header(card->link_type) : NULL;
+ dev->header_cache_update=
+ #ifdef QETH_IPV6
+- (!(qeth_get_additional_dev_flags(card->type)&IFF_NOARP))?
++ (!(qeth_get_additional_dev_flags(card)&IFF_NOARP))?
+ (qeth_get_header_cache_update(card->link_type)?
+ qeth_header_cache_update:NULL):
+ #endif /* QETH_IPV6 */
+- NULL;
++ (card->options.layer2 == DO_LAYER2) ?
++ qeth_get_header_cache_update(card->link_type) : NULL;
+ dev->hard_header_cache=
+ #ifdef QETH_IPV6
+- (!(qeth_get_additional_dev_flags(card->type)&IFF_NOARP))?
++ (!(qeth_get_additional_dev_flags(card)&IFF_NOARP))?
+ qeth_get_hard_header_cache(card->link_type):
+ #endif /* QETH_IPV6 */
+- NULL;
++ (card->options.layer2 == DO_LAYER2) ?
++ qeth_get_hard_header_cache(card->link_type) : NULL;
+ dev->hard_header_parse=NULL;
+ dev->destructor=qeth_destructor;
+ dev->set_multicast_list=qeth_set_multicast_list;
+ dev->set_mac_address=qeth_set_mac_address;
+ dev->neigh_setup=qeth_neigh_setup;
+-
+- dev->flags|=qeth_get_additional_dev_flags(card->type);
+-
++ dev->flags|=qeth_get_additional_dev_flags(card);
+ dev->flags|=(
+ (card->options.fake_broadcast==FAKE_BROADCAST)||
+ (card->broadcast_capable)
+@@ -7600,11 +8530,12 @@
+ qeth_send_qipassist(card,4);*/
+
+ /* that was the old place. one id. we need to make sure, that
+- * hydra knows about us going to use the same id again, so we
++ * OSA knows about us going to use the same id again, so we
+ * do that in hardsetup_card every time
+ qeth_get_unique_id(card);*/
+
+ #ifdef CONFIG_SHARED_IPV6_CARDS
++/* in Layer2 mode the unique id is zero */
+ dev->features=(card->unique_id&UNIQUE_ID_NOT_BY_CARD)?
+ 0:NETIF_F_SHARED_IPV6;
+ dev->dev_id=card->unique_id&0xffff;
+@@ -7621,6 +8552,7 @@
+ #ifdef QETH_IPV6
+ qeth_ipv6_init_card(card);
+ #endif /* QETH_IPV6 */
++ qeth_tt_init_card(card);
+
+ dev_init_buffers(dev);
+
+@@ -7650,8 +8582,9 @@
+ card->unit_addr2 = prcd[31];
+ card->cula = prcd[63];
+ /* Don't build queues with diag98 for VM guest lan. */
+- card->do_pfix = (MACHINE_HAS_PFIX) ? ((prcd[0x10]!=_ascebc['V']) ||
+- (prcd[0x11]!=_ascebc['M'])):0;
++ card->is_guest_lan= ((prcd[0x10]==_ascebc['V']) &&
++ (prcd[0x11]==_ascebc['M']));
++ card->do_pfix = (MACHINE_HAS_PFIX) ? (!(card->is_guest_lan)):0;
+
+ sprintf(dbf_text,"chpid:%02x",card->chpid);
+ QETH_DBF_TEXT2(0,trace,dbf_text);
+@@ -8032,6 +8965,7 @@
+
+ /* here we need to know, whether we should include a value
+ * into eui-64 address generation */
++ if (card->options.layer2 == DONT_LAYER2) {
+ QETH_DBF_TEXT2(0,trace,"qipassi4");
+ r=qeth_send_qipassist(card,4);
+ if (r) {
+@@ -8040,10 +8974,16 @@
+ sprintf(dbf_text,"QIP4%4x",r);
+ QETH_DBF_TEXT2(0,trace,dbf_text);
+ }
++ }
+
+- sprintf(dbf_text,"%4x%4x",card->ipa_supported,card->ipa_enabled);
++ sprintf(dbf_text,"%8x",card->ipa_supported);
+ QETH_DBF_TEXT2(0,trace,dbf_text);
++ sprintf(dbf_text,"%8x",card->ipa_enabled);
++ QETH_DBF_TEXT2(0,trace,dbf_text);
+
++ qeth_correct_routing_status(card);
++
++ if (card->options.layer2 == DONT_LAYER2)
+ qeth_get_unique_id(card);
+
+ /* print out status */
+@@ -8094,7 +9034,8 @@
+ "card%s%s%s\n" \
+ "with link type %s (portname: %s)\n",
+ card->devno0,card->devno1,card->devno2,
+- qeth_get_cardname(card->type),
++ qeth_get_cardname(card->type,
++ card->is_guest_lan),
+ (card->level[0])?" (level: ":"",
+ (card->level[0])?card->level:"",
+ (card->level[0])?")":"",
+@@ -8102,16 +9043,32 @@
+ card->link_type),
+ dbf_text);
+ } else {
+- printk("qeth: Device 0x%X/0x%X/0x%X is a%s " \
+- "card%s%s%s\nwith link type %s " \
+- "(no portname needed by interface)\n",
+- card->devno0,card->devno1,card->devno2,
+- qeth_get_cardname(card->type),
+- (card->level[0])?" (level: ":"",
+- (card->level[0])?card->level:"",
+- (card->level[0])?")":"",
+- qeth_get_link_type_name(card->type,
+- card->link_type));
++ if (card->options.portname[0]) {
++ printk("qeth: Device 0x%X/0x%X/0x%X is a%s " \
++ "card%s%s%s\nwith link type %s " \
++ "(no portname needed by interface)\n",
++ card->devno0,card->devno1,card->devno2,
++ qeth_get_cardname(card->type,
++ card->is_guest_lan),
++ (card->level[0])?" (level: ":"",
++ (card->level[0])?card->level:"",
++ (card->level[0])?")":"",
++ qeth_get_link_type_name(card->type,
++						       card->link_type));
++ } else {
++ printk("qeth: Device 0x%X/0x%X/0x%X is a%s " \
++ "card%s%s%s\nwith link type %s\n",
++ card->devno0,card->devno1,card->devno2,
++ qeth_get_cardname(card->type,
++ card->is_guest_lan),
++ (card->level[0])?" (level: ":"",
++ (card->level[0])?card->level:"",
++ (card->level[0])?")":"",
++ qeth_get_link_type_name(card->type,
++						       card->link_type));
++ }
+ }
+ }
+
+@@ -8210,7 +9167,7 @@
+ atomic_set(&card->is_startlaned,0);
+ /* show status in /proc/qeth */
+ atomic_set(&card->is_gone,1);
+- qeth_wakeup_procfile();
++ qeth_snmp_notify();
+ } else {
+ QETH_DBF_TEXT1(0,trace,"ri-sftst");
+ qeth_softsetup_card(card,QETH_LOCK_ALREADY_HELD);
+@@ -8223,7 +9180,7 @@
+ qeth_restore_dev_flag_state(card);
+ atomic_set(&card->is_gone,0);
+ netif_wake_queue(card->dev);
+- qeth_wakeup_procfile();
++ qeth_snmp_notify();
+ }
+ my_spin_unlock(&setup_lock);
+ }
+@@ -8264,6 +9221,7 @@
+ card->options.add_hhlen=DEFAULT_ADD_HHLEN;
+ card->options.fake_ll=DONT_FAKE_LL;
+ card->options.async_iqd=SYNC_IQD;
++ card->options.layer2=QETH_DEFAULT_LAYER2;
+ }
+
+ static qeth_card_t *qeth_alloc_card(void)
+@@ -8324,7 +9282,8 @@
+ spin_lock_init(&card->wait_q_lock);
+ spin_lock_init(&card->softsetup_lock);
+ spin_lock_init(&card->hardsetup_lock);
+- spin_lock_init(&card->ioctl_lock);
++ sema_init(&card->ioctl_sem, 1);
++
+ #ifdef QETH_VLAN
+ spin_lock_init(&card->vlan_lock);
+ card->vlangrp = NULL;
+@@ -8346,6 +9305,8 @@
+ card->ip_mc_new_state.ipm6_ifa=NULL;
+ #endif /* QETH_IPV6 */
+
++ card->csum_enable_mask=IPA_CHECKSUM_DEFAULT_ENABLE_MASK;
++
+ /* setup net_device stuff */
+ card->dev->priv=card;
+
+@@ -8697,6 +9658,11 @@
+ doit1("dont_fake_broadcast",PARSE_FAKE_BROADCAST,
+ card->options.fake_broadcast,DONT_FAKE_BROADCAST);
+
++ doit1("layer2",PARSE_LAYER2,
++ card->options.layer2,DO_LAYER2);
++ doit1("no_layer2",PARSE_LAYER2,
++ card->options.layer2,DONT_LAYER2);
++
+ doit1("fake_ll",PARSE_FAKE_LL,
+ card->options.fake_ll,FAKE_LL);
+ doit1("dont_fake_ll",PARSE_FAKE_LL,
+@@ -8795,6 +9761,59 @@
+ return 0;
+ }
+
++#define IGNORE_PARAM_EQ(option,value,reset_value,msg) \
++ if (card->options.option == value) { \
++ PRINT_ERR("%s not supported with layer 2 " \
++ "functionality, ignoring option on irq " \
++ "0x%x.\n",msg,card->irq0); \
++ card->options.option = reset_value; \
++ }
++#define IGNORE_PARAM_NEQ(option,value,reset_value,msg) \
++ if (card->options.option != value) { \
++ PRINT_ERR("%s not supported with layer 2 " \
++ "functionality, ignoring option on irq " \
++ "0x%x.\n",msg,card->irq0); \
++ card->options.option = reset_value; \
++ }
++
++
++static void qeth_make_parameters_consistent(qeth_card_t *card)
++{
++
++ if (card->options.layer2 == DO_LAYER2) {
++ if (card->type == QETH_CARD_TYPE_IQD) {
++ PRINT_ERR("Device on irq 0x%x does not support " \
++ "layer 2 functionality. " \
++ "Ignoring layer2 option.\n",card->irq0 );
++				  "Ignoring layer2 option.\n",card->irq0);
++			card->options.layer2 = DONT_LAYER2;
++		}
++ "Routing options are");
++#ifdef QETH_IPV6
++ IGNORE_PARAM_NEQ(routing_type6,NO_ROUTER,NO_ROUTER,
++ "Routing options are");
++#endif /* QETH_IPV6 */
++ IGNORE_PARAM_EQ(checksum_type,HW_CHECKSUMMING,
++ QETH_CHECKSUM_DEFAULT,
++ "Checksumming options are");
++ IGNORE_PARAM_NEQ(broadcast_mode,BROADCAST_ALLRINGS,
++ BROADCAST_ALLRINGS,
++ "Broadcast mode options are");
++ IGNORE_PARAM_NEQ(macaddr_mode,MACADDR_NONCANONICAL,
++ MACADDR_NONCANONICAL,
++ "Canonical MAC addr options are");
++ IGNORE_PARAM_EQ(ena_ipat,ENABLE_TAKEOVER,
++ DISABLE_TAKEOVER,
++ "Takeover options are");
++ IGNORE_PARAM_NEQ(fake_broadcast,DONT_FAKE_BROADCAST,
++ DONT_FAKE_BROADCAST,
++ "Broadcast faking options are");
++ IGNORE_PARAM_NEQ(add_hhlen,DEFAULT_ADD_HHLEN,
++ DEFAULT_ADD_HHLEN,"Option add_hhlen is");
++ IGNORE_PARAM_NEQ(fake_ll,DONT_FAKE_LL,
++ DONT_FAKE_LL,"Option fake_ll is");
++ }
++}
++
+ static void qeth_detach_handler(int irq,int status)
+ {
+ qeth_card_t *card;
+@@ -8819,7 +9838,7 @@
+ if ((card=qeth_get_card_by_irq(irq))) {
+ qeth_remove_card(card,remove_method);
+ }
+- qeth_wakeup_procfile();
++ qeth_snmp_notify();
+ my_spin_unlock(&setup_lock);
+ }
+
+@@ -8905,49 +9924,6 @@
+ return cnt;
+ }
+
+-static void qeth_correct_routing_status(qeth_card_t *card)
+-{
+- if (card->type==QETH_CARD_TYPE_IQD) {
+- /* if it's not a mc router, it's no router */
+- if ( (card->options.routing_type4 == PRIMARY_ROUTER) ||
+- (card->options.routing_type4 == SECONDARY_ROUTER)
+-#ifdef QETH_IPV6
+- ||
+- (card->options.routing_type6 == PRIMARY_ROUTER) ||
+- (card->options.routing_type6 == SECONDARY_ROUTER)
+-#endif /* QETH_IPV6 */
+- ) {
+- PRINT_WARN("routing not applicable, reset " \
+- "routing status.\n");
+- card->options.routing_type4=NO_ROUTER;
+-#ifdef QETH_IPV6
+- card->options.routing_type6=NO_ROUTER;
+-#endif /* QETH_IPV6 */
+- }
+- card->options.do_prio_queueing=NO_PRIO_QUEUEING;
+- } else {
+- /* if it's a mc router, it's no router */
+- if ( (card->options.routing_type4 == MULTICAST_ROUTER) ||
+- (card->options.routing_type4 == PRIMARY_CONNECTOR) ||
+- (card->options.routing_type4 == SECONDARY_CONNECTOR)
+-#ifdef QETH_IPV6
+- ||
+- (card->options.routing_type6 == MULTICAST_ROUTER) ||
+- (card->options.routing_type6 == PRIMARY_CONNECTOR) ||
+- (card->options.routing_type6 == SECONDARY_CONNECTOR)
+-#endif /* QETH_IPV6 */
+- ) {
+- PRINT_WARN("routing not applicable, reset " \
+- "routing status. (Did you mean " \
+- "primary_router or secondary_router?)\n");
+- card->options.routing_type4=NO_ROUTER;
+-#ifdef QETH_IPV6
+- card->options.routing_type6=NO_ROUTER;
+-#endif /* QETH_IPV6 */
+- }
+- }
+-}
+-
+ static int qeth_attach_handler(int irq_to_scan,chandev_probeinfo *probeinfo)
+ {
+ int result = 0;
+@@ -9070,6 +10046,7 @@
+ card->options.portno = 0;
+
+ qeth_chandev_parse_options(card,probeinfo->parmstr);
++ qeth_make_parameters_consistent(card);
+
+ card->has_irq=0;
+ card->irq0=irq;
+@@ -9102,7 +10079,6 @@
+ goto endloop;
+ }
+
+- qeth_correct_routing_status(card);
+ qeth_insert_card_into_list(card);
+
+ QETH_DBF_TEXT3(0,trace,"request0");
+@@ -9247,7 +10223,7 @@
+ /* means, we prevent looping in
+ * qeth_send_control_data */
+ atomic_set(&card->write_busy,0);
+- qeth_wakeup_procfile();
++ qeth_snmp_notify();
+ }
+ my_read_unlock(&list_lock);
+ }
+@@ -9297,7 +10273,7 @@
+ card->tqueue_sst.sync=0;
+ schedule_task(&card->tqueue_sst);
+ out:
+- qeth_wakeup_procfile();
++ qeth_snmp_notify();
+ return dev;
+
+ }
+@@ -9385,11 +10361,10 @@
+ sprintf(dbf_text,"ipevent");
+ QETH_DBF_TEXT3(0,trace,dbf_text);
+ QETH_DBF_HEX3(0,trace,&event,sizeof(unsigned long));
+- QETH_DBF_HEX3(0,trace,&dev,sizeof(void*));
+- sprintf(dbf_text,"%08x",ifa->ifa_address);
+- QETH_DBF_TEXT3(0,trace,dbf_text);
+- sprintf(dbf_text,"%08x",ifa->ifa_mask);
+- QETH_DBF_TEXT3(0,trace,dbf_text);
++ sprintf(dbf_text,"%08x",ifa->ifa_address);
++ QETH_DBF_TEXT3(0,trace,dbf_text);
++ sprintf(dbf_text,"%08x",ifa->ifa_mask);
++ QETH_DBF_TEXT3(0,trace,dbf_text);
+
+ #ifdef QETH_VLAN
+ if (qeth_verify_dev(dev)==QETH_VERIFY_IS_VLAN_DEV)
+@@ -9418,10 +10393,9 @@
+ sprintf(dbf_text,"ip6event");
+ QETH_DBF_TEXT3(0,trace,dbf_text);
+ QETH_DBF_HEX3(0,trace,&event,sizeof(unsigned long));
+- QETH_DBF_HEX3(0,trace,&dev,sizeof(void*));
+- QETH_DBF_HEX3(0,trace,ifa->addr.s6_addr,QETH_DBF_TRACE_LEN);
+- QETH_DBF_HEX3(0,trace,ifa->addr.s6_addr+QETH_DBF_TRACE_LEN,
+- QETH_DBF_TRACE_LEN);
++ QETH_DBF_HEX3(0,trace,ifa->addr.s6_addr,QETH_DBF_TRACE_LEN);
++ QETH_DBF_HEX3(0,trace,ifa->addr.s6_addr+QETH_DBF_TRACE_LEN,
++ QETH_DBF_TRACE_LEN);
+
+ #ifdef QETH_VLAN
+ if (qeth_verify_dev(dev)==QETH_VERIFY_IS_VLAN_DEV)
+@@ -9438,8 +10412,63 @@
+
+ return NOTIFY_DONE;
+ }
++
++static int
++qeth_multicast6_event(struct notifier_block *this,
++ unsigned long event, void *ptr)
++{
++ qeth_card_t *card;
++ struct ifmcaddr6 *mc = (struct ifmcaddr6 *) ptr;
++ struct net_device *dev = mc->idev->dev;
++ char dbf_text[15];
++
++ sprintf(dbf_text,"mc6event");
++ QETH_DBF_TEXT3(0,trace,dbf_text);
++ QETH_DBF_HEX3(0,trace,&event,sizeof(unsigned long));
++#ifdef QETH_VLAN
++ if (qeth_verify_dev(dev)==QETH_VERIFY_IS_VLAN_DEV)
++ card = (qeth_card_t *)VLAN_DEV_INFO(dev)->real_dev->priv;
++ else
++#endif
++ card=(qeth_card_t *)dev->priv;
++ if (qeth_does_card_exist(card)) {
++ QETH_DBF_HEX3(0,trace,&card,sizeof(void*));
++ qeth_save_dev_flag_state(card);
++ qeth_start_softsetup_thread(card);
++ }
++
++ return NOTIFY_DONE;
++}
++
+ #endif /* QETH_IPV6 */
+
++static int
++qeth_multicast_event(struct notifier_block *this,
++ unsigned long event, void *ptr)
++{
++ qeth_card_t *card;
++ struct ip_mc_list *mc = (struct ip_mc_list *) ptr;
++ struct net_device *dev = mc->interface->dev;
++ char dbf_text[15];
++
++ sprintf(dbf_text,"mc4event");
++ QETH_DBF_TEXT3(0,trace,dbf_text);
++ QETH_DBF_HEX3(0,trace,&event,sizeof(unsigned long));
++#ifdef QETH_VLAN
++ if (qeth_verify_dev(dev)==QETH_VERIFY_IS_VLAN_DEV)
++ card = (qeth_card_t *)VLAN_DEV_INFO(dev)->real_dev->priv;
++ else
++#endif
++ card=(qeth_card_t *)dev->priv;
++ if (qeth_does_card_exist(card)) {
++ QETH_DBF_HEX3(0,trace,&card,sizeof(void*));
++ qeth_save_dev_flag_state(card);
++ qeth_start_softsetup_thread(card);
++ }
++
++ return NOTIFY_DONE;
++}
++
+ static int qeth_reboot_event(struct notifier_block *this,
+ unsigned long event,void *ptr)
+ {
+@@ -9481,11 +10510,22 @@
+ 0
+ };
+
++static struct notifier_block qeth_mc_notifier = {
++ qeth_multicast_event,
++ 0
++};
++
+ #ifdef QETH_IPV6
+ static struct notifier_block qeth_ip6_notifier = {
+ qeth_ip6_event,
+ 0
+ };
++
++static struct notifier_block qeth_mc6_notifier = {
++ qeth_multicast6_event,
++ 0
++};
++
+ #endif /* QETH_IPV6 */
+
+ static struct notifier_block qeth_reboot_notifier = {
+@@ -9500,10 +10540,11 @@
+ QETH_DBF_TEXT5(0,trace,"regnotif");
+ /* register to be notified on events */
+ r=register_netdevice_notifier(&qeth_dev_notifier);
+-
+ r=register_inetaddr_notifier(&qeth_ip_notifier);
++ r=register_multicast_notifier(&qeth_mc_notifier);
+ #ifdef QETH_IPV6
+ r=register_inet6addr_notifier(&qeth_ip6_notifier);
++ r=register_multicast6_notifier(&qeth_mc6_notifier);
+ #endif /* QETH_IPV6 */
+ r=register_reboot_notifier(&qeth_reboot_notifier);
+ }
+@@ -9516,8 +10557,10 @@
+ QETH_DBF_TEXT5(0,trace,"unregnot");
+ r=unregister_netdevice_notifier(&qeth_dev_notifier);
+ r=unregister_inetaddr_notifier(&qeth_ip_notifier);
++ r=unregister_multicast_notifier(&qeth_mc_notifier);
+ #ifdef QETH_IPV6
+ r=unregister_inet6addr_notifier(&qeth_ip6_notifier);
++ r=unregister_multicast6_notifier(&qeth_mc6_notifier);
+ #endif /* QETH_IPV6 */
+ r=unregister_reboot_notifier(&qeth_reboot_notifier);
+ }
+@@ -9533,9 +10576,11 @@
+ int size;
+ tempinfo_t *info;
+
++ MOD_INC_USE_COUNT;
+ info = (tempinfo_t *) vmalloc (sizeof (tempinfo_t));
+ if (info == NULL) {
+ PRINT_WARN("No memory available for data\n");
++ MOD_DEC_USE_COUNT;
+ return -ENOMEM;
+ } else {
+ file->private_data = (void *) info;
+@@ -9556,6 +10601,7 @@
+ PRINT_WARN("No memory available for data\n");
+ vfree (info);
+ rc=-ENOMEM;
++ MOD_DEC_USE_COUNT;
+ goto out;
+ }
+
+@@ -9583,23 +10629,23 @@
+ "by_ToS");
+ }
+
++		/* a '+' in the routing indicator means that broadcast
++		 * packets are not echoed back to the sender */
+ #ifdef QETH_IPV6
+- if (atomic_read(&card->rt4fld) &&
+- atomic_read(&card->rt6fld))
+- strcpy(router_str, "no");
+- else if (atomic_read(&card->rt4fld) ||
+- atomic_read(&card->rt6fld))
+- strcpy(router_str, "mix");
++ if (atomic_read(&card->rt4fld) ||
++ atomic_read(&card->rt6fld))
++ strcpy(router_str, "FLD");
+ #else /* QETH_IPV6 */
+ if (atomic_read(&card->rt4fld))
+- strcpy(router_str, "no");
++ strcpy(router_str, "FLD");
+ #endif /* QETH_IPV6 */
+ else if ( ((card->options.routing_type4&ROUTER_MASK)==
+ PRIMARY_ROUTER)
+ #ifdef QETH_IPV6
+ &&
+- ((card->options.routing_type6&ROUTER_MASK)==
+- PRIMARY_ROUTER)
++ ( ((card->options.routing_type6&ROUTER_MASK)==
++ PRIMARY_ROUTER)||
++ (!qeth_is_supported(IPA_IPv6)) )
+ #endif /* QETH_IPV6 */
+ ) {
+ strcpy(router_str,"pri");
+@@ -9608,8 +10654,9 @@
+ SECONDARY_ROUTER)
+ #ifdef QETH_IPV6
+ &&
+- ((card->options.routing_type6&ROUTER_MASK)==
+- SECONDARY_ROUTER)
++ ( ((card->options.routing_type6&ROUTER_MASK)==
++ SECONDARY_ROUTER)||
++ (!qeth_is_supported(IPA_IPv6)) )
+ #endif /* QETH_IPV6 */
+ ) {
+ strcpy(router_str,"sec");
+@@ -9618,38 +10665,51 @@
+ MULTICAST_ROUTER)
+ #ifdef QETH_IPV6
+ &&
+- ((card->options.routing_type6&ROUTER_MASK)==
+- MULTICAST_ROUTER)
++ ( ((card->options.routing_type6&ROUTER_MASK)==
++ MULTICAST_ROUTER)||
++ (!qeth_is_supported(IPA_IPv6)) )
+ #endif /* QETH_IPV6 */
+ ) {
+- strcpy(router_str,"mc");
++ if (card->broadcast_capable==BROADCAST_WITHOUT_ECHO)
++ strcpy(router_str,"mc+");
++ else
++ strcpy(router_str,"mc");
+ } else
+ if ( ((card->options.routing_type4&ROUTER_MASK)==
+ PRIMARY_CONNECTOR)
+ #ifdef QETH_IPV6
+ &&
+- ((card->options.routing_type6&ROUTER_MASK)==
+- PRIMARY_CONNECTOR)
++ ( ((card->options.routing_type6&ROUTER_MASK)==
++ PRIMARY_CONNECTOR)||
++ (!qeth_is_supported(IPA_IPv6)) )
+ #endif /* QETH_IPV6 */
+ ) {
+- strcpy(router_str,"p.c");
++ if (card->broadcast_capable==BROADCAST_WITHOUT_ECHO)
++ strcpy(router_str,"p+c");
++ else
++ strcpy(router_str,"p.c");
+ } else
+ if ( ((card->options.routing_type4&ROUTER_MASK)==
+ SECONDARY_CONNECTOR)
+ #ifdef QETH_IPV6
+ &&
+- ((card->options.routing_type6&ROUTER_MASK)==
+- SECONDARY_CONNECTOR)
++ ( ((card->options.routing_type6&ROUTER_MASK)==
++ SECONDARY_CONNECTOR)||
++ (!qeth_is_supported(IPA_IPv6)) )
+ #endif /* QETH_IPV6 */
+ ) {
+- strcpy(router_str,"s.c");
++ if (card->broadcast_capable==BROADCAST_WITHOUT_ECHO)
++ strcpy(router_str,"s+c");
++ else
++ strcpy(router_str,"s.c");
+ } else
+ if ( ((card->options.routing_type4&ROUTER_MASK)==
+ NO_ROUTER)
+ #ifdef QETH_IPV6
+ &&
+- ((card->options.routing_type6&ROUTER_MASK)==
+- NO_ROUTER)
++ ( ((card->options.routing_type6&ROUTER_MASK)==
++ NO_ROUTER)||
++ (!qeth_is_supported(IPA_IPv6)) )
+ #endif /* QETH_IPV6 */
+ ) {
+ strcpy(router_str,"no");
+@@ -9670,7 +10730,8 @@
+ card->chpid,
+ card->dev_name,
+ qeth_get_cardname_short
+- (card->type,card->link_type),
++ (card->type,card->link_type,
++ card->is_guest_lan),
+ card->options.portno);
+ } else if (!atomic_read(&card->is_startlaned)) {
+ length+=sprintf(buffer+length,
+@@ -9680,8 +10741,20 @@
+ card->chpid,
+ card->dev_name,
+ qeth_get_cardname_short
+- (card->type,card->link_type),
++ (card->type,card->link_type,
++ card->is_guest_lan),
+ card->options.portno);
++ } else if (card->options.layer2 == DO_LAYER2) {
++ length+=sprintf(buffer+length,
++ "%04X/%04X/%04X x%02X %10s %14s %2i"
++ " +++ LAYER 2 +++\n",
++ card->devno0,card->devno1,card->devno2,
++ card->chpid,
++ card->dev_name,
++ qeth_get_cardname_short
++ (card->type,card->link_type,
++ card->is_guest_lan),
++ card->options.portno);
+ } else {
+ length+=sprintf(buffer+length,
+ "%04X/%04X/%04X x%02X %10s %14s %2i" \
+@@ -9689,7 +10762,8 @@
+ card->devno0,card->devno1,card->devno2,
+ card->chpid,card->dev_name,
+ qeth_get_cardname_short
+- (card->type,card->link_type),
++ (card->type,card->link_type,
++ card->is_guest_lan),
+ card->options.portno,
+ checksum_str,
+ queueing_str,router_str,bufsize_str,
+@@ -9792,7 +10866,7 @@
+ int pos=0,end_pos;
+ char dbf_text[15];
+
+- if (*offset) return user_len;
++ if (*offset>0) return user_len;
+ buffer=vmalloc(__max(user_len+1,QETH_DBF_MISC_LEN));
+ if (buffer == NULL)
+ return -ENOMEM;
+@@ -10174,6 +11248,7 @@
+ int size;
+ char entry_type[5];
+
++ MOD_INC_USE_COUNT;
+ info = (tempinfo_t *) vmalloc (sizeof (tempinfo_t));
+ if (info == NULL) {
+ PRINT_WARN("No memory available for data\n");
+@@ -10308,16 +11383,14 @@
+ {
+ loff_t len;
+ tempinfo_t *p_info = (tempinfo_t *) file->private_data;
+- loff_t n = *offset;
+- unsigned long pos = n;
+
+- if (pos != n || pos >= p_info->len) {
++ if (*offset >= p_info->len) {
+ return 0;
+ } else {
+- len = __min(user_len, (p_info->len - pos));
+- if (copy_to_user (user_buf, &(p_info->data[pos]), len))
++ len = __min(user_len, (p_info->len - *offset));
++ if (copy_to_user (user_buf, &(p_info->data[*offset]), len))
+ return -EFAULT;
+- *offset = pos + len;
++ (*offset) += len;
+ return len;
+ }
+ }
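
The rewritten read routine above operates on *offset directly instead of
round-tripping through an unsigned long, the usual pattern for reading out
of a pre-rendered buffer. As a standalone illustration (the example_* name
and the buf/buf_len parameters are hypothetical):

	static ssize_t example_buffer_read(char *user_buf, size_t user_len,
					   loff_t *offset,
					   const char *buf, loff_t buf_len)
	{
		loff_t len;

		if (*offset >= buf_len)
			return 0;		/* EOF */
		len = __min(user_len, buf_len - *offset);
		if (copy_to_user(user_buf, buf + *offset, len))
			return -EFAULT;
		*offset += len;
		return len;
	}
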
+@@ -10327,13 +11400,25 @@
+ static int qeth_procfile_release(struct inode *inode,struct file *file)
+ {
+ tempinfo_t *p_info = (tempinfo_t *) file->private_data;
++ struct list_head *l,*n;
++ struct qeth_notify_list *n_entry;
+
+ if (p_info) {
+ if (p_info->data)
+ vfree (p_info->data);
+ vfree (p_info);
+ }
+-
++	/* remove this task's entry from the notify list */
++	spin_lock(&notify_lock);
++	list_for_each_safe(l, n, &notify_list) {
++ n_entry = list_entry(l, struct qeth_notify_list, list);
++ if (n_entry->task == current) {
++ list_del(&n_entry->list);
++ kfree(n_entry);
++ }
++ }
++	spin_unlock(&notify_lock);
++ MOD_DEC_USE_COUNT;
+ return 0;
+ }
+
+@@ -10352,7 +11437,7 @@
+ qeth_card_t *card;
+ #define BUFFER_LEN (10+32+1+5+1+DEV_NAME_LEN+1)
+
+- if (*offset) return user_len;
++ if (*offset>0) return user_len;
+ buffer=vmalloc(__max(__max(user_len+1,BUFFER_LEN),QETH_DBF_MISC_LEN));
+
+ if (buffer == NULL)
+@@ -10476,123 +11561,40 @@
+ PRINT_ERR("unknown ipato information command\n");
+ out:
+ vfree(buffer);
+- *offset = user_len;
++ *offset = *offset + user_len;
+ #undef BUFFER_LEN
+ return user_len;
+ }
+
+-static int qeth_procfile_getinterfaces(unsigned long arg)
++static int qeth_snmp_register(struct task_struct *p, unsigned long arg)
+ {
+- qeth_card_t *card;
++ struct qeth_notify_list *n_entry;
++ struct list_head *l;
++ QETH_DBF_TEXT5(0,trace,"snmpreg");
+
+- char parms[16];
+- char *buffer;
+- char *buffer_pointer;
+- __u32 version,valid_fields,qeth_version,number_of_devices,if_index;
+- __u32 data_size,data_len;
+- unsigned long ioctl_flags;
+- int result=0;
+-
+- /* the struct of version 0 is:
+-typedef struct dev_list
+-{
+- char device_name[IFNAME_MAXLEN]; // OSA-Exp device name (e.g. eth0)
+- __u32 if_index; // interface index from kernel
+- __u32 flags; // device charateristics
+-} __attribute__((packed)) DEV_LIST;
+-
+-typedef struct osaexp_dev_ver0
+-{
+- __u32 version; // structure version
+- __u32 valid_fields; // bitmask of fields that are really filled
+- __u32 qeth_version; // qeth driver version
+- __u32 number_of_devices; // number of OSA Express devices
+- struct dev_list devices[0]; // list of OSA Express devices
+-} __attribute__((packed)) OSAEXP_DEV_VER0;
+- */
+-
+- version = 0;
+- valid_fields = 0;
+- qeth_version = 0;
+- number_of_devices= 0;
+-
+- copy_from_user((void*)parms,(void*)arg,sizeof(parms));
+- memcpy(&data_size,parms,sizeof(__u32));
+-
+- if ( !(data_size > 0) )
+- return -EFAULT;
+- if ( data_size > IOCTL_MAX_TRANSFER_SIZE )
+- return -EFAULT;
+- if ( !access_ok(VERIFY_WRITE, (void *)arg, data_size) )
+- return -EFAULT;
+-
+- my_read_lock(&list_lock);
+- card = firstcard;
+-#define IOCTL_USER_STRUCT_SIZE (DEV_NAME_LEN*sizeof(char)) + \
+- sizeof(__u32) + sizeof(__u32)
+- while (card) {
+- if (card->type == QETH_CARD_TYPE_OSAE)
+- number_of_devices=number_of_devices + IOCTL_USER_STRUCT_SIZE;
+- card = card->next;
++	/* check first whether an entry for this task already exists */
++
++	spin_lock(&notify_lock);
++	list_for_each(l, &notify_list) {
++ n_entry = list_entry(l, struct qeth_notify_list, list);
++ if (n_entry->task == p) {
++ n_entry->signum = (int) arg;
++ goto reg_out;
++ }
++
+ }
+-#undef IOCTL_USER_STRUCT_SIZE
+- if ((number_of_devices + 4*sizeof(__u32)) >= data_size) {
+- result=-ENOMEM;
+- goto out;
+- }
+-
+- number_of_devices=0;
+- card = firstcard;
+- buffer = (char *)vmalloc(data_size);
+- if (!buffer) {
+- result=-EFAULT;
+- goto out;
+- }
+- buffer_pointer = ((char *)(buffer)) + (4*sizeof(__u32)) ;
+- while (card) {
+- if ((card->type == QETH_CARD_TYPE_OSAE)&&
+- (!atomic_read(&card->is_gone))&&
+- (atomic_read(&card->is_hardsetup))&&
+- (atomic_read(&card->is_registered))) {
+-
+- memcpy(buffer_pointer,card->dev_name,DEV_NAME_LEN);
+- buffer_pointer = buffer_pointer + DEV_NAME_LEN;
+- if_index=card->dev->ifindex;
+- memcpy(buffer_pointer,&if_index,sizeof(__u32));
+- buffer_pointer = buffer_pointer + sizeof(__u32);
+- memcpy(buffer_pointer,&ioctl_flags,sizeof(__u32));
+- buffer_pointer = buffer_pointer + sizeof(__u32);
+- number_of_devices=number_of_devices+1;
+- }
+- card = card->next;
+- }
+-
+- /* we copy the real size */
+- data_len=buffer_pointer-buffer;
+-
+- buffer_pointer = buffer;
+- /* copy the header information at the beginning of the buffer */
+- memcpy(buffer_pointer,&version,sizeof(__u32));
+- memcpy(((char *)buffer_pointer)+sizeof(__u32),&valid_fields,
+- sizeof(__u32));
+- memcpy(((char *)buffer_pointer)+(2*sizeof(__u32)),&qeth_version,
+- sizeof(__u32));
+- memcpy(((char *)buffer_pointer)+(3*sizeof(__u32)),&number_of_devices,
+- sizeof(__u32));
+- copy_to_user((char *)arg,buffer,data_len);
+- vfree(buffer);
+-out:
+- my_read_unlock(&list_lock);
+- return result;
+-
+-#undef PARMS_BUFFERLENGTH
+-
+-};
+-
+-static int qeth_procfile_interfacechanges(unsigned long arg)
+-{
+- return qeth_sleepon_procfile();
+-
++	spin_unlock(&notify_lock);
++ n_entry = (struct qeth_notify_list *)
++ kmalloc(sizeof(struct qeth_notify_list),GFP_KERNEL);
++ if (!n_entry)
++ return -ENOMEM;
++ n_entry->task = p;
++ n_entry->signum = (int) arg;
++	spin_lock(&notify_lock);
++	list_add(&n_entry->list,&notify_list);
++reg_out:
++	spin_unlock(&notify_lock);
++ return 0;
+ }
+
+ static int qeth_procfile_ioctl(struct inode *inode, struct file *file,
+@@ -10600,19 +11602,17 @@
+ {
+
+ int result;
+- down_interruptible(&qeth_procfile_ioctl_lock);
++
+ switch (cmd) {
+-
+- case QETH_IOCPROC_OSAEINTERFACES:
+- result = qeth_procfile_getinterfaces(arg);
++ case QETH_IOCPROC_REGISTER:
++ if ( (arg > 0) && (arg < 32) )
++ result = qeth_snmp_register(current,arg);
++ else
++ result = -EINVAL;
+ break;
+- case QETH_IOCPROC_INTERFACECHANGES:
+- result = qeth_procfile_interfacechanges(arg);
+- break;
+ default:
+ result = -EOPNOTSUPP;
+ }
+- up(&qeth_procfile_ioctl_lock);
+ return result;
+ };
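
The two old ioctls are replaced by a single QETH_IOCPROC_REGISTER that arms
a per-process signal (1..31); qeth_snmp_notify() later delivers that signal
on interface state changes. A hypothetical userspace sketch (the proc file
path is an assumption, QETH_IOCPROC_REGISTER comes from the driver headers):

	#include <fcntl.h>
	#include <signal.h>
	#include <sys/ioctl.h>

	int register_for_qeth_events(void)
	{
		int fd = open("/proc/qeth", O_RDONLY);	/* path assumed */

		if (fd < 0)
			return -1;
		/* arg must be a signal number in 1..31, as the driver checks */
		return ioctl(fd, QETH_IOCPROC_REGISTER, SIGUSR1);
	}
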
+
+@@ -10644,10 +11644,6 @@
+ S_IFREG|0644,&proc_root);
+ if (qeth_proc_file) {
+ qeth_proc_file->proc_fops = &qeth_procfile_fops;
+- sema_init(&qeth_procfile_ioctl_sem,
+- PROCFILE_SLEEP_SEM_MAX_VALUE);
+- sema_init(&qeth_procfile_ioctl_lock,
+- PROCFILE_IOCTL_SEM_MAX_VALUE);
+ } else proc_file_registration=-1;
+
+ if (proc_file_registration)
+@@ -10796,8 +11792,12 @@
+ global_stay_in_mem = chandev_persist(chandev_type_qeth);
+ #endif /* MODULE */
+
+- spin_lock_init(&setup_lock);
++/* SNMP notification initialization */
++	spin_lock_init(&notify_lock);
++	INIT_LIST_HEAD(&notify_list);
+
++ spin_lock_init(&setup_lock);
++
+ spin_lock_init(&ipato_list_lock);
+
+ qeth_get_internal_functions();
+@@ -10918,6 +11918,7 @@
+ #endif /* QETH_IPV6 */
+ qeth_unregister_dbf_views();
+ qeth_free_all_spare_bufs();
++
+ return result;
+ }
+
+=== drivers/s390/net/qeth_mpc.h
+==================================================================
+--- drivers/s390/net/qeth_mpc.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/net/qeth_mpc.h (/trunk/2.4.27) (revision 52)
+@@ -11,7 +11,7 @@
+ #ifndef __QETH_MPC_H__
+ #define __QETH_MPC_H__
+
+-#define VERSION_QETH_MPC_H "$Revision: 1.42 $"
++#define VERSION_QETH_MPC_H "$Revision: 1.42.4.5 $"
+
+ #define QETH_IPA_TIMEOUT (card->ipa_timeout)
+ #define QETH_MPC_TIMEOUT 2000
+@@ -164,6 +164,9 @@
+
+ #define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer+0x61)
+ #define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer+0x2c)
++#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer+0x50)
++#define QETH_ULP_ENABLE_PROT_TCPIP 0x03
++#define QETH_ULP_ENABLE_PROT_LAYER2 0x08
+ #define QETH_ULP_ENABLE_FILTER_TOKEN(buffer) (buffer+0x53)
+ #define QETH_ULP_ENABLE_PORTNAME_AND_LL(buffer) (buffer+0x62)
+
+@@ -245,6 +248,12 @@
+
+ #define IPA_CMD_STARTLAN 0x01
+ #define IPA_CMD_STOPLAN 0x02
++#define IPA_CMD_SETVMAC 0x21
++#define IPA_CMD_DELVMAC 0x22
++#define IPA_CMD_SETGMAC 0x23
++#define IPA_CMD_DELGMAC 0x24
++#define IPA_CMD_SETVLAN 0x25
++#define IPA_CMD_DELVLAN 0x26
+ #define IPA_CMD_SETIP 0xb1
+ #define IPA_CMD_DELIP 0xb7
+ #define IPA_CMD_QIPASSIST 0xb2
+@@ -288,6 +297,7 @@
+ #define IPA_PASSTHRU 0x00001000L
+ #define IPA_FULL_VLAN 0x00004000L
+ #define IPA_SOURCE_MAC_AVAIL 0x00010000L
++#define IPA_OSA_MC_ROUTER_AVAIL 0x00020000L
+
+ #define IPA_SETADP_QUERY_COMMANDS_SUPPORTED 0x01
+ #define IPA_SETADP_ALTER_MAC_ADDRESS 0x02
+@@ -331,7 +341,7 @@
+ #define IPA_CMD_ASS_ARP_QUERY_INFO 0x0104
+ #define IPA_CMD_ASS_ARP_QUERY_STATS 0x0204
+
+-#define IPA_CHECKSUM_ENABLE_MASK 0x001f
++#define IPA_CHECKSUM_DEFAULT_ENABLE_MASK 0x001a
+
+ #define IPA_CMD_ASS_FILTER_SET_TYPES 0x0003
+
+@@ -434,7 +444,15 @@
+ __u8 type;
+ } setrtg;
+ struct ipa_setadp_cmd setadapterparms;
++/* set/delete VMACs and GMACs */
+ struct {
++ __u32 mac_length;
++ __u8 mac[6];
++ } setdelmac;
++ struct {
++ __u16 vlan_id;
++ } setdelvlan;
++ struct {
+ __u32 command;
+ #define ADDR_FRAME_TYPE_DIX 1
+ #define ADDR_FRAME_TYPE_802_3 2
+@@ -461,12 +479,8 @@
+ } data;
+ } ipa_cmd_t __attribute__ ((packed));
+
+-#define QETH_IOC_MAGIC 0x22
+-#define QETH_IOCPROC_OSAEINTERFACES _IOWR(QETH_IOC_MAGIC, 1, arg)
+-#define QETH_IOCPROC_INTERFACECHANGES _IOWR(QETH_IOC_MAGIC, 2, arg)
+-
+ #define SNMP_QUERY_CARD_INFO 0x00000002L
+-#define SNMP_REGISETER_MIB 0x00000004L
++#define SNMP_REGISTER_MIB 0x00000004L
+ #define SNMP_GET_OID 0x00000010L
+ #define SNMP_SET_OID 0x00000011L
+ #define SNMP_GET_NEXT_OID 0x00000012L
+@@ -565,6 +579,9 @@
+ 0x00,0x00,0x00,0x40,
+ };
+
++#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer+0x19)
++#define QETH_IPA_CMD_PROT_TCPIP 0x03
++#define QETH_IPA_CMD_PROT_LAYER2 0x08
+ #define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer+0x2c)
+
+ #define PDU_ENCAPSULATION(buffer) \
+=== drivers/s390/net/smsgiucv.h
+==================================================================
+--- drivers/s390/net/smsgiucv.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/net/smsgiucv.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,10 @@
++/*
++ * IUCV special message driver
++ *
++ * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Martin Schwidefsky (schwidefsky at de.ibm.com)
++ */
++
++int smsg_register_callback(char *, void (*)(char *));
++void smsg_unregister_callback(char *, void (*)(char *));
++
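
smsgiucv.h exposes a small prefix-based callback interface for CP special
messages. A hypothetical consumer (the prefix string and module wrapper are
assumptions; the exact prefix-matching semantics are not visible in this
header):

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/module.h>
	#include "smsgiucv.h"

	static void my_smsg_handler(char *msg)
	{
		printk(KERN_INFO "smsg received: %s\n", msg);
	}

	static int __init my_smsg_init(void)
	{
		return smsg_register_callback("MYAPP", my_smsg_handler);
	}

	static void __exit my_smsg_exit(void)
	{
		smsg_unregister_callback("MYAPP", my_smsg_handler);
	}

	module_init(my_smsg_init);
	module_exit(my_smsg_exit);
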
+=== drivers/s390/net/qeth.h
+==================================================================
+--- drivers/s390/net/qeth.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/net/qeth.h (/trunk/2.4.27) (revision 52)
+@@ -15,7 +15,7 @@
+
+ #define QETH_NAME " qeth"
+
+-#define VERSION_QETH_H "$Revision: 1.113 $"
++#define VERSION_QETH_H "$Revision: 1.113.4.9 $"
+
+ /******************** CONFIG STUFF ***********************/
+ //#define QETH_DBF_LIKE_HELL
+@@ -47,7 +47,7 @@
+ /********************* TUNING STUFF **************************/
+ #define HIGH_WATERMARK_PACK 5
+ #define LOW_WATERMARK_PACK 2
+-#define WATERMARK_FUZZ 2
++#define WATERMARK_FUZZ 1
+
+ #define QETH_MAX_INPUT_THRESHOLD 500
+ #define QETH_MAX_OUTPUT_THRESHOLD 300 /* ? */
+@@ -85,6 +85,22 @@
+ #define QETH_HARDSETUP_CLEAR_LAPS 3
+ #define QETH_RECOVERY_HARDSETUP_RETRY 2
+
++/* the worst case stack usage is:
++ * qeth_hard_start_xmit
++ * do_QDIO
++ * qeth_qdio_output_handler
++ * do_QDIO
++ * qeth_qdio_output_handler
++ * (no more recursion as we have called netif_stop_queue)
++ */
++#ifdef CONFIG_ARCH_S390X
++#define STACK_PTR_MASK 0x3fff
++#define WORST_CASE_STACK_USAGE 1100
++#else /* CONFIG_ARCH_S390X */
++#define STACK_PTR_MASK 0x1fff
++#define WORST_CASE_STACK_USAGE 800
++#endif /* CONFIG_ARCH_S390X */
++
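
The comment above bounds the recursion depth; STACK_PTR_MASK and
WORST_CASE_STACK_USAGE presumably feed a check of the space left on the
current kernel stack before recursing into do_QDIO. A sketch of such a check
(not the driver's verbatim code; taking the address of a local approximates
the stack pointer):

	static inline int example_enough_stack_left(void)
	{
		unsigned long sp = (unsigned long) &sp;

		/* kernel stacks are size-aligned, so the masked value is
		 * the number of bytes still free below the current frame */
		return (sp & STACK_PTR_MASK) > WORST_CASE_STACK_USAGE;
	}
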
+ /************************* DEBUG FACILITY STUFF *********************/
+
+ #define QETH_DBF_HEX(ex,name,level,addr,len) \
+@@ -238,6 +254,7 @@
+ #define QETH_MPC_LINK_TYPE_FAST_ETHERNET 0x01
+ #define QETH_MPC_LINK_TYPE_HSTR 0x02
+ #define QETH_MPC_LINK_TYPE_GIGABIT_ETHERNET 0x03
++#define QETH_MPC_LINK_TYPE_10GIG_ETHERNET 0x10
+ #define QETH_MPC_LINK_TYPE_LANE_ETH100 0x81
+ #define QETH_MPC_LINK_TYPE_LANE_TR 0x82
+ #define QETH_MPC_LINK_TYPE_LANE_ETH1000 0x83
+@@ -249,10 +266,18 @@
+ #define QETH_HEADER_SIZE 32
+ #define QETH_IP_HEADER_SIZE 40
+ #define QETH_HEADER_LEN_POS 8
++/* attention: the packet length in layer 2 mode is at offset 0x06 */
++#define QETH_HEADER2_LEN_POS 6
+ /* flags for the header: */
+ #define QETH_HEADER_PASSTHRU 0x10
+ #define QETH_HEADER_IPV6 0x80
+
++#define QETH_ETH_MC_MAC_V4 0x0100 /* like v4 */
++#define QETH_ETH_MC_MAC_V6 0x3333 /* like v6 */
++/* tr mc mac is longer, but that will be enough to detect mc frames */
++#define QETH_TR_MC_MAC_NC 0xc000 /* non-canonical */
++#define QETH_TR_MC_MAC_C 0x0300 /* canonical */
++
+ #define QETH_CAST_FLAGS 0x07
+ #define QETH_CAST_UNICAST 6
+ #define QETH_CAST_MULTICAST 4
+@@ -260,6 +285,11 @@
+ #define QETH_CAST_ANYCAST 7
+ #define QETH_CAST_NOCAST 0
+
++#define QETH_QDIO_HEADER2_FLAG_MULTICAST_FRAME 0x01
++#define QETH_QDIO_HEADER2_FLAG_BROADCAST_FRAME 0x02
++#define QETH_QDIO_HEADER2_FLAG_UNICAST_FRAME 0x04
++#define QETH_QDIO_HEADER2_FLAG_VLAN_FRAME 0x10
++
+ /* VLAN defines */
+ #define QETH_EXT_HEADER_VLAN_FRAME 0x01
+ #define QETH_EXT_HEADER_TOKEN_ID 0x02
+@@ -284,18 +314,6 @@
+ }
+ }
+
+-inline static unsigned short qeth_get_additional_dev_flags(int cardtype)
+-{
+- switch (cardtype) {
+- case QETH_CARD_TYPE_IQD: return IFF_NOARP;
+-#ifdef QETH_IPV6
+- default: return 0;
+-#else /* QETH_IPV6 */
+- default: return IFF_NOARP;
+-#endif /* QETH_IPV6 */
+- }
+-}
+-
+ inline static int qeth_get_hlen(__u8 link_type)
+ {
+ #ifdef QETH_IPV6
+@@ -330,7 +348,6 @@
+ void (*qeth_my_eth_header_cache_update)(struct hh_cache *,struct net_device *,
+ unsigned char *);
+
+-#ifdef QETH_IPV6
+ typedef int (*__qeth_temp1)(struct sk_buff *,struct net_device *,
+ unsigned short,void *,void *,unsigned);
+ inline static __qeth_temp1 qeth_get_hard_header(__u8 link_type)
+@@ -391,7 +408,7 @@
+ struct ethhdr *eth;
+
+ skb->mac.raw=skb->data;
+- skb_pull(skb,ETH_ALEN*2+sizeof(short));
++ skb_pull(skb,ETH_ALEN*2+2); /* dest, src, type */
+ eth=skb->mac.ethernet;
+
+ if(*eth->h_dest&1) {
+@@ -400,7 +417,10 @@
+ else
+ skb->pkt_type=PACKET_MULTICAST;
+ } else {
++ if (memcmp(eth->h_dest,dev->dev_addr, ETH_ALEN))
+ skb->pkt_type=PACKET_OTHERHOST;
++ else
++ skb->pkt_type=PACKET_HOST;
+ }
+ if (ntohs(eth->h_proto)>=1536) return eth->h_proto;
+ if (*(unsigned short *)(skb->data) == 0xFFFF)
+@@ -417,9 +437,9 @@
+ return tr_type_trans;
+ default:
+ return qeth_eth_type_trans;
++
+ }
+ }
+-#endif /* QETH_IPV6 */
+
+ inline static const char *qeth_get_link_type_name(int cardtype,__u8 linktype)
+ {
+@@ -430,6 +450,7 @@
+ case QETH_MPC_LINK_TYPE_FAST_ETHERNET: return "Fast Eth";
+ case QETH_MPC_LINK_TYPE_HSTR: return "HSTR";
+ case QETH_MPC_LINK_TYPE_GIGABIT_ETHERNET: return "Gigabit Eth";
++ case QETH_MPC_LINK_TYPE_10GIG_ETHERNET: return "10Gig Eth";
+ case QETH_MPC_LINK_TYPE_LANE_ETH100: return "LANE Eth100";
+ case QETH_MPC_LINK_TYPE_LANE_TR: return "LANE TR";
+ case QETH_MPC_LINK_TYPE_LANE_ETH1000: return "LANE Eth1000";
+@@ -529,7 +550,7 @@
+ #define QETH_FAKE_LL_ADDR_LEN ETH_ALEN /* 6 */
+ #define QETH_FAKE_LL_DEST_MAC_POS 0
+ #define QETH_FAKE_LL_SRC_MAC_POS 6
+-#define QETH_FAKE_LL_SRC_MAC_POS_IN_QDIO_HDR 6
++#define QETH_FAKE_LL_SRC_MAC_POS_IN_QDIO_HDR 18
+ #define QETH_FAKE_LL_PROT_POS 12
+ #define QETH_FAKE_LL_V4_ADDR_POS 16
+ #define QETH_FAKE_LL_V6_ADDR_POS 24
+@@ -591,8 +612,10 @@
+ #define PARSE_ROUTING_TYPE6 17
+ #define PARSE_FAKE_LL 18
+ #define PARSE_ASYNC_IQD 19
++#define PARSE_LAYER2 20
++#define PARSE_COUNT 21
++#define QETH_DEFAULT_LAYER2 0
+
+-#define PARSE_COUNT 20
+
+ #define NO_PRIO_QUEUEING 0
+ #define PRIO_QUEUEING_PREC 1
+@@ -603,7 +626,7 @@
+ #define MULTICAST_ROUTER 3
+ #define PRIMARY_CONNECTOR 4
+ #define SECONDARY_CONNECTOR 5
+-#define ROUTER_MASK 0xf /* used to remove SET_ROUTING_FLAG
++#define ROUTER_MASK 0xf /* used to remove RESET_ROUTING_FLAG
+ from routing_type */
+ #define RESET_ROUTING_FLAG 0x10 /* used to indicate, that setting
+ the routing type is desired */
+@@ -621,7 +644,8 @@
+ #define DONT_FAKE_LL 1
+ #define SYNC_IQD 0
+ #define ASYNC_IQD 1
+-
++#define DONT_LAYER2 0
++#define DO_LAYER2 1
+ #define QETH_BREAKOUT_LEAVE 1
+ #define QETH_BREAKOUT_AGAIN 2
+
+@@ -629,6 +653,9 @@
+ #define QETH_DONT_WAIT_FOR_LOCK 1
+ #define QETH_LOCK_ALREADY_HELD 2
+
++#define BROADCAST_WITH_ECHO 1
++#define BROADCAST_WITHOUT_ECHO 2
++
+ #define PROBLEM_CARD_HAS_STARTLANED 1
+ #define PROBLEM_RECEIVED_IDX_TERMINATE 2
+ #define PROBLEM_ACTIVATE_CHECK_CONDITION 3
+@@ -712,6 +739,7 @@
+ int add_hhlen;
+ int fake_ll;
+ int async_iqd;
++ int layer2;
+ };
+
+ typedef struct qeth_hdr_t {
+@@ -782,6 +810,7 @@
+
+ __u8 link_type;
+
++ int is_guest_lan;
+ int do_pfix; /* to avoid doing diag98 for vm guest lan devices */
+
+ /* inbound buffer management */
+@@ -820,15 +849,16 @@
+ int (*hard_header_cache)(struct neighbour *,struct hh_cache *);
+ void (*header_cache_update)(struct hh_cache *,struct net_device *,
+ unsigned char *);
++#endif /* QETH_IPV6 */
+ unsigned short (*type_trans)(struct sk_buff *,struct net_device *);
+- int type_trans_correction;
+-#endif /* QETH_IPV6 */
+
+ #ifdef QETH_VLAN
+ struct vlan_group *vlangrp;
+ spinlock_t vlan_lock;
++#endif
++ __u8 vlans_current[VLAN_GROUP_ARRAY_LEN/(8*sizeof(__u8))];
++ __u8 vlans_new[VLAN_GROUP_ARRAY_LEN/(8*sizeof(__u8))];
+
+-#endif
+ char dev_name[DEV_NAME_LEN]; /* pointed to by dev->name */
+ char dev_basename[DEV_NAME_LEN];
+ struct net_device *dev;
+@@ -846,13 +876,14 @@
+ atomic_t is_softsetup; /* card is setup by softsetup */
+ atomic_t is_open; /* card is in use */
+ atomic_t is_gone; /* after a msck */
++ atomic_t mac_registered;
+
+ int has_irq; /* once a request_irq was successful */
+
+ /* prevents deadlocks :-O */
+ spinlock_t softsetup_lock;
+ spinlock_t hardsetup_lock;
+- spinlock_t ioctl_lock;
++ struct semaphore ioctl_sem;
+ atomic_t softsetup_thread_is_running;
+ struct semaphore softsetup_thread_sem;
+ struct tq_struct tqueue_sst;
+@@ -907,6 +938,8 @@
+ __u32 ipa6_enabled;
+ __u32 adp_supported;
+
++ __u32 csum_enable_mask;
++
+ atomic_t startlan_attempts;
+ atomic_t enable_routing_attempts4;
+ atomic_t rt4fld;
+@@ -1004,6 +1037,30 @@
+ struct mydevreg_t *prev;
+ } mydevreg_t;
+
++/* user process notification support */
++spinlock_t notify_lock;
++struct list_head notify_list;
++struct qeth_notify_list {
++ struct list_head list;
++ struct task_struct *task;
++ int signum;
++};
++
++inline static unsigned short
++qeth_get_additional_dev_flags(qeth_card_t *card)
++{
++ if (card->options.layer2 == DO_LAYER2)
++ return 0;
++ switch (card->type) {
++ case QETH_CARD_TYPE_IQD: return IFF_NOARP;
++#ifdef QETH_IPV6
++ default: return 0;
++#else /* QETH_IPV6 */
++ default: return IFF_NOARP;
++#endif /* QETH_IPV6 */
++ }
++}
++
+ inline static int qeth_get_arphrd_type(int cardtype,int linktype)
+ {
+ switch (cardtype) {
+@@ -1011,7 +1068,7 @@
+ case QETH_MPC_LINK_TYPE_LANE_TR:
+ /* fallthrough */
+ case QETH_MPC_LINK_TYPE_HSTR:
+- return ARPHRD_IEEE802;
++ return ARPHRD_IEEE802_TR;
+ default: return ARPHRD_ETHER;
+ }
+ case QETH_CARD_TYPE_IQD: return ARPHRD_ETHER;
+@@ -1036,28 +1093,42 @@
+ }
+ }
+
+-inline static const char *qeth_get_cardname(int cardtype)
++inline static const char *qeth_get_cardname(int cardtype,int is_guest_lan)
+ {
+- switch (cardtype) {
+- case QETH_CARD_TYPE_UNKNOWN: return "n unknown";
+- case QETH_CARD_TYPE_OSAE: return "n OSD Express";
+- case QETH_CARD_TYPE_IQD: return " HiperSockets";
+- default: return " strange";
++ if (is_guest_lan) {
++ switch (cardtype) {
++ case QETH_CARD_TYPE_UNKNOWN: return "n unknown";
++ case QETH_CARD_TYPE_OSAE: return " Guest LAN QDIO";
++ case QETH_CARD_TYPE_IQD: return " Guest LAN Hiper";
++ default: return " strange";
++ }
++ } else {
++ switch (cardtype) {
++ case QETH_CARD_TYPE_UNKNOWN: return "n unknown";
++ case QETH_CARD_TYPE_OSAE: return "n OSD Express";
++ case QETH_CARD_TYPE_IQD: return " HiperSockets";
++ default: return " strange";
++ }
+ }
+ }
+
+ /* max length to be returned: 14 */
+-inline static const char *qeth_get_cardname_short(int cardtype,__u8 link_type)
++inline static const char *qeth_get_cardname_short(int cardtype,__u8 link_type,
++ int is_guest_lan)
+ {
+ switch (cardtype) {
+ case QETH_CARD_TYPE_UNKNOWN: return "unknown";
+- case QETH_CARD_TYPE_OSAE: switch (link_type) {
++ case QETH_CARD_TYPE_OSAE: if (is_guest_lan)
++ return "GuestLAN QDIO";
++ switch (link_type) {
+ case QETH_MPC_LINK_TYPE_FAST_ETHERNET:
+ return "OSD_100";
+ case QETH_MPC_LINK_TYPE_HSTR:
+ return "HSTR";
+ case QETH_MPC_LINK_TYPE_GIGABIT_ETHERNET:
+ return "OSD_1000";
++ case QETH_MPC_LINK_TYPE_10GIG_ETHERNET:
++ return "OSD_10GIG";
+ case QETH_MPC_LINK_TYPE_LANE_ETH100:
+ return "OSD_FE_LANE";
+ case QETH_MPC_LINK_TYPE_LANE_TR:
+@@ -1068,7 +1139,7 @@
+ return "OSD_ATM_LANE";
+ default: return "OSD_Express";
+ }
+- case QETH_CARD_TYPE_IQD: return "HiperSockets";
++ case QETH_CARD_TYPE_IQD: return (is_guest_lan)?"GuestLAN Hiper":"HiperSockets";
+ default: return " strange";
+ }
+ }
+=== drivers/s390/net/iucv.c
+==================================================================
+--- drivers/s390/net/iucv.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/net/iucv.c (/trunk/2.4.27) (revision 52)
+@@ -1,5 +1,5 @@
+ /*
+- * $Id: iucv.c,v 1.41 2003/06/24 16:05:32 felfert Exp $
++ * $Id: iucv.c,v 1.40.2.5 2004/06/29 07:37:33 braunu Exp $
+ *
+ * IUCV network driver
+ *
+@@ -29,7 +29,7 @@
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+- * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.41 $
++ * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.40.2.5 $
+ *
+ */
+
+@@ -320,7 +320,7 @@
+ #define iucv_debug(lvl, fmt, args...) \
+ do { \
+ if (debuglevel >= lvl) \
+- printk(KERN_DEBUG __FUNCTION__ ": " fmt "\n", ## args); \
++ printk(KERN_DEBUG "%s: " fmt "\n", __FUNCTION__, ## args); \
+ } while (0)
+
+ #else
+@@ -334,13 +334,15 @@
+ * Internal functions
+ *******************************************************************************/
+
++static int iucv_retrieve_buffer(void);
++
+ /**
+ * print start banner
+ */
+ static void
+ iucv_banner(void)
+ {
+- char vbuf[] = "$Revision: 1.41 $";
++ char vbuf[] = "$Revision: 1.40.2.5 $";
+ char *version = vbuf;
+
+ if ((version = strchr(version, ':'))) {
+@@ -418,6 +420,7 @@
+ static void
+ iucv_exit(void)
+ {
++ iucv_retrieve_buffer();
+ if (iucv_external_int_buffer)
+ kfree(iucv_external_int_buffer);
+ if (iucv_param_pool)
+@@ -438,17 +441,19 @@
+ static __inline__ iucv_param *
+ grab_param(void)
+ {
+- iucv_param *ret;
+- int i = 0;
++ iucv_param *ptr;
++ static int hint = 0;
+
+- while (atomic_compare_and_swap(0, 1, &iucv_param_pool[i].in_use)) {
+- i++;
+- if (i >= PARAM_POOL_SIZE)
+- i = 0;
+- }
+- ret = &iucv_param_pool[i];
+- memset(&ret->param, 0, sizeof(ret->param));
+- return ret;
++ ptr = iucv_param_pool + hint;
++ do {
++ ptr++;
++ if (ptr >= iucv_param_pool + PARAM_POOL_SIZE)
++ ptr = iucv_param_pool;
++ } while (atomic_compare_and_swap(0, 1, &ptr->in_use));
++ hint = ptr - iucv_param_pool;
++
++ memset(&ptr->param, 0, sizeof(ptr->param));
++ return ptr;
+ }
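
The rewritten grab_param() replaces the old always-from-slot-0 scan with a
static hint remembering the last slot handed out, so successive allocations
probe different entries instead of piling up at the front of the pool; the
unlocked hint update is benign because it is only a starting point for the
scan. The same pattern in generic form (the slot type and pool are
placeholders):

	typedef struct { atomic_t in_use; } example_slot;

	static example_slot *example_grab(example_slot *pool, int pool_size)
	{
		static int hint = 0;
		example_slot *ptr = pool + hint;

		do {
			if (++ptr >= pool + pool_size)
				ptr = pool;
		} while (atomic_compare_and_swap(0, 1, &ptr->in_use));
		hint = ptr - pool;
		return ptr;
	}
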
+
+ /**
+@@ -549,10 +554,8 @@
+ * - ENOMEM - storage allocation for a new pathid table failed
+ */
+ static int
+-iucv_add_pathid(__u16 pathid, handler *handler)
++__iucv_add_pathid(__u16 pathid, handler *handler)
+ {
+- ulong flags;
+-
+ iucv_debug(1, "entering");
+
+ iucv_debug(1, "handler is pointing to %p", handler);
+@@ -560,21 +563,30 @@
+ if (pathid > (max_connections - 1))
+ return -EINVAL;
+
+- spin_lock_irqsave (&iucv_lock, flags);
+ if (iucv_pathid_table[pathid]) {
+- spin_unlock_irqrestore (&iucv_lock, flags);
+ iucv_debug(1, "pathid entry is %p", iucv_pathid_table[pathid]);
+ printk(KERN_WARNING
+ "%s: Pathid being used, error.\n", __FUNCTION__);
+ return -EINVAL;
+ }
+ iucv_pathid_table[pathid] = handler;
+- spin_unlock_irqrestore (&iucv_lock, flags);
+
+ iucv_debug(1, "exiting");
+ return 0;
+ } /* end of add_pathid function */
+
++static int
++iucv_add_pathid(__u16 pathid, handler *handler)
++{
++ ulong flags;
++ int rc;
++
++ spin_lock_irqsave (&iucv_lock, flags);
++ rc = __iucv_add_pathid(pathid, handler);
++ spin_unlock_irqrestore (&iucv_lock, flags);
++ return rc;
++}
++
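
iucv_add_pathid() is split into a lock-free worker and a locking wrapper so
that iucv_connect() (further below) can insert the new pathid while already
holding iucv_lock. This is the usual __-prefix convention; in generic form
(the table and names are placeholders):

	static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;
	static void *example_table[16];

	/* caller must hold example_lock */
	static int __example_add(int id, void *h)
	{
		if (example_table[id])
			return -EINVAL;
		example_table[id] = h;
		return 0;
	}

	static int example_add(int id, void *h)
	{
		unsigned long flags;
		int rc;

		spin_lock_irqsave(&example_lock, flags);
		rc = __example_add(id, h);
		spin_unlock_irqrestore(&example_lock, flags);
		return rc;
	}
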
+ static void
+ iucv_remove_pathid(__u16 pathid)
+ {
+@@ -688,7 +700,6 @@
+ spin_lock_irqsave (&iucv_lock, flags);
+ list_del(&handler->list);
+ if (list_empty(&iucv_handler_table)) {
+- iucv_retrieve_buffer();
+ if (register_flag) {
+ unregister_external_interrupt(0x4000, iucv_irq_handler);
+ register_flag = 0;
+@@ -764,6 +775,7 @@
+ if (iucv_pathid_table == NULL) {
+ printk(KERN_WARNING "%s: iucv_pathid_table storage "
+ "allocation failed\n", __FUNCTION__);
++ kfree(new_handler);
+ return NULL;
+ }
+ memset (iucv_pathid_table, 0, max_connections * sizeof(handler *));
+@@ -1002,6 +1014,8 @@
+ b2f0_result = b2f0(ACCEPT, parm);
+
+ if (b2f0_result == 0) {
++ if (msglim)
++ *msglim = parm->ipmsglim;
+ if (pgm_data)
+ h->pgm_data = pgm_data;
+ if (flags1_out)
+@@ -1133,11 +1147,15 @@
+ iucv_setmask(~(AllInterrupts));
+ messagesDisabled = 1;
+
++ spin_lock_irqsave (&iucv_lock, flags);
+ parm->ipflags1 = (__u8)flags1;
+ b2f0_result = b2f0(CONNECT, parm);
+ memcpy(&local_parm, parm, sizeof(local_parm));
+ release_param(parm);
+ parm = &local_parm;
++ if (b2f0_result == 0)
++ add_pathid_result = __iucv_add_pathid(parm->ippathid, h);
++ spin_unlock_irqrestore (&iucv_lock, flags);
+
+ if (b2f0_result) {
+ iucv_setmask(~0);
+@@ -1145,7 +1163,6 @@
+ return b2f0_result;
+ }
+
+- add_pathid_result = iucv_add_pathid(parm->ippathid, h);
+ *pathid = parm->ippathid;
+
+ /* Enable everything again */
+@@ -2333,7 +2350,8 @@
+ iucv_debug(2,
+ "found a matching handler");
+ break;
+- }
++ } else
++ h = NULL;
+ }
+ spin_unlock_irqrestore (&iucv_lock, flags);
+ if (h) {
+=== drivers/s390/net/ctcmpc.c
+==================================================================
+--- drivers/s390/net/ctcmpc.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/net/ctcmpc.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,8876 @@
++/* INTERNAL VERSION: 051804b
++ *
++ * CTC / SNA/MPC network driver
++ *
++ * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Fritz Elfert (elfert at de.ibm.com, felfert at millenux.com)
++ * Fixes by : Jochen Röhrig (roehrig at de.ibm.com)
++ * Arnaldo Carvalho de Melo <acme at conectiva.com.br>
++ * MPC additions: Belinda Thompson (belindat at us.ibm.com)
++ * Andy Richter (richtera at us.ibm.com)
++ *
++ * Documentation used:
++ * - Principles of Operation (IBM doc#: SA22-7201-06)
++ * - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
++ * - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
++ * - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
++ * - ESCON I/O Interface (IBM doc#: SA22-7202-029)
++ *
++ * and the source of the original CTC driver by:
++ * Dieter Wellerdiek (wel at de.ibm.com)
++ * Martin Schwidefsky (schwidefsky at de.ibm.com)
++ * Denis Joseph Barrow (djbarrow at de.ibm.com,barrow_dj at yahoo.com)
++ * Jochen Röhrig (roehrig at de.ibm.com)
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.2.2.2 $
++ *
++ */
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/interrupt.h>
++#include <linux/timer.h>
++#include <linux/sched.h>
++
++#include <linux/signal.h>
++#include <linux/string.h>
++#include <linux/proc_fs.h>
++
++#include <linux/ip.h>
++#include <linux/if_arp.h>
++#include <linux/tcp.h>
++#include <linux/skbuff.h>
++#include <linux/ctype.h>
++#include <linux/netdevice.h>
++#include <net/dst.h>
++
++#include <asm/io.h>
++#include <asm/bitops.h>
++#include <asm/uaccess.h>
++#include <linux/wait.h>
++
++DECLARE_WAIT_QUEUE_HEAD(my_queue);
++
++#ifdef CONFIG_CHANDEV
++ #define CTC_CHANDEV
++#endif
++
++#ifdef CTC_CHANDEV
++ #include <asm/chandev.h>
++ #define REQUEST_IRQ chandev_request_irq
++ #define FREE_IRQ chandev_free_irq
++#else
++ #define REQUEST_IRQ request_irq
++ #define FREE_IRQ free_irq
++#endif
++
++#include <asm/idals.h>
++#include <asm/irq.h>
++
++#include "ctcmpc.h"
++#include "fsm.h"
++
++#ifdef MODULE
++MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert at millenux.com),"
++ "Belinda Thompson (belindat at us.ibm.com)");
++MODULE_DESCRIPTION("Linux for S/390 CTC/SNA MPC Driver");
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,12))
++MODULE_LICENSE("GPL");
++ #endif
++ #ifndef CTC_CHANDEV
++MODULE_PARM(mpc, "s");
++MODULE_PARM_DESC(mpc,
++ "One or more definitions in the same format like the "
++ "kernel param for mpc.\n"
++ "E.g.: mpc0:0x700:0x701:4:mpc1:0x702:0x703:4\n");
++
++char *mpc = NULL;
++ #endif
++#else
++/**
++ * Number of devices in monolithic (not module) driver version.
++ */
++ #define MAX_STATIC_DEVICES 16
++#endif /* MODULE */
++
++//#define DEBUG2 1
++//#define DEBUGCCW 1
++//#define DEBUGXID 1
++//#define DEBUGDATA 1
++//#define DEBUGSEQ 1
++//#define DEBUG 1
++//#undef DEBUG
++
++#define ETH_P_SNA_DIX 0x80D5
++
++/**
++ * CCW commands used in this driver.
++ */
++#define CCW_CMD_WRITE 0x01
++#define CCW_CMD_READ 0x02
++#define CCW_CMD_NOOP 0x03
++#define CCW_CMD_TIC 0x08
++#define CCW_CMD_SENSE_CMD 0x14
++#define CCW_CMD_WRITE_CTL 0x17
++#define CCW_CMD_SET_EXTENDED 0xc3 /* Disable Compatibility Mode */
++#define CCW_CMD_PREPARE 0xe3
++
++#define CTC_PROTO_S390 0
++#define CTC_PROTO_LINUX 1
++#define CTC_PROTO_LINUX_TTY 2
++#define CTC_PROTO_OS390 3
++#define CTC_PROTO_MPC 4
++#define CTC_PROTO_MAX 4
++
++#define CTC_BUFSIZE_LIMIT 65535
++#define CTC_BUFSIZE_DEFAULT 32768
++#define MPC_BUFSIZE_DEFAULT 65535
++
++#define CTC_TIMEOUT_5SEC 5000
++#define CTC_TIMEOUT_1SEC 1000
++#define CTC_BUSYWAIT_10SEC 10000
++
++#define CTC_INITIAL_BLOCKLEN 2
++
++#define READ 0
++#define WRITE 1
++
++/**
++ * Enum for classifying detected devices.
++ */
++enum channel_types
++{
++ /**
++ * Device is not a channel.
++ */
++ channel_type_none,
++
++ /**
++ * Device is a channel, but we don't know
++ * anything about it.
++ */
++ channel_type_unknown,
++ /**
++ * Device is an mpc capable channel.
++ */
++ channel_type_mpc,
++
++ /**
++ * Device is a CTC/A.
++ */
++ channel_type_ctca,
++
++ /**
++	 * Device is an ESCON channel.
++ */
++ channel_type_escon,
++ /**
++ * Device is an unsupported model.
++ */
++ channel_type_unsupported
++};
++
++typedef enum channel_types channel_type_t;
++
++#ifndef CTC_CHANDEV
++static int ctc_no_auto = 0;
++#endif
++
++/**
++ * If running on 64 bit, this must be changed. XXX Why? (bird)
++ */
++typedef unsigned long intparm_t;
++
++#ifndef CTC_CHANDEV
++/**
++ * Definition of a per device parameter block
++ */
++ #define MAX_PARAM_NAME_LEN 11
++typedef struct param_t
++{
++ struct param_t *next;
++ int read_dev;
++ int write_dev;
++ __u16 proto;
++ char name[MAX_PARAM_NAME_LEN];
++} param;
++
++static param *params = NULL;
++#endif
++
++typedef struct
++{
++ unsigned long maxmulti;
++ unsigned long maxcqueue;
++ unsigned long doios_single;
++ unsigned long doios_multi;
++ unsigned long txlen;
++ unsigned long tx_time;
++ struct timeval send_stamp;
++} ctc_profile;
++
++/**
++ * Definition of an XID2
++ *
++ */
++#define ALLZEROS 0x0000000000000000
++
++#define XID_FM2 0x20
++#define XID2_0 0x00
++#define XID2_7 0x07
++#define XID2_MAX_READ ((1 << 16) - 1)
++#define XID2_WRITE_SIDE 0x04
++#define XID2_READ_SIDE 0x05
++
++struct xid2_t
++{
++ __u8 xid2_type_id;
++ __u8 xid2_len;
++ __u32 xid2_adj_id;
++ __u8 xid2_rlen;
++ __u8 xid2_resv1;
++ __u8 xid2_flag1;
++ __u8 xid2_fmtt;
++ __u8 xid2_flag4;
++ __u16 xid2_resv2;
++ __u8 xid2_tgnum;
++ __u32 xid2_sender_id;
++ __u8 xid2_flag2;
++ __u8 xid2_option;
++ char xid2_resv3[8];
++ __u16 xid2_resv4;
++ __u8 xid2_dlc_type;
++ __u16 xid2_resv5;
++ __u8 xid2_mpc_flag;
++ __u8 xid2_resv6;
++ __u16 xid2_buf_len;
++ char xid2_buffer[255-(sizeof(__u8)*13)-(sizeof(__u32)*2)-
++ (sizeof(__u16)*4)-(sizeof(char)*8)];
++}__attribute__ ((packed));
++typedef struct xid2_t xid2;
++
++#define XID2_LENGTH (sizeof(xid2))
++
++static const xid2 init_xid = {
++ xid2_type_id: XID_FM2,
++ xid2_len: 0x45,
++ xid2_adj_id: 0,
++ xid2_rlen: 0x31,
++ xid2_resv1: 0,
++ xid2_flag1: 0,
++ xid2_fmtt: 0,
++ xid2_flag4: 0x80,
++ xid2_resv2: 0,
++ xid2_tgnum: 0,
++ xid2_sender_id: 0,
++ xid2_flag2: 0,
++ xid2_option: XID2_0,
++ xid2_resv3: "\x00",
++ xid2_resv4: 0,
++ xid2_dlc_type: XID2_READ_SIDE,
++ xid2_resv5: 0,
++ xid2_mpc_flag: 0,
++ xid2_resv6: 0,
++ xid2_buf_len: (MPC_BUFSIZE_DEFAULT - 35),
++};
++
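
The initializer above uses the old GNU "field:" syntax accepted by 2.4-era
compilers; C99 spells the same thing with ".field =" designators and lets
the remaining members default to zero. The equivalent C99 form (only the
non-zero members shown):

	static const xid2 init_xid_c99 = {
		.xid2_type_id  = XID_FM2,
		.xid2_len      = 0x45,
		.xid2_rlen     = 0x31,
		.xid2_flag4    = 0x80,
		.xid2_option   = XID2_0,
		.xid2_dlc_type = XID2_READ_SIDE,
		.xid2_buf_len  = (MPC_BUFSIZE_DEFAULT - 35),
	};
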
++struct th_header_t
++{
++ __u8 th_seg;
++ __u8 th_ch_flag;
++#define TH_HAS_PDU 0xf0
++#define TH_IS_XID 0x01
++#define TH_SWEEP_REQ 0xfe
++#define TH_SWEEP_RESP 0xff
++ __u8 th_blk_flag;
++#define TH_DATA_IS_XID 0x80
++#define TH_RETRY 0x40
++#define TH_DISCONTACT 0xc0
++#define TH_SEG_BLK 0x20
++#define TH_LAST_SEG 0x10
++#define TH_PDU_PART 0x08
++ __u8 th_is_xid; /* is 0x01 if this is XID */
++ __u32 th_seq_num;
++}__attribute__ ((packed));
++typedef struct th_header_t th_header;
++
++static const th_header thnorm = {
++ th_seg: 0x00,
++ th_ch_flag: TH_IS_XID,
++ th_blk_flag:TH_DATA_IS_XID,
++ th_is_xid: 0x01,
++ th_seq_num: 0x00000000,
++};
++
++static const th_header thdummy = {
++ th_seg: 0x00,
++ th_ch_flag: 0x00,
++ th_blk_flag:TH_DATA_IS_XID,
++ th_is_xid: 0x01,
++ th_seq_num: 0x00000000,
++};
++
++
++struct th_addon_t
++{
++ __u32 th_last_seq;
++ __u32 th_resvd;
++}__attribute__ ((packed));
++typedef struct th_addon_t th_addon;
++
++struct th_sweep_t
++{
++ th_header th;
++ th_addon sw;
++}__attribute__ ((packed));
++typedef struct th_sweep_t th_sweep;
++
++#define TH_HEADER_LENGTH (sizeof(th_header))
++#define TH_SWEEP_LENGTH (sizeof(th_sweep))
++
++#define PDU_LAST 0x80
++#define PDU_CNTL 0x40
++#define PDU_FIRST 0x20
++
++struct pdu_t
++{
++ __u32 pdu_offset;
++ __u8 pdu_flag;
++ __u8 pdu_proto; /* 0x01 is APPN SNA */
++ __u16 pdu_seq;
++}__attribute__ ((packed));
++typedef struct pdu_t pdu;
++#define PDU_HEADER_LENGTH (sizeof(pdu))
++
++struct qllc_t
++{
++ __u8 qllc_address;
++#define QLLC_REQ 0xFF
++#define QLLC_RESP 0x00
++ __u8 qllc_commands;
++#define QLLC_DISCONNECT 0x53
++#define QLLC_UNSEQACK 0x73
++#define QLLC_SETMODE 0x93
++#define QLLC_EXCHID 0xBF
++}__attribute__ ((packed));
++typedef struct qllc_t qllc;
++
++
++static void ctcmpc_bh(unsigned long);
++
++/**
++ * Definition of one channel
++ */
++struct channel_t
++{
++
++ /**
++ * Pointer to next channel in list.
++ */
++ struct channel_t *next;
++ __u16 devno;
++ int irq;
++ /**
++ * Type of this channel.
++ * CTC/A or Escon for valid channels.
++ */
++ channel_type_t type;
++ /**
++ * Misc. flags. See CHANNEL_FLAGS_... below
++ */
++ __u32 flags;
++ /**
++ * The protocol of this channel
++ */
++ __u16 protocol;
++ /**
++ * I/O and irq related stuff
++ */
++ ccw1_t *ccw;
++ devstat_t *devstat;
++ /**
++ * RX/TX buffer size
++ */
++ __u32 max_bufsize;
++ /**
++ * Transmit/Receive buffer.
++ */
++ struct sk_buff *trans_skb;
++ /**
++ * Universal I/O queue.
++ */
++ struct sk_buff_head io_queue;
++ struct tasklet_struct ch_tasklet;
++ /**
++ * TX queue for collecting skb's during busy.
++ */
++ struct sk_buff_head collect_queue;
++ /**
++ * Amount of data in collect_queue.
++ */
++ int collect_len;
++ /**
++ * spinlock for collect_queue and collect_len
++ */
++ spinlock_t collect_lock;
++ /**
++	 * Timer for detecting unresponsive
++ * I/O operations.
++ */
++ fsm_timer timer;
++ /**
++ * Retry counter for misc. operations.
++ */
++ int retry;
++ /**
++ * spinlock for serializing inbound SNA Segments
++ */
++ spinlock_t segment_lock;
++ /**
++ * SNA TH Seq Number
++ */
++ __u32 th_seq_num;
++ __u8 th_seg;
++ __u32 pdu_seq;
++ sk_buff *xid_skb;
++ char *xid_skb_data;
++ th_header *xid_th;
++ xid2 *xid;
++ char *xid_id;
++ th_header *rcvd_xid_th;
++ xid2 *rcvd_xid;
++ char *rcvd_xid_id;
++ __u8 in_mpcgroup;
++ fsm_timer sweep_timer;
++ struct sk_buff_head sweep_queue;
++ th_header *discontact_th;
++ struct tasklet_struct ch_disc_tasklet;
++ /**
++ * The finite state machine of this channel
++ */
++ fsm_instance *fsm;
++ /**
++ * The corresponding net_device this channel
++ * belongs to.
++ */
++ struct net_device *netdev;
++ ctc_profile prof;
++ unsigned char *trans_skb_data;
++};
++typedef struct channel_t channel;
++
++#define CHANNEL_FLAGS_READ 0
++#define CHANNEL_FLAGS_WRITE 1
++#define CHANNEL_FLAGS_INUSE 2
++#define CHANNEL_FLAGS_BUFSIZE_CHANGED 4
++#define CHANNEL_FLAGS_RWMASK 1
++#define CHANNEL_PRIMARY 0x20 /* we are the x side */
++#define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK)
++
++/**
++ * Linked list of all detected channels.
++ */
++static channel *channels = NULL;
++
++#ifdef CTC_CHANDEV
++static int activated;
++#endif
++
++
++/***
++ * Definition of one MPC group
++ */
++
++#define MAX_MPCGCHAN 10
++#define MPC_XID_TIMEOUT_VALUE 10000
++#define MPC_CHANNEL_TIMEOUT_1SEC 1000
++#define MPC_CHANNEL_ADD 0
++#define MPC_CHANNEL_REMOVE 1
++#define MPC_CHANNEL_ATTN 2
++#define XSIDE 1
++#define YSIDE 0
++
++
++
++struct mpcg_info_t
++{
++ struct sk_buff *skb;
++ struct channel_t *ch;
++ struct xid2_t *xid;
++ struct th_sweep_t *sweep;
++ struct th_header_t *th;
++};
++typedef struct mpcg_info_t mpcg_info;
++
++struct mpc_group_t
++{
++ struct tasklet_struct mpc_tasklet;
++ struct tasklet_struct mpc_tasklet2;
++ int changed_side;
++ int saved_state;
++ int channels_terminating;
++ int out_of_sequence;
++ int flow_off_called;
++ int port_num;
++ int port_persist;
++ int alloc_called;
++ __u32 xid2_adj_id;
++ __u8 xid2_tgnum;
++ __u32 xid2_sender_id;
++ int num_channel_paths;
++ int active_channels[2];
++ __u16 group_max_buflen;
++ int outstanding_xid2;
++ int outstanding_xid7;
++ int outstanding_xid7_p2;
++ int sweep_req_pend_num;
++ int sweep_rsp_pend_num;
++ sk_buff *xid_skb;
++ char *xid_skb_data;
++ th_header *xid_th;
++ xid2 *xid;
++ char *xid_id;
++ th_header *rcvd_xid_th;
++ sk_buff *rcvd_xid_skb;
++ char *rcvd_xid_data;
++ __u8 in_sweep;
++ __u8 roll;
++ xid2 *saved_xid2;
++ callbacktypei2 allochanfunc;
++ int allocchan_callback_retries;
++ callbacktypei3 estconnfunc;
++ int estconn_callback_retries;
++ int estconn_called;
++ int xidnogood;
++ int send_qllc_disc;
++ fsm_timer timer;
++ fsm_instance *fsm; /* group xid fsm */
++};
++typedef struct mpc_group_t mpc_group;
++
++typedef struct ctc_priv_t
++{
++ struct net_device_stats stats;
++ unsigned long tbusy;
++
++ /**The MPC group struct of this interface
++ */
++ mpc_group *mpcg;
++ /**
++ * The finite state machine of this interface.
++ */
++ fsm_instance *fsm;
++ /**
++ * The protocol of this device
++ */
++ __u16 protocol;
++ channel *channel[2];
++ xid2 *xid;
++ struct proc_dir_entry *proc_dentry;
++ struct proc_dir_entry *proc_stat_entry;
++ struct proc_dir_entry *proc_ctrl_entry;
++ int proc_registered;
++ /**
++ * Timer for restarting after I/O Errors
++ */
++ fsm_timer restart_timer;
++
++} ctc_priv;
++
++
++static void ctcmpc_action_send_discontact(unsigned long);
++
++
++/**
++ * Compatibility macros for busy handling
++ * of network devices.
++ */
++static __inline__ void
++ctcmpc_clear_busy(struct net_device *dev)
++{
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s enter: %s()\n", dev->name,__FUNCTION__);
++#endif
++ if(((ctc_priv *)dev->priv)->mpcg->in_sweep == 0)
++ {
++ clear_bit(0, &(((ctc_priv *)dev->priv)->tbusy));
++ netif_wake_queue(dev);
++ }
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s exit: %s()\n", dev->name,__FUNCTION__);
++#endif
++
++
++}
++
++static __inline__ int
++ctcmpc_test_and_set_busy(struct net_device *dev)
++{
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s enter: %s()\n", dev->name,__FUNCTION__);
++#endif
++ netif_stop_queue(dev);
++ return test_and_set_bit(0, &((ctc_priv *)dev->priv)->tbusy);
++}
++
++#define SET_DEVICE_START(device, value)
++
++static __inline__ int ctcmpc_checkalloc_buffer(channel *,int);
++static void ctcmpc_purge_skb_queue(struct sk_buff_head *);
++
++
++/**
++ * Print Banner.
++ */
++static void
++print_banner(void)
++{
++ static int printed = 0;
++ char vbuf[] = "$Revision: 1.2.2.2 $";
++ char *version = vbuf;
++
++ if(printed)
++ return;
++ if((version = strchr(version, ':')))
++ {
++ char *p = strchr(version + 1, '$');
++ if(p)
++ *p = '\0';
++ } else
++ version = " ??? ";
++ printk(KERN_INFO
++ "CTC MPC driver Version%swith"
++#ifndef CTC_CHANDEV
++ "out"
++#endif
++ " CHANDEV support"
++#ifdef DEBUG
++ " (DEBUG-VERSION, " __DATE__ __TIME__ ")"
++#endif
++ " initialized\n", version);
++ printed = 1;
++}
++
++static inline int
++gfp_type(void)
++{
++ return in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
++}
++
++
++/**
++ * MPC Group Station FSM States
++
++State Name When In This State
++====================== =======================================
++MPCG_STATE_RESET Initial State When Driver Loaded
++ We receive and send NOTHING
++
++MPCG_STATE_INOP INOP Received.
++ Group level non-recoverable error
++
++MPCG_STATE_READY XID exchanges for at least 1 write and
++ 1 read channel have completed.
++ Group is ready for data transfer.
++
++States from ctc_mpc_alloc_channel
++==============================================================
++MPCG_STATE_XID2INITW Awaiting XID2(0) Initiation
++ ATTN from other side will start
++ XID negotiations.
++ Y-side protocol only.
++
++MPCG_STATE_XID2INITX XID2(0) negotiations are in progress.
++ At least 1, but not all, XID2(0)'s
++ have been received from partner.
++
++MPCG_STATE_XID7INITW XID2(0) complete
++ No XID2(7)'s have yet been received.
++ XID2(7) negotiations pending.
++
++MPCG_STATE_XID7INITX XID2(7) negotiations in progress.
++ At least 1, but not all, XID2(7)'s
++ have been received from partner.
++
++MPCG_STATE_XID7INITF XID2(7) negotiations complete.
++ Transitioning to READY.
++
++MPCG_STATE_READY Ready for Data Transfer.
++
++
++States from ctc_mpc_establish_connectivity call
++==============================================================
++MPCG_STATE_XID0IOWAIT Initiating XID2(0) negotiations.
++ X-side protocol only.
++ ATTN-BUSY from other side will convert
++ this to Y-side protocol and the
++ ctc_mpc_alloc_channel flow will begin.
++
++MPCG_STATE_XID0IOWAIX XID2(0) negotiations are in progress.
++ At least 1, but not all, XID2(0)'s
++ have been received from partner.
++
++MPCG_STATE_XID7INITI XID2(0) complete
++ No XID2(7)'s have yet been received.
++ XID2(7) negotiations pending.
++
++MPCG_STATE_XID7INITZ XID2(7) negotiations in progress.
++ At least 1, but not all, XID2(7)'s
++ have been received from partner.
++
++MPCG_STATE_XID7INITF XID2(7) negotiations complete.
++ Transitioning to READY.
++
++MPCG_STATE_READY Ready for Data Transfer.
++
++*/
++
++enum mpcg_events
++{
++ MPCG_EVENT_INOP,
++ MPCG_EVENT_DISCONC,
++ MPCG_EVENT_XID0DO,
++ MPCG_EVENT_XID2,
++ MPCG_EVENT_XID2DONE,
++ MPCG_EVENT_XID7DONE,
++ MPCG_EVENT_TIMER,
++ MPCG_EVENT_DOIO,
++ NR_MPCG_EVENTS,
++};
++
++static const char *mpcg_event_names[] = {
++ "INOP Condition",
++ "Discontact Received",
++ "Channel Active - Start XID",
++ "XID2 Received",
++ "XID0 Complete",
++ "XID7 Complete",
++ "XID Setup Timer",
++ "XID DoIO",
++};
++
++
++enum mpcg_states
++{
++ MPCG_STATE_RESET,
++ MPCG_STATE_INOP,
++ MPCG_STATE_XID2INITW,
++ MPCG_STATE_XID2INITX,
++ MPCG_STATE_XID7INITW,
++ MPCG_STATE_XID7INITX,
++ MPCG_STATE_XID0IOWAIT,
++ MPCG_STATE_XID0IOWAIX,
++ MPCG_STATE_XID7INITI,
++ MPCG_STATE_XID7INITZ,
++ MPCG_STATE_XID7INITF,
++ MPCG_STATE_FLOWC,
++ MPCG_STATE_READY,
++ NR_MPCG_STATES,
++};
++
++
++static const char *mpcg_state_names[] = {
++ "Reset",
++ "INOP",
++ "Passive XID- XID0 Pending Start",
++ "Passive XID- XID0 Pending Complete",
++ "Passive XID- XID7 Pending P1 Start",
++ "Passive XID- XID7 Pending P2 Complete",
++ "Active XID- XID0 Pending Start",
++ "Active XID- XID0 Pending Complete",
++ "Active XID- XID7 Pending Start",
++ "Active XID- XID7 Pending Complete ",
++ "XID - XID7 Complete ",
++ "FLOW CONTROL ON",
++ "READY",
++};
++
++#ifndef CTC_CHANDEV
++/**
++ * Return type of a detected device.
++ */
++static channel_type_t channel_type (senseid_t *id)
++{
++ channel_type_t type = channel_type_none;
++
++ switch(id->cu_type)
++ {
++ case 0x3088:
++ switch(id->cu_model)
++ {
++ case 0x1E:
++ /**
++ * 3088-1E = FICON channel
++ */
++ case 0x08:
++ /**
++ * 3088-08 = CTCA
++ */
++ case 0x1F:
++ /**
++ * 3088-1F = ESCON channel
++ */
++ type = channel_type_mpc;
++ break;
++
++ /**
++ * 3088-01 = P390 OSA emulation
++ */
++ case 0x01:
++ /* fall thru */
++
++ /**
++ * 3088-60 = OSA/2 adapter
++ */
++ case 0x60:
++ /* fall thru */
++
++ /**
++ * 3088-61 = CISCO 7206 CLAW proto
++ * on ESCON
++ */
++ case 0x61:
++ /* fall thru */
++
++ /**
++ * 3088-62 = OSA/D device
++ */
++ case 0x62:
++ type = channel_type_unsupported;
++ break;
++
++ default:
++ type = channel_type_unknown;
++ printk(KERN_INFO
++ "channel: Unknown model found "
++ "3088-%02x\n", id->cu_model);
++ }
++ break;
++
++ default:
++ type = channel_type_none;
++ }
++ return type;
++}
++#endif
++
++
++/**
++ * States of the interface statemachine.
++ */
++enum dev_states
++{
++ DEV_STATE_STOPPED,
++ DEV_STATE_STARTWAIT_RXTX,
++ DEV_STATE_STARTWAIT_RX,
++ DEV_STATE_STARTWAIT_TX,
++ DEV_STATE_STOPWAIT_RXTX,
++ DEV_STATE_STOPWAIT_RX,
++ DEV_STATE_STOPWAIT_TX,
++ DEV_STATE_RUNNING,
++ /**
++ * MUST be always the last element!!
++ */
++ NR_DEV_STATES
++};
++
++static const char *dev_state_names[] = {
++ "Stopped",
++ "StartWait RXTX",
++ "StartWait RX",
++ "StartWait TX",
++ "StopWait RXTX",
++ "StopWait RX",
++ "StopWait TX",
++ "Running",
++};
++
++/**
++ * Events of the interface statemachine.
++ */
++enum dev_events
++{
++ DEV_EVENT_START,
++ DEV_EVENT_STOP,
++ DEV_EVENT_RXUP,
++ DEV_EVENT_TXUP,
++ DEV_EVENT_RXDOWN,
++ DEV_EVENT_TXDOWN,
++ DEV_EVENT_RESTART,
++ /**
++ * MUST be always the last element!!
++ */
++ NR_DEV_EVENTS
++};
++
++static const char *dev_event_names[] = {
++ "Start",
++ "Stop",
++ "RX up",
++ "TX up",
++ "RX down",
++ "TX down",
++ "Restart",
++};
++
++/**
++ * Events of the channel statemachine
++ */
++enum ch_events
++{
++ /**
++ * Events, representing return code of
++ * I/O operations (do_IO, halt_IO et al.)
++ */
++ CH_EVENT_IO_SUCCESS,
++ CH_EVENT_IO_EBUSY,
++ CH_EVENT_IO_ENODEV,
++ CH_EVENT_IO_EIO,
++ CH_EVENT_IO_UNKNOWN,
++
++ CH_EVENT_ATTNBUSY,
++ CH_EVENT_ATTN,
++ CH_EVENT_BUSY,
++
++ /**
++ * Events, representing unit-check
++ */
++ CH_EVENT_UC_RCRESET,
++ CH_EVENT_UC_RSRESET,
++ CH_EVENT_UC_TXTIMEOUT,
++ CH_EVENT_UC_TXPARITY,
++ CH_EVENT_UC_HWFAIL,
++ CH_EVENT_UC_RXPARITY,
++ CH_EVENT_UC_ZERO,
++ CH_EVENT_UC_UNKNOWN,
++
++ /**
++ * Events, representing subchannel-check
++ */
++ CH_EVENT_SC_UNKNOWN,
++
++ /**
++ * Events, representing machine checks
++ */
++ CH_EVENT_MC_FAIL,
++ CH_EVENT_MC_GOOD,
++
++ /**
++ * Event, representing normal IRQ
++ */
++ CH_EVENT_IRQ,
++ CH_EVENT_FINSTAT,
++
++ /**
++ * Event, representing timer expiry.
++ */
++ CH_EVENT_TIMER,
++
++ /**
++ * Events, representing commands from upper levels.
++ */
++ CH_EVENT_START,
++ CH_EVENT_STOP,
++ CH_EVENT_SEND_XID,
++
++ /**
++ * Events, representing TX MPC buffer states
++ */
++ CH_EVENT_TX_LOMEM,
++ CH_EVENT_TX_MEM_OK,
++ CH_EVENT_RSWEEP1_TIMER,
++
++ /**
++ * MUST be always the last element!!
++ */
++ NR_CH_EVENTS,
++};
++
++static const char *ch_event_names[] = {
++ "do_IO success",
++ "do_IO busy",
++ "do_IO enodev",
++ "do_IO ioerr",
++ "do_IO unknown",
++
++ "Status ATTN & BUSY",
++ "Status ATTN",
++ "Status BUSY",
++
++ "Unit check remote reset",
++ "Unit check remote system reset",
++ "Unit check TX timeout",
++ "Unit check TX parity",
++ "Unit check Hardware failure",
++ "Unit check RX parity",
++ "Unit check ZERO",
++ "Unit check Unknown",
++
++ "SubChannel check Unknown",
++
++ "Machine check failure",
++ "Machine check operational",
++
++ "IRQ normal",
++ "IRQ final",
++
++ "Timer",
++
++ "Start",
++ "Stop",
++ "XID Exchange",
++
++ "TX buffer shortage",
++ "TX buffer shortage relieved",
++ "MPC Group Sweep Timer",
++};
++
++/**
++ * States of the channel state machine.
++ */
++enum ch_states
++{
++ /**
++ * Channel not assigned to any device,
++ * initial state, direction invalid
++ */
++ CH_STATE_IDLE,
++
++ /**
++ * Channel assigned but not operating
++ */
++ CH_STATE_STOPPED,
++ CH_STATE_STARTWAIT,
++ CH_STATE_STARTRETRY,
++ CH_STATE_SETUPWAIT,
++ CH_STATE_RXINIT,
++ CH_STATE_TXINIT,
++ CH_STATE_RX,
++ CH_STATE_TX,
++ CH_STATE_RXIDLE,
++ CH_STATE_TXIDLE,
++ CH_STATE_RXERR,
++ CH_STATE_TXERR,
++ CH_STATE_TERM,
++ CH_STATE_DTERM,
++ CH_STATE_NOTOP,
++ CH_STATE_TXLOMEM,
++ CH_XID0_PENDING,
++ CH_XID0_INPROGRESS,
++ CH_XID7_PENDING,
++ CH_XID7_PENDING1,
++ CH_XID7_PENDING2,
++ CH_XID7_PENDING3,
++ CH_XID7_PENDING4,
++
++ /**
++	 * Must always be the last element!
++ */
++ NR_CH_STATES,
++};
++
++static const char *ch_state_names[] = {
++ "Idle",
++ "Stopped",
++ "StartWait",
++ "StartRetry",
++ "SetupWait",
++ "RX init",
++ "TX init",
++ "RX",
++ "TX",
++ "RX idle",
++ "TX idle",
++ "RX error",
++ "TX error",
++ "Terminating",
++ "Restarting",
++ "Not operational",
++ "TX Buffers low",
++ "Pending XID0 Start",
++ "In XID0 Negotiations ",
++ "Pending XID7 P1 Start",
++ "Active XID7 P1 Exchange ",
++ "Pending XID7 P2 Start ",
++ "Active XID7 P2 Exchange ",
++ "XID7 Complete - Pending READY ",
++};
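++
++/**
++ * The NR_* sentinels above make it possible to check each name table
++ * against its enum at compile time.  A minimal sketch of such a guard
++ * (not part of this driver; the "ch_state_names_sized" name is
++ * hypothetical), using the negative-array-size trick available on
++ * 2.4-era compilers:
++ *
++ *	typedef char ch_state_names_sized[
++ *		(sizeof(ch_state_names) / sizeof(ch_state_names[0])
++ *		 == NR_CH_STATES) ? 1 : -1];
++ */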
++
++static int transmit_skb(channel *, struct sk_buff *);
++
++#if defined(DEBUGDATA) || defined(DEBUGXID)\
++ || defined(DEBUGCCW) || defined(DEBUGSEQ)
++/*--------------------------------------------------------------------*
++ * Dump buffer format                                                  *
++ *--------------------------------------------------------------------*/
++static void
++dumpit(char* buf, int len)
++{
++
++ __u32 ct, sw, rm, dup;
++ char *ptr, *rptr;
++ char tbuf[82], tdup[82];
++#ifdef CONFIG_ARCH_S390X
++ char addr[22];
++#else
++ char addr[12];
++#endif
++ char boff[12];
++ char bhex[82], duphex[82];
++ char basc[40];
++
++ sw = 0;
++	rptr = ptr = buf;
++ rm = 16;
++ duphex[0] = 0x00;
++ dup = 0;
++
++ for(ct=0; ct < len; ct++, ptr++, rptr++)
++ {
++ if(sw == 0)
++ {
++#ifdef CONFIG_ARCH_S390X
++ sprintf(addr, "%16.16lx",(unsigned long)rptr);
++#else
++ sprintf(addr, "%8.8X",(__u32)rptr);
++#endif
++ sprintf(boff, "%4.4X", (__u32)ct);
++ bhex[0] = '\0';
++ basc[0] = '\0';
++ }
++ if((sw == 4) || (sw == 12))
++ {
++ strcat(bhex, " ");
++ }
++ if(sw == 8)
++ {
++ strcat(bhex, " ");
++ }
++#ifdef CONFIG_ARCH_S390X
++ sprintf(tbuf,"%2.2lX", (unsigned long)*ptr);
++#else
++ sprintf(tbuf,"%2.2X", (__u32)*ptr);
++#endif
++ tbuf[2] = '\0';
++ strcat(bhex, tbuf);
++ if((0!=isprint(*ptr)) && (*ptr >= 0x20))
++ {
++ basc[sw] = *ptr;
++ } else
++ {
++ basc[sw] = '.';
++ }
++ basc[sw+1] = '\0';
++ sw++;
++ rm--;
++ if(sw==16)
++ {
++ if((strcmp(duphex, bhex)) !=0)
++ {
++ if(dup !=0)
++ {
++ sprintf(tdup,"Duplicate as above "
++ "to %s", addr);
++ printk( KERN_INFO " "
++ " --- %s ---\n",tdup);
++ }
++ printk( KERN_INFO " %s (+%s) : %s [%s]\n",
++ addr, boff, bhex, basc);
++ dup = 0;
++ strcpy(duphex, bhex);
++ } else
++ {
++ dup++;
++ }
++ sw = 0;
++ rm = 16;
++ }
++ } /* endfor */
++
++ if(sw != 0)
++ {
++ for(; rm > 0; rm--, sw++)
++ {
++ if((sw==4) || (sw==12)) strcat(bhex, " ");
++ if(sw==8) strcat(bhex, " ");
++ strcat(bhex, " ");
++ strcat(basc, " ");
++ }
++ if(dup !=0)
++ {
++ sprintf(tdup,"Duplicate as above to %s", addr);
++ printk( KERN_INFO " "
++ " --- %s ---\n",tdup);
++ }
++ printk( KERN_INFO " %s (+%s) : %s [%s]\n",
++ addr, boff, bhex, basc);
++ } else
++ {
++ if(dup >=1)
++ {
++ sprintf(tdup,"Duplicate as above to %s", addr);
++ printk( KERN_INFO " "
++ " --- %s ---\n",tdup);
++ }
++ if(dup !=0)
++ {
++ printk( KERN_INFO " %s (+%s) : %s [%s]\n",
++ addr, boff, bhex, basc);
++ }
++ }
++
++ return;
++
++} /* end of dumpit */
++
++#endif
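++
++/**
++ * dumpit() usage sketch: it prints a hex/ASCII dump of an arbitrary
++ * buffer, 16 bytes per line, collapsing runs of identical lines into
++ * a single "Duplicate as above" marker, e.g.:
++ *
++ *	dumpit((char *)skb->data, skb->len);
++ */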
++
++#ifdef DEBUGDATA
++/**
++ * Dump header and first 16 bytes of an sk_buff for debugging purposes.
++ *
++ * @param skb The sk_buff to dump.
++ * @param offset Offset relative to skb-data, where to start the dump.
++ */
++static void
++ctcmpc_dump_skb(struct sk_buff *skb, int offset)
++{
++ unsigned char *p = skb->data;
++ th_header *header;
++ pdu *pheader;
++ int bl = skb->len;
++ int i;
++
++ if(p == NULL) return;
++ p += offset;
++ header = (th_header *)p;
++
++ printk(KERN_INFO "dump:\n");
++ printk(KERN_INFO "skb len=%d \n", skb->len);
++ if(skb->len > 2)
++ {
++ switch(header->th_ch_flag)
++ {
++ case TH_HAS_PDU:
++ break;
++ case 0x00:
++ case TH_IS_XID:
++ if((header->th_blk_flag == TH_DATA_IS_XID) &&
++ (header->th_is_xid == 0x01))
++ goto dumpth;
++				goto dumpth;
++			/* fall through */
++ goto dumpth;
++ case TH_SWEEP_RESP:
++ goto dumpth;
++ default:
++ break;
++
++ }
++
++ pheader = (pdu *)p;
++ printk(KERN_INFO "pdu->offset: %d hex: %04x\n",
++ pheader->pdu_offset,pheader->pdu_offset);
++ printk(KERN_INFO "pdu->flag : %02x\n",pheader->pdu_flag);
++ printk(KERN_INFO "pdu->proto : %02x\n",pheader->pdu_proto);
++ printk(KERN_INFO "pdu->seq : %02x\n",pheader->pdu_seq);
++ goto dumpdata;
++
++ dumpth:
++ printk(KERN_INFO "th->seg : %02x\n", header->th_seg);
++ printk(KERN_INFO "th->ch : %02x\n", header->th_ch_flag);
++ printk(KERN_INFO "th->blk_flag: %02x\n", header->th_blk_flag);
++ printk(KERN_INFO "th->type : %s\n",
++		       (header->th_is_xid) ? "XID" : "DATA");
++ printk(KERN_INFO "th->seqnum : %04x\n", header->th_seq_num);
++
++	} /* the header is only dumped when the length is greater than 2 */
++ dumpdata:
++
++ if(bl > 32)
++ bl = 32;
++ printk(KERN_INFO "data: ");
++ for(i = 0; i < bl; i++)
++ printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>");
++ printk("\n");
++}
++
++#endif
++
++/**
++ * Dummy NOP action for statemachines
++ */
++static void
++fsm_action_nop(fsm_instance *fi, int event, void *arg)
++{
++}
++
++static int ctcmpc_open(net_device *);
++static void ctcmpc_ch_action_rxidle(fsm_instance *fi, int event, void *arg);
++static void ctcmpc_ch_action_txidle(fsm_instance *fi, int event, void *arg);
++static inline void ccw_check_return_code(channel *, int);
++
++
++/*
++	ctc_mpc_alloc_channel
++	Device initialization for ACTPATH-driven I/O operations.
++*/
++int
++ctc_mpc_alloc_channel(int port_num,callbacktypei2 callback)
++{
++ char device[20];
++ char *devnam = "mpc";
++ net_device *dev = NULL;
++ mpc_group *grpptr;
++ ctc_priv *privptr;
++
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ sprintf(device, "%s%i",devnam,port_num);
++ dev = __dev_get_by_name(device);
++
++ if(dev == NULL)
++ {
++ printk(KERN_INFO "ctc_mpc_alloc_channel %s dev=NULL\n",device);
++ return(1);
++ }
++
++
++ privptr = (ctc_priv *)dev->priv;
++ grpptr = privptr->mpcg;
++ if(!grpptr)
++ return(1);
++
++ grpptr->allochanfunc = callback;
++ grpptr->port_num = port_num;
++ grpptr->port_persist = 1;
++
++ printk(KERN_INFO "%s: %s called for device %s refcount=%d state=%s\n",
++ dev->name,
++ __FUNCTION__,
++ dev->name,
++ atomic_read(&dev->refcnt),
++ fsm_getstate_str(grpptr->fsm));
++
++ switch(fsm_getstate(grpptr->fsm))
++ {
++ case MPCG_STATE_INOP:
++ /* Group is in the process of terminating */
++ grpptr->alloc_called = 1;
++ break;
++ case MPCG_STATE_RESET:
++		/* The MPC group will transition to state */
++		/* MPCG_STATE_XID2INITW once at least one read */
++		/* and one write channel have successfully */
++		/* activated */
++		/*fsm_newstate(grpptr->fsm, MPCG_STATE_XID2INITW);*/
++		if(callback)
++			grpptr->send_qllc_disc = 1;
++		/* fall through */
++	case MPCG_STATE_XID0IOWAIT:
++ fsm_deltimer(&grpptr->timer);
++ grpptr->outstanding_xid2 = 0;
++ grpptr->outstanding_xid7 = 0;
++ grpptr->outstanding_xid7_p2 = 0;
++ grpptr->saved_xid2 = NULL;
++ if(callback)
++ ctcmpc_open(dev);
++ fsm_event(((ctc_priv *)dev->priv)->fsm,
++ DEV_EVENT_START, dev);
++		break;
++ case MPCG_STATE_READY:
++ /* XID exchanges completed after PORT was activated */
++ /* Link station already active */
++ /* Maybe timing issue...retry callback */
++ grpptr->allocchan_callback_retries++;
++ if(grpptr->allocchan_callback_retries < 4)
++ {
++ if(grpptr->allochanfunc)
++ grpptr->allochanfunc(grpptr->port_num,
++ grpptr->group_max_buflen);
++ } else
++ {
++ /* there are problems...bail out */
++ /* there may be a state mismatch so restart */
++ grpptr->port_persist = 1;
++ fsm_event(grpptr->fsm,MPCG_EVENT_INOP,dev);
++ grpptr->allocchan_callback_retries = 0;
++ }
++ break;
++ default:
++ return(0);
++
++ }
++
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++ return(0);
++}
++
++
++void
++ctc_mpc_establish_connectivity(int port_num, callbacktypei3 callback)
++{
++
++ char device[20];
++ char *devnam = "mpc";
++ net_device *dev = NULL;
++ mpc_group *grpptr;
++ ctc_priv *privptr;
++ channel *rch,*wch;
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++
++#endif
++
++ sprintf(device,"%s%i",devnam,port_num);
++ dev = __dev_get_by_name(device);
++
++ if(dev == NULL)
++ {
++ printk(KERN_INFO "ctc_mpc_establish_connectivity %s dev=NULL\n"
++ ,device);
++ return;
++ }
++ privptr = (ctc_priv *)dev->priv;
++ rch = privptr->channel[READ];
++ wch = privptr->channel[WRITE];
++
++ grpptr = privptr->mpcg;
++
++ printk(KERN_INFO "%s: %s called for device %s refcount=%d state=%s\n",
++ dev->name,
++ __FUNCTION__,
++ dev->name,
++ atomic_read(&dev->refcnt),
++ fsm_getstate_str(grpptr->fsm));
++
++ grpptr->estconnfunc = callback;
++ grpptr->port_num = port_num;
++
++ switch(fsm_getstate(grpptr->fsm))
++ {
++ case MPCG_STATE_READY:
++ /* XID exchanges completed after PORT was activated */
++ /* Link station already active */
++ /* Maybe timing issue...retry callback */
++ fsm_deltimer(&grpptr->timer);
++ grpptr->estconn_callback_retries++;
++ if(grpptr->estconn_callback_retries < 4)
++ {
++ if(grpptr->estconnfunc)
++ {
++ grpptr->estconnfunc(grpptr->port_num,0,
++ grpptr->group_max_buflen);
++ grpptr->estconnfunc = NULL;
++ }
++ } else
++ {
++ /* there are problems...bail out */
++ fsm_event(grpptr->fsm,MPCG_EVENT_INOP,dev);
++ grpptr->estconn_callback_retries = 0;
++ }
++ break;
++ case MPCG_STATE_INOP:
++ case MPCG_STATE_RESET:
++ /* MPC Group is not ready to start XID - min number of*/
++ /* 1 read and 1 write channel have not been acquired */
++ printk(KERN_WARNING "ctcmpc: %s() REJECTED ACTIVE XID"
++ " Request - Channel Pair is not Active\n",
++ __FUNCTION__);
++ if(grpptr->estconnfunc)
++ {
++ grpptr->estconnfunc(grpptr->port_num,-1,0);
++ grpptr->estconnfunc = NULL;
++ }
++ break;
++ case MPCG_STATE_XID2INITW:
++ /* alloc channel was called but no XID exchange */
++ /* has occurred. initiate xside XID exchange */
++ /* make sure yside XID0 processing has not started */
++ if((fsm_getstate(rch->fsm) > CH_XID0_PENDING) ||
++ (fsm_getstate(wch->fsm) > CH_XID0_PENDING))
++ {
++ printk(KERN_WARNING "ctcmpc: %s() ABORT ACTIVE"
++ " XID Request - PASSIVE XID already in "
++ "process\n",
++ __FUNCTION__);
++ break;
++ }
++ grpptr->send_qllc_disc = 1;
++ fsm_newstate(grpptr->fsm, MPCG_STATE_XID0IOWAIT);
++ fsm_deltimer(&grpptr->timer);
++ fsm_addtimer(&grpptr->timer,
++ MPC_XID_TIMEOUT_VALUE,
++ MPCG_EVENT_TIMER,
++ dev);
++ grpptr->outstanding_xid7 = 0;
++ grpptr->outstanding_xid7_p2 = 0;
++ grpptr->saved_xid2 = NULL;
++ if((rch->in_mpcgroup) &&
++ (fsm_getstate(rch->fsm) == CH_XID0_PENDING))
++ fsm_event(grpptr->fsm, MPCG_EVENT_XID0DO, rch);
++ else
++ {
++ printk(KERN_WARNING "ctcmpc: %s() Unable to"
++ " start ACTIVE XID0 on read channel\n",
++ __FUNCTION__);
++ if(grpptr->estconnfunc)
++ {
++ grpptr->estconnfunc(grpptr->port_num,
++ -1,
++ 0);
++ grpptr->estconnfunc = NULL;
++ }
++ fsm_deltimer(&grpptr->timer);
++ goto done;
++ }
++ if((wch->in_mpcgroup) &&
++ (fsm_getstate(wch->fsm) == CH_XID0_PENDING))
++ fsm_event(grpptr->fsm, MPCG_EVENT_XID0DO, wch);
++ else
++ {
++ printk(KERN_WARNING "ctcmpc: %s() Unable to "
++ "start ACTIVE XID0 on write channel\n",
++ __FUNCTION__);
++ if(grpptr->estconnfunc)
++ {
++ grpptr->estconnfunc(grpptr->port_num,
++ -1,0);
++ grpptr->estconnfunc = NULL;
++ }
++ fsm_deltimer(&grpptr->timer);
++ goto done;
++
++ }
++ break;
++ case MPCG_STATE_XID0IOWAIT:
++ /* already in active XID negotiations */
++ default:
++ break;
++ }
++
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++ return;
++}
++
++static int ctcmpc_close(net_device *);
++
++void
++ctc_mpc_dealloc_ch(int port_num)
++{
++ net_device *dev;
++ char device[20];
++ char *devnam = "mpc";
++ ctc_priv *privptr;
++ mpc_group *grpptr;
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ sprintf(device,"%s%i",devnam,port_num);
++ dev = __dev_get_by_name(device);
++
++ if(dev == NULL)
++ {
++ printk(KERN_INFO "%s() %s dev=NULL\n",__FUNCTION__,device);
++ goto done;
++ }
++
++ printk(KERN_INFO "%s: %s called for device %s refcount=%d\n",
++ dev->name,__FUNCTION__,dev->name,atomic_read(&dev->refcnt));
++
++ privptr = (ctc_priv *)dev->priv;
++ if(privptr == NULL)
++ {
++ printk(KERN_INFO "%s() %s privptr=NULL\n",__FUNCTION__,device);
++ goto done;
++ }
++ fsm_deltimer(&privptr->restart_timer);
++
++ grpptr = privptr->mpcg;
++ if(grpptr == NULL)
++ {
++ printk(KERN_INFO "%s() %s dev=NULL\n",__FUNCTION__,device);
++ goto done;
++ }
++ grpptr->channels_terminating = 0;
++
++ fsm_deltimer(&grpptr->timer);
++
++ grpptr->allochanfunc = NULL;
++ grpptr->estconnfunc = NULL;
++
++ grpptr->port_persist = 0;
++
++ grpptr->send_qllc_disc = 0;
++
++ fsm_event(grpptr->fsm,MPCG_EVENT_INOP,dev);
++
++ ctcmpc_close(dev);
++
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++ return;
++}
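++
++/**
++ * A minimal usage sketch for the three exported entry points above.
++ * Illustrative only: the "my_*" names are hypothetical, and the
++ * callback signatures are inferred from the call sites above
++ * (allochanfunc(port, max_buflen) and estconnfunc(port, rc,
++ * max_buflen)).
++ */
++#if 0	/* illustrative only */
++static void my_alloc_done(int port_num, int max_buflen)
++{
++	printk(KERN_INFO "mpc port %d allocated, maxbuf=%d\n",
++	       port_num, max_buflen);
++}
++
++static void my_conn_done(int port_num, int rc, int max_buflen)
++{
++	if(rc == 0)
++		printk(KERN_INFO "mpc port %d connected, maxbuf=%d\n",
++		       port_num, max_buflen);
++}
++
++static void my_bringup(void)
++{
++	if(ctc_mpc_alloc_channel(0, my_alloc_done) == 0)
++		ctc_mpc_establish_connectivity(0, my_conn_done);
++}
++
++static void my_teardown(void)
++{
++	ctc_mpc_dealloc_ch(0);
++}
++#endif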
++
++void
++ctc_mpc_flow_control(int port_num,int flowc)
++{
++ char device[20];
++ char *devnam = "mpc";
++ ctc_priv *privptr;
++ mpc_group *grpptr;
++ net_device *dev;
++ channel *rch = NULL;
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s() %i\n", __FUNCTION__,flowc);
++#endif
++
++ sprintf(device,"%s%i",devnam,port_num);
++ dev = __dev_get_by_name(device);
++
++ if(dev == NULL)
++ {
++ printk(KERN_INFO "ctc_mpc_flow_control %s dev=NULL\n",device);
++ return;
++ }
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s: %s called \n",
++ dev->name,__FUNCTION__);
++#endif
++
++ privptr = (ctc_priv *)dev->priv;
++ if(privptr == NULL)
++ {
++ printk(KERN_INFO "ctc_mpc_flow_control %s privptr=NULL\n",
++ device);
++ return;
++ }
++ grpptr = privptr->mpcg;
++ rch = privptr->channel[READ];
++
++ switch(flowc)
++ {
++ case 1:
++ if(fsm_getstate(grpptr->fsm) == MPCG_STATE_FLOWC)
++ break;
++ if(fsm_getstate(grpptr->fsm) == MPCG_STATE_READY)
++ {
++ if(grpptr->flow_off_called == 1)
++ grpptr->flow_off_called = 0;
++ else
++ fsm_newstate(grpptr->fsm,
++ MPCG_STATE_FLOWC);
++ break;
++ }
++ break;
++ case 0:
++ if(fsm_getstate(grpptr->fsm) == MPCG_STATE_FLOWC)
++ {
++ fsm_newstate(grpptr->fsm, MPCG_STATE_READY);
++ /* ensure any data that has accumulated */
++ /* on the io_queue will now be sent */
++ tasklet_schedule(&rch->ch_tasklet);
++ }
++ /* possible race condition */
++ if(fsm_getstate(grpptr->fsm) == MPCG_STATE_READY)
++ {
++ grpptr->flow_off_called = 1;
++ break;
++ }
++ break;
++ }
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s() %i\n", __FUNCTION__,flowc);
++#endif
++
++
++}
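++
++/**
++ * Flow-control usage sketch (hypothetical caller).  As coded above,
++ * flowc=1 moves a READY group into MPCG_STATE_FLOWC, pausing inbound
++ * delivery, and flowc=0 moves it back to READY and schedules the read
++ * channel's tasklet so queued data drains:
++ *
++ *	ctc_mpc_flow_control(0, 1);	(receiver congested: pause)
++ *	... consume backlog ...
++ *	ctc_mpc_flow_control(0, 0);	(resume delivery)
++ */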
++
++static int ctcmpc_send_qllc_discontact(net_device *);
++/*********************************************************************/
++/*
++   Invoked when the device transitions to dev_stopped.
++   MPC will stop each individual channel if a single XID failure
++   occurs, or will initiate stopping of all channels if a
++   GROUP-level failure occurs.
++*/
++/*********************************************************************/
++
++static void
++ctcmpc_action_go_inop(fsm_instance *fi, int event, void *arg)
++{
++ net_device *dev = (net_device *)arg;
++ ctc_priv *privptr;
++ mpc_group *grpptr;
++ int rc = 0;
++ channel *wch,*rch;
++
++ if(dev == NULL)
++ {
++ printk(KERN_INFO "%s() dev=NULL\n",__FUNCTION__);
++ return;
++ }
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s enter: %s()\n", dev->name,__FUNCTION__);
++#endif
++
++ privptr = (ctc_priv *)dev->priv;
++ grpptr = privptr->mpcg;
++ grpptr->flow_off_called = 0;
++
++ fsm_deltimer(&grpptr->timer);
++
++ if(grpptr->channels_terminating)
++ goto done;
++
++ grpptr->channels_terminating = 1;
++
++ grpptr->saved_state = fsm_getstate(grpptr->fsm);
++ fsm_newstate(grpptr->fsm,MPCG_STATE_INOP);
++ if(grpptr->saved_state > MPCG_STATE_XID7INITF)
++ printk(KERN_NOTICE "%s:MPC GROUP INOPERATIVE\n", dev->name);
++	if((grpptr->saved_state != MPCG_STATE_RESET) ||
++	   /* dealloc_channel has been called */
++	   (grpptr->port_persist == 0))
++ fsm_deltimer(&privptr->restart_timer);
++
++ wch = privptr->channel[WRITE];
++ rch = privptr->channel[READ];
++
++ switch(grpptr->saved_state)
++ {
++ case MPCG_STATE_RESET:
++ case MPCG_STATE_INOP:
++ case MPCG_STATE_XID2INITW:
++ case MPCG_STATE_XID0IOWAIT:
++ case MPCG_STATE_XID2INITX:
++ case MPCG_STATE_XID7INITW:
++ case MPCG_STATE_XID7INITX:
++ case MPCG_STATE_XID0IOWAIX:
++ case MPCG_STATE_XID7INITI:
++ case MPCG_STATE_XID7INITZ:
++ case MPCG_STATE_XID7INITF:
++ break;
++ case MPCG_STATE_FLOWC:
++ case MPCG_STATE_READY:
++ default:
++ tasklet_hi_schedule(&wch->ch_disc_tasklet);
++ }
++
++ grpptr->xid2_tgnum = 0;
++ grpptr->group_max_buflen = 0; /*min of all received */
++ grpptr->outstanding_xid2 = 0;
++ grpptr->outstanding_xid7 = 0;
++ grpptr->outstanding_xid7_p2 = 0;
++ grpptr->saved_xid2 = NULL;
++ grpptr->xidnogood = 0;
++ grpptr->changed_side = 0;
++
++ grpptr->rcvd_xid_skb->data =
++ grpptr->rcvd_xid_skb->tail = grpptr->rcvd_xid_data;
++ grpptr->rcvd_xid_skb->len = 0;
++ grpptr->rcvd_xid_th = (th_header *)grpptr->rcvd_xid_skb->data;
++ memcpy(skb_put(grpptr->rcvd_xid_skb,TH_HEADER_LENGTH),
++ &thnorm,
++ TH_HEADER_LENGTH);
++
++ if(grpptr->send_qllc_disc == 1)
++ {
++ grpptr->send_qllc_disc = 0;
++ rc = ctcmpc_send_qllc_discontact(dev);
++ }
++
++	/* DO NOT issue DEV_EVENT_STOP directly out of this code */
++	/* This can result in INOP of VTAM PU due to halting of  */
++	/* outstanding IO which causes a sense to be returned    */
++	/* Only about 3 senses are allowed and then IOS/VTAM will*/
++	/* become unreachable without manual intervention        */
++ if((grpptr->port_persist == 1) || (grpptr->alloc_called))
++ {
++ grpptr->alloc_called = 0;
++ fsm_deltimer(&privptr->restart_timer);
++ fsm_addtimer(&privptr->restart_timer,
++ 500,
++ DEV_EVENT_RESTART,
++ dev);
++ fsm_newstate(grpptr->fsm, MPCG_STATE_RESET);
++ if(grpptr->saved_state > MPCG_STATE_XID7INITF)
++ printk(KERN_NOTICE "%s:MPC GROUP RECOVERY SCHEDULED\n",
++ dev->name);
++ } else
++ {
++ fsm_deltimer(&privptr->restart_timer);
++ fsm_addtimer(&privptr->restart_timer, 500, DEV_EVENT_STOP, dev);
++ fsm_newstate(grpptr->fsm, MPCG_STATE_RESET);
++ printk(KERN_NOTICE "%s:MPC GROUP RECOVERY NOT ATTEMPTED\n",
++ dev->name);
++ }
++
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO "%s exit: %s()\n", dev->name,__FUNCTION__);
++#endif
++
++ return;
++}
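++
++/**
++ * Recovery policy implemented above, in short: if the port should
++ * persist (or an allocate call was pending), DEV_EVENT_RESTART is
++ * scheduled on a 500-unit timer (assumed to be milliseconds);
++ * otherwise DEV_EVENT_STOP is scheduled instead and no recovery is
++ * attempted.  Either way the group first returns to MPCG_STATE_RESET.
++ */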
++
++
++
++static void
++ctcmpc_action_timeout(fsm_instance *fi, int event, void *arg)
++{
++ net_device *dev = (net_device *)arg;
++ ctc_priv *privptr;
++ mpc_group *grpptr;
++ channel *wch;
++ channel *rch;
++
++ if(dev == NULL)
++ {
++ printk(KERN_INFO "%s() dev=NULL\n",__FUNCTION__);
++ return;
++ }
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s enter: %s()\n", dev->name,__FUNCTION__);
++#endif
++
++ privptr = (ctc_priv *)dev->priv;
++ grpptr = privptr->mpcg;
++ wch = privptr->channel[WRITE];
++ rch = privptr->channel[READ];
++
++
++ switch(fsm_getstate(grpptr->fsm))
++ {
++ case MPCG_STATE_XID2INITW:
++ /* Unless there is outstanding IO on the */
++ /* channel just return and wait for ATTN */
++ /* interrupt to begin XID negotiations */
++ if((fsm_getstate(rch->fsm) == CH_XID0_PENDING) &&
++ (fsm_getstate(wch->fsm) == CH_XID0_PENDING))
++ break;
++ default:
++ fsm_event(grpptr->fsm,MPCG_EVENT_INOP,dev);
++ }
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s exit: %s()\n", dev->name,__FUNCTION__);
++#endif
++
++
++ return;
++}
++
++
++static void
++ctcmpc_action_discontact(fsm_instance *fi, int event, void *arg)
++{
++	mpcg_info  *mpcginfo = (mpcg_info *)arg;
++	channel    *ch = mpcginfo->ch;
++	net_device *dev;
++	ctc_priv   *privptr;
++	mpc_group  *grpptr;
++
++	if(ch == NULL)
++	{
++		printk(KERN_INFO "%s() ch=NULL\n",__FUNCTION__);
++		return;
++	}
++	dev = ch->netdev;
++	if(dev == NULL)
++	{
++		printk(KERN_INFO "%s() dev=NULL, irq=%d\n",__FUNCTION__,
++		       ch->irq);
++		return;
++	}
++	privptr = (ctc_priv *)dev->priv;
++	grpptr = privptr->mpcg;
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s enter: %s()\n", dev->name,__FUNCTION__);
++#endif
++
++ grpptr->send_qllc_disc = 1;
++ fsm_event(grpptr->fsm,MPCG_EVENT_INOP,dev);
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s exit: %s()\n", dev->name,__FUNCTION__);
++#endif
++
++ return;
++}
++
++static void
++ctcmpc_action_send_discontact(unsigned long thischan)
++{
++ channel *ch = (channel *)thischan;
++ int rc = 0;
++ unsigned long saveflags;
++#ifdef DEBUG
++ net_device *dev = ch->netdev;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++	mpc_group *grpptr = privptr->mpcg;
++#endif
++
++
++#ifdef DEBUG
++
++ printk(KERN_INFO "%s cp:%i enter: %s() irq=%d GrpState:%s ChState:%s\n",
++ dev->name,
++ smp_processor_id(),
++ __FUNCTION__,
++ ch->irq,
++ fsm_getstate_str(grpptr->fsm),
++ fsm_getstate_str(ch->fsm));
++#endif
++
++ s390irq_spin_lock_irqsave(ch->irq,saveflags);
++ rc = do_IO(ch->irq, &ch->ccw[15], (intparm_t)ch, 0xff, 0);
++ s390irq_spin_unlock_irqrestore(ch->irq,saveflags);
++
++ if(rc != 0)
++ {
++#ifdef DEBUG
++ printk(KERN_INFO "%s() %04x do_IO failed \n",
++ __FUNCTION__,ch->devno);
++ ccw_check_return_code(ch, rc);
++#endif
++ /* Not checking return code value here */
++ /* Making best effort to notify partner*/
++ /* that MPC Group is going down */
++ }
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s exit: %s()\n", dev->name,__FUNCTION__);
++#endif
++
++ return;
++}
++
++
++
++static int
++ctcmpc_validate_xid(mpcg_info *mpcginfo)
++{
++ channel *ch = mpcginfo->ch;
++ net_device *dev = ch->netdev;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group *grpptr = privptr->mpcg;
++ xid2 *xid = mpcginfo->xid;
++ int failed = 0;
++ int rc = 0;
++ __u64 our_id,their_id = 0;
++ int len;
++
++ len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++
++ if(mpcginfo->xid == NULL)
++ {
++ printk(KERN_INFO "%s() xid=NULL, irq=%d\n",__FUNCTION__,
++ ch->irq);
++ rc = 1;
++ goto done;
++ }
++
++#ifdef DEBUGXID
++ printk(KERN_INFO "ctcmpc : %s xid received()\n", __FUNCTION__);
++ dumpit((char *)mpcginfo->xid,XID2_LENGTH);
++#endif
++ /*the received direction should be the opposite of ours */
++ if(((CHANNEL_DIRECTION(ch->flags) == READ) ?
++ XID2_WRITE_SIDE : XID2_READ_SIDE )
++ != xid->xid2_dlc_type)
++ {
++ failed = 1;
++ printk(KERN_INFO "%s XID REJECTED - READ-WRITE CH "
++ "Pairing Invalid \n",
++ __FUNCTION__);
++ }
++
++ if(xid->xid2_dlc_type == XID2_READ_SIDE)
++ {
++#ifdef DEBUGDATA
++ printk(KERN_INFO "%s(): grpmaxbuf:%d xid2buflen:%d\n",
++ __FUNCTION__,
++ grpptr->group_max_buflen,
++ xid->xid2_buf_len);
++#endif
++
++ if(grpptr->group_max_buflen == 0)
++ grpptr->group_max_buflen = xid->xid2_buf_len - len;
++ else
++ {
++ if((xid->xid2_buf_len - len) < grpptr->group_max_buflen)
++ {
++ grpptr->group_max_buflen =
++ xid->xid2_buf_len - len;
++ }
++ }
++
++ }
++
++ if(grpptr->saved_xid2 == NULL)
++ {
++ grpptr->saved_xid2 = (xid2 *)grpptr->rcvd_xid_skb->tail;
++ memcpy(skb_put(grpptr->rcvd_xid_skb,XID2_LENGTH),
++ xid,
++ XID2_LENGTH);
++ grpptr->rcvd_xid_skb->data =
++ grpptr->rcvd_xid_skb->tail =
++ grpptr->rcvd_xid_data;
++ grpptr->rcvd_xid_skb->len = 0;
++
++ /* convert two 32 bit numbers into 1 64 bit for id compare */
++ our_id = (__u64)privptr->xid->xid2_adj_id;
++ our_id = our_id << 32;
++ our_id = our_id + privptr->xid->xid2_sender_id;
++ their_id = (__u64)xid->xid2_adj_id;
++ their_id = their_id << 32;
++ their_id = their_id + xid->xid2_sender_id;
++		/* the lower id assumes the xside role */
++ if(our_id < their_id)
++ {
++ grpptr->roll = XSIDE;
++#ifdef DEBUGXID
++ printk(KERN_INFO "ctcmpc :%s() WE HAVE LOW "
++ "ID-TAKE XSIDE\n", __FUNCTION__);
++#endif
++ } else
++ {
++ grpptr->roll = YSIDE;
++#ifdef DEBUGXID
++ printk(KERN_INFO "ctcmpc :%s() WE HAVE HIGH "
++ "ID-TAKE YSIDE\n", __FUNCTION__);
++#endif
++ }
++
++ } else
++ {
++ if(xid->xid2_flag4 != grpptr->saved_xid2->xid2_flag4)
++ {
++ failed = 1;
++ printk(KERN_INFO "%s XID REJECTED - XID Flag Byte4\n",
++ __FUNCTION__);
++
++ }
++ if(xid->xid2_flag2 == 0x40)
++ {
++ failed = 1;
++ printk(KERN_INFO "%s XID REJECTED - XID NOGOOD\n",
++ __FUNCTION__);
++
++ }
++ if(xid->xid2_adj_id != grpptr->saved_xid2->xid2_adj_id)
++ {
++ failed = 1;
++ printk(KERN_INFO "%s XID REJECTED - "
++ "Adjacent Station ID Mismatch\n",
++ __FUNCTION__);
++
++ }
++ if(xid->xid2_sender_id != grpptr->saved_xid2->xid2_sender_id)
++ {
++ failed = 1;
++ printk(KERN_INFO "%s XID REJECTED - "
++ "Sender Address Mismatch\n",
++ __FUNCTION__);
++
++ }
++ }
++
++ if(failed)
++ {
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc : %s() failed\n", __FUNCTION__);
++#endif
++
++ privptr->xid->xid2_flag2 = 0x40;
++ grpptr->saved_xid2->xid2_flag2 = 0x40;
++ rc = 1;
++ goto done;
++ }
++
++ done:
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++ return(rc);
++}
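++
++/**
++ * Worked example of the 64-bit id comparison above (values are
++ * hypothetical): with xid2_adj_id=0x00000001, xid2_sender_id=0x000000A0
++ * locally and 0x00000001/0x000000B0 remotely,
++ * our_id = 0x00000001000000A0 < their_id = 0x00000001000000B0,
++ * so this side assumes the XSIDE (active) role and the peer becomes
++ * YSIDE.
++ */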
++
++
++static void
++ctcmpc_action_yside_xid(fsm_instance *fsm,int event, void *arg)
++{
++	channel *ch = (channel *)arg;
++	net_device *dev;
++	ctc_priv *privptr;
++	mpc_group *grpptr = NULL;
++	int rc = 0;
++	unsigned long saveflags;
++	int gotlock = 0;
++
++	if(ch == NULL)
++	{
++		printk(KERN_INFO "%s ch=NULL\n",__FUNCTION__);
++		return;
++	}
++
++#ifdef DEBUG
++	printk(KERN_INFO "ctcmpc cp:%i enter: %s() %04x\n",
++	       smp_processor_id(),__FUNCTION__,ch->devno);
++#endif
++
++	dev = ch->netdev;
++	if(dev == NULL)
++	{
++		printk(KERN_INFO "%s dev=NULL, irq=%d\n",
++		       __FUNCTION__,
++		       ch->irq);
++		goto done;
++	}
++
++ privptr = (ctc_priv *)dev->priv;
++ if(privptr == NULL)
++ {
++ printk(KERN_INFO "%s privptr=NULL, irq=%d\n",
++ __FUNCTION__,
++ ch->irq);
++ goto done;
++ }
++
++ grpptr = privptr->mpcg;
++ if(grpptr == NULL)
++ {
++ printk(KERN_INFO "%s grpptr=NULL, irq=%d\n",
++ __FUNCTION__,
++ ch->irq);
++ goto done;
++ }
++
++ if(ctcmpc_checkalloc_buffer(ch, 0))
++ {
++ rc = -ENOMEM;
++ goto done;
++ }
++
++
++ ch->trans_skb->data = ch->trans_skb->tail = ch->trans_skb_data;
++ ch->trans_skb->len = 0;
++ memset(ch->trans_skb->data, 0, 16);
++ ch->rcvd_xid_th = (th_header *)ch->trans_skb->data;
++ skb_put(ch->trans_skb,TH_HEADER_LENGTH);
++ ch->rcvd_xid = (xid2 *)ch->trans_skb->tail;
++ skb_put(ch->trans_skb,XID2_LENGTH);
++ ch->rcvd_xid_id = ch->trans_skb->tail;
++ ch->trans_skb->data = ch->trans_skb->tail = ch->trans_skb_data;
++ ch->trans_skb->len = 0;
++
++ ch->ccw[8].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
++ ch->ccw[8].count = 0;
++ ch->ccw[8].cda = 0x00;
++
++ if(ch->rcvd_xid_th == NULL)
++ {
++ printk(KERN_INFO "%s ch->rcvd_xid_th=NULL, irq=%d\n",
++ __FUNCTION__,
++ ch->irq);
++ goto done;
++ }
++ ch->ccw[9].cmd_code = CCW_CMD_READ;
++ ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
++ ch->ccw[9].count = TH_HEADER_LENGTH;
++ ch->ccw[9].cda = virt_to_phys(ch->rcvd_xid_th);
++
++ if(ch->rcvd_xid == NULL)
++ {
++ printk(KERN_INFO "%s ch->rcvd_xid=NULL, irq=%d\n",
++ __FUNCTION__,
++ ch->irq);
++ goto done;
++ }
++ ch->ccw[10].cmd_code = CCW_CMD_READ;
++ ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
++ ch->ccw[10].count = XID2_LENGTH;
++ ch->ccw[10].cda = virt_to_phys(ch->rcvd_xid);
++
++ if(ch->xid_th == NULL)
++ {
++ printk(KERN_INFO "%s ch->xid_th=NULL, irq=%d\n",
++ __FUNCTION__,
++ ch->irq);
++ goto done;
++ }
++ ch->ccw[11].cmd_code = CCW_CMD_WRITE;
++ ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
++ ch->ccw[11].count = TH_HEADER_LENGTH;
++ ch->ccw[11].cda = virt_to_phys(ch->xid_th);
++
++ if(ch->xid == NULL)
++ {
++ printk(KERN_INFO "%s ch->xid=NULL, irq=%d\n",
++ __FUNCTION__,
++ ch->irq);
++ goto done;
++ }
++
++ ch->ccw[12].cmd_code = CCW_CMD_WRITE;
++ ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
++ ch->ccw[12].count = XID2_LENGTH;
++ ch->ccw[12].cda = virt_to_phys(ch->xid);
++
++ if(ch->xid_id == NULL)
++ {
++ printk(KERN_INFO "%s ch->xid_id=NULL, irq=%d\n",
++ __FUNCTION__,
++ ch->irq);
++ goto done;
++ }
++ ch->ccw[13].cmd_code = CCW_CMD_WRITE;
++ ch->ccw[13].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
++ ch->ccw[13].count = 4;
++ ch->ccw[13].cda = virt_to_phys(ch->xid_id);
++
++ ch->ccw[14].cmd_code = CCW_CMD_NOOP;
++ ch->ccw[14].flags = CCW_FLAG_SLI;
++ ch->ccw[14].count = 0;
++ ch->ccw[14].cda = 0;
++
++
++#ifdef DEBUGCCW
++ dumpit((char *)&ch->ccw[8],sizeof(ccw1_t) * 7);
++#endif
++#ifdef DEBUGXID
++ dumpit((char *)ch->xid_th,TH_HEADER_LENGTH);
++ dumpit((char *)ch->xid,XID2_LENGTH);
++ dumpit((char *)ch->xid_id,4);
++#endif
++
++
++ if(!in_irq())
++ {
++ s390irq_spin_lock_irqsave(ch->irq,saveflags);
++ gotlock = 1;
++ }
++
++ fsm_addtimer(&ch->timer, 5000 , CH_EVENT_TIMER, ch);
++ rc = do_IO(ch->irq, &ch->ccw[8], (intparm_t)ch, 0xff, 0);
++
++ if(gotlock)
++ s390irq_spin_unlock_irqrestore(ch->irq,saveflags);
++
++ if(rc != 0)
++ {
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc: %s() %04x do_IO failed \n",
++ __FUNCTION__,ch->devno);
++#endif
++ ccw_check_return_code(ch, rc);
++ goto done;
++ }
++
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s() %04x\n",
++ __FUNCTION__, ch->devno);
++#endif
++
++ return;
++
++}
++
++static void
++ctcmpc_action_doxid0(fsm_instance *fsm,int event, void *arg)
++{
++	channel *ch = (channel *)arg;
++	net_device *dev;
++	ctc_priv *privptr;
++	mpc_group *grpptr = NULL;
++
++	if(ch == NULL)
++	{
++		printk(KERN_WARNING "%s ch=NULL\n",__FUNCTION__);
++		return;
++	}
++
++#ifdef DEBUG
++	printk(KERN_INFO "ctcmpc enter: %s() %04x\n",
++	       __FUNCTION__, ch->devno);
++#endif
++
++	dev = ch->netdev;
++	if(dev == NULL)
++	{
++		printk(KERN_WARNING "%s dev=NULL, irq=%d\n",
++		       __FUNCTION__, ch->irq);
++		goto done;
++	}
++
++ privptr = (ctc_priv *)dev->priv;
++ if(privptr == NULL)
++ {
++ printk(KERN_WARNING "%s privptr=NULL, irq=%d\n",
++ __FUNCTION__, ch->irq);
++ goto done;
++ }
++
++ grpptr = privptr->mpcg;
++ if(grpptr == NULL)
++ {
++ printk(KERN_WARNING "%s grpptr=NULL, irq=%d\n",
++ __FUNCTION__, ch->irq);
++ goto done;
++ }
++
++
++ if(ch->xid == NULL)
++ {
++ printk(KERN_WARNING "%s ch-xid=NULL, irq=%d\n",
++ __FUNCTION__,ch->irq);
++ goto done;
++ }
++
++ fsm_newstate(ch->fsm, CH_XID0_INPROGRESS);
++
++ ch->xid->xid2_option = XID2_0;
++
++ switch(fsm_getstate(grpptr->fsm))
++ {
++ case MPCG_STATE_XID2INITW:
++ case MPCG_STATE_XID2INITX:
++ ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
++ break;
++ case MPCG_STATE_XID0IOWAIT:
++ case MPCG_STATE_XID0IOWAIX:
++ ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
++ break;
++ }
++
++ fsm_event(grpptr->fsm,MPCG_EVENT_DOIO,ch);
++
++
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s() %04x\n",
++ __FUNCTION__,ch->devno);
++#endif
++
++ return;
++
++}
++
++
++
++static void
++ctcmpc_action_doxid7(fsm_instance *fsm,int event, void *arg)
++{
++	net_device *dev = (net_device *)arg;
++	ctc_priv *privptr;
++	mpc_group *grpptr = NULL;
++	int direction;
++	int rc = 0;
++	int send = 0;
++
++#ifdef DEBUG
++	printk(KERN_INFO "ctcmpc enter: %s() \n", __FUNCTION__);
++#endif
++
++	if(dev == NULL)
++	{
++		printk(KERN_INFO "%s dev=NULL \n",__FUNCTION__);
++		rc = 1;
++		goto done;
++	}
++
++	privptr = (ctc_priv *)dev->priv;
++	if(privptr == NULL)
++	{
++		printk(KERN_INFO "%s privptr=NULL \n",__FUNCTION__);
++		rc = 1;
++		goto done;
++	}
++
++	grpptr = privptr->mpcg;
++	if(grpptr == NULL)
++	{
++		printk(KERN_INFO "%s grpptr=NULL \n",__FUNCTION__);
++		rc = 1;
++		goto done;
++	}
++
++ for(direction = READ; direction <= WRITE; direction++)
++ {
++ channel *ch = privptr->channel[direction];
++ xid2 *thisxid = ch->xid;
++ ch->xid_skb->data = ch->xid_skb->tail = ch->xid_skb_data;
++ ch->xid_skb->len = 0;
++ thisxid->xid2_option = XID2_7;
++ send = 0;
++
++ /* xid7 phase 1 */
++ if(grpptr->outstanding_xid7_p2 > 0)
++ {
++ if(grpptr->roll == YSIDE)
++ {
++ if(fsm_getstate(ch->fsm) == CH_XID7_PENDING1)
++ {
++ fsm_newstate(ch->fsm,CH_XID7_PENDING2);
++ ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
++ memcpy(skb_put(ch->xid_skb,
++ TH_HEADER_LENGTH),
++ &thdummy,TH_HEADER_LENGTH);
++ send = 1;
++ }
++ } else
++ {
++ if(fsm_getstate(ch->fsm) < CH_XID7_PENDING2)
++ {
++ fsm_newstate(ch->fsm,CH_XID7_PENDING2);
++ ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
++ memcpy(skb_put(ch->xid_skb,
++ TH_HEADER_LENGTH),
++ &thnorm,TH_HEADER_LENGTH);
++ send = 1;
++ }
++ }
++ }
++ /* xid7 phase 2 */
++ else
++ {
++ if(grpptr->roll == YSIDE)
++ {
++ if(fsm_getstate(ch->fsm) < CH_XID7_PENDING4)
++ {
++ fsm_newstate(ch->fsm,CH_XID7_PENDING4);
++ memcpy(skb_put(ch->xid_skb,
++ TH_HEADER_LENGTH),
++ &thnorm,TH_HEADER_LENGTH);
++ ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
++ send = 1;
++ }
++ } else
++ {
++ if(fsm_getstate(ch->fsm) == CH_XID7_PENDING3)
++ {
++ fsm_newstate(ch->fsm,CH_XID7_PENDING4);
++ ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
++ memcpy(skb_put(ch->xid_skb,
++ TH_HEADER_LENGTH),
++ &thdummy,TH_HEADER_LENGTH);
++ send = 1;
++ }
++ }
++ }
++
++ if(send)
++ fsm_event(grpptr->fsm,MPCG_EVENT_DOIO,ch);
++ }
++
++ done:
++
++	if((rc != 0) && (grpptr != NULL))
++ fsm_event(grpptr->fsm,MPCG_EVENT_INOP,dev);
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++ return;
++}
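++
++/**
++ * XID7 phasing above, summarized (derived from the state tests):
++ * in phase 1 (outstanding_xid7_p2 > 0) the YSIDE answers with thdummy
++ * via CCW_CMD_SENSE_CMD while the XSIDE sends thnorm via
++ * CCW_CMD_WRITE_CTL; phase 2 reverses the roles, with the YSIDE
++ * sending thnorm (WRITE_CTL) and the XSIDE answering with thdummy
++ * (SENSE_CMD).
++ */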
++
++
++
++static void
++ctcmpc_action_xside_xid(fsm_instance *fsm,int event, void *arg)
++{
++	channel *ch = (channel *)arg;
++	net_device *dev;
++	ctc_priv *privptr;
++	mpc_group *grpptr = NULL;
++	int rc = 0;
++	unsigned long saveflags;
++	int gotlock = 0;
++
++	if(ch == NULL)
++	{
++		printk(KERN_INFO "%s ch=NULL\n",__FUNCTION__);
++		return;
++	}
++
++#ifdef DEBUG
++	printk(KERN_INFO "ctcmpc cp:%i enter: %s() %04x\n",
++	       smp_processor_id(),__FUNCTION__,ch->devno);
++#endif
++
++	dev = ch->netdev;
++	if(dev == NULL)
++	{
++		printk(KERN_INFO "%s dev=NULL, irq=%d\n",__FUNCTION__,ch->irq);
++		goto done;
++	}
++
++ privptr = (ctc_priv *)dev->priv;
++ if(privptr == NULL)
++ {
++ printk(KERN_INFO "%s privptr=NULL, irq=%d\n",
++ __FUNCTION__,ch->irq);
++ goto done;
++ }
++
++ grpptr = privptr->mpcg;
++ if(grpptr == NULL)
++ {
++ printk(KERN_INFO "%s grpptr=NULL, irq=%d\n",
++ __FUNCTION__,ch->irq);
++ goto done;
++ }
++
++ if(ctcmpc_checkalloc_buffer(ch, 0))
++ {
++ rc = -ENOMEM;
++ goto done;
++ }
++
++ ch->trans_skb->data = ch->trans_skb->tail = ch->trans_skb_data;
++ ch->trans_skb->len = 0;
++ memset(ch->trans_skb->data, 0, 16);
++ ch->rcvd_xid_th = (th_header *)ch->trans_skb->data;
++ skb_put(ch->trans_skb,TH_HEADER_LENGTH);
++ ch->rcvd_xid = (xid2 *)ch->trans_skb->tail;
++ skb_put(ch->trans_skb,XID2_LENGTH);
++ ch->rcvd_xid_id = ch->trans_skb->tail;
++ ch->trans_skb->data = ch->trans_skb->tail = ch->trans_skb_data;
++ ch->trans_skb->len = 0;
++
++ ch->ccw[8].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
++ ch->ccw[8].count = 0;
++ ch->ccw[8].cda = 0x00; /* null */
++
++ if(ch->xid_th == NULL)
++ {
++ printk(KERN_INFO "%s ch->xid_th=NULL, irq=%d\n",
++ __FUNCTION__,ch->irq);
++ goto done;
++ }
++ ch->ccw[9].cmd_code = CCW_CMD_WRITE;
++ ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
++ ch->ccw[9].count = TH_HEADER_LENGTH;
++ ch->ccw[9].cda = virt_to_phys(ch->xid_th);
++
++ if(ch->xid == NULL)
++ {
++ printk(KERN_INFO "%s ch->xid=NULL, irq=%d\n",
++ __FUNCTION__,ch->irq);
++ goto done;
++ }
++
++ ch->ccw[10].cmd_code = CCW_CMD_WRITE;
++ ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
++ ch->ccw[10].count = XID2_LENGTH;
++ ch->ccw[10].cda = virt_to_phys(ch->xid);
++
++ if(ch->rcvd_xid_th == NULL)
++ {
++ printk(KERN_INFO "%s ch->rcvd_xid_th=NULL, irq=%d\n",
++ __FUNCTION__,ch->irq);
++ goto done;
++ }
++ ch->ccw[11].cmd_code = CCW_CMD_READ;
++ ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
++ ch->ccw[11].count = TH_HEADER_LENGTH;
++ ch->ccw[11].cda = virt_to_phys(ch->rcvd_xid_th);
++
++ if(ch->rcvd_xid == NULL)
++ {
++ printk(KERN_INFO "%s ch->rcvd_xid=NULL, irq=%d\n",
++ __FUNCTION__,ch->irq);
++ goto done;
++ }
++ ch->ccw[12].cmd_code = CCW_CMD_READ;
++ ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
++ ch->ccw[12].count = XID2_LENGTH;
++ ch->ccw[12].cda = virt_to_phys(ch->rcvd_xid);
++
++ if(ch->xid_id == NULL)
++ {
++ printk(KERN_INFO "%s ch->xid_id=NULL, irq=%d\n",
++ __FUNCTION__,ch->irq);
++ goto done;
++ }
++ ch->ccw[13].cmd_code = CCW_CMD_READ;
++ ch->ccw[13].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
++ ch->ccw[13].count = 4;
++ ch->ccw[13].cda = virt_to_phys(ch->rcvd_xid_id);
++
++ ch->ccw[14].cmd_code = CCW_CMD_NOOP;
++ ch->ccw[14].flags = CCW_FLAG_SLI;
++ ch->ccw[14].count = 0;
++ ch->ccw[14].cda = 0;
++
++#ifdef DEBUGCCW
++ dumpit((char *)&ch->ccw[8],sizeof(ccw1_t) * 7);
++#endif
++#ifdef DEBUGXID
++ dumpit((char *)ch->xid_th,TH_HEADER_LENGTH);
++ dumpit((char *)ch->xid,XID2_LENGTH);
++#endif
++
++ if(!in_irq())
++ {
++ s390irq_spin_lock_irqsave(ch->irq,saveflags);
++ gotlock = 1;
++ }
++
++ fsm_addtimer(&ch->timer, 5000 , CH_EVENT_TIMER, ch);
++ rc = do_IO(ch->irq, &ch->ccw[8], (intparm_t)ch, 0xff, 0);
++
++ if(gotlock)
++ s390irq_spin_unlock_irqrestore(ch->irq,saveflags);
++
++ if(rc != 0)
++ {
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc: %s() %04x do_IO failed \n",
++ __FUNCTION__,ch->devno);
++#endif
++ ccw_check_return_code(ch, rc);
++ goto done;
++ }
++
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s() %04x\n",
++ __FUNCTION__,ch->devno);
++#endif
++
++ return;
++}
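++
++/**
++ * Summary of the two XID CCW programs built above (ccw[8] is filled
++ * in by ctcmpc_action_doxid0):
++ *
++ *	          yside (passive)       xside (active)
++ *	ccw[9]    READ  TH header       WRITE TH header
++ *	ccw[10]   READ  XID2            WRITE XID2
++ *	ccw[11]   WRITE TH header       READ  TH header
++ *	ccw[12]   WRITE XID2            READ  XID2
++ *	ccw[13]   WRITE 4-byte id       READ  4-byte id
++ *	ccw[14]   NOOP                  NOOP
++ */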
++
++
++
++static void
++ctcmpc_action_rcvd_xid0(fsm_instance *fsm,int event, void *arg)
++{
++
++ mpcg_info *mpcginfo = (mpcg_info *)arg;
++ channel *ch = mpcginfo->ch;
++ net_device *dev = ch->netdev;
++ ctc_priv *privptr;
++ mpc_group *grpptr;
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s() %04x\n",
++ __FUNCTION__,ch->devno);
++#endif
++
++ privptr = (ctc_priv *)dev->priv;
++ grpptr = privptr->mpcg;
++
++#ifdef DEBUGXID
++ printk(KERN_INFO "ctcmpc in:%s() %04x xid2:%i xid7:%i xidt_p2:%i \n",
++ __FUNCTION__,ch->devno,
++ grpptr->outstanding_xid2,
++ grpptr->outstanding_xid7,
++ grpptr->outstanding_xid7_p2);
++#endif
++
++ if(fsm_getstate(ch->fsm) < CH_XID7_PENDING)
++ fsm_newstate(ch->fsm,CH_XID7_PENDING);
++
++ grpptr->outstanding_xid2--;
++ grpptr->outstanding_xid7++;
++ grpptr->outstanding_xid7_p2++;
++
++ /* must change state before validating xid to */
++ /* properly handle interim interrupts received*/
++ switch(fsm_getstate(grpptr->fsm))
++ {
++ case MPCG_STATE_XID2INITW:
++ fsm_newstate(grpptr->fsm,MPCG_STATE_XID2INITX);
++ ctcmpc_validate_xid(mpcginfo);
++ break;
++ case MPCG_STATE_XID0IOWAIT:
++ fsm_newstate(grpptr->fsm,MPCG_STATE_XID0IOWAIX);
++ ctcmpc_validate_xid(mpcginfo);
++ break;
++ case MPCG_STATE_XID2INITX:
++ if(grpptr->outstanding_xid2 == 0)
++ {
++ fsm_newstate(grpptr->fsm,MPCG_STATE_XID7INITW);
++ ctcmpc_validate_xid(mpcginfo);
++ fsm_event(grpptr->fsm,MPCG_EVENT_XID2DONE,dev);
++ }
++ break;
++ case MPCG_STATE_XID0IOWAIX:
++ if(grpptr->outstanding_xid2 == 0)
++ {
++ fsm_newstate(grpptr->fsm,MPCG_STATE_XID7INITI);
++ ctcmpc_validate_xid(mpcginfo);
++ fsm_event(grpptr->fsm,MPCG_EVENT_XID2DONE,dev);
++ }
++ break;
++ }
++ kfree(mpcginfo);
++
++#ifdef DEBUGXID
++ printk(KERN_INFO "ctcmpc out:%s() %04x xid2:%i xid7:%i xidt_p2:%i \n",
++ __FUNCTION__,ch->devno,
++ grpptr->outstanding_xid2,
++ grpptr->outstanding_xid7,
++ grpptr->outstanding_xid7_p2);
++ printk(KERN_INFO "ctcmpc out:%s() %04x groupstate: %s chanstate: %s \n",
++ __FUNCTION__,ch->devno,
++ fsm_getstate_str(grpptr->fsm),
++ fsm_getstate_str(ch->fsm));
++#endif
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s() %04x\n", __FUNCTION__,ch->devno);
++#endif
++
++ return;
++
++}
++
++
++static void
++ctcmpc_action_rcvd_xid7(fsm_instance *fsm,int event, void *arg)
++{
++
++ mpcg_info *mpcginfo = (mpcg_info *)arg;
++ channel *ch = mpcginfo->ch;
++ net_device *dev = ch->netdev;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group *grpptr = privptr->mpcg;
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s() %04x\n",
++ __FUNCTION__,ch->devno);
++#endif
++
++#ifdef DEBUGXID
++ printk(KERN_INFO "ctcmpc: outstanding_xid7: %i, "
++ "outstanding_xid7_p2: %i\n",
++ grpptr->outstanding_xid7,
++ grpptr->outstanding_xid7_p2);
++#endif
++
++
++ grpptr->outstanding_xid7--;
++
++ ch->xid_skb->data = ch->xid_skb->tail = ch->xid_skb_data;
++ ch->xid_skb->len = 0;
++
++ switch(fsm_getstate(grpptr->fsm))
++ {
++ case MPCG_STATE_XID7INITI:
++ fsm_newstate(grpptr->fsm,MPCG_STATE_XID7INITZ);
++ ctcmpc_validate_xid(mpcginfo);
++ break;
++ case MPCG_STATE_XID7INITW:
++ fsm_newstate(grpptr->fsm,MPCG_STATE_XID7INITX);
++ ctcmpc_validate_xid(mpcginfo);
++ break;
++ case MPCG_STATE_XID7INITZ:
++ case MPCG_STATE_XID7INITX:
++ if(grpptr->outstanding_xid7 == 0)
++ {
++ if(grpptr->outstanding_xid7_p2 > 0)
++ {
++ grpptr->outstanding_xid7 =
++ grpptr->outstanding_xid7_p2;
++ grpptr->outstanding_xid7_p2 = 0;
++ } else
++ fsm_newstate(grpptr->fsm,
++ MPCG_STATE_XID7INITF);
++ ctcmpc_validate_xid(mpcginfo);
++ fsm_event(grpptr->fsm,MPCG_EVENT_XID7DONE,dev);
++ break;
++ }
++ ctcmpc_validate_xid(mpcginfo);
++ break;
++ }
++
++ kfree(mpcginfo);
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s() %04x\n",
++ __FUNCTION__,ch->devno);
++#endif
++ return;
++
++}
++
++static void
++ctcmpc_action_attn(fsm_instance *fsm,int event, void *arg)
++{
++ channel *ch = (channel *)arg;
++ net_device *dev = ch->netdev;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group *grpptr = privptr->mpcg;
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s() %04x \nGrpState:%s ChState:%s\n",
++ __FUNCTION__,ch->devno,
++ fsm_getstate_str(grpptr->fsm),
++ fsm_getstate_str(ch->fsm));
++#endif
++
++ switch(fsm_getstate(grpptr->fsm))
++ {
++ case MPCG_STATE_XID2INITW:
++ /* ok..start yside xid exchanges */
++ if(ch->in_mpcgroup)
++ {
++ if(fsm_getstate(ch->fsm) == CH_XID0_PENDING)
++ {
++ fsm_deltimer(&grpptr->timer);
++ fsm_addtimer(&grpptr->timer,
++ MPC_XID_TIMEOUT_VALUE,
++ MPCG_EVENT_TIMER,
++ dev);
++ fsm_event(grpptr->fsm,
++ MPCG_EVENT_XID0DO,
++ ch);
++ } else
++ {/* attn rcvd before xid0 processed via bh */
++ if(fsm_getstate(ch->fsm) <
++ CH_XID7_PENDING1)
++ fsm_newstate(ch->fsm,
++ CH_XID7_PENDING1);
++ }
++ }
++ break;
++ case MPCG_STATE_XID2INITX:
++ case MPCG_STATE_XID0IOWAIT:
++ case MPCG_STATE_XID0IOWAIX:
++ /* attn rcvd before xid0 processed on ch
++ but mid-xid0 processing for group */
++ if(fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
++ fsm_newstate(ch->fsm,CH_XID7_PENDING1);
++ break;
++ case MPCG_STATE_XID7INITW:
++ case MPCG_STATE_XID7INITX:
++ case MPCG_STATE_XID7INITI:
++ case MPCG_STATE_XID7INITZ:
++ switch(fsm_getstate(ch->fsm))
++ {
++ case CH_XID7_PENDING:
++ fsm_newstate(ch->fsm,CH_XID7_PENDING1);
++ break;
++ case CH_XID7_PENDING2:
++ fsm_newstate(ch->fsm,CH_XID7_PENDING3);
++ break;
++ }
++ fsm_event(grpptr->fsm,MPCG_EVENT_XID7DONE,dev);
++ break;
++ }
++
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s() %04x\n", __FUNCTION__,ch->devno);
++#endif
++ return;
++
++}
++
++static void
++ctcmpc_action_attnbusy(fsm_instance *fsm,int event, void *arg)
++{
++ channel *ch = (channel *)arg;
++ net_device *dev = ch->netdev;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group *grpptr = privptr->mpcg;
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s enter: %s() %04x \nGrpState:%s ChState:%s\n",
++ dev->name,
++ __FUNCTION__,ch->devno,
++ fsm_getstate_str(grpptr->fsm),
++ fsm_getstate_str(ch->fsm));
++#endif
++
++
++ fsm_deltimer(&ch->timer);
++
++ switch(fsm_getstate(grpptr->fsm))
++ {
++ case MPCG_STATE_XID0IOWAIT:
++ /* vtam wants to be primary..start yside xid exchanges*/
++ /* will only rcv one attn-busy at a time so must not */
++ /* change state each time */
++ grpptr->changed_side = 1;
++ fsm_newstate(grpptr->fsm,MPCG_STATE_XID2INITW);
++ break;
++ case MPCG_STATE_XID2INITW:
++ if(grpptr->changed_side == 1)
++ {
++ grpptr->changed_side = 2;
++ break;
++ }
++ /* process began via call to establish_conn */
++ /* so must report failure instead of reverting */
++ /* back to ready-for-xid passive state */
++ if(grpptr->estconnfunc)
++ goto done;
++ /* this attnbusy is NOT the result of xside xid */
++ /* collisions so yside must have been triggered */
++ /* by an ATTN that was not intended to start XID */
++ /* processing. Revert back to ready-for-xid and */
++ /* wait for ATTN interrupt to signal xid start */
++ if(fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS)
++ {
++ fsm_newstate(ch->fsm,CH_XID0_PENDING) ;
++ fsm_deltimer(&grpptr->timer);
++ goto done;
++ }
++ fsm_event(grpptr->fsm,MPCG_EVENT_INOP,dev);
++ goto done;
++ case MPCG_STATE_XID2INITX:
++		/* XID2 was received before ATTN Busy for the second
++		   channel. Send yside xid for the second channel.
++		 */
++		if(grpptr->changed_side == 1)
++		{
++			grpptr->changed_side = 2;
++			break;
++		}
++		/* fall through */
++	case MPCG_STATE_XID0IOWAIX:
++ case MPCG_STATE_XID7INITW:
++ case MPCG_STATE_XID7INITX:
++ case MPCG_STATE_XID7INITI:
++ case MPCG_STATE_XID7INITZ:
++ default:
++		/* multiple attn-busy interrupts indicate the sides are */
++		/* too far out of sync and are certainly not being */
++		/* received as part of valid mpc group negotiations */
++ fsm_event(grpptr->fsm,MPCG_EVENT_INOP,dev);
++ goto done;
++ }
++
++ if(grpptr->changed_side == 1)
++ {
++ fsm_deltimer(&grpptr->timer);
++ fsm_addtimer(&grpptr->timer, MPC_XID_TIMEOUT_VALUE,
++ MPCG_EVENT_TIMER, dev);
++ }
++ if(ch->in_mpcgroup)
++ fsm_event(grpptr->fsm, MPCG_EVENT_XID0DO, ch);
++ else
++ printk( KERN_WARNING "ctcmpc: %s() Not all channels have "
++ "been added to group\n",
++ __FUNCTION__);
++
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO "%s exit: %s() %04x\n",
++ dev->name,__FUNCTION__,ch->devno);
++#endif
++ return;
++
++}
++
++static void
++ctcmpc_action_resend(fsm_instance *fsm,int event, void *arg)
++{
++ channel *ch = (channel *)arg;
++ net_device *dev = ch->netdev;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group *grpptr = privptr->mpcg;
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s enter: %s() %04x \nGrpState:%s ChState:%s\n",
++ dev->name,__FUNCTION__,ch->devno,
++ fsm_getstate_str(grpptr->fsm),
++ fsm_getstate_str(ch->fsm));
++#endif
++ fsm_event(grpptr->fsm, MPCG_EVENT_XID0DO, ch);
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s exit: %s() %04x\n",
++ dev->name,__FUNCTION__,ch->devno);
++#endif
++ return;
++
++}
++
++
++static int
++ctcmpc_send_qllc_discontact(net_device *dev)
++{
++	int rc = 0, space = 0;
++	__u32 new_len = 0;
++	struct sk_buff *skb;
++	qllc *qllcptr;
++	ctc_priv *privptr;
++	mpc_group *grpptr;
++
++#ifdef DEBUG
++	printk(KERN_INFO "ctcmpc enter: %s()\n",
++	       __FUNCTION__);
++#endif
++
++	if(dev == NULL)
++	{
++		printk(KERN_INFO "%s() dev=NULL\n",
++		       __FUNCTION__);
++		rc = 1;
++		goto done;
++	}
++
++	privptr = (ctc_priv *)dev->priv;
++	if(privptr == NULL)
++	{
++		printk(KERN_INFO "%s() privptr=NULL\n",
++		       __FUNCTION__);
++		rc = 1;
++		goto done;
++	}
++
++	grpptr = privptr->mpcg;
++	if(grpptr == NULL)
++	{
++		printk(KERN_INFO "%s() grpptr=NULL\n",
++		       __FUNCTION__);
++		rc = 1;
++		goto done;
++	}
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc: %s() GROUP STATE: %s\n",
++ __FUNCTION__,mpcg_state_names[grpptr->saved_state]);
++#endif
++
++
++ switch(grpptr->saved_state)
++ { /* establish conn callback function is */
++ /* preferred method to report failure */
++ case MPCG_STATE_XID0IOWAIT:
++ case MPCG_STATE_XID0IOWAIX:
++ case MPCG_STATE_XID7INITI:
++ case MPCG_STATE_XID7INITZ:
++ case MPCG_STATE_XID2INITW:
++ case MPCG_STATE_XID2INITX:
++ case MPCG_STATE_XID7INITW:
++ case MPCG_STATE_XID7INITX:
++ if(grpptr->estconnfunc)
++ {
++ grpptr->estconnfunc(grpptr->port_num,-1,0);
++ grpptr->estconnfunc = NULL;
++ break;
++ }
++		/* fall through */
++	case MPCG_STATE_FLOWC:
++ case MPCG_STATE_READY:
++ grpptr->send_qllc_disc = 2;
++ new_len = sizeof(qllc);
++ if((qllcptr = (qllc *)kmalloc(sizeof(qllc),
++ gfp_type() | GFP_DMA))
++ == NULL)
++ {
++ printk(KERN_INFO "qllc: Out of memory"
++ " in send_qllc\n");
++ rc = 1;
++ goto done;
++ }
++
++ memset(qllcptr, 0, new_len);
++ qllcptr->qllc_address = 0xcc;
++ qllcptr->qllc_commands = 0x03;
++
++ skb = __dev_alloc_skb(new_len,GFP_ATOMIC);
++
++ if(skb == NULL)
++ {
++ printk(KERN_INFO
++ "%s Out of memory in ctcmpc_send_qllc\n",
++ dev->name);
++ privptr->stats.rx_dropped++;
++ rc = 1;
++ kfree(qllcptr);
++ goto done;
++ }
++
++ memcpy(skb_put(skb, new_len), qllcptr, new_len);
++ kfree(qllcptr);
++
++ space = skb_headroom(skb);
++ if(space < 4)
++ {
++ printk(KERN_INFO "%s Unable to build "
++ "discontact for %s\n",
++ __FUNCTION__,dev->name);
++ rc = 1;
++ dev_kfree_skb_any(skb);
++ goto done;
++ }
++
++ *((__u32 *) skb_push(skb, 4)) =
++ privptr->channel[READ]->pdu_seq;
++ privptr->channel[READ]->pdu_seq++;
++#ifdef DEBUGSEQ
++ printk(KERN_INFO "%s: ToDCM_pdu_seq= %08x\n" ,
++ __FUNCTION__,privptr->channel[READ]->pdu_seq);
++#endif
++		/* receipt of CC03 resets the anticipated sequence */
++		/* number on the receiving side */
++ privptr->channel[READ]->pdu_seq = 0x00;
++
++ skb->mac.raw = skb->data;
++ skb->dev = dev;
++ skb->protocol = htons(ETH_P_SNAP);
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++#ifdef DEBUGDATA
++ dumpit((char *)skb->data,(sizeof(qllc)+4));
++#endif
++ netif_rx(skb);
++ break;
++ default: break;
++
++ }
++
++ done:
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++ return(rc);
++
++}
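++
++/**
++ * Frame layout handed to netif_rx() above, derived from the code: a
++ * 4-byte pdu sequence number pushed in front of the QLLC DISC
++ * (address 0xcc, command 0x03), delivered as an ETH_P_SNAP packet
++ * with checksumming suppressed.
++ */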
++
++static void
++ctcmpc_send_sweep(fsm_instance *fsm,int event, void *arg)
++{
++ channel *ach = (channel *)arg;
++ net_device *dev = ach->netdev;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group *grpptr = privptr->mpcg;
++ int rc = 0;
++	struct sk_buff *skb;
++	th_sweep *header;
++ unsigned long saveflags;
++ channel *wch = privptr->channel[WRITE];
++ channel *rch = privptr->channel[READ];
++
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc cp:%i enter: %s() %04x\n",
++ smp_processor_id(),__FUNCTION__,ach->devno);
++#endif
++
++ if(grpptr->in_sweep == 0)
++ goto done;
++
++#ifdef DEBUGSEQ
++ printk(KERN_INFO "%s 1: ToVTAM_th_seq= %08x\n" ,
++ __FUNCTION__,wch->th_seq_num);
++ printk(KERN_INFO "%s 1: FromVTAM_th_seq= %08x\n" ,
++ __FUNCTION__,rch->th_seq_num);
++#endif
++
++
++ if(fsm_getstate(wch->fsm) != CH_STATE_TXIDLE)
++ {
++ /* give the previous do_IO time to complete */
++ fsm_addtimer(&wch->sweep_timer,200,CH_EVENT_RSWEEP1_TIMER,wch);
++ goto done;
++ }
++
++ skb = skb_dequeue(&wch->sweep_queue);
++ if(!skb)
++ goto done;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21))
++ if(set_normalized_cda(&wch->ccw[4], virt_to_phys(skb->data)))
++ {
++#else
++ if(set_normalized_cda(&wch->ccw[4], skb->data))
++ {
++#endif
++ grpptr->in_sweep = 0;
++ ctcmpc_clear_busy(dev);
++ fsm_event(grpptr->fsm,MPCG_EVENT_INOP,dev);
++ goto done;
++ } else
++ skb_queue_tail(&wch->io_queue, skb);
++
++ /* send out the sweep */
++ wch->ccw[4].count = skb->len;
++
++	header = (th_sweep *)skb->data;
++ switch(header->th.th_ch_flag)
++ {
++ case TH_SWEEP_REQ:
++ grpptr->sweep_req_pend_num--;
++ break;
++ case TH_SWEEP_RESP:
++ grpptr->sweep_rsp_pend_num--;
++ break;
++ }
++
++ header->sw.th_last_seq = wch->th_seq_num;
++
++#ifdef DEBUGCCW
++ dumpit((char *)&wch->ccw[3],sizeof(ccw1_t) * 3);
++#endif
++
++#ifdef DEBUGSEQ
++ printk(KERN_INFO "%s(): sweep packet\n", __FUNCTION__);
++ dumpit((char *)header,TH_SWEEP_LENGTH);
++#endif
++
++
++ fsm_addtimer(&wch->timer,CTC_TIMEOUT_5SEC,CH_EVENT_TIMER, wch);
++ fsm_newstate(wch->fsm, CH_STATE_TX);
++
++ s390irq_spin_lock_irqsave(wch->irq, saveflags);
++ wch->prof.send_stamp = xtime;
++ rc = do_IO(wch->irq, &wch->ccw[3], (intparm_t)wch, 0xff, 0);
++ s390irq_spin_unlock_irqrestore(wch->irq, saveflags);
++
++ if((grpptr->sweep_req_pend_num == 0) &&
++ (grpptr->sweep_rsp_pend_num == 0))
++ {
++ grpptr->in_sweep = 0;
++ rch->th_seq_num = 0x00;
++ wch->th_seq_num = 0x00;
++ ctcmpc_clear_busy(dev);
++ }
++
++#ifdef DEBUGSEQ
++ printk(KERN_INFO "%s 2: ToVTAM_th_seq= %08x\n" ,
++ __FUNCTION__,wch->th_seq_num);
++ printk(KERN_INFO "%s 2: FromVTAM_th_seq= %08x\n" ,
++ __FUNCTION__,rch->th_seq_num);
++#endif
++
++ if(rc != 0)
++ ccw_check_return_code(wch, rc);
++
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s() %04x\n", __FUNCTION__,ach->devno);
++#endif
++ return;
++
++}
++
++
++static void
++ctcmpc_rcvd_sweep_resp(mpcg_info *mpcginfo)
++{
++ channel *rch = mpcginfo->ch;
++ net_device *dev = rch->netdev;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group *grpptr = privptr->mpcg;
++ channel *ch = privptr->channel[WRITE];
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s() %04x\n",
++ __FUNCTION__,ch->devno);
++#endif
++
++#ifdef DEBUGSEQ
++ dumpit((char *)mpcginfo->sweep,TH_SWEEP_LENGTH);
++#endif
++
++
++ grpptr->sweep_rsp_pend_num--;
++
++ if((grpptr->sweep_req_pend_num == 0) &&
++ (grpptr->sweep_rsp_pend_num == 0))
++ {
++ fsm_deltimer(&ch->sweep_timer);
++ grpptr->in_sweep = 0;
++ rch->th_seq_num = 0x00;
++ ch->th_seq_num = 0x00;
++ ctcmpc_clear_busy(dev);
++ }
++
++ kfree(mpcginfo);
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s() %04x\n",
++ __FUNCTION__,ch->devno);
++#endif
++
++ return;
++
++}
++
++
++static void
++ctcmpc_send_sweep_req(channel *rch)
++{
++ net_device *dev = rch->netdev;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group *grpptr = privptr->mpcg;
++ th_sweep *header;
++ struct sk_buff *sweep_skb;
++ int rc = 0;
++ channel *ch = privptr->channel[WRITE];
++
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s() %04x\n",
++ __FUNCTION__,ch->devno);
++#endif
++	/* sweep processing is not complete until the request and */
++	/* response have completed for all read channels in the group */
++ if(grpptr->in_sweep == 0)
++ {
++ grpptr->in_sweep = 1;
++ grpptr->sweep_rsp_pend_num = grpptr->active_channels[READ];
++ grpptr->sweep_req_pend_num = grpptr->active_channels[READ];
++ }
++
++
++ sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT,
++ GFP_ATOMIC|GFP_DMA);
++
++ if(sweep_skb == NULL)
++ {
++ printk(KERN_INFO "Couldn't alloc sweep_skb\n");
++ rc = -ENOMEM;
++ goto done;
++ }
++
++ header = (th_sweep *)kmalloc(TH_SWEEP_LENGTH, gfp_type());
++
++ if(!header)
++ {
++ dev_kfree_skb_any(sweep_skb);
++ rc = -ENOMEM;
++ goto done;
++ }
++
++ header->th.th_seg = 0x00 ;
++ header->th.th_ch_flag = TH_SWEEP_REQ; /* 0x0f */
++ header->th.th_blk_flag = 0x00;
++ header->th.th_is_xid = 0x00;
++ header->th.th_seq_num = 0x00;
++ header->sw.th_last_seq = ch->th_seq_num;
++
++ memcpy(skb_put(sweep_skb,TH_SWEEP_LENGTH),header,TH_SWEEP_LENGTH);
++
++ kfree(header);
++
++ dev->trans_start = jiffies;
++ skb_queue_tail(&ch->sweep_queue,sweep_skb);
++
++ fsm_addtimer(&ch->sweep_timer,100,CH_EVENT_RSWEEP1_TIMER,ch);
++
++ return;
++ done:
++ if(rc != 0)
++ {
++ grpptr->in_sweep = 0;
++ ctcmpc_clear_busy(dev);
++ fsm_event(grpptr->fsm,MPCG_EVENT_INOP,dev);
++ }
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s() %04x\n",
++ __FUNCTION__,ch->devno);
++#endif
++
++ return;
++}
++
++static void
++ctcmpc_send_sweep_resp(channel *rch)
++{
++ net_device *dev = rch->netdev;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group *grpptr = privptr->mpcg;
++ int rc = 0;
++ th_sweep *header;
++ struct sk_buff *sweep_skb;
++ channel *ch = privptr->channel[WRITE];
++
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s() %04x\n",
++ __FUNCTION__,rch->devno);
++#endif
++
++ sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT,
++ GFP_ATOMIC|GFP_DMA);
++ if(sweep_skb == NULL)
++ {
++ printk(KERN_INFO
++ "Couldn't alloc sweep_skb\n");
++ rc = -ENOMEM;
++ goto done;
++ }
++
++ header = (th_sweep *)kmalloc(sizeof(struct th_sweep_t), gfp_type());
++
++ if(!header)
++ {
++ dev_kfree_skb_any(sweep_skb);
++ rc = -ENOMEM;
++ goto done;
++ }
++
++ header->th.th_seg = 0x00 ;
++ header->th.th_ch_flag = TH_SWEEP_RESP;
++ header->th.th_blk_flag = 0x00;
++ header->th.th_is_xid = 0x00;
++ header->th.th_seq_num = 0x00;
++ header->sw.th_last_seq = ch->th_seq_num;
++
++ memcpy(skb_put(sweep_skb,TH_SWEEP_LENGTH),header,TH_SWEEP_LENGTH);
++
++ kfree(header);
++
++ dev->trans_start = jiffies;
++ skb_queue_tail(&ch->sweep_queue,sweep_skb);
++
++ fsm_addtimer(&ch->sweep_timer,100,CH_EVENT_RSWEEP1_TIMER,ch);
++
++ return;
++
++ done:
++ if(rc != 0)
++ {
++ grpptr->in_sweep = 0;
++ ctcmpc_clear_busy(dev);
++ fsm_event(grpptr->fsm,MPCG_EVENT_INOP,dev);
++ }
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s() %04x\n",
++ __FUNCTION__,ch->devno);
++#endif
++
++ return;
++
++}
++
++static void
++ctcmpc_rcvd_sweep_req(mpcg_info *mpcginfo)
++{
++ channel *rch = mpcginfo->ch;
++ net_device *dev = rch->netdev;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group *grpptr = privptr->mpcg;
++ channel *ch = privptr->channel[WRITE];
++
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s() %04x\n",
++ __FUNCTION__,ch->devno);
++#endif
++
++ if(grpptr->in_sweep == 0)
++ {
++ grpptr->in_sweep = 1;
++ ctcmpc_test_and_set_busy(dev);
++ grpptr->sweep_req_pend_num = grpptr->active_channels[READ];
++ grpptr->sweep_rsp_pend_num = grpptr->active_channels[READ];
++ }
++
++#ifdef DEBUGSEQ
++ dumpit((char *)mpcginfo->sweep,TH_SWEEP_LENGTH);
++#endif
++
++ grpptr->sweep_req_pend_num --;
++
++ ctcmpc_send_sweep_resp(ch);
++
++ kfree(mpcginfo);
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s() %04x\n",
++ __FUNCTION__,ch->devno);
++#endif
++ return;
++}
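++
++/*
++ * Receiving a sweep request may itself open a sweep window on this
++ * side: in_sweep and the device-busy flag are set and both pending
++ * counts are loaded from active_channels[READ]; then one outstanding
++ * request is accounted for and a sweep response is sent back on the
++ * write channel.
++ */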
++
++static void
++ctcmpc_action_go_ready(fsm_instance *fsm,int event, void *arg)
++{
++ net_device *dev = (net_device *)arg;
++
++ if(dev == NULL)
++ {
++ printk(KERN_INFO "%s() dev=NULL\n",
++ __FUNCTION__);
++ return;
++ }
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s enter: %s()\n",
++ dev->name,__FUNCTION__);
++#endif
++
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ if(privptr == NULL)
++ {
++ printk(KERN_INFO "%s() privptr=NULL\n",
++ __FUNCTION__);
++ return;
++ }
++
++ mpc_group *grpptr = privptr->mpcg;
++ if(grpptr == NULL)
++ {
++ printk(KERN_INFO "%s() grpptr=NULL\n",
++ __FUNCTION__);
++ return;
++ }
++
++ fsm_deltimer(&grpptr->timer);
++
++ if(grpptr->saved_xid2->xid2_flag2 == 0x40)
++ {
++ privptr->xid->xid2_flag2 = 0x00;
++ if(grpptr->estconnfunc)
++ {
++ grpptr->estconnfunc(grpptr->port_num,1,
++ grpptr->group_max_buflen);
++ grpptr->estconnfunc = NULL;
++ } else
++ if(grpptr->allochanfunc)
++ grpptr->send_qllc_disc = 1;
++ goto done;
++ }
++
++ grpptr->port_persist = 1;
++ grpptr->out_of_sequence = 0;
++ grpptr->estconn_called = 0;
++
++ tasklet_hi_schedule(&grpptr->mpc_tasklet2);
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s exit: %s()\n", dev->name,__FUNCTION__);
++#endif
++ return;
++
++ done:
++ fsm_event(grpptr->fsm,MPCG_EVENT_INOP,dev);
++
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: failure occurred: %s()\n",
++ __FUNCTION__);
++#endif
++
++}
++
++
++static void
++ctcmpc_group_ready(unsigned long adev)
++{
++ net_device *dev = (net_device *)adev;
++
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ if(dev == NULL)
++ {
++ printk(KERN_INFO "%s() dev=NULL\n",__FUNCTION__);
++ return;
++ }
++
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ if(privptr == NULL)
++ {
++ printk(KERN_INFO "%s() privptr=NULL\n",__FUNCTION__);
++ return;
++ }
++
++ mpc_group *grpptr = privptr->mpcg;
++ if(grpptr == NULL)
++ {
++ printk(KERN_INFO "%s() grpptr=NULL\n",__FUNCTION__);
++ return;
++ }
++
++ printk(KERN_NOTICE "%s:GROUP TRANSITIONED TO READY maxbuf:%d\n",
++ dev->name,grpptr->group_max_buflen);
++
++ fsm_newstate(grpptr->fsm, MPCG_STATE_READY);
++
++ /* Put up a read on the channel */
++ channel *ch = privptr->channel[READ];
++ ch->pdu_seq = 0;
++#ifdef DEBUGSEQ
++ printk(KERN_INFO "%s: ToDCM_pdu_seq= %08x\n" ,
++ __FUNCTION__,ch->pdu_seq);
++#endif
++
++ ctcmpc_ch_action_rxidle(ch->fsm, CH_EVENT_START, ch);
++ /* Put the write channel in idle state */
++ ch = privptr->channel[WRITE];
++ if(ch->collect_len > 0)
++ {
++ spin_lock(&ch->collect_lock);
++ ctcmpc_purge_skb_queue(&ch->collect_queue);
++ ch->collect_len = 0;
++ spin_unlock(&ch->collect_lock);
++ }
++ ctcmpc_ch_action_txidle(ch->fsm, CH_EVENT_START, ch);
++
++ ctcmpc_clear_busy(dev);
++
++ if(grpptr->estconnfunc)
++ {
++ grpptr->estconnfunc(grpptr->port_num,0,
++ grpptr->group_max_buflen);
++ grpptr->estconnfunc = NULL;
++ } else
++ if(grpptr->allochanfunc)
++ grpptr->allochanfunc(grpptr->port_num,
++ grpptr->group_max_buflen);
++
++ grpptr->send_qllc_disc = 1;
++ grpptr->changed_side = 0;
++
++
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++ return;
++
++}
++
++/****************************************************************/
++/* Increment the MPC Group Active Channel Counts */
++/****************************************************************/
++static int
++ctcmpc_channel_action(channel *ch, int direction, int action)
++{
++ net_device *dev = ch->netdev;
++ ctc_priv *privptr;
++ mpc_group *grpptr = NULL;
++ int rc = 0;
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s() %04x\n",
++ __FUNCTION__,ch->devno);
++#endif
++
++
++ if(dev == NULL)
++ {
++ printk(KERN_INFO "mpc_channel_action %i dev=NULL, irq=%d\n",
++ action,ch->irq);
++ rc = 1;
++ goto done;
++ }
++
++ privptr = (ctc_priv *)dev->priv;
++ if(privptr == NULL)
++ {
++		printk(KERN_INFO "mpc_channel_action %i privptr=NULL, dev=%s\n",
++ action,dev->name);
++ rc = 2;
++ goto done;
++ }
++
++ grpptr = privptr->mpcg;
++
++ if(grpptr == NULL)
++ {
++		printk(KERN_INFO "mpc_channel_action %i mpcgroup=NULL, dev=%s\n",
++ action,dev->name);
++ rc = 3;
++ goto done;
++ }
++#ifdef DEBUG
++ printk(KERN_INFO
++	       "ctcmpc enter: %s %i(): Grp:%s total_channel_paths=%i "
++ "active_channels read=%i,write=%i\n",
++ __FUNCTION__,
++ action,
++ fsm_getstate_str(grpptr->fsm),
++ grpptr->num_channel_paths,
++ grpptr->active_channels[READ],
++ grpptr->active_channels[WRITE]);
++#endif
++
++ switch(action)
++ {
++ case MPC_CHANNEL_ADD:
++ if(ch->in_mpcgroup == 0)
++ {
++ grpptr->num_channel_paths++;
++ grpptr->active_channels[direction]++;
++ grpptr->outstanding_xid2++;
++ ch->in_mpcgroup = 1;
++
++ if(ch->xid_skb != NULL)
++ dev_kfree_skb_any(ch->xid_skb);
++ ch->xid_skb =
++ __dev_alloc_skb(MPC_BUFSIZE_DEFAULT,
++ GFP_ATOMIC|GFP_DMA);
++ if(ch->xid_skb == NULL)
++ {
++ printk(KERN_INFO
++ "Couldn't alloc ch xid_skb\n");
++ fsm_event(grpptr->fsm,
++ MPCG_EVENT_INOP,dev);
++ return 1;
++ }
++
++ ch->xid_skb_data = ch->xid_skb->data;
++ ch->xid_th = (th_header *)ch->xid_skb->data;
++ skb_put(ch->xid_skb,TH_HEADER_LENGTH);
++ ch->xid = (xid2 *)ch->xid_skb->tail;
++ skb_put(ch->xid_skb,XID2_LENGTH);
++ ch->xid_id = ch->xid_skb->tail;
++ ch->xid_skb->data =
++ ch->xid_skb->tail =
++ ch->xid_skb_data;
++ ch->xid_skb->len = 0;
++
++
++ memcpy(skb_put(ch->xid_skb,
++ grpptr->xid_skb->len),
++ grpptr->xid_skb->data,
++ grpptr->xid_skb->len);
++
++ ch->xid->xid2_dlc_type =
++ ((CHANNEL_DIRECTION(ch->flags) == READ)
++ ? XID2_READ_SIDE : XID2_WRITE_SIDE );
++
++ if(CHANNEL_DIRECTION(ch->flags) == WRITE)
++ ch->xid->xid2_buf_len = 0x00;
++
++
++ ch->xid_skb->data =
++ ch->xid_skb->tail =
++ ch->xid_skb_data;
++ ch->xid_skb->len = 0;
++
++ fsm_newstate(ch->fsm,CH_XID0_PENDING);
++ if((grpptr->active_channels[READ] > 0) &&
++ (grpptr->active_channels[WRITE] > 0) &&
++ (fsm_getstate(grpptr->fsm) <
++ MPCG_STATE_XID2INITW))
++ {
++ fsm_newstate(grpptr->fsm,
++ MPCG_STATE_XID2INITW);
++ printk(KERN_NOTICE
++ "%s MPC GROUP CHANNELS ACTIVE\n",
++ dev->name);
++ }
++
++
++ }
++ break;
++ case MPC_CHANNEL_REMOVE:
++ if(ch->in_mpcgroup == 1)
++ {
++ ch->in_mpcgroup = 0;
++ grpptr->num_channel_paths--;
++ grpptr->active_channels[direction]--;
++
++ if(ch->xid_skb != NULL)
++ dev_kfree_skb_any(ch->xid_skb);
++ ch->xid_skb = NULL;
++
++ if(grpptr->channels_terminating)
++ break;
++
++ if(((grpptr->active_channels[READ] == 0) &&
++ (grpptr->active_channels[WRITE] > 0)) ||
++ ((grpptr->active_channels[WRITE] == 0) &&
++ (grpptr->active_channels[READ] > 0)))
++ fsm_event(grpptr->fsm,
++ MPCG_EVENT_INOP,
++ dev);
++ }
++ break;
++ }
++
++
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO
++ "ctcmpc leave: %s %i(): Grp:%s total_channel_paths=%i "
++ "active_channels read=%i,write=%i\n",
++ __FUNCTION__,
++ action,
++ fsm_getstate_str(grpptr->fsm),
++ grpptr->num_channel_paths,
++ grpptr->active_channels[READ],
++ grpptr->active_channels[WRITE]);
++#endif
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s() %04x\n",
++ __FUNCTION__,ch->devno);
++#endif
++
++ return(rc);
++
++}
++
++/**
++ * The minimum required active read or write channels
++ * are no longer available. Game over.
++ */
++
++/**
++ * The MPC Group Station FSM
++ * 22 events
++ */
++static const fsm_node mpcg_fsm[] = {
++ { MPCG_STATE_RESET, MPCG_EVENT_INOP, ctcmpc_action_go_inop},
++ { MPCG_STATE_INOP, MPCG_EVENT_INOP, fsm_action_nop},
++
++ { MPCG_STATE_FLOWC, MPCG_EVENT_INOP, ctcmpc_action_go_inop},
++
++ { MPCG_STATE_READY, MPCG_EVENT_DISCONC, ctcmpc_action_discontact},
++ { MPCG_STATE_READY, MPCG_EVENT_INOP, ctcmpc_action_go_inop},
++
++ { MPCG_STATE_XID2INITW, MPCG_EVENT_XID0DO, ctcmpc_action_doxid0},
++ { MPCG_STATE_XID2INITW, MPCG_EVENT_XID2, ctcmpc_action_rcvd_xid0},
++ { MPCG_STATE_XID2INITW, MPCG_EVENT_INOP, ctcmpc_action_go_inop},
++ { MPCG_STATE_XID2INITW, MPCG_EVENT_TIMER, ctcmpc_action_timeout},
++ { MPCG_STATE_XID2INITW, MPCG_EVENT_DOIO, ctcmpc_action_yside_xid},
++
++ { MPCG_STATE_XID2INITX, MPCG_EVENT_XID0DO, ctcmpc_action_doxid0},
++ { MPCG_STATE_XID2INITX, MPCG_EVENT_XID2, ctcmpc_action_rcvd_xid0},
++ { MPCG_STATE_XID2INITX, MPCG_EVENT_INOP, ctcmpc_action_go_inop},
++ { MPCG_STATE_XID2INITX, MPCG_EVENT_TIMER, ctcmpc_action_timeout},
++ { MPCG_STATE_XID2INITX, MPCG_EVENT_DOIO, ctcmpc_action_yside_xid},
++
++ { MPCG_STATE_XID7INITW, MPCG_EVENT_XID2DONE, ctcmpc_action_doxid7},
++ { MPCG_STATE_XID7INITW, MPCG_EVENT_DISCONC, ctcmpc_action_discontact},
++ { MPCG_STATE_XID7INITW, MPCG_EVENT_XID2, ctcmpc_action_rcvd_xid7},
++ { MPCG_STATE_XID7INITW, MPCG_EVENT_INOP, ctcmpc_action_go_inop},
++ { MPCG_STATE_XID7INITW, MPCG_EVENT_TIMER, ctcmpc_action_timeout},
++ { MPCG_STATE_XID7INITW, MPCG_EVENT_XID7DONE, ctcmpc_action_doxid7},
++ { MPCG_STATE_XID7INITW, MPCG_EVENT_DOIO, ctcmpc_action_yside_xid},
++
++ { MPCG_STATE_XID7INITX, MPCG_EVENT_DISCONC, ctcmpc_action_discontact},
++ { MPCG_STATE_XID7INITX, MPCG_EVENT_XID2, ctcmpc_action_rcvd_xid7},
++ { MPCG_STATE_XID7INITX, MPCG_EVENT_INOP, ctcmpc_action_go_inop},
++ { MPCG_STATE_XID7INITX, MPCG_EVENT_XID7DONE, ctcmpc_action_doxid7},
++ { MPCG_STATE_XID7INITX, MPCG_EVENT_TIMER, ctcmpc_action_timeout},
++ { MPCG_STATE_XID7INITX, MPCG_EVENT_DOIO, ctcmpc_action_yside_xid},
++
++ { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID0DO, ctcmpc_action_doxid0},
++ { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DISCONC, ctcmpc_action_discontact},
++ { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID2, ctcmpc_action_rcvd_xid0},
++ { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_INOP, ctcmpc_action_go_inop},
++ { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_TIMER, ctcmpc_action_timeout},
++ { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DOIO, ctcmpc_action_xside_xid},
++
++ { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID0DO, ctcmpc_action_doxid0},
++ { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DISCONC, ctcmpc_action_discontact},
++ { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID2, ctcmpc_action_rcvd_xid0},
++ { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_INOP, ctcmpc_action_go_inop},
++ { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_TIMER, ctcmpc_action_timeout},
++ { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DOIO, ctcmpc_action_xside_xid},
++
++ { MPCG_STATE_XID7INITI, MPCG_EVENT_XID2DONE,ctcmpc_action_doxid7},
++ { MPCG_STATE_XID7INITI, MPCG_EVENT_XID2, ctcmpc_action_rcvd_xid7},
++ { MPCG_STATE_XID7INITI, MPCG_EVENT_DISCONC, ctcmpc_action_discontact},
++ { MPCG_STATE_XID7INITI, MPCG_EVENT_INOP, ctcmpc_action_go_inop},
++ { MPCG_STATE_XID7INITI, MPCG_EVENT_TIMER, ctcmpc_action_timeout},
++ { MPCG_STATE_XID7INITI, MPCG_EVENT_XID7DONE,ctcmpc_action_doxid7},
++ { MPCG_STATE_XID7INITI, MPCG_EVENT_DOIO, ctcmpc_action_xside_xid},
++
++ { MPCG_STATE_XID7INITZ, MPCG_EVENT_XID2, ctcmpc_action_rcvd_xid7},
++ { MPCG_STATE_XID7INITZ, MPCG_EVENT_XID7DONE,ctcmpc_action_doxid7},
++ { MPCG_STATE_XID7INITZ, MPCG_EVENT_DISCONC, ctcmpc_action_discontact},
++ { MPCG_STATE_XID7INITZ, MPCG_EVENT_INOP, ctcmpc_action_go_inop},
++ { MPCG_STATE_XID7INITZ, MPCG_EVENT_TIMER, ctcmpc_action_timeout},
++ { MPCG_STATE_XID7INITZ, MPCG_EVENT_DOIO, ctcmpc_action_xside_xid},
++
++ { MPCG_STATE_XID7INITF, MPCG_EVENT_INOP, ctcmpc_action_go_inop},
++ { MPCG_STATE_XID7INITF, MPCG_EVENT_XID7DONE,ctcmpc_action_go_ready},
++
++};
++
++static const int MPCG_FSM_LEN = sizeof(mpcg_fsm) / sizeof(fsm_node);
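++
++/*
++ * The group statemachine is table-driven: each fsm_node names a
++ * (state, event) pair plus the action routine to run, and
++ * MPCG_FSM_LEN is the usual sizeof-array element count.  A minimal
++ * sketch of how such a table is presumably consumed by the shared
++ * ctc fsm layer (the real call site lives elsewhere in this patch;
++ * the name arrays and NR_ constants below are placeholders):
++ *
++ *	grpptr->fsm = init_fsm("mpcg", mpcg_state_names,
++ *			       mpcg_event_names, MPCG_NR_STATES,
++ *			       MPCG_NR_EVENTS, mpcg_fsm, MPCG_FSM_LEN,
++ *			       GFP_KERNEL);
++ *	fsm_newstate(grpptr->fsm, MPCG_STATE_RESET);
++ */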
++
++
++/**
++ * Unpack a just received skb and hand it over to
++ * upper layers.
++ *
++ * @param ch The channel where this skb has been received.
++ * @param pskb The received packed skb.
++ */
++static __inline__ void
++ctcmpc_unpack_skb(channel *ch, struct sk_buff *pskb)
++{
++ net_device *dev = ch->netdev;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group *grpptr = privptr->mpcg;
++ pdu *curr_pdu;
++ mpcg_info *mpcginfo;
++ int pdu_last_seen = 0;
++ __u32 new_len;
++ struct sk_buff *skb;
++ int sendrc = 0;
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s cp:%i enter:%s() %04x\n",
++ dev->name,smp_processor_id(),__FUNCTION__,ch->devno);
++#endif
++
++ th_header *header = (th_header *)pskb->data;
++ if((header->th_seg == 0) &&
++ (header->th_ch_flag == 0) &&
++ (header->th_blk_flag == 0) &&
++ (header->th_seq_num == 0))
++ goto done; /* nothing for us */
++
++#ifdef DEBUGDATA
++ printk(KERN_INFO "%s(): th_header\n", __FUNCTION__);
++ dumpit((char *)header,TH_HEADER_LENGTH);
++ printk(KERN_INFO "%s(): pskb len: %04x \n", __FUNCTION__,pskb->len);
++#endif
++
++ pskb->dev = dev;
++ pskb->ip_summed = CHECKSUM_UNNECESSARY;
++ spin_lock(&ch->segment_lock); /* make sure we are alone here */
++
++ skb_pull(pskb,TH_HEADER_LENGTH);
++
++ if(likely(header->th_ch_flag == TH_HAS_PDU))
++ {
++// #ifdef DEBUGDATA
++// printk(KERN_INFO "%s(): came into th_has_pdu\n",
++// __FUNCTION__);
++// #endif
++
++
++ if((fsm_getstate(grpptr->fsm) == MPCG_STATE_FLOWC) ||
++ ((fsm_getstate(grpptr->fsm) == MPCG_STATE_READY) &&
++ (header->th_seq_num != ch->th_seq_num + 1) &&
++ (ch->th_seq_num != 0)))
++ {
++			/* This isn't the next segment in      *
++			 * sequence, so we are not the correct *
++			 * race winner; requeue it and let     *
++			 * another CPU win.  This only applies *
++			 * once XID negotiation is done.       *
++			 */
++ grpptr->out_of_sequence +=1;
++ __skb_push(pskb,TH_HEADER_LENGTH);
++ spin_unlock(&ch->segment_lock);
++ skb_queue_tail(&ch->io_queue, pskb);
++#ifdef DEBUGSEQ
++ printk(KERN_INFO "%s() th_seq_num expect:%08x "
++ "got:%08x\n",
++ __FUNCTION__,
++ ch->th_seq_num + 1,
++ header->th_seq_num);
++#endif
++ return;
++ }
++ grpptr->out_of_sequence = 0;
++ ch->th_seq_num = header->th_seq_num;
++
++#ifdef DEBUGSEQ
++ printk(KERN_INFO "%s: FromVTAM_th_seq= %08x\n" ,
++ __FUNCTION__,ch->th_seq_num);
++#endif
++ pdu_last_seen = 0;
++ if(fsm_getstate(grpptr->fsm) == MPCG_STATE_READY)
++ while((pskb->len > 0) && !pdu_last_seen)
++ {
++ curr_pdu = (pdu *)pskb->data;
++#ifdef DEBUGDATA
++ printk(KERN_INFO "%s(): pdu_header\n",
++ __FUNCTION__);
++ dumpit((char *)pskb->data,PDU_HEADER_LENGTH);
++ printk(KERN_INFO "%s(): pskb len: %04x \n",
++ __FUNCTION__,pskb->len);
++#endif
++ skb_pull(pskb,PDU_HEADER_LENGTH);
++ if(curr_pdu->pdu_flag & PDU_LAST)
++ pdu_last_seen = 1;
++ if(curr_pdu->pdu_flag & PDU_CNTL)
++ pskb->protocol = htons(ETH_P_SNAP);
++ else
++ pskb->protocol = htons(ETH_P_SNA_DIX);
++ if((pskb->len <= 0) ||
++ (pskb->len > ch->max_bufsize))
++ {
++					printk(KERN_INFO
++					       "%s: Illegal packet size %d "
++					       "received - dropping\n",
++					       dev->name,
++					       pskb->len);
++ privptr->stats.rx_dropped++;
++ privptr->stats.rx_length_errors++;
++ spin_unlock(&ch->segment_lock);
++ goto done;
++ }
++ pskb->mac.raw = pskb->data;
++ new_len = curr_pdu->pdu_offset;
++// #ifdef DEBUGDATA
++// printk(KERN_INFO "%s(): new_len: %04x \n",
++// __FUNCTION__,new_len);
++// #endif
++ if((new_len == 0) ||
++ (new_len > pskb->len))
++ {
++ /* should never happen */
++ /* pskb len must be hosed...bail out */
++ printk(KERN_INFO
++ "%s(): invalid pdu offset "
++ "of %04x - data may be lost\n",
++ __FUNCTION__,new_len);
++ spin_unlock(&ch->segment_lock);
++ goto done;
++ }
++ skb = __dev_alloc_skb(new_len+4,GFP_ATOMIC);
++
++ if(!skb)
++ {
++					printk(KERN_INFO
++					       "%s: Out of memory in %s - "
++					       "request-len:%04x\n",
++					       dev->name,
++					       __FUNCTION__,
++					       new_len+4);
++ privptr->stats.rx_dropped++;
++ spin_unlock(&ch->segment_lock);
++ fsm_event(grpptr->fsm,
++ MPCG_EVENT_INOP,
++ dev);
++ goto done;
++ }
++
++ memcpy(skb_put(skb, new_len),
++ pskb->data,
++ new_len);
++
++ skb->mac.raw = skb->data;
++ skb->dev = pskb->dev;
++ skb->protocol = pskb->protocol;
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ *((__u32 *) skb_push(skb, 4)) = ch->pdu_seq;
++ ch->pdu_seq++;
++
++#ifdef DEBUGSEQ
++ printk(KERN_INFO "%s: ToDCM_pdu_seq= %08x\n" ,
++ __FUNCTION__,ch->pdu_seq);
++#endif
++
++#ifdef DEBUGDATA
++ __u32 out_len;
++ if(skb->len > 32) out_len = 32;
++ else out_len = skb->len;
++ printk(KERN_INFO "%s(): skb:%0lx skb len:%d \n",
++ __FUNCTION__,
++ (unsigned long)skb,
++ skb->len);
++ printk(KERN_INFO "%s(): up to 32 bytes "
++ "of pdu_data sent\n",
++ __FUNCTION__);
++ dumpit((char *)skb->data,out_len);
++#endif
++
++ sendrc = netif_rx(skb);
++ privptr->stats.rx_packets++;
++ privptr->stats.rx_bytes += skb->len;
++ skb_pull(pskb, new_len); /* point to next PDU */
++ }
++ } else
++ {
++ if((mpcginfo = (mpcg_info *)kmalloc(sizeof(mpcg_info),
++ gfp_type())) == NULL)
++ {
++ spin_unlock(&ch->segment_lock);
++ goto done;
++ }
++
++ mpcginfo->ch = ch;
++ mpcginfo->th = header;
++ mpcginfo->skb = pskb;
++
++#ifdef DEBUG
++	printk(KERN_INFO "%s(): not a PDU - may be a control pkt\n",
++ __FUNCTION__);
++#endif
++ /* it's a sweep? */
++ th_sweep *sweep = (th_sweep *) pskb->data;
++ mpcginfo->sweep = sweep;
++ if(header->th_ch_flag == TH_SWEEP_REQ)
++ ctcmpc_rcvd_sweep_req(mpcginfo);
++ else
++ if(header->th_ch_flag == TH_SWEEP_RESP)
++ ctcmpc_rcvd_sweep_resp(mpcginfo);
++ else
++ {
++ if(header->th_blk_flag == TH_DATA_IS_XID)
++ {
++ xid2 *thisxid = (xid2 *)pskb->data;
++ skb_pull(pskb,XID2_LENGTH);
++ mpcginfo->xid = thisxid;
++ fsm_event(grpptr->fsm,MPCG_EVENT_XID2,mpcginfo);
++ } else
++ {
++ if(header->th_blk_flag == TH_DISCONTACT)
++ {
++ fsm_event(grpptr->fsm,
++ MPCG_EVENT_DISCONC,mpcginfo);
++ } else
++ if(header->th_seq_num != 0)
++ {
++				printk(KERN_INFO
++				       "%s: unexpected packet, "
++				       "expected control pkt\n",
++				       dev->name);
++ privptr->stats.rx_dropped++;
++#ifdef DEBUGDATA
++ ctcmpc_dump_skb(pskb, -8);
++#endif
++ kfree(mpcginfo);
++ }
++ }
++ }
++ }
++ spin_unlock(&ch->segment_lock);
++ done:
++
++ dev_kfree_skb_any(pskb);
++ switch(sendrc)
++ {
++ case NET_RX_DROP:
++ printk(KERN_WARNING "%s %s() NETWORK BACKLOG "
++ "EXCEEDED - PACKET DROPPED\n",
++ dev->name,
++ __FUNCTION__);
++ fsm_event(grpptr->fsm,MPCG_EVENT_INOP,dev);
++ break;
++ case NET_RX_SUCCESS:
++ case NET_RX_CN_LOW:
++ case NET_RX_CN_MOD:
++ case NET_RX_CN_HIGH:
++ default:
++ break;
++ }
++
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s exit: %s() %04x\n",
++ dev->name,__FUNCTION__,ch->devno);
++#endif
++
++}
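++
++/*
++ * Unpack summary: a received transport block is a TH header followed
++ * by one or more PDUs.  Each PDU header carries pdu_offset (its
++ * length), PDU_LAST on the final PDU and PDU_CNTL to distinguish
++ * SNAP from SNA-DIX traffic.  Every PDU is copied into a fresh skb,
++ * prefixed with the 4-byte pdu_seq counter and handed to netif_rx();
++ * anything that is not a PDU block is routed to the sweep/XID/
++ * discontact handlers via an mpcg_info.
++ */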
++
++
++
++/**
++ * Bottom half routine.
++ *
++ * @param ch The channel to work on.
++ * Flow-control back pressure is allowed to occur here;
++ * throttling the channel back can result in excessive
++ * channel inactivity and a system deactivation of the channel.
++ */
++static void
++ctcmpc_bh(unsigned long thischan)
++{
++ channel *ch = (channel *)thischan;
++ struct sk_buff *peek_skb = NULL;
++ struct sk_buff *skb;
++ struct sk_buff *same_skb = NULL;
++ net_device *dev = ch->netdev;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group *grpptr = privptr->mpcg;
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s cp:%i enter: %s() %04x\n",
++ dev->name,smp_processor_id(),__FUNCTION__,ch->devno);
++#endif
++ /* caller has requested driver to throttle back */
++ if(fsm_getstate(grpptr->fsm) == MPCG_STATE_FLOWC)
++ {
++ goto done;
++ } else
++ {
++ while((skb = skb_dequeue(&ch->io_queue)))
++ {
++ same_skb = skb;
++ ctcmpc_unpack_skb(ch, skb);
++ if(grpptr->out_of_sequence > 20)
++ {
++ /* assume data loss has occurred if */
++ /* missing seq_num for extended */
++ /* period of time */
++ grpptr->out_of_sequence = 0;
++ fsm_event(grpptr->fsm,MPCG_EVENT_INOP,dev);
++ goto done;
++ }
++ peek_skb = skb_peek(&ch->io_queue);
++ if(peek_skb == same_skb)
++ goto done;
++ if(fsm_getstate(grpptr->fsm) == MPCG_STATE_FLOWC)
++ goto done;
++ }
++ }
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO "%s exit: %s() %04x\n",
++ dev->name,__FUNCTION__,ch->devno);
++#endif
++
++ return;
++}
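++
++/*
++ * The out_of_sequence counter is bumped by ctcmpc_unpack_skb() each
++ * time a block arrives with an unexpected th_seq_num and gets
++ * requeued.  If 20 consecutive passes still fail to produce the
++ * missing sequence number, data loss is assumed and the group is
++ * forced INOP rather than spinning on the io_queue forever.
++ */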
++
++/**
++ * Check return code of a preceding do_IO, halt_IO etc...
++ *
++ * @param ch The channel the error belongs to.
++ * @param return_code The error code to inspect.
++ */
++static inline void
++ccw_check_return_code(channel *ch, int return_code)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: cp:%i %s()\n",
++ smp_processor_id(),__FUNCTION__);
++#endif
++
++
++ switch(return_code)
++ {
++ case 0:
++ fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
++ break;
++ case -EBUSY:
++ printk(KERN_INFO "ch-%04x: Busy !\n", ch->devno);
++ fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch);
++ break;
++ case -ENODEV:
++ printk(KERN_EMERG
++ "ch-%04x: Invalid device called for IO\n",
++ ch->devno);
++ fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch);
++ break;
++ case -EIO:
++ printk(KERN_EMERG
++ "ch-%04x: Status pending... \n", ch->devno);
++ fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch);
++ break;
++ default:
++ printk(KERN_EMERG
++ "ch-%04x: Unknown error in do_IO %04x\n",
++ ch->devno, return_code);
++ fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch);
++ }
++
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++
++}
++
++/**
++ * Check sense of a unit check.
++ *
++ * @param ch The channel the sense code belongs to.
++ * @param sense The sense code to inspect.
++ */
++static inline void
++ccw_unit_check(channel *ch, unsigned char sense)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ if(sense & SNS0_INTERVENTION_REQ)
++ {
++ if(sense & 0x01)
++ {
++ printk(KERN_INFO
++ "ch-%04x: Interface disc. or Sel. reset "
++ "(remote)\n", ch->devno);
++ fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
++ } else
++ {
++ printk(KERN_INFO "ch-%04x: System reset (remote)\n",
++ ch->devno);
++ fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch);
++ }
++ } else if(sense & SNS0_EQUIPMENT_CHECK)
++ {
++ if(sense & SNS0_BUS_OUT_CHECK)
++ {
++ printk(KERN_INFO
++ "ch-%04x: Hardware malfunction (remote)\n",
++ ch->devno);
++ fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch);
++ } else
++ {
++ printk(KERN_INFO
++ "ch-%04x: Read-data parity error (remote)\n",
++ ch->devno);
++ fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch);
++ }
++ } else if(sense & SNS0_BUS_OUT_CHECK)
++ {
++ if(sense & 0x04)
++ {
++ printk(KERN_INFO
++			       "ch-%04x: Data-streaming timeout\n",
++ ch->devno);
++ fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch);
++ } else
++ {
++ printk(KERN_INFO
++ "ch-%04x: Data-transfer parity error\n",
++ ch->devno);
++ fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch);
++ }
++ } else if(sense & SNS0_CMD_REJECT)
++ {
++ printk(KERN_INFO "ch-%04x: Command reject\n",
++ ch->devno);
++ } else if(sense == 0)
++ {
++ printk(KERN_INFO "ch-%04x: Unit check ZERO\n", ch->devno);
++ fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch);
++ } else
++ {
++ printk(KERN_INFO
++ "ch-%04x: Unit Check with sense code: %02x\n",
++ ch->devno, sense);
++ fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch);
++ }
++
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++}
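++
++/*
++ * The sense decoding above follows a fixed precedence:
++ * intervention-required (remote disconnect/reset) first, then
++ * equipment check, bus-out check, command reject, and finally the
++ * all-zero and unknown cases.  Each branch maps to a distinct
++ * CH_EVENT_UC_* event so the channel FSM can choose a recovery
++ * action.
++ */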
++
++static void
++ctcmpc_purge_skb_queue(struct sk_buff_head *q)
++{
++ struct sk_buff *skb;
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ while((skb = skb_dequeue(q)))
++ {
++ atomic_dec(&skb->users);
++
++ dev_kfree_skb_any(skb);
++ }
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++}
++
++static __inline__ int
++ctcmpc_checkalloc_buffer(channel *ch, int warn)
++{
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ if((ch->trans_skb == NULL) ||
++ (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED))
++ {
++ if(ch->trans_skb != NULL)
++ dev_kfree_skb_any(ch->trans_skb);
++ clear_normalized_cda(&ch->ccw[1]);
++ ch->trans_skb = __dev_alloc_skb(ch->max_bufsize,
++ GFP_ATOMIC|GFP_DMA);
++ if(ch->trans_skb == NULL)
++ {
++ if(warn)
++ printk(KERN_INFO
++ "ch-%04x: Couldn't alloc %s trans_skb\n",
++ ch->devno,
++ (CHANNEL_DIRECTION(ch->flags) == READ) ?
++ "RX" : "TX");
++ return -ENOMEM;
++ }
++ ch->ccw[1].count = ch->max_bufsize;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21))
++ if(set_normalized_cda(&ch->ccw[1],
++ virt_to_phys(ch->trans_skb->data)))
++ {
++#else
++ if(set_normalized_cda(&ch->ccw[1],ch->trans_skb->data))
++ {
++#endif
++ dev_kfree_skb_any(ch->trans_skb);
++ ch->trans_skb = NULL;
++ if(warn)
++ printk(KERN_INFO
++ "ch-%04x: set_normalized_cda for %s "
++ "trans_skb failed, dropping packets\n",
++ ch->devno,
++ (CHANNEL_DIRECTION(ch->flags) == READ) ?
++ "RX" : "TX");
++ return -ENOMEM;
++ }
++ ch->ccw[1].count = 0;
++ ch->trans_skb_data = ch->trans_skb->data;
++ ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
++ }
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++ return 0;
++}
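++
++/*
++ * The LINUX_VERSION_CODE split above appears to reflect an interface
++ * change in set_normalized_cda(): older 2.4 kernels took a physical
++ * address (hence the virt_to_phys() wrapper), later ones take the
++ * virtual address directly.  If no IDAL-backed CCW data address can
++ * be set up, the buffer is useless, so it is freed and -ENOMEM
++ * returned.
++ */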
++
++/**
++ * Actions for channel - statemachines.
++ *****************************************************************************/
++
++/**
++ * Normal data has been sent. Free the corresponding
++ * skb (it's in io_queue), reset dev->tbusy and
++ * revert to idle state.
++ *
++ * @param fi An instance of a channel statemachine.
++ * @param event The event that just happened.
++ * @param arg Generic pointer, cast from channel * upon call.
++ */
++static void
++ctcmpc_ch_action_txdone(fsm_instance *fi, int event, void *arg)
++{
++ channel *ch = (channel *)arg;
++ net_device *dev = ch->netdev;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group *grpptr = privptr->mpcg;
++ struct sk_buff *skb;
++ int first = 1;
++ int i;
++#ifdef DEBUGDATA
++ __u32 out_len = 0;
++#endif
++
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s cp:%i enter: %s()\n",
++ dev->name,smp_processor_id(),__FUNCTION__);
++#endif
++
++
++ struct timeval done_stamp = xtime;
++ unsigned long duration =
++ (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
++ done_stamp.tv_usec - ch->prof.send_stamp.tv_usec;
++ if(duration > ch->prof.tx_time)
++ ch->prof.tx_time = duration;
++
++ if(ch->devstat->rescnt != 0)
++ printk(KERN_INFO "%s: TX not complete, remaining %d bytes\n",
++ dev->name, ch->devstat->rescnt);
++
++ fsm_deltimer(&ch->timer);
++ while((skb = skb_dequeue(&ch->io_queue)))
++ {
++ privptr->stats.tx_packets++;
++ privptr->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
++ if(first)
++ {
++ privptr->stats.tx_bytes += 2;
++ first = 0;
++ }
++ atomic_dec(&skb->users);
++ dev_kfree_skb_irq(skb);
++ }
++ spin_lock(&ch->collect_lock);
++ clear_normalized_cda(&ch->ccw[4]);
++ if((ch->collect_len > 0) && (grpptr->in_sweep == 0))
++ {
++ int rc;
++ th_header *header;
++ pdu *p_header = NULL;
++
++ if(ctcmpc_checkalloc_buffer(ch, 1))
++ {
++ spin_unlock(&ch->collect_lock);
++ goto done;
++ }
++ ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data;
++ ch->trans_skb->len = 0;
++ if(ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
++ ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
++ if(ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
++ ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
++ i = 0;
++#ifdef DEBUGDATA
++ printk(KERN_INFO "%s(): building trans_skb from collect_q \n",
++ __FUNCTION__);
++#endif
++
++ __u32 data_space = grpptr->group_max_buflen - TH_HEADER_LENGTH;
++
++#ifdef DEBUGDATA
++ printk(KERN_INFO "%s(): building trans_skb from "
++ "collect_q data_space:%04x\n",
++ __FUNCTION__,data_space);
++#endif
++
++
++ while((skb = skb_dequeue(&ch->collect_queue)))
++ {
++ memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
++ skb->len);
++ p_header = (pdu *)(ch->trans_skb->tail - skb->len);
++ p_header->pdu_flag = 0x00;
++ if(skb->protocol == ntohs(ETH_P_SNAP))
++ {
++ p_header->pdu_flag |= 0x60;
++ } else
++ {
++ p_header->pdu_flag |= 0x20;
++ }
++#ifdef DEBUGDATA
++ __u32 out_len = 0;
++ printk(KERN_INFO "%s():trans_skb len:%04x \n",
++ __FUNCTION__,ch->trans_skb->len);
++ if(skb->len > 32) out_len = 32;
++ else out_len = skb->len;
++ printk(KERN_INFO "%s(): pdu header and data for "
++ "up to 32 bytes sent to vtam\n",
++ __FUNCTION__);
++ dumpit((char *)p_header,out_len);
++#endif
++ ch->collect_len -= skb->len;
++ data_space -= skb->len;
++ privptr->stats.tx_packets++;
++ privptr->stats.tx_bytes += skb->len;
++ atomic_dec(&skb->users);
++ dev_kfree_skb_any(skb);
++			struct sk_buff *peekskb =
++				skb_peek(&ch->collect_queue);
++			/* stop if queue is empty or next pdu won't fit */
++			if((peekskb == NULL) ||
++			   (peekskb->len > data_space))
++				break;
++ i++;
++ }
++ /* p_header points to the last one we handled */
++ if(p_header)
++ p_header->pdu_flag |= PDU_LAST;
++ header = (th_header *)kmalloc(TH_HEADER_LENGTH, gfp_type());
++
++ if(!header)
++ {
++			printk(KERN_WARNING "OUT OF MEMORY IN %s(): "
++			       "Data Lost\n",
++			       __FUNCTION__);
++ spin_unlock(&ch->collect_lock);
++ fsm_event(privptr->mpcg->fsm,MPCG_EVENT_INOP,dev);
++ goto done;
++ }
++ header->th_seg = 0x00;
++ header->th_ch_flag = TH_HAS_PDU; /* Normal data */
++ header->th_blk_flag = 0x00;
++ header->th_is_xid = 0x00;
++ ch->th_seq_num++;
++ header->th_seq_num = ch->th_seq_num;
++
++#ifdef DEBUGSEQ
++ printk(KERN_INFO "%s: ToVTAM_th_seq= %08x\n" ,
++ __FUNCTION__,ch->th_seq_num);
++#endif
++ memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
++ TH_HEADER_LENGTH); /* put the TH on the packet */
++
++ kfree(header);
++
++#ifdef DEBUGDATA
++ printk(KERN_INFO "%s():trans_skb len:%04x \n",
++ __FUNCTION__,ch->trans_skb->len);
++ if(ch->trans_skb->len > 50) out_len = 50;
++ else out_len = ch->trans_skb->len;
++ printk(KERN_INFO "%s(): up-to-50 bytes of trans_skb "
++ "data to vtam from collect_q\n",
++ __FUNCTION__);
++ dumpit((char *)ch->trans_skb->data,out_len);
++#endif
++
++ spin_unlock(&ch->collect_lock);
++ clear_normalized_cda(&ch->ccw[1]);
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21))
++ if(set_normalized_cda(&ch->ccw[1],
++ virt_to_phys(ch->trans_skb->data)))
++ {
++#else
++ if(set_normalized_cda(&ch->ccw[1],ch->trans_skb->data))
++ {
++#endif
++ dev_kfree_skb_any(ch->trans_skb);
++ ch->trans_skb = NULL;
++ printk(KERN_WARNING "%s():CCW failure - data lost\n",
++ __FUNCTION__);
++ fsm_event(privptr->mpcg->fsm,MPCG_EVENT_INOP,dev);
++ return;
++ }
++ ch->ccw[1].count = ch->trans_skb->len;
++ fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
++ ch->prof.send_stamp = xtime;
++
++#ifdef DEBUGCCW
++ dumpit((char *)&ch->ccw[0],sizeof(ccw1_t) * 3);
++#endif
++
++ rc = do_IO(ch->irq, &ch->ccw[0], (intparm_t)ch, 0xff, 0);
++ ch->prof.doios_multi++;
++ if(rc != 0)
++ {
++ ccw_check_return_code(ch, rc);
++ }
++ } else
++ {
++ spin_unlock(&ch->collect_lock);
++ fsm_newstate(fi, CH_STATE_TXIDLE);
++ }
++
++ done:
++ ctcmpc_clear_busy(dev);
++#ifdef DEBUG
++ printk(KERN_INFO "%s exit: %s()\n", dev->name,__FUNCTION__);
++#endif
++ return;
++
++}
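++
++/*
++ * txdone coalescing: everything queued on collect_queue while the
++ * channel was busy is copied into a single trans_skb; each piece gets
++ * its pdu_flag set (0x60 for SNAP, 0x20 otherwise, PDU_LAST on the
++ * final one), and one TH header with the next th_seq_num is pushed in
++ * front before a single do_IO() writes the whole batch.
++ */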
++
++/**
++ * Initial data is sent.
++ * Notify device statemachine that we are up and
++ * running.
++ *
++ * @param fi An instance of a channel statemachine.
++ * @param event The event that just happened.
++ * @param arg Generic pointer, cast from channel * upon call.
++ */
++static void
++ctcmpc_ch_action_txidle(fsm_instance *fi, int event, void *arg)
++{
++ channel *ch = (channel *)arg;
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++ fsm_deltimer(&ch->timer);
++ fsm_newstate(fi, CH_STATE_TXIDLE);
++ fsm_event(((ctc_priv *)ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
++ ch->netdev);
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++}
++
++/**
++ * Got normal data, check for sanity, queue it up, allocate a new buffer,
++ * trigger the bottom half, and initiate the next read.
++ *
++ * @param fi An instance of a channel statemachine.
++ * @param event The event that just happened.
++ * @param arg Generic pointer, cast from channel * upon call.
++ */
++static void
++ctcmpc_ch_action_rx(fsm_instance *fi, int event, void *arg)
++{
++ channel *ch = (channel *)arg;
++ net_device *dev = ch->netdev;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group *grpptr = privptr->mpcg;
++ __u32 len = ch->max_bufsize - ch->devstat->rescnt;
++ struct sk_buff *skb = ch->trans_skb;
++ struct sk_buff *new_skb;
++ int rc = 0;
++ __u32 block_len;
++ unsigned long saveflags;
++ int gotlock = 0;
++
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s cp:%i enter:%s() %04x\n",
++ dev->name,smp_processor_id(),__FUNCTION__,ch->devno);
++#endif
++#ifdef DEBUGDATA
++ printk(KERN_INFO "ctcmpc:%s() max_bufsize:%04x rescnt:%04x len:%04x\n",
++ __FUNCTION__,ch->max_bufsize,ch->devstat->rescnt,len);
++#endif
++
++ fsm_deltimer(&ch->timer);
++
++ if(skb == NULL)
++ {
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s() TRANS_SKB = NULL \n",
++ __FUNCTION__);
++#endif
++ goto again;
++ }
++
++ if(len < TH_HEADER_LENGTH)
++ {
++ printk(KERN_INFO "%s: got packet with invalid length %d\n",
++ dev->name, len);
++ privptr->stats.rx_dropped++;
++ privptr->stats.rx_length_errors++;
++ goto again;
++ } else
++ {
++ /* must have valid th header or game over */
++ block_len = len;
++ len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
++ new_skb = __dev_alloc_skb(ch->max_bufsize,GFP_ATOMIC);
++
++ if(new_skb == NULL)
++ {
++ printk(KERN_INFO "ctcmpc exit:%s() NEW_SKB = NULL \n",
++ __FUNCTION__);
++ printk(KERN_WARNING "%s() MEMORY ALLOC FAILED - "
++ "DATA LOST - MPC FAILED\n",
++ __FUNCTION__);
++ fsm_event(privptr->mpcg->fsm,MPCG_EVENT_INOP,dev);
++ goto again;
++ }
++ switch(fsm_getstate(grpptr->fsm))
++ {
++ case MPCG_STATE_RESET:
++ case MPCG_STATE_INOP:
++ goto again;
++ case MPCG_STATE_FLOWC:
++ case MPCG_STATE_READY:
++ memcpy(skb_put(new_skb, block_len),
++ skb->data,
++ block_len);
++ skb_queue_tail(&ch->io_queue, new_skb);
++ tasklet_schedule(&ch->ch_tasklet);
++ goto again;
++ default:
++ memcpy(skb_put(new_skb, len), skb->data,len);
++ skb_queue_tail(&ch->io_queue, new_skb);
++ tasklet_hi_schedule(&ch->ch_tasklet);
++ goto again;
++ }
++
++ }
++
++ again:
++ switch(fsm_getstate(grpptr->fsm))
++ {
++ case MPCG_STATE_FLOWC:
++ case MPCG_STATE_READY:
++ if(ctcmpc_checkalloc_buffer(ch, 1))
++ break;
++ ch->trans_skb->data =
++ ch->trans_skb->tail =
++ ch->trans_skb_data;
++ ch->trans_skb->len = 0;
++ ch->ccw[1].count = ch->max_bufsize;
++#ifdef DEBUGCCW
++ dumpit((char *)&ch->ccw[0],sizeof(ccw1_t) * 3);
++#endif
++ if(!in_irq())
++ {
++ s390irq_spin_lock_irqsave(ch->irq,saveflags);
++ gotlock = 1;
++ }
++ rc = do_IO(ch->irq, &ch->ccw[0],
++ (intparm_t)ch, 0xff, 0);
++ if(gotlock)
++ s390irq_spin_unlock_irqrestore(ch->irq,
++ saveflags);
++ if(rc != 0)
++ ccw_check_return_code(ch, rc);
++ break;
++ default:
++ break;
++ }
++#ifdef DEBUG
++ printk(KERN_INFO "%s exit: %s() %04x\n",
++ dev->name,__FUNCTION__,ch->devno);
++#endif
++
++}
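++
++/*
++ * Note the "again:" tail above: whatever happened to the received
++ * block, the read channel is immediately re-primed with a fresh
++ * trans_skb and another do_IO() as long as the group is in FLOWC or
++ * READY state.  The s390irq lock is only taken when we are not
++ * already in interrupt context, hence the gotlock juggling.
++ */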
++
++/**
++ * Initialize connection by sending a __u16 of value 0.
++ *
++ * @param fi An instance of a channel statemachine.
++ * @param event The event that just happened.
++ * @param arg Generic pointer, cast from channel * upon call.
++ */
++static void
++ctcmpc_ch_action_firstio(fsm_instance *fi, int event, void *arg)
++{
++ channel *ch = (channel *)arg;
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s() %04x\n", __FUNCTION__,ch->devno);
++#endif
++
++#ifdef DEBUG
++ net_device *dev = ch->netdev;
++ mpc_group *grpptr = ((ctc_priv *)dev->priv)->mpcg;
++ printk(KERN_INFO "%s() %04x chstate:%i grpstate:%i chprotocol:%i\n",
++ __FUNCTION__, ch->devno,
++ fsm_getstate(fi),
++ fsm_getstate(grpptr->fsm),
++ ch->protocol);
++#endif
++
++ if(fsm_getstate(fi) == CH_STATE_TXIDLE)
++		printk(KERN_INFO "ch-%04x: remote side issued READ? "
++		       "init ...\n", ch->devno);
++ fsm_deltimer(&ch->timer);
++ if(ctcmpc_checkalloc_buffer(ch, 1))
++ {
++ goto done;
++ }
++
++ if(ch->protocol == CTC_PROTO_MPC)
++ switch(fsm_getstate(fi))
++ {
++ case CH_STATE_STARTRETRY:
++ case CH_STATE_SETUPWAIT:
++ if(CHANNEL_DIRECTION(ch->flags) == READ)
++ {
++ ctcmpc_ch_action_rxidle(fi, event, arg);
++ } else
++ {
++ net_device *dev = ch->netdev;
++ fsm_newstate(fi, CH_STATE_TXIDLE);
++ fsm_event(((ctc_priv *)dev->priv)->fsm,
++ DEV_EVENT_TXUP, dev);
++ }
++ goto done;
++ default:
++ break;
++
++ };
++
++ fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
++ ? CH_STATE_RXINIT : CH_STATE_TXINIT);
++
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s() %04x\n", __FUNCTION__,ch->devno);
++#endif
++ return;
++}
++
++/**
++ * Got initial data, check it. If OK,
++ * notify device statemachine that we are up and
++ * running.
++ *
++ * @param fi An instance of a channel statemachine.
++ * @param event The event that just happened.
++ * @param arg Generic pointer, cast from channel * upon call.
++ */
++static void
++ctcmpc_ch_action_rxidle(fsm_instance *fi, int event, void *arg)
++{
++ channel *ch = (channel *)arg;
++ net_device *dev = ch->netdev;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group *grpptr = privptr->mpcg;
++ int rc;
++ unsigned long saveflags;
++
++
++ fsm_deltimer(&ch->timer);
++#ifdef DEBUG
++ printk(KERN_INFO "%s cp:%i enter: %s()\n",
++ dev->name,smp_processor_id(),__FUNCTION__);
++#endif
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s() %04x chstate:%i grpstate:%i\n",
++ __FUNCTION__, ch->devno,
++ fsm_getstate(fi),
++ fsm_getstate(grpptr->fsm));
++#endif
++
++
++ fsm_newstate(fi, CH_STATE_RXIDLE);
++ /* XID processing complete */
++ switch(fsm_getstate(grpptr->fsm))
++ {
++ case MPCG_STATE_FLOWC:
++ case MPCG_STATE_READY:
++ if(ctcmpc_checkalloc_buffer(ch, 1)) goto done;
++ ch->trans_skb->data =
++ ch->trans_skb->tail =
++ ch->trans_skb_data;
++ ch->trans_skb->len = 0;
++ ch->ccw[1].count = ch->max_bufsize;
++#ifdef DEBUGCCW
++ dumpit((char *)&ch->ccw[0],sizeof(ccw1_t) * 3);
++#endif
++ if(event == CH_EVENT_START)
++ s390irq_spin_lock_irqsave(ch->irq, saveflags);
++ rc = do_IO(ch->irq, &ch->ccw[0],
++ (intparm_t)ch, 0xff, 0);
++ if(event == CH_EVENT_START)
++ s390irq_spin_unlock_irqrestore(ch->irq,
++ saveflags);
++ if(rc != 0)
++ {
++ fsm_newstate(fi, CH_STATE_RXINIT);
++ ccw_check_return_code(ch, rc);
++ goto done;
++ }
++ break;
++ default:
++ break;
++ }
++
++ fsm_event(((ctc_priv *)dev->priv)->fsm,
++ DEV_EVENT_RXUP, dev);
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO "%s exit: %s()\n", dev->name,__FUNCTION__);
++#endif
++ return;
++}
++
++/**
++ * Set channel into extended mode.
++ *
++ * @param fi An instance of a channel statemachine.
++ * @param event The event that just happened.
++ * @param arg Generic pointer, cast from channel * upon call.
++ */
++static void
++ctcmpc_ch_action_setmode(fsm_instance *fi, int event, void *arg)
++{
++ channel *ch = (channel *)arg;
++ int rc;
++ unsigned long saveflags;
++
++ fsm_deltimer(&ch->timer);
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc cp:%i enter: %s()\n",
++ smp_processor_id(),__FUNCTION__);
++#endif
++
++ fsm_addtimer(&ch->timer, 1500, CH_EVENT_TIMER, ch);
++ fsm_newstate(fi, CH_STATE_SETUPWAIT);
++
++#ifdef DEBUGCCW
++ dumpit((char *)&ch->ccw[6],sizeof(ccw1_t) * 2);
++#endif
++
++ if(event == CH_EVENT_TIMER)
++ s390irq_spin_lock_irqsave(ch->irq, saveflags);
++ rc = do_IO(ch->irq, &ch->ccw[6], (intparm_t)ch, 0xff, 0);
++ if(event == CH_EVENT_TIMER)
++ s390irq_spin_unlock_irqrestore(ch->irq, saveflags);
++ if(rc != 0)
++ {
++ fsm_deltimer(&ch->timer);
++ fsm_newstate(fi, CH_STATE_STARTWAIT);
++ ccw_check_return_code(ch, rc);
++ } else
++ ch->retry = 0;
++
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++}
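++
++/*
++ * Extended mode is requested with the two-CCW program at ccw[6]
++ * (dumped above under DEBUGCCW), guarded by a 1500 ms channel timer.
++ * On do_IO() failure we fall back to CH_STATE_STARTWAIT and let
++ * ccw_check_return_code() raise the matching CH_EVENT_IO_* event;
++ * on success the retry counter is reset.
++ */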
++
++/**
++ * Setup channel.
++ *
++ * @param fi An instance of a channel statemachine.
++ * @param event The event that just happened.
++ * @param arg Generic pointer, cast from channel * upon call.
++ */
++static void
++ctcmpc_ch_action_start(fsm_instance *fi, int event, void *arg)
++{
++ channel *ch = (channel *)arg;
++ unsigned long saveflags;
++ int rc;
++ net_device *dev;
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ if(ch == NULL)
++ {
++ printk(KERN_INFO "ctcmpc_ch_action_start ch=NULL\n");
++ goto done;
++ }
++ if(ch->netdev == NULL)
++ {
++ printk(KERN_INFO "ctcmpc_ch_action_start dev=NULL, irq=%d\n",
++ ch->irq);
++ goto done;
++ }
++ dev = ch->netdev;
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s: %s channel start\n", dev->name,
++ (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
++#endif
++
++ if(ch->trans_skb != NULL)
++ {
++ clear_normalized_cda(&ch->ccw[1]);
++ dev_kfree_skb(ch->trans_skb);
++ ch->trans_skb = NULL;
++ }
++ if(CHANNEL_DIRECTION(ch->flags) == READ)
++ {
++ ch->ccw[1].cmd_code = CCW_CMD_READ;
++ ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
++ ch->ccw[1].count = 0;
++ } else
++ {
++ ch->ccw[1].cmd_code = CCW_CMD_WRITE;
++ ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
++ ch->ccw[1].count = 0;
++ }
++ if(ctcmpc_checkalloc_buffer(ch, 0))
++ printk(KERN_NOTICE
++ "%s: Could not allocate %s trans_skb, delaying "
++ "allocation until first transfer\n",
++ dev->name,
++ (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
++
++
++ ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
++ ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
++ ch->ccw[0].count = 0;
++ ch->ccw[0].cda = 0;
++ ch->ccw[2].cmd_code = CCW_CMD_NOOP;
++ ch->ccw[2].flags = CCW_FLAG_SLI;
++ ch->ccw[2].count = 0;
++ ch->ccw[2].cda = 0;
++ /*************************************************
++ ch->ccw[2].cmd_code = CCW_CMD_TIC;
++ ch->ccw[2].flags = 0;
++ ch->ccw[2].count = 0;
++ ch->ccw[2].cda = virt_to_phys(&ch->ccw[15]);
++ **************************************************/
++ memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(ccw1_t) * 3);
++ ch->ccw[4].cda = 0;
++ ch->ccw[4].flags &= ~CCW_FLAG_IDA;
++
++
++ fsm_newstate(fi, CH_STATE_STARTWAIT);
++ fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
++ s390irq_spin_lock_irqsave(ch->irq, saveflags);
++ rc = halt_IO(ch->irq, (intparm_t)ch, 0);
++ s390irq_spin_unlock_irqrestore(ch->irq, saveflags);
++ if(rc != 0)
++ {
++ fsm_deltimer(&ch->timer);
++ ccw_check_return_code(ch, rc);
++ }
++
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++ return;
++}
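++
++/*
++ * CCW program layout set up above: ccw[0] PREPARE, ccw[1] READ or
++ * WRITE (CDA and count filled in per transfer), ccw[2] NOOP, with
++ * ccw[3..5] a copy of ccw[0..2] used as an alternate program (see
++ * the do_IO(&ch->ccw[3], ...) in the TX retry path).  The halt_IO()
++ * here kicks the channel so that the FINSTAT event can drive the
++ * STARTWAIT -> setmode transition in the channel FSM.
++ */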
++
++
++
++/**
++ * Shutdown a channel.
++ *
++ * @param fi An instance of a channel statemachine.
++ * @param event The event that just happened.
++ * @param arg Generic pointer, cast from channel * upon call.
++ */
++static void
++ctcmpc_ch_action_haltio(fsm_instance *fi, int event, void *arg)
++{
++ channel *ch = (channel *)arg;
++ unsigned long saveflags;
++ int rc;
++ int oldstate;
++ int gotlock = 0;
++
++ fsm_deltimer(&ch->timer);
++ fsm_deltimer(&ch->sweep_timer);
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
++ if(event == CH_EVENT_STOP)
++ {
++ s390irq_spin_lock_irqsave(ch->irq, saveflags);
++ gotlock = 1;
++ }
++ oldstate = fsm_getstate(fi);
++ fsm_newstate(fi, CH_STATE_TERM);
++ rc = halt_IO (ch->irq, (intparm_t)ch, 0);
++ if(gotlock)
++ s390irq_spin_unlock_irqrestore(ch->irq, saveflags);
++ if(rc != 0)
++ {
++ fsm_deltimer(&ch->timer);
++ /* When I say stop..that means STOP */
++ if(event != CH_EVENT_STOP)
++ {
++ fsm_newstate(fi, oldstate);
++ ccw_check_return_code(ch, rc);
++ }
++ }
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++}
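++
++/*
++ * Locking here mirrors the start path: only CH_EVENT_STOP (a call
++ * from outside the irq handler) takes the s390irq lock around
++ * halt_IO().  A failing halt restores the previous state and is
++ * re-reported only when the stop was not explicit.
++ */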
++
++/**
++ * A channel has successfully been halted.
++ * Clean up its queue and notify the interface statemachine.
++ *
++ * @param fi An instance of a channel statemachine.
++ * @param event The event that just happened.
++ * @param arg Generic pointer, cast from channel * upon call.
++ */
++static void
++ctcmpc_ch_action_stopped(fsm_instance *fi, int event, void *arg)
++{
++ channel *ch = (channel *)arg;
++ net_device *dev = ch->netdev;
++
++ fsm_deltimer(&ch->timer);
++ fsm_deltimer(&ch->sweep_timer);
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ fsm_newstate(fi, CH_STATE_STOPPED);
++ if(ch->trans_skb != NULL)
++ {
++ clear_normalized_cda(&ch->ccw[1]);
++ dev_kfree_skb_any(ch->trans_skb);
++ ch->trans_skb = NULL;
++ }
++
++ ch->th_seg = 0x00;
++ ch->th_seq_num = 0x00;
++
++#ifdef DEBUGSEQ
++ printk(KERN_INFO "%s: CH_th_seq= %08x\n" ,__FUNCTION__,ch->th_seq_num);
++#endif
++
++ if(CHANNEL_DIRECTION(ch->flags) == READ)
++ {
++ skb_queue_purge(&ch->io_queue);
++ fsm_event(((ctc_priv *)dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
++ } else
++ {
++ ctcmpc_purge_skb_queue(&ch->io_queue);
++ ctcmpc_purge_skb_queue(&ch->sweep_queue);
++ spin_lock(&ch->collect_lock);
++ ctcmpc_purge_skb_queue(&ch->collect_queue);
++ ch->collect_len = 0;
++ spin_unlock(&ch->collect_lock);
++ fsm_event(((ctc_priv *)dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
++ }
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++}
++
++/**
++ * A stop command from the device statemachine arrived while we are in
++ * not-operational mode. Set state to stopped.
++ *
++ * @param fi An instance of a channel statemachine.
++ * @param event The event that just happened.
++ * @param arg Generic pointer, cast from channel * upon call.
++ */
++static void
++ctcmpc_ch_action_stop(fsm_instance *fi, int event, void *arg)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ fsm_newstate(fi, CH_STATE_STOPPED);
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++}
++
++/**
++ * A machine check for no path, not-operational status or a gone device
++ * has happened.
++ * Cleanup queue and notify interface statemachine.
++ *
++ * @param fi An instance of a channel statemachine.
++ * @param event The event that just happened.
++ * @param arg Generic pointer, cast from channel * upon call.
++ */
++static void
++ctcmpc_ch_action_fail(fsm_instance *fi, int event, void *arg)
++{
++ channel *ch = (channel *)arg;
++ net_device *dev = ch->netdev;
++
++ fsm_deltimer(&ch->timer);
++ fsm_deltimer(&ch->sweep_timer);
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ fsm_newstate(fi, CH_STATE_NOTOP);
++
++ ch->th_seg = 0x00;
++ ch->th_seq_num = 0x00;
++
++#ifdef DEBUGSEQ
++ printk(KERN_INFO "%s: CH_th_seq= %08x\n" ,__FUNCTION__,ch->th_seq_num);
++#endif
++ if(CHANNEL_DIRECTION(ch->flags) == READ)
++ {
++ skb_queue_purge(&ch->io_queue);
++ fsm_event(((ctc_priv *)dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
++ } else
++ {
++ ctcmpc_purge_skb_queue(&ch->io_queue);
++ ctcmpc_purge_skb_queue(&ch->sweep_queue);
++ spin_lock(&ch->collect_lock);
++ ctcmpc_purge_skb_queue(&ch->collect_queue);
++ ch->collect_len = 0;
++ spin_unlock(&ch->collect_lock);
++ fsm_event(((ctc_priv *)dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
++ }
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++}
++
++/**
++ * Handle error during setup of channel.
++ *
++ * @param fi An instance of a channel statemachine.
++ * @param event The event that just happened.
++ * @param arg Generic pointer, cast from channel * upon call.
++ */
++static void
++ctcmpc_ch_action_setuperr(fsm_instance *fi, int event, void *arg)
++{
++ channel *ch = (channel *)arg;
++ net_device *dev = ch->netdev;
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ /**
++ * Special case: Got UC_RCRESET on setmode.
++	 * This means the remote side isn't set up yet. In this case,
++	 * simply retry after a few seconds...
++ */
++ if((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
++ ((event == CH_EVENT_UC_RCRESET) ||
++ (event == CH_EVENT_UC_RSRESET) ))
++ {
++ fsm_newstate(fi, CH_STATE_STARTRETRY);
++ fsm_deltimer(&ch->timer);
++ fsm_addtimer(&ch->timer, CTC_TIMEOUT_1SEC, CH_EVENT_TIMER, ch);
++// if (CHANNEL_DIRECTION(ch->flags) == READ) {
++// int rc = halt_IO (ch->irq, (intparm_t)ch, 0);
++// if (rc != 0)
++// ccw_check_return_code(ch, rc);
++// }
++ goto done;
++ }
++
++ printk(KERN_INFO "%s: Error %s during %s channel setup state=%s\n",
++ dev->name, ch_event_names[event],
++ (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
++ fsm_getstate_str(fi));
++ if(CHANNEL_DIRECTION(ch->flags) == READ)
++ {
++ fsm_newstate(fi, CH_STATE_RXERR);
++ fsm_event(((ctc_priv *)dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
++ } else
++ {
++ fsm_newstate(fi, CH_STATE_TXERR);
++ fsm_event(((ctc_priv *)dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
++ }
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++ return;
++}
++
++/**
++ * Restart a channel after an error.
++ *
++ * @param fi An instance of a channel statemachine.
++ * @param event The event that just happened.
++ * @param arg Generic pointer, cast from channel * upon call.
++ */
++static void
++ctcmpc_ch_action_restart(fsm_instance *fi, int event, void *arg)
++{
++ unsigned long saveflags;
++ int oldstate;
++ int rc;
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ channel *ch = (channel *)arg;
++ net_device *dev = ch->netdev;
++
++ fsm_deltimer(&ch->timer);
++ printk(KERN_INFO "%s: %s channel restart\n", dev->name,
++ (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
++ fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
++ oldstate = fsm_getstate(fi);
++ fsm_newstate(fi, CH_STATE_STARTWAIT);
++ if(event == CH_EVENT_TIMER)
++ s390irq_spin_lock_irqsave(ch->irq, saveflags);
++ rc = halt_IO (ch->irq, (intparm_t)ch, 0);
++ if(event == CH_EVENT_TIMER)
++ s390irq_spin_unlock_irqrestore(ch->irq, saveflags);
++ if(rc != 0)
++ {
++ fsm_deltimer(&ch->timer);
++ fsm_newstate(fi, oldstate);
++ ccw_check_return_code(ch, rc);
++ }
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++}
++
++/**
++ * Handle error during RX initial handshake (exchange of
++ * 0-length block header)
++ *
++ * @param fi An instance of a channel statemachine.
++ * @param event The event that just happened.
++ * @param arg Generic pointer, cast from channel * upon call.
++ */
++static void
++ctcmpc_ch_action_rxiniterr(fsm_instance *fi, int event, void *arg)
++{
++ channel *ch = (channel *)arg;
++ net_device *dev = ch->netdev;
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ if(event == CH_EVENT_TIMER)
++ {
++ printk(KERN_INFO "%s: Timeout during RX init handshake\n",
++ dev->name);
++ if(ch->retry++ < 3)
++ ctcmpc_ch_action_restart(fi, event, arg);
++ else
++ {
++ fsm_newstate(fi, CH_STATE_RXERR);
++ fsm_event(((ctc_priv *)dev->priv)->fsm,
++ DEV_EVENT_RXDOWN, dev);
++ }
++ } else
++ printk(KERN_INFO "%s: Error during RX init handshake\n",
++ dev->name);
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++}
++
++/**
++ * Notify device statemachine if we gave up initialization
++ * of RX channel.
++ *
++ * @param fi An instance of a channel statemachine.
++ * @param event The event that just happened.
++ * @param arg Generic pointer, cast from channel * upon call.
++ */
++static void
++ctcmpc_ch_action_rxinitfail(fsm_instance *fi, int event, void *arg)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ channel *ch = (channel *)arg;
++ net_device *dev = ch->netdev;
++
++ fsm_newstate(fi, CH_STATE_RXERR);
++ printk(KERN_INFO "%s: RX initialization failed\n", dev->name);
++ printk(KERN_INFO "%s: RX <-> RX connection detected\n", dev->name);
++ fsm_event(((ctc_priv *)dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++}
++
++/**
++ * Handle error during TX channel initialization.
++ *
++ * @param event The event that just happened.
++ * @param arg Generic pointer, cast from channel * upon call.
++ * @param arg Generic pointer, casted from channel * upon call.
++ */
++static void
++ctcmpc_ch_action_txiniterr(fsm_instance *fi, int event, void *arg)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ channel *ch = (channel *)arg;
++ net_device *dev = ch->netdev;
++
++ if(event == CH_EVENT_TIMER)
++ {
++ fsm_deltimer(&ch->timer);
++ printk(KERN_INFO "%s: Timeout during TX init handshake\n",
++ dev->name);
++ if(ch->retry++ < 3)
++ ctcmpc_ch_action_restart(fi, event, arg);
++ else
++ {
++ fsm_newstate(fi, CH_STATE_TXERR);
++ fsm_event(((ctc_priv *)dev->priv)->fsm,
++ DEV_EVENT_TXDOWN, dev);
++ }
++ } else
++ printk(KERN_INFO "%s: Error during TX init handshake\n",
++ dev->name);
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++}
++
++/**
++ * Handle TX timeout by retrying operation.
++ *
++ * @param fi An instance of a channel statemachine.
++ * @param event The event that just happened.
++ * @param arg Generic pointer, cast from channel * upon call.
++ */
++static void
++ctcmpc_ch_action_txretry(fsm_instance *fi, int event, void *arg)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc cp:%i enter: %s()\n",
++ smp_processor_id(),__FUNCTION__);
++#endif
++
++ channel *ch = (channel *)arg;
++ net_device *dev = ch->netdev;
++ unsigned long saveflags;
++
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group *grpptr = privptr->mpcg;
++
++ fsm_deltimer(&ch->timer);
++
++
++ if(ch->retry++ > 3)
++ {
++ printk(KERN_INFO "%s: TX retry limit reached\n",
++ dev->name);
++ fsm_event(((ctc_priv *)dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
++ if((grpptr) && (fsm_getstate(grpptr->fsm) == MPCG_STATE_READY))
++ ctcmpc_ch_action_restart(fi, event, arg);
++ } else
++ {
++ struct sk_buff *skb;
++
++ printk(KERN_INFO "%s: TX retry %d\n", dev->name, ch->retry);
++ if((skb = skb_peek(&ch->io_queue)))
++ {
++ int rc = 0;
++
++ clear_normalized_cda(&ch->ccw[4]);
++ ch->ccw[4].count = skb->len;
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21))
++ if(set_normalized_cda(&ch->ccw[4],
++ virt_to_phys(skb->data)))
++ {
++#else
++ if(set_normalized_cda(&ch->ccw[4],skb->data))
++ {
++#endif
++ printk(KERN_INFO "%s: IDAL alloc failed, "
++ "restarting channel\n", dev->name);
++ fsm_event(((ctc_priv *)dev->priv)->fsm,
++ DEV_EVENT_TXDOWN, dev);
++ ctcmpc_ch_action_restart(fi, event, arg);
++ goto done;
++ }
++ fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
++ if(event == CH_EVENT_TIMER)
++ s390irq_spin_lock_irqsave(ch->irq, saveflags);
++
++#ifdef DEBUGCCW
++ dumpit((char *)&ch->ccw[3],sizeof(ccw1_t) * 3);
++#endif
++
++ rc = do_IO(ch->irq, &ch->ccw[3],
++ (intparm_t)ch, 0xff, 0);
++ if(event == CH_EVENT_TIMER)
++ s390irq_spin_unlock_irqrestore(ch->irq,
++ saveflags);
++ if(rc != 0)
++ {
++ /*not all return codes are bad */
++ /* allow retries to occur */
++ /*ccw_check will result in ch */
++ /* down if rc was serious error*/
++ ccw_check_return_code(ch, rc);
++ }
++ }
++ }
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++ return;
++}
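++
++/*
++ * Retry bookkeeping: ch->retry is reset to 0 by a successful setmode
++ * and counted up here; after more than 3 attempts the TX side is
++ * declared down, and the channel is only restarted automatically if
++ * the MPC group as a whole is still READY.
++ */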
++/**
++ * Handle TX busy by setting a timer to wait for completion;
++ * the normal retry logic will then take over.
++ *
++ * @param fi An instance of a channel statemachine.
++ * @param event The event that just happened.
++ * @param arg Generic pointer, cast from channel * upon call.
++ */
++static void
++ctcmpc_ch_action_txbusy(fsm_instance *fi, int event, void *arg)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ channel *ch = (channel *)arg;
++ if(ch->retry == 0)
++ {
++ fsm_deltimer(&ch->timer);
++ fsm_addtimer(&ch->timer,
++ CTC_BUSYWAIT_10SEC,
++ CH_EVENT_TIMER,
++ ch);
++ }
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++ return;
++}
++
++/**
++ * Handle fatal errors during an I/O command.
++ *
++ * @param fi An instance of a channel statemachine.
++ * @param event The event that just happened.
++ * @param arg Generic pointer, cast from channel * upon call.
++ */
++static void
++ctcmpc_ch_action_iofatal(fsm_instance *fi, int event, void *arg)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ channel *ch = (channel *)arg;
++ net_device *dev = ch->netdev;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++
++ fsm_deltimer(&ch->timer);
++
++ printk(KERN_WARNING "%s(): UNRECOVERABLE CHANNEL ERR - "
++ "CHANNEL REMOVED FROM MPC GROUP\n",
++ __FUNCTION__);
++ privptr->stats.tx_dropped++;
++ privptr->stats.tx_errors++;
++
++ if(CHANNEL_DIRECTION(ch->flags) == READ)
++ {
++ printk(KERN_INFO "%s: RX I/O error\n", dev->name);
++ fsm_newstate(fi, CH_STATE_RXERR);
++ fsm_event(((ctc_priv *)dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
++ } else
++ {
++ printk(KERN_INFO "%s: TX I/O error\n", dev->name);
++ fsm_newstate(fi, CH_STATE_TXERR);
++ fsm_event(((ctc_priv *)dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
++ }
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++}
++
++/**
++ * The statemachine for a channel.
++ */
++static const fsm_node ch_fsm[] = {
++ { CH_STATE_STOPPED, CH_EVENT_STOP, fsm_action_nop},
++ { CH_STATE_STOPPED, CH_EVENT_START, ctcmpc_ch_action_start},
++ { CH_STATE_STOPPED, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++
++ { CH_STATE_STOPPED, CH_EVENT_FINSTAT, fsm_action_nop},
++ { CH_STATE_STOPPED, CH_EVENT_MC_FAIL, fsm_action_nop},
++
++ { CH_STATE_NOTOP, CH_EVENT_STOP, ctcmpc_ch_action_stop},
++ { CH_STATE_NOTOP, CH_EVENT_START, fsm_action_nop},
++ { CH_STATE_NOTOP, CH_EVENT_FINSTAT, fsm_action_nop},
++ { CH_STATE_NOTOP, CH_EVENT_MC_FAIL, fsm_action_nop},
++ { CH_STATE_NOTOP, CH_EVENT_MC_GOOD, ctcmpc_ch_action_start},
++ { CH_STATE_NOTOP, CH_EVENT_UC_RCRESET, ctcmpc_ch_action_stop},
++ { CH_STATE_NOTOP, CH_EVENT_UC_RSRESET, ctcmpc_ch_action_stop},
++ { CH_STATE_NOTOP, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++
++ { CH_STATE_STARTWAIT, CH_EVENT_STOP, ctcmpc_ch_action_haltio},
++ { CH_STATE_STARTWAIT, CH_EVENT_START, fsm_action_nop},
++ { CH_STATE_STARTWAIT, CH_EVENT_FINSTAT, ctcmpc_ch_action_setmode},
++ { CH_STATE_STARTWAIT, CH_EVENT_TIMER, ctcmpc_ch_action_setuperr},
++ { CH_STATE_STARTWAIT, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++ { CH_STATE_STARTWAIT, CH_EVENT_IO_EIO, ctcmpc_ch_action_iofatal},
++ { CH_STATE_STARTWAIT, CH_EVENT_MC_FAIL, ctcmpc_ch_action_fail},
++
++ { CH_STATE_STARTRETRY, CH_EVENT_STOP, ctcmpc_ch_action_haltio},
++ { CH_STATE_STARTRETRY, CH_EVENT_TIMER, ctcmpc_ch_action_setmode},
++ { CH_STATE_STARTRETRY, CH_EVENT_FINSTAT, ctcmpc_ch_action_setmode},
++// { CH_STATE_STARTRETRY, CH_EVENT_FINSTAT, fsm_action_nop },
++ { CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL, ctcmpc_ch_action_fail},
++ { CH_STATE_STARTRETRY, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++
++
++ { CH_STATE_SETUPWAIT, CH_EVENT_STOP, ctcmpc_ch_action_haltio},
++ { CH_STATE_SETUPWAIT, CH_EVENT_START, fsm_action_nop},
++ { CH_STATE_SETUPWAIT, CH_EVENT_FINSTAT, ctcmpc_ch_action_firstio},
++ { CH_STATE_SETUPWAIT, CH_EVENT_UC_RCRESET, ctcmpc_ch_action_setuperr},
++ { CH_STATE_SETUPWAIT, CH_EVENT_UC_RSRESET, ctcmpc_ch_action_setuperr},
++ { CH_STATE_SETUPWAIT, CH_EVENT_TIMER, ctcmpc_ch_action_setmode},
++ { CH_STATE_SETUPWAIT, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++ { CH_STATE_SETUPWAIT, CH_EVENT_IO_EIO, ctcmpc_ch_action_iofatal},
++ { CH_STATE_SETUPWAIT, CH_EVENT_MC_FAIL, ctcmpc_ch_action_fail},
++
++ { CH_STATE_RXINIT, CH_EVENT_STOP, ctcmpc_ch_action_haltio},
++ { CH_STATE_RXINIT, CH_EVENT_START, fsm_action_nop},
++ { CH_STATE_RXINIT, CH_EVENT_FINSTAT, ctcmpc_ch_action_rxidle},
++ { CH_STATE_RXINIT, CH_EVENT_UC_RCRESET, ctcmpc_ch_action_rxiniterr},
++ { CH_STATE_RXINIT, CH_EVENT_UC_RSRESET, ctcmpc_ch_action_rxiniterr},
++ { CH_STATE_RXINIT, CH_EVENT_TIMER, ctcmpc_ch_action_rxiniterr},
++ { CH_STATE_RXINIT, CH_EVENT_ATTNBUSY, ctcmpc_ch_action_rxinitfail},
++ { CH_STATE_RXINIT, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++ { CH_STATE_RXINIT, CH_EVENT_IO_EIO, ctcmpc_ch_action_iofatal},
++ { CH_STATE_RXINIT, CH_EVENT_UC_ZERO, ctcmpc_ch_action_firstio},
++ { CH_STATE_RXINIT, CH_EVENT_MC_FAIL, ctcmpc_ch_action_fail},
++
++ { CH_XID0_PENDING, CH_EVENT_FINSTAT, fsm_action_nop},
++ { CH_XID0_PENDING, CH_EVENT_ATTN, ctcmpc_action_attn},
++ { CH_XID0_PENDING, CH_EVENT_STOP, ctcmpc_ch_action_haltio},
++ { CH_XID0_PENDING, CH_EVENT_START, fsm_action_nop},
++ { CH_XID0_PENDING, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++ { CH_XID0_PENDING, CH_EVENT_IO_EIO, ctcmpc_ch_action_iofatal},
++ { CH_XID0_PENDING, CH_EVENT_MC_FAIL, ctcmpc_ch_action_fail},
++ { CH_XID0_PENDING, CH_EVENT_UC_RCRESET,ctcmpc_ch_action_setuperr},
++ { CH_XID0_PENDING, CH_EVENT_UC_RSRESET,ctcmpc_ch_action_setuperr},
++ { CH_XID0_PENDING, CH_EVENT_ATTNBUSY, ctcmpc_ch_action_iofatal},
++
++ { CH_XID0_INPROGRESS, CH_EVENT_FINSTAT, ctcmpc_ch_action_rx},
++ { CH_XID0_INPROGRESS, CH_EVENT_ATTN, ctcmpc_action_attn},
++ { CH_XID0_INPROGRESS, CH_EVENT_STOP, ctcmpc_ch_action_haltio},
++ { CH_XID0_INPROGRESS, CH_EVENT_START, fsm_action_nop},
++ { CH_XID0_INPROGRESS, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++ { CH_XID0_INPROGRESS, CH_EVENT_IO_EIO, ctcmpc_ch_action_iofatal},
++ { CH_XID0_INPROGRESS, CH_EVENT_MC_FAIL, ctcmpc_ch_action_fail},
++ { CH_XID0_INPROGRESS, CH_EVENT_UC_ZERO, ctcmpc_ch_action_rx},
++ { CH_XID0_INPROGRESS, CH_EVENT_UC_RCRESET, ctcmpc_ch_action_setuperr},
++ { CH_XID0_INPROGRESS, CH_EVENT_ATTNBUSY, ctcmpc_action_attnbusy},
++ { CH_XID0_INPROGRESS, CH_EVENT_TIMER, ctcmpc_action_resend},
++ { CH_XID0_INPROGRESS, CH_EVENT_IO_EBUSY, ctcmpc_ch_action_fail},
++
++ { CH_XID7_PENDING, CH_EVENT_FINSTAT, ctcmpc_ch_action_rx},
++ { CH_XID7_PENDING, CH_EVENT_ATTN, ctcmpc_action_attn},
++ { CH_XID7_PENDING, CH_EVENT_STOP, ctcmpc_ch_action_haltio},
++ { CH_XID7_PENDING, CH_EVENT_START, fsm_action_nop},
++ { CH_XID7_PENDING, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++ { CH_XID7_PENDING, CH_EVENT_IO_EIO, ctcmpc_ch_action_iofatal},
++ { CH_XID7_PENDING, CH_EVENT_MC_FAIL, ctcmpc_ch_action_fail},
++ { CH_XID7_PENDING, CH_EVENT_UC_ZERO, ctcmpc_ch_action_rx},
++ { CH_XID7_PENDING, CH_EVENT_UC_RCRESET,ctcmpc_ch_action_setuperr},
++ { CH_XID7_PENDING, CH_EVENT_UC_RSRESET,ctcmpc_ch_action_setuperr},
++ { CH_XID7_PENDING, CH_EVENT_ATTNBUSY, ctcmpc_ch_action_iofatal},
++ { CH_XID7_PENDING, CH_EVENT_TIMER, ctcmpc_action_resend},
++ { CH_XID7_PENDING, CH_EVENT_IO_EBUSY, ctcmpc_ch_action_fail},
++
++
++ { CH_XID7_PENDING1, CH_EVENT_FINSTAT, ctcmpc_ch_action_rx},
++ { CH_XID7_PENDING1, CH_EVENT_ATTN, ctcmpc_action_attn},
++ { CH_XID7_PENDING1, CH_EVENT_STOP, ctcmpc_ch_action_haltio},
++ { CH_XID7_PENDING1, CH_EVENT_START, fsm_action_nop},
++ { CH_XID7_PENDING1, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++ { CH_XID7_PENDING1, CH_EVENT_IO_EIO, ctcmpc_ch_action_iofatal},
++ { CH_XID7_PENDING1, CH_EVENT_MC_FAIL, ctcmpc_ch_action_fail},
++ { CH_XID7_PENDING1, CH_EVENT_UC_ZERO, ctcmpc_ch_action_rx},
++ { CH_XID7_PENDING1, CH_EVENT_UC_RCRESET,ctcmpc_ch_action_setuperr},
++ { CH_XID7_PENDING1, CH_EVENT_UC_RSRESET,ctcmpc_ch_action_setuperr},
++ { CH_XID7_PENDING1, CH_EVENT_ATTNBUSY, ctcmpc_ch_action_iofatal},
++ { CH_XID7_PENDING1, CH_EVENT_TIMER, ctcmpc_action_resend},
++ { CH_XID7_PENDING1, CH_EVENT_IO_EBUSY, ctcmpc_ch_action_fail},
++
++ { CH_XID7_PENDING2, CH_EVENT_FINSTAT, ctcmpc_ch_action_rx},
++ { CH_XID7_PENDING2, CH_EVENT_ATTN, ctcmpc_action_attn},
++ { CH_XID7_PENDING2, CH_EVENT_STOP, ctcmpc_ch_action_haltio},
++ { CH_XID7_PENDING2, CH_EVENT_START, fsm_action_nop},
++ { CH_XID7_PENDING2, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++ { CH_XID7_PENDING2, CH_EVENT_IO_EIO, ctcmpc_ch_action_iofatal},
++ { CH_XID7_PENDING2, CH_EVENT_MC_FAIL, ctcmpc_ch_action_fail},
++ { CH_XID7_PENDING2, CH_EVENT_UC_ZERO, ctcmpc_ch_action_rx},
++ { CH_XID7_PENDING2, CH_EVENT_UC_RCRESET,ctcmpc_ch_action_setuperr},
++ { CH_XID7_PENDING2, CH_EVENT_UC_RSRESET,ctcmpc_ch_action_setuperr},
++ { CH_XID7_PENDING2, CH_EVENT_ATTNBUSY, ctcmpc_ch_action_iofatal},
++ { CH_XID7_PENDING2, CH_EVENT_TIMER, ctcmpc_action_resend},
++ { CH_XID7_PENDING2, CH_EVENT_IO_EBUSY, ctcmpc_ch_action_fail},
++
++
++ { CH_XID7_PENDING3, CH_EVENT_FINSTAT, ctcmpc_ch_action_rx},
++ { CH_XID7_PENDING3, CH_EVENT_ATTN, ctcmpc_action_attn},
++ { CH_XID7_PENDING3, CH_EVENT_STOP, ctcmpc_ch_action_haltio},
++ { CH_XID7_PENDING3, CH_EVENT_START, fsm_action_nop},
++ { CH_XID7_PENDING3, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++ { CH_XID7_PENDING3, CH_EVENT_IO_EIO, ctcmpc_ch_action_iofatal},
++ { CH_XID7_PENDING3, CH_EVENT_MC_FAIL, ctcmpc_ch_action_fail},
++ { CH_XID7_PENDING3, CH_EVENT_UC_ZERO, ctcmpc_ch_action_rx},
++ { CH_XID7_PENDING3, CH_EVENT_UC_RCRESET,ctcmpc_ch_action_setuperr},
++ { CH_XID7_PENDING3, CH_EVENT_UC_RSRESET,ctcmpc_ch_action_setuperr},
++ { CH_XID7_PENDING3, CH_EVENT_ATTNBUSY, ctcmpc_ch_action_iofatal},
++ { CH_XID7_PENDING3, CH_EVENT_TIMER, ctcmpc_action_resend},
++ { CH_XID7_PENDING3, CH_EVENT_IO_EBUSY, ctcmpc_ch_action_fail},
++
++
++ { CH_XID7_PENDING4, CH_EVENT_FINSTAT, ctcmpc_ch_action_rx},
++ { CH_XID7_PENDING4, CH_EVENT_ATTN, ctcmpc_action_attn},
++ { CH_XID7_PENDING4, CH_EVENT_STOP, ctcmpc_ch_action_haltio},
++ { CH_XID7_PENDING4, CH_EVENT_START, fsm_action_nop},
++ { CH_XID7_PENDING4, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++ { CH_XID7_PENDING4, CH_EVENT_IO_EIO, ctcmpc_ch_action_iofatal},
++ { CH_XID7_PENDING4, CH_EVENT_MC_FAIL, ctcmpc_ch_action_fail},
++ { CH_XID7_PENDING4, CH_EVENT_UC_ZERO, ctcmpc_ch_action_rx},
++ { CH_XID7_PENDING4, CH_EVENT_UC_RCRESET,ctcmpc_ch_action_setuperr},
++ { CH_XID7_PENDING4, CH_EVENT_UC_RSRESET,ctcmpc_ch_action_setuperr},
++ { CH_XID7_PENDING4, CH_EVENT_ATTNBUSY, ctcmpc_ch_action_iofatal},
++ { CH_XID7_PENDING4, CH_EVENT_TIMER, ctcmpc_action_resend},
++ { CH_XID7_PENDING4, CH_EVENT_IO_EBUSY, ctcmpc_ch_action_fail},
++
++ { CH_STATE_RXIDLE, CH_EVENT_STOP, ctcmpc_ch_action_haltio},
++ { CH_STATE_RXIDLE, CH_EVENT_START, fsm_action_nop},
++ { CH_STATE_RXIDLE, CH_EVENT_FINSTAT, ctcmpc_ch_action_rx},
++ { CH_STATE_RXIDLE, CH_EVENT_UC_RCRESET, ctcmpc_ch_action_fail},
++ { CH_STATE_RXIDLE, CH_EVENT_UC_RSRESET, ctcmpc_ch_action_fail},
++ { CH_STATE_RXIDLE, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++ { CH_STATE_RXIDLE, CH_EVENT_IO_EIO, ctcmpc_ch_action_iofatal},
++ { CH_STATE_RXIDLE, CH_EVENT_MC_FAIL, ctcmpc_ch_action_fail},
++ { CH_STATE_RXIDLE, CH_EVENT_UC_ZERO, ctcmpc_ch_action_rx},
++
++ { CH_STATE_TXINIT, CH_EVENT_STOP, ctcmpc_ch_action_haltio},
++ { CH_STATE_TXINIT, CH_EVENT_START, fsm_action_nop},
++ { CH_STATE_TXINIT, CH_EVENT_FINSTAT, ctcmpc_ch_action_txidle},
++ { CH_STATE_TXINIT, CH_EVENT_UC_RCRESET, ctcmpc_ch_action_txiniterr},
++ { CH_STATE_TXINIT, CH_EVENT_UC_RSRESET, ctcmpc_ch_action_txiniterr},
++ { CH_STATE_TXINIT, CH_EVENT_TIMER, ctcmpc_ch_action_txiniterr},
++ { CH_STATE_TXINIT, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++ { CH_STATE_TXINIT, CH_EVENT_IO_EIO, ctcmpc_ch_action_iofatal},
++ { CH_STATE_TXINIT, CH_EVENT_MC_FAIL, ctcmpc_ch_action_fail},
++ { CH_STATE_TXINIT, CH_EVENT_RSWEEP1_TIMER, ctcmpc_send_sweep},
++
++ { CH_STATE_TXIDLE, CH_EVENT_STOP, ctcmpc_ch_action_haltio},
++ { CH_STATE_TXIDLE, CH_EVENT_START, fsm_action_nop},
++ { CH_STATE_TXIDLE, CH_EVENT_FINSTAT, ctcmpc_ch_action_firstio},
++ { CH_STATE_TXIDLE, CH_EVENT_UC_RCRESET, ctcmpc_ch_action_fail},
++ { CH_STATE_TXIDLE, CH_EVENT_UC_RSRESET, ctcmpc_ch_action_fail},
++ { CH_STATE_TXIDLE, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++ { CH_STATE_TXIDLE, CH_EVENT_IO_EIO, ctcmpc_ch_action_iofatal},
++ { CH_STATE_TXIDLE, CH_EVENT_MC_FAIL, ctcmpc_ch_action_fail},
++ { CH_STATE_TXIDLE, CH_EVENT_RSWEEP1_TIMER, ctcmpc_send_sweep},
++
++ { CH_STATE_TERM, CH_EVENT_STOP, fsm_action_nop},
++ { CH_STATE_TERM, CH_EVENT_START, ctcmpc_ch_action_restart},
++ { CH_STATE_TERM, CH_EVENT_FINSTAT, ctcmpc_ch_action_stopped},
++ { CH_STATE_TERM, CH_EVENT_UC_RCRESET, fsm_action_nop},
++ { CH_STATE_TERM, CH_EVENT_UC_RSRESET, fsm_action_nop},
++ { CH_STATE_TERM, CH_EVENT_MC_FAIL, ctcmpc_ch_action_fail},
++ { CH_STATE_TERM, CH_EVENT_IO_EBUSY, ctcmpc_ch_action_fail},
++ { CH_STATE_TERM, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++
++ { CH_STATE_DTERM, CH_EVENT_STOP, ctcmpc_ch_action_haltio},
++ { CH_STATE_DTERM, CH_EVENT_START, ctcmpc_ch_action_restart},
++ { CH_STATE_DTERM, CH_EVENT_FINSTAT, ctcmpc_ch_action_setmode},
++ { CH_STATE_DTERM, CH_EVENT_UC_RCRESET, fsm_action_nop},
++ { CH_STATE_DTERM, CH_EVENT_UC_RSRESET, fsm_action_nop},
++ { CH_STATE_DTERM, CH_EVENT_MC_FAIL, ctcmpc_ch_action_fail},
++ { CH_STATE_DTERM, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++
++ { CH_STATE_TX, CH_EVENT_STOP, ctcmpc_ch_action_haltio},
++ { CH_STATE_TX, CH_EVENT_START, fsm_action_nop},
++ { CH_STATE_TX, CH_EVENT_FINSTAT, ctcmpc_ch_action_txdone},
++ { CH_STATE_TX, CH_EVENT_UC_RCRESET, ctcmpc_ch_action_fail},
++ { CH_STATE_TX, CH_EVENT_UC_RSRESET, ctcmpc_ch_action_fail},
++ { CH_STATE_TX, CH_EVENT_TIMER, ctcmpc_ch_action_txretry},
++ { CH_STATE_TX, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++ { CH_STATE_TX, CH_EVENT_IO_EIO, ctcmpc_ch_action_iofatal},
++ { CH_STATE_TX, CH_EVENT_MC_FAIL, ctcmpc_ch_action_fail},
++ { CH_STATE_TX, CH_EVENT_RSWEEP1_TIMER, ctcmpc_send_sweep},
++ { CH_STATE_TX, CH_EVENT_IO_EBUSY, ctcmpc_ch_action_txbusy},
++
++ { CH_STATE_RXERR, CH_EVENT_STOP, ctcmpc_ch_action_haltio},
++ { CH_STATE_TXERR, CH_EVENT_STOP, ctcmpc_ch_action_haltio},
++ { CH_STATE_TXERR, CH_EVENT_IO_ENODEV, ctcmpc_ch_action_iofatal},
++
++ { CH_STATE_TXERR, CH_EVENT_MC_FAIL, ctcmpc_ch_action_fail},
++ { CH_STATE_RXERR, CH_EVENT_MC_FAIL, ctcmpc_ch_action_fail},
++};
++
++static const int CH_FSM_LEN = sizeof(ch_fsm) / sizeof(fsm_node);
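++
++/*
++ * Illustrative sketch only -- not part of the original driver. It
++ * models how a (state, event) pair is dispatched through a table like
++ * ch_fsm[] above; the field names cond_state/cond_event/function come
++ * from the s390 fsm helpers (drivers/s390/net/fsm.h), but the real
++ * init_fsm()/fsm_event() pair builds and consults a flattened jump
++ * table rather than scanning linearly.
++ */
++#if 0
++static void
++example_dispatch(fsm_instance *fi, int state, int event, void *arg)
++{
++	int i;
++
++	for(i = 0; i < CH_FSM_LEN; i++)
++		if((ch_fsm[i].cond_state == state) &&
++		   (ch_fsm[i].cond_event == event))
++		{
++			ch_fsm[i].function(fi, event, arg);
++			return;
++		}
++	/* No matching node: the real fsm code reports an undefined event. */
++}
++#endif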
++
++/**
++ * Functions related to setup and device detection.
++ *****************************************************************************/
++
++/**
++ * Add a new channel to the list of channels.
++ * Keeps the channel list sorted.
++ *
++ * @param irq The IRQ to be used by the new channel.
++ * @param devno The device number of the new channel.
++ * @param type The type class of the new channel.
++ *
++ * @return 0 on success, !0 on error.
++ */
++static int
++ctcmpc_add_channel(int irq, __u16 devno, channel_type_t type)
++{
++ channel **c = &channels;
++ channel *ch;
++ char name[10];
++ int rc = 0;
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++
++ if((ch = (channel *)kmalloc(sizeof(channel), gfp_type())) == NULL)
++ {
++ printk(KERN_INFO "ctcmpc: Out of memory in %s\n",__FUNCTION__);
++ rc = -1;
++ goto done;
++ }
++ memset(ch, 0, sizeof(channel));
++
++ ch->discontact_th = (th_header *)kmalloc(TH_HEADER_LENGTH,gfp_type());
++ if(ch->discontact_th == NULL)
++ {
++ kfree(ch);
++ printk(KERN_INFO "ctcmpc: Out of memory in %s\n",__FUNCTION__);
++ rc = -1;
++ goto done;
++ }
++ memset(ch->discontact_th, 0, TH_HEADER_LENGTH);
++ ch->discontact_th->th_blk_flag = TH_DISCONTACT;
++ tasklet_init(&ch->ch_disc_tasklet,
++ ctcmpc_action_send_discontact,
++ (unsigned long)ch);
++
++
++ tasklet_init(&ch->ch_tasklet,ctcmpc_bh,(unsigned long)ch);
++ if((ch->ccw = (ccw1_t *)kmalloc(sizeof(ccw1_t) * 17,
++ GFP_KERNEL|GFP_DMA)) == NULL)
++ {
++		kfree(ch->discontact_th);
++		kfree(ch);
++ printk(KERN_INFO "ctcmpc: Out of memory in %s\n",__FUNCTION__);
++ rc = -1;
++ goto done;
++ }
++
++ ch->max_bufsize = (MPC_BUFSIZE_DEFAULT - 35);
++ /**
++ * "static" ccws are used in the following way:
++ *
++ * ccw[0..2] (Channel program for generic I/O):
++ * 0: prepare
++ * 1: read or write (depending on direction) with fixed
++ * buffer (idal allocated once when buffer is allocated)
++ * 2: tic (to double noops)
++ * ccw[3..5] (Channel program for direct write of packets)
++ * 3: prepare
++ * 4: write (idal allocated on every write).
++ * 5: tic (to double noops)
++ * ccw[6..7] (Channel program for initial channel setup):
++ * 6: set extended mode
++ * 7: nop
++ *
++ * ch->ccw[0..5] are initialized in ctcmpc_ch_action_start because
++ * the channel's direction is yet unknown here.
++ *
++	 * ccws used for xid2 negotiations:
++	 * ch->ccw[8..14] are used for the XID exchange, either as
++ * X side XID2 Processing
++ * 8: write control
++ * 9: write th
++ * 10: write XID
++ * 11: read th from secondary
++ * 12: read XID from secondary
++ * 13: read 4 byte ID
++ * 14: nop
++ * Y side XID Processing
++ * 8: sense
++ * 9: read th
++ * 10: read XID
++ * 11: write th
++ * 12: write XID
++ * 13: write 4 byte ID
++ * 14: nop
++ *
++ * ccws used for double noop due to VM timing issues
++ * which result in unrecoverable Busy on channel
++ * 15: nop
++ * 16: nop
++ */
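++
++	/*
++	 * Illustrative sketch -- not part of the original driver -- of how
++	 * the direct-write program ccw[3..5] described above might be set
++	 * up once the direction is known (the CCW_CMD_PREPARE/CCW_CMD_TIC
++	 * opcode names are assumptions inferred from the layout comment):
++	 *
++	 *   ch->ccw[3].cmd_code = CCW_CMD_PREPARE;
++	 *   ch->ccw[3].flags    = CCW_FLAG_SLI | CCW_FLAG_CC;
++	 *   ch->ccw[4].cmd_code = CCW_CMD_WRITE;
++	 *   ch->ccw[4].flags    = CCW_FLAG_SLI | CCW_FLAG_CC;
++	 *   ch->ccw[4].count    = <set per packet in transmit_skb()>;
++	 *   ch->ccw[5].cmd_code = CCW_CMD_TIC;
++	 *   ch->ccw[5].cda      = virt_to_phys(&ch->ccw[15]);
++	 */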
++
++ ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
++ ch->ccw[6].flags = CCW_FLAG_SLI;
++ ch->ccw[6].count = 0;
++ ch->ccw[6].cda = 0;
++
++ ch->ccw[7].cmd_code = CCW_CMD_NOOP;
++ ch->ccw[7].flags = CCW_FLAG_SLI;
++ ch->ccw[7].count = 0;
++ ch->ccw[7].cda = 0;
++
++ ch->ccw[15].cmd_code = CCW_CMD_WRITE;
++ ch->ccw[15].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
++ ch->ccw[15].count = TH_HEADER_LENGTH;
++ ch->ccw[15].cda = virt_to_phys(ch->discontact_th);
++
++ ch->ccw[16].cmd_code = CCW_CMD_NOOP;
++ ch->ccw[16].flags = CCW_FLAG_SLI;
++ ch->ccw[16].count = 0;
++ ch->ccw[16].cda = 0;
++
++
++ ch->irq = irq;
++ ch->devno = devno;
++ ch->type = type;
++ ch->in_mpcgroup = 0;
++ sprintf(name, "ch-%04x", devno);
++ ch->fsm = init_fsm(name, ch_state_names,
++ ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
++ ch_fsm, CH_FSM_LEN, GFP_KERNEL);
++ if(ch->fsm == NULL)
++ {
++ printk(KERN_INFO
++ "ctcmpc: Could not create FSM in ctcmpc_add_channel\n");
++		kfree(ch->ccw);
++		kfree(ch->discontact_th);
++		kfree(ch);
++ rc = -1;
++ goto done;
++ }
++ fsm_newstate(ch->fsm, CH_STATE_IDLE);
++ if((ch->devstat = (devstat_t*)kmalloc(sizeof(devstat_t), gfp_type()))
++ == NULL)
++ {
++ printk(KERN_INFO "ctcmpc: Out of memory in %s\n",__FUNCTION__);
++		kfree_fsm(ch->fsm);
++		kfree(ch->ccw);
++		kfree(ch->discontact_th);
++		kfree(ch);
++ rc = -1;
++ goto done;
++ }
++ memset(ch->devstat, 0, sizeof(devstat_t));
++ while(*c && ((*c)->devno < devno))
++ c = &(*c)->next;
++	if(*c && ((*c)->devno == devno))
++ {
++ printk(KERN_INFO
++ "ctcmpc: %s: device %04x already in list, "
++ "using old entry\n", __FUNCTION__,(*c)->devno);
++		kfree(ch->devstat);
++		kfree_fsm(ch->fsm);
++		kfree(ch->ccw);
++		kfree(ch->discontact_th);
++		kfree(ch);
++ rc = 0;
++ goto done;
++ }
++ fsm_settimer(ch->fsm, &ch->timer);
++ fsm_settimer(ch->fsm, &ch->sweep_timer);
++ skb_queue_head_init(&ch->io_queue);
++ skb_queue_head_init(&ch->collect_queue);
++ skb_queue_head_init(&ch->sweep_queue);
++ ch->next = *c;
++ *c = ch;
++
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++ return(rc);
++}
++
++#ifndef CTC_CHANDEV
++/**
++ * Scan all channels and create an entry in the channel list
++ * for every supported channel.
++ */
++static void
++channel_scan(void)
++{
++ static int print_result = 1;
++ int irq;
++ int nr_mpc = 0;
++ s390_dev_info_t di;
++
++ #ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++ #endif
++
++
++ for(irq = 0; irq < NR_IRQS; irq++)
++ {
++ if(get_dev_info_by_irq(irq, &di) == 0)
++ {
++ if((di.status == DEVSTAT_NOT_OPER) ||
++ (di.status == DEVSTAT_DEVICE_OWNED))
++ continue;
++ switch(channel_type(&di.sid_data))
++ {
++ case channel_type_mpc:
++ /* CTC/A or ESCON*/
++ if(!ctcmpc_add_channel(irq, di.devno,
++ channel_type_mpc))
++ nr_mpc++;
++ break;
++ default: break;
++ }
++ }
++ }
++ if(print_result)
++ {
++ if(nr_mpc)
++ printk(KERN_INFO
++ "ctcmpc: %d channel%s found.\n",
++			       nr_mpc, (nr_mpc == 1) ? "" : "s");
++ else
++ printk(KERN_INFO "ctcmpc: No channel devices found.\n");
++ }
++ print_result = 0;
++
++ #ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++ #endif
++
++}
++#endif
++
++/**
++ * Release a specific channel from the channel list.
++ *
++ * @param ch Pointer to channel struct to be released.
++ */
++static void
++channel_free(channel *ch)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ ch->flags &= ~CHANNEL_FLAGS_INUSE;
++ fsm_newstate(ch->fsm, CH_STATE_IDLE);
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++}
++
++/**
++ * Remove a specific channel from the channel list.
++ *
++ * @param ch Pointer to channel struct to be removed.
++ */
++static void
++channel_remove(channel *ch)
++{
++ channel **c = &channels;
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ if(ch == NULL)
++ goto done;
++
++#ifndef CTC_CHANDEV
++ if(ch->flags & CHANNEL_FLAGS_INUSE)
++ FREE_IRQ(ch->irq, ch->devstat);
++#endif
++ channel_free(ch);
++ while(*c)
++ {
++ if(*c == ch)
++ {
++ *c = ch->next;
++ fsm_deltimer(&ch->timer);
++ fsm_deltimer(&ch->sweep_timer);
++ kfree_fsm(ch->fsm);
++ clear_normalized_cda(&ch->ccw[4]);
++ if(ch->trans_skb != NULL)
++ {
++ clear_normalized_cda(&ch->ccw[1]);
++ dev_kfree_skb_any(ch->trans_skb);
++ }
++ tasklet_kill(&ch->ch_tasklet);
++ tasklet_kill(&ch->ch_disc_tasklet);
++ kfree(ch->discontact_th);
++ kfree(ch->ccw);
++ goto done;
++ }
++ c = &((*c)->next);
++ }
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++ return;
++}
++
++
++/**
++ * Get a specific channel from the channel list.
++ *
++ * @param type Type of channel we are interested in.
++ * @param devno Device number of channel we are interested in.
++ * @param direction Direction we want to use this channel for.
++ *
++ * @return Pointer to a channel or NULL if no matching channel available.
++ */
++static channel
++*channel_get(channel_type_t type, int devno, int direction)
++{
++ channel *ch = channels;
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++
++#ifdef DEBUG
++ printk(KERN_INFO
++ "ctcmpc: %s(): searching for ch with devno %d and type %d\n",
++ __FUNCTION__, devno, type);
++#endif
++
++ while(ch && ((ch->devno != devno) || (ch->type != type)))
++ {
++#ifdef DEBUG
++ printk(KERN_INFO
++		       "ctcmpc: %s(): ch=0x%p (devno=%d, type=%d)\n",
++ __FUNCTION__, ch, ch->devno, ch->type);
++#endif
++ ch = ch->next;
++ }
++#ifdef DEBUG
++	if(ch)
++		printk(KERN_INFO
++		       "ctcmpc: %s(): ch=0x%p (devno=%d, type=%d)\n",
++		       __FUNCTION__, ch, ch->devno, ch->type);
++#endif
++ if(!ch)
++ {
++ printk(KERN_INFO "ctcmpc: %s(): channel with devno %d "
++ "and type %d not found in channel list\n",
++ __FUNCTION__, devno, type);
++ } else
++ {
++ if(ch->flags & CHANNEL_FLAGS_INUSE)
++ ch = NULL;
++ else
++ {
++ ch->flags |= CHANNEL_FLAGS_INUSE;
++ ch->flags &= ~CHANNEL_FLAGS_RWMASK;
++ ch->flags |= (direction == WRITE)
++ ? CHANNEL_FLAGS_WRITE:CHANNEL_FLAGS_READ;
++ fsm_newstate(ch->fsm, CH_STATE_STOPPED);
++ }
++ }
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++ return ch;
++}
++
++#ifndef CTC_CHANDEV
++/**
++ * Get the next free channel from the channel list
++ *
++ * @param type Type of channel we are interested in.
++ * @param direction Direction we want to use this channel for.
++ *
++ * @return Pointer to a channel or NULL if no matching channel available.
++ */
++static channel
++*channel_get_next(channel_type_t type, int direction)
++{
++ channel *ch = channels;
++
++ #ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++ #endif
++
++ while(ch && (ch->type != type || (ch->flags & CHANNEL_FLAGS_INUSE)))
++ ch = ch->next;
++ if(ch)
++ {
++ ch->flags |= CHANNEL_FLAGS_INUSE;
++ ch->flags &= ~CHANNEL_FLAGS_RWMASK;
++ ch->flags |= (direction == WRITE)
++ ? CHANNEL_FLAGS_WRITE:CHANNEL_FLAGS_READ;
++ fsm_newstate(ch->fsm, CH_STATE_STOPPED);
++ }
++ #ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++ #endif
++
++ return ch;
++}
++#endif
++
++/**
++ * Return the channel type by name.
++ *
++ * @param name Name of network interface.
++ *
++ * @return Type class of channel to be used for that interface.
++ */
++static inline channel_type_t
++extract_channel_media(char *name)
++{
++ channel_type_t ret = channel_type_unknown;
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++
++ if(name != NULL)
++ {
++ if(strncmp(name, "mpc", 3) == 0)
++ ret = channel_type_mpc;
++ }
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++ return ret;
++}
++
++/**
++ * Find a channel in the list by its IRQ.
++ *
++ * @param irq IRQ to search for.
++ *
++ * @return Pointer to channel or NULL if no matching channel found.
++ */
++static channel
++*find_channel_by_irq(int irq)
++{
++ channel *ch = channels;
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ while(ch && (ch->irq != irq))
++ ch = ch->next;
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++ return ch;
++}
++
++/**
++ * Main IRQ handler.
++ *
++ * @param irq The IRQ to handle.
++ * @param intparm IRQ params.
++ * @param regs CPU registers.
++ */
++static void
++ctcmpc_irq_handler (int irq, void *intparm, struct pt_regs *regs)
++{
++
++ devstat_t *devstat = (devstat_t *)intparm;
++ channel *ach = (channel *)devstat->intparm;
++ channel *ch = NULL;
++ net_device *dev;
++
++ /**
++ * Check for unsolicited interrupts.
++ * If intparm is NULL, then loop over all our known
++ * channels and try matching the irq number.
++ */
++ if(ach == NULL)
++ {
++ if((ch = find_channel_by_irq(irq)) == NULL)
++ {
++ printk(KERN_INFO
++			       "ctcmpc: Got unsolicited irq: %04x c-%02x d-%02x "
++ "f-%02x\n", devstat->devno, devstat->cstat,
++ devstat->dstat, devstat->flag);
++ goto done;
++ }
++ } else
++ ch = ach;
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s() %04x\n", __FUNCTION__, ch->devno);
++#endif
++
++
++ dev = (net_device *)(ch->netdev);
++ if(dev == NULL)
++ {
++ printk(KERN_CRIT
++ "ctcmpc: %s dev = NULL irq=%d, ch=0x%p\n",
++ __FUNCTION__,irq, ch);
++ goto done;
++ }
++
++
++	if(ach == NULL)
++ printk(KERN_INFO "%s: Channel %04x found by IRQ %d\n",
++ dev->name, ch->devno, irq);
++
++
++#ifdef DEBUG
++ printk(KERN_INFO
++ "%s: cp:%i interrupt for device: %04x received "
++ "in state:%s c-%02x d-%02x "
++ "f-%02x\n",
++ dev->name,
++ smp_processor_id(),
++ devstat->devno,
++ fsm_getstate_str(ch->fsm),
++ devstat->cstat,
++ devstat->dstat,
++ devstat->flag);
++#endif
++
++ /* Check for good subchannel return code, otherwise error message */
++ if(devstat->cstat)
++ {
++ fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
++ printk(KERN_INFO
++ "%s: subchannel check for device: %04x - %02x %02x "
++ "%02x\n", dev->name, ch->devno, devstat->cstat,
++ devstat->dstat, devstat->flag);
++ goto done;
++ }
++
++ /* Check the reason-code of a unit check */
++ if(devstat->dstat & DEV_STAT_UNIT_CHECK)
++ {
++ ccw_unit_check(ch, devstat->ii.sense.data[0]);
++ goto done;
++ }
++ if(devstat->dstat & DEV_STAT_BUSY)
++ {
++ if(devstat->dstat & DEV_STAT_ATTENTION)
++ fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
++ else
++ fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
++ goto done;
++ }
++
++ if(devstat->dstat & DEV_STAT_ATTENTION)
++ {
++ fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
++ goto done;
++ }
++
++ /* NOT unsolicited irq */
++ if(ach)
++ {
++ if(devstat->dstat & DEV_STAT_DEV_END)
++ {
++ if((devstat->dstat & DEV_STAT_CHN_END) ||
++ (devstat->flag & DEVSTAT_HALT_FUNCTION))
++ {
++ fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
++ goto done;
++ } else
++ {
++ /* do_IO has not really completed */
++ fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
++ goto done;
++ }
++ }
++ }
++
++ if(devstat->flag & DEVSTAT_FINAL_STATUS)
++ fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
++ else
++ fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
++
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s() %04x\n", __FUNCTION__, ch->devno);
++#endif
++
++ return;
++
++}
++
++/**
++ * Actions for interface - statemachine.
++ *****************************************************************************/
++
++/**
++ * Startup channels by sending CH_EVENT_START to each channel.
++ *
++ * @param fi An instance of an interface statemachine.
++ * @param event The event that just happened.
++ * @param arg   Generic pointer, cast from net_device * upon call.
++ */
++static void
++dev_action_start(fsm_instance *fi, int event, void *arg)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ net_device *dev = (net_device *)arg;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group *grpptr = privptr->mpcg;
++ int direction;
++
++ fsm_deltimer(&privptr->restart_timer);
++ fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
++ grpptr->channels_terminating = 0;
++ for(direction = READ; direction <= WRITE; direction++)
++ {
++ channel *ch = privptr->channel[direction];
++ fsm_event(ch->fsm, CH_EVENT_START, ch);
++ }
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++}
++
++/**
++ * Shutdown channels by sending CH_EVENT_STOP to each channel.
++ *
++ * @param fi An instance of an interface statemachine.
++ * @param event The event that just happened.
++ * @param arg   Generic pointer, cast from net_device * upon call.
++ */
++static void
++dev_action_stop(fsm_instance *fi, int event, void *arg)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ net_device *dev = (net_device *)arg;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group * grpptr = privptr->mpcg;
++ int direction;
++
++ fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
++ for(direction = READ; direction <= WRITE; direction++)
++ {
++ channel *ch = privptr->channel[direction];
++ fsm_event(ch->fsm, CH_EVENT_STOP, ch);
++ ch->th_seq_num = 0x00;
++
++#ifdef DEBUGSEQ
++ printk(KERN_INFO "%s: CH_th_seq= %08x\n" ,
++ __FUNCTION__,ch->th_seq_num);
++#endif
++ }
++ fsm_newstate(grpptr->fsm, MPCG_STATE_RESET);
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++}
++
++static void
++dev_action_restart(fsm_instance *fi, int event, void *arg)
++{
++ net_device *dev = (net_device *)arg;
++ ctc_priv *privptr = dev->priv;
++ mpc_group * grpptr = privptr->mpcg;
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ printk(KERN_DEBUG "ctcmpc: Restarting %s Device and MPC Group "
++	       "in 1 second\n",
++ dev->name);
++ dev_action_stop(fi, event, arg);
++ fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
++ fsm_newstate(grpptr->fsm, MPCG_STATE_RESET);
++ /* going back into start sequence too quickly can */
++ /* result in the other side becoming unreachable due */
++ /* to sense reported when IO is aborted */
++ fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_START, dev);
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++}
++
++/**
++ * Called from channel statemachine
++ * when a channel is up and running.
++ *
++ * @param fi An instance of an interface statemachine.
++ * @param event The event that just happened.
++ * @param arg   Generic pointer, cast from net_device * upon call.
++ */
++static void
++dev_action_chup(fsm_instance *fi, int event, void *arg)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ net_device *dev = (net_device *)arg;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++
++ switch(fsm_getstate(fi))
++ {
++ case DEV_STATE_STARTWAIT_RXTX:
++ if(event == DEV_EVENT_RXUP)
++ fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
++ else
++ fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
++ break;
++ case DEV_STATE_STARTWAIT_RX:
++ if(event == DEV_EVENT_RXUP)
++ {
++ fsm_newstate(fi, DEV_STATE_RUNNING);
++ printk(KERN_INFO
++ "%s: connected with remote side\n",
++ dev->name);
++ ctcmpc_clear_busy(dev);
++ }
++ break;
++ case DEV_STATE_STARTWAIT_TX:
++ if(event == DEV_EVENT_TXUP)
++ {
++ fsm_newstate(fi, DEV_STATE_RUNNING);
++ printk(KERN_INFO
++ "%s: connected with remote side\n",
++ dev->name);
++ ctcmpc_clear_busy(dev);
++ }
++ break;
++ case DEV_STATE_STOPWAIT_TX:
++ if(event == DEV_EVENT_RXUP)
++ fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
++ break;
++ case DEV_STATE_STOPWAIT_RX:
++ if(event == DEV_EVENT_TXUP)
++ fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
++ break;
++ }
++
++ if(event == DEV_EVENT_RXUP)
++ ctcmpc_channel_action(privptr->channel[READ],
++ READ,MPC_CHANNEL_ADD);
++ else
++ ctcmpc_channel_action(privptr->channel[WRITE],
++ WRITE,MPC_CHANNEL_ADD);
++
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++}
++
++/**
++ * Called from channel statemachine
++ * when a channel has been shutdown.
++ *
++ * @param fi An instance of an interface statemachine.
++ * @param event The event that just happened.
++ * @param arg   Generic pointer, cast from net_device * upon call.
++ */
++static void
++dev_action_chdown(fsm_instance *fi, int event, void *arg)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ net_device *dev = (net_device *)arg;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++
++ switch(fsm_getstate(fi))
++ {
++ case DEV_STATE_RUNNING:
++ if(event == DEV_EVENT_TXDOWN)
++ fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
++ else
++ fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
++ break;
++ case DEV_STATE_STARTWAIT_RX:
++ if(event == DEV_EVENT_TXDOWN)
++ fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
++ break;
++ case DEV_STATE_STARTWAIT_TX:
++ if(event == DEV_EVENT_RXDOWN)
++ fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
++ break;
++ case DEV_STATE_STOPWAIT_RXTX:
++ if(event == DEV_EVENT_TXDOWN)
++ fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
++ else
++ fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
++ break;
++ case DEV_STATE_STOPWAIT_RX:
++ if(event == DEV_EVENT_RXDOWN)
++ fsm_newstate(fi, DEV_STATE_STOPPED);
++ break;
++ case DEV_STATE_STOPWAIT_TX:
++ if(event == DEV_EVENT_TXDOWN)
++ fsm_newstate(fi, DEV_STATE_STOPPED);
++ break;
++ }
++
++
++ if(event == DEV_EVENT_RXDOWN)
++ ctcmpc_channel_action(privptr->channel[READ],
++ READ,MPC_CHANNEL_REMOVE);
++ else
++ ctcmpc_channel_action(privptr->channel[WRITE],
++ WRITE,MPC_CHANNEL_REMOVE);
++
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++}
++
++static const fsm_node dev_fsm[] = {
++ { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start},
++
++ { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start},
++ { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown},
++ { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown},
++ { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart},
++
++ { DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start},
++ { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup},
++ { DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup},
++ { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown},
++ { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart},
++
++ { DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start},
++ { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup},
++ { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup},
++ { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown},
++ { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart},
++
++ { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop},
++ { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup},
++ { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup},
++ { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown},
++ { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown},
++ { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart},
++
++ { DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop},
++ { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup},
++ { DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup},
++ { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown},
++ { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart},
++
++ { DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop},
++ { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup},
++ { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup},
++ { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown},
++ { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart},
++
++ { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop},
++ { DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown},
++ { DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown},
++ { DEV_STATE_RUNNING, DEV_EVENT_TXUP, fsm_action_nop},
++ { DEV_STATE_RUNNING, DEV_EVENT_RXUP, fsm_action_nop},
++ { DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart},
++};
++
++static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
++
++/**
++ * Transmit a packet.
++ * This is a helper function for ctcmpc_tx().
++ *
++ * @param ch Channel to be used for sending.
++ * @param skb Pointer to struct sk_buff of packet to send.
++ * The linklevel header has already been set up
++ * by ctcmpc_tx().
++ *
++ * @return Always 0 (the skb is consumed here, even on error).
++ */
++static int
++transmit_skb(channel *ch, struct sk_buff *skb)
++{
++
++ unsigned long saveflags;
++ pdu *p_header;
++ int rc = 0;
++ net_device *dev = ch->netdev;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group *grpptr = privptr->mpcg;
++#ifdef DEBUGDATA
++ __u32 out_len = 0;
++#endif
++
++
++
++#ifdef DEBUG
++ printk(KERN_INFO "%s cp:%i enter: %s() state: %s \n",
++ dev->name,
++ smp_processor_id(),
++ __FUNCTION__,
++ fsm_getstate_str(ch->fsm));
++#endif
++
++ if((fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) ||
++ (grpptr->in_sweep))
++ {
++ spin_lock_irqsave(&ch->collect_lock, saveflags);
++ atomic_inc(&skb->users);
++ p_header = (pdu *)kmalloc(PDU_HEADER_LENGTH, gfp_type());
++
++ if(!p_header)
++ {
++			printk(KERN_WARNING "ctcmpc: OUT OF MEMORY IN %s(): "
++ "Data Lost \n",
++ __FUNCTION__);
++ atomic_dec(&skb->users);
++ dev_kfree_skb_any(skb);
++ spin_unlock_irqrestore(&ch->collect_lock, saveflags);
++ fsm_event(privptr->mpcg->fsm,MPCG_EVENT_INOP,dev);
++ goto done;
++ }
++
++ p_header->pdu_offset = skb->len;
++ p_header->pdu_proto = 0x01;
++ p_header->pdu_flag = 0x00;
++ if(skb->protocol == ntohs(ETH_P_SNAP))
++ {
++ p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
++ } else
++ {
++ p_header->pdu_flag |= PDU_FIRST;
++ }
++ p_header->pdu_seq = 0;
++ memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header,
++ PDU_HEADER_LENGTH);
++#ifdef DEBUGDATA
++ if(skb->len > 32) out_len = 32;
++ else out_len = skb->len;
++ printk(KERN_INFO
++ "%s(): Putting on collect_q - skb len:%04x \n",
++ __FUNCTION__,skb->len);
++ printk(KERN_INFO
++ "%s(): pdu header and data for up to 32 bytes\n",
++ __FUNCTION__);
++ dumpit((char *)skb->data,out_len);
++#endif
++ skb_queue_tail(&ch->collect_queue, skb);
++ ch->collect_len += skb->len;
++ kfree(p_header);
++
++ spin_unlock_irqrestore(&ch->collect_lock, saveflags);
++ } else
++ {
++ __u16 block_len;
++ int ccw_idx;
++ struct sk_buff *nskb;
++ unsigned long hi;
++
++ /**
++		 * Protect skb against being freed by upper
++ * layers.
++ */
++ atomic_inc(&skb->users);
++
++ block_len = skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
++
++ /**
++ * IDAL support in CTC is broken, so we have to
++		 * take care of skbs above 2 GB ourselves.
++ */
++ hi = ((unsigned long)skb->tail + TH_HEADER_LENGTH) >> 31;
++ if(hi)
++ {
++ nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
++
++ if(!nskb)
++ {
++ printk(KERN_WARNING
++				       "ctcmpc: OUT OF MEMORY IN %s(): Data Lost \n",
++ __FUNCTION__);
++ atomic_dec(&skb->users);
++ dev_kfree_skb_any(skb);
++ fsm_event(privptr->mpcg->fsm,
++ MPCG_EVENT_INOP,dev);
++ goto done;
++ } else
++ {
++ memcpy(skb_put(nskb, skb->len),
++ skb->data, skb->len);
++ atomic_inc(&nskb->users);
++ atomic_dec(&skb->users);
++ dev_kfree_skb_any(skb);
++ skb = nskb;
++ }
++ }
++
++ p_header = (pdu *)kmalloc(PDU_HEADER_LENGTH, gfp_type());
++
++ if(!p_header)
++ {
++ printk(KERN_WARNING
++ "ctcmpc: OUT OF MEMORY IN %s(): Data Lost \n",
++ __FUNCTION__);
++ atomic_dec(&skb->users);
++ dev_kfree_skb_any(skb);
++ fsm_event(privptr->mpcg->fsm,MPCG_EVENT_INOP,dev);
++ goto done;
++ }
++
++ p_header->pdu_offset = skb->len;
++ p_header->pdu_proto = 0x01;
++ p_header->pdu_flag = 0x00;
++ p_header->pdu_seq = 0;
++ if(skb->protocol == ntohs(ETH_P_SNAP))
++ {
++ p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
++ } else
++ {
++ p_header->pdu_flag |= PDU_FIRST;
++ }
++ memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header,
++ PDU_HEADER_LENGTH);
++
++ kfree(p_header);
++
++ if(ch->collect_len > 0)
++ {
++ spin_lock_irqsave(&ch->collect_lock, saveflags);
++ skb_queue_tail(&ch->collect_queue, skb);
++ ch->collect_len += skb->len;
++ skb = skb_dequeue(&ch->collect_queue);
++ ch->collect_len -= skb->len;
++ spin_unlock_irqrestore(&ch->collect_lock, saveflags);
++ }
++
++ p_header = (pdu *)skb->data;
++ p_header->pdu_flag |= PDU_LAST;
++
++ ch->prof.txlen += skb->len - PDU_HEADER_LENGTH;
++
++ th_header *header;
++ header = (th_header *)kmalloc(TH_HEADER_LENGTH, gfp_type());
++
++ if(!header)
++ {
++ printk(KERN_WARNING
++ "ctcmpc: OUT OF MEMORY IN %s(): Data Lost \n",
++ __FUNCTION__);
++ atomic_dec(&skb->users);
++ dev_kfree_skb_any(skb);
++ fsm_event(privptr->mpcg->fsm,MPCG_EVENT_INOP,dev);
++ goto done;
++ }
++
++ header->th_seg = 0x00;
++ header->th_ch_flag = TH_HAS_PDU; /* Normal data */
++ header->th_blk_flag = 0x00;
++ header->th_is_xid = 0x00; /* Just data here */
++ ch->th_seq_num++;
++ header->th_seq_num = ch->th_seq_num;
++
++#ifdef DEBUGSEQ
++ printk(KERN_INFO "%s: ToVTAM_th_seq= %08x\n" ,
++ __FUNCTION__,ch->th_seq_num);
++#endif
++
++ memcpy(skb_push(skb, TH_HEADER_LENGTH), header,
++ TH_HEADER_LENGTH); /* put the TH on the packet */
++
++ kfree(header);
++
++#ifdef DEBUGDATA
++ if(skb->len > 32) out_len = 32;
++ else out_len = skb->len;
++ printk(KERN_INFO "%s(): skb len: %04x \n",
++ __FUNCTION__,skb->len);
++ printk(KERN_INFO
++ "%s(): pdu header and data for up to "
++ "32 bytes sent to vtam\n",
++ __FUNCTION__);
++ dumpit((char *)skb->data,out_len);
++#endif
++
++ ch->ccw[4].count = skb->len;
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21))
++ if(set_normalized_cda(&ch->ccw[4], virt_to_phys(skb->data)))
++ {
++#else
++ if(set_normalized_cda(&ch->ccw[4], skb->data))
++ {
++#endif
++ /**
++ * idal allocation failed, try via copying to
++ * trans_skb. trans_skb usually has a pre-allocated
++ * idal.
++ */
++ if(ctcmpc_checkalloc_buffer(ch, 1))
++ {
++ /**
++ * We are toast. Data lost.
++ */
++ atomic_dec(&skb->users);
++ dev_kfree_skb_any(skb);
++ printk(KERN_WARNING
++ "ctcmpc: OUT OF MEMORY IN %s():"
++ " Data Lost \n",
++ __FUNCTION__);
++ fsm_event(privptr->mpcg->fsm,
++ MPCG_EVENT_INOP,dev);
++ goto done;
++ }
++
++ ch->trans_skb->tail = ch->trans_skb->data;
++ ch->trans_skb->len = 0;
++ ch->ccw[1].count = skb->len;
++ memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
++ skb->len);
++ atomic_dec(&skb->users);
++ dev_kfree_skb_any(skb);
++ ccw_idx = 0;
++#ifdef DEBUGDATA
++ if(ch->trans_skb->len > 32) out_len = 32;
++ else out_len = ch->trans_skb->len;
++ printk(KERN_INFO
++ "%s(): TRANS skb len: %d \n",
++ __FUNCTION__,ch->trans_skb->len);
++ printk(KERN_INFO
++ "%s(): up to 32 bytes of data sent to vtam\n",
++ __FUNCTION__);
++ dumpit((char *)ch->trans_skb->data,out_len);
++#endif
++
++
++ } else
++ {
++ skb_queue_tail(&ch->io_queue, skb);
++ ccw_idx = 3;
++ }
++
++ ch->retry = 0;
++ fsm_newstate(ch->fsm, CH_STATE_TX);
++ fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
++ CH_EVENT_TIMER, ch);
++
++#ifdef DEBUGCCW
++ dumpit((char *)&ch->ccw[ccw_idx],sizeof(ccw1_t) * 3);
++#endif
++
++ s390irq_spin_lock_irqsave(ch->irq, saveflags);
++ ch->prof.send_stamp = xtime;
++
++
++ rc = do_IO(ch->irq, &ch->ccw[ccw_idx], (intparm_t)ch, 0xff, 0);
++
++ s390irq_spin_unlock_irqrestore(ch->irq, saveflags);
++
++ if(ccw_idx == 3)
++ ch->prof.doios_single++;
++ if(rc != 0)
++ {
++			/* Not all return codes from do_IO are bad.     */
++			/* ccw_check_return_code() handles the cases in */
++			/* which this data will not be retried.         */
++ ccw_check_return_code(ch, rc);
++ } else
++ {
++ if(ccw_idx == 0)
++ {
++ privptr->stats.tx_packets++;
++ privptr->stats.tx_bytes +=
++ skb->len - TH_HEADER_LENGTH;
++ }
++ }
++ if(ch->th_seq_num > 0xf0000000)
++		{  /* Arbitrary wrap threshold near 4 billion. */
++ ctcmpc_send_sweep_req(ch);
++ }
++ }
++
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO "%s exit: %s()\n", dev->name,__FUNCTION__);
++#endif
++ return(0);
++}
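++
++/*
++ * Framing produced by transmit_skb() above, reconstructed from the
++ * code as an illustration (not part of the original driver):
++ *
++ *   | th_header (TH_HEADER_LENGTH) | pdu (PDU_HEADER_LENGTH) | data |
++ *
++ * The TH carries the per-channel th_seq_num; the PDU header carries
++ * pdu_offset (the payload length at the time it was set) and the
++ * PDU_FIRST/PDU_CNTL/PDU_LAST flags.
++ */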
++
++/**
++ * Interface API for upper network layers
++ *****************************************************************************/
++
++/**
++ * Open an interface.
++ * Called from generic network layer when ifconfig up is run.
++ *
++ * @param dev Pointer to interface struct.
++ *
++ * @return 0 on success, -ERRNO on failure. (Never fails.)
++ */
++static int
++ctcmpc_open(net_device *dev)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ MOD_INC_USE_COUNT;
++ /* for MPC this is called from ctc_mpc_alloc_channel */
++ /*fsm_event(((ctc_priv *)dev->priv)->fsm, DEV_EVENT_START, dev);*/
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++ return 0;
++}
++
++/**
++ * Close an interface.
++ * Called from generic network layer when ifconfig down is run.
++ *
++ * @param dev Pointer to interface struct.
++ *
++ * @return 0 on success, -ERRNO on failure. (Never fails.)
++ */
++static int
++ctcmpc_close(net_device *dev)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ SET_DEVICE_START(dev, 0);
++ /*Now called from mpc close only */
++ /*fsm_event(((ctc_priv *)dev->priv)->fsm, DEV_EVENT_STOP, dev);*/
++ MOD_DEC_USE_COUNT;
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++ return 0;
++}
++
++/**
++ * Start transmission of a packet.
++ * Called from generic network device layer.
++ *
++ * @param skb Pointer to buffer containing the packet.
++ * @param dev Pointer to interface struct.
++ *
++ * @return 0 if packet consumed, !0 if packet rejected.
++ * Note: If we return !0, then the packet is free'd by
++ * the generic network layer.
++ */
++static int
++ctcmpc_tx(struct sk_buff *skb, net_device *dev)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter:%s() skb:%0lx\n",
++ __FUNCTION__,(unsigned long)skb);
++#endif
++
++ int len;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ mpc_group *grpptr = privptr->mpcg;
++
++ /**
++ * Some sanity checks ...
++ */
++ if(skb == NULL)
++ {
++ printk(KERN_INFO
++ "%s: NULL sk_buff passed - EMPTY PACKET DROPPED\n",
++ dev->name);
++ privptr->stats.tx_dropped++;
++ goto done;
++ }
++ if(skb_headroom(skb) < (TH_HEADER_LENGTH + PDU_HEADER_LENGTH))
++ {
++ printk(KERN_INFO
++ "%s: Got sk_buff with head room < %ld bytes\n",
++ dev->name, TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
++#ifdef DEBUGDATA
++		if(skb->len > 32) len = 32;
++		else len = skb->len;
++		dumpit((char *)skb->data,len);
++#endif
++ len = skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
++		struct sk_buff *newskb = __dev_alloc_skb(len,gfp_type() | GFP_DMA);
++
++ if(!newskb)
++ {
++ printk(KERN_WARNING
++ "OUT OF MEMORY - in %s Data Lost\n",
++ __FUNCTION__);
++ printk(KERN_WARNING
++ "%s: DEVICE ERROR - UNRECOVERABLE DATA LOSS\n",
++ __FUNCTION__);
++ dev_kfree_skb_any(skb);
++ privptr->stats.tx_dropped++;
++ privptr->stats.tx_errors++;
++ privptr->stats.tx_carrier_errors++;
++ fsm_event(grpptr->fsm,MPCG_EVENT_INOP,dev);
++ goto done;
++ }
++ newskb->protocol = skb->protocol;
++ skb_reserve(newskb,TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
++ memcpy(skb_put(newskb,skb->len),skb->data,skb->len);
++ dev_kfree_skb_any(skb);
++ skb = newskb;
++ }
++
++ /**
++ * If channels are not running,
++ * notify anybody about a link failure and throw
++ * away packet.
++ */
++ if((fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) ||
++ (fsm_getstate(grpptr->fsm) < MPCG_STATE_XID2INITW))
++ {
++ dev_kfree_skb_any(skb);
++ printk(KERN_INFO
++		       "%s(): TX DATA - MPC GROUP NOT ACTIVE - DROPPED\n",
++ __FUNCTION__);
++ privptr->stats.tx_dropped++;
++ privptr->stats.tx_errors++;
++ privptr->stats.tx_carrier_errors++;
++ goto done;
++ }
++
++ if(ctcmpc_test_and_set_busy(dev))
++ {
++ printk(KERN_WARNING
++ "%s: DEVICE ERROR - UNRECOVERABLE DATA LOSS\n",
++ __FUNCTION__);
++ dev_kfree_skb_any(skb);
++ privptr->stats.tx_dropped++;
++ privptr->stats.tx_errors++;
++ privptr->stats.tx_carrier_errors++;
++ fsm_event(grpptr->fsm,MPCG_EVENT_INOP,dev);
++ goto done;
++ }
++
++ dev->trans_start = jiffies;
++ if(transmit_skb(privptr->channel[WRITE], skb) != 0)
++ {
++ printk(KERN_WARNING
++ "ctcmpc: DEVICE ERROR in %s(): Data Lost \n",
++ __FUNCTION__);
++ printk(KERN_WARNING
++ "%s: DEVICE ERROR - UNRECOVERABLE DATA LOSS\n",
++ __FUNCTION__);
++ dev_kfree_skb_any(skb);
++ privptr->stats.tx_dropped++;
++ privptr->stats.tx_errors++;
++ privptr->stats.tx_carrier_errors++;
++ ctcmpc_clear_busy(dev);
++ fsm_event(grpptr->fsm,MPCG_EVENT_INOP,dev);
++ goto done;
++ }
++ ctcmpc_clear_busy(dev);
++
++ done:
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++	return(0); /* the skb is always consumed (freed) here */
++}
++
++
++/**
++ * Sets MTU of an interface.
++ *
++ * @param dev Pointer to interface struct.
++ * @param new_mtu The new MTU to use for this interface.
++ *
++ * @return 0 on success, -EINVAL if MTU is out of valid range.
++ * (valid range is 576 .. 65527). If VM is on the
++ * remote side, maximum MTU is 32760, however this is
++ * <em>not</em> checked here.
++ */
++static int
++ctcmpc_change_mtu(net_device *dev, int new_mtu)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++
++ if((new_mtu < 576) || (new_mtu > 65527) ||
++ (new_mtu > (privptr->channel[READ]->max_bufsize -
++ TH_HEADER_LENGTH )))
++ return -EINVAL;
++ dev->mtu = new_mtu;
++ /* TH plus 4 byte sequence number on outbound path */
++ dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit: %s()\n", __FUNCTION__);
++#endif
++
++ return 0;
++}
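++
++/*
++ * Usage note (illustrative, not from the original driver): besides the
++ * static 576..65527 range, the accepted MTU is capped by the read
++ * channel's buffer size, i.e. new_mtu <= max_bufsize - TH_HEADER_LENGTH.
++ * From user space that corresponds to something like:
++ *
++ *   ifconfig mpc0 mtu 32760
++ *
++ * where "mpc0" is a hypothetical interface name for this driver.
++ */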
++
++
++/**
++ * Returns interface statistics of a device.
++ *
++ * @param dev Pointer to interface struct.
++ *
++ * @return Pointer to stats struct of this interface.
++ */
++static struct net_device_stats
++*ctcmpc_stats(net_device *dev)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ return &((ctc_priv *)dev->priv)->stats;
++}
++
++/**
++ * procfs related structures and routines
++ *****************************************************************************/
++
++static net_device
++*find_netdev_by_ino(unsigned long ino)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ channel *ch = channels;
++ net_device *dev = NULL;
++ ctc_priv *privptr;
++
++ while(ch)
++ {
++ if(ch->netdev != dev)
++ {
++ dev = ch->netdev;
++ privptr = (ctc_priv *)dev->priv;
++
++ if((privptr->proc_ctrl_entry->low_ino == ino) ||
++ (privptr->proc_stat_entry->low_ino == ino))
++ return dev;
++ }
++ ch = ch->next;
++ }
++ return NULL;
++}
++
++#define CTRL_BUFSIZE 40
++
++static int
++ctcmpc_ctrl_open(struct inode *inode, struct file *file)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ file->private_data = kmalloc(CTRL_BUFSIZE, GFP_KERNEL);
++ if(file->private_data == NULL)
++ return -ENOMEM;
++ MOD_INC_USE_COUNT;
++ return 0;
++}
++
++static int
++ctcmpc_ctrl_close(struct inode *inode, struct file *file)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ kfree(file->private_data);
++ MOD_DEC_USE_COUNT;
++ return 0;
++}
++
++static ssize_t
++ctcmpc_ctrl_write(struct file *file, const char *buf, size_t count,
++ loff_t *off)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ unsigned int ino = ((struct inode *)file->f_dentry->d_inode)->i_ino;
++ net_device *dev;
++ ctc_priv *privptr;
++ char *e;
++ int bs1;
++ char tmp[40];
++
++	/* This function is currently disabled in this MPC environment. */
++	return 0;
++ if(!(dev = find_netdev_by_ino(ino)))
++ return -ENODEV;
++ if(off != &file->f_pos)
++ return -ESPIPE;
++
++ privptr = (ctc_priv *)dev->priv;
++
++ if(count >= 39)
++ return -EINVAL;
++
++ if(copy_from_user(tmp, buf, count))
++ return -EFAULT;
++	tmp[count] = '\0';
++ bs1 = simple_strtoul(tmp, &e, 0);
++
++ if((bs1 > CTC_BUFSIZE_LIMIT) ||
++ (e && (!isspace(*e))))
++ return -EINVAL;
++ if((dev->flags & IFF_RUNNING) &&
++ (bs1 < (dev->mtu + TH_HEADER_LENGTH )))
++ return -EINVAL;
++ if(bs1 < (576 + TH_HEADER_LENGTH ))
++ return -EINVAL;
++
++
++ privptr->channel[READ]->max_bufsize =
++ privptr->channel[WRITE]->max_bufsize = bs1;
++ if(!(dev->flags & IFF_RUNNING))
++ dev->mtu = bs1 - TH_HEADER_LENGTH ;
++ privptr->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
++ privptr->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
++
++ return count;
++}
++
++static ssize_t
++ctcmpc_ctrl_read(struct file *file, char *buf, size_t count,
++ loff_t *off)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ unsigned int ino = ((struct inode *)file->f_dentry->d_inode)->i_ino;
++ char *sbuf = (char *)file->private_data;
++ net_device *dev;
++ ctc_priv *privptr;
++ ssize_t ret = 0;
++ char *p = sbuf;
++ int l;
++
++ if(!(dev = find_netdev_by_ino(ino)))
++ return -ENODEV;
++ if(off != &file->f_pos)
++ return -ESPIPE;
++
++ privptr = (ctc_priv *)dev->priv;
++
++ if(file->f_pos == 0)
++ sprintf(sbuf, "%d\n", privptr->channel[READ]->max_bufsize);
++
++ l = strlen(sbuf);
++ p = sbuf;
++ if(file->f_pos < l)
++ {
++ p += file->f_pos;
++ l = strlen(p);
++ ret = (count > l) ? l : count;
++ if(copy_to_user(buf, p, ret))
++ return -EFAULT;
++ }
++ file->f_pos += ret;
++ return ret;
++}
++
++#define STATS_BUFSIZE 2048
++
++static int
++ctcmpc_stat_open(struct inode *inode, struct file *file)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ file->private_data = kmalloc(STATS_BUFSIZE, GFP_KERNEL);
++ if(file->private_data == NULL)
++ return -ENOMEM;
++ MOD_INC_USE_COUNT;
++ return 0;
++}
++
++static int
++ctcmpc_stat_close(struct inode *inode, struct file *file)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ kfree(file->private_data);
++ MOD_DEC_USE_COUNT;
++ return 0;
++}
++
++static ssize_t
++ctcmpc_stat_write(struct file *file, const char *buf, size_t count,
++ loff_t *off)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ unsigned int ino = ((struct inode *)file->f_dentry->d_inode)->i_ino;
++ net_device *dev;
++ ctc_priv *privptr;
++
++ if(!(dev = find_netdev_by_ino(ino)))
++ return -ENODEV;
++ privptr = (ctc_priv *)dev->priv;
++ privptr->channel[WRITE]->prof.maxmulti = 0;
++ privptr->channel[WRITE]->prof.maxcqueue = 0;
++ privptr->channel[WRITE]->prof.doios_single = 0;
++ privptr->channel[WRITE]->prof.doios_multi = 0;
++ privptr->channel[WRITE]->prof.txlen = 0;
++ privptr->channel[WRITE]->prof.tx_time = 0;
++ return count;
++}
++
++static ssize_t
++ctcmpc_stat_read(struct file *file, char *buf, size_t count,
++ loff_t *off)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ unsigned int ino = ((struct inode *)file->f_dentry->d_inode)->i_ino;
++ char *sbuf = (char *)file->private_data;
++ net_device *dev;
++ ctc_priv *privptr;
++ ssize_t ret = 0;
++ char *p = sbuf;
++ int l;
++
++ if(!(dev = find_netdev_by_ino(ino)))
++ return -ENODEV;
++ if(off != &file->f_pos)
++ return -ESPIPE;
++
++ privptr = (ctc_priv *)dev->priv;
++
++ if(file->f_pos == 0)
++ {
++ p += sprintf(p, "Device FSM state: %s\n",
++ fsm_getstate_str(privptr->fsm));
++ p += sprintf(p, "RX channel FSM state: %s\n",
++ fsm_getstate_str(privptr->channel[READ]->fsm));
++ p += sprintf(p, "TX channel FSM state: %s\n",
++ fsm_getstate_str(privptr->channel[WRITE]->fsm));
++ p += sprintf(p, "Max. TX buffer used: %ld\n",
++ privptr->channel[WRITE]->prof.maxmulti);
++ p += sprintf(p, "Max. chained SKBs: %ld\n",
++ privptr->channel[WRITE]->prof.maxcqueue);
++ p += sprintf(p, "TX single write ops: %ld\n",
++ privptr->channel[WRITE]->prof.doios_single);
++ p += sprintf(p, "TX multi write ops: %ld\n",
++ privptr->channel[WRITE]->prof.doios_multi);
++		p += sprintf(p, "Net bytes written:   %ld\n",
++ privptr->channel[WRITE]->prof.txlen);
++ p += sprintf(p, "Max. TX IO-time: %ld\n",
++ privptr->channel[WRITE]->prof.tx_time);
++ }
++ l = strlen(sbuf);
++ p = sbuf;
++ if(file->f_pos < l)
++ {
++ p += file->f_pos;
++ l = strlen(p);
++ ret = (count > l) ? l : count;
++ if(copy_to_user(buf, p, ret))
++ return -EFAULT;
++ }
++ file->f_pos += ret;
++ return ret;
++}
++
++static struct file_operations ctcmpc_stat_fops = {
++ read: ctcmpc_stat_read,
++ write: ctcmpc_stat_write,
++ open: ctcmpc_stat_open,
++ release: ctcmpc_stat_close,
++};
++
++static struct file_operations ctcmpc_ctrl_fops = {
++ read: ctcmpc_ctrl_read,
++ write: ctcmpc_ctrl_write,
++ open: ctcmpc_ctrl_open,
++ release: ctcmpc_ctrl_close,
++};
++
++static struct inode_operations ctcmpc_stat_iops = {
++};
++static struct inode_operations ctcmpc_ctrl_iops = {
++};
++
++static struct proc_dir_entry stat_entry = {
++ 0, /* low_ino */
++ 10, /* namelen */
++ "statistics", /* name */
++ S_IFREG | S_IRUGO | S_IWUSR, /* mode */
++ 1, /* nlink */
++ 0, /* uid */
++ 0, /* gid */
++ 0, /* size */
++ &ctcmpc_stat_iops /* ops */
++};
++
++static struct proc_dir_entry ctrl_entry = {
++ 0, /* low_ino */
++ 10, /* namelen */
++ "buffersize", /* name */
++ S_IFREG | S_IRUSR | S_IWUSR, /* mode */
++ 1, /* nlink */
++ 0, /* uid */
++ 0, /* gid */
++ 0, /* size */
++ &ctcmpc_ctrl_iops /* ops */
++};
++
++static struct proc_dir_entry *ctcmpc_dir = NULL;
++static struct proc_dir_entry *ctcmpc_template = NULL;
++
++/**
++ * Create the driver's main directory /proc/net/mpc
++ */
++static void
++ctcmpc_proc_create_main(void)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ /**
++ * If not registered, register main proc dir-entry now
++ */
++ if(!ctcmpc_dir)
++ ctcmpc_dir = proc_mkdir("mpc", proc_net);
++
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc exit:%s() ctcmpc_dir:%lx\n",
++ __FUNCTION__,(unsigned long)ctcmpc_dir);
++#endif
++
++}
++
++#ifdef MODULE
++/**
++ * Destroy /proc/net/mpc
++ */
++static void
++ctcmpc_proc_destroy_main(void)
++{
++ #ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++ #endif
++
++ if(ctcmpc_dir)
++ remove_proc_entry("mpc", proc_net);
++}
++#endif /*MODULE*/
++
++/**
++ * Create a device specific subdirectory in /proc/net/mpc/ with the
++ * same name as the device. In that directory, create two entries,
++ * "statistics" and "buffersize".
++ *
++ * @param dev The device for which the subdirectory should be created.
++ *
++ */
++static void
++ctcmpc_proc_create_sub(net_device *dev)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++
++ privptr->proc_dentry = proc_mkdir(dev->name, ctcmpc_dir);
++ privptr->proc_stat_entry =
++ create_proc_entry("statistics",
++ S_IFREG | S_IRUSR | S_IWUSR,
++ privptr->proc_dentry);
++ privptr->proc_stat_entry->proc_fops = &ctcmpc_stat_fops;
++ privptr->proc_stat_entry->proc_iops = &ctcmpc_stat_iops;
++ privptr->proc_ctrl_entry =
++ create_proc_entry("buffersize",
++ S_IFREG | S_IRUSR | S_IWUSR,
++ privptr->proc_dentry);
++ privptr->proc_ctrl_entry->proc_fops = &ctcmpc_ctrl_fops;
++ privptr->proc_ctrl_entry->proc_iops = &ctcmpc_ctrl_iops;
++ privptr->proc_registered = 1;
++}
++
++
++/**
++ * Destroy a device specific subdirectory.
++ *
++ * @param privptr Pointer to device private data.
++ */
++static void
++ctcmpc_proc_destroy_sub(ctc_priv *privptr)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ if(!privptr->proc_registered)
++ return;
++ remove_proc_entry("statistics", privptr->proc_dentry);
++ remove_proc_entry("buffersize", privptr->proc_dentry);
++ remove_proc_entry(privptr->proc_dentry->name, ctcmpc_dir);
++ privptr->proc_registered = 0;
++}
++
++
++
++#ifndef CTC_CHANDEV
++/**
++ * Setup related routines
++ *****************************************************************************/
++
++/**
++ * Parse a portion of the setup string describing a single device or option
++ * providing the following syntax:
++ *
++ * [Device/OptionName[:int1][:int2][:int3]]
++ *
++ *
++ * @param setup Pointer to a pointer to the remainder of the parameter
++ * string to be parsed. On return, the content of this
++ * pointer is updated to point to the first character after
++ * the parsed portion (e.g. possible start of next portion)
++ * NOTE: The string pointed to must be writeable, since a
++ * \0 is written for termination of the device/option name.
++ *
++ * @param dev_name Pointer to a pointer to the name of the device whose
++ * parameters are parsed. On return, this is set to the
++ * name of the device/option.
++ *
++ * @param ints Pointer to an array of integer parameters. On return,
++ * element 0 is set to the number of parameters found.
++ *
++ * @param maxip Maximum number of ints to parse.
++ * (ints[] must have size maxip+1)
++ *
++ * @return 0 if string "setup" was empty, !=0 otherwise
++ */
++static int
++parse_opts(char **setup, char **dev_name, int *ints, int maxip)
++{
++ char *cur = *setup;
++ int i = 1;
++ int rc = 0;
++ int in_name = 1;
++ int noauto = 0;
++
++ #ifdef DEBUG
++ printk(KERN_INFO
++ "ctcmpc: parse_opts(): *setup='%s', maxip=%d\n", *setup, maxip);
++ #endif
++ if(*setup)
++ {
++ *dev_name = *setup;
++
++ if(strncmp(cur, "noauto", 6) && strncmp(cur, "mpc", 3))
++ {
++ if((*setup = strchr(cur, ':')))
++ *(*setup)++ = '\0';
++ printk(KERN_INFO
++ "ctcmpc: Invalid device name or option '%s'\n",
++ cur);
++ return 1;
++ }
++ switch(*cur)
++ {
++ case 'c':
++ cur += 3;
++ break;
++ case 'm':
++ cur += 3;
++ break;
++ case 'e':
++ cur += 5;
++ break;
++ case 'n':
++ cur += 6;
++ *cur++ = '\0';
++ noauto = 1;
++ }
++ if(!noauto)
++ {
++ while(cur &&
++ (*cur == '-' || isdigit(*cur)) &&
++ i <= maxip)
++ {
++ if(in_name)
++ {
++ cur++;
++ if(*cur == ':')
++ {
++ *cur++ = '\0';
++ in_name = 0;
++ }
++ } else
++ {
++ ints[i++] =
++ simple_strtoul(cur, NULL, 0);
++ #ifdef DEBUG
++ printk(KERN_INFO
++ "ctcmpc: %s: ints[%d]=%d\n",
++ __FUNCTION__,
++ i-1, ints[i-1]);
++ #endif
++ if((cur = strchr(cur, ':')) != NULL)
++ cur++;
++ }
++ }
++ }
++ ints[0] = i - 1;
++ *setup = cur;
++ if(cur && (*cur == ':'))
++ (*setup)++;
++ rc = 1;
++ }
++ return rc;
++}
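++
++/*
++ * Worked example (illustrative values only): for the portion
++ * "mpc0:0x600:0x601:0", parse_opts() returns 1 with *dev_name pointing
++ * at the now '\0'-terminated "mpc0", ints[0] == 3, ints[1] == 0x600,
++ * ints[2] == 0x601, ints[3] == 0, and *setup advanced past the parsed
++ * portion. For "noauto" it returns 1 with ints[0] == 0.
++ */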
++
++/**
++ *
++ * Allocate one param struct
++ *
++ * If the driver is loaded as a module, this function is called during
++ * module setup and we can allocate the struct with kmalloc().
++ *
++ * If the driver is statically linked into the kernel, this function is
++ * called before kmalloc() is available, so we must allocate from a
++ * static array.
++ *
++ */
++ #ifdef MODULE
++ #define alloc_param() \
++	((param *)kmalloc(sizeof(param), GFP_KERNEL))
++ #else
++static param parms_array[MAX_STATIC_DEVICES];
++static param *next_param = parms_array;
++ #define alloc_param() \
++ ((next_param<parms_array+MAX_STATIC_DEVICES)?next_param++:NULL)
++ #endif /*MODULE*/
++
++/**
++ * Returns commandline parameter using device name as key.
++ *
++ * @param name Name of interface to get parameters from.
++ *
++ * @return Pointer to corresponding param struct, NULL if not found.
++ */
++static param *
++find_param(char *name)
++{
++ param *p = params;
++
++ while(p && strcmp(p->name, name))
++ p = p->next;
++ return p;
++}
++
++/**
++ * maximum number of integer parameters that may be specified
++ * for one device in the setup string
++ */
++ #define CTC_MAX_INTPARMS 3
++
++/**
++ * Parse configuration options for all interfaces.
++ *
++ * This function is called from two possible locations:
++ * - If built as module, this function is called from init_module().
++ * - If built in monolithic kernel, this function is called from within
++ * init/main.c.
++ * Parsing is always done here.
++ *
++ * Valid parameters are:
++ *
++ *
++ * [NAME[:0xRRRR[:0xWWWW[:P]]]]
++ *
++ * where P is the channel protocol (always 0)
++ * 0xRRRR is the cu number for the read channel
++ * 0xWWWW is the cu number for the write channel
++ * NAME is either mpc0 ... mpcN for sna MPC
++ * or noauto
++ * which switches off auto-detection of channels.
++ *
++ * @param setup The parameter string to parse. MUST be writeable!
++ * @param ints Pointer to an array of ints.
++ */
++ #ifdef MODULE
++static void ctcmpc_setup(char *setup)
++ #define ctcmpc_setup_return return
++ #else /*MODULE*/
++static int __init ctcmpc_setup(char *setup)
++ #define ctcmpc_setup_return return(1)
++ #endif /*MODULE*/
++{
++ #ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++ #endif
++
++ int write_dev;
++ int read_dev;
++ int proto;
++ param *par;
++ char *dev_name;
++ int ints[CTC_MAX_INTPARMS+1];
++
++ while(parse_opts(&setup, &dev_name, ints, CTC_MAX_INTPARMS))
++ {
++ write_dev = -1;
++ read_dev = -1;
++ proto = CTC_PROTO_MPC;
++ #ifdef DEBUG
++ printk(KERN_INFO
++ "ctcmpc: ctcmpc_setup(): setup='%s' dev_name='%s',"
++                       " ints[0]=%d\n",
++ setup, dev_name, ints[0]);
++ #endif /*DEBUG*/
++ if(dev_name == NULL)
++ {
++ /**
++                         * happens if the device name is not specified in
++                         * the parameter line (cf. init/main.c:get_options())
++ */
++ printk(KERN_INFO
++ "ctcmpc: %s(): Device name not specified\n",
++ __FUNCTION__);
++ ctcmpc_setup_return;
++ }
++
++ #ifdef DEBUG
++                printk(KERN_INFO "name='%s' argc=%d\n", dev_name, ints[0]);
++ #endif
++
++ if(strcmp(dev_name, "noauto") == 0)
++ {
++ printk(KERN_INFO "ctcmpc: autoprobing disabled\n");
++ ctc_no_auto = 1;
++ continue;
++ }
++
++ if(find_param(dev_name) != NULL)
++ {
++ printk(KERN_INFO
++ "ctcmpc: Definition for device %s already set. "
++ "Ignoring second definition\n", dev_name);
++ continue;
++ }
++
++ switch(ints[0])
++ {
++ case 3: /* protocol type passed */
++ proto = ints[3];
++ if(proto != CTC_PROTO_MPC)
++ {
++ printk(KERN_INFO
++ "%s: wrong protocol type "
++ "passed\n", dev_name);
++ ctcmpc_setup_return;
++                        }
++                        /* fall through */
++                case 2: /* write channel passed */
++                        write_dev = ints[2];
++                        /* fall through */
++                case 1: /* read channel passed */
++ read_dev = ints[1];
++ if(write_dev == -1)
++ write_dev = read_dev + 1;
++ break;
++ default:
++ printk(KERN_INFO
++                               "ctcmpc: wrong number of parameters "
++                               "passed (is: %d, expected: [1..3])\n",
++ ints[0]);
++ ctcmpc_setup_return;
++ }
++ par = alloc_param();
++ if(!par)
++ {
++ #ifdef MODULE
++ printk(KERN_INFO
++ "ctcmpc: Couldn't allocate setup param block\n");
++ #else
++ printk(KERN_INFO
++ "ctcmpc: Number of device definitions in "
++                               "kernel command line exceeds built-in limit "
++                               "of %d devices.\n", MAX_STATIC_DEVICES);
++ #endif
++ ctcmpc_setup_return;
++ }
++ par->read_dev = read_dev;
++ par->write_dev = write_dev;
++ par->proto = proto;
++ strncpy(par->name, dev_name, MAX_PARAM_NAME_LEN);
++ par->next = params;
++ params = par;
++ #ifdef DEBUG
++ printk(KERN_INFO "%s: protocol=%x read=%04x write=%04x\n",
++ dev_name, proto, read_dev, write_dev);
++ #endif
++ }
++ ctcmpc_setup_return;
++}
++
++__setup("mpc=", ctcmpc_setup);
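++
++/*
++ * Example kernel command line (hypothetical channel numbers): booting
++ * with "mpc=mpc0:0x600:0x601:0" defines device mpc0 with read channel
++ * 0x600, write channel 0x601 and protocol 0; "mpc=noauto" instead
++ * switches off auto-detection of channels.
++ */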
++#endif /* !CTC_CHANDEV */
++
++
++static void
++ctcmpc_netdev_unregister(net_device *dev)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ ctc_priv *privptr;
++
++ if(!dev)
++ return;
++ privptr = (ctc_priv *)dev->priv;
++ unregister_netdev(dev);
++}
++
++static int
++ctcmpc_netdev_register(net_device *dev)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ return register_netdev(dev);
++}
++
++static void
++ctcmpc_free_netdevice(net_device *dev, int free_dev)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ ctc_priv *privptr;
++ mpc_group *grpptr;
++
++ if(!dev)
++ return;
++ privptr = (ctc_priv *)dev->priv;
++ if(privptr)
++ {
++ grpptr = privptr->mpcg;
++ if(privptr->fsm)
++ kfree_fsm(privptr->fsm);
++ ctcmpc_proc_destroy_sub(privptr);
++ if(grpptr)
++ {
++ if(grpptr->fsm)
++ kfree_fsm(grpptr->fsm);
++ if(grpptr->xid_skb)
++ dev_kfree_skb(grpptr->xid_skb);
++ if(grpptr->rcvd_xid_skb)
++ dev_kfree_skb(grpptr->rcvd_xid_skb);
++ tasklet_kill(&grpptr->mpc_tasklet2);
++ kfree(grpptr);
++ }
++ kfree(privptr);
++ }
++#ifdef MODULE
++ if(free_dev)
++ kfree(dev);
++#endif
++}
++
++#ifdef CTC_CHANDEV
++static int
++ctcmpc_shutdown(net_device *dev)
++{
++ #ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++ #endif
++
++ ctc_priv *privptr;
++
++ if(!dev)
++ return 0;
++ privptr = (ctc_priv *)dev->priv;
++ channel_remove(privptr->channel[READ]);
++ channel_remove(privptr->channel[WRITE]);
++ ctcmpc_free_netdevice(dev, 0);
++ return 0;
++}
++#endif
++
++/**
++ * Initialize all fields of the net device except the name and the
++ * channel structs.
++ */
++static net_device *
++ctcmpc_init_netdevice(net_device *dev, int alloc_device)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++ ctc_priv *privptr;
++ mpc_group *grpptr;
++ int priv_size;
++ if(alloc_device)
++ {
++                dev = kmalloc(sizeof(net_device), GFP_KERNEL);
++ if(!dev)
++ return NULL;
++#ifdef DEBUG
++ printk(KERN_INFO "kmalloc dev: %s()\n", __FUNCTION__);
++#endif
++
++ memset(dev, 0, sizeof(net_device));
++ }
++ priv_size = sizeof(ctc_priv) + sizeof(ctcmpc_template) +
++ sizeof(stat_entry) + sizeof(ctrl_entry);
++ dev->priv = kmalloc(priv_size, GFP_KERNEL);
++ if(dev->priv == NULL)
++ {
++ if(alloc_device)
++ kfree(dev);
++ return NULL;
++ }
++
++ memset(dev->priv, 0, priv_size);
++ privptr = (ctc_priv *)dev->priv;
++
++ privptr->proc_dentry = (struct proc_dir_entry *)
++ (((char *)privptr) + sizeof(ctc_priv));
++ privptr->proc_stat_entry = (struct proc_dir_entry *)
++ (((char *)privptr) + sizeof(ctc_priv) +
++ sizeof(ctcmpc_template));
++ privptr->proc_ctrl_entry = (struct proc_dir_entry *)
++ (((char *)privptr) + sizeof(ctc_priv) +
++ sizeof(ctcmpc_template) +
++ sizeof(stat_entry));
++ memcpy(privptr->proc_dentry, &ctcmpc_template, sizeof(ctcmpc_template));
++ memcpy(privptr->proc_stat_entry, &stat_entry, sizeof(stat_entry));
++ memcpy(privptr->proc_ctrl_entry, &ctrl_entry, sizeof(ctrl_entry));
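++        /*
++         * dev->priv is thus a single allocation: the ctc_priv followed by
++         * slots seeded from the static proc templates above. Note that
++         * ctcmpc_proc_create_sub() later repoints proc_dentry and the two
++         * entry pointers at the objects returned by proc_mkdir() and
++         * create_proc_entry(), so the embedded copies act as placeholders.
++         */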
++
++ privptr->fsm = init_fsm("ctcdev", dev_state_names,
++ dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
++ dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
++ if(privptr->fsm == NULL)
++ {
++ kfree(privptr);
++ privptr = NULL;
++ if(alloc_device)
++ kfree(dev);
++ return NULL;
++ }
++
++ fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
++ fsm_settimer(privptr->fsm, &privptr->restart_timer);
++ /********************************************************/
++ /* MPC Group Initializations */
++ /********************************************************/
++ privptr->mpcg = kmalloc(sizeof(mpc_group),GFP_KERNEL);
++ if(privptr->mpcg == NULL)
++ {
++ kfree(privptr);
++ privptr = NULL;
++ if(alloc_device)
++ kfree(dev);
++ return NULL;
++ }
++ grpptr = privptr->mpcg;
++ memset(grpptr, 0, sizeof(mpc_group));
++
++ grpptr->fsm = init_fsm("mpcg", mpcg_state_names,
++ mpcg_event_names,
++ NR_MPCG_STATES,
++ NR_MPCG_EVENTS,
++ mpcg_fsm,
++ MPCG_FSM_LEN,
++ GFP_KERNEL);
++ if(grpptr->fsm == NULL)
++ {
++ kfree(grpptr);
++ grpptr = NULL;
++ kfree(privptr);
++ privptr = NULL;
++ if(alloc_device)
++ kfree(dev);
++ return NULL;
++ }
++
++ fsm_newstate(grpptr->fsm, MPCG_STATE_RESET);
++ fsm_settimer(grpptr->fsm,&grpptr->timer);
++
++
++ grpptr->xid_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT,
++ GFP_ATOMIC|GFP_DMA);
++ if(grpptr->xid_skb == NULL)
++ {
++ printk(KERN_INFO
++ "Couldn't alloc MPCgroup xid_skb\n");
++ kfree_fsm(grpptr->fsm);
++ grpptr->fsm = NULL;
++ kfree(grpptr);
++ grpptr = NULL;
++ kfree(privptr);
++ privptr = NULL;
++ if(alloc_device)
++ kfree(dev);
++ return NULL;
++ }
++ /* base xid for all channels in group */
++ grpptr->xid_skb_data = grpptr->xid_skb->data;
++ grpptr->xid_th = (th_header *)grpptr->xid_skb->data;
++ memcpy(skb_put(grpptr->xid_skb,
++ TH_HEADER_LENGTH),
++ &thnorm,
++ TH_HEADER_LENGTH);
++
++ privptr->xid = grpptr->xid = (xid2 *)grpptr->xid_skb->tail;
++ memcpy(skb_put(grpptr->xid_skb,XID2_LENGTH),&init_xid,XID2_LENGTH);
++ privptr->xid->xid2_adj_id = jiffies | 0xfff00000;
++ privptr->xid->xid2_sender_id = jiffies;
++
++ grpptr->xid_id = (char *)grpptr->xid_skb->tail;
++ memcpy(skb_put(grpptr->xid_skb,4),"VTAM",4);
++
++
++ grpptr->rcvd_xid_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT,
++ GFP_ATOMIC|GFP_DMA);
++ if(grpptr->rcvd_xid_skb == NULL)
++ {
++ printk(KERN_INFO
++ "Couldn't alloc MPCgroup rcvd_xid_skb\n");
++ kfree_fsm(grpptr->fsm);
++ grpptr->fsm = NULL;
++ dev_kfree_skb(grpptr->xid_skb);
++ grpptr->xid_skb = NULL;
++ grpptr->xid_id = NULL;
++ grpptr->xid_skb_data = NULL;
++ grpptr->xid_th = NULL;
++ kfree(grpptr);
++ grpptr = NULL;
++ privptr->xid = NULL;
++ kfree(privptr);
++ privptr = NULL;
++ if(alloc_device)
++ kfree(dev);
++ return NULL;
++ }
++
++ grpptr->rcvd_xid_data = grpptr->rcvd_xid_skb->data;
++ grpptr->rcvd_xid_th = (th_header *)grpptr->rcvd_xid_skb->data;
++ memcpy(skb_put(grpptr->rcvd_xid_skb,TH_HEADER_LENGTH),
++ &thnorm,
++ TH_HEADER_LENGTH);
++ grpptr->saved_xid2 = NULL;
++
++ tasklet_init(&grpptr->mpc_tasklet2,
++ ctcmpc_group_ready,
++ (unsigned long)dev);
++        /********************************************************/
++        /*          End of MPC Group Initializations            */
++        /********************************************************/
++
++
++ dev->mtu =
++ MPC_BUFSIZE_DEFAULT -
++ TH_HEADER_LENGTH -
++ PDU_HEADER_LENGTH;
++ dev->hard_start_xmit = ctcmpc_tx;
++ dev->open = ctcmpc_open;
++ dev->stop = ctcmpc_close;
++ dev->get_stats = ctcmpc_stats;
++ dev->change_mtu = ctcmpc_change_mtu;
++ dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
++ dev->addr_len = 0;
++ dev->type = ARPHRD_SLIP;
++ dev->tx_queue_len = 100;
++ SET_DEVICE_START(dev, 1);
++ dev_init_buffers(dev);
++ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
++ return dev;
++}
++
++#ifdef CTC_CHANDEV
++static void
++ctcmpc_chandev_msck_notify(void *dev, int msck_irq,
++ chandev_msck_status prevstatus,
++ chandev_msck_status newstatus)
++{
++
++ #ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++ #endif
++
++ net_device *device = (net_device *)dev;
++ ctc_priv *privptr;
++ int direction;
++
++ if(!dev)
++ return;
++
++ privptr = device->priv;
++ if(prevstatus == chandev_status_revalidate)
++ for(direction = READ; direction <= WRITE; direction++)
++ {
++ channel *ch = privptr->channel[direction];
++ if(ch->irq == msck_irq)
++ {
++ s390_dev_info_t devinfo;
++
++ if(get_dev_info_by_irq(ch->irq, &devinfo))
++ ch->devno = devinfo.devno;
++ else
++ printk(KERN_INFO
++ "ctcmpc_chandev_msck_notify: "
++ "get_dev_info_by_irq failed for "
++ "irq %d\n", ch->irq);
++ }
++ }
++ switch(newstatus)
++ {
++ case chandev_status_not_oper:
++ case chandev_status_no_path:
++ case chandev_status_gone:
++ for(direction = READ; direction <= WRITE; direction++)
++ {
++ channel *ch = privptr->channel[direction];
++ fsm_event(ch->fsm, CH_EVENT_MC_FAIL, ch);
++ }
++ printk(KERN_INFO
++ "ctcmpc: %s channel deactivated\n",
++ device->name);
++ break;
++ case chandev_status_all_chans_good:
++ for(direction = READ; direction <= WRITE; direction++)
++ {
++ channel *ch = privptr->channel[direction];
++ fsm_event(ch->fsm, CH_EVENT_MC_GOOD, ch);
++ }
++ printk(KERN_INFO
++ "ctcmpc: %s channel activated\n", device->name);
++ break;
++ default:
++ break;
++ }
++}
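++
++/*
++ * Summary of the notifier above: on a revalidate the channel device
++ * numbers are refreshed via get_dev_info_by_irq(); the new machine
++ * check status is then mapped onto channel FSM events, CH_EVENT_MC_FAIL
++ * for not-oper/no-path/gone and CH_EVENT_MC_GOOD once all channels are
++ * good again.
++ */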
++
++/**
++ *
++ * Setup an interface.
++ *
++ * Like ctcmpc_setup(), ctcmpc_chandev_probe() can be called from two
++ * different locations:
++ * - If built as module, it is called from within init_module().
++ * - If built in monolithic kernel, it is called from within generic network
++ * layer during initialization for every corresponding device, declared in
++ * drivers/net/Space.c
++ *
++ * @param info Pointer to the chandev_probeinfo for this device.
++ *
++ * @returns 0 on success, !0 on failure.
++ */
++static int
++ctcmpc_chandev_probe(chandev_probeinfo *info)
++{
++ #ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++ #endif
++
++ int devno[2];
++ __u16 proto;
++ int rc;
++ int direction;
++ channel_type_t type;
++ ctc_priv *privptr;
++ net_device *dev;
++
++ ctcmpc_proc_create_main();
++
++
++ switch(info->chan_type)
++ {
++ case chandev_type_ctcmpc:
++ type = channel_type_mpc;
++ break;
++ case chandev_type_escon:
++ type = channel_type_escon;
++ break;
++ default:
++ printk(KERN_INFO "ctcmpc_chandev_probe called with "
++ "unsupported channel type %d\n",
++ info->chan_type);
++ return -ENODEV;
++ }
++ devno[READ] = info->read.devno;
++ devno[WRITE] = info->write.devno;
++ proto = info->port_protocol_no;
++
++ if(ctcmpc_add_channel(info->read.irq, info->read.devno, type))
++ return -ENOMEM;
++ if(ctcmpc_add_channel(info->write.irq, info->write.devno, type))
++ return -ENOMEM;
++
++ dev = ctcmpc_init_netdevice(NULL, 1);
++
++
++ if(!dev)
++ {
++ printk(KERN_INFO "ctcmpc_init_netdevice failed\n");
++ return -ENODEV;
++ }
++
++ chandev_build_device_name(info, dev->name, "mpc", 1);
++
++ privptr = (ctc_priv *)dev->priv;
++ privptr->protocol = proto;
++ privptr->xid = (xid2 *)kmalloc(sizeof(xid2), GFP_KERNEL);
++ if(privptr->xid == NULL)
++ {
++ printk(KERN_INFO "ctcmpc: Out of memory in chandev_probe\n");
++ return -1;
++ }
++ for(direction = READ; direction <= WRITE; direction++)
++ {
++ privptr->channel[direction] =
++ channel_get(type, devno[direction], direction);
++ if(privptr->channel[direction] == NULL)
++ {
++ if(direction == WRITE)
++ {
++ FREE_IRQ(privptr->channel[READ]->irq,
++ privptr->channel[READ]->devstat);
++ channel_free(privptr->channel[READ]);
++ }
++ ctcmpc_free_netdevice(dev, 1);
++ return -ENODEV;
++ }
++ privptr->channel[direction]->netdev = dev;
++ privptr->channel[direction]->protocol = proto;
++                /* TODO: who put this 35 here? Make it a #define. */
++ privptr->channel[direction]->max_bufsize =
++ (MPC_BUFSIZE_DEFAULT - 35);
++ rc = REQUEST_IRQ(privptr->channel[direction]->irq,
++ (void *)ctcmpc_irq_handler, SA_INTERRUPT,
++ dev->name,
++ privptr->channel[direction]->devstat);
++ if(rc)
++ {
++ printk(KERN_INFO
++ "%s: requested irq %d is busy rc=%02x\n",
++ dev->name, privptr->channel[direction]->irq,
++ rc);
++ if(direction == WRITE)
++ {
++ FREE_IRQ(privptr->channel[READ]->irq,
++ privptr->channel[READ]->devstat);
++ channel_free(privptr->channel[READ]);
++ }
++ channel_free(privptr->channel[direction]);
++ ctcmpc_free_netdevice(dev, 1);
++ return -EBUSY;
++ }
++ }
++ if(ctcmpc_netdev_register(dev) != 0)
++ {
++ ctcmpc_free_netdevice(dev, 1);
++ return -ENODEV;
++ }
++
++ /**
++ * register subdir in /proc/net/mpc
++ */
++ ctcmpc_proc_create_sub(dev);
++ strncpy(privptr->fsm->name, dev->name, sizeof(privptr->fsm->name));
++ activated++;
++
++ print_banner();
++
++ printk(KERN_INFO
++ "%s: read: ch %04x (irq %04x), "
++ "write: ch %04x (irq %04x) proto: %d\n",
++ dev->name, privptr->channel[READ]->devno,
++ privptr->channel[READ]->irq, privptr->channel[WRITE]->devno,
++ privptr->channel[WRITE]->irq, proto);
++
++ chandev_initdevice(info, dev, 0, dev->name,
++ (proto == CTC_PROTO_MPC)
++ ? chandev_category_serial_device :
++ chandev_category_network_device,
++ (chandev_unregfunc)ctcmpc_netdev_unregister);
++ return 0;
++}
++#else /* ! CHANDEV */
++/**
++ *
++ * Setup an interface.
++ *
++ * Like ctcmpc_setup(), ctcmpc_probe() can be called from two different
++ * locations:
++ * - If built as module, it is called from within init_module().
++ * - If built in monolithic kernel, it is called from within generic network
++ * layer during initialization for every corresponding device, declared in
++ * drivers/net/Space.c
++ *
++ * @param dev Pointer to net_device to be initialized.
++ *
++ * @returns 0 on success, !0 on failure.
++ */
++int
++ctcmpc_probe(net_device *dev)
++{
++ #ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++ #endif
++
++ int devno[2];
++ __u16 proto;
++ int rc;
++ int direction;
++ channel_type_t type;
++ ctc_priv *privptr;
++ param *par;
++
++ ctcmpc_proc_create_main();
++
++ /**
++         * Scan for available channels only the first time
++         * ctcmpc_probe() gets control.
++ */
++ if(channels == NULL)
++ channel_scan();
++
++ type = extract_channel_media(dev->name);
++ if(type == channel_type_unknown)
++ return -ENODEV;
++
++ par = find_param(dev->name);
++ if(par)
++ {
++ devno[READ] = par->read_dev;
++ devno[WRITE] = par->write_dev;
++ proto = par->proto;
++ } else
++ {
++ if(ctc_no_auto)
++ return -ENODEV;
++ else
++ {
++ devno[READ] = -1;
++ devno[WRITE] = -1;
++ proto = CTC_PROTO_MPC;
++ }
++ }
++
++ #ifndef MODULE
++ if(ctcmpc_init_netdevice(dev, 0) == NULL)
++ return -ENODEV;
++ #endif
++ privptr = (ctc_priv *)dev->priv;
++ privptr->protocol = proto;
++
++ for(direction = READ; direction <= WRITE; direction++)
++ {
++ if((ctc_no_auto == 0) || (devno[direction] == -1))
++ privptr->channel[direction] =
++ channel_get_next(type, direction);
++ else
++ privptr->channel[direction] =
++ channel_get(type, devno[direction], direction);
++ if(privptr->channel[direction] == NULL)
++ {
++ if(direction == WRITE)
++ {
++ FREE_IRQ(privptr->channel[READ]->irq,
++ privptr->channel[READ]->devstat);
++ channel_free(privptr->channel[READ]);
++ }
++ ctcmpc_free_netdevice(dev, 1);
++ return -ENODEV;
++ }
++ privptr->channel[direction]->netdev = dev;
++ privptr->channel[direction]->protocol = proto;
++ privptr->channel[direction]->max_bufsize =
++ (MPC_BUFSIZE_DEFAULT - 35);
++ rc = REQUEST_IRQ(privptr->channel[direction]->irq,
++ (void *)ctcmpc_irq_handler, SA_INTERRUPT,
++ dev->name,
++ privptr->channel[direction]->devstat);
++ if(rc)
++ {
++ printk(KERN_INFO
++ "%s: requested irq %d is busy rc=%02x\n",
++ dev->name, privptr->channel[direction]->irq,
++ rc);
++ if(direction == WRITE)
++ {
++ FREE_IRQ(privptr->channel[READ]->irq,
++ privptr->channel[READ]->devstat);
++ channel_free(privptr->channel[READ]);
++ }
++ channel_free(privptr->channel[direction]);
++ ctcmpc_free_netdevice(dev, 1);
++ return -EBUSY;
++ }
++ }
++
++ /**
++         * register subdir in /proc/net/mpc
++ */
++ ctcmpc_proc_create_sub(dev);
++
++ print_banner();
++
++ printk(KERN_INFO
++ "%s: read: ch %04x (irq %04x), "
++ "write: ch %04x (irq %04x) proto: %d\n",
++ dev->name, privptr->channel[READ]->devno,
++ privptr->channel[READ]->irq, privptr->channel[WRITE]->devno,
++ privptr->channel[WRITE]->irq, proto);
++
++ return 0;
++}
++#endif
++
++/**
++ * Module related routines
++ *****************************************************************************/
++
++#ifdef MODULE
++/**
++ * Prepare to be unloaded. Free IRQs and release all resources.
++ * This is called just before this module is unloaded. It is
++ * <em>not</em> called if the usage count is !0, so we don't need to
++ * check for that.
++ */
++void
++cleanup_module(void)
++{
++ #ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++ #endif
++
++
++        /* we are only called when all interfaces are down, so there
++         * is no need to bother with locking
++         */
++ #ifndef CTC_CHANDEV
++ while(channels)
++ {
++ if((channels->flags & CHANNEL_FLAGS_INUSE) &&
++ (channels->netdev != NULL))
++ {
++ net_device *dev = channels->netdev;
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++
++ if(privptr)
++ {
++ privptr->channel[READ]->netdev = NULL;
++ privptr->channel[WRITE]->netdev = NULL;
++ }
++ channels->netdev = NULL;
++ ctcmpc_netdev_unregister(dev);
++ ctcmpc_free_netdevice(dev, 1);
++ }
++ channel_remove(channels);
++ }
++ channels = NULL;
++ #endif
++
++ ctcmpc_proc_destroy_main();
++ #ifdef CTC_CHANDEV
++ chandev_unregister(ctcmpc_chandev_probe, 1);
++ #endif
++ printk(KERN_INFO "MPC driver unloaded\n");
++}
++
++ #define ctcmpc_init init_module
++#endif /*MODULE*/
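++
++/*
++ * When built as a module, the #define above turns ctcmpc_init() into
++ * init_module(); when built in, the __initcall(ctcmpc_init) at the end
++ * of this file hooks it into the boot-time initialization sequence.
++ */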
++
++/**
++ * Initialize module.
++ * This is called just after the module is loaded.
++ *
++ * @return 0 on success, !0 on error.
++ */
++int
++ctcmpc_init(void)
++{
++#ifdef DEBUG
++ printk(KERN_INFO "ctcmpc enter: %s()\n", __FUNCTION__);
++#endif
++
++#ifndef CTC_CHANDEV
++ int cnt;
++ int activated;
++ param *par;
++#endif
++ int ret = 0;
++ int probed = 0;
++
++ print_banner();
++
++#if defined(DEBUG) && !defined(CTC_CHANDEV)
++ printk(KERN_INFO
++ "ctcmpc: init_module(): got string '%s'\n", mpc);
++#endif
++
++#ifndef CTC_CHANDEV
++ #ifdef MODULE
++ ctcmpc_setup(mpc);
++ #endif
++ par = params;
++#endif
++
++ activated = 0;
++
++#ifdef CTC_CHANDEV
++ chandev_register_and_probe(ctcmpc_chandev_probe,
++ (chandev_shutdownfunc)ctcmpc_shutdown,
++ ctcmpc_chandev_msck_notify,
++ chandev_type_ctcmpc|chandev_type_escon);
++#else /* CTC_CHANDEV */
++ net_device *dev = NULL;
++ char *bname = "mpc";
++
++ cnt = 0;
++ do
++ {
++ dev = ctcmpc_init_netdevice(NULL, 1);
++ if(!dev)
++ {
++ ret = -ENOMEM;
++ break;
++ }
++ if(par && par->name)
++ {
++ char *p;
++ int n;
++
++ sprintf(dev->name, "%s", par->name);
++ par = par->next;
++ for(p = dev->name; p && *p; p++)
++ if(isdigit(*p))
++ break;
++ if(p && *p)
++ {
++ n = simple_strtoul(p, NULL, 0);
++ if(n >= cnt)
++ cnt = n + 1;
++ }
++ } else
++ {
++ if(ctc_no_auto)
++ {
++ ctcmpc_free_netdevice(dev, 1);
++ dev = NULL;
++ break;
++ }
++ sprintf(dev->name, "%s%d", bname,
++ cnt++);
++ }
++ #ifdef DEBUG
++ printk(KERN_INFO "ctcmpc: %s(): probing for device %s\n",
++ __FUNCTION__, dev->name);
++ #endif
++ probed = 1;
++ if(ctcmpc_probe(dev) == 0)
++ {
++ ctc_priv *privptr = (ctc_priv *)dev->priv;
++ #ifdef DEBUG
++ printk(KERN_INFO
++ "ctcmpc: %s(): probing succeeded\n",
++ __FUNCTION__);
++ printk(KERN_INFO
++ "ctcmpc: %s(): registering device %s\n",
++ __FUNCTION__, dev->name);
++ #endif
++ if(ctcmpc_netdev_register(dev) != 0)
++ {
++ printk(KERN_INFO
++ "ctcmpc: Couldn't register %s\n",
++ dev->name);
++ FREE_IRQ(
++ privptr->channel[READ]->irq,
++ privptr->channel[READ]->devstat);
++ FREE_IRQ(
++ privptr->channel[WRITE]->irq,
++ privptr->channel[WRITE]->devstat);
++ channel_free(privptr->channel[READ]);
++ channel_free(privptr->channel[WRITE]);
++ ctcmpc_free_netdevice(dev, 1);
++ dev = NULL;
++ } else
++ {
++ #ifdef DEBUG
++ printk(KERN_INFO
++                                       "ctcmpc: %s(): register succeeded\n",
++ __FUNCTION__);
++ #endif
++ activated++;
++ }
++ } else
++ {
++ #ifdef DEBUG
++ printk(KERN_INFO
++ "ctcmpc: %s(): probing failed\n",
++ __FUNCTION__);
++ #endif
++ dev = NULL;
++ }
++ } while(dev && (ret == 0));
++#endif /* CHANDEV */
++#if !defined(CTC_CHANDEV) && defined(MODULE)
++ if(!activated)
++ {
++ printk(KERN_INFO "ctcmpc: No devices registered\n");
++ ret = -ENODEV;
++ }
++#endif
++ if(ret)
++ {
++#if defined(CTC_CHANDEV) && defined(MODULE)
++ chandev_unregister(ctcmpc_chandev_probe, 0);
++#endif
++#ifdef MODULE
++ if(probed)
++ ctcmpc_proc_destroy_main();
++#endif
++ }
++ return ret;
++}
++
++#ifndef MODULE
++__initcall(ctcmpc_init);
++#endif /* MODULE */
++
++/* --- This is the END my friend --- */
+=== drivers/s390/net/ctctty.c
+==================================================================
+--- drivers/s390/net/ctctty.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/net/ctctty.c (/trunk/2.4.27) (revision 52)
+@@ -42,7 +42,7 @@
+ #define init_waitqueue_head(x) *(x)=NULL
+ #define __set_current_state(state_value) \
+ do { current->state = state_value; } while (0)
+-#ifdef CONFIG_SMP
++#ifdef __SMP__
+ #define set_current_state(state_value) \
+ do { __set_current_state(state_value); mb(); } while (0)
+ #else
+=== drivers/s390/net/iucv.h
+==================================================================
+--- drivers/s390/net/iucv.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/net/iucv.h (/trunk/2.4.27) (revision 52)
+@@ -449,7 +449,7 @@
+ * buflen - Length of reply buffer.
+ * Output: residual_buffer - Address of buffer updated by the number
+ * of bytes you have moved.
+- * residual_length - Contains one of the following values:
++ * residual_length - Contains one of the following values:
+ * If the answer buffer is the same length as the reply, this field
+ * contains zero.
+ * If the answer buffer is longer than the reply, this field contains
+@@ -483,7 +483,7 @@
+ * buffer - Address of array of reply buffers.
+ * buflen - Total length of reply buffers.
+ * Output: residual_buffer - Address of buffer which IUCV is currently working on.
+- * residual_length - Contains one of the following values:
++ * residual_length - Contains one of the following values:
+ * If the answer buffer is the same length as the reply, this field
+ * contains zero.
+ * If the answer buffer is longer than the reply, this field contains
+=== drivers/s390/net/netiucv.c
+==================================================================
+--- drivers/s390/net/netiucv.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/net/netiucv.c (/trunk/2.4.27) (revision 52)
+@@ -1,5 +1,5 @@
+ /*
+- * $Id: netiucv.c,v 1.23 2003/06/24 16:05:32 felfert Exp $
++ * $Id: netiucv.c,v 1.21.8.6 2004/06/29 07:37:33 braunu Exp $
+ *
+ * IUCV network driver
+ *
+@@ -28,7 +28,7 @@
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+- * RELEASE-TAG: IUCV network driver $Revision: 1.23 $
++ * RELEASE-TAG: IUCV network driver $Revision: 1.21.8.6 $
+ *
+ */
+
+@@ -114,17 +114,13 @@
+ spinlock_t collect_lock;
+ int collect_len;
+ int max_buffsize;
+- int flags;
+ fsm_timer timer;
+- int retry;
+ fsm_instance *fsm;
+ net_device *netdev;
+ connection_profile prof;
+ char userid[9];
+ } iucv_connection;
+
+-#define CONN_FLAGS_BUFSIZE_CHANGED 1
+-
+ /**
+ * Linked list of all connection structs.
+ */
+@@ -590,7 +586,7 @@
+ iucv_MessagePending *eib = (iucv_MessagePending *)ev->data;
+ netiucv_priv *privptr = (netiucv_priv *)conn->netdev->priv;
+
+- __u16 msglen = eib->ln1msg2.ipbfln1f;
++ __u32 msglen = eib->ln1msg2.ipbfln1f;
+ int rc;
+
+ #ifdef DEBUG
+@@ -613,6 +609,7 @@
+ conn->rx_buff->data, msglen, NULL, NULL, NULL);
+ if (rc != 0 || msglen < 5) {
+ privptr->stats.rx_errors++;
++ printk(KERN_INFO "iucv_receive returned %08x\n", rc);
+ return;
+ }
+ netiucv_unpack_skb(conn, conn->rx_buff);
+@@ -637,7 +634,6 @@
+ #ifdef DEBUG
+ printk(KERN_DEBUG "%s() called\n", __FUNCTION__);
+ #endif
+- fsm_deltimer(&conn->timer);
+ if (conn && conn->netdev && conn->netdev->priv)
+ privptr = (netiucv_priv *)conn->netdev->priv;
+ conn->prof.tx_pending--;
+@@ -645,12 +641,13 @@
+ if ((skb = skb_dequeue(&conn->commit_queue))) {
+ atomic_dec(&skb->users);
+ dev_kfree_skb_any(skb);
++ if (privptr) {
++ privptr->stats.tx_packets++;
++ privptr->stats.tx_bytes +=
++ (skb->len - NETIUCV_HDRLEN
++ - NETIUCV_HDRLEN);
++ }
+ }
+- if (privptr) {
+- privptr->stats.tx_packets++;
+- privptr->stats.tx_bytes +=
+- (skb->len - NETIUCV_HDRLEN - NETIUCV_HDRLEN);
+- }
+ }
+ conn->tx_buff->data = conn->tx_buff->tail = conn->tx_buff->head;
+ conn->tx_buff->len = 0;
+@@ -677,8 +674,6 @@
+ memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
+ NETIUCV_HDRLEN);
+
+- fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
+- CONN_EVENT_TIMER, conn);
+ conn->prof.send_stamp = xtime;
+ rc = iucv_send(conn->pathid, NULL, 0, 0, 0, 0,
+ conn->tx_buff->data, conn->tx_buff->len);
+@@ -688,12 +683,11 @@
+ if (conn->prof.tx_pending > conn->prof.tx_max_pending)
+ conn->prof.tx_max_pending = conn->prof.tx_pending;
+ if (rc != 0) {
+- fsm_deltimer(&conn->timer);
+ conn->prof.tx_pending--;
+ fsm_newstate(fi, CONN_STATE_IDLE);
+ if (privptr)
+ privptr->stats.tx_errors += txpackets;
+- printk(KERN_DEBUG "iucv_send returned %08x\n",
++ printk(KERN_INFO "iucv_send returned %08x\n",
+ rc);
+ } else {
+ if (privptr) {
+@@ -762,6 +756,7 @@
+ #ifdef DEBUG
+ printk(KERN_DEBUG "%s() called\n", __FUNCTION__);
+ #endif
++ fsm_deltimer(&conn->timer);
+ fsm_newstate(fi, CONN_STATE_IDLE);
+ conn->pathid = eib->ippathid;
+ netdev->tx_queue_len = eib->ipmsglim;
+@@ -769,6 +764,19 @@
+ }
+
+ static void
++conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
++{
++ iucv_connection *conn = (iucv_connection *)arg;
++ __u8 udata[16];
++
++ pr_debug("%s() called\n", __FUNCTION__);
++
++ fsm_deltimer(&conn->timer);
++ iucv_sever(conn->pathid, udata);
++ fsm_newstate(fi, CONN_STATE_STARTWAIT);
++}
++
++static void
+ conn_action_connsever(fsm_instance *fi, int event, void *arg)
+ {
+ iucv_event *ev = (iucv_event *)arg;
+@@ -776,30 +784,17 @@
+ // iucv_ConnectionSevered *eib = (iucv_ConnectionSevered *)ev->data;
+ net_device *netdev = conn->netdev;
+ netiucv_priv *privptr = (netiucv_priv *)netdev->priv;
+- int state = fsm_getstate(fi);
++ __u8 udata[16];
+
+ #ifdef DEBUG
+ printk(KERN_DEBUG "%s() called\n", __FUNCTION__);
+ #endif
+- switch (state) {
+- case CONN_STATE_SETUPWAIT:
+- printk(KERN_INFO "%s: Remote dropped connection\n",
+- netdev->name);
+- conn->handle = 0;
+- fsm_newstate(fi, CONN_STATE_STOPPED);
+- fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
+- break;
+- case CONN_STATE_IDLE:
+- case CONN_STATE_TX:
+- printk(KERN_INFO "%s: Remote dropped connection\n",
+- netdev->name);
+- if (conn->handle)
+- iucv_unregister_program(conn->handle);
+- conn->handle = 0;
+- fsm_newstate(fi, CONN_STATE_STOPPED);
+- fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
+- break;
+- }
++ fsm_deltimer(&conn->timer);
++ iucv_sever(conn->pathid, udata);
++ printk(KERN_INFO "%s: Remote dropped connection\n",
++ netdev->name);
++ fsm_newstate(fi, CONN_STATE_STARTWAIT);
++ fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
+ }
+
+ static void
+@@ -807,7 +802,7 @@
+ {
+ iucv_event *ev = (iucv_event *)arg;
+ iucv_connection *conn = ev->conn;
+-
++ __u16 msglimit;
+ int rc;
+
+ #ifdef DEBUG
+@@ -839,10 +834,13 @@
+
+ fsm_newstate(fi, CONN_STATE_SETUPWAIT);
+ rc = iucv_connect(&(conn->pathid), NETIUCV_QUEUELEN_DEFAULT, iucvMagic,
+- conn->userid, iucv_host, 0, NULL, NULL, conn->handle,
++ conn->userid, iucv_host, 0, NULL, &msglimit, conn->handle,
+ conn);
+ switch (rc) {
+ case 0:
++ conn->netdev->tx_queue_len = msglimit;
++ fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
++ CONN_EVENT_TIMER, conn);
+ return;
+ case 11:
+ printk(KERN_NOTICE
+@@ -910,6 +908,7 @@
+ #ifdef DEBUG
+ printk(KERN_DEBUG "%s() called\n", __FUNCTION__);
+ #endif
++ fsm_deltimer(&conn->timer);
+ fsm_newstate(fi, CONN_STATE_STOPPED);
+ netiucv_purge_skb_queue(&conn->collect_queue);
+ if (conn->handle)
+@@ -934,8 +933,8 @@
+ static const fsm_node conn_fsm[] = {
+ { CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval },
+ { CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start },
+- { CONN_STATE_STARTWAIT, CONN_EVENT_START, conn_action_start },
+
++ { CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop },
+ { CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop },
+ { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop },
+ { CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop },
+@@ -950,6 +949,7 @@
+ { CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject },
+
+ { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack },
++ { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev },
+
+ { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever },
+ { CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever },
+@@ -1026,6 +1026,7 @@
+ dev_action_connup(fsm_instance *fi, int event, void *arg)
+ {
+ net_device *dev = (net_device *)arg;
++ netiucv_priv *privptr = (netiucv_priv *)dev->priv;
+
+ #ifdef DEBUG
+ printk(KERN_DEBUG "%s() called\n", __FUNCTION__);
+@@ -1034,8 +1035,8 @@
+ case DEV_STATE_STARTWAIT:
+ fsm_newstate(fi, DEV_STATE_RUNNING);
+ printk(KERN_INFO
+- "%s: connected with remote side\n",
+- dev->name);
++ "%s: connected with remote side %s\n",
++ dev->name, privptr->conn->userid);
+ break;
+ case DEV_STATE_STOPWAIT:
+ printk(KERN_INFO
+@@ -1056,9 +1057,6 @@
+ static void
+ dev_action_conndown(fsm_instance *fi, int event, void *arg)
+ {
+- net_device *dev = (net_device *)arg;
+- netiucv_priv *privptr = dev->priv;
+- iucv_event ev;
+
+ #ifdef DEBUG
+ printk(KERN_DEBUG "%s() called\n", __FUNCTION__);
+@@ -1066,11 +1064,7 @@
+ switch (fsm_getstate(fi)) {
+ case DEV_STATE_RUNNING:
+ fsm_newstate(fi, DEV_STATE_STARTWAIT);
+- ev.conn = privptr->conn;
+- fsm_event(privptr->conn->fsm, CONN_EVENT_START, &ev);
+ break;
+- case DEV_STATE_STARTWAIT:
+- break;
+ case DEV_STATE_STOPWAIT:
+ fsm_newstate(fi, DEV_STATE_STOPPED);
+ break;
+@@ -1085,7 +1079,6 @@
+
+ { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop },
+ { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup },
+- { DEV_STATE_STARTWAIT, DEV_EVENT_CONDOWN, dev_action_conndown },
+
+ { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
+ { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
+@@ -1141,6 +1134,7 @@
+ "%s: Could not allocate tx_skb\n",
+ conn->netdev->name);
+ rc = -ENOMEM;
++ return rc;
+ } else {
+ skb_reserve(nskb, NETIUCV_HDRLEN);
+ memcpy(skb_put(nskb, skb->len),
+@@ -1156,10 +1150,7 @@
+ header.next = 0;
+ memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
+
+- conn->retry = 0;
+ fsm_newstate(conn->fsm, CONN_STATE_TX);
+- fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
+- CONN_EVENT_TIMER, conn);
+ conn->prof.send_stamp = xtime;
+
+ rc = iucv_send(conn->pathid, NULL, 0, 0, 1 /* single_flag */,
+@@ -1171,7 +1162,6 @@
+ conn->prof.tx_max_pending = conn->prof.tx_pending;
+ if (rc != 0) {
+ netiucv_priv *privptr;
+- fsm_deltimer(&conn->timer);
+ fsm_newstate(conn->fsm, CONN_STATE_IDLE);
+ conn->prof.tx_pending--;
+ privptr = (netiucv_priv *)conn->netdev->priv;
+@@ -1276,7 +1266,6 @@
+ */
+ if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
+ fsm_event(privptr->fsm, DEV_EVENT_START, dev);
+- dst_link_failure(skb);
+ dev_kfree_skb(skb);
+ privptr->stats.tx_dropped++;
+ privptr->stats.tx_errors++;
+@@ -1375,7 +1364,6 @@
+ file->private_data = kmalloc(CTRL_BUFSIZE, GFP_KERNEL);
+ if (file->private_data == NULL)
+ return -ENOMEM;
+- *(char *)file->private_data = '\0';
+ MOD_INC_USE_COUNT;
+ return 0;
+ }
+@@ -1427,7 +1415,6 @@
+ privptr->conn->max_buffsize = bs1;
+ if (!(dev->flags & IFF_RUNNING))
+ dev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
+- privptr->conn->flags |= CONN_FLAGS_BUFSIZE_CHANGED;
+
+ return count;
+ }
+@@ -1441,7 +1428,6 @@
+ netiucv_priv *privptr;
+ ssize_t ret = 0;
+ char *p = sbuf;
+- loff_t pos = *ppos;
+ int l;
+
+ if (!(dev = find_netdev_by_ino(ino)))
+@@ -1451,20 +1437,19 @@
+
+ privptr = (netiucv_priv *)dev->priv;
+
+- if (!*sbuf || pos == 0)
++ if (file->f_pos == 0)
+ sprintf(sbuf, "%d\n", privptr->conn->max_buffsize);
+
+ l = strlen(sbuf);
+ p = sbuf;
+- if (pos == (unsigned)pos && pos < l) {
+- p += pos;
++ if (file->f_pos < l) {
++ p += file->f_pos;
+ l = strlen(p);
+ ret = (count > l) ? l : count;
+ if (copy_to_user(buf, p, ret))
+ return -EFAULT;
+ }
+- pos += ret;
+- *ppos = pos;
++ file->f_pos += ret;
+ return ret;
+ }
+
+@@ -1474,7 +1459,6 @@
+ file->private_data = kmalloc(CTRL_BUFSIZE, GFP_KERNEL);
+ if (file->private_data == NULL)
+ return -ENOMEM;
+- *(char *)file->private_data = '\0';
+ MOD_INC_USE_COUNT;
+ return 0;
+ }
+@@ -1539,7 +1523,6 @@
+ netiucv_priv *privptr;
+ ssize_t ret = 0;
+ char *p = sbuf;
+- loff_t pos = *ppos;
+ int l;
+
+ if (!(dev = find_netdev_by_ino(ino)))
+@@ -1550,20 +1533,20 @@
+ privptr = (netiucv_priv *)dev->priv;
+
+
+- if (!*sbuf || pos == 0)
++ if (file->f_pos == 0)
+ sprintf(sbuf, "%s\n",
+ netiucv_printname(privptr->conn->userid));
+
+ l = strlen(sbuf);
+ p = sbuf;
+- if (pos == (unsigned)pos && pos < l) {
+- p += pos;
++ if (file->f_pos < l) {
++ p += file->f_pos;
+ l = strlen(p);
+ ret = (count > l) ? l : count;
+ if (copy_to_user(buf, p, ret))
+ return -EFAULT;
+- *ppos = pos + ret;
+ }
++ file->f_pos += ret;
+ return ret;
+ }
+
+@@ -1575,7 +1558,6 @@
+ file->private_data = kmalloc(STATS_BUFSIZE, GFP_KERNEL);
+ if (file->private_data == NULL)
+ return -ENOMEM;
+- *(char *)file->private_data = '\0';
+ MOD_INC_USE_COUNT;
+ return 0;
+ }
+@@ -1606,7 +1588,6 @@
+ netiucv_stat_read(struct file *file, char *buf, size_t count, loff_t *off)
+ {
+ unsigned int ino = ((struct inode *)file->f_dentry->d_inode)->i_ino;
+- loff_t pos = *ppos;
+ char *sbuf = (char *)file->private_data;
+ net_device *dev;
+ netiucv_priv *privptr;
+@@ -1621,7 +1602,7 @@
+
+ privptr = (netiucv_priv *)dev->priv;
+
+- if (!*sbuf || pos == 0) {
++ if (file->f_pos == 0) {
+ p += sprintf(p, "Device FSM state: %s\n",
+ fsm_getstate_str(privptr->fsm));
+ p += sprintf(p, "Connection FSM state: %s\n",
+@@ -1645,14 +1626,14 @@
+ }
+ l = strlen(sbuf);
+ p = sbuf;
+- if (pos == (unsigned)pos && pos < l) {
+- p += pos;
++ if (file->f_pos < l) {
++ p += file->f_pos;
+ l = strlen(p);
+ ret = (count > l) ? l : count;
+ if (copy_to_user(buf, p, ret))
+ return -EFAULT;
+- *ppos = pos + ret;
+ }
++ file->f_pos += ret;
+ return ret;
+ }
+
+@@ -2059,7 +2040,7 @@
+ static void
+ netiucv_banner(void)
+ {
+- char vbuf[] = "$Revision: 1.23 $";
++ char vbuf[] = "$Revision: 1.21.8.6 $";
+ char *version = vbuf;
+
+ if ((version = strchr(version, ':'))) {
+=== drivers/s390/net/ctcmpc.h
+==================================================================
+--- drivers/s390/net/ctcmpc.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/net/ctcmpc.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,52 @@
++/*
++ * $Id: ctcmpc.h,v 1.1.2.1 2004/10/04 13:28:55 ptiedem Exp $
++ *
++ * CTC / ESCON network driver, mpc interface.
++ *
++ * Copyright (C) 2003 IBM United States, IBM Corporation
++ * Author(s): Belinda Thompson (belindat at us.ibm.com)
++ * Andy Richter (richtera at us.ibm.com)
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ * RELEASE-TAG: CTCMPC/ESCON network driver $Revision: 1.1.2.1 $
++ */
++
++#ifndef _CTCMPC_H_
++#define _CTCMPC_H_
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
++typedef struct net_device net_device;
++#else
++typedef struct device net_device;
++#endif
++
++typedef struct sk_buff sk_buff;
++typedef void (*callbacktypei2)(int,int); /* void (*void)(int,int) */
++typedef void (*callbacktypei3)(int,int,int); /* void (*void)(int,int,int) */
++
++/* port_number is the mpc device number: 0, 1, 2, etc.; mpc2 is port_number 2 */
++/* passive open: just wait for the XID2 exchange */
++/* ctc_mpc_alloc_channel(port_number,
++          void(*callback)(port_number,max_write_size)) */
++extern int ctc_mpc_alloc_channel(int,callbacktypei2);
++/* active open: alloc, then send XID2 */
++/* ctc_mpc_establish_connectivity(port_number,
++          void(*callback)(port_number,rc,max_write_size)) */
++extern void ctc_mpc_establish_connectivity(int,callbacktypei3);
++extern void ctc_mpc_dealloc_ch(int);
++extern void ctc_mpc_flow_control(int,int);
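++
++/*
++ * Typical call sequence for an exploiter of this interface (a sketch,
++ * port numbers are illustrative): ctc_mpc_alloc_channel(0, cb2) for the
++ * passive open, ctc_mpc_establish_connectivity(0, cb3) for the active
++ * open, ctc_mpc_flow_control(0, on_off) while running, and finally
++ * ctc_mpc_dealloc_ch(0) to tear the channel down again.
++ */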
++
++#endif
+=== drivers/s390/net/ctcmain.c
+==================================================================
+--- drivers/s390/net/ctcmain.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/net/ctcmain.c (/trunk/2.4.27) (revision 52)
+@@ -1,5 +1,5 @@
+ /*
+- * $Id: ctcmain.c,v 1.63 2003/10/22 19:32:57 felfert Exp $
++ * $Id: ctcmain.c,v 1.59.4.3 2003/10/22 20:14:47 felfert Exp $
+ *
+ * CTC / ESCON network driver
+ *
+@@ -35,7 +35,7 @@
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+- * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.63 $
++ * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.59.4.3 $
+ *
+ */
+
+@@ -419,7 +419,7 @@
+ */
+ static void print_banner(void) {
+ static int printed = 0;
+- char vbuf[] = "$Revision: 1.63 $";
++ char vbuf[] = "$Revision: 1.59.4.3 $";
+ char *version = vbuf;
+
+ if (printed)
+@@ -2934,7 +2934,6 @@
+ file->private_data = kmalloc(CTRL_BUFSIZE, GFP_KERNEL);
+ if (file->private_data == NULL)
+ return -ENOMEM;
+- *(char *)file->private_data = '\0';
+ MOD_INC_USE_COUNT;
+ return 0;
+ }
+@@ -3000,7 +2999,6 @@
+ ctc_priv *privptr;
+ ssize_t ret = 0;
+ char *p = sbuf;
+- loff_t pos = *off;
+ int l;
+
+ if (!(dev = find_netdev_by_ino(ino)))
+@@ -3010,19 +3008,19 @@
+
+ privptr = (ctc_priv *)dev->priv;
+
+- if (!*sbuf || pos == 0)
++ if (file->f_pos == 0)
+ sprintf(sbuf, "%d\n", privptr->channel[READ]->max_bufsize);
+
+ l = strlen(sbuf);
+ p = sbuf;
+- if (pos == (unsigned)pos && pos < l) {
+- p += pos;
++ if (file->f_pos < l) {
++ p += file->f_pos;
+ l = strlen(p);
+ ret = (count > l) ? l : count;
+ if (copy_to_user(buf, p, ret))
+ return -EFAULT;
+- *off = pos + ret;
+ }
++ file->f_pos += ret;
+ return ret;
+ }
+
+@@ -3086,7 +3084,6 @@
+ ctc_priv *privptr;
+ ssize_t ret = 0;
+ char *p = sbuf;
+- loff_t pos = *off;
+ int l;
+
+ if (!(dev = find_netdev_by_ino(ino)))
+@@ -3096,19 +3093,19 @@
+
+ privptr = (ctc_priv *)dev->priv;
+
+- if (!*sbus || pos == 0)
++ if (file->f_pos == 0)
+ sprintf(sbuf, "0x%02x\n", loglevel);
+
+ l = strlen(sbuf);
+ p = sbuf;
+- if (pos == (unsigned)pos && pos < l) {
+- p += pos;
++ if (file->f_pos < l) {
++ p += file->f_pos;
+ l = strlen(p);
+ ret = (count > l) ? l : count;
+ if (copy_to_user(buf, p, ret))
+ return -EFAULT;
+- *off = pos + ret;
+ }
++ file->f_pos += ret;
+ return ret;
+ }
+
+@@ -3119,7 +3116,6 @@
+ file->private_data = kmalloc(STATS_BUFSIZE, GFP_KERNEL);
+ if (file->private_data == NULL)
+ return -ENOMEM;
+- *(char *)file->private_data = '\0';
+ MOD_INC_USE_COUNT;
+ return 0;
+ }
+@@ -3159,7 +3155,6 @@
+ ctc_priv *privptr;
+ ssize_t ret = 0;
+ char *p = sbuf;
+- loff_t pos = *off;
+ int l;
+
+ if (!(dev = find_netdev_by_ino(ino)))
+@@ -3169,7 +3164,7 @@
+
+ privptr = (ctc_priv *)dev->priv;
+
+- if (!*sbus || pos == 0) {
++ if (file->f_pos == 0) {
+ p += sprintf(p, "Device FSM state: %s\n",
+ fsm_getstate_str(privptr->fsm));
+ p += sprintf(p, "RX channel FSM state: %s\n",
+@@ -3191,14 +3186,14 @@
+ }
+ l = strlen(sbuf);
+ p = sbuf;
+- if (pos == (unsigned)pos && pos < l) {
+- p += pos;
++ if (file->f_pos < l) {
++ p += file->f_pos;
+ l = strlen(p);
+ ret = (count > l) ? l : count;
+ if (copy_to_user(buf, p, ret))
+ return -EFAULT;
+- *off = pos + ret;
+ }
++ file->f_pos += ret;
+ return ret;
+ }
+
+=== drivers/s390/net/lcs.c
+==================================================================
+--- drivers/s390/net/lcs.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/net/lcs.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,2192 @@
++/*
++ * linux/drivers/s390/net/lcs.c
++ *
++ * Linux for S/390 Lan Channel Station Network Driver
++ *
++ * Copyright (C) 1999-2001 IBM Deutschland Entwicklung GmbH,
++ * IBM Corporation
++ * Author(s): Original Code written by
++ * DJ Barrow (djbarrow at de.ibm.com,barrow_dj at yahoo.com)
++ * Rewritten by
++ * Frank Pavlic (pavlic at de.ibm.com) and
++ * Martin Schwidefsky <schwidefsky at de.ibm.com>
++ *
++ * $Revision: 1.132.20.6 $ $Date: 2004/11/24 10:17:56 $
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/if.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/trdevice.h>
++#include <linux/fddidevice.h>
++#include <linux/inetdevice.h>
++#include <linux/in.h>
++#include <linux/igmp.h>
++#include <net/arp.h>
++#include <net/ip.h>
++
++#include <asm/debug.h>
++#include <asm/idals.h>
++#include <asm/timex.h>
++
++#include "lcs.h"
++
++#if !defined(CONFIG_CHANDEV)
++#error Cannot compile lcs.c without chandev support.
++#endif
++
++#if !defined(CONFIG_NET_ETHERNET) && \
++ !defined(CONFIG_TR) && !defined(CONFIG_FDDI)
++#error Cannot compile lcs.c without some net devices switched on.
++#endif
++
++/**
++ * initialization string for output
++ */
++#define VERSION_LCS_C "$Revision: 1.132.20.6 $"
++
++static const char *version = "LCS driver (" VERSION_LCS_C "/" VERSION_LCS_H ")";
++static const char *cardname = "S390 Lan Channel Station Interface";
++static char debug_buffer[255];
++
++/**
++ * Some prototypes.
++ */
++static void lcs_irq(int, void *, struct pt_regs *);
++static void lcs_tasklet(unsigned long);
++static void lcs_start_kernel_thread(struct lcs_card *card);
++static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
++static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
++
++/**
++ * Debug Facility Stuff
++ */
++static debug_info_t *lcs_dbf_setup;
++static debug_info_t *lcs_dbf_trace;
++
++/**
++ * LCS Debug Facility functions
++ */
++static void
++lcs_unregister_debug_facility(void)
++{
++ if (lcs_dbf_setup)
++ debug_unregister(lcs_dbf_setup);
++ if (lcs_dbf_trace)
++ debug_unregister(lcs_dbf_trace);
++}
++
++static int
++lcs_register_debug_facility(void)
++{
++ lcs_dbf_setup = debug_register("lcs_setup", 1, 1, 8);
++ lcs_dbf_trace = debug_register("lcs_trace", 1, 2, 8);
++ if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) {
++ PRINT_ERR("Not enough memory for debug facility.\n");
++ lcs_unregister_debug_facility();
++ return -ENOMEM;
++ }
++ debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view);
++ debug_set_level(lcs_dbf_setup, 5);
++ debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view);
++ debug_set_level(lcs_dbf_trace, 3);
++ return 0;
++}
++
++/**
++ * Allocate io buffers.
++ */
++static int
++lcs_alloc_channel(struct lcs_channel *channel)
++{
++ int cnt;
++
++ LCS_DBF_TEXT(2, setup, "ichalloc");
++ memset(channel, 0, sizeof(struct lcs_channel));
++ for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
++                /* alloc memory for io buffer */
++ channel->iob[cnt].data = (void *)
++ kmalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL);
++ if (channel->iob[cnt].data == NULL)
++ break;
++ memset(channel->iob[cnt].data, 0, LCS_IOBUFFERSIZE);
++ channel->iob[cnt].state = BUF_STATE_EMPTY;
++ }
++ if (cnt < LCS_NUM_BUFFS) {
++ /* Not all io buffers could be allocated. */
++ LCS_DBF_TEXT(3, setup, "echalloc");
++ while (cnt-- > 0)
++ kfree(channel->iob[cnt].data);
++ return -ENOMEM;
++ }
++ return 0;
++}
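++
++/*
++ * Note the rollback above: if any of the LCS_NUM_BUFFS DMA-capable io
++ * buffers cannot be allocated, everything allocated so far is freed and
++ * -ENOMEM is returned, so a channel is either fully populated or left
++ * untouched.
++ */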
++
++/**
++ * Free io buffers.
++ */
++static void
++lcs_free_channel(struct lcs_channel *channel)
++{
++ int cnt;
++
++ LCS_DBF_TEXT(2, setup, "ichfree");
++ for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++)
++ kfree(channel->iob[cnt].data);
++}
++
++/**
++ * LCS alloc memory for card and channels
++ */
++static struct lcs_card *
++lcs_alloc_card(void)
++{
++ struct lcs_card *card;
++ int rc;
++
++ LCS_DBF_TEXT(2, setup, "alloclcs");
++ card = kmalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA);
++ if (card == NULL)
++ return NULL;
++ memset(card, 0, sizeof(struct lcs_card));
++ card->lan_type = LCS_FRAME_TYPE_AUTO;
++ card->pkt_seq = 0;
++ /* Allocate io buffers for the read channel. */
++ rc = lcs_alloc_channel(&card->read);
++ if (rc){
++ LCS_DBF_TEXT(2, setup, "iccwerr");
++ kfree(card);
++ return NULL;
++ }
++ /* Allocate io buffers for the write channel. */
++ rc = lcs_alloc_channel(&card->write);
++ if (rc) {
++ LCS_DBF_TEXT(2, setup, "iccwerr");
++ lcs_free_channel(&card->read);
++ kfree(card);
++ return NULL;
++ }
++ LCS_DBF_HEX(2, setup, &card, sizeof(void*));
++ return card;
++}
++
++/**
++ * LCS free memory for card and channels.
++ */
++static void
++lcs_free_card(struct lcs_card *card)
++{
++ LCS_DBF_TEXT(2, setup, "remcard");
++ /* Free write channel buffers. */
++ lcs_free_channel(&card->write);
++ /* Free read channel buffers. */
++ lcs_free_channel(&card->read);
++ kfree(card);
++}
++
++/*
++ * Setup read channel.
++ */
++static void
++lcs_setup_read_ccws(struct lcs_card *card)
++{
++ int cnt;
++
++ LCS_DBF_TEXT(2, setup, "ireadccw");
++ /* Setup read ccws. */
++ memset(card->read.ccws, 0, sizeof (ccw1_t) * (LCS_NUM_BUFFS + 1));
++ for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
++ card->read.ccws[cnt].cmd_code = LCS_CCW_READ;
++ card->read.ccws[cnt].count = LCS_IOBUFFERSIZE;
++ card->read.ccws[cnt].flags =
++ CCW_FLAG_CC | CCW_FLAG_SLI | CCW_FLAG_PCI;
++ /*
++ * Note: we have allocated the buffer with GFP_DMA, so
++ * we do not need to do set_normalized_cda.
++ */
++ card->read.ccws[cnt].cda =
++ (__u32) __pa(card->read.iob[cnt].data);
++ ((struct lcs_header *)
++ card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
++ card->read.iob[cnt].callback = lcs_get_frames_cb;
++ card->read.iob[cnt].state = BUF_STATE_READY;
++ card->read.iob[cnt].count = LCS_IOBUFFERSIZE;
++ }
++ card->read.ccws[0].flags &= ~CCW_FLAG_PCI;
++ card->read.ccws[LCS_NUM_BUFFS - 1].flags &= ~CCW_FLAG_PCI;
++ card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND;
++ /* Last ccw is a tic (transfer in channel). */
++ card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
++ card->read.ccws[LCS_NUM_BUFFS].cda =
++ (__u32) __pa(card->read.ccws);
++ /* Set initial state of the read channel. */
++ card->read.state = CH_STATE_INIT;
++
++ card->read.io_idx = 0;
++ card->read.buf_idx = 0;
++}
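++
++/*
++ * The CCWs built above form a ring: LCS_NUM_BUFFS command-chained READ
++ * CCWs followed by a transfer-in-channel back to the first one, with
++ * the last data CCW carrying CCW_FLAG_SUSPEND so the channel program
++ * parks until the driver resumes it with fresh buffers.
++ */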
++
++static int
++lcs_setup_read(struct lcs_card *card)
++{
++ char dbf_text[15];
++ int rc;
++
++ LCS_DBF_TEXT(2, setup, "setpread");
++ /* Request irq for read channel. */
++ rc = chandev_request_irq(card->read.irq, (void *) lcs_irq,
++ 0, cardname, &card->read.devstat);
++ if (rc) {
++ sprintf(dbf_text, "chre%4x", card->read.irq);
++ LCS_DBF_TEXT(3, setup, dbf_text);
++ return rc;
++ }
++ lcs_setup_read_ccws(card);
++ /* Initialize read channel tasklet. */
++ card->read.irq_tasklet.data = (unsigned long) &card->read;
++ card->read.irq_tasklet.func = lcs_tasklet;
++ /* Initialize waitqueue. */
++ init_waitqueue_head(&card->read.wait_q);
++ return 0;
++}
++
++/*
++ * Setup write channel.
++ */
++static void
++lcs_setup_write_ccws(struct lcs_card *card)
++{
++ int cnt;
++
++ LCS_DBF_TEXT(2, setup, "iwritccw");
++ /* Setup write ccws. */
++        memset(card->write.ccws, 0, sizeof(ccw1_t) * (LCS_NUM_BUFFS + 1));
++ for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
++ card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE;
++ card->write.ccws[cnt].count = 0;
++ card->write.ccws[cnt].flags =
++ CCW_FLAG_SUSPEND | CCW_FLAG_CC | CCW_FLAG_SLI;
++ /*
++ * Note: we have allocated the buffer with GFP_DMA, so
++ * we do not need to do set_normalized_cda.
++ */
++ card->write.ccws[cnt].cda =
++ (__u32) __pa(card->write.iob[cnt].data);
++ }
++ /* Last ccw is a tic (transfer in channel). */
++ card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
++ card->write.ccws[LCS_NUM_BUFFS].cda =
++ (__u32) __pa(card->write.ccws);
++ /* Set initial state of the write channel. */
++        card->write.state = CH_STATE_INIT;
++
++ card->write.io_idx = 0;
++ card->write.buf_idx = 0;
++}
++
++static int
++lcs_setup_write(struct lcs_card *card)
++{
++ char dbf_text[15];
++ int rc;
++
++ LCS_DBF_TEXT(2, setup, "setpwrit");
++ /* Request irq for write channel. */
++ rc = chandev_request_irq(card->write.irq, (void *) lcs_irq,
++ 0, cardname, &card->write.devstat);
++ if (rc) {
++ sprintf(dbf_text,"chwr%4x", card->write.irq);
++ LCS_DBF_TEXT(3, setup, dbf_text);
++ return rc;
++ }
++ lcs_setup_write_ccws(card);
++ /* Initialize write channel tasklet. */
++ card->write.irq_tasklet.data = (unsigned long) &card->write;
++ card->write.irq_tasklet.func = lcs_tasklet;
++ /* Initialize waitqueue. */
++ init_waitqueue_head(&card->write.wait_q);
++ return 0;
++}
++
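++/*
++ * Kernel thread bookkeeping.  Three bit masks, all protected by
++ * mask_lock: thread_allowed_mask says which threads may run at all,
++ * thread_start_mask marks threads that have been requested via
++ * lcs_set_thread_start_bit, and thread_running_mask marks threads
++ * that are currently executing.  A thread is started by setting its
++ * start bit and scheduling kernel_thread_starter; the thread itself
++ * moves its bit from start to running via lcs_do_run_thread and
++ * clears the running bit again when it exits.
++ */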
++static void
++lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&card->mask_lock, flags);
++ card->thread_allowed_mask = threads;
++ spin_unlock_irqrestore(&card->mask_lock, flags);
++ wake_up(&card->wait_q);
++}
++
++static inline int
++lcs_threads_running(struct lcs_card *card, unsigned long threads)
++{
++ unsigned long flags;
++ int rc = 0;
++
++ spin_lock_irqsave(&card->mask_lock, flags);
++ rc = (card->thread_running_mask & threads);
++ spin_unlock_irqrestore(&card->mask_lock, flags);
++ return rc;
++}
++
++static int
++lcs_wait_for_threads(struct lcs_card *card, unsigned long threads)
++{
++ return wait_event_interruptible(card->wait_q,
++ lcs_threads_running(card, threads) == 0);
++}
++
++static inline int
++lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&card->mask_lock, flags);
++ if ( !(card->thread_allowed_mask & thread) ||
++ (card->thread_start_mask & thread) ) {
++ spin_unlock_irqrestore(&card->mask_lock, flags);
++ return -EPERM;
++ }
++ card->thread_start_mask |= thread;
++ spin_unlock_irqrestore(&card->mask_lock, flags);
++ return 0;
++}
++
++static void
++lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&card->mask_lock, flags);
++ card->thread_running_mask &= ~thread;
++ spin_unlock_irqrestore(&card->mask_lock, flags);
++ wake_up(&card->wait_q);
++}
++
++static inline int
++__lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
++{
++ unsigned long flags;
++ int rc = 0;
++
++ spin_lock_irqsave(&card->mask_lock, flags);
++ if (card->thread_start_mask & thread){
++ if ((card->thread_allowed_mask & thread) &&
++ !(card->thread_running_mask & thread)){
++ rc = 1;
++ card->thread_start_mask &= ~thread;
++ card->thread_running_mask |= thread;
++ } else
++ rc = -EPERM;
++ }
++ spin_unlock_irqrestore(&card->mask_lock, flags);
++ return rc;
++}
++
++static int
++lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
++{
++ int rc = 0;
++ wait_event(card->wait_q,
++ (rc = __lcs_do_run_thread(card, thread)) >= 0);
++ return rc;
++}
++
++static int
++lcs_do_start_thread(struct lcs_card *card, unsigned long thread)
++{
++ unsigned long flags;
++ int rc = 0;
++
++ spin_lock_irqsave(&card->mask_lock, flags);
++ LCS_DBF_TEXT_(4, trace, " %02x%02x%02x",
++ (u8) card->thread_start_mask,
++ (u8) card->thread_allowed_mask,
++ (u8) card->thread_running_mask);
++ rc = (card->thread_start_mask & thread);
++ spin_unlock_irqrestore(&card->mask_lock, flags);
++ return rc;
++}
++
++/*
++ * Cleanup channel.
++ */
++static void
++lcs_cleanup_channel(struct lcs_channel *channel)
++{
++ LCS_DBF_TEXT(2, setup, "cleanch");
++ /* Kill write channel tasklets. */
++ tasklet_kill(&channel->irq_tasklet);
++ /* Free irq. */
++ chandev_free_irq(channel->irq, &channel->devstat);
++}
++
++/**
++ * Initialize channels, card and state machines.
++ */
++static int
++lcs_setup_card(struct lcs_card *card)
++{
++ int rc;
++
++ LCS_DBF_TEXT(2, setup, "initcard");
++
++ rc = lcs_setup_read(card);
++ if (rc) {
++ PRINT_ERR("Could not initialize read channel\n");
++ return rc;
++ }
++ rc = lcs_setup_write(card);
++ if (rc) {
++ PRINT_ERR("Could not initialize write channel\n");
++ lcs_cleanup_channel(&card->read);
++ return rc;
++ }
++	/* Set the card's initial state. */
++	card->state = DEV_STATE_DOWN;
++	if (card->port_protocol_no != LCS_INVALID_PORT_NO)
++ card->portno = card->port_protocol_no;
++ else
++ card->portno = 0;
++ card->tx_buffer = NULL;
++ card->tx_emitted = 0;
++
++ /* Initialize kernel thread task used for LGW commands. */
++ card->kernel_thread_starter.routine = (void *) lcs_start_kernel_thread;
++ card->kernel_thread_starter.data = (void *) card;
++ card->thread_start_mask = 0;
++ card->thread_allowed_mask = 0;
++ card->thread_running_mask = 0;
++ init_waitqueue_head(&card->wait_q);
++ spin_lock_init(&card->lock);
++ spin_lock_init(&card->ipm_lock);
++ INIT_LIST_HEAD(&card->ipm_list);
++ INIT_LIST_HEAD(&card->kernel_thread_starter.list);
++ INIT_LIST_HEAD(&card->lancmd_waiters);
++ return 0;
++}
++
++static inline void
++lcs_clear_multicast_list(struct lcs_card *card)
++{
++#ifdef CONFIG_IP_MULTICAST
++ struct lcs_ipm_list *ipm;
++ unsigned long flags;
++
++ /* Free multicast list. */
++ LCS_DBF_TEXT(3, setup, "clmclist");
++ spin_lock_irqsave(&card->ipm_lock, flags);
++ while (!list_empty(&card->ipm_list)){
++ ipm = list_entry(card->ipm_list.next,
++ struct lcs_ipm_list, list);
++ list_del(&ipm->list);
++ if (ipm->ipm_state != LCS_IPM_STATE_SET_REQUIRED){
++ spin_unlock_irqrestore(&card->ipm_lock, flags);
++ lcs_send_delipm(card, ipm);
++ spin_lock_irqsave(&card->ipm_lock, flags);
++ }
++ kfree(ipm);
++ }
++ spin_unlock_irqrestore(&card->ipm_lock, flags);
++#endif
++}
++
++/**
++ * Cleanup channels, card and state machines.
++ */
++static void
++lcs_cleanup_card(struct lcs_card *card)
++{
++ LCS_DBF_TEXT(2, setup, "cleancrd");
++ kfree(card->dev);
++ /* Cleanup channels. */
++ lcs_cleanup_channel(&card->write);
++ lcs_cleanup_channel(&card->read);
++}
++
++/**
++ * Start channel.
++ */
++static int
++lcs_start_channel(struct lcs_channel *channel)
++{
++ char dbf_text[15];
++ unsigned long flags;
++ int rc;
++
++ sprintf(dbf_text,"ssch%4x", channel->irq);
++ LCS_DBF_TEXT(3, trace, dbf_text);
++ spin_lock_irqsave(get_irq_lock(channel->irq), flags);
++ rc = do_IO(channel->irq, channel->ccws + channel->io_idx, 0,
++ 0, DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND);
++ if (rc == 0)
++ channel->state = CH_STATE_RUNNING;
++ spin_unlock_irqrestore(get_irq_lock(channel->irq), flags);
++ if (rc) {
++ sprintf(dbf_text,"essc%4x", channel->irq);
++ LCS_DBF_TEXT(3, trace, dbf_text);
++ PRINT_ERR("Error in starting channel!\n");
++ }
++ return rc;
++}
++
++/**
++ * Stop channel.
++ */
++static int
++lcs_stop_channel(struct lcs_channel *channel)
++{
++ char dbf_text[15];
++ unsigned long flags;
++ int rc;
++
++ if (channel->state == CH_STATE_STOPPED)
++ return 0;
++ sprintf(dbf_text,"hsch%4x", channel->irq);
++ LCS_DBF_TEXT(3, trace, dbf_text);
++ channel->state = CH_STATE_INIT;
++ spin_lock_irqsave(get_irq_lock(channel->irq), flags);
++ rc = halt_IO(channel->irq, (addr_t) channel, 0);
++ spin_unlock_irqrestore(get_irq_lock(channel->irq), flags);
++ if (rc) {
++ sprintf(dbf_text,"ehsc%4x", channel->irq);
++ LCS_DBF_TEXT(3, trace, dbf_text);
++ return rc;
++ }
++	/* Asynchronous halt initiated. Wait for its completion. */
++ wait_event(channel->wait_q, (channel->state == CH_STATE_HALTED));
++ return 0;
++}
++
++/**
++ * start read and write channel
++ */
++static int
++lcs_start_channels(struct lcs_card *card)
++{
++ int rc;
++
++ LCS_DBF_TEXT(3, trace, "chstart");
++ /* start read channel */
++ rc = lcs_start_channel(&card->read);
++ if (rc)
++ return rc;
++ /* start write channel */
++ rc = lcs_start_channel(&card->write);
++ if (rc)
++ lcs_stop_channel(&card->read);
++ return rc;
++}
++
++/**
++ * stop read and write channel
++ */
++static int
++lcs_stop_channels(struct lcs_card *card)
++{
++ LCS_DBF_TEXT(3, trace, "chhalt");
++ lcs_stop_channel(&card->read);
++ lcs_stop_channel(&card->write);
++ return 0;
++}
++
++/**
++ * Get empty buffer.
++ */
++static struct lcs_buffer *
++__lcs_get_buffer(struct lcs_channel *channel)
++{
++ int index;
++
++ LCS_DBF_TEXT(5, trace, "_getbuff");
++ index = channel->io_idx;
++ do {
++ if (channel->iob[index].state == BUF_STATE_EMPTY) {
++ channel->iob[index].state = BUF_STATE_LOCKED;
++ return channel->iob + index;
++ }
++ index = (index + 1) & (LCS_NUM_BUFFS - 1);
++ } while (index != channel->io_idx);
++ return NULL;
++}
++
++static struct lcs_buffer *
++lcs_get_buffer(struct lcs_channel *channel)
++{
++ struct lcs_buffer *buffer;
++ unsigned long flags;
++
++ LCS_DBF_TEXT(5, trace, "getbuff");
++ spin_lock_irqsave(get_irq_lock(channel->irq), flags);
++ buffer = __lcs_get_buffer(channel);
++ spin_unlock_irqrestore(get_irq_lock(channel->irq), flags);
++ return buffer;
++}
++
++/**
++ * Resume channel program if the channel is suspended.
++ */
++static int
++__lcs_resume_channel(struct lcs_channel *channel)
++{
++ int rc;
++
++ if (channel->state != CH_STATE_SUSPENDED)
++ return 0;
++ if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
++ return 0;
++ LCS_DBF_TEXT_(5, trace, "rsch%4x",channel->irq);
++ rc = resume_IO(channel->irq);
++ if (rc) {
++ LCS_DBF_TEXT_(4, trace, "ersc%4x",channel->irq);
++ PRINT_ERR("Error in lcs_resume_channel: rc=%d\n",rc);
++ } else
++ channel->state = CH_STATE_RUNNING;
++ return rc;
++
++}
++
++/**
++ * Make a buffer ready for processing.
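++ *
++ * The suspend bit of this buffer is only cleared while the next
++ * buffer still carries its own suspend bit, so the channel always
++ * has a place to stop; if the previous buffer is already running
++ * (suspend bit clear) the PCI flag is set as well to guarantee an
++ * interrupt for this buffer.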
++ */
++static inline void
++__lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
++{
++ int prev, next;
++
++ LCS_DBF_TEXT(5, trace, "rdybits");
++ prev = (index - 1) & (LCS_NUM_BUFFS - 1);
++ next = (index + 1) & (LCS_NUM_BUFFS - 1);
++ /* Check if we may clear the suspend bit of this buffer. */
++ if (channel->ccws[next].flags & CCW_FLAG_SUSPEND) {
++ /* Check if we have to set the PCI bit. */
++ if (!(channel->ccws[prev].flags & CCW_FLAG_SUSPEND))
++ /* Suspend bit of the previous buffer is not set. */
++ channel->ccws[index].flags |= CCW_FLAG_PCI;
++ /* Suspend bit of the next buffer is set. */
++ channel->ccws[index].flags &= ~CCW_FLAG_SUSPEND;
++ }
++}
++
++static int
++lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
++{
++ unsigned long flags;
++ int index, rc;
++
++ LCS_DBF_TEXT(5, trace, "rdybuff");
++ if (buffer->state != BUF_STATE_LOCKED &&
++ buffer->state != BUF_STATE_PROCESSED)
++ BUG();
++ spin_lock_irqsave(get_irq_lock(channel->irq), flags);
++ buffer->state = BUF_STATE_READY;
++ index = buffer - channel->iob;
++ /* Set length. */
++ channel->ccws[index].count = buffer->count;
++ /* Check relevant PCI/suspend bits. */
++ __lcs_ready_buffer_bits(channel, index);
++ rc = __lcs_resume_channel(channel);
++ spin_unlock_irqrestore(get_irq_lock(channel->irq), flags);
++ return rc;
++}
++
++/**
++ * Mark the buffer as processed. Take care of the suspend bit
++ * of the previous buffer. This function is called from
++ * interrupt context, so the lock must not be taken.
++ */
++static int
++__lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
++{
++ int index, prev, next;
++
++ LCS_DBF_TEXT(5, trace, "prcsbuff");
++ if (buffer->state != BUF_STATE_READY)
++ BUG();
++ buffer->state = BUF_STATE_PROCESSED;
++ index = buffer - channel->iob;
++ prev = (index - 1) & (LCS_NUM_BUFFS - 1);
++ next = (index + 1) & (LCS_NUM_BUFFS - 1);
++ /* Set the suspend bit and clear the PCI bit of this buffer. */
++ channel->ccws[index].flags |= CCW_FLAG_SUSPEND;
++ channel->ccws[index].flags &= ~CCW_FLAG_PCI;
++ /* Check the suspend bit of the previous buffer. */
++ if (channel->iob[prev].state == BUF_STATE_READY) {
++ /*
++ * Previous buffer is in state ready. It might have
++ * happened in lcs_ready_buffer that the suspend bit
++ * has not been cleared to avoid an endless loop.
++ * Do it now.
++ */
++ __lcs_ready_buffer_bits(channel, prev);
++ }
++ /* Clear PCI bit of next buffer. */
++ channel->ccws[next].flags &= ~CCW_FLAG_PCI;
++ return __lcs_resume_channel(channel);
++}
++
++/**
++ * Put a processed buffer back to state empty.
++ */
++static void
++lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
++{
++ unsigned long flags;
++
++ LCS_DBF_TEXT(5, trace, "relbuff");
++ if (buffer->state != BUF_STATE_LOCKED &&
++ buffer->state != BUF_STATE_PROCESSED)
++ BUG();
++ spin_lock_irqsave(get_irq_lock(channel->irq), flags);
++ buffer->state = BUF_STATE_EMPTY;
++ spin_unlock_irqrestore(get_irq_lock(channel->irq), flags);
++}
++
++/**
++ * Get buffer for a lan command.
++ */
++static struct lcs_buffer *
++lcs_get_lancmd(struct lcs_card *card, int count)
++{
++ struct lcs_buffer *buffer;
++ struct lcs_cmd *cmd;
++
++ LCS_DBF_TEXT(4, trace, "getlncmd");
++
++ /* Get buffer and wait if none is available. */
++ wait_event(card->write.wait_q,
++ ((buffer = lcs_get_buffer(&card->write)) != NULL));
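++	/* Frames in a buffer are chained by their offset fields; a zero
++	 * offset word after the command terminates the chain. */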
++ count += sizeof(struct lcs_header);
++ *(__u16 *)(buffer->data + count) = 0;
++ buffer->count = count + sizeof(__u16);
++ buffer->callback = lcs_release_buffer;
++ cmd = (struct lcs_cmd *) buffer->data;
++ cmd->offset = count;
++ cmd->type = LCS_FRAME_TYPE_CONTROL;
++ cmd->slot = 0;
++ return buffer;
++}
++
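++/*
++ * Replies are reference counted.  lcs_send_lancmd holds the initial
++ * reference; the notifier and the timeout handler take a temporary
++ * one while they unlink the reply and wake the waiter.
++ */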
++static void
++lcs_get_reply(struct lcs_reply *reply)
++{
++ atomic_inc(&reply->refcnt);
++}
++
++static void
++lcs_put_reply(struct lcs_reply *reply)
++{
++	if (atomic_dec_and_test(&reply->refcnt))
++		kfree(reply);
++}
++
++static struct lcs_reply *
++lcs_alloc_reply(struct lcs_cmd *cmd)
++{
++ struct lcs_reply *reply;
++
++ LCS_DBF_TEXT(4, trace, "getreply");
++
++ reply = kmalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
++ if (!reply)
++ return NULL;
++ memset(reply,0,sizeof(struct lcs_reply));
++ atomic_set(&reply->refcnt,1);
++ reply->sequence_no = cmd->sequence_no;
++ reply->received = 0;
++ reply->rc = 0;
++ init_waitqueue_head(&reply->wait_q);
++
++ return reply;
++}
++
++
++/**
++ * Notifier function for lancmd replies. Called from read irq.
++ */
++static void
++lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
++{
++ struct list_head *l, *n;
++ struct lcs_reply *reply;
++
++ LCS_DBF_TEXT(4, trace, "notiwait");
++ spin_lock(&card->lock);
++ list_for_each_safe(l, n, &card->lancmd_waiters) {
++ reply = list_entry(l, struct lcs_reply, list);
++ if (reply->sequence_no == cmd->sequence_no) {
++ lcs_get_reply(reply);
++ list_del_init(&reply->list);
++ if (reply->callback != NULL)
++ reply->callback(card, cmd);
++ reply->received = 1;
++ reply->rc = cmd->return_code;
++ wake_up(&reply->wait_q);
++ lcs_put_reply(reply);
++ break;
++ }
++ }
++ spin_unlock(&card->lock);
++}
++
++/**
++ * Timeout handler for lan commands.
++ */
++void
++lcs_lancmd_timeout(unsigned long data)
++{
++ struct lcs_reply *reply, *list_reply;
++ struct list_head *l, *n;
++ unsigned long flags;
++
++ LCS_DBF_TEXT(4, trace, "timeout");
++ list_reply = (struct lcs_reply *) data;
++ spin_lock_irqsave(&list_reply->card->lock, flags);
++ list_for_each_safe(l, n, &list_reply->card->lancmd_waiters) {
++ reply = list_entry(l, struct lcs_reply, list);
++ if (reply == list_reply) {
++ lcs_get_reply(reply);
++ list_del_init(&reply->list);
++ spin_unlock_irqrestore(&list_reply->card->lock, flags);
++ reply->received = 1;
++ reply->rc = -ETIME;
++ wake_up(&reply->wait_q);
++ lcs_put_reply(reply);
++ return;
++ }
++ }
++ spin_unlock_irqrestore(&list_reply->card->lock, flags);
++}
++
++static int
++lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
++ void (*reply_callback)(struct lcs_card *, struct lcs_cmd *))
++{
++ struct lcs_reply *reply;
++ struct lcs_cmd *cmd;
++ struct timer_list timer;
++ unsigned long flags;
++ int rc;
++
++ LCS_DBF_TEXT(4, trace, "sendcmd");
++ cmd = (struct lcs_cmd *) buffer->data;
++ cmd->sequence_no = ++card->sequence_no;
++ cmd->return_code = 0;
++ reply = lcs_alloc_reply(cmd);
++ if (!reply)
++ return -ENOMEM;
++ reply->callback = reply_callback;
++ reply->card = card;
++ spin_lock_irqsave(&card->lock, flags);
++ list_add_tail(&reply->list, &card->lancmd_waiters);
++ spin_unlock_irqrestore(&card->lock, flags);
++
++ buffer->callback = lcs_release_buffer;
++	rc = lcs_ready_buffer(&card->write, buffer);
++	if (rc) {
++		/* Undo the enqueue; nobody will wake this reply. */
++		spin_lock_irqsave(&card->lock, flags);
++		list_del_init(&reply->list);
++		spin_unlock_irqrestore(&card->lock, flags);
++		lcs_put_reply(reply);
++		return rc;
++	}
++ init_timer(&timer);
++ timer.function = lcs_lancmd_timeout;
++ timer.data = (unsigned long) reply;
++ timer.expires = jiffies + HZ*5;
++ add_timer(&timer);
++	wait_event(reply->wait_q, reply->received);
++	del_timer_sync(&timer);
++	/* Read the result before dropping the last reference. */
++	rc = reply->rc ? -EIO : 0;
++	lcs_put_reply(reply);
++	return rc;
++}
++
++/**
++ * LCS startup command
++ */
++static int
++lcs_send_startup(struct lcs_card *card, __u8 initiator)
++{
++ struct lcs_buffer *buffer;
++ struct lcs_cmd *cmd;
++
++ LCS_DBF_TEXT(2, trace, "startup");
++ buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
++ cmd = (struct lcs_cmd *) buffer->data;
++ cmd->cmd_code = LCS_CMD_STARTUP;
++ cmd->initiator = initiator;
++ cmd->cmd.lcs_startup.buff_size = LCS_IOBUFFERSIZE;
++ return lcs_send_lancmd(card, buffer, NULL);
++}
++
++/**
++ * LCS shutdown command
++ */
++static int
++lcs_send_shutdown(struct lcs_card *card)
++{
++ struct lcs_buffer *buffer;
++ struct lcs_cmd *cmd;
++
++ LCS_DBF_TEXT(2, trace, "shutdown");
++ buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
++ cmd = (struct lcs_cmd *) buffer->data;
++ cmd->cmd_code = LCS_CMD_SHUTDOWN;
++ cmd->initiator = LCS_INITIATOR_TCPIP;
++ return lcs_send_lancmd(card, buffer, NULL);
++}
++
++/**
++ * LCS lanstat command
++ */
++static void
++__lcs_lanstat_cb(struct lcs_card *card, struct lcs_cmd *cmd)
++{
++ LCS_DBF_TEXT(2, trace, "statcb");
++ memcpy(card->mac, cmd->cmd.lcs_lanstat_cmd.mac_addr,
++ LCS_MAC_LENGTH);
++}
++
++static int
++lcs_send_lanstat(struct lcs_card *card)
++{
++ struct lcs_buffer *buffer;
++ struct lcs_cmd *cmd;
++
++ LCS_DBF_TEXT(2, trace, "cmdstat");
++ buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
++ cmd = (struct lcs_cmd *) buffer->data;
++ /* Setup lanstat command. */
++ cmd->cmd_code = LCS_CMD_LANSTAT;
++ cmd->initiator = LCS_INITIATOR_TCPIP;
++ cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
++ cmd->cmd.lcs_std_cmd.portno = card->portno;
++ return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb);
++}
++
++/**
++ * send stoplan command
++ */
++static int
++lcs_send_stoplan(struct lcs_card *card, __u8 initiator)
++{
++ struct lcs_buffer *buffer;
++ struct lcs_cmd *cmd;
++
++ LCS_DBF_TEXT(2, trace, "cmdstpln");
++ buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
++ cmd = (struct lcs_cmd *) buffer->data;
++ cmd->cmd_code = LCS_CMD_STOPLAN;
++ cmd->initiator = initiator;
++ cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
++ cmd->cmd.lcs_std_cmd.portno = card->portno;
++ return lcs_send_lancmd(card, buffer, NULL);
++}
++
++/**
++ * send startlan command
++ */
++static void
++__lcs_send_startlan_cb(struct lcs_card *card, struct lcs_cmd *cmd)
++{
++ LCS_DBF_TEXT(2, trace, "srtlancb");
++ card->lan_type = cmd->cmd.lcs_std_cmd.lan_type;
++ card->portno = cmd->cmd.lcs_std_cmd.portno;
++}
++
++static int
++lcs_send_startlan(struct lcs_card *card, __u8 initiator)
++{
++ struct lcs_buffer *buffer;
++ struct lcs_cmd *cmd;
++
++ LCS_DBF_TEXT(2, trace, "cmdstaln");
++ buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
++ cmd = (struct lcs_cmd *) buffer->data;
++ cmd->cmd_code = LCS_CMD_STARTLAN;
++ cmd->initiator = initiator;
++ cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
++ cmd->cmd.lcs_std_cmd.portno = card->portno;
++ return lcs_send_lancmd(card, buffer, __lcs_send_startlan_cb);
++}
++
++#ifdef CONFIG_IP_MULTICAST
++/**
++ * send setipm command (Multicast)
++ */
++static int
++lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
++{
++ struct lcs_buffer *buffer;
++ struct lcs_cmd *cmd;
++
++ LCS_DBF_TEXT(2, trace, "cmdsetim");
++ buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
++ cmd = (struct lcs_cmd *) buffer->data;
++ cmd->cmd_code = LCS_CMD_SETIPM;
++ cmd->initiator = LCS_INITIATOR_TCPIP;
++ cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
++ cmd->cmd.lcs_qipassist.portno = card->portno;
++ cmd->cmd.lcs_qipassist.version = 4;
++ cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
++ memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
++ &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
++ return lcs_send_lancmd(card, buffer, NULL);
++}
++
++/**
++ * send delipm command (Multicast)
++ */
++static int
++lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
++{
++ struct lcs_buffer *buffer;
++ struct lcs_cmd *cmd;
++
++ LCS_DBF_TEXT(2, trace, "cmddelim");
++ buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
++ cmd = (struct lcs_cmd *) buffer->data;
++ cmd->cmd_code = LCS_CMD_DELIPM;
++ cmd->initiator = LCS_INITIATOR_TCPIP;
++ cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
++ cmd->cmd.lcs_qipassist.portno = card->portno;
++ cmd->cmd.lcs_qipassist.version = 4;
++ cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
++ memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
++ &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
++ return lcs_send_lancmd(card, buffer, NULL);
++}
++
++/**
++ * check if multicast is supported by LCS
++ */
++static void
++__lcs_check_multicast_cb(struct lcs_card *card, struct lcs_cmd *cmd)
++{
++ LCS_DBF_TEXT(2, trace, "chkmccb");
++ card->ip_assists_supported =
++ cmd->cmd.lcs_qipassist.ip_assists_supported;
++ card->ip_assists_enabled =
++ cmd->cmd.lcs_qipassist.ip_assists_enabled;
++}
++
++static int
++lcs_check_multicast_support(struct lcs_card *card)
++{
++ struct lcs_buffer *buffer;
++ struct lcs_cmd *cmd;
++ int rc;
++
++ LCS_DBF_TEXT(2, trace, "cmdqipa");
++ /* Send query ipassist. */
++ buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
++ cmd = (struct lcs_cmd *) buffer->data;
++ cmd->cmd_code = LCS_CMD_QIPASSIST;
++ cmd->initiator = LCS_INITIATOR_TCPIP;
++ cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
++ cmd->cmd.lcs_qipassist.portno = card->portno;
++ cmd->cmd.lcs_qipassist.version = 4;
++ cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
++ rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb);
++ if (rc != 0) {
++ PRINT_ERR("Query IPAssist failed. Assuming unsupported!\n");
++ return -EOPNOTSUPP;
++ }
++ /* Print out supported assists: IPv6 */
++ PRINT_INFO("LCS device %s %s IPv6 support\n", card->dev->name,
++ (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
++ "with" : "without");
++ /* Print out supported assist: Multicast */
++ PRINT_INFO("LCS device %s %s Multicast support\n", card->dev->name,
++ (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
++ "with" : "without");
++ if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT)
++ return 0;
++ return -EOPNOTSUPP;
++}
++
++/**
++ * set or del multicast address on LCS card
++ */
++static void
++lcs_fix_multicast_list(struct lcs_card *card)
++{
++ struct list_head failed_list;
++ struct list_head *l, *n;
++ struct lcs_ipm_list *ipm;
++ unsigned long flags;
++ int rc;
++
++ LCS_DBF_TEXT(4, trace, "fixipm");
++ INIT_LIST_HEAD(&failed_list);
++ spin_lock_irqsave(&card->ipm_lock, flags);
++list_modified:
++ list_for_each_safe(l, n, &card->ipm_list) {
++ ipm = list_entry(l, struct lcs_ipm_list, list);
++ switch (ipm->ipm_state) {
++ case LCS_IPM_STATE_SET_REQUIRED:
++ list_del_init(&ipm->list);
++ spin_unlock_irqrestore(&card->ipm_lock, flags);
++ rc = lcs_send_setipm(card, ipm);
++ spin_lock_irqsave(&card->ipm_lock, flags);
++ if (rc) {
++ PRINT_INFO("Adding multicast address failed."
++ "Table possibly full!\n");
++ list_add_tail(&ipm->list, &failed_list);
++ } else {
++ ipm->ipm_state = LCS_IPM_STATE_ON_CARD;
++ list_add_tail(&ipm->list, &card->ipm_list);
++ }
++ goto list_modified;
++ case LCS_IPM_STATE_DEL_REQUIRED:
++ list_del(&ipm->list);
++ spin_unlock_irqrestore(&card->ipm_lock, flags);
++ lcs_send_delipm(card, ipm);
++ spin_lock_irqsave(&card->ipm_lock, flags);
++ kfree(ipm);
++ goto list_modified;
++ case LCS_IPM_STATE_ON_CARD:
++ break;
++ }
++ }
++ list_for_each_entry(ipm, &failed_list, list) {
++ list_del_init(&ipm->list);
++ list_add_tail(&ipm->list, &card->ipm_list);
++ }
++ spin_unlock_irqrestore(&card->ipm_lock, flags);
++ if (card->state == DEV_STATE_UP)
++ netif_wake_queue(card->dev);
++}
++
++/**
++ * get mac address for the relevant Multicast address
++ */
++static void
++lcs_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev)
++{
++ LCS_DBF_TEXT(4, trace, "getmac");
++ if (dev->type == ARPHRD_IEEE802_TR)
++ ip_tr_mc_map(ipm, mac);
++ else
++ ip_eth_mc_map(ipm, mac);
++}
++
++static inline void
++lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
++{
++ struct ip_mc_list *im4;
++ struct list_head *l;
++ struct lcs_ipm_list *ipm;
++ unsigned long flags;
++ char buf[MAX_ADDR_LEN];
++
++ LCS_DBF_TEXT(4, trace, "remmclst");
++ spin_lock_irqsave(&card->ipm_lock, flags);
++ list_for_each(l, &card->ipm_list) {
++ ipm = list_entry(l, struct lcs_ipm_list, list);
++ for (im4 = in4_dev->mc_list; im4 != NULL; im4 = im4->next) {
++ lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
++ if ( (ipm->ipm.ip_addr == im4->multiaddr) &&
++ (memcmp(buf, &ipm->ipm.mac_addr,
++ LCS_MAC_LENGTH) == 0) )
++ break;
++ }
++ if (im4 == NULL)
++ ipm->ipm_state = LCS_IPM_STATE_DEL_REQUIRED;
++ }
++ spin_unlock_irqrestore(&card->ipm_lock, flags);
++}
++
++static inline struct lcs_ipm_list *
++lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf)
++{
++ struct lcs_ipm_list *tmp, *ipm = NULL;
++ struct list_head *l;
++ unsigned long flags;
++
++ LCS_DBF_TEXT(4, trace, "chkmcent");
++ spin_lock_irqsave(&card->ipm_lock, flags);
++ list_for_each(l, &card->ipm_list) {
++ tmp = list_entry(l, struct lcs_ipm_list, list);
++ if ( (tmp->ipm.ip_addr == im4->multiaddr) &&
++ (memcmp(buf, &tmp->ipm.mac_addr,
++ LCS_MAC_LENGTH) == 0) ) {
++ ipm = tmp;
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&card->ipm_lock, flags);
++ return ipm;
++}
++
++static inline void
++lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
++{
++
++ struct ip_mc_list *im4;
++ struct lcs_ipm_list *ipm;
++ char buf[MAX_ADDR_LEN];
++ unsigned long flags;
++
++ LCS_DBF_TEXT(4, trace, "setmclst");
++ for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
++ lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
++ ipm = lcs_check_addr_entry(card, im4, buf);
++ if (ipm != NULL)
++ continue; /* Address already in list. */
++ ipm = (struct lcs_ipm_list *)
++ kmalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
++ if (ipm == NULL) {
++ PRINT_INFO("Not enough memory to add "
++ "new multicast entry!\n");
++ break;
++ }
++ memset(ipm, 0, sizeof(struct lcs_ipm_list));
++ memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH);
++ ipm->ipm.ip_addr = im4->multiaddr;
++ ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED;
++ spin_lock_irqsave(&card->ipm_lock, flags);
++ list_add(&ipm->list, &card->ipm_list);
++ spin_unlock_irqrestore(&card->ipm_lock, flags);
++ }
++}
++
++/**
++ * register multicast addresses
++ */
++static int
++lcs_register_mc_addresses(void *data)
++{
++ struct lcs_card *card;
++ struct in_device *in4_dev;
++
++ card = (struct lcs_card *) data;
++ daemonize();
++
++ if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD))
++ return 0;
++ LCS_DBF_TEXT(4, trace, "regmulti");
++
++ in4_dev = in_dev_get(card->dev);
++ if (in4_dev == NULL)
++ goto out;
++ read_lock(&in4_dev->lock);
++ lcs_remove_mc_addresses(card, in4_dev);
++ lcs_set_mc_addresses(card, in4_dev);
++ read_unlock(&in4_dev->lock);
++ in_dev_put(in4_dev);
++
++ lcs_fix_multicast_list(card);
++out:
++ lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD);
++ return 0;
++}
++
++/**
++ * function called by net device to handle multicast address relevant things
++ */
++static void
++lcs_set_multicast_list(struct net_device *dev)
++{
++ struct lcs_card *card;
++
++ LCS_DBF_TEXT(4, trace, "setmulti");
++ card = (struct lcs_card *) dev->priv;
++
++ if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD))
++ schedule_task(&card->kernel_thread_starter);
++}
++
++#endif /* CONFIG_IP_MULTICAST */
++
++/**
++ * IRQ Handler for LCS channels
++ */
++static void
++lcs_irq(int irq, void *devstat, struct pt_regs *p)
++{
++ struct lcs_channel *channel;
++ devstat_t *stat;
++ int index;
++
++ stat = (devstat_t *) devstat;
++ channel = (struct lcs_channel *)
++ ((char *) devstat - offsetof(struct lcs_channel, devstat));
++ LCS_DBF_TEXT_(5, trace, "Rint%4x",irq);
++ LCS_DBF_TEXT_(5, trace, "%4x%4x",stat->cstat, stat->dstat);
++
++ /* How far in the ccw chain have we processed? */
++ if (channel->state != CH_STATE_INIT) {
++ index = (ccw1_t *) __va((addr_t) stat->cpa) - channel->ccws;
++ if ((stat->ii.irb.scsw.actl & SCSW_ACTL_SUSPENDED) ||
++		    (stat->cstat & SCHN_STAT_PCI))
++ /* Bloody io subsystem tells us lies about cpa... */
++ index = (index - 1) & (LCS_NUM_BUFFS - 1);
++ while (channel->io_idx != index) {
++ __lcs_processed_buffer(channel,
++ channel->iob + channel->io_idx);
++ channel->io_idx =
++ (channel->io_idx + 1) & (LCS_NUM_BUFFS - 1);
++ }
++ }
++
++ if ((stat->dstat & DEV_STAT_DEV_END) ||
++ (stat->dstat & DEV_STAT_CHN_END) ||
++ (stat->dstat & DEV_STAT_UNIT_CHECK))
++ /* Mark channel as stopped. */
++ channel->state = CH_STATE_STOPPED;
++ else if (stat->ii.irb.scsw.actl & SCSW_ACTL_SUSPENDED)
++ /* CCW execution stopped on a suspend bit. */
++ channel->state = CH_STATE_SUSPENDED;
++
++ if (stat->ii.irb.scsw.fctl & SCSW_FCTL_HALT_FUNC)
++ /* The channel has been stopped by halt_IO. */
++ channel->state = CH_STATE_HALTED;
++
++ /* Do the rest in the tasklet. */
++ tasklet_schedule(&channel->irq_tasklet);
++}
++
++/**
++ * Tasklet for IRQ handler
++ */
++static void
++lcs_tasklet(unsigned long data)
++{
++ unsigned long flags;
++ struct lcs_channel *channel;
++ struct lcs_buffer *iob;
++ int buf_idx;
++ int rc;
++
++ channel = (struct lcs_channel *) data;
++
++ LCS_DBF_TEXT_(5, trace, "tlet%4x",channel->irq);
++ /* Check for processed buffers. */
++ iob = channel->iob;
++ buf_idx = channel->buf_idx;
++ while (iob[buf_idx].state == BUF_STATE_PROCESSED) {
++ /* Do the callback thing. */
++ if (iob[buf_idx].callback != NULL)
++ iob[buf_idx].callback(channel, iob + buf_idx);
++ buf_idx = (buf_idx + 1) & (LCS_NUM_BUFFS - 1);
++ }
++ channel->buf_idx = buf_idx;
++
++ if (channel->state == CH_STATE_STOPPED)
++ // FIXME: what if rc != 0 ??
++ rc = lcs_start_channel(channel);
++ spin_lock_irqsave(get_irq_lock(channel->irq), flags);
++ if (channel->state == CH_STATE_SUSPENDED &&
++ channel->iob[channel->io_idx].state == BUF_STATE_READY) {
++ // FIXME: what if rc != 0 ??
++ rc = __lcs_resume_channel(channel);
++ }
++ spin_unlock_irqrestore(get_irq_lock(channel->irq), flags);
++
++ /* Something happened on the channel. Wake up waiters. */
++ wake_up(&channel->wait_q);
++}
++
++/**
++ * Finish current tx buffer and make it ready for transmit.
++ */
++static void
++__lcs_emit_txbuffer(struct lcs_card *card)
++{
++ LCS_DBF_TEXT(5, trace,"emittx");
++ *(__u16 *)(card->tx_buffer->data + card->tx_buffer->count) = 0;
++ card->tx_buffer->count += 2;
++ lcs_ready_buffer(&card->write, card->tx_buffer);
++ card->tx_buffer = NULL;
++ card->tx_emitted++;
++}
++
++/**
++ * Callback for finished tx buffers.
++ */
++static void
++lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
++{
++ struct lcs_card *card;
++
++ LCS_DBF_TEXT(5, trace,"txbuffcb");
++ /* Put buffer back to pool. */
++ lcs_release_buffer(channel, buffer);
++ card = (struct lcs_card *)
++ ((char *) channel - offsetof(struct lcs_card, write));
++ spin_lock(&card->lock);
++ card->tx_emitted--;
++ if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
++ /*
++ * Last running tx buffer has finished. Submit partially
++ * filled current buffer.
++ */
++ __lcs_emit_txbuffer(card);
++ spin_unlock(&card->lock);
++}
++
++/**
++ * Packet transmit function called by network stack
++ */
++static int
++__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
++ struct net_device *dev)
++{
++ struct lcs_header *header;
++
++ LCS_DBF_TEXT(5, trace,"hardxmit");
++ if (skb == NULL) {
++ card->stats.tx_dropped++;
++ card->stats.tx_errors++;
++ return -EIO;
++ }
++ if (card->state != DEV_STATE_UP) {
++ dst_link_failure(skb);
++ dev_kfree_skb(skb);
++ card->stats.tx_dropped++;
++ card->stats.tx_errors++;
++ card->stats.tx_carrier_errors++;
++ return 0;
++ }
++ if (netif_queue_stopped(dev) ) {
++ card->stats.tx_dropped++;
++ return -EBUSY;
++ }
++ if (card->tx_buffer != NULL &&
++ card->tx_buffer->count + sizeof(struct lcs_header) +
++ skb->len + sizeof(u16) > LCS_IOBUFFERSIZE)
++ /* skb too big for current tx buffer. */
++ __lcs_emit_txbuffer(card);
++ if (card->tx_buffer == NULL) {
++ /* Get new tx buffer */
++ card->tx_buffer = lcs_get_buffer(&card->write);
++ if (card->tx_buffer == NULL) {
++ card->stats.tx_dropped++;
++ return -EBUSY;
++ }
++ card->tx_buffer->callback = lcs_txbuffer_cb;
++ card->tx_buffer->count = 0;
++ }
++ header = (struct lcs_header *)
++ (card->tx_buffer->data + card->tx_buffer->count);
++ card->tx_buffer->count += skb->len + sizeof(struct lcs_header);
++ header->offset = card->tx_buffer->count;
++ header->type = card->lan_type;
++ header->slot = card->portno;
++ memcpy(header + 1, skb->data, skb->len);
++ card->stats.tx_bytes += skb->len;
++ card->stats.tx_packets++;
++ dev_kfree_skb(skb);
++ if (card->tx_emitted <= 0)
++		/* If this is the first tx buffer emit it immediately. */
++ __lcs_emit_txbuffer(card);
++ return 0;
++}
++
++static int
++lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct lcs_card *card;
++ int rc;
++
++ LCS_DBF_TEXT(5, trace, "pktxmit");
++ card = (struct lcs_card *) dev->priv;
++ spin_lock(&card->lock);
++ rc = __lcs_start_xmit(card, skb, dev);
++ spin_unlock(&card->lock);
++ return rc;
++}
++
++/**
++ * send startlan and lanstat command to make LCS device ready
++ */
++static int
++lcs_startlan_auto(struct lcs_card *card)
++{
++ int rc;
++
++ LCS_DBF_TEXT(2, trace,"strtauto");
++#ifdef CONFIG_NET_ETHERNET
++ card->lan_type = LCS_FRAME_TYPE_ENET;
++ rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
++ if (rc == 0)
++ return 0;
++
++#endif
++#ifdef CONFIG_TR
++ card->lan_type = LCS_FRAME_TYPE_TR;
++ rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
++ if (rc == 0)
++ return 0;
++#endif
++#ifdef CONFIG_FDDI
++ card->lan_type = LCS_FRAME_TYPE_FDDI;
++ rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
++ if (rc == 0)
++ return 0;
++#endif
++ return -EIO;
++}
++
++static int
++lcs_startlan(struct lcs_card *card)
++{
++ int rc, i;
++
++ LCS_DBF_TEXT(2, trace, "startlan");
++ rc = 0;
++ if (card->device_forced) {
++ card->portno = card->port_protocol_no;
++ if (card->lan_type == LCS_FRAME_TYPE_AUTO)
++ rc = lcs_startlan_auto(card);
++ else
++ rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
++ } else {
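++		/* Probe all ports, but try the hinted port first by
++		 * swapping it with port 0 in the iteration order. */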
++ for (i = 0; i <= card->max_port_no; i++) {
++ card->portno = i;
++ if (i == 0)
++ card->portno = card->hint_port_no;
++ else if (i == card->hint_port_no)
++ card->portno = 0;
++
++ if (card->lan_type != LCS_FRAME_TYPE_AUTO)
++ rc = lcs_send_startlan(card,
++ LCS_INITIATOR_TCPIP);
++ else
++ /* autodetecting lan type */
++ rc = lcs_startlan_auto(card);
++ if (rc == 0)
++ break;
++ }
++ }
++ if (rc == 0)
++ return lcs_send_lanstat(card);
++ return rc;
++}
++
++/**
++ * LCS detect function
++ * setup channels and make them I/O ready
++ */
++static int
++lcs_detect(struct lcs_card *card)
++{
++ int rc;
++
++ LCS_DBF_TEXT(2, setup,"lcsdetct");
++
++ /* start/reset card */
++ if (card->dev)
++ netif_stop_queue(card->dev);
++ rc = lcs_stop_channels(card);
++ if (rc == 0) {
++ rc = lcs_start_channels(card);
++ if (rc == 0) {
++ rc = lcs_send_startup(card, LCS_INITIATOR_TCPIP);
++ if (rc == 0)
++ rc = lcs_startlan(card);
++ }
++ }
++ if (rc == 0) {
++ card->state = DEV_STATE_UP;
++ } else {
++ card->state = DEV_STATE_DOWN;
++ card->write.state = CH_STATE_INIT;
++ card->read.state = CH_STATE_INIT;
++ }
++ return rc;
++}
++
++/**
++ * reset card
++ */
++static int
++lcs_resetcard(struct lcs_card *card)
++{
++ int retries;
++
++ LCS_DBF_TEXT(2, trace, "rescard");
++ for (retries = 0; retries < 10; retries++) {
++ if (lcs_detect(card) == 0) {
++ netif_wake_queue(card->dev);
++ card->state = DEV_STATE_UP;
++ PRINT_INFO("LCS device %s successfully restarted!\n",
++ card->dev->name);
++ return 0;
++ }
++ schedule_timeout(3 * HZ);
++ }
++ PRINT_ERR("Error in Reseting LCS card!\n");
++ return -EIO;
++}
++
++/**
++ * LCS Stop card
++ */
++static int
++lcs_stopcard(struct lcs_card *card)
++{
++ int rc;
++
++ LCS_DBF_TEXT(2, setup, "stopcard");
++ if (card->read.state != CH_STATE_STOPPED &&
++ card->write.state != CH_STATE_STOPPED &&
++ card->state == DEV_STATE_UP) {
++ lcs_clear_multicast_list(card);
++		rc = lcs_send_stoplan(card, LCS_INITIATOR_TCPIP);
++ rc = lcs_send_shutdown(card);
++ }
++ rc = lcs_stop_channels(card);
++ card->state = DEV_STATE_DOWN;
++ return rc;
++}
++
++/**
++ * LGW initiated commands
++ */
++static int
++lcs_lgw_startlan_thread(void *data)
++{
++ struct lcs_card *card;
++
++ card = (struct lcs_card *) data;
++ daemonize();
++
++ if (!lcs_do_run_thread(card, LCS_STARTLAN_THREAD))
++ return 0;
++ LCS_DBF_TEXT(2, trace, "lgwstpln");
++ if (card->dev)
++ netif_stop_queue(card->dev);
++ if (lcs_startlan(card) == 0) {
++ netif_wake_queue(card->dev);
++ card->state = DEV_STATE_UP;
++ PRINT_INFO("LCS Startlan for device %s succeeded!\n",
++ card->dev->name);
++
++ } else
++ PRINT_ERR("LCS Startlan for device %s failed!\n",
++ card->dev->name);
++ lcs_clear_thread_running_bit(card, LCS_STARTLAN_THREAD);
++ return 0;
++}
++
++/**
++ * Send startup command initiated by Lan Gateway
++ */
++static int
++lcs_lgw_startup_thread(void *data)
++{
++ int rc;
++
++ struct lcs_card *card;
++
++ card = (struct lcs_card *) data;
++ daemonize();
++
++ if (!lcs_do_run_thread(card, LCS_STARTUP_THREAD))
++ return 0;
++ LCS_DBF_TEXT(2, trace, "lgwstaln");
++ if (card->dev)
++ netif_stop_queue(card->dev);
++ rc = lcs_send_startup(card, LCS_INITIATOR_LGW);
++ if (rc != 0) {
++ PRINT_ERR("Startup for LCS device %s initiated " \
++ "by LGW failed!\nReseting card ...\n",
++ card->dev->name);
++ /* do a card reset */
++ rc = lcs_resetcard(card);
++ if (rc == 0)
++ goto Done;
++ }
++ rc = lcs_startlan(card);
++ if (rc == 0) {
++ netif_wake_queue(card->dev);
++ card->state = DEV_STATE_UP;
++ }
++Done:
++ if (rc == 0)
++ PRINT_INFO("LCS Startup for device %s succeeded!\n",
++ card->dev->name);
++ else
++ PRINT_ERR("LCS Startup for device %s failed!\n",
++ card->dev->name);
++ lcs_clear_thread_running_bit(card, LCS_STARTUP_THREAD);
++ return 0;
++}
++
++
++/**
++ * send stoplan command initiated by Lan Gateway
++ */
++static int
++lcs_lgw_stoplan_thread(void *data)
++{
++ struct lcs_card *card;
++ int rc;
++
++ card = (struct lcs_card *) data;
++ daemonize();
++
++ if (!lcs_do_run_thread(card, LCS_STOPLAN_THREAD))
++ return 0;
++ LCS_DBF_TEXT(2, trace, "lgwstop");
++ if (card->dev)
++ netif_stop_queue(card->dev);
++ if (lcs_send_stoplan(card, LCS_INITIATOR_LGW) == 0)
++ PRINT_INFO("Stoplan for %s initiated by LGW succeeded!\n",
++ card->dev->name);
++ else
++ PRINT_ERR("Stoplan %s initiated by LGW failed!\n",
++ card->dev->name);
++	/* Try to reset the card; stop it on failure. */
++ rc = lcs_resetcard(card);
++ if (rc != 0)
++ rc = lcs_stopcard(card);
++ lcs_clear_thread_running_bit(card, LCS_STOPLAN_THREAD);
++ return rc;
++}
++
++/**
++ * Kernel Thread helper functions for LGW initiated commands
++ */
++static void
++lcs_start_kernel_thread(struct lcs_card *card)
++{
++ LCS_DBF_TEXT(5, trace, "krnthrd");
++ if (lcs_do_start_thread(card, LCS_STARTUP_THREAD))
++ kernel_thread(lcs_lgw_startup_thread, (void *) card, SIGCHLD);
++ if (lcs_do_start_thread(card, LCS_STARTLAN_THREAD))
++ kernel_thread(lcs_lgw_startlan_thread, (void *) card, SIGCHLD);
++ if (lcs_do_start_thread(card, LCS_STOPLAN_THREAD))
++ kernel_thread(lcs_lgw_stoplan_thread, (void *) card, SIGCHLD);
++#ifdef CONFIG_IP_MULTICAST
++ if (lcs_do_start_thread(card, LCS_SET_MC_THREAD))
++ kernel_thread(lcs_register_mc_addresses, (void *) card, SIGCHLD);
++#endif
++}
++
++/**
++ * Process control frames.
++ */
++static void
++lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
++{
++ LCS_DBF_TEXT(5, trace, "getctrl");
++ if (cmd->initiator == LCS_INITIATOR_LGW) {
++ switch(cmd->cmd_code) {
++ case LCS_CMD_STARTUP:
++ if (!lcs_set_thread_start_bit(card,
++ LCS_STARTUP_THREAD))
++ schedule_task(&card->kernel_thread_starter);
++ break;
++ case LCS_CMD_STARTLAN:
++ if (!lcs_set_thread_start_bit(card,
++ LCS_STARTLAN_THREAD))
++ schedule_task(&card->kernel_thread_starter);
++ break;
++ case LCS_CMD_STOPLAN:
++ if (!lcs_set_thread_start_bit(card,
++ LCS_STOPLAN_THREAD))
++ schedule_task(&card->kernel_thread_starter);
++ break;
++ default:
++ PRINT_INFO("UNRECOGNIZED LGW COMMAND\n");
++ break;
++ }
++ } else
++ lcs_notify_lancmd_waiters(card, cmd);
++}
++
++/**
++ * Unpack network packet.
++ */
++static void
++lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
++{
++ struct sk_buff *skb;
++
++ LCS_DBF_TEXT(5, trace, "getskb");
++ if (card->dev == NULL ||
++ card->state != DEV_STATE_UP)
++ /* The card isn't up. Ignore the packet. */
++ return;
++
++ skb = dev_alloc_skb(skb_len);
++ if (skb == NULL) {
++ PRINT_ERR("LCS: alloc_skb failed for device=%s\n",
++ card->dev->name);
++ card->stats.rx_dropped++;
++ return;
++ }
++ skb->dev = card->dev;
++ memcpy(skb_put(skb, skb_len), skb_data, skb_len);
++ skb->protocol = card->lan_type_trans(skb, card->dev);
++ card->stats.rx_bytes += skb_len;
++ card->stats.rx_packets++;
++ *((__u32 *)skb->cb) = ++card->pkt_seq;
++ netif_rx(skb);
++}
++
++/**
++ * LCS main routine to get packets and lancmd replies from the buffers
++ */
++static void
++lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
++{
++ struct lcs_card *card;
++ struct lcs_header *lcs_hdr;
++ __u16 offset;
++
++ LCS_DBF_TEXT(5, trace, "lcsgtpkt");
++ lcs_hdr = (struct lcs_header *) buffer->data;
++ if (lcs_hdr->offset == LCS_ILLEGAL_OFFSET) {
++ LCS_DBF_TEXT(4, trace, "-eiogpkt");
++ return;
++ }
++ card = (struct lcs_card *)
++ ((char *) channel - offsetof(struct lcs_card, read));
++ offset = 0;
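++	/* Walk the frames in the buffer: each header's offset field
++	 * points at the next frame header; a zero offset ends the chain. */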
++ while (lcs_hdr->offset != 0) {
++ if (lcs_hdr->offset <= 0 ||
++ lcs_hdr->offset > LCS_IOBUFFERSIZE ||
++ lcs_hdr->offset < offset) {
++ /* Offset invalid. */
++ card->stats.rx_length_errors++;
++ card->stats.rx_errors++;
++ return;
++ }
++ /* What kind of frame is it? */
++ if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL)
++ /* Control frame. */
++ lcs_get_control(card, (struct lcs_cmd *) lcs_hdr);
++ else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET ||
++ lcs_hdr->type == LCS_FRAME_TYPE_TR ||
++ lcs_hdr->type == LCS_FRAME_TYPE_FDDI)
++ /* Normal network packet. */
++ lcs_get_skb(card, (char *)(lcs_hdr + 1),
++ lcs_hdr->offset - offset -
++ sizeof(struct lcs_header));
++ else
++ /* Unknown frame type. */
++ ; // FIXME: error message ?
++ /* Proceed to next frame. */
++ offset = lcs_hdr->offset;
++ lcs_hdr->offset = LCS_ILLEGAL_OFFSET;
++ lcs_hdr = (struct lcs_header *) (buffer->data + offset);
++ }
++ /* The buffer is now empty. Make it ready again. */
++ lcs_ready_buffer(&card->read, buffer);
++}
++
++/**
++ * get network statistics for ifconfig and other user programs
++ */
++struct net_device_stats *
++lcs_getstats(struct net_device *dev)
++{
++ struct lcs_card *card;
++
++ LCS_DBF_TEXT(4, trace, "netstats");
++ card = (struct lcs_card *) dev->priv;
++ return &card->stats;
++}
++
++/**
++ * Stop the lcs device.
++ * Called when the user brings the interface down ("ifconfig xxx down").
++ */
++int
++lcs_stop_device(struct net_device *dev)
++{
++ struct lcs_card *card;
++
++ LCS_DBF_TEXT(2, trace, "stopdev");
++ card = (struct lcs_card *) dev->priv;
++ netif_stop_queue(dev);
++ MOD_DEC_USE_COUNT;
++ return 0;
++}
++
++/**
++ * Start the lcs device and make it runnable.
++ * Called when the user brings the interface up ("ifconfig xxx up").
++ */
++int
++lcs_open_device(struct net_device *dev)
++{
++ struct lcs_card *card;
++
++ LCS_DBF_TEXT(2, trace, "opendev");
++ LCS_DBF_TEXT_(3,trace,"%s",dev->name);
++ card = (struct lcs_card *) dev->priv;
++ LCS_DBF_HEX(2, trace, &card, sizeof(void*));
++ /* initialize statistics */
++ MOD_INC_USE_COUNT;
++ netif_wake_queue(dev);
++ card->state = DEV_STATE_UP;
++ return 0;
++}
++
++/**
++ * LCS probe function
++ * Main device detection routine, called whenever lcs is started,
++ * either as a module or at boot.
++ */
++static int
++lcs_probe(chandev_probeinfo *info)
++{
++ struct lcs_card *card;
++ struct net_device *dev;
++ int rc;
++
++ LCS_DBF_TEXT(2, setup, "lcsprobe");
++ card = lcs_alloc_card();
++ if (card == NULL) {
++ PRINT_ERR("Allocation of lcs card failed\n");
++ return -ENOMEM;
++ }
++ card->read.irq = info->read.irq;
++ card->write.irq = info->write.irq;
++ card->device_forced = info->device_forced;
++ card->max_port_no = info->max_port_no;
++ card->hint_port_no = info->hint_port_no;
++ card->port_protocol_no = info->port_protocol_no;
++ rc = lcs_setup_card(card);
++ if (rc) {
++ LCS_DBF_TEXT(3, setup, "errinit");
++ PRINT_ERR("LCS card Initialization failed\n");
++ lcs_free_card(card);
++ return rc;
++ }
++
++ /* Now let's detect and start LCS. */
++ rc = lcs_detect(card);
++ if (rc) {
++ LCS_DBF_TEXT(2, setup, "dtctfail");
++ lcs_stopcard(card);
++ lcs_cleanup_card(card);
++ lcs_free_card(card);
++ return -ENODEV;
++ }
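++	/* Buffer memory of the read and write channel rings. */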
++ info->memory_usage_in_k =
++ -((LCS_IOBUFFERSIZE * LCS_NUM_BUFFS +
++ LCS_NUM_BUFFS * LCS_IOBUFFERSIZE) / 1024);
++ switch (card->lan_type) {
++#ifdef CONFIG_NET_ETHERNET
++ case LCS_FRAME_TYPE_ENET:
++ card->lan_type_trans = eth_type_trans;
++ dev = chandev_initnetdevice(info, card->portno,
++ NULL, 0, "eth",
++ init_etherdev,
++ unregister_netdev);
++ break;
++#endif
++#ifdef CONFIG_TR
++ case LCS_FRAME_TYPE_TR:
++ card->lan_type_trans = tr_type_trans;
++ dev = chandev_initnetdevice(info, card->portno,
++ NULL, 0, "tr",
++ init_trdev,
++ unregister_netdev);
++ break;
++#endif
++#ifdef CONFIG_FDDI
++ case LCS_FRAME_TYPE_FDDI:
++ card->lan_type_trans = fddi_type_trans;
++ dev = chandev_initnetdevice(info, card->portno,
++ NULL, 0, "fddi",
++ init_fddidev,
++ unregister_netdev);
++ break;
++#endif
++ default:
++ LCS_DBF_TEXT(2, setup, "errinit");
++ PRINT_ERR("LCS: Initialization failed\n");
++ PRINT_ERR("LCS: No device found!\n");
++ lcs_cleanup_channel(&card->read);
++ lcs_cleanup_channel(&card->write);
++ lcs_free_card(card);
++ return -ENODEV;
++ }
++	if (dev == NULL) {
++		/* chandev_initnetdevice failed; undo the setup. */
++		LCS_DBF_TEXT(2, setup, "errinit");
++		lcs_cleanup_card(card);
++		lcs_free_card(card);
++		return -ENODEV;
++	}
++	memcpy(dev->dev_addr, card->mac, LCS_MAC_LENGTH);
++ card->dev = dev;
++ dev->priv = card;
++ dev->open = lcs_open_device;
++ dev->stop = lcs_stop_device;
++ dev->hard_start_xmit = lcs_start_xmit;
++#ifdef CONFIG_IP_MULTICAST
++	if (!lcs_check_multicast_support(card))
++ dev->set_multicast_list = lcs_set_multicast_list;
++#endif
++ dev->get_stats = lcs_getstats;
++ netif_stop_queue(dev);
++ lcs_set_allowed_threads(card, 0xffffffff);
++ return 0;
++}
++
++/**
++ * shutdown function called by chandev
++ */
++static int
++lcs_shutdown(struct net_device *dev)
++{
++ struct lcs_card *card;
++
++ LCS_DBF_TEXT(2, setup, "shtdndev");
++ card = dev->priv;
++ lcs_set_allowed_threads(card, 0);
++ if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD))
++ return -ERESTARTSYS;
++ if (card != NULL) {
++ lcs_stopcard(card);
++ lcs_cleanup_card(card);
++ lcs_free_card(card);
++ }
++ return 0;
++}
++
++/**
++ * chandev notification function, only used by the 2.4 kernel
++ */
++static void
++lcs_msck_notify(struct net_device *device, int msck_irq,
++ chandev_msck_status prevstatus,
++ chandev_msck_status newstatus)
++{
++ struct lcs_card *card;
++
++ if (device->priv == NULL)
++ return;
++ LCS_DBF_TEXT(2, trace, "mscknot");
++ card = (struct lcs_card *) device->priv;
++
++	if ((prevstatus != chandev_status_good) &&
++	    (prevstatus != chandev_status_all_chans_good))
++ if ((newstatus == chandev_status_good) ||
++ (newstatus == chandev_status_all_chans_good))
++ lcs_resetcard(card);
++ if ((newstatus == chandev_status_gone) ||
++ (newstatus == chandev_status_no_path) ||
++ (newstatus == chandev_status_not_oper))
++ lcs_stopcard(card);
++}
++
++static int
++lcs_verify_dev(struct net_device *dev)
++{
++ return (dev->hard_start_xmit==lcs_start_xmit);
++}
++
++/**
++ * multicast notifier structures
++ */
++#ifdef CONFIG_IP_MULTICAST
++static int lcs_mc_event(struct notifier_block *this,
++ unsigned long event,void *ptr)
++{
++ struct ip_mc_list *mc = (struct ip_mc_list *) ptr;
++ struct net_device *dev = mc->interface->dev;
++ struct lcs_card *card;
++
++ LCS_DBF_TEXT(3,trace,"mcevent");
++
++ if (!lcs_verify_dev(dev))
++ return NOTIFY_DONE;
++ card = (struct lcs_card *) dev->priv;
++ if (!card)
++ return NOTIFY_DONE;
++ lcs_set_multicast_list(dev);
++ return NOTIFY_DONE;
++}
++
++static struct notifier_block lcs_mc_notifier = {
++ lcs_mc_event,
++ 0
++};
++#endif
++
++/**
++ * LCS Module/Kernel initialization function
++ */
++static int
++__init lcs_init_module(void)
++{
++ int cardsfound;
++ int rc;
++
++ LCS_DBF_TEXT(0, setup, "lcsinit");
++ PRINT_INFO("Loading %s\n",version);
++ rc = lcs_register_debug_facility();
++ if (rc) {
++ PRINT_ERR("Initialization failed\n");
++ return rc;
++ }
++
++ cardsfound =
++ chandev_register_and_probe(lcs_probe,
++ (chandev_shutdownfunc)
++ lcs_shutdown,
++ (chandev_msck_notification_func)
++ lcs_msck_notify, chandev_type_lcs);
++#ifdef MODULE
++ if (cardsfound <= 0 &&
++ !chandev_persist(chandev_type_lcs)) {
++ chandev_unregister(lcs_probe,0);
++ return -ENODEV;
++ }
++#else
++ if (cardsfound <= 0)
++ return -ENODEV;
++#endif
++
++#ifdef CONFIG_IP_MULTICAST
++ if (register_multicast_notifier(&lcs_mc_notifier)) {
++ PRINT_ERR("register_multicast_notifier failed, maybe not " \
++ "all multicast addresses will be registered\n");
++ }
++#endif
++
++ return 0;
++}
++
++
++/**
++ * LCS module cleanup function
++ */
++static void
++__exit lcs_cleanup_module(void)
++{
++ PRINT_INFO("Terminating lcs module.\n");
++ LCS_DBF_TEXT(0, trace, "cleanup");
++ chandev_unregister(lcs_probe, 1);
++
++#ifdef CONFIG_IP_MULTICAST
++ unregister_multicast_notifier(&lcs_mc_notifier);
++#endif
++
++ lcs_unregister_debug_facility();
++}
++
++module_init(lcs_init_module);
++module_exit(lcs_cleanup_module);
++
++MODULE_AUTHOR("Frank Pavlic <pavlic at de.ibm.com>");
++MODULE_LICENSE("GPL");
++
+=== drivers/s390/net/lcs.h
+==================================================================
+--- drivers/s390/net/lcs.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/net/lcs.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,312 @@
++/*lcs.h*/
++
++#include <linux/interrupt.h>
++#include <linux/netdevice.h>
++#include <linux/skbuff.h>
++#include <asm/chandev.h>
++#include <asm/irq.h>
++
++#define VERSION_LCS_H "$Revision: 1.1.4.1 $"
++
++#define LCS_DBF_TEXT(level, name, text) \
++ do { \
++ debug_text_event(lcs_dbf_##name, level, text); \
++ } while (0)
++
++#define LCS_DBF_HEX(level,name,addr,len) \
++do { \
++ debug_event(lcs_dbf_##name,level,(void*)(addr),len); \
++} while (0)
++
++
++#define LCS_DBF_TEXT_(level,name,text...) \
++do { \
++ sprintf(debug_buffer, text); \
++ debug_text_event(lcs_dbf_##name,level, debug_buffer);\
++} while (0)
++
++/**
++ * some more definitions for debug or output stuff
++ */
++#define PRINTK_HEADER " lcs: "
++
++/**
++ * CCW commands used in this driver
++ */
++#define LCS_CCW_WRITE 0x01
++#define LCS_CCW_READ 0x02
++#define LCS_CCW_TRANSFER 0x08
++
++/**
++ * LCS device status primitives
++ */
++#define LCS_CMD_STARTLAN 0x01
++#define LCS_CMD_STOPLAN 0x02
++#define LCS_CMD_LANSTAT 0x04
++#define LCS_CMD_STARTUP 0x07
++#define LCS_CMD_SHUTDOWN 0x08
++#define LCS_CMD_QIPASSIST 0xb2
++#define LCS_CMD_SETIPM 0xb4
++#define LCS_CMD_DELIPM 0xb5
++
++#define LCS_INITIATOR_TCPIP 0x00
++#define LCS_INITIATOR_LGW 0x01
++#define LCS_STD_CMD_SIZE 16
++#define LCS_MULTICAST_CMD_SIZE 404
++
++/**
++ * LCS IPASSIST MASKS, only used when multicast is switched on
++ */
++/* Not supported by LCS */
++#define LCS_IPASS_ARP_PROCESSING 0x0001
++#define LCS_IPASS_IN_CHECKSUM_SUPPORT 0x0002
++#define LCS_IPASS_OUT_CHECKSUM_SUPPORT 0x0004
++#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008
++#define LCS_IPASS_IP_FILTERING 0x0010
++/* Supported by lcs 3172 */
++#define LCS_IPASS_IPV6_SUPPORT 0x0020
++#define LCS_IPASS_MULTICAST_SUPPORT 0x0040
++
++/**
++ * LCS sense byte definitions
++ */
++#define LCS_SENSE_INTERFACE_DISCONNECT 0x01
++#define LCS_SENSE_EQUIPMENT_CHECK 0x10
++#define LCS_SENSE_BUS_OUT_CHECK 0x20
++#define LCS_SENSE_INTERVENTION_REQUIRED 0x40
++#define LCS_SENSE_CMD_REJECT 0x80
++#define LCS_SENSE_RESETTING_EVENT 0x0080
++#define LCS_SENSE_DEVICE_ONLINE 0x0020
++
++/**
++ * LCS packet type definitions
++ */
++#define LCS_FRAME_TYPE_CONTROL 0
++#define LCS_FRAME_TYPE_ENET 1
++#define LCS_FRAME_TYPE_TR 2
++#define LCS_FRAME_TYPE_FDDI 7
++#define LCS_FRAME_TYPE_AUTO -1
++
++/**
++ * some more definitions; we will sort them later
++ */
++#define LCS_ILLEGAL_OFFSET 0xffff
++#define LCS_IOBUFFERSIZE 0x5000
++#define LCS_NUM_BUFFS 8 /* needs to be power of 2 */
++#define LCS_MAC_LENGTH 6
++#define LCS_INVALID_PORT_NO -1
++
++/**
++ * Multicast state
++ */
++#define LCS_IPM_STATE_SET_REQUIRED 0
++#define LCS_IPM_STATE_DEL_REQUIRED 1
++#define LCS_IPM_STATE_ON_CARD 2
++
++/**
++ * LCS IP Assist declarations
++ * (partly duplicates the IPASSIST masks above; only used for multicast)
++ */
++#define LCS_IPASS_ARP_PROCESSING 0x0001
++#define LCS_IPASS_INBOUND_CSUM_SUPP 0x0002
++#define LCS_IPASS_OUTBOUND_CSUM_SUPP 0x0004
++#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008
++#define LCS_IPASS_IP_FILTERING 0x0010
++#define LCS_IPASS_IPV6_SUPPORT 0x0020
++#define LCS_IPASS_MULTICAST_SUPPORT 0x0040
++
++/**
++ * LCS Buffer states
++ */
++enum lcs_buffer_states {
++ BUF_STATE_EMPTY, /* buffer is empty */
++ BUF_STATE_LOCKED, /* buffer is locked, don't touch */
++ BUF_STATE_READY, /* buffer is ready for read/write */
++ BUF_STATE_PROCESSED,
++};
++
++/**
++ * LCS Channel State Machine declarations
++ */
++enum lcs_channel_states {
++ CH_STATE_INIT,
++ CH_STATE_HALTED,
++ CH_STATE_STOPPED,
++ CH_STATE_RUNNING,
++ CH_STATE_SUSPENDED,
++};
++
++/**
++ * LCS device state machine
++ */
++enum lcs_dev_states {
++ DEV_STATE_DOWN,
++ DEV_STATE_UP,
++};
++
++enum lcs_threads {
++ LCS_SET_MC_THREAD = 1,
++ LCS_STARTLAN_THREAD = 2,
++ LCS_STOPLAN_THREAD = 4,
++ LCS_STARTUP_THREAD = 8,
++};
++
++/**
++ * LCS struct declarations
++ */
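++/*
++ * On-the-wire frame header: offset points at the next frame header
++ * in the buffer (0 terminates the chain), type is one of the
++ * LCS_FRAME_TYPE_* values, slot is the adapter port number.
++ */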
++struct lcs_header {
++ __u16 offset;
++ __u8 type;
++ __u8 slot;
++} __attribute__ ((packed));
++
++struct lcs_ip_mac_pair {
++ __u32 ip_addr;
++ __u8 mac_addr[LCS_MAC_LENGTH];
++ __u8 reserved[2];
++} __attribute__ ((packed));
++
++struct lcs_ipm_list {
++ struct list_head list;
++ struct lcs_ip_mac_pair ipm;
++ __u8 ipm_state;
++};
++
++struct lcs_cmd {
++ __u16 offset;
++ __u8 type;
++ __u8 slot;
++ __u8 cmd_code;
++ __u8 initiator;
++ __u16 sequence_no;
++ __u16 return_code;
++ union {
++ struct {
++ __u8 lan_type;
++ __u8 portno;
++ __u16 parameter_count;
++ __u8 operator_flags[3];
++ __u8 reserved[3];
++ } lcs_std_cmd;
++ struct {
++ __u16 unused1;
++ __u16 buff_size;
++ __u8 unused2[6];
++ } lcs_startup;
++ struct {
++ __u8 lan_type;
++ __u8 portno;
++ __u8 unused[10];
++ __u8 mac_addr[LCS_MAC_LENGTH];
++ __u32 num_packets_deblocked;
++ __u32 num_packets_blocked;
++ __u32 num_packets_tx_on_lan;
++ __u32 num_tx_errors_detected;
++ __u32 num_tx_packets_disgarded;
++ __u32 num_packets_rx_from_lan;
++ __u32 num_rx_errors_detected;
++ __u32 num_rx_discarded_nobuffs_avail;
++ __u32 num_rx_packets_too_large;
++ } lcs_lanstat_cmd;
++#ifdef CONFIG_IP_MULTICAST
++ struct {
++ __u8 lan_type;
++ __u8 portno;
++ __u16 num_ip_pairs;
++ __u16 ip_assists_supported;
++ __u16 ip_assists_enabled;
++ __u16 version;
++ struct {
++ struct lcs_ip_mac_pair
++ ip_mac_pair[32];
++ __u32 response_data;
++ } lcs_ipass_ctlmsg __attribute__ ((packed));
++ } lcs_qipassist __attribute__ ((packed));
++#endif /*CONFIG_IP_MULTICAST */
++ } cmd __attribute__ ((packed));
++} __attribute__ ((packed));
++
++/**
++ * Forward declarations.
++ */
++struct lcs_card;
++struct lcs_channel;
++
++/**
++ * Definition of an lcs buffer.
++ */
++struct lcs_buffer {
++ enum lcs_buffer_states state;
++ void *data;
++ int count;
++ /* Callback for completion notification. */
++ void (*callback)(struct lcs_channel *, struct lcs_buffer *);
++};
++
++struct lcs_reply {
++ struct list_head list;
++ __u16 sequence_no;
++ atomic_t refcnt;
++ /* Callback for completion notification. */
++ void (*callback)(struct lcs_card *, struct lcs_cmd *);
++ wait_queue_head_t wait_q;
++ struct lcs_card *card;
++ int received;
++ int rc;
++};
++
++/**
++ * Definition of an lcs channel
++ */
++struct lcs_channel {
++ enum lcs_channel_states state;
++ __u16 irq;
++ ccw1_t ccws[LCS_NUM_BUFFS + 1];
++ devstat_t devstat;
++ wait_queue_head_t wait_q;
++ struct tasklet_struct irq_tasklet;
++ struct lcs_buffer iob[LCS_NUM_BUFFS];
++ int io_idx;
++ int buf_idx;
++};
++
++/**
++ * definition of the lcs card
++ */
++struct lcs_card {
++ spinlock_t lock;
++ spinlock_t ipm_lock;
++ enum lcs_dev_states state;
++ struct net_device *dev;
++ struct net_device_stats stats;
++ unsigned short (*lan_type_trans)(struct sk_buff *skb,
++ struct net_device *dev);
++ struct lcs_channel read;
++ struct lcs_channel write;
++ struct lcs_buffer *tx_buffer;
++ int tx_emitted;
++ struct list_head lancmd_waiters;
++
++ struct tq_struct kernel_thread_starter;
++ spinlock_t mask_lock;
++ unsigned long thread_start_mask;
++ unsigned long thread_running_mask;
++ unsigned long thread_allowed_mask;
++ wait_queue_head_t wait_q;
++
++#ifdef CONFIG_IP_MULTICAST
++ struct list_head ipm_list;
++#endif
++ __u8 mac[LCS_MAC_LENGTH];
++ __u16 ip_assists_supported;
++ __u16 ip_assists_enabled;
++ __s8 lan_type;
++ __u32 pkt_seq;
++ __u16 sequence_no;
++ __u16 portno;
++ /* Some info copied from probeinfo */
++ u8 device_forced;
++ u8 max_port_no;
++ u8 hint_port_no;
++ s16 port_protocol_no;
++} __attribute__ ((aligned(8)));
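The lcs_header at the top of these declarations is the framing record for LCS I/O buffers: frames are packed back to back, and each header's offset field gives the position of the next header within the buffer. A minimal sketch of walking one buffer under stated assumptions (the terminating sentinel and the bounds checks are illustrative, not taken from the driver):

    /* Illustrative only: iterate the frame chain in one LCS buffer.
     * Assumes a zero offset ends the chain and that offsets only move
     * forward; the real driver's sentinel may differ. */
    static void lcs_walk_frames_example(void *data, int buf_len)
    {
            struct lcs_header *hdr = (struct lcs_header *) data;
            int pos = 0;

            while (hdr->offset != 0 &&
                   hdr->offset > pos && hdr->offset < buf_len) {
                    /* frame payload lies between this header and the next */
                    pos = hdr->offset;
                    hdr = (struct lcs_header *) ((char *) data + pos);
            }
    }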
+=== drivers/s390/net/Makefile
+==================================================================
+--- drivers/s390/net/Makefile (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/net/Makefile (/trunk/2.4.27) (revision 52)
+@@ -9,12 +9,14 @@
+
+ ctc-objs := ctcmain.o ctctty.o
+
+-obj-$(CONFIG_IUCV) += iucv.o fsm.o
++obj-$(CONFIG_IUCV) += iucv.o
+ obj-$(CONFIG_CTC) += ctc.o fsm.o
+-obj-$(CONFIG_IUCV) += netiucv.o
+-obj-$(CONFIG_C7000) += c7000.o
++obj-$(CONFIG_MPC) += ctcmpc.o fsm.o
++obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
++obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
++obj-$(CONFIG_LCS) += lcs.o
+ obj-$(CONFIG_QETH) += qeth.o
+-export-objs += qeth.o
++export-objs += qeth.o smsgiucv.o
+
+ include $(TOPDIR)/Rules.make
+
+=== drivers/s390/s390io.c
+==================================================================
+--- drivers/s390/s390io.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/s390io.c (/trunk/2.4.27) (revision 52)
+@@ -1,7 +1,7 @@
+ /*
+ * drivers/s390/s390io.c
+ * S/390 common I/O routines
+- * $Revision: 1.258 $
++ * $Revision: 1.247.4.4 $
+ *
+ * S390 version
+ * Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH,
+@@ -3043,7 +3043,16 @@
+ if (!ioinfo[irq]->ui.flags.ready)
+ return (ending_status);
+
+- memcpy (udp, &(ioinfo[irq]->devstat), sdevstat);
++ /*
++ * Special case: We got a deferred cc 3 on a basic sense.
++ * We have to notify the device driver of the former unit
++ * check, but must not confuse it by calling it with the status
++ * for the failed basic sense.
++ */
++ if (ioinfo[irq]->ui.flags.w4sense)
++ ioinfo[irq]->ui.flags.w4sense = 0;
++ else
++ memcpy (udp, &(ioinfo[irq]->devstat), sdevstat);
+
+ ioinfo[irq]->devstat.intparm = 0;
+
+@@ -8328,15 +8337,14 @@
+ {
+ loff_t len;
+ tempinfo_t *p_info = (tempinfo_t *) file->private_data;
+- loff_t pos = *offset;
+
+- if (pos < 0 || pos >= p_info->len) {
++ if (*offset >= p_info->len) {
+ return 0;
+ } else {
+- len = MIN (user_len, (p_info->len - pos));
+- if (copy_to_user (user_buf, &(p_info->data[pos]), len))
++ len = MIN (user_len, (p_info->len - *offset));
++ if (copy_to_user (user_buf, &(p_info->data[*offset]), len))
+ return -EFAULT;
+- *offset = pos + len;
++ (*offset) += len;
+ return len;
+ }
+ }
+@@ -8411,15 +8419,14 @@
+ {
+ loff_t len;
+ tempinfo_t *p_info = (tempinfo_t *) file->private_data;
+- loff_t pos = *offset;
+
+- if (pos < 0 || pos >= p_info->len) {
++ if (*offset >= p_info->len) {
+ return 0;
+ } else {
+- len = MIN (user_len, (p_info->len - pos));
+- if (copy_to_user (user_buf, &(p_info->data[pos]), len))
++ len = MIN (user_len, (p_info->len - *offset));
++ if (copy_to_user (user_buf, &(p_info->data[*offset]), len))
+ return -EFAULT;
+- *offset = pos + len;
++ (*offset) += len;
+ return len;
+ }
+ }
+@@ -8876,15 +8883,14 @@
+ {
+ loff_t len;
+ tempinfo_t *p_info = (tempinfo_t *) file->private_data;
+- loff_t pos = *offset;
+
+- if (pos < 0 || pos >= p_info->len) {
++ if (*offset >= p_info->len) {
+ return 0;
+ } else {
+ len = MIN (user_len, (p_info->len - *offset));
+ if (copy_to_user (user_buf, &(p_info->data[*offset]), len))
+ return -EFAULT;
+- (*offset) = pos + len;
++ (*offset) += len;
+ return len;
+ }
+ }
+@@ -8997,15 +9003,14 @@
+ {
+ loff_t len;
+ tempinfo_t *p_info = (tempinfo_t *) file->private_data;
+- loff_t pos = *offset;
+
+- if (pos < 0 || pos >= p_info->len) {
++ if (*offset >= p_info->len) {
+ return 0;
+ } else {
+ len = MIN (user_len, (p_info->len - *offset));
+ if (copy_to_user (user_buf, &(p_info->data[*offset]), len))
+ return -EFAULT;
+- (*offset) = pos + len;
++ (*offset) += len;
+ return len;
+ }
+ }
+@@ -9127,15 +9132,14 @@
+ {
+ loff_t len;
+ tempinfo_t *p_info = (tempinfo_t *) file->private_data;
+- loff_t pos = *offset;
+
+- if (pos < 0 || pos >= p_info->len) {
++ if (*offset >= p_info->len) {
+ return 0;
+ } else {
+- len = MIN(user_len, (p_info->len - pos));
+- if (copy_to_user( user_buf, &(p_info->data[pos]), len))
++ len = MIN(user_len, (p_info->len - *offset));
++ if (copy_to_user(user_buf, &(p_info->data[*offset]), len))
+ return -EFAULT;
+- *offset = pos + len;
++ (*offset) += len;
+ return len;
+ }
+ }
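All five read hunks above converge on the same chunked-read pattern for a prebuilt text buffer. Collapsed into a single helper (a sketch reusing the MIN macro and tempinfo_t from s390io.c), the logic they now share is:

    static ssize_t
    tempinfo_read_example(struct file *file, char *user_buf,
                          size_t user_len, loff_t *offset)
    {
            tempinfo_t *p_info = (tempinfo_t *) file->private_data;
            loff_t len;

            if (*offset >= p_info->len)
                    return 0;       /* past the end of the buffer: EOF */
            len = MIN(user_len, (p_info->len - *offset));
            if (copy_to_user(user_buf, &(p_info->data[*offset]), len))
                    return -EFAULT;
            (*offset) += len;       /* advance the file position */
            return len;
    }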
+=== drivers/s390/Config.in
+==================================================================
+--- drivers/s390/Config.in (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/Config.in (/trunk/2.4.27) (revision 52)
+@@ -9,6 +9,7 @@
+ fi
+ dep_bool ' Initial RAM disk (initrd) support' CONFIG_BLK_DEV_INITRD $CONFIG_BLK_DEV_RAM
+ tristate 'XPRAM disk support' CONFIG_BLK_DEV_XPRAM
++tristate 'z/VM discontiguous saved segments (DCSS) block device driver' CONFIG_DCSSBLK
+
+ comment 'S/390 block device drivers'
+
+@@ -29,6 +30,7 @@
+ bool ' Automatic activation of DIAG module' CONFIG_DASD_AUTO_DIAG
+ fi
+ fi
++ dep_tristate ' Support for Channel Measurement on DASD devices' CONFIG_S390_CMF $CONFIG_DASD
+ fi
+
+ endmenu
+@@ -52,20 +54,26 @@
+ if [ "$CONFIG_TN3215" = "y" ]; then
+ bool 'Support for console on 3215 line mode terminal' CONFIG_TN3215_CONSOLE
+ fi
+-bool 'Support for HWC line mode terminal' CONFIG_HWC
+-if [ "$CONFIG_HWC" = "y" ]; then
+- bool ' console on HWC line mode terminal' CONFIG_HWC_CONSOLE
+- tristate ' Control-Program Identification' CONFIG_HWC_CPI
++bool 'Support for SCLP' CONFIG_SCLP
++if [ "$CONFIG_SCLP" = "y" ]; then
++ bool ' Support for SCLP line mode terminal' CONFIG_SCLP_TTY
++ if [ "$CONFIG_SCLP_TTY" = "y" ]; then
++ bool ' Support for console on SCLP line mode terminal' CONFIG_SCLP_CONSOLE
++ fi
++ bool ' Support for SCLP VT220-compatible terminal' CONFIG_SCLP_VT220_TTY
++ if [ "$CONFIG_SCLP_VT220_TTY" = "y" ]; then
++ bool ' Support for console on SCLP VT220-compatible terminal' CONFIG_SCLP_VT220_CONSOLE
++ fi
++ tristate ' Control-Program Identification' CONFIG_SCLP_CPI
+ fi
+ tristate 'S/390 tape device support' CONFIG_S390_TAPE
+ if [ "$CONFIG_S390_TAPE" != "n" ]; then
+ comment 'S/390 tape interface support'
+- bool ' Support for tape character devices' CONFIG_S390_TAPE_CHAR
+ bool ' Support for tape block devices' CONFIG_S390_TAPE_BLOCK
+ comment 'S/390 tape hardware support'
+- bool ' Support for 3490 tape hardware' CONFIG_S390_TAPE_3490
+- bool ' Support for 3480 tape hardware' CONFIG_S390_TAPE_3480
++ dep_tristate ' Support for 3480/3490 tape hardware' CONFIG_S390_TAPE_34XX $CONFIG_S390_TAPE
+ fi
++dep_tristate 'Support for the z/VM recording system services (VM only)' CONFIG_VMLOGRDR $CONFIG_IUCV
+ endmenu
+
+ if [ "$CONFIG_NET" = "y" ]; then
+@@ -88,9 +96,38 @@
+ define_bool CONFIG_HOTPLUG y
+ fi
+
++ if [ "$CONFIG_NET_ETHERNET" != "n" -o "$CONFIG_TR" != "n" ]; then
++ tristate 'Lan Channel Station Interface' CONFIG_LCS
++ fi
++
++ if [ "$CONFIG_QDIO" != "n" -a "$CONFIG_CHANDEV" = "y" -a "$CONFIG_IP_MULTICAST" = "y" ]; then
++ dep_tristate 'Support for Gigabit Ethernet' CONFIG_QETH $CONFIG_QDIO
++ if [ "$CONFIG_QETH" != "n" ]; then
++ comment 'Gigabit Ethernet default settings'
++ if [ "$CONFIG_IPV6" = "y" -o "$CONFIG_IPV6" = "$CONFIG_QETH" ]; then
++ bool ' IPv6 support for qeth' CONFIG_QETH_IPV6
++ else
++ define_bool CONFIG_QETH_IPV6 n
++ fi
++ if [ "$CONFIG_VLAN_8021Q" = "y" -o "$CONFIG_VLAN_8021Q" = "$CONFIG_QETH" ]; then
++ bool ' VLAN support for qeth' CONFIG_QETH_VLAN
++ else
++ define_bool CONFIG_QETH_VLAN n
++ fi
++ bool ' Performance statistics in /proc' CONFIG_QETH_PERF_STATS
++ fi
++ fi
+ tristate 'CTC device support' CONFIG_CTC
+- tristate 'IUCV device support (VM only)' CONFIG_IUCV
++ tristate 'CTCMPC device support' CONFIG_MPC
++ tristate 'IUCV support (VM only)' CONFIG_IUCV
++ dep_tristate 'IUCV network device support (VM only)' CONFIG_NETIUCV $CONFIG_IUCV
++ dep_tristate 'IUCV special message support (VM only)' CONFIG_SMSGIUCV $CONFIG_IUCV
+ fi
+ endmenu
+ fi
+
++
++mainmenu_option next_comment
++comment 'Miscellaneous'
++ tristate 'Z90CRYPT support' CONFIG_Z90CRYPT
++endmenu
+=== drivers/s390/block/dasd_diag.c
+==================================================================
+--- drivers/s390/block/dasd_diag.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/block/dasd_diag.c (/trunk/2.4.27) (revision 52)
+@@ -6,7 +6,7 @@
+ * Bugreports.to..: <Linux390 at de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ *
+- * $Revision: 1.49 $
++ * $Revision: 1.47.6.2 $
+ *
+ * History of changes
+ * 07/13/00 Added fixup sections for diagnoses ans saved some registers
+=== drivers/s390/block/dasd_3990_erp.c
+==================================================================
+--- drivers/s390/block/dasd_3990_erp.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/block/dasd_3990_erp.c (/trunk/2.4.27) (revision 52)
+@@ -5,7 +5,7 @@
+ * Bugreports.to..: <Linux390 at de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000, 2001
+ *
+- * $Revision: 1.52 $
++ * $Revision: 1.52.2.4 $
+ *
+ * History of changes:
+ * 05/14/01 fixed PL030160GTO (BUG() in erp_action_5)
+@@ -585,9 +585,8 @@
+ ioinfo[irq]->opm);
+
+ /* reset status to queued to handle the request again... */
+- check_then_set (&erp->status,
+- CQR_STATUS_ERROR,
+- CQR_STATUS_QUEUED);
++ if (erp->status > CQR_STATUS_QUEUED)
++ erp->status = CQR_STATUS_QUEUED;
+
+ erp->retries = 1;
+
+@@ -598,12 +597,10 @@
+ "opm=%x) -> permanent error",
+ erp->dstat->lpum,
+ ioinfo[irq]->opm);
+-
++
+ /* post request with permanent error */
+- check_then_set (&erp->status,
+- CQR_STATUS_ERROR,
+- CQR_STATUS_FAILED);
+-
++ if (erp->status > CQR_STATUS_QUEUED)
++ erp->status = CQR_STATUS_FAILED;
+ }
+
+ } /* end dasd_3990_erp_alternate_path */
+@@ -757,6 +754,12 @@
+
+ dasd_3990_erp_block_queue (erp,
+ 30);
++ } else if (sense[25] == 0x1E) { /* busy */
++ DEV_MESSAGE (KERN_INFO, device,
++ "busy - redriving request later, "
++ "%d retries left",
++ erp->retries);
++ dasd_3990_erp_block_queue (erp, 1);
+ } else {
+ DEV_MESSAGE (KERN_INFO, device,
+ "redriving request immediately, "
+@@ -768,7 +771,6 @@
+ CQR_STATUS_QUEUED);
+ }
+ }
+-
+ return erp;
+
+ } /* end dasd_3990_erp_action_4 */
+@@ -2386,7 +2388,7 @@
+ switch (sense[28]) {
+ case 0x17:
+ /* issue a Diagnostic Control command with an
+- * Inhibit Write subcommand and controler modifier */
++ * Inhibit Write subcommand and controller modifier */
+ erp = dasd_3990_erp_DCTL (erp,
+ 0x20);
+ break;
+@@ -2603,9 +2605,9 @@
+ "Data recovered during retry with PCI "
+ "fetch mode active");
+
+- /* not possible to handle this situation in Linux */
+- panic("Invalid data - No way to inform appliction about "
+- "the possibly incorret data");
++ /* not possible to handle this situation in Linux */
++ panic("Invalid data - No way to inform application "
++ "about the possibly incorrect data");
+ break;
+
+ case 0x1D: /* state-change pending */
+@@ -2617,6 +2619,12 @@
+ sense);
+ break;
+
++ case 0x1E: /* busy */
++ DEV_MESSAGE (KERN_DEBUG, device, "%s",
++ "Busy condition exists "
++ "for the subsystem or device");
++ erp = dasd_3990_erp_action_4 (erp, sense);
++ break;
+ default:
+ ; /* all others errors - default erp */
+ }
+@@ -2699,7 +2707,8 @@
+ if (!erp) {
+ if (cqr->retries <= 0) {
+ DEV_MESSAGE (KERN_ERR, device, "%s",
+- "Unable to allocate ERP request (NO retries left)");
++ "Unable to allocate ERP request "
++ "(NO retries left)");
+
+ check_then_set (&cqr->status,
+ CQR_STATUS_ERROR,
+@@ -2709,7 +2718,8 @@
+
+ } else {
+ DEV_MESSAGE (KERN_ERR, device,
+- "Unable to allocate ERP request (%i retries left)",
++ "Unable to allocate ERP request "
++ "(%i retries left)",
+ cqr->retries);
+
+ if (!timer_pending(&device->timer)) {
+@@ -3169,8 +3179,9 @@
+ dasd_chanq_enq_head (&device->queue,
+ erp);
+ } else {
+- if ((erp->status == CQR_STATUS_FILLED ) || (erp != device->queue.head)) {
+- /* something strange happened - log the error and panic */
++ if ((erp->status == CQR_STATUS_FILLED ) ||
++ (erp != device->queue.head)) {
++ /* something strange happened - log error and panic */
+ /* print current erp_chain */
+ DEV_MESSAGE (KERN_DEBUG, device, "%s",
+ "ERP chain at END of ERP-ACTION");
+@@ -3188,7 +3199,8 @@
+ temp_erp->refers);
+ }
+ }
+- panic ("Problems with ERP chain!!! Please report to linux390 at de.ibm.com");
++ panic ("Problems with ERP chain!!! "
++ "Please report to linux390 at de.ibm.com");
+ }
+
+ }
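The two status hunks above replace check_then_set, which insists that a request is still in one expected state, with a guarded assignment; during alternate-path handling the request can legitimately arrive in more than one state, so the hard assertion no longer fits. A plausible sketch of the assumed check_then_set semantics (the real helper is defined elsewhere in the dasd sources):

    /* Sketch only: assumed semantics of check_then_set(). */
    static inline void
    check_then_set(char *status, char expected, char new_state)
    {
            if (*status != expected)
                    BUG();          /* caller's state model was violated */
            *status = new_state;
    }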
+=== drivers/s390/block/dasd.c
+==================================================================
+--- drivers/s390/block/dasd.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/block/dasd.c (/trunk/2.4.27) (revision 52)
+@@ -6,7 +6,7 @@
+ * Bugreports.to..: <Linux390 at de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
+ *
+- * $Revision: 1.311 $
++ * $Revision: 1.298.2.14 $
+ *
+ * History of changes (starts July 2000)
+ * 11/09/00 complete redesign after code review
+@@ -311,7 +311,7 @@
+ /* and add the remaining subranges */
+ for (start = index = from, end = -EINVAL; index <= to; index++) {
+
+- if (dasd_devindex_from_devno(index) > 0) {
++ if (dasd_devindex_from_devno(index) >= 0) {
+ /* current device is already in range */
+ MESSAGE (KERN_DEBUG,
+ "dasd_add_range %04x-%04x: "
+@@ -741,6 +741,15 @@
+
+ memset (major_info->gendisk.flags, 0, DASD_PER_MAJOR * sizeof (char));
+
++ /* init label array */
++ major_info->gendisk.label_arr = (devfs_handle_t *)
++ kmalloc (DASD_PER_MAJOR * sizeof (devfs_handle_t), GFP_KERNEL);
++ if (major_info->gendisk.label_arr == NULL)
++ goto out_gd_label_arr;
++
++ memset (major_info->gendisk.label_arr, 0,
++ DASD_PER_MAJOR * sizeof(devfs_handle_t));
++
+ /* register blockdevice */
+ rc = devfs_register_blkdev (major, DASD_NAME, &dasd_device_operations);
+ if (rc < 0) {
+@@ -861,6 +870,9 @@
+ }
+
+ out_reg_blkdev:
++ kfree (major_info->gendisk.label_arr);
++
++out_gd_label_arr:
+ kfree (major_info->gendisk.flags);
+
+ out_gd_flags:
+@@ -916,6 +928,7 @@
+ major_info->flags &= ~DASD_MAJOR_INFO_REGISTERED;
+ }
+
++ kfree (major_info->gendisk.label_arr);
+ kfree (major_info->gendisk.flags);
+ kfree (major_info->gendisk.de_arr);
+
+@@ -2148,11 +2161,10 @@
+ if (cqr == ERR_PTR(-ENOMEM)) {
+ break;
+ }
+-
+- MESSAGE (KERN_EMERG,
+- "(%04x) CCW creation failed "
+- "on request %p",
+- device->devinfo.devno, req);
++ DEV_MESSAGE (KERN_EMERG, device,
++ "CCW creation failed "
++ "on request %p rc = %ld",
++ req, PTR_ERR(cqr));
+ dasd_dequeue_request (queue,req);
+ dasd_end_request (req, 0);
+ continue;
+@@ -2434,6 +2446,10 @@
+ era = dasd_era_recover;
+ }
+
++ /* process channel measurement facility configuration while
++ the channel is idle */
++ cmf_device_callback(&device->cdev);
++
+ switch (era) {
+ case dasd_era_none:
+ check_then_set(&cqr->status,
+@@ -3157,12 +3173,15 @@
+ spin_lock_irqsave (&range_lock, flags);
+ list_for_each (l, &dasd_range_head.list) {
+ temp = list_entry (l, dasd_range_t, list);
+- if (device->devinfo.devno >= temp->from && device->devinfo.devno <= temp->to) {
++ if (device->devinfo.devno >= temp->from &&
++ device->devinfo.devno <= temp->to) {
+ spin_unlock_irqrestore (&range_lock, flags);
+ if (intval)
+- temp->features |= DASD_FEATURE_READONLY;
++ temp->features |=
++ DASD_FEATURE_READONLY;
+ else
+- temp->features &= ~DASD_FEATURE_READONLY;
++ temp->features &=
++ ~DASD_FEATURE_READONLY;
+ goto continue_blkroset;
+ }
+ devindex += temp->to - temp->from + 1;
+@@ -3717,7 +3736,8 @@
+ unsigned long flags;
+ int i, devno;
+
+- /* find out devno of leaving device: CIO has already deleted this information ! */
++ /* find out devno of leaving device: CIO has already deleted this */
++ /* information ! */
+ devno = -ENODEV;
+ device = NULL;
+ list_for_each (l, &dasd_major_info) {
+@@ -3815,7 +3835,7 @@
+ }
+
+ if (device &&
+- device->level >= DASD_STATE_READY) {
++ device->level >= DASD_STATE_NEW) {
+ s390irq_spin_lock_irqsave (device->devinfo.irq,
+ flags);
+ DEV_MESSAGE (KERN_DEBUG, device, "%s",
+@@ -3876,6 +3896,7 @@
+ int i;
+ dasd_device_t* device;
+ dasd_lowmem_t *lowmem;
++ struct list_head *lmem, *next;
+ int rc;
+
+
+@@ -3892,7 +3913,9 @@
+ memset (device, 0, sizeof (dasd_device_t));
+ dasd_plug_device (device);
+ INIT_LIST_HEAD (&device->lowmem_pool);
+-
++
++ cmf_device_init(&device->cdev, devno);
++
+ /* allocate pages for lowmem pool */
+ for (i = 0; i < DASD_LOWMEM_PAGES; i++) {
+
+@@ -3906,7 +3929,8 @@
+
+ if (i < DASD_LOWMEM_PAGES) {
+ /* didn't get the needed lowmem pages */
+- list_for_each_entry (lowmem, &device->lowmem_pool, list) {
++ list_for_each_safe (lmem, next, &device->lowmem_pool) {
++ lowmem = list_entry (lmem, dasd_lowmem_t, list);
+ MESSAGE (KERN_DEBUG,
+ "<devno: %04x> not enough memory - "
+ "Free page again :%p",
+@@ -3926,6 +3950,7 @@
+ dasd_state_new_to_del (dasd_device_t **addr, int devno)
+ {
+ dasd_lowmem_t *lowmem;
++ struct list_head *l,*n;
+
+ dasd_device_t *device = *addr;
+
+@@ -3935,7 +3960,9 @@
+ }
+
+ /* free lowmem_pool */
+- list_for_each_entry (lowmem, &device->lowmem_pool, list) {
++ list_for_each_safe (l, n, &device->lowmem_pool) {
++ lowmem = list_entry (l, dasd_lowmem_t, list);
++ list_del(&lowmem->list);
+ free_page ((unsigned long) lowmem);
+ }
+
+@@ -4655,17 +4682,15 @@
+ loff_t * offset)
+ {
+ loff_t len;
+- loff_t n = *offset;
+- unsigned pos = n;
+ tempinfo_t *p_info = (tempinfo_t *) file->private_data;
+
+- if (n != pos || pos >= p_info->len) {
++ if (*offset >= p_info->len) {
+ return 0; /* EOF */
+ } else {
+- len = MIN (user_len, (p_info->len - pos));
+- if (copy_to_user (user_buf, &(p_info->data[pos]), len))
++ len = MIN (user_len, (p_info->len - *offset));
++ if (copy_to_user (user_buf, &(p_info->data[*offset]), len))
+ return -EFAULT;
+- *offset = pos + len;
++ (*offset) += len;
+ return len; /* number of bytes "read" */
+ }
+ }
+@@ -5188,6 +5213,7 @@
+ "/proc/dasd/statistics: only 'set' and "
+ "'reset' are supported verbs");
+
++ vfree (buffer);
+ return -EINVAL;
+ }
+
+@@ -5243,6 +5269,7 @@
+
+
+ #endif /* DASD_PROFILE */
++ vfree (buffer);
+ return user_len;
+ }
+
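The lowmem-pool hunks above switch from list_for_each_entry to list_for_each_safe because the loop body frees the very node it stands on; the safe variant caches the next pointer before the current entry can disappear. Reduced to its essentials, the pattern is:

    struct list_head *l, *n;
    dasd_lowmem_t *lowmem;

    list_for_each_safe(l, n, &device->lowmem_pool) {
            lowmem = list_entry(l, dasd_lowmem_t, list);
            list_del(&lowmem->list);                /* unlink first... */
            free_page((unsigned long) lowmem);      /* ...then free */
    }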
+=== drivers/s390/block/dasd_fba.c
+==================================================================
+--- drivers/s390/block/dasd_fba.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/block/dasd_fba.c (/trunk/2.4.27) (revision 52)
+@@ -4,7 +4,7 @@
+ * Bugreports.to..: <Linux390 at de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ *
+- * $Revision: 1.50 $
++ * $Revision: 1.49.6.3 $
+ *
+ * History of changes
+ * fixed partition handling and HDIO_GETGEO
+@@ -94,14 +94,18 @@
+ return rc;
+ }
+
+-static inline void
++static inline int
+ locate_record (ccw1_t * ccw, LO_fba_data_t * LO_data, int rw, int block_nr,
+ int block_ct, ccw_req_t* cqr, dasd_device_t* device)
+ {
++ int errcode;
++
+ memset (LO_data, 0, sizeof (LO_fba_data_t));
+ ccw->cmd_code = DASD_FBA_CCW_LOCATE;
+ ccw->count = 8;
+- dasd_set_normalized_cda (ccw, __pa (LO_data), cqr, device);
++ if ((errcode = dasd_set_normalized_cda (ccw, __pa (LO_data), cqr,
++ device)))
++ return errcode;
+ if (rw == WRITE)
+ LO_data->operation.cmd = 0x5;
+ else if (rw == READ)
+@@ -110,6 +114,8 @@
+ LO_data->operation.cmd = 0x8;
+ LO_data->blk_nr = block_nr;
+ LO_data->blk_ct = block_ct;
++
++ return 0;
+ }
+
+ static int
+@@ -248,7 +254,7 @@
+ stat->dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+ return dasd_era_none;
+
+- switch (device->devinfo.sid_data.dev_model) {
++ switch (device->devinfo.sid_data.dev_type) {
+ case 0x3370:
+ return dasd_3370_erp_examine (cqr, stat);
+ case 0x9336:
+@@ -292,7 +298,8 @@
+ int byt_per_blk = device->sizes.bp_block;
+ unsigned long reloc_sector = req->sector +
+ device->major_info->gendisk.part[MINOR (req->rq_dev)].start_sect;
+-
++ int errcode;
++
+ if (req->cmd == READ) {
+ rw_cmd = DASD_FBA_CCW_READ;
+ } else if (req->cmd == WRITE) {
+@@ -337,29 +344,30 @@
+ LO_data = rw_cp->data + sizeof (DE_fba_data_t);
+ ccw = rw_cp->cpaddr;
+
+- if (define_extent (ccw, DE_data, req->cmd, byt_per_blk,
+- reloc_sector, req->nr_sectors, rw_cp, device)) {
++ if ((errcode = define_extent (ccw, DE_data, req->cmd, byt_per_blk,
++ reloc_sector, req->nr_sectors, rw_cp,
++ device)))
+ goto clear_rw_cp;
+- }
++
+ ccw->flags |= CCW_FLAG_CC;
+ ccw ++;
+- locate_record (ccw, LO_data, req->cmd, 0,
+- private->rdc_data.mode.bits.data_chain ? bhct : 1, rw_cp, device);
+- if (ccw->cda == 0) {
++ if ((errcode = locate_record (ccw, LO_data, req->cmd, 0,
++ private->rdc_data.mode.bits.data_chain ?
++ bhct : 1, rw_cp, device)))
+ goto clear_rw_cp;
+- }
++
+ ccw->flags |= CCW_FLAG_CC;
+
+- bh = req -> bh;
+- i = 0;
+- while ( bh != NULL ) {
++ for (bh = req->bh, i = 0; bh != NULL; ) {
+ for (size = 0; size < bh->b_size; size += byt_per_blk) {
+ ccw ++;
+ ccw->cmd_code = rw_cmd;
+ ccw->count = byt_per_blk;
+- if (dasd_set_normalized_cda (ccw,__pa (bh->b_data + size), rw_cp, device)) {
++ if ((errcode = dasd_set_normalized_cda (ccw,
++ __pa (bh->b_data + size),
++ rw_cp, device)))
+ goto clear_rw_cp;
+- }
++
+ if (private->rdc_data.mode.bits.data_chain) {
+ ccw->flags |= CCW_FLAG_DC;
+ } else {
+@@ -372,23 +380,25 @@
+ ccw++;
+ i++;
+ LO_data++;
+- locate_record (ccw, LO_data, req->cmd, i, 1, rw_cp, device);
+- if (ccw->cda == 0) {
++ if ((errcode = locate_record (ccw, LO_data, req->cmd,
++ i, 1, rw_cp, device)))
+ goto clear_rw_cp;
+- }
++
+ ccw->flags |= CCW_FLAG_CC;
+ }
+ }
+ ccw->flags &= ~(CCW_FLAG_DC | CCW_FLAG_CC);
+-
+ rw_cp->device = device;
+ rw_cp->expires = 5 * TOD_MIN; /* 5 minutes */
+ rw_cp->req = req;
++ rw_cp->lpm = LPM_ANYPATH;
++ rw_cp->retries = 256;
++ rw_cp->buildclk = get_clock ();
+ check_then_set (&rw_cp->status, CQR_STATUS_EMPTY, CQR_STATUS_FILLED);
+ goto out;
+ clear_rw_cp:
+ dasd_free_request (rw_cp, device);
+- rw_cp = NULL;
++ rw_cp = ERR_PTR(errcode);
+ out:
+ return rw_cp;
+ }
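Note the error-reporting change in the request builder above: instead of returning NULL it now encodes the errno into the returned pointer with ERR_PTR, which the caller (see the dasd.c hunk further up, PTR_ERR(cqr)) unpacks again. A minimal, self-contained sketch of the idiom; on 2.4 these macros come from linux/fs.h:

    static ccw_req_t *example_req;            /* stands in for a real request */

    static ccw_req_t *build_example(int fail)
    {
            if (fail)
                    return ERR_PTR(-ENOMEM); /* errno encoded in the pointer */
            return example_req;
    }

    static int use_example(void)
    {
            ccw_req_t *cqr = build_example(0);

            if (IS_ERR(cqr))
                    return PTR_ERR(cqr);     /* recover the errno */
            return 0;
    }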
+=== drivers/s390/block/dcssblk.c
+==================================================================
+--- drivers/s390/block/dcssblk.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/block/dcssblk.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,780 @@
++/*
++ * dcssblk.c -- the S/390 block driver for dcss memory
++ *
++ * Author: Carsten Otte
++ */
++
++#include <linux/module.h>
++#include <linux/version.h>
++#ifdef CONFIG_PROC_FS
++#include <linux/proc_fs.h>
++#endif
++#include <linux/devfs_fs_kernel.h>
++#include <linux/ctype.h> /* isdigit, isxdigit */
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/blk.h>
++#include <linux/blkpg.h>
++#include <linux/hdreg.h> /* HDIO_GETGEO */
++#include <linux/kdev_t.h>
++#include <asm/uaccess.h>
++#include <asm/dcss.h>
++
++#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSSBLK_NAME " debug:" x)
++#define PRINT_INFO(x...) printk(KERN_INFO DCSSBLK_NAME " info:" x)
++#define PRINT_WARN(x...) printk(KERN_WARNING DCSSBLK_NAME " warning:" x)
++#define PRINT_ERR(x...) printk(KERN_ERR DCSSBLK_NAME " error:" x)
++#define DCSSBLK_NAME "dcssblk"
++
++#ifdef CONFIG_PROC_FS
++static struct proc_dir_entry *dcssblk_proc_root_entry;
++static struct proc_dir_entry *dcssblk_add_entry;
++static struct proc_dir_entry *dcssblk_remove_entry;
++static struct proc_dir_entry *dcssblk_list_entry;
++#endif
++static devfs_handle_t dcssblk_devfs_dir;
++unsigned int dcssblk_blksizes[1<<MINORBITS];
++
++static int dcssblk_open (struct inode *inode, struct file *filp);
++static int dcssblk_release (struct inode *inode, struct file *filp);
++
++static struct block_device_operations dcssblk_devops =
++ {
++ owner: THIS_MODULE,
++ open: dcssblk_open,
++ release: dcssblk_release,
++ };
++
++typedef struct _dcss_device_t {
++ struct list_head lh;
++ devfs_handle_t devfs_entry;
++ unsigned int use_count;
++ unsigned long start;
++ unsigned long end;
++ int segment_type;
++ unsigned char save_pending;
++ unsigned char is_shared;
++#ifdef CONFIG_PROC_FS
++ struct proc_dir_entry *dcssblk_subdir_entry;
++ struct proc_dir_entry *dcssblk_save_entry;
++ struct proc_dir_entry *dcssblk_shared_entry;
++#endif
++ char name [9];
++ kdev_t kdev;
++} dcss_device_t;
++
++typedef struct _dcssblk_proc_private_t {
++ char name[8];
++ int bytes_written;
++} dcssblk_proc_private_t;
++
++static int dcssblk_major;
++static struct list_head dcssblk_devices = LIST_HEAD_INIT(dcssblk_devices);
++static rwlock_t dcssblk_devices_lock = RW_LOCK_UNLOCKED;
++
++
++MODULE_LICENSE("GPL");
++
++/*
++ * allocate a free minor number. must be called with
++ * write_lock(dcssblk_devices_lock) held, and the device
++ * must be enqueued before the lock is dropped.
++ */
++static int dcssblk_assign_free_minor (dcss_device_t* device) {
++ unsigned int minor,found;
++ struct list_head* entry;
++ if (device == NULL)
++ return -EINVAL;
++ for (minor=0; minor< (1<<MINORBITS); minor++) {
++ found = 0;
++ // test if minor available
++ list_for_each (entry, &dcssblk_devices)
++ if (minor == MINOR((list_entry(entry, dcss_device_t,
++ lh)->kdev)))
++ found++;
++ if (!found) break; // got unused minor
++ }
++ if (found)
++ return -EBUSY;
++ device->kdev = MKDEV(dcssblk_major, minor);
++ return 0;
++}
++
++/*
++ * get the dcss_device_t from dcssblk_devices
++ * with the struct device supplied.
++ * needs to be called with read_lock(dcssblk_devices_lock)
++ */
++static dcss_device_t * dcssblk_get_device_by_minor (unsigned int minor) {
++ struct list_head* entry;
++ list_for_each (entry, &dcssblk_devices)
++ if (MINOR(list_entry(entry, dcss_device_t, lh)->kdev)==minor) {
++ return list_entry(entry,dcss_device_t, lh);
++ }
++ return NULL;
++}
++
++/*
++ * get the dcss_device_t from dcssblk_devices
++ * with the segment name supplied.
++ * needs to be called with read_lock(dcssblk_devices_lock)
++ */
++static dcss_device_t * dcssblk_get_device_by_name (char* name) {
++ struct list_head* entry;
++ list_for_each (entry, &dcssblk_devices)
++ if (!strcmp (name, list_entry (entry, dcss_device_t, lh)
++ ->name)) {
++ return list_entry(entry,dcss_device_t, lh);
++ }
++ return NULL;
++}
++
++static void dcssblk_do_save (dcss_device_t* device) {
++ segment_replace (device->name);
++}
++
++/*
++ * device attribute for switching shared/nonshared
++ * operation
++ */
++static int dcssblk_shared_store (struct file *file,
++ const char *buffer,
++ unsigned long count, void *data) {
++ dcss_device_t* device = data;
++ char* buf;
++ int rc;
++ long value;
++
++ if (device == NULL) {
++ rc = -EINVAL;
++ goto out_nobuf;
++ }
++ write_lock(&dcssblk_devices_lock);
++ if (device->use_count) {
++ rc = -EBUSY;
++ write_unlock (&dcssblk_devices_lock);
++ goto out_nobuf;
++ }
++
++ /*
++ * fetch buffer from userland
++ */
++ buf = kmalloc(count + 1, GFP_ATOMIC);
++ if (buf == NULL) {
++ rc = -ENOMEM;
++ write_unlock(&dcssblk_devices_lock);
++ goto out_nobuf;
++ }
++ if (copy_from_user(buf, buffer, count)) {
++ rc = -EFAULT;
++ write_unlock(&dcssblk_devices_lock);
++ goto out;
++ }
++ buf[count] = '\0'; /* ensure termination for simple_strtoul */
++ value = simple_strtoul(buf, NULL, 10); /* keep buf intact for kfree below */
++
++ if (value) {
++ // reload segment in shared mode
++ segment_unload (device->name);
++ rc = segment_load (device->name, SEGMENT_SHARED_RO,
++ &device->start, &device->end);
++ if (rc < 0) {
++ PRINT_WARN ("SEGMENT %s NOT RELOADED RC=%d\n",
++ device->name, rc);
++ goto removeseg;
++ }
++ device->segment_type = rc;
++ device->is_shared = 1;
++ rc = count;
++ } else {
++ // reload segment in exclusive mode
++ segment_unload (device->name);
++ rc = segment_load (device->name, SEGMENT_EXCLUSIVE_RW,
++ &device->start, &device->end);
++ if (rc < 0) {
++ PRINT_WARN("SEGMENT %s NOT RELOADED RC=%d\n",
++ device->name, rc);
++ goto removeseg;
++ }
++ device->segment_type = rc;
++ device->is_shared = 0;
++ rc = count;
++ }
++ switch (device->segment_type) {
++ case SEGMENT_SHARED_RO:
++ case SEGMENT_EXCLUSIVE_RO:
++ set_device_ro (device->kdev, 1);
++ break;
++ case SEGMENT_SHARED_RW:
++ case SEGMENT_EXCLUSIVE_RW:
++ set_device_ro (device->kdev, 0);
++ break;
++ }
++ if (value && ((device->segment_type == SEGMENT_EXCLUSIVE_RO) ||
++ (device->segment_type == SEGMENT_EXCLUSIVE_RW))) {
++ PRINT_WARN(
++ "dcssblk: could not get shared copy of segment %s\n",
++ device->name);
++ device->is_shared = 0;
++ rc = -EPERM;
++ }
++ if ((value == 0) && ((device->segment_type == SEGMENT_SHARED_RO) ||
++ (device->segment_type == SEGMENT_SHARED_RW))) {
++ PRINT_WARN(
++ "dcssblk: could not get exclusive copy of segment %s\n",
++ device->name);
++ device->is_shared = 1;
++ rc = -EPERM;
++ }
++ write_unlock(&dcssblk_devices_lock);
++ goto out;
++
++ removeseg:
++ PRINT_WARN (
++ "dcssblk: could not reload segment %s, removing it!\n",
++ device->name);
++ list_del(&device->lh);
++ write_unlock(&dcssblk_devices_lock);
++#ifdef CONFIG_PROC_FS
++ remove_proc_entry("save", device->dcssblk_subdir_entry);
++ remove_proc_entry("shared", device->dcssblk_subdir_entry);
++ remove_proc_entry(device->name, dcssblk_proc_root_entry);
++#endif
++ devfs_unregister(device->devfs_entry );
++
++ kfree (device);
++ MOD_DEC_USE_COUNT; // permanent
++ out:
++ kfree (buf);
++ out_nobuf:
++ return rc;
++
++}
++
++/*
++ * device attribute for showing status of "shared / non-shared"
++ */
++static int dcssblk_shared_status(char *buffer, char **start, off_t offset,
++ int count, int *eof, void *data) {
++ dcss_device_t* device = data;
++
++ *eof = 1;
++ return sprintf(buffer, device->is_shared ? "1\n" : "0\n");
++}
++
++/*
++ * device attribute for save operation on current copy
++ * of the segment. If the segment is busy, the save is
++ * left pending until the segment is released; writing a
++ * zero to this entry cancels a pending save.
++ */
++static int dcssblk_save_store (struct file *file,
++ const char *buffer,
++ unsigned long count, void *data) {
++ dcss_device_t* device = data;
++ char* buf;
++ int rc,value;
++
++ if (device == NULL) {
++ rc = -EINVAL;
++ goto out_nobuf;
++ }
++ read_lock(&dcssblk_devices_lock);
++ /*
++ * fetch buffer from userland
++ */
++ buf = kmalloc(count + 1, GFP_ATOMIC);
++ if (buf == NULL) {
++ rc = -ENOMEM;
++ read_unlock(&dcssblk_devices_lock);
++ goto out_nobuf;
++ }
++ if (copy_from_user(buf, buffer, count)) {
++ rc = -EFAULT;
++ read_unlock(&dcssblk_devices_lock);
++ goto out;
++ }
++ buf[count] = '\0'; /* ensure termination for simple_strtoul */
++ value = simple_strtoul(buf, NULL, 10); /* keep buf intact for kfree below */
++ if (value) {
++ if (device->use_count == 0) {
++ /* device is idle => we save immediately */
++ PRINT_WARN ("saving segment %s\n", device->name);
++ dcssblk_do_save (device);
++ } else {
++ /* device is busy => we save it when it becomes
++ idle in dcssblk_release */
++ PRINT_WARN ("segment %s is currently busy\n",
++ device->name);
++ PRINT_WARN ("segment %s will be saved when it becomes idle\n",
++ device->name);
++ device->save_pending = 1;
++ }
++ } else {
++ if (device->save_pending) {
++ /* device is busy & the user wants to undo his save
++ request */
++ device->save_pending = 0;
++ PRINT_WARN ("deactivating pending save for segment %s\n",
++ device->name);
++ }
++ }
++ read_unlock(&dcssblk_devices_lock);
++ rc = count;
++ out:
++ kfree (buf);
++ out_nobuf:
++ return rc;
++}
++
++/*
++ * device attribute for showing status of "save pending"
++ */
++static int dcssblk_save_status(char *buffer, char **start, off_t offset,
++ int count, int *eof, void *data) {
++ dcss_device_t* device = data;
++
++ *eof = 1;
++ return sprintf(buffer, device->save_pending ? "1\n" : "0\n");
++}
++
++/*
++ * device attribute for adding devices
++ */
++static int dcssblk_add_store (struct file *file,
++ const char *buffer,
++ unsigned long count, void *data)
++{
++ int rc=0, i;
++ char* buf;
++ dcss_device_t* dcssdev;
++ struct list_head* entry;
++
++ MOD_INC_USE_COUNT; // released at end of func
++
++ /*
++ * fetch buffer from userland
++ */
++ buf = kmalloc(count+1,GFP_KERNEL);
++ if (buf == NULL) {
++ rc = -ENOMEM;
++ goto out_nobuf;
++ }
++ if (copy_from_user(buf, buffer, count)) {
++ rc = -EFAULT;
++ goto out;
++ }
++
++
++ /*
++ * check input parameters
++ */
++ for (i=0; (i<count && (*(buf+i)!='\0') && (*(buf+i)!='\n')); i++) {
++ buf[i] = toupper(buf[i]);
++ }
++ *(buf+i) = '\0';
++ if ((i==0) || (i>8)) {
++ rc = -ENAMETOOLONG;
++ goto out;
++ }
++ /*
++ * already loaded?
++ */
++ read_lock(&dcssblk_devices_lock);
++ list_for_each (entry, &dcssblk_devices) {
++ if (!strcmp(buf, list_entry(entry, dcss_device_t, lh)->name)) {
++ read_unlock(&dcssblk_devices_lock);
++ PRINT_WARN ("SEGMENT %s ALREADY LOADED!\n", buf);
++ rc = -EEXIST;
++ goto out;
++ }
++ }
++ read_unlock(&dcssblk_devices_lock);
++ /*
++ * try to get the dcss
++ */
++ dcssdev = kmalloc (sizeof(dcss_device_t),GFP_KERNEL);
++ if (dcssdev == NULL) {
++ rc = -ENOMEM;
++ goto out;
++ }
++ memset (dcssdev, 0, sizeof(dcss_device_t));
++ memcpy (dcssdev->name, buf, i);
++ dcssdev->name[i] = '\0'; /* i <= 8; name[] holds 9 bytes */
++ INIT_LIST_HEAD(&dcssdev->lh);
++ /*
++ * load the segment
++ */
++ rc = segment_load (dcssdev->name, SEGMENT_SHARED_RO,
++ &dcssdev->start, &dcssdev->end);
++ if (rc < 0) {
++ PRINT_WARN ("SEGMENT %s NOT LOADED RC=%d\n",
++ dcssdev->name, rc);
++ goto free_dcssdev;
++ }
++ if (rc == SEGMENT_EXCLUSIVE_RO || rc == SEGMENT_EXCLUSIVE_RW)
++ dcssdev->is_shared = 0;
++ else
++ dcssdev->is_shared = 1;
++ PRINT_WARN ("LOADED SEGMENT %s from %08lx to %08lx\n",dcssdev->name,dcssdev->start,dcssdev->end);
++ dcssdev->segment_type = rc;
++ dcssdev->save_pending = 0;
++ /*
++ * get minor
++ */
++ write_lock(&dcssblk_devices_lock);
++ rc = dcssblk_assign_free_minor(dcssdev);
++ if (rc) {
++ write_unlock (&dcssblk_devices_lock);
++ goto unload_seg;
++ }
++ /*
++ * create devfs device node
++ */
++ dcssdev->devfs_entry = devfs_register (dcssblk_devfs_dir,dcssdev->name,
++ DEVFS_FL_DEFAULT,
++ MAJOR(dcssdev->kdev),
++ MINOR(dcssdev->kdev),
++ S_IFBLK | S_IRUSR | S_IWUSR,
++ &dcssblk_devops,NULL);
++ /*
++ * create procfs subdirectory
++ */
++#ifdef CONFIG_PROC_FS
++ dcssdev->dcssblk_subdir_entry = proc_mkdir (dcssdev->name,
++ dcssblk_proc_root_entry);
++ dcssdev->dcssblk_shared_entry =
++ create_proc_entry ("shared",
++ S_IRUSR|S_IWUSR,
++ dcssdev->dcssblk_subdir_entry);
++ dcssdev->dcssblk_save_entry =
++ create_proc_entry ("save",
++ S_IRUSR|S_IWUSR,
++ dcssdev->dcssblk_subdir_entry);
++ dcssdev->dcssblk_shared_entry->write_proc =
++ dcssblk_shared_store;
++ dcssdev->dcssblk_shared_entry->read_proc =
++ dcssblk_shared_status;
++ dcssdev->dcssblk_save_entry->write_proc =
++ dcssblk_save_store;
++ dcssdev->dcssblk_save_entry->read_proc =
++ dcssblk_save_status;
++ dcssdev->dcssblk_shared_entry->data =
++ dcssdev;
++ dcssdev->dcssblk_save_entry->data =
++ dcssdev;
++#endif
++
++ /*
++ * enqueue
++ */
++ list_add_tail (&dcssdev->lh, &dcssblk_devices);
++ write_unlock(&dcssblk_devices_lock);
++ switch (dcssdev->segment_type) {
++ case SEGMENT_SHARED_RO:
++ case SEGMENT_EXCLUSIVE_RO:
++ set_device_ro (dcssdev->kdev, 1);
++ break;
++ case SEGMENT_SHARED_RW:
++ case SEGMENT_EXCLUSIVE_RW:
++ set_device_ro (dcssdev->kdev, 0);
++ break;
++ }
++ PRINT_DEBUG ("SEGMENT %s loaded successfully\n",
++ dcssdev->name);
++ rc = count;
++ MOD_INC_USE_COUNT; // second time -> permanent
++ goto out;
++ unload_seg:
++ segment_unload (dcssdev->name);
++ free_dcssdev:
++ kfree (dcssdev);
++ out:
++ kfree (buf);
++ out_nobuf:
++ MOD_DEC_USE_COUNT;
++ return rc;
++}
++
++/*
++ * device attribute for removing devices
++ */
++static int dcssblk_remove_store (struct file *file,
++ const char *buffer,
++ unsigned long count, void *data) {
++ dcss_device_t* device;
++ char * buf;
++ int rc=count,i;
++
++ MOD_INC_USE_COUNT;
++ /*
++ * fetch buffer from userland
++ */
++ buf = kmalloc(count + 1, GFP_KERNEL); /* +1 for the terminator below */
++ if (buf == NULL) {
++ rc = -ENOMEM;
++ goto out_nobuf;
++ }
++ if (copy_from_user(buf, buffer, count)) {
++ rc = -EFAULT;
++ goto out;
++ }
++ for (i=0; (i<count && (*(buf+i)!='\0') && (*(buf+i)!='\n')); i++) {
++ buf[i] = toupper(buf[i]);
++ }
++ *(buf+i) = '\0';
++ write_lock(&dcssblk_devices_lock);
++ device = dcssblk_get_device_by_name (buf);
++ if (device == NULL) {
++ rc = -ENODEV;
++ write_unlock (&dcssblk_devices_lock);
++ goto out;
++ }
++ if (device->use_count != 0) {
++ rc = -EBUSY;
++ write_unlock (&dcssblk_devices_lock);
++ goto out;
++ }
++ list_del(&device->lh);
++ write_unlock(&dcssblk_devices_lock);
++ segment_unload (device->name);
++#ifdef CONFIG_PROC_FS
++ remove_proc_entry("save", device->dcssblk_subdir_entry);
++ remove_proc_entry("shared", device->dcssblk_subdir_entry);
++ remove_proc_entry(device->name, dcssblk_proc_root_entry);
++#endif
++ devfs_unregister(device->devfs_entry );
++
++ PRINT_DEBUG ("SEGMENT %s unloaded successfully\n",
++ device->name);
++ kfree (device);
++ rc = count;
++ MOD_DEC_USE_COUNT; // permanent
++ out:
++ kfree (buf);
++ out_nobuf:
++ MOD_DEC_USE_COUNT;
++ return rc;
++}
++
++/*
++ * device attribute for listing devices
++ */
++static int dcssblk_list_store(char *buffer, char **start, off_t offset,
++ int count, int *eof, void *data)
++{
++ unsigned int minor, len;
++ struct list_head* entry;
++
++ len = 0;
++ read_lock(&dcssblk_devices_lock);
++ for (minor=0; minor< (1<<MINORBITS); minor++) {
++ // test if minor available
++ list_for_each (entry, &dcssblk_devices)
++ if (minor == MINOR((list_entry(entry, dcss_device_t,
++ lh)->kdev))) {
++ len += sprintf(buffer + len, "%i\t%s\n", minor,
++ list_entry(entry, dcss_device_t,
++ lh)->name);
++ }
++ }
++ read_unlock(&dcssblk_devices_lock);
++ *eof = 1;
++ return len;
++}
++
++static int dcssblk_open (struct inode *inode, struct file *filp)
++{
++ dcss_device_t *dcssdev;
++ int rc;
++ write_lock(&dcssblk_devices_lock);
++ if ((dcssdev=dcssblk_get_device_by_minor(MINOR(inode->i_rdev)))
++ == NULL) {
++ rc=-ENODEV;
++ goto out_unlock;
++ }
++ dcssdev->use_count ++;
++ rc = 0;
++ out_unlock:
++ write_unlock(&dcssblk_devices_lock);
++ return rc;
++}
++
++static int dcssblk_release (struct inode *inode, struct file *filp)
++{
++ dcss_device_t *dcssdev;
++ int rc;
++ write_lock(&dcssblk_devices_lock);
++ if ((dcssdev=dcssblk_get_device_by_minor(MINOR(inode->i_rdev)))
++ == NULL) {
++ rc=-ENODEV;
++ goto out_unlock;
++ }
++ dcssdev->use_count --;
++ if ((dcssdev->use_count==0) && (dcssdev->save_pending)) {
++ PRINT_WARN ("Segment %s became idle and is being saved\n",
++ dcssdev->name);
++ dcssblk_do_save (dcssdev);
++ dcssdev->save_pending = 0;
++ }
++ rc = 0;
++ out_unlock:
++ write_unlock(&dcssblk_devices_lock);
++ return rc;
++}
++
++static int dcssblk_make_request (request_queue_t * q, int rw,
++ struct buffer_head * bh) {
++ dcss_device_t *dcssdev;
++ unsigned long index;
++ unsigned long page_addr;
++ unsigned long source_addr;
++ unsigned long bytes;
++
++ read_lock(&dcssblk_devices_lock);
++ dcssdev = dcssblk_get_device_by_minor (MINOR(bh->b_rdev));
++ read_unlock(&dcssblk_devices_lock);
++
++ if (dcssdev == NULL)
++ /* No such device. */
++ goto fail;
++ if ((bh->b_rsector & 3) != 0 || (bh->b_size & 4095) != 0)
++ /* Request is not page-aligned. */
++ goto fail;
++ if ((bh->b_size + (bh->b_rsector<<9))
++ > (dcssdev->end - dcssdev->start + 1))
++ /* Request beyond end of DCSS segment. */
++ goto fail;
++ index = (bh->b_rsector >> 3);
++ page_addr = (unsigned long) bh->b_data;
++ source_addr = dcssdev->start + (index<<12);
++ bytes = bh->b_size;
++ if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
++ /* More paranoia. */
++ goto fail;
++ if ((rw == READ) || (rw == READA)) {
++ memcpy ((void*)page_addr, (void*)source_addr,
++ bytes);
++ } else {
++ memcpy ((void*)source_addr, (void*)page_addr,
++ bytes);
++ }
++ bh->b_end_io(bh, 1);
++ return 0;
++ fail:
++ bh->b_end_io(bh, 0);
++ return 0;
++}
++
++
++/*
++ * setup the block device related things
++ */
++static int __init dcssblk_setup_blkdev(void)
++{
++ request_queue_t *q;
++ int i,rc;
++
++ for (i=0; i < (1<<MINORBITS); i++)
++ dcssblk_blksizes[i] = PAGE_SIZE;
++
++ /*
++ * Register blockdev
++ */
++ rc = register_blkdev(0, DCSSBLK_NAME, &dcssblk_devops);
++ if (rc < 0) {
++ PRINT_ERR("Can't get register major\n");
++ return rc;
++ }
++ dcssblk_major = rc;
++ blksize_size[dcssblk_major] = dcssblk_blksizes;
++ hardsect_size[dcssblk_major] = dcssblk_blksizes;
++
++
++ /*
++ * Assign the other needed values: make request function, sizes and
++ * hardsect size. All the minor devices feature the same value.
++ */
++
++ q = BLK_DEFAULT_QUEUE(dcssblk_major);
++ blk_queue_make_request (q, dcssblk_make_request);
++ return 0;
++}
++
++static void dcssblk_unregister_blkdev(void) {
++ int rc;
++ rc = unregister_blkdev (dcssblk_major, DCSSBLK_NAME);
++ if (rc) {
++ PRINT_ERR ("Can't unregister blockdev\n");
++ }
++}
++
++/*
++ * Register procfs entries
++ */
++static int dcssblk_register_procfs(void)
++{
++#ifdef CONFIG_PROC_FS
++ dcssblk_proc_root_entry = proc_mkdir ("dcssblk", &proc_root);
++ dcssblk_add_entry = create_proc_entry ("add",
++ S_IFREG | S_IWUSR,
++ dcssblk_proc_root_entry);
++
++ dcssblk_remove_entry = create_proc_entry ("remove",
++ S_IFREG | S_IWUSR,
++ dcssblk_proc_root_entry);
++
++ dcssblk_list_entry = create_proc_entry ("list",
++ S_IFREG | S_IRUGO,
++ dcssblk_proc_root_entry);
++
++ dcssblk_add_entry->write_proc = dcssblk_add_store;
++ dcssblk_add_entry->owner = THIS_MODULE;
++ dcssblk_remove_entry->write_proc = dcssblk_remove_store;
++ dcssblk_remove_entry->owner = THIS_MODULE;
++ dcssblk_list_entry->read_proc = dcssblk_list_store;
++ dcssblk_list_entry->owner = THIS_MODULE;
++#endif
++ dcssblk_devfs_dir = devfs_mk_dir (NULL,
++ "dcssblk",
++ NULL);
++ return 0;
++
++}
++
++static void dcssblk_unregister_procfs(void) {
++ devfs_unregister(dcssblk_devfs_dir);
++#ifdef CONFIG_PROC_FS
++ remove_proc_entry ("list", dcssblk_proc_root_entry);
++ remove_proc_entry ("remove", dcssblk_proc_root_entry);
++ remove_proc_entry ("add", dcssblk_proc_root_entry);
++ remove_proc_entry ("dcssblk", NULL);
++#endif
++}
++
++/*
++ * Finally, the init/exit functions.
++ */
++static void __exit dcssblk_exit(void)
++{
++ dcssblk_unregister_procfs();
++ dcssblk_unregister_blkdev();
++}
++
++static int __init dcssblk_init(void)
++{
++ int rc;
++ PRINT_DEBUG ("DCSSBLOCK INIT\n");
++ rc = dcssblk_register_procfs();
++ if (rc) goto out;
++ rc = dcssblk_setup_blkdev();
++ if (rc) {
++ dcssblk_unregister_procfs();
++ goto out;
++ }
++ out:
++ return rc;
++}
++
++module_init(dcssblk_init);
++module_exit(dcssblk_exit);
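dcssblk_make_request maps block requests straight onto the memory window of the loaded segment: b_rsector counts 512-byte sectors, so shifting right by 3 yields the 4K page index and shifting left by 12 turns that back into a byte offset. Worked through with illustrative figures (not taken from a real device):

    /* Example: b_rsector = 24, b_size = 8192
     *   index       = 24 >> 3          = 3   (4K page index)
     *   source_addr = start + (3 << 12)      (byte address in segment)
     * Requests that are not page aligned, or that run past
     * dcssdev->end, are failed before any copy happens. */
    static unsigned long
    dcss_sector_to_addr(unsigned long start, unsigned long b_rsector)
    {
            unsigned long index = b_rsector >> 3;   /* 8 sectors per page */

            return start + (index << 12);           /* pages to bytes */
    }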
+=== drivers/s390/block/dasd_int.h
+==================================================================
+--- drivers/s390/block/dasd_int.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/block/dasd_int.h (/trunk/2.4.27) (revision 52)
+@@ -5,7 +5,7 @@
+ * Bugreports.to..: <Linux390 at de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ *
+- * $Revision: 1.36 $
++ * $Revision: 1.30.4.5 $
+ *
+ * History of changes (starts July 2000)
+ * 02/01/01 added dynamic registration of ioctls
+@@ -15,6 +15,7 @@
+ #define DASD_INT_H
+
+ #include <asm/dasd.h>
++#include <asm/cmb.h>
+
+ #define CONFIG_DASD_DYNAMIC
+
+@@ -443,6 +444,7 @@
+ atomic_t plugged;
+ int stopped; /* device (do_IO) was stopped */
+ struct list_head lowmem_pool;
++ struct cmf_device cdev;
+ } dasd_device_t;
+
+ /* reasons why device (do_IO) was stopped */
+=== drivers/s390/block/dasd_cmb.c
+==================================================================
+--- drivers/s390/block/dasd_cmb.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/block/dasd_cmb.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,236 @@
++/*
++ * linux/drivers/s390/block/dasd_cmb.c ($Revision: 1.7.6.2 $)
++ *
++ * Linux on zSeries Channel Measurement Facility support
++ * (dasd device driver interface)
++ *
++ * Copyright 2000,2003 IBM Corporation
++ *
++ * Author: Arnd Bergmann <arndb at de.ibm.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <asm/cmb.h>
++#include <asm/ioctl32.h>
++
++#include "dasd_int.h"
++
++/* This mutex protects us from a race between enable and disable for
++ * a single device. Making it global instead of per device reduces
++ * the memory requirement and makes it possible to use a single
++ * completion handler and return value */
++static DECLARE_MUTEX(cmf_setup_mutex);
++static DECLARE_COMPLETION(cmf_setup_completion);
++static int cmf_setup_return;
++
++static void
++dasd_cmf_enable_callback(struct cmf_device *cdev)
++{
++ cdev->callback = NULL;
++ cmf_setup_return = set_cmf(cdev, 2);
++ complete(&cmf_setup_completion);
++}
++
++static void
++dasd_cmf_disable_callback(struct cmf_device *cdev)
++{
++ cdev->callback = NULL;
++ cmf_setup_return = set_cmf(cdev, 0);
++ complete(&cmf_setup_completion);
++}
++
++static inline int
++dasd_cmf_device_busy(struct dasd_device_t *device)
++{
++ ccw_req_t *cqr;
++ for (cqr = device->queue.head; cqr; cqr = cqr->next) {
++ if (cqr->status == CQR_STATUS_IN_IO)
++ return 1;
++ }
++ return 0;
++}
++
++static int
++dasd_ioctl_cmf_enable(void *inp, int no, long args)
++{
++ struct dasd_device_t *device;
++ int ret;
++
++ device = dasd_device_from_kdev (((struct inode*)inp)->i_rdev);
++ if (!device)
++ return -EINVAL;
++
++ if (down_interruptible(&cmf_setup_mutex))
++ return -ERESTARTSYS;
++
++ /* the device may already be enabled, in this case
++ we just reset the cmb to 0 */
++ if (!list_empty(&device->cdev.cmb_list)) {
++ ret = 0;
++ goto out_reset;
++ }
++
++ ret = enable_cmf(&device->cdev);
++ if (ret)
++ goto out;
++
++ MOD_INC_USE_COUNT;
++
++ spin_lock_irq(device->cdev.ccwlock);
++ if (!dasd_cmf_device_busy(device)) {
++ ret = set_cmf(&device->cdev, 2);
++ spin_unlock_irq(device->cdev.ccwlock);
++ } else {
++ device->cdev.callback = &dasd_cmf_enable_callback;
++ spin_unlock_irq(device->cdev.ccwlock);
++ wait_for_completion(&cmf_setup_completion);
++ ret = cmf_setup_return;
++ }
++
++ if (ret) {
++ disable_cmf(&device->cdev);
++ MOD_DEC_USE_COUNT;
++ }
++
++out_reset:
++ cmf_reset(&device->cdev);
++out:
++ up(&cmf_setup_mutex);
++ return ret;
++}
++
++static int
++dasd_ioctl_cmf_disable(void *inp, int no, long args)
++{
++ struct dasd_device_t *device;
++ int ret;
++
++ device = dasd_device_from_kdev (((struct inode*)inp)->i_rdev);
++ if (!device)
++ return -EINVAL;
++
++ if (down_interruptible(&cmf_setup_mutex))
++ return -ERESTARTSYS;
++
++ spin_lock_irq(device->cdev.ccwlock);
++
++ if (!dasd_cmf_device_busy(device)) {
++ ret = set_cmf(&device->cdev, 0);
++ spin_unlock_irq(device->cdev.ccwlock);
++ } else {
++ device->cdev.callback = &dasd_cmf_disable_callback;
++ spin_unlock_irq(device->cdev.ccwlock);
++ wait_for_completion(&cmf_setup_completion);
++ ret = cmf_setup_return;
++ }
++
++ if (!ret) {
++ disable_cmf(&device->cdev);
++ MOD_DEC_USE_COUNT;
++ }
++ up(&cmf_setup_mutex);
++ return ret;
++
++}
++
++static int
++dasd_ioctl_readall_cmb(void *inp, int no, long args)
++{
++ struct dasd_device_t *device;
++ struct cmbdata * udata;
++ struct cmbdata data;
++ size_t size;
++ int ret;
++
++ device = dasd_device_from_kdev (((struct inode*)inp)->i_rdev);
++ if (!device)
++ return -EINVAL;
++ udata = (void *) args;
++ size = _IOC_SIZE(no);
++
++ if (!access_ok(VERIFY_WRITE, udata, size))
++ return -EFAULT;
++ ret = cmf_readall(&device->cdev, &data);
++ if (ret)
++ return ret;
++ if (copy_to_user(udata, &data, min(size, sizeof(*udata))))
++ return -EFAULT;
++ return 0;
++}
++
++/* module initialization below here. dasd already provides a mechanism
++ * to dynamically register ioctl functions, so we simply use this.
++ * FIXME: register ioctl32 functions as well. */
++static inline int
++ioctl_reg(unsigned int no, dasd_ioctl_fn_t handler)
++{
++ int ret;
++ ret = dasd_ioctl_no_register(THIS_MODULE, no, handler);
++ if (ret)
++ return ret;
++
++ ret = register_ioctl32_conversion(no, sys_ioctl);
++ if (ret)
++ dasd_ioctl_no_unregister(THIS_MODULE, no, handler);
++
++ return ret;
++}
++
++static inline void
++ioctl_unreg(unsigned int no, dasd_ioctl_fn_t handler)
++{
++ dasd_ioctl_no_unregister(THIS_MODULE, no, handler);
++ unregister_ioctl32_conversion(no);
++
++}
++
++static void
++dasd_cmf_exit(void)
++{
++ ioctl_unreg(BIODASDCMFENABLE, dasd_ioctl_cmf_enable);
++ ioctl_unreg(BIODASDCMFDISABLE, dasd_ioctl_cmf_disable);
++ ioctl_unreg(BIODASDREADALLCMB, dasd_ioctl_readall_cmb);
++}
++
++static int __init
++dasd_cmf_init(void)
++{
++ int ret;
++ ret = ioctl_reg (BIODASDCMFENABLE, dasd_ioctl_cmf_enable);
++ if (ret)
++ goto err;
++ ret = ioctl_reg (BIODASDCMFDISABLE, dasd_ioctl_cmf_disable);
++ if (ret)
++ goto err;
++ ret = ioctl_reg (BIODASDREADALLCMB, dasd_ioctl_readall_cmb);
++ if (ret)
++ goto err;
++
++ return 0;
++err:
++ dasd_cmf_exit();
++
++ return ret;
++}
++
++module_init(dasd_cmf_init);
++module_exit(dasd_cmf_exit);
++
++MODULE_AUTHOR("Arnd Bergmann <arndb at de.ibm.com>");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("channel measurement facility interface for dasd\n"
++ "Copyright 2003 IBM Corporation\n");
+=== drivers/s390/block/Makefile
+==================================================================
+--- drivers/s390/block/Makefile (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/block/Makefile (/trunk/2.4.27) (revision 52)
+@@ -17,6 +17,8 @@
+ obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
+ obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o
+ obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
++obj-$(CONFIG_DCSSBLK) += dcssblk.o
++obj-$(CONFIG_S390_CMF) += dasd_cmb.o
+
+ include $(TOPDIR)/Rules.make
+
+@@ -31,4 +33,3 @@
+
+ dasd_diag_mod.o: $(dasd_diag_mod-objs)
+ $(LD) -r -o $@ $(dasd_diag_mod-objs)
+-
+=== drivers/s390/misc/z90common.h
+==================================================================
+--- drivers/s390/misc/z90common.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/misc/z90common.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,149 @@
++/*
++ * linux/drivers/s390/misc/z90common.h
++ *
++ * z90crypt 1.3.2
++ *
++ * Copyright (C) 2001, 2004 IBM Corporation
++ * Author(s): Robert Burroughs (burrough at us.ibm.com)
++ * Eric Rossman (edrossma at us.ibm.com)
++ *
++ * Hotplug & misc device support: Jochen Roehrig (roehrig at de.ibm.com)
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++#ifndef _Z90COMMON_
++#define _Z90COMMON_
++#define VERSION_Z90COMMON_H "$Revision: 1.4.6.9 $"
++#define RESPBUFFSIZE 256
++#define PCI_FUNC_KEY_DECRYPT 0x5044
++#define PCI_FUNC_KEY_ENCRYPT 0x504B
++extern int ext_bitlens;
++enum devstat {
++ DEV_GONE,
++ DEV_ONLINE,
++ DEV_QUEUE_FULL,
++ DEV_EMPTY,
++ DEV_NO_WORK,
++ DEV_BAD_MESSAGE,
++ DEV_TSQ_EXCEPTION,
++ DEV_RSQ_EXCEPTION,
++ DEV_SEN_EXCEPTION,
++ DEV_REC_EXCEPTION
++};
++enum hdstat {
++ HD_NOT_THERE,
++ HD_BUSY,
++ HD_DECONFIGURED,
++ HD_CHECKSTOPPED,
++ HD_ONLINE,
++ HD_TSQ_EXCEPTION
++};
++#define Z90C_NO_DEVICES 1
++#define Z90C_AMBIGUOUS_DOMAIN 2
++#define Z90C_INCORRECT_DOMAIN 3
++#define ENOTINIT 4
++#define SEN_BUSY 7
++#define SEN_USER_ERROR 8
++#define SEN_QUEUE_FULL 11
++#define SEN_NOT_AVAIL 16
++#define SEN_PAD_ERROR 17
++#define SEN_RETRY 18
++#define SEN_RELEASED 24
++#define REC_EMPTY 4
++#define REC_BUSY 6
++#define REC_OPERAND_INV 8
++#define REC_OPERAND_SIZE 9
++#define REC_EVEN_MOD 10
++#define REC_NO_WORK 11
++#define REC_HARDWAR_ERR 12
++#define REC_NO_RESPONSE 13
++#define REC_RETRY_DEV 14
++#define REC_USER_GONE 15
++#define REC_BAD_MESSAGE 16
++#define REC_INVALID_PAD 17
++#define REC_USE_PCICA 18
++#define WRONG_DEVICE_TYPE 20
++#define REC_FATAL_ERROR 32
++#define SEN_FATAL_ERROR 33
++#define TSQ_FATAL_ERROR 34
++#define RSQ_FATAL_ERROR 35
++#define Z90CRYPT_NUM_TYPES 5
++#define PCICA 0
++#define PCICC 1
++#define PCIXCC_MCL2 2
++#define PCIXCC_MCL3 3
++#define CEX2C 4
++#define NILDEV -1
++#define ANYDEV -1
++#define PCIXCC_UNK -2
++enum hdevice_type {
++ PCICC_HW = 3,
++ PCICA_HW = 4,
++ PCIXCC_HW = 5,
++ OTHER_HW = 6,
++ CEX2C_HW = 7
++};
++struct CPRBX {
++ unsigned short cprb_len;
++ unsigned char cprb_ver_id;
++ unsigned char pad_000[3];
++ unsigned char func_id[2];
++ unsigned char cprb_flags[4];
++ unsigned int req_parml;
++ unsigned int req_datal;
++ unsigned int rpl_msgbl;
++ unsigned int rpld_parml;
++ unsigned int rpl_datal;
++ unsigned int rpld_datal;
++ unsigned int req_extbl;
++ unsigned char pad_001[4];
++ unsigned int rpld_extbl;
++ unsigned char req_parmb[16];
++ unsigned char req_datab[16];
++ unsigned char rpl_parmb[16];
++ unsigned char rpl_datab[16];
++ unsigned char req_extb[16];
++ unsigned char rpl_extb[16];
++ unsigned short ccp_rtcode;
++ unsigned short ccp_rscode;
++ unsigned int mac_data_len;
++ unsigned char logon_id[8];
++ unsigned char mac_value[8];
++ unsigned char mac_content_flgs;
++ unsigned char pad_002;
++ unsigned short domain;
++ unsigned char pad_003[12];
++ unsigned char pad_004[36];
++};
++#ifndef DEV_NAME
++#define DEV_NAME "z90crypt"
++#endif
++#define PRINTK(fmt, args...) \
++ printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
++#define PRINTKN(fmt, args...) \
++ printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
++#define PRINTKW(fmt, args...) \
++ printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
++#define PRINTKC(fmt, args...) \
++ printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
++#ifdef Z90CRYPT_DEBUG
++#define PDEBUG(fmt, args...) \
++ printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
++#else
++#define PDEBUG(fmt, args...) do {} while (0)
++#endif
++#define UMIN(a,b) ((a) < (b) ? (a) : (b))
++#define IS_EVEN(x) ((x) == (2 * ((x) / 2)))
++#endif
+=== drivers/s390/misc/z90hardware.c
+==================================================================
+--- drivers/s390/misc/z90hardware.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/misc/z90hardware.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,2092 @@
++/*
++ * linux/drivers/s390/misc/z90hardware.c
++ *
++ * z90crypt 1.3.2
++ *
++ * Copyright (C) 2001, 2004 IBM Corporation
++ * Author(s): Robert Burroughs (burrough at us.ibm.com)
++ * Eric Rossman (edrossma at us.ibm.com)
++ *
++ * Hotplug & misc device support: Jochen Roehrig (roehrig at de.ibm.com)
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++#include <asm/uaccess.h>
++#include <linux/compiler.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include "z90crypt.h"
++#include "z90common.h"
++#define VERSION_Z90HARDWARE_C "$Revision: 1.7.6.10 $"
++char z90chardware_version[] __initdata =
++ "z90hardware.o (" VERSION_Z90HARDWARE_C "/"
++ VERSION_Z90COMMON_H "/" VERSION_Z90CRYPT_H ")";
++struct cca_token_hdr {
++ unsigned char token_identifier;
++ unsigned char version;
++ unsigned short token_length;
++ unsigned char reserved[4];
++};
++#define CCA_TKN_HDR_ID_EXT 0x1E
++struct cca_private_ext_ME_sec {
++ unsigned char section_identifier;
++ unsigned char version;
++ unsigned short section_length;
++ unsigned char private_key_hash[20];
++ unsigned char reserved1[4];
++ unsigned char key_format;
++ unsigned char reserved2;
++ unsigned char key_name_hash[20];
++ unsigned char key_use_flags[4];
++ unsigned char reserved3[6];
++ unsigned char reserved4[24];
++ unsigned char confounder[24];
++ unsigned char exponent[128];
++ unsigned char modulus[128];
++};
++#define CCA_PVT_USAGE_ALL 0x80
++struct cca_public_sec {
++ unsigned char section_identifier;
++ unsigned char version;
++ unsigned short section_length;
++ unsigned char reserved[2];
++ unsigned short exponent_len;
++ unsigned short modulus_bit_len;
++ unsigned short modulus_byte_len;
++ unsigned char exponent[3];
++};
++struct cca_private_ext_ME {
++ struct cca_token_hdr pvtMEHdr;
++ struct cca_private_ext_ME_sec pvtMESec;
++ struct cca_public_sec pubMESec;
++};
++struct cca_public_key {
++ struct cca_token_hdr pubHdr;
++ struct cca_public_sec pubSec;
++};
++struct cca_pvt_ext_CRT_sec {
++ unsigned char section_identifier;
++ unsigned char version;
++ unsigned short section_length;
++ unsigned char private_key_hash[20];
++ unsigned char reserved1[4];
++ unsigned char key_format;
++ unsigned char reserved2;
++ unsigned char key_name_hash[20];
++ unsigned char key_use_flags[4];
++ unsigned short p_len;
++ unsigned short q_len;
++ unsigned short dp_len;
++ unsigned short dq_len;
++ unsigned short u_len;
++ unsigned short mod_len;
++ unsigned char reserved3[4];
++ unsigned short pad_len;
++ unsigned char reserved4[52];
++ unsigned char confounder[8];
++};
++#define CCA_PVT_EXT_CRT_SEC_ID_PVT 0x08
++#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40
++struct cca_private_ext_CRT {
++ struct cca_token_hdr pvtCrtHdr;
++ struct cca_pvt_ext_CRT_sec pvtCrtSec;
++ struct cca_public_sec pubCrtSec;
++};
++struct ap_status_word {
++ unsigned char q_stat_flags;
++ unsigned char response_code;
++ unsigned char reserved[2];
++};
++#define AP_Q_STATUS_EMPTY 0x80
++#define AP_Q_STATUS_REPLIES_WAITING 0x40
++#define AP_Q_STATUS_ARRAY_FULL 0x20
++#define AP_RESPONSE_NORMAL 0x00
++#define AP_RESPONSE_Q_NOT_AVAIL 0x01
++#define AP_RESPONSE_RESET_IN_PROGRESS 0x02
++#define AP_RESPONSE_DECONFIGURED 0x03
++#define AP_RESPONSE_CHECKSTOPPED 0x04
++#define AP_RESPONSE_BUSY 0x05
++#define AP_RESPONSE_Q_FULL 0x10
++#define AP_RESPONSE_NO_PENDING_REPLY 0x10
++#define AP_RESPONSE_INDEX_TOO_BIG 0x11
++#define AP_RESPONSE_NO_FIRST_PART 0x13
++#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
++#define AP_MAX_CDX_BITL 4
++#define AP_RQID_RESERVED_BITL 4
++#define SKIP_BITL (AP_MAX_CDX_BITL + AP_RQID_RESERVED_BITL)
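++/*
++ * An AP queue is addressed by the device index shifted left by
++ * SKIP_BITL with the cryptographic domain index (cdx) added in; the
++ * AP_RESPONSE_* values above are the response_code returned in an
++ * ap_status_word.  The message formats that follow are: type4 for
++ * PCICA requests, type6 (carrying a CPRB or CPRBX) for PCICC,
++ * PCIXCC and CEX2C requests, and type82/type84/type86 for replies.
++ */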
++struct type4_hdr {
++ unsigned char reserved1;
++ unsigned char msg_type_code;
++ unsigned short msg_len;
++ unsigned char request_code;
++ unsigned char msg_fmt;
++ unsigned short reserved2;
++};
++#define TYPE4_TYPE_CODE 0x04
++#define TYPE4_REQU_CODE 0x40
++#define TYPE4_SME_LEN 0x0188
++#define TYPE4_LME_LEN 0x0308
++#define TYPE4_SCR_LEN 0x01E0
++#define TYPE4_LCR_LEN 0x03A0
++#define TYPE4_SME_FMT 0x00
++#define TYPE4_LME_FMT 0x10
++#define TYPE4_SCR_FMT 0x40
++#define TYPE4_LCR_FMT 0x50
++struct type4_sme {
++ struct type4_hdr header;
++ unsigned char message[128];
++ unsigned char exponent[128];
++ unsigned char modulus[128];
++};
++struct type4_lme {
++ struct type4_hdr header;
++ unsigned char message[256];
++ unsigned char exponent[256];
++ unsigned char modulus[256];
++};
++struct type4_scr {
++ struct type4_hdr header;
++ unsigned char message[128];
++ unsigned char dp[72];
++ unsigned char dq[64];
++ unsigned char p[72];
++ unsigned char q[64];
++ unsigned char u[72];
++};
++struct type4_lcr {
++ struct type4_hdr header;
++ unsigned char message[256];
++ unsigned char dp[136];
++ unsigned char dq[128];
++ unsigned char p[136];
++ unsigned char q[128];
++ unsigned char u[136];
++};
++union type4_msg {
++ struct type4_sme sme;
++ struct type4_lme lme;
++ struct type4_scr scr;
++ struct type4_lcr lcr;
++};
++struct type84_hdr {
++ unsigned char reserved1;
++ unsigned char code;
++ unsigned short len;
++ unsigned char reserved2[4];
++};
++#define TYPE84_RSP_CODE 0x84
++struct type6_hdr {
++ unsigned char reserved1;
++ unsigned char type;
++ unsigned char reserved2[2];
++ unsigned char right[4];
++ unsigned char reserved3[2];
++ unsigned char reserved4[2];
++ unsigned char apfs[4];
++ unsigned int offset1;
++ unsigned int offset2;
++ unsigned int offset3;
++ unsigned int offset4;
++ unsigned char agent_id[16];
++ /* agent_id: 0x0100 "CCA-APPL" 0x010101 for the PCICC, */
++ /* "CA" plus zero fill for the PCIXCC (see the */
++ /* static_type6_hdr initializers below) */
++ unsigned char rqid[2];
++ unsigned char reserved5[2];
++ unsigned char function_code[2];
++ unsigned char reserved6[2];
++ unsigned int ToCardLen1;
++ unsigned int ToCardLen2;
++ unsigned int ToCardLen3;
++ unsigned int ToCardLen4;
++ unsigned int FromCardLen1;
++ unsigned int FromCardLen2;
++ unsigned int FromCardLen3;
++ unsigned int FromCardLen4;
++};
++struct CPRB {
++ unsigned char cprb_len[2];
++ unsigned char cprb_ver_id;
++ unsigned char pad_000;
++ unsigned char srpi_rtcode[4];
++ unsigned char srpi_verb;
++ unsigned char flags;
++ unsigned char func_id[2];
++ unsigned char checkpoint_flag;
++ unsigned char resv2;
++ unsigned char req_parml[2]; /* request parameter buffer length */
++ unsigned char req_parmp[4]; /* request parameter buffer pointer */
++ unsigned char req_datal[4]; /* request data buffer length */
++ unsigned char req_datap[4]; /* request data buffer pointer */
++ unsigned char rpl_parml[2]; /* reply parameter buffer length */
++ unsigned char pad_001[2];
++ unsigned char rpl_parmp[4]; /* reply parameter buffer pointer */
++ unsigned char rpl_datal[4]; /* reply data buffer length */
++ unsigned char rpl_datap[4]; /* reply data buffer pointer */
++ unsigned char ccp_rscode[2];
++ unsigned char ccp_rtcode[2];
++ unsigned char repd_parml[2];
++ unsigned char mac_data_len[2];
++ unsigned char repd_datal[4];
++ unsigned char req_pc[2];
++ unsigned char res_origin[8];
++ unsigned char mac_value[8];
++ unsigned char logon_id[8];
++ unsigned char usage_domain[2];
++ unsigned char resv3[18];
++ unsigned char svr_namel[2];
++ unsigned char svr_name[8];
++};
++struct type6_msg {
++ struct type6_hdr header;
++ struct CPRB CPRB;
++};
++union request_msg {
++ union type4_msg t4msg;
++ struct type6_msg t6msg;
++};
++struct request_msg_ext {
++ int q_nr;
++ unsigned char *psmid;
++ union request_msg reqMsg;
++};
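++/*
++ * request_msg_ext is an overlay for the buffer handed to sen(): a
++ * caller header of the 4-byte AP queue number followed by the 8-byte
++ * program-supplied message id (psmid, as sen() and rec() read it),
++ * then the request proper.  send_to_AP() fills in q_nr;
++ * CALLER_HEADER below is the 12-byte size of this prefix.
++ */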
++struct type82_hdr {
++ unsigned char reserved1;
++ unsigned char type;
++ unsigned char reserved2[2];
++ unsigned char reply_code;
++ unsigned char reserved3[3];
++};
++#define TYPE82_RSP_CODE 0x82
++#define REPLY_ERROR_MACHINE_FAILURE 0x10
++#define REPLY_ERROR_PREEMPT_FAILURE 0x12
++#define REPLY_ERROR_CHECKPT_FAILURE 0x14
++#define REPLY_ERROR_MESSAGE_TYPE 0x20
++#define REPLY_ERROR_INVALID_COMM_CD 0x21
++#define REPLY_ERROR_INVALID_MSG_LEN 0x23
++#define REPLY_ERROR_RESERVD_FIELD 0x24
++#define REPLY_ERROR_FORMAT_FIELD 0x29
++#define REPLY_ERROR_INVALID_COMMAND 0x30
++#define REPLY_ERROR_MALFORMED_MSG 0x40
++#define REPLY_ERROR_RESERVED_FIELDO 0x50
++#define REPLY_ERROR_WORD_ALIGNMENT 0x60
++#define REPLY_ERROR_MESSAGE_LENGTH 0x80
++#define REPLY_ERROR_OPERAND_INVALID 0x82
++#define REPLY_ERROR_OPERAND_SIZE 0x84
++#define REPLY_ERROR_EVEN_MOD_IN_OPND 0x85
++#define REPLY_ERROR_RESERVED_FIELD 0x88
++#define REPLY_ERROR_TRANSPORT_FAIL 0x90
++#define REPLY_ERROR_PACKET_TRUNCATED 0xA0
++#define REPLY_ERROR_ZERO_BUFFER_LEN 0xB0
++struct type86_hdr {
++ unsigned char reserved1;
++ unsigned char type;
++ unsigned char format;
++ unsigned char reserved2;
++ unsigned char reply_code;
++ unsigned char reserved3[3];
++};
++#define TYPE86_RSP_CODE 0x86
++#define TYPE86_FMT2 0x02
++struct type86_fmt2_msg {
++ struct type86_hdr hdr;
++ unsigned char reserved[4];
++ unsigned char apfs[4];
++ unsigned int count1;
++ unsigned int offset1;
++ unsigned int count2;
++ unsigned int offset2;
++ unsigned int count3;
++ unsigned int offset3;
++ unsigned int count4;
++ unsigned int offset4;
++};
++static struct type6_hdr static_type6_hdr = {
++ 0x00,
++ 0x06,
++ {0x00,0x00},
++ {0x00,0x00,0x00,0x00},
++ {0x00,0x00},
++ {0x00,0x00},
++ {0x00,0x00,0x00,0x00},
++ 0x00000058,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
++ 0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
++ {0x00,0x00},
++ {0x00,0x00},
++ {0x50,0x44},
++ {0x00,0x00},
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000
++};
++static struct type6_hdr static_type6_hdrX = {
++ 0x00,
++ 0x06,
++ {0x00,0x00},
++ {0x00,0x00,0x00,0x00},
++ {0x00,0x00},
++ {0x00,0x00},
++ {0x00,0x00,0x00,0x00},
++ 0x00000058,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ {0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
++ {0x00,0x00},
++ {0x00,0x00},
++ {0x50,0x44},
++ {0x00,0x00},
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000
++};
++static struct CPRB static_cprb = {
++ {0x70,0x00},
++ 0x41,
++ 0x00,
++ {0x00,0x00,0x00,0x00},
++ 0x00,
++ 0x00,
++ {0x54,0x32},
++ 0x01,
++ 0x00,
++ {0x00,0x00},
++ {0x00,0x00,0x00,0x00},
++ {0x00,0x00,0x00,0x00},
++ {0x00,0x00,0x00,0x00},
++ {0x00,0x00},
++ {0x00,0x00},
++ {0x00,0x00,0x00,0x00},
++ {0x00,0x00,0x00,0x00},
++ {0x00,0x00,0x00,0x00},
++ {0x00,0x00},
++ {0x00,0x00},
++ {0x00,0x00},
++ {0x00,0x00},
++ {0x00,0x00,0x00,0x00},
++ {0x00,0x00},
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
++ {0x00,0x00},
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00},
++ {0x08,0x00},
++ {0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20}
++};
++struct function_and_rules_block {
++ unsigned char function_code[2];
++ unsigned char ulen[2];
++ unsigned char only_rule[8];
++};
++static struct function_and_rules_block static_pkd_function_and_rules = {
++ {0x50,0x44},
++ {0x0A,0x00},
++ {'P','K','C','S','-','1','.','2'}
++};
++static struct function_and_rules_block static_pke_function_and_rules = {
++ {0x50,0x4B},
++ {0x0A,0x00},
++ {'P','K','C','S','-','1','.','2'}
++};
++struct T6_keyBlock_hdr {
++ unsigned char blen[2];
++ unsigned char ulen[2];
++ unsigned char flags[2];
++};
++static struct T6_keyBlock_hdr static_T6_keyBlock_hdr = {
++ {0x89,0x01},
++ {0x87,0x01},
++ {0x00}
++};
++static struct CPRBX static_cprbx = {
++ 0x00DC,
++ 0x02,
++ {0x00,0x00,0x00},
++ {0x54,0x32},
++ {0x00,0x00,0x00,0x00},
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ {0x00,0x00,0x00,0x00},
++ 0x00000000,
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
++ 0x0000,
++ 0x0000,
++ 0x00000000,
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
++ 0x00,
++ 0x00,
++ 0x0000,
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}
++};
++static struct function_and_rules_block static_pkd_function_and_rulesX_MCL2 = {
++ {0x50,0x44},
++ {0x00,0x0A},
++ {'P','K','C','S','-','1','.','2'}
++};
++static struct function_and_rules_block static_pke_function_and_rulesX_MCL2 = {
++ {0x50,0x4B},
++ {0x00,0x0A},
++ {'Z','E','R','O','-','P','A','D'}
++};
++static struct function_and_rules_block static_pkd_function_and_rulesX = {
++ {0x50,0x44},
++ {0x00,0x0A},
++ {'Z','E','R','O','-','P','A','D'}
++};
++static struct function_and_rules_block static_pke_function_and_rulesX = {
++ {0x50,0x4B},
++ {0x00,0x0A},
++ {'M','R','P',' ',' ',' ',' ',' '}
++};
++struct T6_keyBlock_hdrX {
++ unsigned short blen;
++ unsigned short ulen;
++ unsigned char flags[2];
++};
++static unsigned char static_pad[256] = {
++0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
++0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
++0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
++0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
++0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
++0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
++0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
++0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
++0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
++0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
++0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
++0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
++0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
++0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
++0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
++0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
++};
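++/*
++ * 256 bytes of fixed non-zero filler.  pad_msg() copies from here to
++ * rebuild the padding bytes of a PKCS #1 block type 2 envelope around
++ * a recovered cleartext.
++ */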
++static struct cca_private_ext_ME static_pvt_me_key = {
++ {
++ 0x1E,
++ 0x00,
++ 0x0183,
++ {0x00,0x00,0x00,0x00}
++ },
++ {
++ 0x02,
++ 0x00,
++ 0x016C,
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00},
++ {0x00,0x00,0x00,0x00},
++ 0x00,
++ 0x00,
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00},
++ {0x80,0x00,0x00,0x00},
++ {0x00,0x00,0x00,0x00,0x00,0x00},
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
++ {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}
++ },
++ {
++ 0x04,
++ 0x00,
++ 0x000F,
++ {0x00,0x00},
++ 0x0003,
++ 0x0000,
++ 0x0000,
++ {0x01,0x00,0x01}
++ }
++};
++static struct cca_public_key static_public_key = {
++ {
++ 0x1E,
++ 0x00,
++ 0x0000,
++ {0x00,0x00,0x00,0x00}
++ },
++ {
++ 0x04,
++ 0x00,
++ 0x0000,
++ {0x00,0x00},
++ 0x0000,
++ 0x0000,
++ 0x0000,
++ {0x01,0x00,0x01}
++ }
++};
++#define FIXED_TYPE6_ME_LEN 0x0000025F
++#define FIXED_TYPE6_ME_EN_LEN 0x000000F0
++#define FIXED_TYPE6_ME_LENX 0x000002CB
++#define FIXED_TYPE6_ME_EN_LENX 0x0000015C
++static struct cca_public_sec static_cca_pub_sec = {
++ 0x04,
++ 0x00,
++ 0x000f,
++ {0x00,0x00},
++ 0x0003,
++ 0x0000,
++ 0x0000,
++ {0x01,0x00,0x01}
++};
++#define FIXED_TYPE6_CR_LEN 0x00000177
++#define FIXED_TYPE6_CR_LENX 0x000001E3
++#define MAX_RESPONSE_SIZE 0x00000710
++#define MAX_RESPONSEX_SIZE 0x0000077C
++#define RESPONSE_CPRB_SIZE 0x000006B8
++#define RESPONSE_CPRBX_SIZE 0x00000724
++#define CALLER_HEADER 12
++static unsigned char static_PKE_function_code[2] = {0x50, 0x4B};
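++/*
++ * The four inline-assembly wrappers below issue the AP (Adjunct
++ * Processor) queue instructions by their raw opcodes: testq() and
++ * resetq() use PQAP (0xb2af; TAPQ, or RAPQ with function code 1 in
++ * GR0), sen() uses NQAP (0xb2ad) to enqueue a request, and rec() uses
++ * DQAP (0xb2ae) to dequeue a reply.  The .fixup/__ex_table sections
++ * turn the program check taken on machines without these
++ * instructions into the DEV_*_EXCEPTION return codes.
++ */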
++static inline int
++testq(int q_nr, int *q_depth, int *dev_type, struct ap_status_word *stat)
++{
++ int ccode;
++ asm volatile
++#ifdef __s390x__
++ (" llgfr 0,%4 \n"
++ " slgr 1,1 \n"
++ " lgr 2,1 \n"
++ "0: .long 0xb2af0000 \n"
++ "1: ipm %0 \n"
++ " srl %0,28 \n"
++ " iihh %0,0 \n"
++ " iihl %0,0 \n"
++ " lgr %1,1 \n"
++ " lgr %3,2 \n"
++ " srl %3,24 \n"
++ " sll 2,24 \n"
++ " srl 2,24 \n"
++ " lgr %2,2 \n"
++ "2: \n"
++ ".section .fixup,\"ax\" \n"
++ "3: \n"
++ " lhi %0,%h5 \n"
++ " jg 2b \n"
++ ".previous \n"
++ ".section __ex_table,\"a\" \n"
++ " .align 8 \n"
++ " .quad 0b,3b \n"
++ " .quad 1b,3b \n"
++ ".previous"
++ :"=d" (ccode),"=d" (*stat),"=d" (*q_depth), "=d" (*dev_type)
++ :"d" (q_nr), "K" (DEV_TSQ_EXCEPTION)
++ :"cc","0","1","2","memory");
++#else
++ (" lr 0,%4 \n"
++ " slr 1,1 \n"
++ " lr 2,1 \n"
++ "0: .long 0xb2af0000 \n"
++ "1: ipm %0 \n"
++ " srl %0,28 \n"
++ " lr %1,1 \n"
++ " lr %3,2 \n"
++ " srl %3,24 \n"
++ " sll 2,24 \n"
++ " srl 2,24 \n"
++ " lr %2,2 \n"
++ "2: \n"
++ ".section .fixup,\"ax\" \n"
++ "3: \n"
++ " lhi %0,%h5 \n"
++ " bras 1,4f \n"
++ " .long 2b \n"
++ "4: \n"
++ " l 1,0(1) \n"
++ " br 1 \n"
++ ".previous \n"
++ ".section __ex_table,\"a\" \n"
++ " .align 4 \n"
++ " .long 0b,3b \n"
++ " .long 1b,3b \n"
++ ".previous"
++ :"=d" (ccode),"=d" (*stat),"=d" (*q_depth), "=d" (*dev_type)
++ :"d" (q_nr), "K" (DEV_TSQ_EXCEPTION)
++ :"cc","0","1","2","memory");
++#endif
++ return ccode;
++}
++static inline int
++resetq(int q_nr, struct ap_status_word *stat_p)
++{
++ int ccode;
++ asm volatile
++#ifdef __s390x__
++ (" llgfr 0,%2 \n"
++ " lghi 1,1 \n"
++ " sll 1,24 \n"
++ " or 0,1 \n"
++ " slgr 1,1 \n"
++ " lgr 2,1 \n"
++ "0: .long 0xb2af0000 \n"
++ "1: ipm %0 \n"
++ " srl %0,28 \n"
++ " iihh %0,0 \n"
++ " iihl %0,0 \n"
++ " lgr %1,1 \n"
++ "2: \n"
++ ".section .fixup,\"ax\" \n"
++ "3: \n"
++ " lhi %0,%h3 \n"
++ " jg 2b \n"
++ ".previous \n"
++ ".section __ex_table,\"a\" \n"
++ " .align 8 \n"
++ " .quad 0b,3b \n"
++ " .quad 1b,3b \n"
++ ".previous"
++ :"=d" (ccode),"=d" (*stat_p)
++ :"d" (q_nr), "K" (DEV_RSQ_EXCEPTION)
++ :"cc","0","1","2","memory");
++#else
++ (" lr 0,%2 \n"
++ " lhi 1,1 \n"
++ " sll 1,24 \n"
++ " or 0,1 \n"
++ " slr 1,1 \n"
++ " lr 2,1 \n"
++ "0: .long 0xb2af0000 \n"
++ "1: ipm %0 \n"
++ " srl %0,28 \n"
++ " lr %1,1 \n"
++ "2: \n"
++ ".section .fixup,\"ax\" \n"
++ "3: \n"
++ " lhi %0,%h3 \n"
++ " bras 1,4f \n"
++ " .long 2b \n"
++ "4: \n"
++ " l 1,0(1) \n"
++ " br 1 \n"
++ ".previous \n"
++ ".section __ex_table,\"a\" \n"
++ " .align 4 \n"
++ " .long 0b,3b \n"
++ " .long 1b,3b \n"
++ ".previous"
++ :"=d" (ccode),"=d" (*stat_p)
++ :"d" (q_nr), "K" (DEV_RSQ_EXCEPTION)
++ :"cc","0","1","2","memory");
++#endif
++ return ccode;
++}
++static inline int
++sen(int msg_len, unsigned char *msg_ext, struct ap_status_word *stat)
++{
++ int ccode;
++ asm volatile
++#ifdef __s390x__
++ (" lgr 6,%3 \n"
++ " llgfr 7,%2 \n"
++ " llgt 0,0(6) \n"
++ " lghi 1,64 \n"
++ " sll 1,24 \n"
++ " or 0,1 \n"
++ " la 6,4(6) \n"
++ " llgt 2,0(6) \n"
++ " llgt 3,4(6) \n"
++ " la 6,8(6) \n"
++ " slr 1,1 \n"
++ "0: .long 0xb2ad0026 \n"
++ "1: brc 2,0b \n"
++ " ipm %0 \n"
++ " srl %0,28 \n"
++ " iihh %0,0 \n"
++ " iihl %0,0 \n"
++ " lgr %1,1 \n"
++ "2: \n"
++ ".section .fixup,\"ax\" \n"
++ "3: \n"
++ " lhi %0,%h4 \n"
++ " jg 2b \n"
++ ".previous \n"
++ ".section __ex_table,\"a\" \n"
++ " .align 8 \n"
++ " .quad 0b,3b \n"
++ " .quad 1b,3b \n"
++ ".previous"
++ :"=d" (ccode),"=d" (*stat)
++ :"d" (msg_len),"a" (msg_ext), "K" (DEV_SEN_EXCEPTION)
++ :"cc","0","1","2","3","6","7","memory");
++#else
++ (" lr 6,%3 \n"
++ " lr 7,%2 \n"
++ " l 0,0(6) \n"
++ " lhi 1,64 \n"
++ " sll 1,24 \n"
++ " or 0,1 \n"
++ " la 6,4(6) \n"
++ " l 2,0(6) \n"
++ " l 3,4(6) \n"
++ " la 6,8(6) \n"
++ " slr 1,1 \n"
++ "0: .long 0xb2ad0026 \n"
++ "1: brc 2,0b \n"
++ " ipm %0 \n"
++ " srl %0,28 \n"
++ " lr %1,1 \n"
++ "2: \n"
++ ".section .fixup,\"ax\" \n"
++ "3: \n"
++ " lhi %0,%h4 \n"
++ " bras 1,4f \n"
++ " .long 2b \n"
++ "4: \n"
++ " l 1,0(1) \n"
++ " br 1 \n"
++ ".previous \n"
++ ".section __ex_table,\"a\" \n"
++ " .align 4 \n"
++ " .long 0b,3b \n"
++ " .long 1b,3b \n"
++ ".previous"
++ :"=d" (ccode),"=d" (*stat)
++ :"d" (msg_len),"a" (msg_ext), "K" (DEV_SEN_EXCEPTION)
++ :"cc","0","1","2","3","6","7","memory");
++#endif
++ return ccode;
++}
++static inline int
++rec(int q_nr, int buff_l, unsigned char *rsp, unsigned char *id,
++ struct ap_status_word *st)
++{
++ int ccode;
++ asm volatile
++#ifdef __s390x__
++ (" llgfr 0,%2 \n"
++ " lgr 3,%4 \n"
++ " lgr 6,%3 \n"
++ " llgfr 7,%5 \n"
++ " lghi 1,128 \n"
++ " sll 1,24 \n"
++ " or 0,1 \n"
++ " slgr 1,1 \n"
++ " lgr 2,1 \n"
++ " lgr 4,1 \n"
++ " lgr 5,1 \n"
++ "0: .long 0xb2ae0046 \n"
++ "1: brc 2,0b \n"
++ " brc 4,0b \n"
++ " ipm %0 \n"
++ " srl %0,28 \n"
++ " iihh %0,0 \n"
++ " iihl %0,0 \n"
++ " lgr %1,1 \n"
++ " st 4,0(3) \n"
++ " st 5,4(3) \n"
++ "2: \n"
++ ".section .fixup,\"ax\" \n"
++ "3: \n"
++ " lhi %0,%h6 \n"
++ " jg 2b \n"
++ ".previous \n"
++ ".section __ex_table,\"a\" \n"
++ " .align 8 \n"
++ " .quad 0b,3b \n"
++ " .quad 1b,3b \n"
++ ".previous"
++ :"=d"(ccode),"=d"(*st)
++ :"d" (q_nr), "d" (rsp), "d" (id), "d" (buff_l), "K" (DEV_REC_EXCEPTION)
++ :"cc","0","1","2","3","4","5","6","7","memory");
++#else
++ (" lr 0,%2 \n"
++ " lr 3,%4 \n"
++ " lr 6,%3 \n"
++ " lr 7,%5 \n"
++ " lhi 1,128 \n"
++ " sll 1,24 \n"
++ " or 0,1 \n"
++ " slr 1,1 \n"
++ " lr 2,1 \n"
++ " lr 4,1 \n"
++ " lr 5,1 \n"
++ "0: .long 0xb2ae0046 \n"
++ "1: brc 2,0b \n"
++ " brc 4,0b \n"
++ " ipm %0 \n"
++ " srl %0,28 \n"
++ " lr %1,1 \n"
++ " st 4,0(3) \n"
++ " st 5,4(3) \n"
++ "2: \n"
++ ".section .fixup,\"ax\" \n"
++ "3: \n"
++ " lhi %0,%h6 \n"
++ " bras 1,4f \n"
++ " .long 2b \n"
++ "4: \n"
++ " l 1,0(1) \n"
++ " br 1 \n"
++ ".previous \n"
++ ".section __ex_table,\"a\" \n"
++ " .align 4 \n"
++ " .long 0b,3b \n"
++ " .long 1b,3b \n"
++ ".previous"
++ :"=d"(ccode),"=d"(*st)
++ :"d" (q_nr), "d" (rsp), "d" (id), "d" (buff_l), "K" (DEV_REC_EXCEPTION)
++ :"cc","0","1","2","3","4","5","6","7","memory");
++#endif
++ return ccode;
++}
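++/*
++ * The CPRB's 2-byte length fields are stored little-endian while
++ * s390 is big-endian.  itoLe2() stores the low 16 bits of an int in
++ * little-endian byte order (e.g. 0x0188 becomes {0x88, 0x01});
++ * le2toI() converts such a field back into a host int.
++ */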
++static inline void
++itoLe2(int *i_p, unsigned char *lechars)
++{
++ *lechars = *((unsigned char *) i_p + sizeof(int) - 1);
++ *(lechars + 1) = *((unsigned char *) i_p + sizeof(int) - 2);
++}
++static inline void
++le2toI(unsigned char *lechars, int *i_p)
++{
++ unsigned char *ic_p;
++ *i_p = 0;
++ ic_p = (unsigned char *) i_p;
++ *(ic_p + 2) = *(lechars + 1);
++ *(ic_p + 3) = *(lechars);
++}
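++/*
++ * is_empty() reports whether the first len bytes of ptr are all
++ * zero; the comparison target is a run of zero bytes inside
++ * static_pvt_me_key (zero-filled from offset 60 on in its
++ * initializer).  It is used to reject all-zero operands copied in
++ * from user space.
++ */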
++static inline int
++is_empty(unsigned char *ptr, int len)
++{
++ return !memcmp(ptr, (unsigned char *) &static_pvt_me_key+60, len);
++}
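++/*
++ * query_online() probes one AP queue with TAPQ, retrying up to
++ * resetNr times at 5 us intervals while the device reports busy or a
++ * reset in progress, and classifies the hardware (PCICA, PCICC,
++ * PCIXCC, CEX2C) from the TAPQ result.  reset_device() below issues
++ * RAPQ and then polls TAPQ the same way until the queue is empty.
++ */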
++enum hdstat
++query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type)
++{
++ int q_nr, i, t_depth, t_dev_type;
++ enum devstat ccode;
++ struct ap_status_word stat_word;
++ enum hdstat stat;
++ int break_out;
++ q_nr = (deviceNr << SKIP_BITL) + cdx;
++ stat = HD_BUSY;
++ ccode = testq(q_nr, &t_depth, &t_dev_type, &stat_word);
++ PDEBUG("ccode %d response_code %02X\n", ccode, stat_word.response_code);
++ break_out = 0;
++ for (i = 0; i < resetNr; i++) {
++ if (ccode > 3) {
++ PRINTKC("Exception testing device %d\n", i);
++ return HD_TSQ_EXCEPTION;
++ }
++ switch (ccode) {
++ case 0:
++ PDEBUG("t_dev_type %d\n", t_dev_type);
++ break_out = 1;
++ stat = HD_ONLINE;
++ *q_depth = t_depth + 1;
++ switch (t_dev_type) {
++ case OTHER_HW:
++ stat = HD_NOT_THERE;
++ *dev_type = NILDEV;
++ break;
++ case PCICA_HW:
++ *dev_type = PCICA;
++ break;
++ case PCICC_HW:
++ *dev_type = PCICC;
++ break;
++ case PCIXCC_HW:
++ *dev_type = PCIXCC_UNK;
++ break;
++ case CEX2C_HW:
++ *dev_type = CEX2C;
++ break;
++ default:
++ *dev_type = NILDEV;
++ break;
++ }
++ PDEBUG("available device %d: Q depth = %d, dev "
++ "type = %d, stat = %02X%02X%02X%02X\n",
++ deviceNr, *q_depth, *dev_type,
++ stat_word.q_stat_flags,
++ stat_word.response_code,
++ stat_word.reserved[0],
++ stat_word.reserved[1]);
++ break;
++ case 3:
++ switch (stat_word.response_code) {
++ case AP_RESPONSE_NORMAL:
++ stat = HD_ONLINE;
++ break_out = 1;
++ *q_depth = t_depth + 1;
++ *dev_type = t_dev_type;
++ PDEBUG("cc3, available device "
++ "%d: Q depth = %d, dev "
++ "type = %d, stat = "
++ "%02X%02X%02X%02X\n",
++ deviceNr, *q_depth,
++ *dev_type,
++ stat_word.q_stat_flags,
++ stat_word.response_code,
++ stat_word.reserved[0],
++ stat_word.reserved[1]);
++ break;
++ case AP_RESPONSE_Q_NOT_AVAIL:
++ stat = HD_NOT_THERE;
++ break_out = 1;
++ break;
++ case AP_RESPONSE_RESET_IN_PROGRESS:
++ PDEBUG("device %d in reset\n",
++ deviceNr);
++ break;
++ case AP_RESPONSE_DECONFIGURED:
++ stat = HD_DECONFIGURED;
++ break_out = 1;
++ break;
++ case AP_RESPONSE_CHECKSTOPPED:
++ stat = HD_CHECKSTOPPED;
++ break_out = 1;
++ break;
++ case AP_RESPONSE_BUSY:
++ PDEBUG("device %d busy\n",
++ deviceNr);
++ break;
++ default:
++ break;
++ }
++ break;
++ default:
++ stat = HD_NOT_THERE;
++ break_out = 1;
++ break;
++ }
++ if (break_out)
++ break;
++ udelay(5);
++ ccode = testq(q_nr, &t_depth, &t_dev_type, &stat_word);
++ }
++ return stat;
++}
++enum devstat
++reset_device(int deviceNr, int cdx, int resetNr)
++{
++ int q_nr, ccode = 0, dummy_qdepth, dummy_devType, i;
++ struct ap_status_word stat_word;
++ enum devstat stat;
++ int break_out;
++ q_nr = (deviceNr << SKIP_BITL) + cdx;
++ stat = DEV_GONE;
++ ccode = resetq(q_nr, &stat_word);
++ if (ccode > 3)
++ return DEV_RSQ_EXCEPTION;
++ break_out = 0;
++ for (i = 0; i < resetNr; i++) {
++ switch (ccode) {
++ case 0:
++ stat = DEV_ONLINE;
++ if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
++ break_out = 1;
++ break;
++ case 3:
++ switch (stat_word.response_code) {
++ case AP_RESPONSE_NORMAL:
++ stat = DEV_ONLINE;
++ if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
++ break_out = 1;
++ break;
++ case AP_RESPONSE_Q_NOT_AVAIL:
++ case AP_RESPONSE_DECONFIGURED:
++ case AP_RESPONSE_CHECKSTOPPED:
++ stat = DEV_GONE;
++ break_out = 1;
++ break;
++ case AP_RESPONSE_RESET_IN_PROGRESS:
++ case AP_RESPONSE_BUSY:
++ default:
++ break;
++ }
++ break;
++ default:
++ stat = DEV_GONE;
++ break_out = 1;
++ break;
++ }
++ if (break_out == 1)
++ break;
++ udelay(5);
++ ccode = testq(q_nr, &dummy_qdepth, &dummy_devType, &stat_word);
++ if (ccode > 3) {
++ stat = DEV_TSQ_EXCEPTION;
++ break;
++ }
++ }
++ PDEBUG("Number of testq's needed for reset: %d\n", i);
++ if (i >= resetNr) {
++ stat = DEV_GONE;
++ }
++ return stat;
++}
++#ifdef DEBUG_HYDRA_MSGS
++static inline void
++print_buffer(unsigned char *buffer, int bufflen)
++{
++ int i;
++ for (i = 0; i < bufflen; i += 16) {
++ PRINTK("%04X: %02X%02X%02X%02X %02X%02X%02X%02X "
++ "%02X%02X%02X%02X %02X%02X%02X%02X\n", i,
++ buffer[i+0], buffer[i+1], buffer[i+2], buffer[i+3],
++ buffer[i+4], buffer[i+5], buffer[i+6], buffer[i+7],
++ buffer[i+8], buffer[i+9], buffer[i+10], buffer[i+11],
++ buffer[i+12], buffer[i+13], buffer[i+14], buffer[i+15]);
++ }
++}
++#endif
++enum devstat
++send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext)
++{
++ struct ap_status_word stat_word;
++ enum devstat stat;
++ int ccode;
++ ((struct request_msg_ext *) msg_ext)->q_nr =
++ (dev_nr << SKIP_BITL) + cdx;
++ PDEBUG("msg_len passed to sen: %d\n", msg_len);
++ PDEBUG("q number passed to sen: %02x%02x%02x%02x\n",
++ msg_ext[0], msg_ext[1], msg_ext[2], msg_ext[3]);
++ stat = DEV_GONE;
++#ifdef DEBUG_HYDRA_MSGS
++ PRINTK("Request header: %02X%02X%02X%02X %02X%02X%02X%02X "
++ "%02X%02X%02X%02X\n",
++ msg_ext[0], msg_ext[1], msg_ext[2], msg_ext[3],
++ msg_ext[4], msg_ext[5], msg_ext[6], msg_ext[7],
++ msg_ext[8], msg_ext[9], msg_ext[10], msg_ext[11]);
++
++ print_buffer(msg_ext+CALLER_HEADER, msg_len);
++#endif
++ ccode = sen(msg_len, msg_ext, &stat_word);
++ if (ccode > 3)
++ return DEV_SEN_EXCEPTION;
++ PDEBUG("nq cc: %u, st: %02x%02x%02x%02x\n",
++ ccode, stat_word.q_stat_flags, stat_word.response_code,
++ stat_word.reserved[0], stat_word.reserved[1]);
++ switch (ccode) {
++ case 0:
++ stat = DEV_ONLINE;
++ break;
++ case 1:
++ stat = DEV_GONE;
++ break;
++ case 3:
++ switch (stat_word.response_code) {
++ case AP_RESPONSE_NORMAL:
++ stat = DEV_ONLINE;
++ break;
++ case AP_RESPONSE_Q_FULL:
++ stat = DEV_QUEUE_FULL;
++ break;
++ default:
++ stat = DEV_GONE;
++ break;
++ }
++ break;
++ default:
++ stat = DEV_GONE;
++ break;
++ }
++ return stat;
++}
++enum devstat
++receive_from_AP(int dev_nr, int cdx, int resplen, unsigned char *resp,
++ unsigned char *psmid)
++{
++ int ccode;
++ struct ap_status_word stat_word;
++ enum devstat stat;
++ memset(resp, 0x00, 8);
++ ccode = rec((dev_nr << SKIP_BITL) + cdx, resplen, resp, psmid,
++ &stat_word);
++ if (ccode > 3)
++ return DEV_REC_EXCEPTION;
++ PDEBUG("dq cc: %u, st: %02x%02x%02x%02x\n",
++ ccode, stat_word.q_stat_flags, stat_word.response_code,
++ stat_word.reserved[0], stat_word.reserved[1]);
++ stat = DEV_GONE;
++ switch (ccode) {
++ case 0:
++ stat = DEV_ONLINE;
++#ifdef DEBUG_HYDRA_MSGS
++ print_buffer(resp, resplen);
++#endif
++ break;
++ case 3:
++ switch (stat_word.response_code) {
++ case AP_RESPONSE_NORMAL:
++ stat = DEV_ONLINE;
++ break;
++ case AP_RESPONSE_NO_PENDING_REPLY:
++ if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
++ stat = DEV_EMPTY;
++ else
++ stat = DEV_NO_WORK;
++ break;
++ case AP_RESPONSE_INDEX_TOO_BIG:
++ case AP_RESPONSE_NO_FIRST_PART:
++ case AP_RESPONSE_MESSAGE_TOO_BIG:
++ stat = DEV_BAD_MESSAGE;
++ break;
++ default:
++ break;
++ }
++ break;
++ default:
++ break;
++ }
++ return stat;
++}
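++/*
++ * pad_msg() rebuilds a PKCS #1 block type 2 envelope in place: the
++ * leading zero bytes in front of the recovered cleartext are
++ * replaced by 0x00 0x02, at least 8 non-zero padding bytes taken
++ * from static_pad, and a 0x00 separator.  SEN_PAD_ERROR means the
++ * cleartext left too little room for valid padding.
++ */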
++static inline int
++pad_msg(unsigned char *buffer, int totalLength, int msgLength)
++{
++ int pad_len;
++ for (pad_len = 0; pad_len < (totalLength - msgLength); pad_len++)
++ if (buffer[pad_len] != 0x00)
++ break;
++ pad_len -= 3;
++ if (pad_len < 8)
++ return SEN_PAD_ERROR;
++ buffer[0] = 0x00;
++ buffer[1] = 0x02;
++ memcpy(buffer+2, static_pad, pad_len);
++ buffer[pad_len + 2] = 0x00;
++ return 0;
++}
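++/*
++ * is_common_public_key() detects the two ubiquitous public
++ * exponents, 3 and 65537 (0x010001), after skipping leading zero
++ * bytes.  The type6 decrypt path uses it to refuse "private key"
++ * operations whose exponent is really a public one.
++ */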
++static inline int
++is_common_public_key(unsigned char *key, int len)
++{
++ int i;
++ for (i = 0; i < len; i++)
++ if (key[i])
++ break;
++ key += i;
++ len -= i;
++ if (((len == 1) && (key[0] == 3)) ||
++ ((len == 3) && (key[0] == 1) && (key[1] == 0) && (key[2] == 1)))
++ return 1;
++ return 0;
++}
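++/*
++ * The ICA*_msg_to_* converters below translate the user-space
++ * ica_rsa_modexpo / ica_rsa_modexpo_crt requests into the card
++ * message formats.  Operands are right-justified into fixed-width
++ * fields with copy_from_user(); SEN_RELEASED signals a fault while
++ * copying, SEN_USER_ERROR an all-zero or otherwise unusable operand.
++ */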
++static int
++ICAMEX_msg_to_type4MEX_msg(struct ica_rsa_modexpo *icaMex_p, int *z90cMsg_l_p,
++ union type4_msg *z90cMsg_p)
++{
++ int mod_len, msg_size, mod_tgt_len, exp_tgt_len, inp_tgt_len;
++ unsigned char *mod_tgt, *exp_tgt, *inp_tgt;
++ union type4_msg *tmp_type4_msg;
++ mod_len = icaMex_p->inputdatalength;
++ msg_size = ((mod_len <= 128) ? TYPE4_SME_LEN : TYPE4_LME_LEN) +
++ CALLER_HEADER;
++ memset(z90cMsg_p, 0, msg_size);
++ tmp_type4_msg = (union type4_msg *)
++ ((unsigned char *) z90cMsg_p + CALLER_HEADER);
++ tmp_type4_msg->sme.header.msg_type_code = TYPE4_TYPE_CODE;
++ tmp_type4_msg->sme.header.request_code = TYPE4_REQU_CODE;
++ if (mod_len <= 128) {
++ tmp_type4_msg->sme.header.msg_fmt = TYPE4_SME_FMT;
++ tmp_type4_msg->sme.header.msg_len = TYPE4_SME_LEN;
++ mod_tgt = tmp_type4_msg->sme.modulus;
++ mod_tgt_len = sizeof(tmp_type4_msg->sme.modulus);
++ exp_tgt = tmp_type4_msg->sme.exponent;
++ exp_tgt_len = sizeof(tmp_type4_msg->sme.exponent);
++ inp_tgt = tmp_type4_msg->sme.message;
++ inp_tgt_len = sizeof(tmp_type4_msg->sme.message);
++ } else {
++ tmp_type4_msg->lme.header.msg_fmt = TYPE4_LME_FMT;
++ tmp_type4_msg->lme.header.msg_len = TYPE4_LME_LEN;
++ mod_tgt = tmp_type4_msg->lme.modulus;
++ mod_tgt_len = sizeof(tmp_type4_msg->lme.modulus);
++ exp_tgt = tmp_type4_msg->lme.exponent;
++ exp_tgt_len = sizeof(tmp_type4_msg->lme.exponent);
++ inp_tgt = tmp_type4_msg->lme.message;
++ inp_tgt_len = sizeof(tmp_type4_msg->lme.message);
++ }
++ mod_tgt += (mod_tgt_len - mod_len);
++ if (copy_from_user(mod_tgt, icaMex_p->n_modulus, mod_len))
++ return SEN_RELEASED;
++ if (is_empty(mod_tgt, mod_len))
++ return SEN_USER_ERROR;
++ exp_tgt += (exp_tgt_len - mod_len);
++ if (copy_from_user(exp_tgt, icaMex_p->b_key, mod_len))
++ return SEN_RELEASED;
++ if (is_empty(exp_tgt, mod_len))
++ return SEN_USER_ERROR;
++ inp_tgt += (inp_tgt_len - mod_len);
++ if (copy_from_user(inp_tgt, icaMex_p->inputdata, mod_len))
++ return SEN_RELEASED;
++ if (is_empty(inp_tgt, mod_len))
++ return SEN_USER_ERROR;
++ *z90cMsg_l_p = msg_size - CALLER_HEADER;
++ return 0;
++}
++static int
++ICACRT_msg_to_type4CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p,
++ int *z90cMsg_l_p, union type4_msg *z90cMsg_p)
++{
++ int mod_len, short_len, long_len, tmp_size, p_tgt_len, q_tgt_len,
++ dp_tgt_len, dq_tgt_len, u_tgt_len, inp_tgt_len;
++ unsigned char *p_tgt, *q_tgt, *dp_tgt, *dq_tgt, *u_tgt, *inp_tgt;
++ union type4_msg *tmp_type4_msg;
++ mod_len = icaMsg_p->inputdatalength;
++ short_len = mod_len / 2;
++ long_len = mod_len / 2 + 8;
++ tmp_size = ((mod_len <= 128) ? TYPE4_SCR_LEN : TYPE4_LCR_LEN) +
++ CALLER_HEADER;
++ memset(z90cMsg_p, 0, tmp_size);
++ tmp_type4_msg = (union type4_msg *)
++ ((unsigned char *) z90cMsg_p + CALLER_HEADER);
++ tmp_type4_msg->scr.header.msg_type_code = TYPE4_TYPE_CODE;
++ tmp_type4_msg->scr.header.request_code = TYPE4_REQU_CODE;
++ if (mod_len <= 128) {
++ tmp_type4_msg->scr.header.msg_fmt = TYPE4_SCR_FMT;
++ tmp_type4_msg->scr.header.msg_len = TYPE4_SCR_LEN;
++ p_tgt = tmp_type4_msg->scr.p;
++ p_tgt_len = sizeof(tmp_type4_msg->scr.p);
++ q_tgt = tmp_type4_msg->scr.q;
++ q_tgt_len = sizeof(tmp_type4_msg->scr.q);
++ dp_tgt = tmp_type4_msg->scr.dp;
++ dp_tgt_len = sizeof(tmp_type4_msg->scr.dp);
++ dq_tgt = tmp_type4_msg->scr.dq;
++ dq_tgt_len = sizeof(tmp_type4_msg->scr.dq);
++ u_tgt = tmp_type4_msg->scr.u;
++ u_tgt_len = sizeof(tmp_type4_msg->scr.u);
++ inp_tgt = tmp_type4_msg->scr.message;
++ inp_tgt_len = sizeof(tmp_type4_msg->scr.message);
++ } else {
++ tmp_type4_msg->lcr.header.msg_fmt = TYPE4_LCR_FMT;
++ tmp_type4_msg->lcr.header.msg_len = TYPE4_LCR_LEN;
++ p_tgt = tmp_type4_msg->lcr.p;
++ p_tgt_len = sizeof(tmp_type4_msg->lcr.p);
++ q_tgt = tmp_type4_msg->lcr.q;
++ q_tgt_len = sizeof(tmp_type4_msg->lcr.q);
++ dp_tgt = tmp_type4_msg->lcr.dp;
++ dp_tgt_len = sizeof(tmp_type4_msg->lcr.dp);
++ dq_tgt = tmp_type4_msg->lcr.dq;
++ dq_tgt_len = sizeof(tmp_type4_msg->lcr.dq);
++ u_tgt = tmp_type4_msg->lcr.u;
++ u_tgt_len = sizeof(tmp_type4_msg->lcr.u);
++ inp_tgt = tmp_type4_msg->lcr.message;
++ inp_tgt_len = sizeof(tmp_type4_msg->lcr.message);
++ }
++ p_tgt += (p_tgt_len - long_len);
++ if (copy_from_user(p_tgt, icaMsg_p->np_prime, long_len))
++ return SEN_RELEASED;
++ if (is_empty(p_tgt, long_len))
++ return SEN_USER_ERROR;
++ q_tgt += (q_tgt_len - short_len);
++ if (copy_from_user(q_tgt, icaMsg_p->nq_prime, short_len))
++ return SEN_RELEASED;
++ if (is_empty(q_tgt, short_len))
++ return SEN_USER_ERROR;
++ dp_tgt += (dp_tgt_len - long_len);
++ if (copy_from_user(dp_tgt, icaMsg_p->bp_key, long_len))
++ return SEN_RELEASED;
++ if (is_empty(dp_tgt, long_len))
++ return SEN_USER_ERROR;
++ dq_tgt += (dq_tgt_len - short_len);
++ if (copy_from_user(dq_tgt, icaMsg_p->bq_key, short_len))
++ return SEN_RELEASED;
++ if (is_empty(dq_tgt, short_len))
++ return SEN_USER_ERROR;
++ u_tgt += (u_tgt_len - long_len);
++ if (copy_from_user(u_tgt, icaMsg_p->u_mult_inv, long_len))
++ return SEN_RELEASED;
++ if (is_empty(u_tgt, long_len))
++ return SEN_USER_ERROR;
++ inp_tgt += (inp_tgt_len - mod_len);
++ if (copy_from_user(inp_tgt, icaMsg_p->inputdata, mod_len))
++ return SEN_RELEASED;
++ if (is_empty(inp_tgt, mod_len))
++ return SEN_USER_ERROR;
++ *z90cMsg_l_p = tmp_size - CALLER_HEADER;
++ return 0;
++}
++static int
++ICAMEX_msg_to_type6MEX_de_msg(struct ica_rsa_modexpo *icaMsg_p, int cdx,
++ int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
++{
++ int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l;
++ unsigned char *temp;
++ struct type6_hdr *tp6Hdr_p;
++ struct CPRB *cprb_p;
++ struct cca_private_ext_ME *key_p;
++ static int deprecated_msg_count = 0;
++ mod_len = icaMsg_p->inputdatalength;
++ tmp_size = FIXED_TYPE6_ME_LEN + mod_len;
++ total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
++ parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
++ tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;
++ memset(z90cMsg_p, 0, tmp_size);
++
++ temp = (unsigned char *)z90cMsg_p + CALLER_HEADER;
++ memcpy(temp, &static_type6_hdr, sizeof(struct type6_hdr));
++ tp6Hdr_p = (struct type6_hdr *)temp;
++ tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
++ tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
++
++ temp += sizeof(struct type6_hdr);
++ memcpy(temp, &static_cprb, sizeof(struct CPRB));
++ cprb_p = (struct CPRB *) temp;
++ cprb_p->usage_domain[0]= (unsigned char)cdx;
++ itoLe2(&parmBlock_l, cprb_p->req_parml);
++ itoLe2((int *)&(tp6Hdr_p->FromCardLen1), cprb_p->rpl_parml);
++
++ temp += sizeof(struct CPRB);
++ memcpy(temp, &static_pkd_function_and_rules,
++ sizeof(struct function_and_rules_block));
++
++ temp += sizeof(struct function_and_rules_block);
++ vud_len = 2 + icaMsg_p->inputdatalength;
++ itoLe2(&vud_len, temp);
++
++ temp += 2;
++ if (copy_from_user(temp, icaMsg_p->inputdata, mod_len))
++ return SEN_RELEASED;
++ if (is_empty(temp, mod_len))
++ return SEN_USER_ERROR;
++
++ temp += mod_len;
++ memcpy(temp, &static_T6_keyBlock_hdr, sizeof(struct T6_keyBlock_hdr));
++
++ temp += sizeof(struct T6_keyBlock_hdr);
++ memcpy(temp, &static_pvt_me_key, sizeof(struct cca_private_ext_ME));
++ key_p = (struct cca_private_ext_ME *)temp;
++ temp = key_p->pvtMESec.exponent + sizeof(key_p->pvtMESec.exponent)
++ - mod_len;
++ if (copy_from_user(temp, icaMsg_p->b_key, mod_len))
++ return SEN_RELEASED;
++ if (is_empty(temp, mod_len))
++ return SEN_USER_ERROR;
++ if (is_common_public_key(temp, mod_len)) {
++ if (deprecated_msg_count < 20) {
++ PRINTK("Common public key used for modex decrypt\n");
++ deprecated_msg_count++;
++ if (deprecated_msg_count == 20)
++ PRINTK("No longer issuing messages about common"
++ " public key for modex decrypt.\n");
++ }
++ return SEN_NOT_AVAIL;
++ }
++ temp = key_p->pvtMESec.modulus + sizeof(key_p->pvtMESec.modulus)
++ - mod_len;
++ if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
++ return SEN_RELEASED;
++ if (is_empty(temp, mod_len))
++ return SEN_USER_ERROR;
++
++ key_p->pubMESec.modulus_bit_len = 8 * mod_len;
++ *z90cMsg_l_p = tmp_size - CALLER_HEADER;
++ return 0;
++}
++static int
++ICAMEX_msg_to_type6MEX_en_msg(struct ica_rsa_modexpo *icaMsg_p, int cdx,
++ int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
++{
++ int mod_len, vud_len, exp_len, key_len;
++ int pad_len, tmp_size, total_CPRB_len, parmBlock_l, i;
++ unsigned char temp_exp[256], *exp_p, *temp;
++ struct type6_hdr *tp6Hdr_p;
++ struct CPRB *cprb_p;
++ struct cca_public_key *key_p;
++ struct T6_keyBlock_hdr *keyb_p;
++ mod_len = icaMsg_p->inputdatalength;
++ if (copy_from_user(temp_exp, icaMsg_p->b_key, mod_len))
++ return SEN_RELEASED;
++ if (is_empty(temp_exp, mod_len))
++ return SEN_USER_ERROR;
++ exp_p = temp_exp;
++ for (i = 0; i < mod_len; i++)
++ if (exp_p[i])
++ break;
++ if (i >= mod_len)
++ return SEN_USER_ERROR;
++ exp_len = mod_len - i;
++ exp_p += i;
++ PDEBUG("exp_len after computation: %08x\n", exp_len);
++ tmp_size = FIXED_TYPE6_ME_EN_LEN + 2 * mod_len + exp_len;
++ total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
++ parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
++ tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;
++ vud_len = 2 + mod_len;
++ memset(z90cMsg_p, 0, tmp_size);
++
++ temp = (unsigned char *)z90cMsg_p + CALLER_HEADER;
++ memcpy(temp, &static_type6_hdr, sizeof(struct type6_hdr));
++ tp6Hdr_p = (struct type6_hdr *)temp;
++ tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
++ tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
++ memcpy(tp6Hdr_p->function_code, static_PKE_function_code,
++ sizeof(static_PKE_function_code));
++
++ temp += sizeof(struct type6_hdr);
++ memcpy(temp, &static_cprb, sizeof(struct CPRB));
++ cprb_p = (struct CPRB *) temp;
++ cprb_p->usage_domain[0]= (unsigned char)cdx;
++ itoLe2((int *)&(tp6Hdr_p->FromCardLen1), cprb_p->rpl_parml);
++
++ temp += sizeof(struct CPRB);
++ memcpy(temp, &static_pke_function_and_rules,
++ sizeof(struct function_and_rules_block));
++
++ temp += sizeof(struct function_and_rules_block);
++ temp += 2;
++ if (copy_from_user(temp, icaMsg_p->inputdata, mod_len))
++ return SEN_RELEASED;
++ if (is_empty(temp, mod_len))
++ return SEN_USER_ERROR;
++
++ if ((temp[0] != 0x00) || (temp[1] != 0x02))
++ return SEN_NOT_AVAIL;
++ for (i = 2; i < mod_len; i++)
++ if (temp[i] == 0x00)
++ break;
++ if ((i < 9) || (i > (mod_len - 2)))
++ return SEN_NOT_AVAIL;
++ pad_len = i + 1;
++ vud_len = mod_len - pad_len;
++ memmove(temp, temp+pad_len, vud_len);
++
++ temp -= 2;
++ vud_len += 2;
++ itoLe2(&vud_len, temp);
++
++ temp += (vud_len);
++ keyb_p = (struct T6_keyBlock_hdr *)temp;
++
++ temp += sizeof(struct T6_keyBlock_hdr);
++ memcpy(temp, &static_public_key, sizeof(static_public_key));
++ key_p = (struct cca_public_key *)temp;
++ temp = key_p->pubSec.exponent;
++ memcpy(temp, exp_p, exp_len);
++ temp += exp_len;
++ if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
++ return SEN_RELEASED;
++ if (is_empty(temp, mod_len))
++ return SEN_USER_ERROR;
++ key_p->pubSec.modulus_bit_len = 8 * mod_len;
++ key_p->pubSec.modulus_byte_len = mod_len;
++ key_p->pubSec.exponent_len = exp_len;
++ key_p->pubSec.section_length = CALLER_HEADER + mod_len + exp_len;
++ key_len = key_p->pubSec.section_length + sizeof(struct cca_token_hdr);
++ key_p->pubHdr.token_length = key_len;
++ key_len += 4;
++ itoLe2(&key_len, keyb_p->ulen);
++ key_len += 2;
++ itoLe2(&key_len, keyb_p->blen);
++ parmBlock_l -= pad_len;
++ itoLe2(&parmBlock_l, cprb_p->req_parml);
++ *z90cMsg_l_p = tmp_size - CALLER_HEADER;
++ return 0;
++}
++static int
++ICACRT_msg_to_type6CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx,
++ int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
++{
++ int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l, short_len;
++ int long_len, pad_len, keyPartsLen, tmp_l;
++ unsigned char *tgt_p, *temp;
++ struct type6_hdr *tp6Hdr_p;
++ struct CPRB *cprb_p;
++ struct cca_token_hdr *keyHdr_p;
++ struct cca_pvt_ext_CRT_sec *pvtSec_p;
++ struct cca_public_sec *pubSec_p;
++ mod_len = icaMsg_p->inputdatalength;
++ short_len = mod_len / 2;
++ long_len = 8 + short_len;
++ keyPartsLen = 3 * long_len + 2 * short_len;
++ pad_len = (8 - (keyPartsLen % 8)) % 8;
++ keyPartsLen += pad_len + mod_len;
++ tmp_size = FIXED_TYPE6_CR_LEN + keyPartsLen + mod_len;
++ total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
++ parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
++ vud_len = 2 + mod_len;
++ tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;
++ memset(z90cMsg_p, 0, tmp_size);
++
++ tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
++ memcpy(tgt_p, &static_type6_hdr, sizeof(struct type6_hdr));
++ tp6Hdr_p = (struct type6_hdr *)tgt_p;
++ tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
++ tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
++
++ tgt_p += sizeof(struct type6_hdr);
++ cprb_p = (struct CPRB *) tgt_p;
++ memcpy(tgt_p, &static_cprb, sizeof(struct CPRB));
++ cprb_p->usage_domain[0]= *((unsigned char *)(&(cdx))+3);
++ itoLe2(&parmBlock_l, cprb_p->req_parml);
++ memcpy(cprb_p->rpl_parml, cprb_p->req_parml,
++ sizeof(cprb_p->req_parml));
++
++ tgt_p += sizeof(struct CPRB);
++ memcpy(tgt_p, &static_pkd_function_and_rules,
++ sizeof(struct function_and_rules_block));
++
++ tgt_p += sizeof(struct function_and_rules_block);
++ itoLe2(&vud_len, tgt_p);
++
++ tgt_p += 2;
++ if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len))
++ return SEN_RELEASED;
++ if (is_empty(tgt_p, mod_len))
++ return SEN_USER_ERROR;
++
++ tgt_p += mod_len;
++ tmp_l = sizeof(struct T6_keyBlock_hdr) + sizeof(struct cca_token_hdr) +
++ sizeof(struct cca_pvt_ext_CRT_sec) + 0x0F + keyPartsLen;
++ itoLe2(&tmp_l, tgt_p);
++
++ temp = tgt_p + 2;
++ tmp_l -= 2;
++ itoLe2(&tmp_l, temp);
++
++ tgt_p += sizeof(struct T6_keyBlock_hdr);
++ keyHdr_p = (struct cca_token_hdr *)tgt_p;
++ keyHdr_p->token_identifier = CCA_TKN_HDR_ID_EXT;
++ tmp_l -= 4;
++ keyHdr_p->token_length = tmp_l;
++
++ tgt_p += sizeof(struct cca_token_hdr);
++ pvtSec_p = (struct cca_pvt_ext_CRT_sec *)tgt_p;
++ pvtSec_p->section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
++ pvtSec_p->section_length =
++ sizeof(struct cca_pvt_ext_CRT_sec) + keyPartsLen;
++ pvtSec_p->key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
++ pvtSec_p->key_use_flags[0] = CCA_PVT_USAGE_ALL;
++ pvtSec_p->p_len = long_len;
++ pvtSec_p->q_len = short_len;
++ pvtSec_p->dp_len = long_len;
++ pvtSec_p->dq_len = short_len;
++ pvtSec_p->u_len = long_len;
++ pvtSec_p->mod_len = mod_len;
++ pvtSec_p->pad_len = pad_len;
++
++ tgt_p += sizeof(struct cca_pvt_ext_CRT_sec);
++ if (copy_from_user(tgt_p, icaMsg_p->np_prime, long_len))
++ return SEN_RELEASED;
++ if (is_empty(tgt_p, long_len))
++ return SEN_USER_ERROR;
++ tgt_p += long_len;
++ if (copy_from_user(tgt_p, icaMsg_p->nq_prime, short_len))
++ return SEN_RELEASED;
++ if (is_empty(tgt_p, short_len))
++ return SEN_USER_ERROR;
++ tgt_p += short_len;
++ if (copy_from_user(tgt_p, icaMsg_p->bp_key, long_len))
++ return SEN_RELEASED;
++ if (is_empty(tgt_p, long_len))
++ return SEN_USER_ERROR;
++ tgt_p += long_len;
++ if (copy_from_user(tgt_p, icaMsg_p->bq_key, short_len))
++ return SEN_RELEASED;
++ if (is_empty(tgt_p, short_len))
++ return SEN_USER_ERROR;
++ tgt_p += short_len;
++ if (copy_from_user(tgt_p, icaMsg_p->u_mult_inv, long_len))
++ return SEN_RELEASED;
++ if (is_empty(tgt_p, long_len))
++ return SEN_USER_ERROR;
++ tgt_p += long_len;
++ tgt_p += pad_len;
++ memset(tgt_p, 0xFF, mod_len);
++
++ tgt_p += mod_len;
++ memcpy(tgt_p, &static_cca_pub_sec, sizeof(struct cca_public_sec));
++ pubSec_p = (struct cca_public_sec *) tgt_p;
++ pubSec_p->modulus_bit_len = 8 * mod_len;
++ *z90cMsg_l_p = tmp_size - CALLER_HEADER;
++ return 0;
++}
++static int
++ICAMEX_msg_to_type6MEX_msgX(struct ica_rsa_modexpo *icaMsg_p, int cdx,
++ int *z90cMsg_l_p, struct type6_msg *z90cMsg_p,
++ int dev_type)
++{
++ int mod_len, exp_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l;
++ int key_len, i;
++ unsigned char temp_exp[256], *tgt_p, *temp, *exp_p;
++ struct type6_hdr *tp6Hdr_p;
++ struct CPRBX *cprbx_p;
++ struct cca_public_key *key_p;
++ struct T6_keyBlock_hdrX *keyb_p;
++ mod_len = icaMsg_p->inputdatalength;
++ if (copy_from_user(temp_exp, icaMsg_p->b_key, mod_len))
++ return SEN_RELEASED;
++ if (is_empty(temp_exp, mod_len))
++ return SEN_USER_ERROR;
++ exp_p = temp_exp;
++ for (i = 0; i < mod_len; i++)
++ if (exp_p[i])
++ break;
++ if (i >= mod_len)
++ return SEN_USER_ERROR;
++ exp_len = mod_len - i;
++ exp_p += i;
++ PDEBUG("exp_len after computation: %08x\n", exp_len);
++ tmp_size = FIXED_TYPE6_ME_EN_LENX + 2 * mod_len + exp_len;
++ total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
++ parmBlock_l = total_CPRB_len - sizeof(struct CPRBX);
++ tmp_size = tmp_size + CALLER_HEADER;
++ vud_len = 2 + mod_len;
++ memset(z90cMsg_p, 0, tmp_size);
++
++ tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
++ memcpy(tgt_p, &static_type6_hdrX, sizeof(struct type6_hdr));
++ tp6Hdr_p = (struct type6_hdr *)tgt_p;
++ tp6Hdr_p->ToCardLen1 = total_CPRB_len;
++ tp6Hdr_p->FromCardLen1 = RESPONSE_CPRBX_SIZE;
++ memcpy(tp6Hdr_p->function_code, static_PKE_function_code,
++ sizeof(static_PKE_function_code));
++
++ tgt_p += sizeof(struct type6_hdr);
++ memcpy(tgt_p, &static_cprbx, sizeof(struct CPRBX));
++ cprbx_p = (struct CPRBX *) tgt_p;
++ cprbx_p->domain = (unsigned short)cdx;
++ cprbx_p->rpl_msgbl = RESPONSE_CPRBX_SIZE;
++
++ tgt_p += sizeof(struct CPRBX);
++ if (dev_type == PCIXCC_MCL2)
++ memcpy(tgt_p, &static_pke_function_and_rulesX_MCL2,
++ sizeof(struct function_and_rules_block));
++ else
++ memcpy(tgt_p, &static_pke_function_and_rulesX,
++ sizeof(struct function_and_rules_block));
++
++ tgt_p += sizeof(struct function_and_rules_block);
++
++ tgt_p += 2;
++ if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len))
++ return SEN_RELEASED;
++ if (is_empty(tgt_p, mod_len))
++ return SEN_USER_ERROR;
++
++ tgt_p -= 2;
++ *((short *)tgt_p) = (short) vud_len;
++ tgt_p += vud_len;
++ keyb_p = (struct T6_keyBlock_hdrX *)tgt_p;
++
++ tgt_p += sizeof(struct T6_keyBlock_hdrX);
++ memcpy(tgt_p, &static_public_key, sizeof(static_public_key));
++ key_p = (struct cca_public_key *)tgt_p;
++ temp = key_p->pubSec.exponent;
++ memcpy(temp, exp_p, exp_len);
++ temp += exp_len;
++ if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
++ return SEN_RELEASED;
++ if (is_empty(temp, mod_len))
++ return SEN_USER_ERROR;
++ key_p->pubSec.modulus_bit_len = 8 * mod_len;
++ key_p->pubSec.modulus_byte_len = mod_len;
++ key_p->pubSec.exponent_len = exp_len;
++ key_p->pubSec.section_length = CALLER_HEADER + mod_len + exp_len;
++ key_len = key_p->pubSec.section_length + sizeof(struct cca_token_hdr);
++ key_p->pubHdr.token_length = key_len;
++ key_len += 4;
++ keyb_p->ulen = (unsigned short)key_len;
++ key_len += 2;
++ keyb_p->blen = (unsigned short)key_len;
++ cprbx_p->req_parml = parmBlock_l;
++ *z90cMsg_l_p = tmp_size - CALLER_HEADER;
++ return 0;
++}
++static int
++ICACRT_msg_to_type6CRT_msgX(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx,
++ int *z90cMsg_l_p, struct type6_msg *z90cMsg_p,
++ int dev_type)
++{
++ int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l, short_len;
++ int long_len, pad_len, keyPartsLen, tmp_l;
++ unsigned char *tgt_p, *temp;
++ struct type6_hdr *tp6Hdr_p;
++ struct CPRBX *cprbx_p;
++ struct cca_token_hdr *keyHdr_p;
++ struct cca_pvt_ext_CRT_sec *pvtSec_p;
++ struct cca_public_sec *pubSec_p;
++ mod_len = icaMsg_p->inputdatalength;
++ short_len = mod_len / 2;
++ long_len = 8 + short_len;
++ keyPartsLen = 3 * long_len + 2 * short_len;
++ pad_len = (8 - (keyPartsLen % 8)) % 8;
++ keyPartsLen += pad_len + mod_len;
++ tmp_size = FIXED_TYPE6_CR_LENX + keyPartsLen + mod_len;
++ total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
++ parmBlock_l = total_CPRB_len - sizeof(struct CPRBX);
++ vud_len = 2 + mod_len;
++ tmp_size = tmp_size + CALLER_HEADER;
++ memset(z90cMsg_p, 0, tmp_size);
++
++ tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
++ memcpy(tgt_p, &static_type6_hdrX, sizeof(struct type6_hdr));
++ tp6Hdr_p = (struct type6_hdr *)tgt_p;
++ tp6Hdr_p->ToCardLen1 = total_CPRB_len;
++ tp6Hdr_p->FromCardLen1 = RESPONSE_CPRBX_SIZE;
++
++ tgt_p += sizeof(struct type6_hdr);
++ cprbx_p = (struct CPRBX *) tgt_p;
++ memcpy(tgt_p, &static_cprbx, sizeof(struct CPRBX));
++ cprbx_p->domain = (unsigned short)cdx;
++ cprbx_p->req_parml = parmBlock_l;
++ cprbx_p->rpl_msgbl = parmBlock_l;
++
++ tgt_p += sizeof(struct CPRBX);
++ if (dev_type == PCIXCC_MCL2)
++ memcpy(tgt_p, &static_pkd_function_and_rulesX_MCL2,
++ sizeof(struct function_and_rules_block));
++ else
++ memcpy(tgt_p, &static_pkd_function_and_rulesX,
++ sizeof(struct function_and_rules_block));
++
++ tgt_p += sizeof(struct function_and_rules_block);
++ *((short *)tgt_p) = (short) vud_len;
++
++ tgt_p += 2;
++ if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len))
++ return SEN_RELEASED;
++ if (is_empty(tgt_p, mod_len))
++ return SEN_USER_ERROR;
++
++ tgt_p += mod_len;
++ tmp_l = sizeof(struct T6_keyBlock_hdr) + sizeof(struct cca_token_hdr) +
++ sizeof(struct cca_pvt_ext_CRT_sec) + 0x0F + keyPartsLen;
++ *((short *)tgt_p) = (short) tmp_l;
++ temp = tgt_p + 2;
++ tmp_l -= 2;
++ *((short *)temp) = (short) tmp_l;
++
++ tgt_p += sizeof(struct T6_keyBlock_hdr);
++ keyHdr_p = (struct cca_token_hdr *)tgt_p;
++ keyHdr_p->token_identifier = CCA_TKN_HDR_ID_EXT;
++ tmp_l -= 4;
++ keyHdr_p->token_length = tmp_l;
++
++ tgt_p += sizeof(struct cca_token_hdr);
++ pvtSec_p = (struct cca_pvt_ext_CRT_sec *)tgt_p;
++ pvtSec_p->section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
++ pvtSec_p->section_length =
++ sizeof(struct cca_pvt_ext_CRT_sec) + keyPartsLen;
++ pvtSec_p->key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
++ pvtSec_p->key_use_flags[0] = CCA_PVT_USAGE_ALL;
++ pvtSec_p->p_len = long_len;
++ pvtSec_p->q_len = short_len;
++ pvtSec_p->dp_len = long_len;
++ pvtSec_p->dq_len = short_len;
++ pvtSec_p->u_len = long_len;
++ pvtSec_p->mod_len = mod_len;
++ pvtSec_p->pad_len = pad_len;
++
++ tgt_p += sizeof(struct cca_pvt_ext_CRT_sec);
++ if (copy_from_user(tgt_p, icaMsg_p->np_prime, long_len))
++ return SEN_RELEASED;
++ if (is_empty(tgt_p, long_len))
++ return SEN_USER_ERROR;
++ tgt_p += long_len;
++ if (copy_from_user(tgt_p, icaMsg_p->nq_prime, short_len))
++ return SEN_RELEASED;
++ if (is_empty(tgt_p, short_len))
++ return SEN_USER_ERROR;
++ tgt_p += short_len;
++ if (copy_from_user(tgt_p, icaMsg_p->bp_key, long_len))
++ return SEN_RELEASED;
++ if (is_empty(tgt_p, long_len))
++ return SEN_USER_ERROR;
++ tgt_p += long_len;
++ if (copy_from_user(tgt_p, icaMsg_p->bq_key, short_len))
++ return SEN_RELEASED;
++ if (is_empty(tgt_p, short_len))
++ return SEN_USER_ERROR;
++ tgt_p += short_len;
++ if (copy_from_user(tgt_p, icaMsg_p->u_mult_inv, long_len))
++ return SEN_RELEASED;
++ if (is_empty(tgt_p, long_len))
++ return SEN_USER_ERROR;
++ tgt_p += long_len;
++ tgt_p += pad_len;
++ memset(tgt_p, 0xFF, mod_len);
++
++ tgt_p += mod_len;
++ memcpy(tgt_p, &static_cca_pub_sec, sizeof(struct cca_public_sec));
++ pubSec_p = (struct cca_public_sec *) tgt_p;
++ pubSec_p->modulus_bit_len = 8 * mod_len;
++ *z90cMsg_l_p = tmp_size - CALLER_HEADER;
++ return 0;
++}
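++/*
++ * convert_request() dispatches on device type: PCICA takes type4
++ * messages, PCICC takes type6 messages built around the
++ * little-endian CPRB, and PCIXCC/CEX2C take type6 messages built
++ * around the big-endian CPRBX.
++ */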
++int
++convert_request(unsigned char *buffer, int func, unsigned short function,
++ int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p)
++{
++ if (dev_type == PCICA) {
++ if (func == ICARSACRT)
++ return ICACRT_msg_to_type4CRT_msg(
++ (struct ica_rsa_modexpo_crt *) buffer,
++ msg_l_p, (union type4_msg *) msg_p);
++ else
++ return ICAMEX_msg_to_type4MEX_msg(
++ (struct ica_rsa_modexpo *) buffer,
++ msg_l_p, (union type4_msg *) msg_p);
++ }
++ if (dev_type == PCICC) {
++ if (func == ICARSACRT)
++ return ICACRT_msg_to_type6CRT_msg(
++ (struct ica_rsa_modexpo_crt *) buffer,
++ cdx, msg_l_p, (struct type6_msg *)msg_p);
++ if (function == PCI_FUNC_KEY_ENCRYPT)
++ return ICAMEX_msg_to_type6MEX_en_msg(
++ (struct ica_rsa_modexpo *) buffer,
++ cdx, msg_l_p, (struct type6_msg *) msg_p);
++ else
++ return ICAMEX_msg_to_type6MEX_de_msg(
++ (struct ica_rsa_modexpo *) buffer,
++ cdx, msg_l_p, (struct type6_msg *) msg_p);
++ }
++ if ((dev_type == PCIXCC_MCL2) ||
++ (dev_type == PCIXCC_MCL3) ||
++ (dev_type == CEX2C)) {
++ if (func == ICARSACRT)
++ return ICACRT_msg_to_type6CRT_msgX(
++ (struct ica_rsa_modexpo_crt *) buffer,
++ cdx, msg_l_p, (struct type6_msg *) msg_p,
++ dev_type);
++ else
++ return ICAMEX_msg_to_type6MEX_msgX(
++ (struct ica_rsa_modexpo *) buffer,
++ cdx, msg_l_p, (struct type6_msg *) msg_p,
++ dev_type);
++ }
++ return 0;
++}
++int ext_bitlens_msg_count = 0;
++static inline void
++unset_ext_bitlens(void)
++{
++ if (!ext_bitlens_msg_count) {
++ PRINTK("Unable to use coprocessors for extended bitlengths. "
++ "Using PCICAs (if present) for extended bitlengths. "
++ "This is not an error.\n");
++ ext_bitlens_msg_count++;
++ }
++ ext_bitlens = 0;
++}
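++/*
++ * convert_response() parses a card reply (type82 error, type84
++ * result, or type86 carrying a CPRB/CPRBX), copies the result
++ * right-justified into resp_buff, and maps service rc/rs 8/770
++ * (invalid key length) and 8/783 (extended bitlengths not enabled)
++ * to REC_USE_PCICA so the caller can retry on a PCICA.
++ */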
++int
++convert_response(unsigned char *response, unsigned char *buffer,
++ int *respbufflen_p, unsigned char *resp_buff)
++{
++ struct ica_rsa_modexpo *icaMsg_p = (struct ica_rsa_modexpo *) buffer;
++ struct type82_hdr *t82h_p = (struct type82_hdr *) response;
++ struct type84_hdr *t84h_p = (struct type84_hdr *) response;
++ struct type86_fmt2_msg *t86m_p = (struct type86_fmt2_msg *) response;
++ int reply_code, service_rc, service_rs, src_l;
++ unsigned char *src_p, *tgt_p;
++ struct CPRB *cprb_p;
++ struct CPRBX *cprbx_p;
++ src_p = 0;
++ reply_code = 0;
++ service_rc = 0;
++ service_rs = 0;
++ src_l = 0;
++ switch (t82h_p->type) {
++ case TYPE82_RSP_CODE:
++ reply_code = t82h_p->reply_code;
++ src_p = (unsigned char *)t82h_p;
++ PRINTK("Hardware error: Type 82 Message Header: "
++ "%02x%02x%02x%02x%02x%02x%02x%02x\n",
++ src_p[0], src_p[1], src_p[2], src_p[3],
++ src_p[4], src_p[5], src_p[6], src_p[7]);
++ break;
++ case TYPE84_RSP_CODE:
++ src_l = icaMsg_p->outputdatalength;
++ src_p = response + (int)t84h_p->len - src_l;
++ break;
++ case TYPE86_RSP_CODE:
++ reply_code = t86m_p->hdr.reply_code;
++ if (reply_code != 0)
++ break;
++ cprb_p = (struct CPRB *)
++ (response + sizeof(struct type86_fmt2_msg));
++ cprbx_p = (struct CPRBX *) cprb_p;
++ if (cprb_p->cprb_ver_id != 0x02) {
++ le2toI(cprb_p->ccp_rtcode, &service_rc);
++ if (service_rc != 0) {
++ le2toI(cprb_p->ccp_rscode, &service_rs);
++ if ((service_rc == 8) && (service_rs == 66))
++ PDEBUG("Bad block format on PCICC\n");
++ else if ((service_rc == 8) && (service_rs == 770)) {
++ PDEBUG("Invalid key length on PCICC\n");
++ unset_ext_bitlens();
++ return REC_USE_PCICA;
++ }
++ else if ((service_rc == 8) && (service_rs == 783)) {
++ PDEBUG("Extended bitlengths not enabled"
++ "on PCICC\n");
++ unset_ext_bitlens();
++ return REC_USE_PCICA;
++ }
++ else
++ PRINTK("service rc/rs: %d/%d\n",
++ service_rc, service_rs);
++ return REC_OPERAND_INV;
++ }
++ src_p = (unsigned char *)cprb_p + sizeof(struct CPRB);
++ src_p += 4;
++ le2toI(src_p, &src_l);
++ src_l -= 2;
++ src_p += 2;
++ } else {
++ service_rc = (int)cprbx_p->ccp_rtcode;
++ if (service_rc != 0) {
++ service_rs = (int) cprbx_p->ccp_rscode;
++ if ((service_rc == 8) && (service_rs == 66))
++ PDEBUG("Bad block format on PCXICC\n");
++ else if ((service_rc == 8) && (service_rs == 770)) {
++ PDEBUG("Invalid key length on PCIXCC\n");
++ unset_ext_bitlens();
++ return REC_USE_PCICA;
++ }
++ else if ((service_rc == 8) && (service_rs == 783)) {
++ PDEBUG("Extended bitlengths not enabled"
++ "on PCIXCC\n");
++ unset_ext_bitlens();
++ return REC_USE_PCICA;
++ }
++ else
++ PRINTK("service rc/rs: %d/%d\n",
++ service_rc, service_rs);
++ return REC_OPERAND_INV;
++ }
++ src_p = (unsigned char *)
++ cprbx_p + sizeof(struct CPRBX);
++ src_p += 4;
++ src_l = (int)(*((short *) src_p));
++ src_l -= 2;
++ src_p += 2;
++ }
++ break;
++ default:
++ return REC_BAD_MESSAGE;
++ }
++
++ if (reply_code)
++ switch (reply_code) {
++ case REPLY_ERROR_OPERAND_INVALID:
++ return REC_OPERAND_INV;
++ case REPLY_ERROR_OPERAND_SIZE:
++ return REC_OPERAND_SIZE;
++ case REPLY_ERROR_EVEN_MOD_IN_OPND:
++ return REC_EVEN_MOD;
++ case REPLY_ERROR_MESSAGE_TYPE:
++ return WRONG_DEVICE_TYPE;
++ case REPLY_ERROR_TRANSPORT_FAIL:
++ PRINTKW("Transport failed (APFS = %02X%02X%02X%02X)\n",
++ t86m_p->apfs[0], t86m_p->apfs[1],
++ t86m_p->apfs[2], t86m_p->apfs[3]);
++ return REC_HARDWAR_ERR;
++ default:
++ PRINTKW("reply code = %d\n", reply_code);
++ return REC_HARDWAR_ERR;
++ }
++ if (service_rc != 0)
++ return REC_OPERAND_INV;
++ if ((src_l > icaMsg_p->outputdatalength) ||
++ (src_l > RESPBUFFSIZE) ||
++ (src_l <= 0))
++ return REC_OPERAND_SIZE;
++ PDEBUG("Length returned = %d\n", src_l);
++
++ tgt_p = resp_buff + icaMsg_p->outputdatalength - src_l;
++ memcpy(tgt_p, src_p, src_l);
++
++ if ((t82h_p->type == TYPE86_RSP_CODE) && (resp_buff < tgt_p)) {
++ memset(resp_buff, 0, icaMsg_p->outputdatalength - src_l);
++ if (pad_msg(resp_buff, icaMsg_p->outputdatalength, src_l))
++ return REC_INVALID_PAD;
++ }
++ *respbufflen_p = icaMsg_p->outputdatalength;
++ if (*respbufflen_p == 0)
++ PRINTK("Zero *respbufflen_p\n");
++ return 0;
++}
+=== drivers/s390/misc/z90main.c
+==================================================================
+--- drivers/s390/misc/z90main.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/misc/z90main.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,3588 @@
++/*
++ * linux/drivers/s390/misc/z90main.c
++ *
++ * z90crypt 1.3.2
++ *
++ * Copyright (C) 2001, 2004 IBM Corporation
++ * Author(s): Robert Burroughs (burrough at us.ibm.com)
++ * Eric Rossman (edrossma at us.ibm.com)
++ *
++ * Hotplug & misc device support: Jochen Roehrig (roehrig at de.ibm.com)
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <asm/uaccess.h> // copy_(from|to)_user
++#include <linux/compiler.h>
++#include <linux/delay.h> // mdelay
++#include <linux/init.h>
++#include <linux/interrupt.h> // for tasklets
++#include <asm/ioctl32.h>
++#include <linux/kmod.h>
++#include <linux/module.h>
++#include <linux/proc_fs.h>
++#include <linux/version.h>
++#include "z90crypt.h"
++#include "z90common.h"
++#ifndef Z90CRYPT_USE_HOTPLUG
++#include <linux/miscdevice.h>
++#endif
++
++#define VERSION_CODE(vers, rel, seq) (((vers)<<16) | ((rel)<<8) | (seq))
++#if LINUX_VERSION_CODE < VERSION_CODE(2,4,0) /* version < 2.4 */
++# error "This kernel is too old: not supported"
++#endif
++#if LINUX_VERSION_CODE > VERSION_CODE(2,7,0) /* version > 2.6 */
++# error "This kernel is too recent: not supported by this file"
++#endif
++
++#define VERSION_Z90MAIN_C "$Revision: 1.9.4.13 $"
++
++static char z90cmain_version[] __initdata =
++ "z90main.o (" VERSION_Z90MAIN_C "/"
++ VERSION_Z90COMMON_H "/" VERSION_Z90CRYPT_H ")";
++
++extern char z90chardware_version[];
++
++/**
++ * Defaults that may be modified.
++ */
++
++#ifndef Z90CRYPT_USE_HOTPLUG
++/**
++ * You can specify a different minor at compile time.
++ */
++#ifndef Z90CRYPT_MINOR
++#define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR
++#endif
++#else
++/**
++ * You can specify a different major at compile time.
++ */
++#ifndef Z90CRYPT_MAJOR
++#define Z90CRYPT_MAJOR 0
++#endif
++#endif
++
++/**
++ * You can specify a different domain at compile time or on the insmod
++ * command line.
++ */
++#ifndef DOMAIN_INDEX
++#define DOMAIN_INDEX -1
++#endif
++
++/**
++ * This is the name under which the device is registered in /proc/modules.
++ */
++#define REG_NAME "z90crypt"
++
++/**
++ * Cleanup should run every CLEANUPTIME seconds and should clean up
++ * requests older than CLEANUPTIME seconds.
++ */
++#ifndef CLEANUPTIME
++#define CLEANUPTIME 15
++#endif
++
++/**
++ * Config should run every CONFIGTIME seconds
++ */
++#ifndef CONFIGTIME
++#define CONFIGTIME 30
++#endif
++
++/**
++ * The first execution of the config task should take place
++ * immediately after initialization
++ */
++#ifndef INITIAL_CONFIGTIME
++#define INITIAL_CONFIGTIME 1
++#endif
++
++/**
++ * Reader should run every READERTIME milliseconds
++ * With the 100Hz patch for s390, z90crypt can lock the system solid while
++ * under heavy load. We'll try to avoid that.
++ */
++#ifndef READERTIME
++#if HZ > 1000
++#define READERTIME 2
++#else
++#define READERTIME 10
++#endif
++#endif
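++
++/**
++ * E.g. with HZ=100, READERTIME is 10 and the reader timer fires at
++ * jiffies + (READERTIME * HZ / 1000) = jiffies + 1, i.e. roughly every
++ * 10ms; with HZ > 1000 the 2ms value keeps the interval comparable.
++ */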
++
++/**
++ * turn long device array index into device pointer
++ */
++#define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)])
++
++/**
++ * turn short device array index into long device array index
++ */
++#define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)])
++
++/**
++ * turn short device array index into device pointer
++ */
++#define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx))
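++
++/**
++ * Example: if the installed APs are 3, 9 and 47, then SHRT2LONG(1) is
++ * 9 and SHRT2DEVPTR(1) is z90crypt.device_p[9] (see the comment on
++ * struct device_x below).
++ */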
++
++/**
++ * Status for a work-element
++ */
++#define STAT_DEFAULT 0x00 // request has not been processed
++
++#define STAT_ROUTED 0x80 // bit 7: requests get routed to specific device
++ // else, device is determined each write
++#define STAT_FAILED 0x40 // bit 6: this bit is set if the request failed
++ // before being sent to the hardware.
++#define STAT_WRITTEN 0x30 // bits 5-4: work to be done, not sent to device
++// 0x20 // UNUSED state
++#define STAT_READPEND 0x10 // bits 5-4: work done, we're returning data now
++#define STAT_NOWORK 0x00 // bits off: no work on any queue
++#define STAT_RDWRMASK 0x30 // mask for bits 5-4
++
++/**
++ * Macros to check the status RDWRMASK
++ */
++#define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK)
++#define SET_RDWRMASK(statbyte, newval) \
++ {(statbyte) &= ~STAT_RDWRMASK; (statbyte) |= newval;}
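++
++/**
++ * SET_RDWRMASK clears bits 5-4 and then ORs in the new value, e.g.
++ * SET_RDWRMASK(we_p->status[0], STAT_WRITTEN) marks a request as
++ * written without disturbing STAT_ROUTED or STAT_FAILED.
++ */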
++
++/**
++ * Audit Trail. Progress of a Work element
++ * audit[0]: Unless noted otherwise, these bits are all set by the process
++ */
++#define FP_COPYFROM 0x80 // Caller's buffer has been copied to work element
++#define FP_BUFFREQ 0x40 // Low Level buffer requested
++#define FP_BUFFGOT 0x20 // Low Level buffer obtained
++#define FP_SENT 0x10 // Work element sent to a crypto device
++ // (may be set by process or by reader task)
++#define FP_PENDING 0x08 // Work element placed on pending queue
++ // (may be set by process or by reader task)
++#define FP_REQUEST 0x04 // Work element placed on request queue
++#define FP_ASLEEP 0x02 // Work element about to sleep
++#define FP_AWAKE 0x01 // Work element has been awakened
++
++/**
++ * audit[1]: These bits are set by the reader task and/or the cleanup task
++ */
++#define FP_NOTPENDING 0x80 // Work element removed from pending queue
++#define FP_AWAKENING 0x40 // Caller about to be awakened
++#define FP_TIMEDOUT 0x20 // Caller timed out
++#define FP_RESPSIZESET 0x10 // Response size copied to work element
++#define FP_RESPADDRCOPIED 0x08 // Response address copied to work element
++#define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element
++#define FP_REMREQUEST 0x02 // Work element removed from request queue
++#define FP_SIGNALED 0x01 // Work element was awakened by a signal
++
++/**
++ * audit[2]: unused
++ */
++
++/**
++ * state of the file handle in private_data.status
++ */
++#define STAT_OPEN 0
++#define STAT_CLOSED 1
++
++/**
++ * PID() expands to the process ID of the current process
++ */
++#define PID() (current->pid)
++
++/**
++ * Selected Constants. The number of APs and the number of devices
++ */
++#ifndef Z90CRYPT_NUM_APS
++#define Z90CRYPT_NUM_APS 64
++#endif
++#ifndef Z90CRYPT_NUM_DEVS
++#define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS
++#endif
++
++/**
++ * Buffer size for receiving responses. The maximum Response Size
++ * is actually the maximum request size, since in an error condition
++ * the request itself may be returned unchanged.
++ */
++#define MAX_RESPONSE_SIZE 0x0000077C
++
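++/**
++ * 0x0000077C is 1916 bytes; struct caller below reserves this much
++ * for the device-dependent form of each request.
++ */
++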
++/**
++ * A count and status-byte mask
++ */
++struct status {
++ int st_count; // # of enabled devices
++ int disabled_count; // # of disabled devices
++ int user_disabled_count; // # of devices disabled via proc fs
++ unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask
++};
++
++/**
++ * The array of device indexes is a mechanism for fast indexing into
++ * a long (and sparse) array. For instance, if APs 3, 9 and 47 are
++ * installed, device_index[0] is 3, device_index[1] is 9, and
++ * device_index[2] is 47.
++ */
++struct device_x {
++ int device_index[Z90CRYPT_NUM_DEVS];
++};
++
++/**
++ * All devices are arranged in a single array: 64 APs
++ */
++struct device {
++ int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
++ // PCIXCC_MCL3, CEX2C
++ enum devstat dev_stat; // current device status
++ int dev_self_x; // Index in array
++ int disabled; // Set when device is in error
++ int user_disabled; // Set when device is disabled by user
++ int dev_q_depth; // q depth
++ unsigned char * dev_resp_p; // Response buffer address
++ int dev_resp_l; // Response Buffer length
++ int dev_caller_count; // Number of callers
++ int dev_total_req_cnt; // # requests for device since load
++ struct list_head dev_caller_list; // List of callers
++};
++
++/**
++ * There's a struct status and a struct device_x for each device type.
++ */
++struct hdware_block {
++ struct status hdware_mask;
++ struct status type_mask[Z90CRYPT_NUM_TYPES];
++ struct device_x type_x_addr[Z90CRYPT_NUM_TYPES];
++ unsigned char device_type_array[Z90CRYPT_NUM_APS];
++};
++
++/**
++ * z90crypt is the topmost data structure in the hierarchy.
++ */
++struct z90crypt {
++ int max_count; // Nr of possible crypto devices
++ struct status mask;
++ int q_depth_array[Z90CRYPT_NUM_DEVS];
++ int dev_type_array[Z90CRYPT_NUM_DEVS];
++ struct device_x overall_device_x; // array device indexes
++ struct device * device_p[Z90CRYPT_NUM_DEVS];
++ int terminating;
++ int domain_established; // TRUE: domain has been found
++ int cdx; // Crypto Domain Index
++ int len; // Length of this data structure
++ struct hdware_block *hdware_info;
++};
++
++/**
++ * An array of these structures is pointed to from dev_caller.
++ * The length of the array depends on the device type. For APs,
++ * there are 8.
++ *
++ * The caller buffer is allocated to the user at OPEN. At WRITE,
++ * it contains the request; at READ, the response. The function
++ * send_to_crypto_device converts the request to device-dependent
++ * form and uses the caller's OPEN-allocated buffer for the response.
++ */
++struct caller {
++ int caller_buf_l; // length of original request
++ unsigned char * caller_buf_p; // Original request on WRITE
++ int caller_dev_dep_req_l; // len device dependent request
++ unsigned char * caller_dev_dep_req_p; // Device dependent form
++ unsigned char caller_id[8]; // caller-supplied message id
++ struct list_head caller_liste;
++ unsigned char caller_dev_dep_req[MAX_RESPONSE_SIZE];
++};
++
++/**
++ * Function prototypes from z90hardware.c
++ */
++enum hdstat query_online(int, int, int, int *, int *);
++enum devstat reset_device(int, int, int);
++enum devstat send_to_AP(int, int, int, unsigned char *);
++enum devstat receive_from_AP(int, int, int, unsigned char *, unsigned char *);
++int convert_request(unsigned char *, int, short, int, int, int *,
++ unsigned char *);
++int convert_response(unsigned char *, unsigned char *, int *, unsigned char *);
++
++/**
++ * Low level function prototypes
++ */
++static int create_z90crypt(int *);
++static int refresh_z90crypt(int *);
++static int find_crypto_devices(struct status *);
++static int create_crypto_device(int);
++static int destroy_crypto_device(int);
++static void destroy_z90crypt(void);
++static int refresh_index_array(struct status *, struct device_x *);
++static int probe_device_type(struct device *);
++static int probe_PCIXCC_type(struct device *);
++
++/**
++ * proc fs definitions
++ */
++static struct proc_dir_entry *z90crypt_entry;
++
++/**
++ * data structures
++ */
++
++/**
++ * work_element.opener points back to this structure
++ */
++struct priv_data {
++ pid_t opener_pid;
++ unsigned char status; // 0: open 1: closed
++};
++
++/**
++ * A work element is allocated for each request
++ */
++struct work_element {
++ struct priv_data *priv_data;
++ pid_t pid;
++ int devindex; // index of device processing this w_e
++ // (If request did not specify device,
++ // -1 until placed onto a queue)
++ int devtype;
++ struct list_head liste; // used for requestq and pendingq
++ char buffer[128]; // local copy of user request
++ int buff_size; // size of the buffer for the request
++ char resp_buff[RESPBUFFSIZE];
++ int resp_buff_size;
++ char __user * resp_addr; // address of response in user space
++ unsigned int funccode; // function code of request
++ wait_queue_head_t waitq;
++ unsigned long requestsent; // time at which the request was sent
++ atomic_t alarmrung; // wake-up signal
++ unsigned char caller_id[8]; // pid + counter, for this w_e
++ unsigned char status[1]; // bits to mark status of the request
++ unsigned char audit[3]; // record of work element's progress
++ unsigned char * requestptr; // address of request buffer
++ int retcode; // return code of request
++};
++
++/**
++ * High level function prototypes
++ */
++static int z90crypt_open(struct inode *, struct file *);
++static int z90crypt_release(struct inode *, struct file *);
++static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *);
++static ssize_t z90crypt_write(struct file *, const char __user *,
++ size_t, loff_t *);
++static int z90crypt_ioctl(struct inode *, struct file *,
++ unsigned int, unsigned long);
++
++static void z90crypt_reader_task(unsigned long);
++static void z90crypt_schedule_reader_task(unsigned long);
++static void z90crypt_config_task(unsigned long);
++static void z90crypt_cleanup_task(unsigned long);
++
++static int z90crypt_status(char *, char **, off_t, int, int *, void *);
++static int z90crypt_status_write(struct file *, const char __user *,
++ unsigned long, void *);
++
++/**
++ * Hotplug support
++ */
++
++#ifdef Z90CRYPT_USE_HOTPLUG
++#define Z90CRYPT_HOTPLUG_ADD 1
++#define Z90CRYPT_HOTPLUG_REMOVE 2
++
++static void z90crypt_hotplug_event(int, int, int);
++#endif
++
++/**
++ * Storage allocated at initialization and used throughout the life of
++ * this insmod
++ */
++#ifdef Z90CRYPT_USE_HOTPLUG
++static int z90crypt_major = Z90CRYPT_MAJOR;
++#endif
++
++static int domain = DOMAIN_INDEX;
++static struct z90crypt z90crypt;
++static int quiesce_z90crypt;
++static spinlock_t queuespinlock;
++static struct list_head request_list;
++static int requestq_count;
++static struct list_head pending_list;
++static int pendingq_count;
++
++static struct tasklet_struct reader_tasklet;
++static struct timer_list reader_timer;
++static struct timer_list config_timer;
++static struct timer_list cleanup_timer;
++static atomic_t total_open;
++static atomic_t z90crypt_step;
++
++static struct file_operations z90crypt_fops = {
++ .owner = THIS_MODULE,
++ .read = z90crypt_read,
++ .write = z90crypt_write,
++ .ioctl = z90crypt_ioctl,
++ .open = z90crypt_open,
++ .release = z90crypt_release
++};
++
++#ifndef Z90CRYPT_USE_HOTPLUG
++static struct miscdevice z90crypt_misc_device = {
++ .minor = Z90CRYPT_MINOR,
++ .name = DEV_NAME,
++ .fops = &z90crypt_fops,
++};
++#endif
++
++/**
++ * Documentation values.
++ */
++MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman"
++ "and Jochen Roehrig");
++MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
++ "Copyright 2001, 2004 IBM Corporation");
++MODULE_LICENSE("GPL");
++MODULE_PARM(domain, "i");
++MODULE_PARM_DESC(domain, "domain index for device");
++
++#ifdef CONFIG_S390_SUPPORT
++/**
++ * Borrowed from 2.6 kernel
++ * - compat_uptr_t
++ * - compat_ptr()
++ * - compat_alloc_user_space()
++ */
++/**
++ * A pointer passed in from user mode. This should not
++ * be used for syscall parameters, just declare them
++ * as pointers because the syscall entry code will have
++ * appropriately converted them already.
++ */
++typedef u32 compat_uptr_t;
++
++static inline void __user *compat_ptr(compat_uptr_t uptr)
++{
++ return (void __user *)(unsigned long)(uptr & 0x7fffffffUL);
++}
++
++static inline void __user *compat_alloc_user_space(long len)
++{
++ unsigned long stack;
++
++ stack = KSTK_ESP(current);
++ stack &= 0x7fffffffUL;
++ return (void __user *) (stack - len);
++}
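++
++/**
++ * compat_alloc_user_space carves its scratch area out of the user
++ * stack: KSTK_ESP(current) is the caller's stack pointer, the top bit
++ * is masked off for 31-bit addressing, and the area sits len bytes
++ * below it. The 64-bit ica_rsa_modexpo built by trans_modexpo32 below
++ * therefore lives in user space and must still be accessed with
++ * __put_user/__get_user.
++ */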
++
++/**
++ * ioctl32 conversion routines
++ */
++struct ica_rsa_modexpo_32 { // For 32-bit callers
++ compat_uptr_t inputdata;
++ unsigned int inputdatalength;
++ compat_uptr_t outputdata;
++ unsigned int outputdatalength;
++ compat_uptr_t b_key;
++ compat_uptr_t n_modulus;
++};
++
++static int
++trans_modexpo32(unsigned int fd, unsigned int cmd, unsigned long arg,
++ struct file *file)
++{
++ struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg);
++ struct ica_rsa_modexpo_32 mex32k;
++ struct ica_rsa_modexpo __user *mex64;
++ int ret = 0;
++ unsigned int i;
++
++ if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32)))
++ return -EFAULT;
++ mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo));
++ if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo)))
++ return -EFAULT;
++ if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32)))
++ return -EFAULT;
++ if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata) ||
++ __put_user(mex32k.inputdatalength, &mex64->inputdatalength) ||
++ __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) ||
++ __put_user(mex32k.outputdatalength, &mex64->outputdatalength) ||
++ __put_user(compat_ptr(mex32k.b_key), &mex64->b_key) ||
++ __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus))
++ return -EFAULT;
++ ret = sys_ioctl(fd, cmd, (unsigned long)mex64);
++ if (!ret)
++ if (__get_user(i, &mex64->outputdatalength) ||
++ __put_user(i, &mex32u->outputdatalength))
++ ret = -EFAULT;
++ return ret;
++}
++
++struct ica_rsa_modexpo_crt_32 { // For 32-bit callers
++ compat_uptr_t inputdata;
++ unsigned int inputdatalength;
++ compat_uptr_t outputdata;
++ unsigned int outputdatalength;
++ compat_uptr_t bp_key;
++ compat_uptr_t bq_key;
++ compat_uptr_t np_prime;
++ compat_uptr_t nq_prime;
++ compat_uptr_t u_mult_inv;
++};
++
++static int
++trans_modexpo_crt32(unsigned int fd, unsigned int cmd, unsigned long arg,
++ struct file *file)
++{
++ struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg);
++ struct ica_rsa_modexpo_crt_32 crt32k;
++ struct ica_rsa_modexpo_crt __user *crt64;
++ int ret = 0;
++ unsigned int i;
++
++ if (!access_ok(VERIFY_WRITE, crt32u,
++ sizeof(struct ica_rsa_modexpo_crt_32)))
++ return -EFAULT;
++ crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt));
++ if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt)))
++ return -EFAULT;
++ if (copy_from_user(&crt32k, crt32u,
++ sizeof(struct ica_rsa_modexpo_crt_32)))
++ return -EFAULT;
++ if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata) ||
++ __put_user(crt32k.inputdatalength, &crt64->inputdatalength) ||
++ __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) ||
++ __put_user(crt32k.outputdatalength, &crt64->outputdatalength) ||
++ __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key) ||
++ __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key) ||
++ __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime) ||
++ __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime) ||
++ __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv))
++ ret = -EFAULT;
++ if (!ret)
++ ret = sys_ioctl(fd, cmd, (unsigned long)crt64);
++ if (!ret)
++ if (__get_user(i, &crt64->outputdatalength) ||
++ __put_user(i, &crt32u->outputdatalength))
++ ret = -EFAULT;
++ return ret;
++}
++
++static int compatible_ioctls[] = {
++ ICAZ90STATUS, Z90QUIESCE, Z90STAT_TOTALCOUNT, Z90STAT_PCICACOUNT,
++ Z90STAT_PCICCCOUNT, Z90STAT_PCIXCCCOUNT, Z90STAT_PCIXCCMCL2COUNT,
++ Z90STAT_PCIXCCMCL3COUNT, Z90STAT_CEX2CCOUNT, Z90STAT_REQUESTQ_COUNT,
++ Z90STAT_PENDINGQ_COUNT, Z90STAT_TOTALOPEN_COUNT, Z90STAT_DOMAIN_INDEX,
++ Z90STAT_STATUS_MASK, Z90STAT_QDEPTH_MASK, Z90STAT_PERDEV_REQCNT,
++};
++
++static void z90_unregister_ioctl32s(void)
++{
++ int i;
++
++ unregister_ioctl32_conversion(ICARSAMODEXPO);
++ unregister_ioctl32_conversion(ICARSACRT);
++
++ for(i = 0; i < ARRAY_SIZE(compatible_ioctls); i++)
++ unregister_ioctl32_conversion(compatible_ioctls[i]);
++}
++
++static int z90_register_ioctl32s(void)
++{
++ int result, i;
++
++ result = register_ioctl32_conversion(ICARSAMODEXPO, trans_modexpo32);
++ if (result == -EBUSY) {
++ unregister_ioctl32_conversion(ICARSAMODEXPO);
++ result = register_ioctl32_conversion(ICARSAMODEXPO,
++ trans_modexpo32);
++ }
++ if (result)
++ return result;
++ result = register_ioctl32_conversion(ICARSACRT, trans_modexpo_crt32);
++ if (result == -EBUSY) {
++ unregister_ioctl32_conversion(ICARSACRT);
++ result = register_ioctl32_conversion(ICARSACRT,
++ trans_modexpo_crt32);
++ }
++ if (result)
++ return result;
++
++ for(i = 0; i < ARRAY_SIZE(compatible_ioctls); i++) {
++ result = register_ioctl32_conversion(compatible_ioctls[i],
++ (void *) sys_ioctl);
++ if (result == -EBUSY) {
++ unregister_ioctl32_conversion(compatible_ioctls[i]);
++ result = register_ioctl32_conversion(
++ compatible_ioctls[i],
++ (void *) sys_ioctl);
++ }
++ if (result)
++ return result;
++ }
++ return 0;
++}
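++
++/**
++ * The -EBUSY retry above handles a conversion that is already
++ * registered (e.g. left behind by an earlier load of this module):
++ * unregister the stale entry once and try again; any other failure is
++ * passed back and aborts module initialization.
++ */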
++#else // !CONFIG_S390_SUPPORT
++static inline void z90_unregister_ioctl32s(void)
++{
++}
++
++static inline int z90_register_ioctl32s(void)
++{
++ return 0;
++}
++#endif
++
++/**
++ * The module initialization code.
++ */
++static int __init
++z90crypt_init_module(void)
++{
++ int result, nresult;
++ struct proc_dir_entry *entry;
++
++ PDEBUG("PID %d\n", PID());
++
++ if ((domain < -1) || (domain > 15)) {
++ PRINTKW("Invalid param: domain = %d. Not loading.\n", domain);
++ return -EINVAL;
++ }
++
++#ifndef Z90CRYPT_USE_HOTPLUG
++ /* Register as misc device with given minor (or get a dynamic one). */
++ result = misc_register(&z90crypt_misc_device);
++ if (result < 0) {
++ PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
++ z90crypt_misc_device.minor, result);
++ return result;
++ }
++#else
++ /* Register the major (or get a dynamic one). */
++ result = register_chrdev(z90crypt_major, REG_NAME, &z90crypt_fops);
++ if (result < 0) {
++ PRINTKW("register_chrdev (major %d) failed with %d.\n",
++ z90crypt_major, result);
++ return result;
++ }
++
++ if (z90crypt_major == 0)
++ z90crypt_major = result;
++#endif
++
++ PDEBUG("Registered " DEV_NAME " with result %d\n", result);
++
++ result = create_z90crypt(&domain);
++ if (result != 0) {
++ PRINTKW("create_z90crypt (domain index %d) failed with %d.\n",
++ domain, result);
++ result = -ENOMEM;
++ goto init_module_cleanup;
++ }
++
++ if (result == 0) {
++ PRINTKN("Version %d.%d.%d loaded, built on %s %s\n",
++ z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT,
++ __DATE__, __TIME__);
++ PRINTKN("%s\n", z90cmain_version);
++ PRINTKN("%s\n", z90chardware_version);
++ PDEBUG("create_z90crypt (domain index %d) successful.\n",
++ domain);
++ } else
++ PRINTK("No devices at startup\n");
++
++#ifdef Z90CRYPT_USE_HOTPLUG
++ /* generate hotplug event for device node generation */
++ z90crypt_hotplug_event(z90crypt_major, 0, Z90CRYPT_HOTPLUG_ADD);
++#endif
++
++ /* Initialize globals. */
++ spin_lock_init(&queuespinlock);
++
++ INIT_LIST_HEAD(&pending_list);
++ pendingq_count = 0;
++
++ INIT_LIST_HEAD(&request_list);
++ requestq_count = 0;
++
++ quiesce_z90crypt = 0;
++
++ atomic_set(&total_open, 0);
++ atomic_set(&z90crypt_step, 0);
++
++ /* Set up the cleanup task. */
++ init_timer(&cleanup_timer);
++ cleanup_timer.function = z90crypt_cleanup_task;
++ cleanup_timer.data = 0;
++ cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ);
++ add_timer(&cleanup_timer);
++
++ /* Set up the proc file system */
++ entry = create_proc_entry("driver/z90crypt", 0644, 0);
++ if (entry) {
++ entry->nlink = 1;
++ entry->data = 0;
++ entry->read_proc = z90crypt_status;
++ entry->write_proc = z90crypt_status_write;
++ }
++ else
++ PRINTK("Couldn't create z90crypt proc entry\n");
++ z90crypt_entry = entry;
++
++ /* Set up the configuration task. */
++ init_timer(&config_timer);
++ config_timer.function = z90crypt_config_task;
++ config_timer.data = 0;
++ config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ);
++ add_timer(&config_timer);
++
++ /* Set up the reader task */
++ tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
++ init_timer(&reader_timer);
++ reader_timer.function = z90crypt_schedule_reader_task;
++ reader_timer.data = 0;
++ reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
++ add_timer(&reader_timer);
++
++ if ((result = z90_register_ioctl32s()))
++ goto init_module_cleanup;
++
++ return 0; // success
++
++init_module_cleanup:
++ z90_unregister_ioctl32s();
++
++#ifndef Z90CRYPT_USE_HOTPLUG
++ if ((nresult = misc_deregister(&z90crypt_misc_device)))
++ PRINTK("misc_deregister failed with %d.\n", nresult);
++ else
++ PDEBUG("misc_deregister successful.\n");
++#else
++ if ((nresult = unregister_chrdev(z90crypt_major, REG_NAME)))
++ PRINTK("unregister_chrdev failed with %d.\n", nresult);
++ else
++ PDEBUG("unregister_chrdev successful.\n");
++#endif
++
++ return result; // failure
++}
++
++/**
++ * The module termination code
++ */
++static void __exit
++z90crypt_cleanup_module(void)
++{
++ int nresult;
++
++ PDEBUG("PID %d\n", PID());
++
++ z90_unregister_ioctl32s();
++
++ remove_proc_entry("driver/z90crypt", 0);
++
++#ifndef Z90CRYPT_USE_HOTPLUG
++ if ((nresult = misc_deregister(&z90crypt_misc_device)))
++ PRINTK("misc_deregister failed with %d.\n", nresult);
++ else
++ PDEBUG("misc_deregister successful.\n");
++#else
++ z90crypt_hotplug_event(z90crypt_major, 0, Z90CRYPT_HOTPLUG_REMOVE);
++
++ if ((nresult = unregister_chrdev(z90crypt_major, REG_NAME)))
++ PRINTK("unregister_chrdev failed with %d.\n", nresult);
++ else
++ PDEBUG("unregister_chrdev successful.\n");
++#endif
++
++ /* Remove the tasks */
++ tasklet_kill(&reader_tasklet);
++ del_timer(&reader_timer);
++ del_timer(&config_timer);
++ del_timer(&cleanup_timer);
++
++ destroy_z90crypt();
++
++ PRINTKN("Unloaded.\n");
++}
++
++/**
++ * Functions running under a process id
++ *
++ * The I/O functions:
++ * z90crypt_open
++ * z90crypt_release
++ * z90crypt_read
++ * z90crypt_write
++ * z90crypt_ioctl
++ * z90crypt_status
++ * z90crypt_status_write
++ * disable_card
++ * enable_card
++ * scan_char
++ * scan_string
++ *
++ * Helper functions:
++ * z90crypt_rsa
++ * z90crypt_prepare
++ * z90crypt_send
++ * z90crypt_process_results
++ *
++ */
++static int
++z90crypt_open(struct inode *inode, struct file *filp)
++{
++ struct priv_data *private_data_p;
++
++ if (quiesce_z90crypt)
++ return -EQUIESCE;
++
++ private_data_p = kmalloc(sizeof(struct priv_data), GFP_KERNEL);
++ if (!private_data_p) {
++ PRINTK("Memory allocate failed\n");
++ return -ENOMEM;
++ }
++
++ memset((void *)private_data_p, 0, sizeof(struct priv_data));
++ private_data_p->status = STAT_OPEN;
++ private_data_p->opener_pid = PID();
++ filp->private_data = private_data_p;
++ atomic_inc(&total_open);
++
++ return 0;
++}
++
++static int
++z90crypt_release(struct inode *inode, struct file *filp)
++{
++ struct priv_data *private_data_p = filp->private_data;
++
++ PDEBUG("PID %d (filp %p)\n", PID(), filp);
++
++ private_data_p->status = STAT_CLOSED;
++ memset(private_data_p, 0, sizeof(struct priv_data));
++ kfree(private_data_p);
++ atomic_dec(&total_open);
++
++ return 0;
++}
++
++/*
++ * There are two read functions; a compile-time option selects one:
++ * without USE_GET_RANDOM_BYTES,
++ * => read() always returns -EPERM;
++ * otherwise,
++ * => read() uses the get_random_bytes() kernel function.
++ */
++#ifndef USE_GET_RANDOM_BYTES
++/**
++ * z90crypt_read will not be supported beyond z90crypt 1.3.1
++ */
++static ssize_t
++z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
++{
++ PDEBUG("filp %p (PID %d)\n", filp, PID());
++ return -EPERM;
++}
++#else // we want to use get_random_bytes
++/**
++ * read() just returns a string of random bytes. Since we have no way
++ * to generate these cryptographically, we just execute get_random_bytes
++ * for the length specified.
++ */
++#include <linux/random.h>
++static ssize_t
++z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
++{
++ unsigned char *temp_buff;
++
++ PDEBUG("filp %p (PID %d)\n", filp, PID());
++
++ if (quiesce_z90crypt)
++ return -EQUIESCE;
++ if (count < 0) {
++ PRINTK("Requested random byte count negative: %ld\n", count);
++ return -EINVAL;
++ }
++ if (count > RESPBUFFSIZE) {
++ PDEBUG("count[%d] > RESPBUFFSIZE", count);
++ return -EINVAL;
++ }
++ if (count == 0)
++ return 0;
++ temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL);
++ if (!temp_buff) {
++ PRINTK("Memory allocate failed\n");
++ return -ENOMEM;
++ }
++ get_random_bytes(temp_buff, count);
++
++ if (copy_to_user(buf, temp_buff, count) != 0) {
++ kfree(temp_buff);
++ return -EFAULT;
++ }
++ kfree(temp_buff);
++ return count;
++}
++#endif
++
++/**
++ * Write is not allowed
++ */
++static ssize_t
++z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
++{
++ PDEBUG("filp %p (PID %d)\n", filp, PID());
++ return -EPERM;
++}
++
++/**
++ * New status functions
++ */
++static inline int
++get_status_totalcount(void)
++{
++ return z90crypt.hdware_info->hdware_mask.st_count;
++}
++
++static inline int
++get_status_PCICAcount(void)
++{
++ return z90crypt.hdware_info->type_mask[PCICA].st_count;
++}
++
++static inline int
++get_status_PCICCcount(void)
++{
++ return z90crypt.hdware_info->type_mask[PCICC].st_count;
++}
++
++static inline int
++get_status_PCIXCCcount(void)
++{
++ return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count +
++ z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
++}
++
++static inline int
++get_status_PCIXCCMCL2count(void)
++{
++ return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count;
++}
++
++static inline int
++get_status_PCIXCCMCL3count(void)
++{
++ return z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
++}
++
++static inline int
++get_status_CEX2Ccount(void)
++{
++ return z90crypt.hdware_info->type_mask[CEX2C].st_count;
++}
++
++static inline int
++get_status_requestq_count(void)
++{
++ return requestq_count;
++}
++
++static inline int
++get_status_pendingq_count(void)
++{
++ return pendingq_count;
++}
++
++static inline int
++get_status_totalopen_count(void)
++{
++ return atomic_read(&total_open);
++}
++
++static inline int
++get_status_domain_index(void)
++{
++ return z90crypt.cdx;
++}
++
++static inline unsigned char *
++get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS])
++{
++ int i, ix;
++
++ memcpy(status, z90crypt.hdware_info->device_type_array,
++ Z90CRYPT_NUM_APS);
++
++ for (i = 0; i < get_status_totalcount(); i++) {
++ ix = SHRT2LONG(i);
++ if (LONG2DEVPTR(ix)->user_disabled)
++ status[ix] = 0x0d;
++ }
++
++ return status;
++}
++
++static inline unsigned char *
++get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS])
++{
++ int i, ix;
++
++ memset(qdepth, 0, Z90CRYPT_NUM_APS);
++
++ for (i = 0; i < get_status_totalcount(); i++) {
++ ix = SHRT2LONG(i);
++ qdepth[ix] = LONG2DEVPTR(ix)->dev_caller_count;
++ }
++
++ return qdepth;
++}
++
++static inline unsigned int *
++get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS])
++{
++ int i, ix;
++
++ memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int));
++
++ for (i = 0; i < get_status_totalcount(); i++) {
++ ix = SHRT2LONG(i);
++ reqcnt[ix] = LONG2DEVPTR(ix)->dev_total_req_cnt;
++ }
++
++ return reqcnt;
++}
++
++static inline void
++init_work_element(struct work_element *we_p,
++ struct priv_data *priv_data, pid_t pid)
++{
++ int step;
++
++ we_p->requestptr = (unsigned char *)we_p + sizeof(struct work_element);
++ /* Come up with a unique id for this caller. */
++ step = atomic_inc_return(&z90crypt_step);
++ memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid));
++ memcpy(we_p->caller_id+4, (void *) &step, sizeof(step));
++ we_p->pid = pid;
++ we_p->priv_data = priv_data;
++ we_p->status[0] = STAT_DEFAULT;
++ we_p->audit[0] = 0x00;
++ we_p->audit[1] = 0x00;
++ we_p->audit[2] = 0x00;
++ we_p->resp_buff_size = 0;
++ we_p->retcode = 0;
++ we_p->devindex = -1;
++ we_p->devtype = -1;
++ atomic_set(&we_p->alarmrung, 0);
++ init_waitqueue_head(&we_p->waitq);
++ INIT_LIST_HEAD(&(we_p->liste));
++}
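++
++/**
++ * The caller id built above is 8 bytes: the pid in bytes 0-3 and the
++ * per-module step counter in bytes 4-7, so concurrent requests from
++ * one process stay distinguishable. build_caller later copies it into
++ * bytes 4-11 of the device-dependent request.
++ */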
++
++static inline int
++allocate_work_element(struct work_element **we_pp,
++ struct priv_data *priv_data_p, pid_t pid)
++{
++ struct work_element *we_p;
++
++ we_p = (struct work_element *) get_zeroed_page(GFP_KERNEL);
++ if (!we_p)
++ return -ENOMEM;
++ init_work_element(we_p, priv_data_p, pid);
++ *we_pp = we_p;
++ return 0;
++}
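++
++/**
++ * One zeroed page holds everything for a request. Its layout, as used
++ * by init_work_element and get_crypto_request_buffer:
++ *
++ *   [ struct work_element | struct caller | staged input data ]
++ *
++ * requestptr points just past the work_element, i.e. at the caller.
++ */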
++
++static inline void
++remove_device(struct device *device_p)
++{
++ if (!device_p || (device_p->disabled != 0))
++ return;
++ device_p->disabled = 1;
++ z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++;
++ z90crypt.hdware_info->hdware_mask.disabled_count++;
++}
++
++/**
++ * Bitlength limits for each card
++ *
++ * There are new MCLs which allow more bitlengths. See the table for details.
++ * The MCL must be applied and the newer bitlengths enabled for these to work.
++ *
++ * Card Type Old limit New limit
++ * PCICC 512-1024 512-2048
++ * PCIXCC_MCL2 512-2048 no change (applying this MCL == card is MCL3+)
++ * PCIXCC_MCL3 512-2048 128-2048
++ * CEX2C 512-2048 128-2048
++ *
++ * ext_bitlens (extended bitlengths) is a global, since you should not apply an
++ * MCL to just one card in a machine. We assume, at first, that all cards have
++ * these capabilities.
++ */
++int ext_bitlens = 1; // This is global
++#define PCIXCC_MIN_MOD_SIZE 16 // 128 bits
++#define OLD_PCIXCC_MIN_MOD_SIZE 64 // 512 bits
++#define PCICC_MIN_MOD_SIZE 64 // 512 bits
++#define OLD_PCICC_MAX_MOD_SIZE 128 // 1024 bits
++#define MAX_MOD_SIZE 256 // 2048 bits
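++
++/**
++ * The *_MOD_SIZE limits are in bytes of modulus: a 512-bit key is 64
++ * bytes and a 2048-bit key is 256 bytes. So with ext_bitlens set, a
++ * 128-bit (16-byte) request may go to a PCIXCC_MCL3 or CEX2C but never
++ * to a PCICC, whose floor stays at 512 bits.
++ */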
++
++static inline int
++select_device_type(int *dev_type_p, int bytelength)
++{
++ static int count = 0;
++ int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, index_to_use;
++ struct status *stat;
++ if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
++ (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
++ (*dev_type_p != CEX2C) && (*dev_type_p != ANYDEV))
++ return -1;
++ if (*dev_type_p != ANYDEV) {
++ stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
++ if (stat->st_count >
++ (stat->disabled_count + stat->user_disabled_count))
++ return 0;
++ return -1;
++ }
++
++ /* Assumption: PCICA, PCIXCC_MCL3, and CEX2C are all similar in speed */
++ stat = &z90crypt.hdware_info->type_mask[PCICA];
++ PCICA_avail = stat->st_count -
++ (stat->disabled_count + stat->user_disabled_count);
++ stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL3];
++ PCIXCC_MCL3_avail = stat->st_count -
++ (stat->disabled_count + stat->user_disabled_count);
++ stat = &z90crypt.hdware_info->type_mask[CEX2C];
++ CEX2C_avail = stat->st_count -
++ (stat->disabled_count + stat->user_disabled_count);
++ if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail) {
++ /**
++ * bitlength is a factor, PCICA is the most capable, even with
++ * the new MCL.
++ */
++ if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
++ (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
++ if (!PCICA_avail)
++ return -1;
++ else {
++ *dev_type_p = PCICA;
++ return 0;
++ }
++ }
++
++ index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
++ CEX2C_avail);
++ if (index_to_use < PCICA_avail)
++ *dev_type_p = PCICA;
++ else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
++ *dev_type_p = PCIXCC_MCL3;
++ else
++ *dev_type_p = CEX2C;
++ count++;
++ return 0;
++ }
++
++ /* Less than OLD_PCIXCC_MIN_MOD_SIZE cannot go to a PCIXCC_MCL2 */
++ if (bytelength < OLD_PCIXCC_MIN_MOD_SIZE)
++ return -1;
++ stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL2];
++ if (stat->st_count >
++ (stat->disabled_count + stat->user_disabled_count)) {
++ *dev_type_p = PCIXCC_MCL2;
++ return 0;
++ }
++
++ /**
++ * Less than PCICC_MIN_MOD_SIZE or more than OLD_PCICC_MAX_MOD_SIZE
++ * (if we don't have the MCL applied and the newer bitlengths enabled)
++ * cannot go to a PCICC
++ */
++ if ((bytelength < PCICC_MIN_MOD_SIZE) ||
++ (!ext_bitlens && (bytelength > OLD_PCICC_MAX_MOD_SIZE))) {
++ return -1;
++ }
++ stat = &z90crypt.hdware_info->type_mask[PCICC];
++ if (stat->st_count >
++ (stat->disabled_count + stat->user_disabled_count)) {
++ *dev_type_p = PCICC;
++ return 0;
++ }
++
++ return -1;
++}
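++
++/**
++ * For ANYDEV requests the fast types are balanced round-robin: with,
++ * say, 2 PCICAs, 1 PCIXCC_MCL3 and 1 CEX2C available, count % 4 maps
++ * 0-1 -> PCICA, 2 -> PCIXCC_MCL3 and 3 -> CEX2C. Short moduli that
++ * only a PCICA can handle bypass the rotation entirely.
++ */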
++
++/**
++ * Try the selected number, then the selected type (can be ANYDEV)
++ */
++static inline int
++select_device(int *dev_type_p, int *device_nr_p, int bytelength)
++{
++ int i, indx, devTp, low_count, low_indx;
++ struct device_x *index_p;
++ struct device *dev_ptr;
++
++ PDEBUG("device type = %d, index = %d\n", *dev_type_p, *device_nr_p);
++ if ((*device_nr_p >= 0) && (*device_nr_p < Z90CRYPT_NUM_DEVS)) {
++ PDEBUG("trying index = %d\n", *device_nr_p);
++ dev_ptr = z90crypt.device_p[*device_nr_p];
++
++ if (dev_ptr &&
++ (dev_ptr->dev_stat != DEV_GONE) &&
++ (dev_ptr->disabled == 0) &&
++ (dev_ptr->user_disabled == 0)) {
++ PDEBUG("selected by number, index = %d\n",
++ *device_nr_p);
++ *dev_type_p = dev_ptr->dev_type;
++ return *device_nr_p;
++ }
++ }
++ *device_nr_p = -1;
++ PDEBUG("trying type = %d\n", *dev_type_p);
++ devTp = *dev_type_p;
++ if (select_device_type(&devTp, bytelength) == -1) {
++ PDEBUG("failed to select by type\n");
++ return -1;
++ }
++ PDEBUG("selected type = %d\n", devTp);
++ index_p = &z90crypt.hdware_info->type_x_addr[devTp];
++ low_count = 0x0000FFFF;
++ low_indx = -1;
++ for (i = 0; i < z90crypt.hdware_info->type_mask[devTp].st_count; i++) {
++ indx = index_p->device_index[i];
++ dev_ptr = z90crypt.device_p[indx];
++ if (dev_ptr &&
++ (dev_ptr->dev_stat != DEV_GONE) &&
++ (dev_ptr->disabled == 0) &&
++ (dev_ptr->user_disabled == 0) &&
++ (devTp == dev_ptr->dev_type) &&
++ (low_count > dev_ptr->dev_caller_count)) {
++ low_count = dev_ptr->dev_caller_count;
++ low_indx = indx;
++ }
++ }
++ *device_nr_p = low_indx;
++ return low_indx;
++}
++
++static inline int
++send_to_crypto_device(struct work_element *we_p)
++{
++ struct caller *caller_p;
++ struct device *device_p;
++ int dev_nr;
++ int bytelen = ((struct ica_rsa_modexpo *)we_p->buffer)->inputdatalength;
++
++ if (!we_p->requestptr)
++ return SEN_FATAL_ERROR;
++ caller_p = (struct caller *)we_p->requestptr;
++ dev_nr = we_p->devindex;
++ if (select_device(&we_p->devtype, &dev_nr, bytelen) == -1) {
++ if (z90crypt.hdware_info->hdware_mask.st_count != 0)
++ return SEN_RETRY;
++ else
++ return SEN_NOT_AVAIL;
++ }
++ we_p->devindex = dev_nr;
++ device_p = z90crypt.device_p[dev_nr];
++ if (!device_p)
++ return SEN_NOT_AVAIL;
++ if (device_p->dev_type != we_p->devtype)
++ return SEN_RETRY;
++ if (device_p->dev_caller_count >= device_p->dev_q_depth)
++ return SEN_QUEUE_FULL;
++ PDEBUG("device number prior to send: %d\n", dev_nr);
++ switch (send_to_AP(dev_nr, z90crypt.cdx,
++ caller_p->caller_dev_dep_req_l,
++ caller_p->caller_dev_dep_req_p)) {
++ case DEV_SEN_EXCEPTION:
++ PRINTKC("Exception during send to device %d\n", dev_nr);
++ z90crypt.terminating = 1;
++ return SEN_FATAL_ERROR;
++ case DEV_GONE:
++ PRINTK("Device %d not available\n", dev_nr);
++ remove_device(device_p);
++ return SEN_NOT_AVAIL;
++ case DEV_EMPTY:
++ return SEN_NOT_AVAIL;
++ case DEV_NO_WORK:
++ return SEN_FATAL_ERROR;
++ case DEV_BAD_MESSAGE:
++ return SEN_USER_ERROR;
++ case DEV_QUEUE_FULL:
++ return SEN_QUEUE_FULL;
++ default:
++ case DEV_ONLINE:
++ break;
++ }
++ list_add_tail(&(caller_p->caller_liste), &(device_p->dev_caller_list));
++ device_p->dev_caller_count++;
++ return 0;
++}
++
++/**
++ * Send puts the user's work on one of two queues:
++ * the pending queue if the send was successful
++ * the request queue if the send failed because device full or busy
++ */
++static inline int
++z90crypt_send(struct work_element *we_p, const char *buf)
++{
++ int rv;
++
++ PDEBUG("PID %d\n", PID());
++
++ if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK) {
++ PDEBUG("PID %d tried to send more work but has outstanding "
++ "work.\n", PID());
++ return -EWORKPEND;
++ }
++ we_p->devindex = -1; // Reset device number
++ spin_lock_irq(&queuespinlock);
++ rv = send_to_crypto_device(we_p);
++ switch (rv) {
++ case 0:
++ we_p->requestsent = jiffies;
++ we_p->audit[0] |= FP_SENT;
++ list_add_tail(&we_p->liste, &pending_list);
++ ++pendingq_count;
++ we_p->audit[0] |= FP_PENDING;
++ break;
++ case SEN_BUSY:
++ case SEN_QUEUE_FULL:
++ rv = 0;
++ we_p->devindex = -1; // any device will do
++ we_p->requestsent = jiffies;
++ list_add_tail(&we_p->liste, &request_list);
++ ++requestq_count;
++ we_p->audit[0] |= FP_REQUEST;
++ break;
++ case SEN_RETRY:
++ rv = -ERESTARTSYS;
++ break;
++ case SEN_NOT_AVAIL:
++ PRINTK("*** No devices available.\n");
++ rv = we_p->retcode = -ENODEV;
++ we_p->status[0] |= STAT_FAILED;
++ break;
++ case REC_OPERAND_INV:
++ case REC_OPERAND_SIZE:
++ case REC_EVEN_MOD:
++ case REC_INVALID_PAD:
++ rv = we_p->retcode = -EINVAL;
++ we_p->status[0] |= STAT_FAILED;
++ break;
++ default:
++ we_p->retcode = rv;
++ we_p->status[0] |= STAT_FAILED;
++ break;
++ }
++ if (rv != -ERESTARTSYS)
++ SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
++ spin_unlock_irq(&queuespinlock);
++ if (rv == 0)
++ tasklet_schedule(&reader_tasklet);
++ return rv;
++}
++
++/**
++ * process_results copies the user's work from kernel space.
++ */
++static inline int
++z90crypt_process_results(struct work_element *we_p, char __user *buf)
++{
++ int rv;
++
++ PDEBUG("we_p %p (PID %d)\n", we_p, PID());
++
++ LONG2DEVPTR(we_p->devindex)->dev_total_req_cnt++;
++ SET_RDWRMASK(we_p->status[0], STAT_READPEND);
++
++ rv = 0;
++ if (!we_p->buffer) {
++ PRINTK("we_p %p PID %d in STAT_READPEND: buffer NULL.\n",
++ we_p, PID());
++ rv = -ENOBUFF;
++ }
++
++ if (!rv)
++ if ((rv = copy_to_user(buf, we_p->buffer, we_p->buff_size))) {
++ PDEBUG("copy_to_user failed: rv = %d\n", rv);
++ rv = -EFAULT;
++ }
++
++ if (!rv)
++ rv = we_p->retcode;
++ if (!rv)
++ if (we_p->resp_buff_size
++ && copy_to_user(we_p->resp_addr, we_p->resp_buff,
++ we_p->resp_buff_size))
++ rv = -EFAULT;
++
++ SET_RDWRMASK(we_p->status[0], STAT_NOWORK);
++ return rv;
++}
++
++static unsigned char NULL_psmid[8] =
++{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
++
++/**
++ * Used in device configuration functions
++ */
++#define MAX_RESET 90
++
++/**
++ * This is used only for PCICC support
++ */
++static inline int
++is_PKCS11_padded(unsigned char *buffer, int length)
++{
++ int i;
++ if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
++ return 0;
++ for (i = 2; i < length; i++)
++ if (buffer[i] != 0xFF)
++ break;
++ if ((i < 10) || (i == length))
++ return 0;
++ if (buffer[i] != 0x00)
++ return 0;
++ return 1;
++}
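++
++/**
++ * The shape tested above is the PKCS#1 block-type-01 layout:
++ *
++ *   0x00 0x01 FF FF ... FF 0x00 <data>
++ *
++ * with at least 8 bytes of 0xFF fill (i >= 10, counting the two header
++ * bytes) and a 0x00 separator before the end of the buffer.
++ */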
++
++/**
++ * This is used only for PCICC support
++ */
++static inline int
++is_PKCS12_padded(unsigned char *buffer, int length)
++{
++ int i;
++ if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
++ return 0;
++ for (i = 2; i < length; i++)
++ if (buffer[i] == 0x00)
++ break;
++ if ((i < 10) || (i == length))
++ return 0;
++ if (buffer[i] != 0x00)
++ return 0;
++ return 1;
++}
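++
++/**
++ * Similarly, this tests for the PKCS#1 block-type-02 layout:
++ *
++ *   0x00 0x02 <nonzero fill> 0x00 <data>
++ *
++ * with at least 8 bytes of nonzero fill before the 0x00 separator. A
++ * match means the caller handed in an already-padded encryption block,
++ * so the PCICC mod-expo is treated as an encrypt.
++ */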
++
++/**
++ * builds struct caller and converts message from generic format to
++ * device-dependent format
++ * func is ICARSAMODEXPO or ICARSACRT
++ * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT
++ */
++static inline int
++build_caller(struct work_element *we_p, short function)
++{
++ int rv;
++ struct caller *caller_p = (struct caller *)we_p->requestptr;
++
++ if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
++ (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
++ (we_p->devtype != CEX2C))
++ return SEN_NOT_AVAIL;
++
++ memcpy(caller_p->caller_id, we_p->caller_id,
++ sizeof(caller_p->caller_id));
++ caller_p->caller_dev_dep_req_p = caller_p->caller_dev_dep_req;
++ caller_p->caller_dev_dep_req_l = MAX_RESPONSE_SIZE;
++ caller_p->caller_buf_p = we_p->buffer;
++ INIT_LIST_HEAD(&(caller_p->caller_liste));
++
++ rv = convert_request(we_p->buffer, we_p->funccode, function,
++ z90crypt.cdx, we_p->devtype,
++ &caller_p->caller_dev_dep_req_l,
++ caller_p->caller_dev_dep_req_p);
++ if (rv) {
++ if (rv == SEN_NOT_AVAIL)
++ PDEBUG("request can't be processed on hdwr avail\n");
++ else
++ PRINTK("Error from convert_request: %d\n", rv);
++ }
++ else
++ memcpy(&(caller_p->caller_dev_dep_req_p[4]), we_p->caller_id,8);
++ return rv;
++}
++
++static inline void
++unbuild_caller(struct device *device_p, struct caller *caller_p)
++{
++ if (!caller_p)
++ return;
++ if (caller_p->caller_liste.next && caller_p->caller_liste.prev)
++ if (!list_empty(&caller_p->caller_liste)) {
++ list_del_init(&caller_p->caller_liste);
++ device_p->dev_caller_count--;
++ }
++ memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id));
++}
++
++static inline int
++get_crypto_request_buffer(struct work_element *we_p)
++{
++ struct ica_rsa_modexpo *mex_p;
++ struct ica_rsa_modexpo_crt *crt_p;
++ unsigned char *temp_buffer;
++ short function;
++ int rv;
++
++ mex_p = (struct ica_rsa_modexpo *) we_p->buffer;
++ crt_p = (struct ica_rsa_modexpo_crt *) we_p->buffer;
++
++ PDEBUG("device type input = %d\n", we_p->devtype);
++
++ if (z90crypt.terminating)
++ return REC_NO_RESPONSE;
++ if (memcmp(we_p->caller_id, NULL_psmid, 8) == 0) {
++ PRINTK("psmid zeroes\n");
++ return SEN_FATAL_ERROR;
++ }
++ if (!we_p->buffer) {
++ PRINTK("buffer pointer NULL\n");
++ return SEN_USER_ERROR;
++ }
++ if (!we_p->requestptr) {
++ PRINTK("caller pointer NULL\n");
++ return SEN_USER_ERROR;
++ }
++
++ if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
++ (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
++ (we_p->devtype != CEX2C) && (we_p->devtype != ANYDEV)) {
++ PRINTK("invalid device type\n");
++ return SEN_USER_ERROR;
++ }
++
++ if ((mex_p->inputdatalength < 1) ||
++ (mex_p->inputdatalength > MAX_MOD_SIZE)) {
++ PRINTK("inputdatalength[%d] is not valid\n",
++ mex_p->inputdatalength);
++ return SEN_USER_ERROR;
++ }
++
++ if (mex_p->outputdatalength < mex_p->inputdatalength) {
++ PRINTK("outputdatalength[%d] < inputdatalength[%d]\n",
++ mex_p->outputdatalength, mex_p->inputdatalength);
++ return SEN_USER_ERROR;
++ }
++
++ if (!mex_p->inputdata || !mex_p->outputdata) {
++ PRINTK("inputdata[%p] or outputdata[%p] is NULL\n",
++ mex_p->outputdata, mex_p->inputdata);
++ return SEN_USER_ERROR;
++ }
++
++ /**
++ * As long as outputdatalength is big enough, we can set the
++ * outputdatalength equal to the inputdatalength, since that is the
++ * number of bytes we will copy in any case
++ */
++ mex_p->outputdatalength = mex_p->inputdatalength;
++
++ rv = 0;
++ switch (we_p->funccode) {
++ case ICARSAMODEXPO:
++ if (!mex_p->b_key || !mex_p->n_modulus)
++ rv = SEN_USER_ERROR;
++ break;
++ case ICARSACRT:
++ if (!IS_EVEN(crt_p->inputdatalength)) {
++ PRINTK("inputdatalength[%d] is odd, CRT form\n",
++ crt_p->inputdatalength);
++ rv = SEN_USER_ERROR;
++ break;
++ }
++ if (!crt_p->bp_key ||
++ !crt_p->bq_key ||
++ !crt_p->np_prime ||
++ !crt_p->nq_prime ||
++ !crt_p->u_mult_inv) {
++ PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n",
++ crt_p->bp_key, crt_p->bq_key,
++ crt_p->np_prime, crt_p->nq_prime,
++ crt_p->u_mult_inv);
++ rv = SEN_USER_ERROR;
++ }
++ break;
++ default:
++ PRINTK("bad func = %d\n", we_p->funccode);
++ rv = SEN_USER_ERROR;
++ break;
++ }
++ if (rv != 0)
++ return rv;
++
++ if (select_device_type(&we_p->devtype, mex_p->inputdatalength) < 0)
++ return SEN_NOT_AVAIL;
++
++ temp_buffer = (unsigned char *)we_p + sizeof(struct work_element) +
++ sizeof(struct caller);
++ if (copy_from_user(temp_buffer, mex_p->inputdata,
++ mex_p->inputdatalength) != 0)
++ return SEN_RELEASED;
++
++ function = PCI_FUNC_KEY_ENCRYPT;
++ switch (we_p->devtype) {
++ /* PCICA does everything with a simple RSA mod-expo operation */
++ case PCICA:
++ function = PCI_FUNC_KEY_ENCRYPT;
++ break;
++ /**
++ * PCIXCC_MCL2 does all Mod-Expo form with a simple RSA mod-expo
++ * operation, and all CRT forms with a PKCS-1.2 format decrypt.
++ * PCIXCC_MCL3 and CEX2C do all Mod-Expo and CRT forms with a simple RSA
++ * mod-expo operation
++ */
++ case PCIXCC_MCL2:
++ if (we_p->funccode == ICARSAMODEXPO)
++ function = PCI_FUNC_KEY_ENCRYPT;
++ else
++ function = PCI_FUNC_KEY_DECRYPT;
++ break;
++ case PCIXCC_MCL3:
++ case CEX2C:
++ if (we_p->funccode == ICARSAMODEXPO)
++ function = PCI_FUNC_KEY_ENCRYPT;
++ else
++ function = PCI_FUNC_KEY_DECRYPT;
++ break;
++ /**
++ * PCICC does everything as a PKCS-1.2 format request
++ */
++ case PCICC:
++ /* PCICC cannot handle input that is PKCS#1.1 padded */
++ if (is_PKCS11_padded(temp_buffer, mex_p->inputdatalength)) {
++ return SEN_NOT_AVAIL;
++ }
++ if (we_p->funccode == ICARSAMODEXPO) {
++ if (is_PKCS12_padded(temp_buffer,
++ mex_p->inputdatalength))
++ function = PCI_FUNC_KEY_ENCRYPT;
++ else
++ function = PCI_FUNC_KEY_DECRYPT;
++ } else
++ /* all CRT forms are decrypts */
++ function = PCI_FUNC_KEY_DECRYPT;
++ break;
++ }
++ PDEBUG("function: %04x\n", function);
++ rv = build_caller(we_p, function);
++ PDEBUG("rv from build_caller = %d\n", rv);
++ return rv;
++}
++
++static inline int
++z90crypt_prepare(struct work_element *we_p, unsigned int funccode,
++ const char __user *buffer)
++{
++ int rv;
++
++ we_p->devindex = -1;
++ if (funccode == ICARSAMODEXPO)
++ we_p->buff_size = sizeof(struct ica_rsa_modexpo);
++ else
++ we_p->buff_size = sizeof(struct ica_rsa_modexpo_crt);
++
++ if (copy_from_user(we_p->buffer, buffer, we_p->buff_size))
++ return -EFAULT;
++
++ we_p->audit[0] |= FP_COPYFROM;
++ SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
++ we_p->funccode = funccode;
++ we_p->devtype = -1;
++ we_p->audit[0] |= FP_BUFFREQ;
++ rv = get_crypto_request_buffer(we_p);
++ switch (rv) {
++ case 0:
++ we_p->audit[0] |= FP_BUFFGOT;
++ break;
++ case SEN_USER_ERROR:
++ rv = -EINVAL;
++ break;
++ case SEN_QUEUE_FULL:
++ rv = 0;
++ break;
++ case SEN_RELEASED:
++ rv = -EFAULT;
++ break;
++ case REC_NO_RESPONSE:
++ rv = -ENODEV;
++ break;
++ case SEN_NOT_AVAIL:
++ rv = -EGETBUFF;
++ break;
++ default:
++ PRINTK("rv = %d\n", rv);
++ rv = -EGETBUFF;
++ break;
++ }
++ if (CHK_RDWRMASK(we_p->status[0]) == STAT_WRITTEN)
++ SET_RDWRMASK(we_p->status[0], STAT_DEFAULT);
++ return rv;
++}
++
++static inline void
++purge_work_element(struct work_element *we_p)
++{
++ struct list_head *lptr;
++
++ spin_lock_irq(&queuespinlock);
++ list_for_each(lptr, &request_list) {
++ if (lptr == &we_p->liste) {
++ list_del_init(lptr);
++ requestq_count--;
++ break;
++ }
++ }
++ list_for_each(lptr, &pending_list) {
++ if (lptr == &we_p->liste) {
++ list_del_init(lptr);
++ pendingq_count--;
++ break;
++ }
++ }
++ spin_unlock_irq(&queuespinlock);
++}
++
++/**
++ * Build the request and send it.
++ */
++static inline int
++z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
++ unsigned int cmd, unsigned long arg)
++{
++ struct work_element *we_p;
++ int rv;
++
++ if ((rv = allocate_work_element(&we_p, private_data_p, pid))) {
++ PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid);
++ return rv;
++ }
++ if ((rv = z90crypt_prepare(we_p, cmd, (const char __user *)arg)))
++ PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid, rv);
++ if (!rv)
++ if ((rv = z90crypt_send(we_p, (const char *)arg)))
++ PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid, rv);
++ if (!rv) {
++ we_p->audit[0] |= FP_ASLEEP;
++ wait_event(we_p->waitq, atomic_read(&we_p->alarmrung));
++ we_p->audit[0] |= FP_AWAKE;
++ rv = we_p->retcode;
++ }
++ if (!rv)
++ rv = z90crypt_process_results(we_p, (char __user *)arg);
++
++ if ((we_p->status[0] & STAT_FAILED)) {
++ switch (rv) {
++ /**
++ * EINVAL *after* receive is almost always a padding error or
++ * length error issued by a coprocessor (not an accelerator).
++ * We convert this return value to -EGETBUFF which should
++ * trigger a fallback to software.
++ */
++ case -EINVAL:
++ if (we_p->devtype != PCICA)
++ rv = -EGETBUFF;
++ break;
++ case -ETIMEOUT:
++ if (z90crypt.mask.st_count > 0)
++ rv = -ERESTARTSYS; // retry with another
++ else
++ rv = -ENODEV; // no cards left
++ /* fall through to clean up request queue */
++ case -ERESTARTSYS:
++ case -ERELEASED:
++ switch (CHK_RDWRMASK(we_p->status[0])) {
++ case STAT_WRITTEN:
++ purge_work_element(we_p);
++ break;
++ case STAT_READPEND:
++ case STAT_NOWORK:
++ default:
++ break;
++ }
++ break;
++ default:
++ we_p->status[0] ^= STAT_FAILED;
++ break;
++ }
++ }
++ free_page((long)we_p);
++ return rv;
++}
++
++/**
++ * This function is a little long, but it's really just one large switch
++ * statement.
++ */
++static int
++z90crypt_ioctl(struct inode *inode, struct file *filp,
++ unsigned int cmd, unsigned long arg)
++{
++ struct priv_data *private_data_p = filp->private_data;
++ unsigned char *status;
++ unsigned char *qdepth;
++ unsigned int *reqcnt;
++ struct ica_z90_status *pstat;
++ int ret, i, loopLim, tempstat;
++ static int deprecated_msg_count1 = 0;
++ static int deprecated_msg_count2 = 0;
++
++ PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp, PID(), cmd);
++ PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n",
++ cmd,
++ !_IOC_DIR(cmd) ? "NO"
++ : ((_IOC_DIR(cmd) == (_IOC_READ|_IOC_WRITE)) ? "RW"
++ : ((_IOC_DIR(cmd) == _IOC_READ) ? "RD"
++ : "WR")),
++ _IOC_SIZE(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd));
++
++ if (_IOC_TYPE(cmd) != Z90_IOCTL_MAGIC) {
++ PRINTK("cmd 0x%08X contains bad magic\n", cmd);
++ return -ENOTTY;
++ }
++
++ ret = 0;
++ switch (cmd) {
++ case ICARSAMODEXPO:
++ case ICARSACRT:
++ if (quiesce_z90crypt) {
++ ret = -EQUIESCE;
++ break;
++ }
++ ret = -ENODEV; // Default if no devices
++ loopLim = z90crypt.hdware_info->hdware_mask.st_count -
++ (z90crypt.hdware_info->hdware_mask.disabled_count +
++ z90crypt.hdware_info->hdware_mask.user_disabled_count);
++ for (i = 0; i < loopLim; i++) {
++ ret = z90crypt_rsa(private_data_p, PID(), cmd, arg);
++ if (ret != -ERESTARTSYS)
++ break;
++ }
++ if (ret == -ERESTARTSYS)
++ ret = -ENODEV;
++ break;
++
++ case Z90STAT_TOTALCOUNT:
++ tempstat = get_status_totalcount();
++ if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
++ ret = -EFAULT;
++ break;
++
++ case Z90STAT_PCICACOUNT:
++ tempstat = get_status_PCICAcount();
++ if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
++ ret = -EFAULT;
++ break;
++
++ case Z90STAT_PCICCCOUNT:
++ tempstat = get_status_PCICCcount();
++ if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
++ ret = -EFAULT;
++ break;
++
++ case Z90STAT_PCIXCCMCL2COUNT:
++ tempstat = get_status_PCIXCCMCL2count();
++ if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
++ ret = -EFAULT;
++ break;
++
++ case Z90STAT_PCIXCCMCL3COUNT:
++ tempstat = get_status_PCIXCCMCL3count();
++ if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
++ ret = -EFAULT;
++ break;
++
++ case Z90STAT_CEX2CCOUNT:
++ tempstat = get_status_CEX2Ccount();
++ if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
++ ret = -EFAULT;
++ break;
++
++ case Z90STAT_REQUESTQ_COUNT:
++ tempstat = get_status_requestq_count();
++ if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
++ ret = -EFAULT;
++ break;
++
++ case Z90STAT_PENDINGQ_COUNT:
++ tempstat = get_status_pendingq_count();
++ if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
++ ret = -EFAULT;
++ break;
++
++ case Z90STAT_TOTALOPEN_COUNT:
++ tempstat = get_status_totalopen_count();
++ if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
++ ret = -EFAULT;
++ break;
++
++ case Z90STAT_DOMAIN_INDEX:
++ tempstat = get_status_domain_index();
++ if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
++ ret = -EFAULT;
++ break;
++
++ case Z90STAT_STATUS_MASK:
++ status = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
++ if (!status) {
++ PRINTK("kmalloc for status failed!\n");
++ ret = -ENOMEM;
++ break;
++ }
++ get_status_status_mask(status);
++ if (copy_to_user((char __user *) arg, status, Z90CRYPT_NUM_APS)
++ != 0)
++ ret = -EFAULT;
++ kfree(status);
++ break;
++
++ case Z90STAT_QDEPTH_MASK:
++ qdepth = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
++ if (!qdepth) {
++ PRINTK("kmalloc for qdepth failed!\n");
++ ret = -ENOMEM;
++ break;
++ }
++ get_status_qdepth_mask(qdepth);
++ if (copy_to_user((char __user *) arg, qdepth, Z90CRYPT_NUM_APS) != 0)
++ ret = -EFAULT;
++ kfree(qdepth);
++ break;
++
++ case Z90STAT_PERDEV_REQCNT:
++ reqcnt = kmalloc(sizeof(int) * Z90CRYPT_NUM_APS, GFP_KERNEL);
++ if (!reqcnt) {
++ PRINTK("kmalloc for reqcnt failed!\n");
++ ret = -ENOMEM;
++ break;
++ }
++ get_status_perdevice_reqcnt(reqcnt);
++ if (copy_to_user((char __user *) arg, reqcnt,
++ Z90CRYPT_NUM_APS * sizeof(int)) != 0)
++ ret = -EFAULT;
++ kfree(reqcnt);
++ break;
++
++ /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
++ case ICAZ90STATUS:
++ if (deprecated_msg_count1 < 20) {
++ PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n");
++ deprecated_msg_count1++;
++ if (deprecated_msg_count1 == 20)
++ PRINTK("No longer issuing messages related to "
++ "deprecated call to ICAZ90STATUS.\n");
++ }
++
++ pstat = kmalloc(sizeof(struct ica_z90_status), GFP_KERNEL);
++ if (!pstat) {
++ PRINTK("kmalloc for pstat failed!\n");
++ ret = -ENOMEM;
++ break;
++ }
++
++ pstat->totalcount = get_status_totalcount();
++ pstat->leedslitecount = get_status_PCICAcount();
++ pstat->leeds2count = get_status_PCICCcount();
++ pstat->requestqWaitCount = get_status_requestq_count();
++ pstat->pendingqWaitCount = get_status_pendingq_count();
++ pstat->totalOpenCount = get_status_totalopen_count();
++ pstat->cryptoDomain = get_status_domain_index();
++ get_status_status_mask(pstat->status);
++ get_status_qdepth_mask(pstat->qdepth);
++
++ if (copy_to_user((struct ica_z90_status __user *) arg, pstat,
++ sizeof(struct ica_z90_status)) != 0)
++ ret = -EFAULT;
++ kfree(pstat);
++ break;
++
++ /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
++ case Z90STAT_PCIXCCCOUNT:
++ if (deprecated_msg_count2 < 20) {
++ PRINTK("deprecated ioctl (Z90STAT_PCIXCCCOUNT)!\n");
++ deprecated_msg_count2++;
++ if (deprecated_msg_count2 == 20)
++				PRINTK("No longer issuing messages about "
++				       "deprecated ioctl Z90STAT_PCIXCCCOUNT.\n");
++ }
++
++ tempstat = get_status_PCIXCCcount();
++		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
++ ret = -EFAULT;
++ break;
++
++ case Z90QUIESCE:
++ if (current->euid != 0) {
++ PRINTK("QUIESCE fails: euid %d\n",
++ current->euid);
++ ret = -EACCES;
++ } else {
++ PRINTK("QUIESCE device from PID %d\n", PID());
++ quiesce_z90crypt = 1;
++ }
++ break;
++
++ default:
++ /* user passed an invalid IOCTL number */
++ PDEBUG("cmd 0x%08X contains invalid ioctl code\n", cmd);
++ ret = -ENOTTY;
++ break;
++ }
++
++ return ret;
++}
++
++static inline int
++sprintcl(unsigned char *outaddr, unsigned char *addr, unsigned int len)
++{
++ int hl, i;
++
++ hl = 0;
++ for (i = 0; i < len; i++)
++ hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
++ hl += sprintf(outaddr+hl, " ");
++
++ return hl;
++}
++
++static inline int
++sprintrw(unsigned char *outaddr, unsigned char *addr, unsigned int len)
++{
++ int hl, inl, c, cx;
++
++ hl = sprintf(outaddr, " ");
++ inl = 0;
++ for (c = 0; c < (len / 16); c++) {
++ hl += sprintcl(outaddr+hl, addr+inl, 16);
++ inl += 16;
++ }
++
++ cx = len%16;
++ if (cx) {
++ hl += sprintcl(outaddr+hl, addr+inl, cx);
++ inl += cx;
++ }
++
++ hl += sprintf(outaddr+hl, "\n");
++
++ return hl;
++}
++
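++/**
++ * Format a titled hex dump: the len bytes at addr are printed in
++ * rows of up to 64 bytes, each row broken into 16-byte groups.
++ */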
++static inline int
++sprinthx(unsigned char *title, unsigned char *outaddr,
++ unsigned char *addr, unsigned int len)
++{
++ int hl, inl, r, rx;
++
++ hl = sprintf(outaddr, "\n%s\n", title);
++ inl = 0;
++ for (r = 0; r < (len / 64); r++) {
++ hl += sprintrw(outaddr+hl, addr+inl, 64);
++ inl += 64;
++ }
++ rx = len % 64;
++ if (rx) {
++ hl += sprintrw(outaddr+hl, addr+inl, rx);
++ inl += rx;
++ }
++
++ hl += sprintf(outaddr+hl, "\n");
++
++ return hl;
++}
++
++static inline int
++sprinthx4(unsigned char *title, unsigned char *outaddr,
++ unsigned int *array, unsigned int len)
++{
++ int hl, r;
++
++ hl = sprintf(outaddr, "\n%s\n", title);
++
++ for (r = 0; r < len; r++) {
++ if ((r % 8) == 0)
++ hl += sprintf(outaddr+hl, " ");
++ hl += sprintf(outaddr+hl, "%08X ", array[r]);
++ if ((r % 8) == 7)
++ hl += sprintf(outaddr+hl, "\n");
++ }
++
++ hl += sprintf(outaddr+hl, "\n");
++
++ return hl;
++}
++
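++/**
++ * Build the text returned by reads of the proc entry. The layout
++ * is roughly as follows (the counts shown are illustrative only):
++ *
++ *   z90crypt version: 1.3.2
++ *   Cryptographic domain: 0
++ *   Total device count: 1
++ *   ...
++ *   Online devices: ...
++ *   Waiting work element counts
++ *   Per-device successfully completed request counts
++ */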
++static int
++z90crypt_status(char *resp_buff, char **start, off_t offset,
++ int count, int *eof, void *data)
++{
++ unsigned char *workarea;
++ int len;
++
++ /* resp_buff is a page. Use the right half for a work area */
++ workarea = resp_buff+2000;
++ len = 0;
++ len += sprintf(resp_buff+len, "\nz90crypt version: %d.%d.%d\n",
++ z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT);
++ len += sprintf(resp_buff+len, "Cryptographic domain: %d\n",
++ get_status_domain_index());
++ len += sprintf(resp_buff+len, "Total device count: %d\n",
++ get_status_totalcount());
++ len += sprintf(resp_buff+len, "PCICA count: %d\n",
++ get_status_PCICAcount());
++ len += sprintf(resp_buff+len, "PCICC count: %d\n",
++ get_status_PCICCcount());
++ len += sprintf(resp_buff+len, "PCIXCC MCL2 count: %d\n",
++ get_status_PCIXCCMCL2count());
++ len += sprintf(resp_buff+len, "PCIXCC MCL3 count: %d\n",
++ get_status_PCIXCCMCL3count());
++ len += sprintf(resp_buff+len, "CEX2C count: %d\n",
++ get_status_CEX2Ccount());
++ len += sprintf(resp_buff+len, "requestq count: %d\n",
++ get_status_requestq_count());
++ len += sprintf(resp_buff+len, "pendingq count: %d\n",
++ get_status_pendingq_count());
++ len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
++ get_status_totalopen_count());
++ len += sprinthx(
++ "Online devices: 1: PCICA, 2: PCICC, 3: PCIXCC (MCL2), "
++ "4: PCIXCC (MCL3), 5: CEX2C",
++ resp_buff+len,
++ get_status_status_mask(workarea),
++ Z90CRYPT_NUM_APS);
++ len += sprinthx("Waiting work element counts",
++ resp_buff+len,
++ get_status_qdepth_mask(workarea),
++ Z90CRYPT_NUM_APS);
++ len += sprinthx4(
++ "Per-device successfully completed request counts",
++ resp_buff+len,
++ get_status_perdevice_reqcnt((unsigned int *)workarea),
++ Z90CRYPT_NUM_APS);
++ *eof = 1;
++ memset(workarea, 0, Z90CRYPT_NUM_APS * sizeof(unsigned int));
++ return len;
++}
++
++static inline void
++disable_card(int card_index)
++{
++ struct device *devp;
++
++ devp = LONG2DEVPTR(card_index);
++ if (!devp || devp->user_disabled)
++ return;
++ devp->user_disabled = 1;
++ z90crypt.hdware_info->hdware_mask.user_disabled_count++;
++ if (devp->dev_type == -1)
++ return;
++ z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++;
++}
++
++static inline void
++enable_card(int card_index)
++{
++ struct device *devp;
++
++ devp = LONG2DEVPTR(card_index);
++ if (!devp || !devp->user_disabled)
++ return;
++ devp->user_disabled = 0;
++ z90crypt.hdware_info->hdware_mask.user_disabled_count--;
++ if (devp->dev_type == -1)
++ return;
++ z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--;
++}
++
++static inline int
++scan_char(unsigned char *bf, unsigned int len,
++ unsigned int *offs, unsigned int *p_eof, unsigned char c)
++{
++ unsigned int i, found;
++
++ found = 0;
++ for (i = 0; i < len; i++) {
++ if (bf[i] == c) {
++ found = 1;
++ break;
++ }
++ if (bf[i] == '\0') {
++ *p_eof = 1;
++ break;
++ }
++ if (bf[i] == '\n') {
++ break;
++ }
++ }
++ *offs = i+1;
++ return found;
++}
++
++static inline int
++scan_string(unsigned char *bf, unsigned int len,
++ unsigned int *offs, unsigned int *p_eof, unsigned char *s)
++{
++ unsigned int temp_len, temp_offs, found, eof;
++
++ temp_len = temp_offs = found = eof = 0;
++ while (!eof && !found) {
++ found = scan_char(bf+temp_len, len-temp_len,
++ &temp_offs, &eof, *s);
++
++ temp_len += temp_offs;
++ if (eof) {
++ found = 0;
++ break;
++ }
++
++ if (found) {
++ if (len >= temp_offs+strlen(s)) {
++ found = !strncmp(bf+temp_len-1, s, strlen(s));
++ if (found) {
++ *offs = temp_len+strlen(s)-1;
++ break;
++ }
++ } else {
++ found = 0;
++ *p_eof = 1;
++ break;
++ }
++ }
++ }
++ return found;
++}
++
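++/**
++ * Parse text written to the proc entry. The expected input is a
++ * copy of the status output in which the line that follows the
++ * "Online devices" header has been edited: a digit 0-5 leaves the
++ * card at that position alone, 'd'/'D' disables it, 'e'/'E'
++ * re-enables it, and whitespace is skipped. For example
++ * (illustrative), writing back a line reading
++ *
++ *   1 d 1 e
++ *
++ * disables the second card and re-enables the fourth.
++ */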
++static int
++z90crypt_status_write(struct file *file, const char __user *buffer,
++ unsigned long count, void *data)
++{
++ int i, j, len, offs, found, eof;
++ unsigned char *lbuf;
++ unsigned int local_count;
++
++#define LBUFSIZE 600
++	if (count <= 0)
++		return 0;
++
++	lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
++	if (!lbuf) {
++		PRINTK("kmalloc failed!\n");
++		return 0;
++	}
++
++ local_count = UMIN((unsigned int)count, LBUFSIZE-1);
++
++ if (copy_from_user(lbuf, buffer, local_count) != 0) {
++ kfree(lbuf);
++ return -EFAULT;
++ }
++
++ lbuf[local_count-1] = '\0';
++
++ len = 0;
++ eof = 0;
++ found = 0;
++ while (!eof) {
++ found = scan_string(lbuf+len, local_count-len, &offs, &eof,
++ "Online devices");
++ len += offs;
++ if (found == 1)
++ break;
++ }
++
++ if (eof) {
++ kfree(lbuf);
++ return count;
++ }
++
++ if (found)
++ found = scan_char(lbuf+len, local_count-len, &offs, &eof, '\n');
++
++ if (!found || eof) {
++ kfree(lbuf);
++ return count;
++ }
++
++ len += offs;
++ j = 0;
++ for (i = 0; i < 80; i++) {
++ switch (*(lbuf+len+i)) {
++ case '\t':
++ case ' ':
++ break;
++ case '\n':
++ default:
++ eof = 1;
++ break;
++ case '0':
++ case '1':
++ case '2':
++ case '3':
++ case '4':
++ case '5':
++ j++;
++ break;
++ case 'd':
++ case 'D':
++ disable_card(j);
++ j++;
++ break;
++ case 'e':
++ case 'E':
++ enable_card(j);
++ j++;
++ break;
++ }
++ if (eof)
++ break;
++ }
++
++ kfree(lbuf);
++ return count;
++}
++
++/**
++ * Functions that run under a timer, with no process id
++ *
++ * The task functions:
++ * z90crypt_reader_task
++ * helper_send_work
++ * helper_handle_work_element
++ * helper_receive_rc
++ * z90crypt_config_task
++ * z90crypt_cleanup_task
++ *
++ * Helper functions:
++ * z90crypt_schedule_reader_timer
++ * z90crypt_schedule_reader_task
++ * z90crypt_schedule_config_task
++ * z90crypt_schedule_cleanup_task
++ */
++static inline int
++receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p,
++ unsigned char *buff, unsigned char __user **dest_p_p)
++{
++ int dv, rv;
++ struct device *dev_ptr;
++ struct caller *caller_p;
++ struct ica_rsa_modexpo *icaMsg_p;
++ struct list_head *ptr, *tptr;
++
++ memcpy(psmid, NULL_psmid, sizeof(NULL_psmid));
++
++ if (z90crypt.terminating)
++ return REC_FATAL_ERROR;
++
++ caller_p = 0;
++ dev_ptr = z90crypt.device_p[index];
++ rv = 0;
++ do {
++ if (!dev_ptr || dev_ptr->disabled) {
++ rv = REC_NO_WORK; // a disabled device can't return work
++ break;
++ }
++ if (dev_ptr->dev_self_x != index) {
++ PRINTK("Corrupt dev ptr in receive_from_AP\n");
++ z90crypt.terminating = 1;
++ rv = REC_FATAL_ERROR;
++ break;
++ }
++ if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) {
++ dv = DEV_REC_EXCEPTION;
++ PRINTK("dev_resp_l = %d, dev_resp_p = %p\n",
++ dev_ptr->dev_resp_l, dev_ptr->dev_resp_p);
++ } else {
++ PDEBUG("Dequeue called for device %d\n", index);
++ dv = receive_from_AP(index, z90crypt.cdx,
++ dev_ptr->dev_resp_l,
++ dev_ptr->dev_resp_p, psmid);
++ }
++ switch (dv) {
++ case DEV_REC_EXCEPTION:
++ rv = REC_FATAL_ERROR;
++ z90crypt.terminating = 1;
++ PRINTKC("Exception in receive from device %d\n",
++ index);
++ break;
++ case DEV_ONLINE:
++ rv = 0;
++ break;
++ case DEV_EMPTY:
++ rv = REC_EMPTY;
++ break;
++ case DEV_NO_WORK:
++ rv = REC_NO_WORK;
++ break;
++ case DEV_BAD_MESSAGE:
++ case DEV_GONE:
++ case REC_HARDWAR_ERR:
++ default:
++ rv = REC_NO_RESPONSE;
++ break;
++ }
++ if (rv)
++ break;
++ if (dev_ptr->dev_caller_count <= 0) {
++ rv = REC_USER_GONE;
++ break;
++ }
++
++ list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) {
++ caller_p = list_entry(ptr, struct caller, caller_liste);
++ if (!memcmp(caller_p->caller_id, psmid,
++ sizeof(caller_p->caller_id))) {
++ if (!list_empty(&caller_p->caller_liste)) {
++ list_del_init(ptr);
++ dev_ptr->dev_caller_count--;
++ break;
++ }
++ }
++ caller_p = 0;
++ }
++ if (!caller_p) {
++ PRINTKW("Unable to locate PSMID %02X%02X%02X%02X%02X"
++ "%02X%02X%02X in device list\n",
++ psmid[0], psmid[1], psmid[2], psmid[3],
++ psmid[4], psmid[5], psmid[6], psmid[7]);
++ rv = REC_USER_GONE;
++ break;
++ }
++
++ PDEBUG("caller_p after successful receive: %p\n", caller_p);
++ rv = convert_response(dev_ptr->dev_resp_p,
++ caller_p->caller_buf_p, buff_len_p, buff);
++ switch (rv) {
++ case REC_USE_PCICA:
++ break;
++ case REC_OPERAND_INV:
++ case REC_OPERAND_SIZE:
++ case REC_EVEN_MOD:
++ case REC_INVALID_PAD:
++ PDEBUG("device %d: 'user error' %d\n", index, rv);
++ break;
++ case WRONG_DEVICE_TYPE:
++ case REC_HARDWAR_ERR:
++ case REC_BAD_MESSAGE:
++ PRINTKW("device %d: hardware error %d\n", index, rv);
++ rv = REC_NO_RESPONSE;
++ break;
++ default:
++ PDEBUG("device %d: rv = %d\n", index, rv);
++ break;
++ }
++ } while (0);
++
++ switch (rv) {
++ case 0:
++ PDEBUG("Successful receive from device %d\n", index);
++ icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p;
++ *dest_p_p = icaMsg_p->outputdata;
++ if (*buff_len_p == 0)
++ PRINTK("Zero *buff_len_p\n");
++ break;
++ case REC_NO_RESPONSE:
++ PRINTKW("Removing device %d from availability\n", index);
++ remove_device(dev_ptr);
++ break;
++ }
++
++ if (caller_p)
++ unbuild_caller(dev_ptr, caller_p);
++
++ return rv;
++}
++
++static inline void
++helper_send_work(int index)
++{
++ struct work_element *rq_p;
++ int rv;
++
++ if (list_empty(&request_list))
++ return;
++ requestq_count--;
++ rq_p = list_entry(request_list.next, struct work_element, liste);
++ list_del_init(&rq_p->liste);
++ rq_p->audit[1] |= FP_REMREQUEST;
++ if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) {
++ rq_p->devindex = SHRT2LONG(index);
++ rv = send_to_crypto_device(rq_p);
++ if (rv == 0) {
++ rq_p->requestsent = jiffies;
++ rq_p->audit[0] |= FP_SENT;
++ list_add_tail(&rq_p->liste, &pending_list);
++ ++pendingq_count;
++ rq_p->audit[0] |= FP_PENDING;
++ } else {
++ switch (rv) {
++ case REC_OPERAND_INV:
++ case REC_OPERAND_SIZE:
++ case REC_EVEN_MOD:
++ case REC_INVALID_PAD:
++ rq_p->retcode = -EINVAL;
++ break;
++ case SEN_NOT_AVAIL:
++ case SEN_RETRY:
++ case REC_NO_RESPONSE:
++ default:
++ if (z90crypt.mask.st_count > 1)
++ rq_p->retcode =
++ -ERESTARTSYS;
++ else
++ rq_p->retcode = -ENODEV;
++ break;
++ }
++ rq_p->status[0] |= STAT_FAILED;
++ rq_p->audit[1] |= FP_AWAKENING;
++ atomic_set(&rq_p->alarmrung, 1);
++ wake_up(&rq_p->waitq);
++ }
++ } else {
++ if (z90crypt.mask.st_count > 1)
++ rq_p->retcode = -ERESTARTSYS;
++ else
++ rq_p->retcode = -ENODEV;
++ rq_p->status[0] |= STAT_FAILED;
++ rq_p->audit[1] |= FP_AWAKENING;
++ atomic_set(&rq_p->alarmrung, 1);
++ wake_up(&rq_p->waitq);
++ }
++}
++
++static inline void
++helper_handle_work_element(int index, unsigned char psmid[8], int rc,
++ int buff_len, unsigned char *buff,
++ unsigned char __user *resp_addr)
++{
++ struct work_element *pq_p;
++ struct list_head *lptr, *tptr;
++
++ pq_p = 0;
++ list_for_each_safe(lptr, tptr, &pending_list) {
++ pq_p = list_entry(lptr, struct work_element, liste);
++ if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) {
++ list_del_init(lptr);
++ pendingq_count--;
++ pq_p->audit[1] |= FP_NOTPENDING;
++ break;
++ }
++ pq_p = 0;
++ }
++
++ if (!pq_p) {
++ PRINTK("device %d has work but no caller exists on pending Q\n",
++ SHRT2LONG(index));
++ return;
++ }
++
++ switch (rc) {
++ case 0:
++ pq_p->resp_buff_size = buff_len;
++ pq_p->audit[1] |= FP_RESPSIZESET;
++ if (buff_len) {
++ pq_p->resp_addr = resp_addr;
++ pq_p->audit[1] |= FP_RESPADDRCOPIED;
++ memcpy(pq_p->resp_buff, buff, buff_len);
++ pq_p->audit[1] |= FP_RESPBUFFCOPIED;
++ }
++ break;
++ case REC_OPERAND_INV:
++ case REC_OPERAND_SIZE:
++ case REC_EVEN_MOD:
++ case REC_INVALID_PAD:
++ PDEBUG("-EINVAL after application error %d\n", rc);
++ pq_p->retcode = -EINVAL;
++ pq_p->status[0] |= STAT_FAILED;
++ break;
++ case REC_USE_PCICA:
++ pq_p->retcode = -ERESTARTSYS;
++ pq_p->status[0] |= STAT_FAILED;
++ break;
++ case REC_NO_RESPONSE:
++ default:
++ if (z90crypt.mask.st_count > 1)
++ pq_p->retcode = -ERESTARTSYS;
++ else
++ pq_p->retcode = -ENODEV;
++ pq_p->status[0] |= STAT_FAILED;
++ break;
++ }
++ if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) {
++ pq_p->audit[1] |= FP_AWAKENING;
++ atomic_set(&pq_p->alarmrung, 1);
++ wake_up(&pq_p->waitq);
++ }
++}
++
++/**
++ * return TRUE if the work element should be removed from the queue
++ */
++static inline int
++helper_receive_rc(int index, int *rc_p)
++{
++ switch (*rc_p) {
++ case 0:
++ case REC_OPERAND_INV:
++ case REC_OPERAND_SIZE:
++ case REC_EVEN_MOD:
++ case REC_INVALID_PAD:
++ case REC_USE_PCICA:
++ break;
++
++ case REC_BUSY:
++ case REC_NO_WORK:
++ case REC_EMPTY:
++ case REC_RETRY_DEV:
++ case REC_FATAL_ERROR:
++ return 0;
++
++ case REC_NO_RESPONSE:
++ break;
++
++ default:
++ PRINTK("rc %d, device %d converted to REC_NO_RESPONSE\n",
++ *rc_p, SHRT2LONG(index));
++ *rc_p = REC_NO_RESPONSE;
++ break;
++ }
++ return 1;
++}
++
++static inline void
++z90crypt_schedule_reader_timer(void)
++{
++ if (timer_pending(&reader_timer))
++ return;
++ if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0)
++ PRINTK("Timer pending while modifying reader timer\n");
++}
++
++static void
++z90crypt_reader_task(unsigned long ptr)
++{
++ int workavail, index, rc, buff_len;
++ unsigned char psmid[8];
++ unsigned char __user *resp_addr;
++ static unsigned char buff[1024];
++
++ /**
++ * we use workavail = 2 to ensure 2 passes with nothing dequeued before
++ * exiting the loop. If (pendingq_count+requestq_count) == 0 after the
++ * loop, there is no work remaining on the queues.
++ */
++ resp_addr = 0;
++ workavail = 2;
++ buff_len = 0;
++ while (workavail) {
++ workavail--;
++ rc = 0;
++ spin_lock_irq(&queuespinlock);
++ memset(buff, 0x00, sizeof(buff));
++
++ /* Dequeue once from each device in round robin. */
++ for (index = 0; index < z90crypt.mask.st_count; index++) {
++ PDEBUG("About to receive.\n");
++ rc = receive_from_crypto_device(SHRT2LONG(index),
++ psmid,
++ &buff_len,
++ buff,
++ &resp_addr);
++ PDEBUG("Dequeued: rc = %d.\n", rc);
++
++ if (helper_receive_rc(index, &rc)) {
++ if (rc != REC_NO_RESPONSE) {
++ helper_send_work(index);
++ workavail = 2;
++ }
++
++ helper_handle_work_element(index, psmid, rc,
++ buff_len, buff,
++ resp_addr);
++ }
++
++ if (rc == REC_FATAL_ERROR)
++ PRINTKW("REC_FATAL_ERROR from device %d!\n",
++ SHRT2LONG(index));
++ }
++ spin_unlock_irq(&queuespinlock);
++ }
++
++ if (pendingq_count + requestq_count)
++ z90crypt_schedule_reader_timer();
++}
++
++static inline void
++z90crypt_schedule_config_task(unsigned int expiration)
++{
++ if (timer_pending(&config_timer))
++ return;
++ if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0)
++ PRINTK("Timer pending while modifying config timer\n");
++}
++
++static void
++z90crypt_config_task(unsigned long ptr)
++{
++ int rc;
++
++ PDEBUG("jiffies %ld\n", jiffies);
++
++ if ((rc = refresh_z90crypt(&z90crypt.cdx)))
++ PRINTK("Error %d detected in refresh_z90crypt.\n", rc);
++ /* If return was fatal, don't bother reconfiguring */
++ if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR))
++ z90crypt_schedule_config_task(CONFIGTIME);
++}
++
++static inline void
++z90crypt_schedule_cleanup_task(void)
++{
++ if (timer_pending(&cleanup_timer))
++ return;
++ if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0)
++ PRINTK("Timer pending while modifying cleanup timer\n");
++}
++
++static inline void
++helper_drain_queues(void)
++{
++ struct work_element *pq_p;
++ struct list_head *lptr, *tptr;
++
++ list_for_each_safe(lptr, tptr, &pending_list) {
++ pq_p = list_entry(lptr, struct work_element, liste);
++ pq_p->retcode = -ENODEV;
++ pq_p->status[0] |= STAT_FAILED;
++ unbuild_caller(LONG2DEVPTR(pq_p->devindex),
++ (struct caller *)pq_p->requestptr);
++ list_del_init(lptr);
++ pendingq_count--;
++ pq_p->audit[1] |= FP_NOTPENDING;
++ pq_p->audit[1] |= FP_AWAKENING;
++ atomic_set(&pq_p->alarmrung, 1);
++ wake_up(&pq_p->waitq);
++ }
++
++ list_for_each_safe(lptr, tptr, &request_list) {
++ pq_p = list_entry(lptr, struct work_element, liste);
++ pq_p->retcode = -ENODEV;
++ pq_p->status[0] |= STAT_FAILED;
++ list_del_init(lptr);
++ requestq_count--;
++ pq_p->audit[1] |= FP_REMREQUEST;
++ pq_p->audit[1] |= FP_AWAKENING;
++ atomic_set(&pq_p->alarmrung, 1);
++ wake_up(&pq_p->waitq);
++ }
++}
++
++static inline void
++helper_timeout_requests(void)
++{
++ struct work_element *pq_p;
++ struct list_head *lptr, *tptr;
++ long timelimit;
++
++ timelimit = jiffies - (CLEANUPTIME * HZ);
++ /* The list is in strict chronological order */
++ list_for_each_safe(lptr, tptr, &pending_list) {
++ pq_p = list_entry(lptr, struct work_element, liste);
++ if (pq_p->requestsent >= timelimit)
++ break;
++ PRINTKW("Purging(PQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
++ ((struct caller *)pq_p->requestptr)->caller_id[0],
++ ((struct caller *)pq_p->requestptr)->caller_id[1],
++ ((struct caller *)pq_p->requestptr)->caller_id[2],
++ ((struct caller *)pq_p->requestptr)->caller_id[3],
++ ((struct caller *)pq_p->requestptr)->caller_id[4],
++ ((struct caller *)pq_p->requestptr)->caller_id[5],
++ ((struct caller *)pq_p->requestptr)->caller_id[6],
++ ((struct caller *)pq_p->requestptr)->caller_id[7]);
++ pq_p->retcode = -ETIMEOUT;
++ pq_p->status[0] |= STAT_FAILED;
++ /* get this off any caller queue it may be on */
++ unbuild_caller(LONG2DEVPTR(pq_p->devindex),
++ (struct caller *) pq_p->requestptr);
++ list_del_init(lptr);
++ pendingq_count--;
++ pq_p->audit[1] |= FP_TIMEDOUT;
++ pq_p->audit[1] |= FP_NOTPENDING;
++ pq_p->audit[1] |= FP_AWAKENING;
++ atomic_set(&pq_p->alarmrung, 1);
++ wake_up(&pq_p->waitq);
++ }
++
++ /**
++ * If pending count is zero, items left on the request queue may
++ * never be processed.
++ */
++ if (pendingq_count <= 0) {
++ list_for_each_safe(lptr, tptr, &request_list) {
++ pq_p = list_entry(lptr, struct work_element, liste);
++ if (pq_p->requestsent >= timelimit)
++ break;
++ PRINTKW("Purging(RQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
++ ((struct caller *)pq_p->requestptr)->caller_id[0],
++ ((struct caller *)pq_p->requestptr)->caller_id[1],
++ ((struct caller *)pq_p->requestptr)->caller_id[2],
++ ((struct caller *)pq_p->requestptr)->caller_id[3],
++ ((struct caller *)pq_p->requestptr)->caller_id[4],
++ ((struct caller *)pq_p->requestptr)->caller_id[5],
++ ((struct caller *)pq_p->requestptr)->caller_id[6],
++ ((struct caller *)pq_p->requestptr)->caller_id[7]);
++ pq_p->retcode = -ETIMEOUT;
++ pq_p->status[0] |= STAT_FAILED;
++ list_del_init(lptr);
++ requestq_count--;
++ pq_p->audit[1] |= FP_TIMEDOUT;
++ pq_p->audit[1] |= FP_REMREQUEST;
++ pq_p->audit[1] |= FP_AWAKENING;
++ atomic_set(&pq_p->alarmrung, 1);
++ wake_up(&pq_p->waitq);
++ }
++ }
++}
++
++static void
++z90crypt_cleanup_task(unsigned long ptr)
++{
++ PDEBUG("jiffies %ld\n", jiffies);
++ spin_lock_irq(&queuespinlock);
++ if (z90crypt.mask.st_count <= 0) // no devices!
++ helper_drain_queues();
++ else
++ helper_timeout_requests();
++ spin_unlock_irq(&queuespinlock);
++ z90crypt_schedule_cleanup_task();
++}
++
++static void
++z90crypt_schedule_reader_task(unsigned long ptr)
++{
++ tasklet_schedule(&reader_tasklet);
++}
++
++/**
++ * Lowlevel Functions:
++ *
++ * create_z90crypt: creates and initializes basic data structures
++ * refresh_z90crypt: re-initializes basic data structures
++ * find_crypto_devices: returns a count and mask of hardware status
++ * create_crypto_device: builds the descriptor for a device
++ *   destroy_crypto_device: deallocates the descriptor for a device
++ *   destroy_z90crypt: drains all work, deallocates structs
++ */
++
++/**
++ * build the z90crypt root structure using the given domain index
++ */
++static int
++create_z90crypt(int *cdx_p)
++{
++ struct hdware_block *hdware_blk_p;
++
++ memset(&z90crypt, 0x00, sizeof(struct z90crypt));
++ z90crypt.domain_established = 0;
++ z90crypt.len = sizeof(struct z90crypt);
++ z90crypt.max_count = Z90CRYPT_NUM_DEVS;
++ z90crypt.cdx = *cdx_p;
++
++ hdware_blk_p = (struct hdware_block *)
++ kmalloc(sizeof(struct hdware_block), GFP_ATOMIC);
++ if (!hdware_blk_p) {
++ PDEBUG("kmalloc for hardware block failed\n");
++ return ENOMEM;
++ }
++ memset(hdware_blk_p, 0x00, sizeof(struct hdware_block));
++ z90crypt.hdware_info = hdware_blk_p;
++
++ return 0;
++}
++
++static inline int
++helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found)
++{
++ enum hdstat hd_stat;
++ int q_depth, dev_type;
++ int indx, chkdom, numdomains;
++
++ q_depth = dev_type = numdomains = 0;
++ for (chkdom = 0; chkdom <= 15; cdx_array[chkdom++] = -1);
++ for (indx = 0; indx < z90crypt.max_count; indx++) {
++ hd_stat = HD_NOT_THERE;
++ numdomains = 0;
++ for (chkdom = 0; chkdom <= 15; chkdom++) {
++ hd_stat = query_online(indx, chkdom, MAX_RESET,
++ &q_depth, &dev_type);
++ if (hd_stat == HD_TSQ_EXCEPTION) {
++ z90crypt.terminating = 1;
++ PRINTKC("exception taken!\n");
++ break;
++ }
++ if (hd_stat == HD_ONLINE) {
++ cdx_array[numdomains++] = chkdom;
++ if (*cdx_p == chkdom) {
++ *correct_cdx_found = 1;
++ break;
++ }
++ }
++ }
++ if ((*correct_cdx_found == 1) || (numdomains != 0))
++ break;
++ if (z90crypt.terminating)
++ break;
++ }
++ return numdomains;
++}
++
++static inline int
++probe_crypto_domain(int *cdx_p)
++{
++ int cdx_array[16];
++ char cdx_array_text[53], temp[5];
++ int correct_cdx_found, numdomains;
++
++ correct_cdx_found = 0;
++ numdomains = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found);
++
++ if (z90crypt.terminating)
++ return TSQ_FATAL_ERROR;
++
++ if (correct_cdx_found)
++ return 0;
++
++ if (numdomains == 0) {
++ PRINTKW("Unable to find crypto domain: No devices found\n");
++ return Z90C_NO_DEVICES;
++ }
++
++ if (numdomains == 1) {
++ if (*cdx_p == -1) {
++ *cdx_p = cdx_array[0];
++ return 0;
++ }
++ PRINTKW("incorrect domain: specified = %d, found = %d\n",
++ *cdx_p, cdx_array[0]);
++ return Z90C_INCORRECT_DOMAIN;
++ }
++
++ numdomains--;
++ sprintf(cdx_array_text, "%d", cdx_array[numdomains]);
++ while (numdomains) {
++ numdomains--;
++ sprintf(temp, ", %d", cdx_array[numdomains]);
++ strcat(cdx_array_text, temp);
++ }
++
++ PRINTKW("ambiguous domain detected: specified = %d, found array = %s\n",
++ *cdx_p, cdx_array_text);
++ return Z90C_AMBIGUOUS_DOMAIN;
++}
++
++static int
++refresh_z90crypt(int *cdx_p)
++{
++ int i, j, indx, rv;
++ struct status local_mask;
++ struct device *devPtr;
++ unsigned char oldStat, newStat;
++ int return_unchanged;
++
++ if (z90crypt.len != sizeof(z90crypt))
++ return ENOTINIT;
++ if (z90crypt.terminating)
++ return TSQ_FATAL_ERROR;
++ rv = 0;
++ if (!z90crypt.hdware_info->hdware_mask.st_count &&
++ !z90crypt.domain_established) {
++ rv = probe_crypto_domain(cdx_p);
++ if (z90crypt.terminating)
++ return TSQ_FATAL_ERROR;
++ if (rv == Z90C_NO_DEVICES)
++ return 0; // try later
++ if (rv)
++ return rv;
++ z90crypt.cdx = *cdx_p;
++ z90crypt.domain_established = 1;
++ }
++ rv = find_crypto_devices(&local_mask);
++ if (rv) {
++ PRINTK("find crypto devices returned %d\n", rv);
++ return rv;
++ }
++ if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask,
++ sizeof(struct status))) {
++ return_unchanged = 1;
++ for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) {
++ /**
++ * Check for disabled cards. If any device is marked
++ * disabled, destroy it.
++ */
++ for (j = 0;
++ j < z90crypt.hdware_info->type_mask[i].st_count;
++ j++) {
++ indx = z90crypt.hdware_info->type_x_addr[i].
++ device_index[j];
++ devPtr = z90crypt.device_p[indx];
++ if (devPtr && devPtr->disabled) {
++ local_mask.st_mask[indx] = HD_NOT_THERE;
++ return_unchanged = 0;
++ }
++ }
++ }
++ if (return_unchanged == 1)
++ return 0;
++ }
++
++ spin_lock_irq(&queuespinlock);
++ for (i = 0; i < z90crypt.max_count; i++) {
++ oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i];
++ newStat = local_mask.st_mask[i];
++ if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE))
++ destroy_crypto_device(i);
++ else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) {
++ rv = create_crypto_device(i);
++ if (rv >= REC_FATAL_ERROR)
++ return rv;
++ if (rv != 0) {
++ local_mask.st_mask[i] = HD_NOT_THERE;
++ local_mask.st_count--;
++ }
++ }
++ }
++ memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask,
++ sizeof(local_mask.st_mask));
++ z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count;
++ z90crypt.hdware_info->hdware_mask.disabled_count =
++ local_mask.disabled_count;
++ refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x);
++ for (i = 0; i < Z90CRYPT_NUM_TYPES; i++)
++ refresh_index_array(&(z90crypt.hdware_info->type_mask[i]),
++ &(z90crypt.hdware_info->type_x_addr[i]));
++ spin_unlock_irq(&queuespinlock);
++
++ return rv;
++}
++
++static int
++find_crypto_devices(struct status *deviceMask)
++{
++ int i, q_depth, dev_type;
++ enum hdstat hd_stat;
++
++ deviceMask->st_count = 0;
++ deviceMask->disabled_count = 0;
++ deviceMask->user_disabled_count = 0;
++
++ for (i = 0; i < z90crypt.max_count; i++) {
++ hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth,
++ &dev_type);
++ if (hd_stat == HD_TSQ_EXCEPTION) {
++ z90crypt.terminating = 1;
++ PRINTKC("Exception during probe for crypto devices\n");
++ return TSQ_FATAL_ERROR;
++ }
++ deviceMask->st_mask[i] = hd_stat;
++ if (hd_stat == HD_ONLINE) {
++ PDEBUG("Got an online crypto!: %d\n", i);
++ PDEBUG("Got a queue depth of %d\n", q_depth);
++ PDEBUG("Got a device type of %d\n", dev_type);
++ if (q_depth <= 0)
++ return TSQ_FATAL_ERROR;
++ deviceMask->st_count++;
++ z90crypt.q_depth_array[i] = q_depth;
++ z90crypt.dev_type_array[i] = dev_type;
++ }
++ }
++
++ return 0;
++}
++
++static int
++refresh_index_array(struct status *status_str, struct device_x *index_array)
++{
++ int i, count;
++ enum devstat stat;
++
++ i = -1;
++ count = 0;
++ do {
++ stat = status_str->st_mask[++i];
++ if (stat == DEV_ONLINE)
++ index_array->device_index[count++] = i;
++ } while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count));
++
++ return count;
++}
++
++static int
++create_crypto_device(int index)
++{
++ int rv, devstat, total_size;
++ struct device *dev_ptr;
++ struct status *type_str_p;
++ int deviceType;
++
++ dev_ptr = z90crypt.device_p[index];
++ if (!dev_ptr) {
++ total_size = sizeof(struct device) +
++ z90crypt.q_depth_array[index] * sizeof(int);
++
++ dev_ptr = (struct device *) kmalloc(total_size, GFP_ATOMIC);
++ if (!dev_ptr) {
++ PRINTK("kmalloc device %d failed\n", index);
++ return ENOMEM;
++ }
++ memset(dev_ptr, 0, total_size);
++ dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC);
++ if (!dev_ptr->dev_resp_p) {
++ kfree(dev_ptr);
++ PRINTK("kmalloc device %d rec buffer failed\n", index);
++ return ENOMEM;
++ }
++ dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE;
++ INIT_LIST_HEAD(&(dev_ptr->dev_caller_list));
++ }
++
++ devstat = reset_device(index, z90crypt.cdx, MAX_RESET);
++ if (devstat == DEV_RSQ_EXCEPTION) {
++ PRINTK("exception during reset device %d\n", index);
++ kfree(dev_ptr->dev_resp_p);
++ kfree(dev_ptr);
++ return RSQ_FATAL_ERROR;
++ }
++ if (devstat == DEV_ONLINE) {
++ dev_ptr->dev_self_x = index;
++ dev_ptr->dev_type = z90crypt.dev_type_array[index];
++ if (dev_ptr->dev_type == NILDEV) {
++ rv = probe_device_type(dev_ptr);
++ if (rv) {
++ PRINTK("rv = %d from probe_device_type %d\n",
++ rv, index);
++ kfree(dev_ptr->dev_resp_p);
++ kfree(dev_ptr);
++ return rv;
++ }
++ }
++ if (dev_ptr->dev_type == PCIXCC_UNK) {
++ rv = probe_PCIXCC_type(dev_ptr);
++ if (rv) {
++ PRINTK("rv = %d from probe_PCIXCC_type %d\n",
++ rv, index);
++ kfree(dev_ptr->dev_resp_p);
++ kfree(dev_ptr);
++ return rv;
++ }
++ }
++ deviceType = dev_ptr->dev_type;
++ z90crypt.dev_type_array[index] = deviceType;
++ if (deviceType == PCICA)
++ z90crypt.hdware_info->device_type_array[index] = 1;
++ else if (deviceType == PCICC)
++ z90crypt.hdware_info->device_type_array[index] = 2;
++ else if (deviceType == PCIXCC_MCL2)
++ z90crypt.hdware_info->device_type_array[index] = 3;
++ else if (deviceType == PCIXCC_MCL3)
++ z90crypt.hdware_info->device_type_array[index] = 4;
++ else if (deviceType == CEX2C)
++ z90crypt.hdware_info->device_type_array[index] = 5;
++ else
++ z90crypt.hdware_info->device_type_array[index] = -1;
++ }
++
++ /**
++ * 'q_depth' returned by the hardware is one less than
++ * the actual depth
++ */
++ dev_ptr->dev_q_depth = z90crypt.q_depth_array[index];
++ dev_ptr->dev_type = z90crypt.dev_type_array[index];
++ dev_ptr->dev_stat = devstat;
++ dev_ptr->disabled = 0;
++ z90crypt.device_p[index] = dev_ptr;
++
++ if (devstat == DEV_ONLINE) {
++ if (z90crypt.mask.st_mask[index] != DEV_ONLINE) {
++ z90crypt.mask.st_mask[index] = DEV_ONLINE;
++ z90crypt.mask.st_count++;
++ }
++ deviceType = dev_ptr->dev_type;
++ type_str_p = &z90crypt.hdware_info->type_mask[deviceType];
++ if (type_str_p->st_mask[index] != DEV_ONLINE) {
++ type_str_p->st_mask[index] = DEV_ONLINE;
++ type_str_p->st_count++;
++ }
++ }
++
++ return 0;
++}
++
++static int
++destroy_crypto_device(int index)
++{
++ struct device *dev_ptr;
++ int t, disabledFlag;
++
++ dev_ptr = z90crypt.device_p[index];
++
++ /* remember device type; get rid of device struct */
++ if (dev_ptr) {
++ disabledFlag = dev_ptr->disabled;
++ t = dev_ptr->dev_type;
++ if (dev_ptr->dev_resp_p)
++ kfree(dev_ptr->dev_resp_p);
++ kfree(dev_ptr);
++ } else {
++ disabledFlag = 0;
++ t = -1;
++ }
++ z90crypt.device_p[index] = 0;
++
++ /* if the type is valid, remove the device from the type_mask */
++ if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) {
++ z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00;
++ z90crypt.hdware_info->type_mask[t].st_count--;
++ if (disabledFlag == 1)
++ z90crypt.hdware_info->type_mask[t].disabled_count--;
++ }
++ if (z90crypt.mask.st_mask[index] != DEV_GONE) {
++ z90crypt.mask.st_mask[index] = DEV_GONE;
++ z90crypt.mask.st_count--;
++ }
++ z90crypt.hdware_info->device_type_array[index] = 0;
++
++ return 0;
++}
++
++static void
++destroy_z90crypt(void)
++{
++ int i;
++ for (i = 0; i < z90crypt.max_count; i++)
++ if (z90crypt.device_p[i])
++ destroy_crypto_device(i);
++ if (z90crypt.hdware_info)
++ kfree((void *)z90crypt.hdware_info);
++ memset((void *)&z90crypt, 0, sizeof(z90crypt));
++}
++
++static unsigned char static_testmsg[384] = {
++0x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00,
++0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58,
++0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43,
++0x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00,
++0x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32,
++0x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46,
++0x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32,
++0x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
++0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
++0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
++0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00,
++0x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00,
++0x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58,
++0x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8,
++0xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e,
++0x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f,
++0x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
++};
++
++static int
++probe_device_type(struct device *devPtr)
++{
++ int rv, dv, i, index, length;
++ unsigned char psmid[8];
++ static unsigned char loc_testmsg[sizeof(static_testmsg)];
++
++ index = devPtr->dev_self_x;
++ rv = 0;
++ do {
++ memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg));
++ length = sizeof(static_testmsg) - 24;
++ /* the -24 allows for the header */
++ dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
++ if (dv) {
++ PDEBUG("dv returned by send during probe: %d\n", dv);
++ if (dv == DEV_SEN_EXCEPTION) {
++ rv = SEN_FATAL_ERROR;
++ PRINTKC("exception in send to AP %d\n", index);
++ break;
++ }
++ PDEBUG("return value from send_to_AP: %d\n", rv);
++ switch (dv) {
++ case DEV_GONE:
++ PDEBUG("dev %d not available\n", index);
++ rv = SEN_NOT_AVAIL;
++ break;
++ case DEV_ONLINE:
++ rv = 0;
++ break;
++ case DEV_EMPTY:
++ rv = SEN_NOT_AVAIL;
++ break;
++ case DEV_NO_WORK:
++ rv = SEN_FATAL_ERROR;
++ break;
++ case DEV_BAD_MESSAGE:
++ rv = SEN_USER_ERROR;
++ break;
++ case DEV_QUEUE_FULL:
++ rv = SEN_QUEUE_FULL;
++ break;
++ default:
++ PRINTK("unknown dv=%d for dev %d\n", dv, index);
++ rv = SEN_NOT_AVAIL;
++ break;
++ }
++ }
++
++ if (rv)
++ break;
++
++ for (i = 0; i < 6; i++) {
++ mdelay(300);
++ dv = receive_from_AP(index, z90crypt.cdx,
++ devPtr->dev_resp_l,
++ devPtr->dev_resp_p, psmid);
++ PDEBUG("dv returned by DQ = %d\n", dv);
++ if (dv == DEV_REC_EXCEPTION) {
++ rv = REC_FATAL_ERROR;
++ PRINTKC("exception in dequeue %d\n",
++ index);
++ break;
++ }
++ switch (dv) {
++ case DEV_ONLINE:
++ rv = 0;
++ break;
++ case DEV_EMPTY:
++ rv = REC_EMPTY;
++ break;
++ case DEV_NO_WORK:
++ rv = REC_NO_WORK;
++ break;
++ case DEV_BAD_MESSAGE:
++ case DEV_GONE:
++ default:
++ rv = REC_NO_RESPONSE;
++ break;
++ }
++ if ((rv != 0) && (rv != REC_NO_WORK))
++ break;
++ if (rv == 0)
++ break;
++ }
++ if (rv)
++ break;
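++		/* a PCICC answers the test message with a reply beginning */
++		/* 0x00 0x86; any other reply marks the card as a PCICA */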
++ rv = (devPtr->dev_resp_p[0] == 0x00) &&
++ (devPtr->dev_resp_p[1] == 0x86);
++ if (rv)
++ devPtr->dev_type = PCICC;
++ else
++ devPtr->dev_type = PCICA;
++ rv = 0;
++ } while (0);
++ /* In a general error case, the card is not marked online */
++ return rv;
++}
++
++static unsigned char MCL3_testmsg[] = {
++0x00,0x00,0x00,0x00,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,
++0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
++0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
++0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
++0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
++0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
++0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
++0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
++0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
++0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
++0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
++0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
++0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
++0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
++0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
++0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
++0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
++0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
++0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,0xF1,0x3D,0x93,0x53
++};
++
++static int
++probe_PCIXCC_type(struct device *devPtr)
++{
++ int rv, dv, i, index, length;
++ unsigned char psmid[8];
++ static unsigned char loc_testmsg[548];
++ struct CPRBX *cprbx_p;
++
++ index = devPtr->dev_self_x;
++ rv = 0;
++ do {
++ memcpy(loc_testmsg, MCL3_testmsg, sizeof(MCL3_testmsg));
++ length = sizeof(MCL3_testmsg) - 0x0C;
++ dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
++ if (dv) {
++ PDEBUG("dv returned = %d\n", dv);
++ if (dv == DEV_SEN_EXCEPTION) {
++ rv = SEN_FATAL_ERROR;
++ PRINTKC("exception in send to AP %d\n", index);
++ break;
++ }
++ PDEBUG("return value from send_to_AP: %d\n", rv);
++ switch (dv) {
++ case DEV_GONE:
++ PDEBUG("dev %d not available\n", index);
++ rv = SEN_NOT_AVAIL;
++ break;
++ case DEV_ONLINE:
++ rv = 0;
++ break;
++ case DEV_EMPTY:
++ rv = SEN_NOT_AVAIL;
++ break;
++ case DEV_NO_WORK:
++ rv = SEN_FATAL_ERROR;
++ break;
++ case DEV_BAD_MESSAGE:
++ rv = SEN_USER_ERROR;
++ break;
++ case DEV_QUEUE_FULL:
++ rv = SEN_QUEUE_FULL;
++ break;
++ default:
++ PRINTK("unknown dv=%d for dev %d\n", dv, index);
++ rv = SEN_NOT_AVAIL;
++ break;
++ }
++ }
++
++ if (rv)
++ break;
++
++ for (i = 0; i < 6; i++) {
++ mdelay(300);
++ dv = receive_from_AP(index, z90crypt.cdx,
++ devPtr->dev_resp_l,
++ devPtr->dev_resp_p, psmid);
++ PDEBUG("dv returned by DQ = %d\n", dv);
++ if (dv == DEV_REC_EXCEPTION) {
++ rv = REC_FATAL_ERROR;
++ PRINTKC("exception in dequeue %d\n",
++ index);
++ break;
++ }
++ switch (dv) {
++ case DEV_ONLINE:
++ rv = 0;
++ break;
++ case DEV_EMPTY:
++ rv = REC_EMPTY;
++ break;
++ case DEV_NO_WORK:
++ rv = REC_NO_WORK;
++ break;
++ case DEV_BAD_MESSAGE:
++ case DEV_GONE:
++ default:
++ rv = REC_NO_RESPONSE;
++ break;
++ }
++ if ((rv != 0) && (rv != REC_NO_WORK))
++ break;
++ if (rv == 0)
++ break;
++ }
++ if (rv)
++ break;
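++		/* reply code 8/33 in the CPRBX identifies an MCL2 card; */
++		/* anything else is treated as MCL3 */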
++ cprbx_p = (struct CPRBX *) (devPtr->dev_resp_p + 48);
++ if ((cprbx_p->ccp_rtcode == 8) && (cprbx_p->ccp_rscode == 33)) {
++ devPtr->dev_type = PCIXCC_MCL2;
++ PDEBUG("device %d is MCL2\n", index);
++ } else {
++ devPtr->dev_type = PCIXCC_MCL3;
++ PDEBUG("device %d is MCL3\n", index);
++ }
++ } while (0);
++ /* In a general error case, the card is not marked online */
++ return rv;
++}
++
++#ifdef Z90CRYPT_USE_HOTPLUG
++static void
++z90crypt_hotplug_event(int dev_major, int dev_minor, int action)
++{
++#ifdef CONFIG_HOTPLUG
++ char *argv[3];
++ char *envp[6];
++ char major[20];
++ char minor[20];
++
++ sprintf(major, "MAJOR=%d", dev_major);
++ sprintf(minor, "MINOR=%d", dev_minor);
++
++ argv[0] = hotplug_path;
++ argv[1] = "z90crypt";
++ argv[2] = 0;
++
++ envp[0] = "HOME=/";
++ envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
++
++ switch (action) {
++ case Z90CRYPT_HOTPLUG_ADD:
++ envp[2] = "ACTION=add";
++ break;
++ case Z90CRYPT_HOTPLUG_REMOVE:
++ envp[2] = "ACTION=remove";
++ break;
++ default:
++ BUG();
++ break;
++ }
++ envp[3] = major;
++ envp[4] = minor;
++ envp[5] = 0;
++
++ call_usermodehelper(argv[0], argv, envp);
++#endif
++}
++#endif
++
++module_init(z90crypt_init_module);
++module_exit(z90crypt_cleanup_module);
+=== drivers/s390/misc/chandev.c
+==================================================================
+--- drivers/s390/misc/chandev.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/misc/chandev.c (/trunk/2.4.27) (revision 52)
+@@ -1023,15 +1023,16 @@
+
+	/* 3172/2216 Parallel: the 2216 allows 16 ports per card; */
+	/* the original 3172 only allows 4. We will assume the max of 16 */
+- chandev_add_model(chandev_type_lcs|chandev_type_ctc,0x3088,0x8,-1,-1,15,default_msck_bits,FALSE,FALSE);
++ chandev_add_model(chandev_type_lcs|chandev_type_ctc|chandev_type_ctcmpc,0x3088,0x8,-1,-1,15,default_msck_bits,FALSE,FALSE);
+
+	/* 3172/2216 Escon serial: the 2216 allows 16 ports per card; */
+	/* the original 3172 only allows 4. We will assume the max of 16 */
+- chandev_add_model(chandev_type_lcs|chandev_type_escon,0x3088,0x1F,-1,-1,15,default_msck_bits,FALSE,FALSE);
++ chandev_add_model(chandev_type_lcs|chandev_type_escon|chandev_type_ctcmpc,0x3088,0x1F,-1,-1,15,default_msck_bits,FALSE,FALSE);
+
+ /* Only 2 ports allowed on OSA2 cards model 0x60 */
+ chandev_add_model(chandev_type_lcs,0x3088,0x60,-1,-1,1,default_msck_bits,FALSE,FALSE);
+- /* qeth gigabit ethernet */
++ chandev_add_model(chandev_type_claw,0x3088,0x61,-1,-1,0,default_msck_bits,FALSE,FALSE);
++ /* qeth gigabit ethernet */
+ chandev_add_model(chandev_type_qeth,0x1731,0x1,0x1732,0x1,0,default_msck_bits,FALSE,FALSE);
+ chandev_add_model(chandev_type_qeth,0x1731,0x5,0x1732,0x5,0,default_msck_bits,FALSE,FALSE);
+ /* Osa-D we currently aren't too emotionally involved with this */
+@@ -1040,7 +1041,7 @@
+ chandev_add_model(chandev_type_claw,0x3088,0x61,-1,-1,0,default_msck_bits,FALSE,FALSE);
+
+ /* ficon attached ctc */
+- chandev_add_model(chandev_type_escon,0x3088,0x1E,-1,-1,0,default_msck_bits,FALSE,FALSE);
++ chandev_add_model(chandev_type_escon|chandev_type_ctcmpc,0x3088,0x1E,-1,-1,0,default_msck_bits,FALSE,FALSE);
+ }
+
+
+@@ -2187,6 +2188,7 @@
+ {
+ "noauto",
+ "del_noauto",
++ "ctcmpc",
+ "ctc",
+ "escon",
+ "lcs",
+@@ -2224,6 +2226,7 @@
+ first_stridx=0,
+ noauto_stridx=first_stridx,
+ del_noauto_stridx,
++ ctcmpc_stridx,
+ ctc_stridx,
+ escon_stridx,
+ lcs_stridx,
+@@ -2285,7 +2288,7 @@
+
+
+ static char chandev_keydescript[]=
+-"\nchan_type key bitfield ctc=0x1,escon=0x2,lcs=0x4,osad=0x8,qeth=0x10,claw=0x20\n";
++"\nchan_type key bitfield ctc=0x1,escon=0x2,lcs=0x4,osad=0x8,qeth=0x10,claw=0x20,ctcmpc=0x40\n";
+
+
+ #if CONFIG_ARCH_S390X
+@@ -2500,10 +2503,12 @@
+ ints[3],ints[4],ints[5],ints[6],ints[7],
+ NULL,NULL,NULL);
+ break;
++ case (ctcmpc_stridx*stridx_mult)|isnum|iscomma:
+ case (ctc_stridx*stridx_mult)|isnum|iscomma:
+ case (escon_stridx*stridx_mult)|isnum|iscomma:
+ case (lcs_stridx*stridx_mult)|isnum|iscomma:
+ case (osad_stridx*stridx_mult)|isnum|iscomma:
++ case (ctcmpc_stridx*stridx_mult)|iscomma:
+ case (ctc_stridx*stridx_mult)|iscomma:
+ case (escon_stridx*stridx_mult)|iscomma:
+ case (lcs_stridx*stridx_mult)|iscomma:
+@@ -2513,6 +2518,9 @@
+ case (ctc_stridx*stridx_mult):
+ chan_type=chandev_type_ctc;
+ break;
++ case (ctcmpc_stridx*stridx_mult):
++ chan_type=chandev_type_ctcmpc;
++ break;
+ case (escon_stridx*stridx_mult):
+ chan_type=chandev_type_escon;
+ break;
+=== drivers/s390/misc/z90crypt.h
+==================================================================
+--- drivers/s390/misc/z90crypt.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/misc/z90crypt.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,258 @@
++/*
++ * linux/drivers/s390/misc/z90crypt.h
++ *
++ * z90crypt 1.3.2
++ *
++ * Copyright (C) 2001, 2004 IBM Corporation
++ * Author(s): Robert Burroughs (burrough at us.ibm.com)
++ * Eric Rossman (edrossma at us.ibm.com)
++ *
++ * Hotplug & misc device support: Jochen Roehrig (roehrig at de.ibm.com)
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#ifndef _LINUX_Z90CRYPT_H_
++#define _LINUX_Z90CRYPT_H_
++
++#include <linux/ioctl.h>
++
++#define VERSION_Z90CRYPT_H "$Revision: 1.3.4.7 $"
++
++#define z90crypt_VERSION 1
++#define z90crypt_RELEASE 3 // 2 = PCIXCC, 3 = rewrite for coding standards
++#define z90crypt_VARIANT 2 // 2 = added PCIXCC MCL3 and CEX2C support
++
++/**
++ * If we are not using the sparse checker, __user has no use.
++ */
++#ifdef __CHECKER__
++# define __user __attribute__((noderef, address_space(1)))
++#else
++# define __user
++#endif
++
++/**
++ * struct ica_rsa_modexpo
++ *
++ * Requirements:
++ * - outputdatalength is at least as large as inputdatalength.
++ * - All key parts are right justified in their fields, padded on
++ * the left with zeroes.
++ * - length(b_key) = inputdatalength
++ * - length(n_modulus) = inputdatalength
++ */
++struct ica_rsa_modexpo {
++ char __user * inputdata;
++ unsigned int inputdatalength;
++ char __user * outputdata;
++ unsigned int outputdatalength;
++ char __user * b_key;
++ char __user * n_modulus;
++};
++
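++/**
++ * Sketch of a mod-expo request from user space (the device node
++ * name, the open flags, and the 128-byte operand size below are
++ * illustrative, not mandated by this header):
++ *
++ *   struct ica_rsa_modexpo mex;
++ *   int fd = open("/dev/z90crypt", O_RDWR);
++ *
++ *   mex.inputdata = input;        // inputdatalength bytes
++ *   mex.inputdatalength = 128;
++ *   mex.outputdata = output;      // at least inputdatalength bytes
++ *   mex.outputdatalength = 128;
++ *   mex.b_key = exponent;         // right justified, zero padded
++ *   mex.n_modulus = modulus;      // right justified, zero padded
++ *   if (ioctl(fd, ICARSAMODEXPO, &mex) < 0)
++ *           ; // e.g. retry or fall back to software
++ */
++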
++/**
++ * struct ica_rsa_modexpo_crt
++ *
++ * Requirements:
++ * - inputdatalength is even.
++ * - outputdatalength is at least as large as inputdatalength.
++ * - All key parts are right justified in their fields, padded on
++ * the left with zeroes.
++ * - length(bp_key) = inputdatalength/2 + 8
++ * - length(bq_key) = inputdatalength/2
++ *   - length(np_prime) = inputdatalength/2 + 8
++ *   - length(nq_prime) = inputdatalength/2
++ * - length(u_mult_inv) = inputdatalength/2 + 8
++ */
++struct ica_rsa_modexpo_crt {
++ char __user * inputdata;
++ unsigned int inputdatalength;
++ char __user * outputdata;
++ unsigned int outputdatalength;
++ char __user * bp_key;
++ char __user * bq_key;
++ char __user * np_prime;
++ char __user * nq_prime;
++ char __user * u_mult_inv;
++};
++
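++/**
++ * Worked example of the CRT length rules above, assuming an
++ * (illustrative) inputdatalength of 128:
++ *
++ *   length(bp_key)     = 128/2 + 8 = 72
++ *   length(bq_key)     = 128/2     = 64
++ *   length(np_prime)   = 128/2 + 8 = 72
++ *   length(nq_prime)   = 128/2     = 64
++ *   length(u_mult_inv) = 128/2 + 8 = 72
++ */
++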
++#define Z90_IOCTL_MAGIC 'z' // NOTE: Need to allocate from linux folks
++
++/**
++ * Interface notes:
++ *
++ * The ioctl()s which are implemented (along with relevant details)
++ * are:
++ *
++ * ICARSAMODEXPO
++ * Perform an RSA operation using a Modulus-Exponent pair
++ * This takes an ica_rsa_modexpo struct as its arg.
++ *
++ * NOTE: please refer to the comments preceding this structure
++ *   for implementation details on the contents of the
++ * block
++ *
++ * ICARSACRT
++ * Perform an RSA operation using a Chinese-Remainder Theorem key
++ * This takes an ica_rsa_modexpo_crt struct as its arg.
++ *
++ * NOTE: please refer to the comments preceding this structure
++ *   for implementation details on the contents of the
++ * block
++ *
++ * Z90STAT_TOTALCOUNT
++ * Return an integer count of all device types together.
++ *
++ * Z90STAT_PCICACOUNT
++ * Return an integer count of all PCICAs.
++ *
++ * Z90STAT_PCICCCOUNT
++ * Return an integer count of all PCICCs.
++ *
++ * Z90STAT_PCIXCCMCL2COUNT
++ * Return an integer count of all MCL2 PCIXCCs.
++ *
++ * Z90STAT_PCIXCCMCL3COUNT
++ * Return an integer count of all MCL3 PCIXCCs.
++ *
++ * Z90STAT_CEX2CCOUNT
++ * Return an integer count of all CEX2Cs.
++ *
++ * Z90STAT_REQUESTQ_COUNT
++ * Return an integer count of the number of entries waiting to be
++ * sent to a device.
++ *
++ * Z90STAT_PENDINGQ_COUNT
++ * Return an integer count of the number of entries sent to a
++ * device awaiting the reply.
++ *
++ * Z90STAT_TOTALOPEN_COUNT
++ * Return an integer count of the number of open file handles.
++ *
++ * Z90STAT_DOMAIN_INDEX
++ * Return the integer value of the Cryptographic Domain.
++ *
++ * Z90STAT_STATUS_MASK
++ *   Return a 64 element array of unsigned chars for the status of
++ * all devices.
++ * 0x01: PCICA
++ * 0x02: PCICC
++ * 0x03: PCIXCC_MCL2
++ * 0x04: PCIXCC_MCL3
++ * 0x05: CEX2C
++ * 0x0d: device is disabled via the proc filesystem
++ *
++ * Z90STAT_QDEPTH_MASK
++ *   Return a 64 element array of unsigned chars for the queue
++ * depth of all devices.
++ *
++ * Z90STAT_PERDEV_REQCNT
++ *   Return a 64 element array of unsigned integers for the number
++ * of successfully completed requests per device since the device
++ * was detected and made available.
++ *
++ * ICAZ90STATUS (deprecated)
++ *   Return some device driver status in an ica_z90_status struct.
++ * This takes an ica_z90_status struct as its arg.
++ *
++ * NOTE: this ioctl() is deprecated, and has been replaced with
++ * single ioctl()s for each type of status being requested
++ *
++ * Z90STAT_PCIXCCCOUNT (deprecated)
++ * Return an integer count of all PCIXCCs (MCL2 + MCL3).
++ * This is DEPRECATED now that MCL3 PCIXCCs are treated differently from
++ * MCL2 PCIXCCs.
++ *
++ * Z90QUIESCE (not recommended)
++ * Quiesce the driver. This is intended to stop all new
++ * requests from being processed. Its use is NOT recommended,
++ * except in circumstances where there is no other way to stop
++ * callers from accessing the driver. Its original use was to
++ * allow the driver to be "drained" of work in preparation for
++ * a system shutdown.
++ *
++ * NOTE: once issued, this ban on new work cannot be undone
++ * except by unloading and reloading the driver.
++ */
++
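++/**
++ * Sketch of a status query from user space (the device node name
++ * is illustrative):
++ *
++ *   int count;
++ *   int fd = open("/dev/z90crypt", O_RDONLY);
++ *   if (ioctl(fd, Z90STAT_TOTALCOUNT, &count) == 0)
++ *           printf("%d crypto device(s)\n", count);
++ */
++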
++/**
++ * Supported ioctl calls
++ */
++#define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, Z90_IOCTL_MAGIC, 0x05, 0)
++#define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, Z90_IOCTL_MAGIC, 0x06, 0)
++
++/* DEPRECATED status calls (bound for removal at some point) */
++#define ICAZ90STATUS _IOR(Z90_IOCTL_MAGIC, 0x10, struct ica_z90_status)
++#define Z90STAT_PCIXCCCOUNT _IOR(Z90_IOCTL_MAGIC, 0x43, int)
++
++/* unrelated to ICA callers */
++#define Z90QUIESCE _IO(Z90_IOCTL_MAGIC, 0x11)
++
++/* New status calls */
++#define Z90STAT_TOTALCOUNT _IOR(Z90_IOCTL_MAGIC, 0x40, int)
++#define Z90STAT_PCICACOUNT _IOR(Z90_IOCTL_MAGIC, 0x41, int)
++#define Z90STAT_PCICCCOUNT _IOR(Z90_IOCTL_MAGIC, 0x42, int)
++#define Z90STAT_PCIXCCMCL2COUNT _IOR(Z90_IOCTL_MAGIC, 0x4b, int)
++#define Z90STAT_PCIXCCMCL3COUNT _IOR(Z90_IOCTL_MAGIC, 0x4c, int)
++#define Z90STAT_CEX2CCOUNT _IOR(Z90_IOCTL_MAGIC, 0x4d, int)
++#define Z90STAT_REQUESTQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x44, int)
++#define Z90STAT_PENDINGQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x45, int)
++#define Z90STAT_TOTALOPEN_COUNT _IOR(Z90_IOCTL_MAGIC, 0x46, int)
++#define Z90STAT_DOMAIN_INDEX _IOR(Z90_IOCTL_MAGIC, 0x47, int)
++#define Z90STAT_STATUS_MASK _IOR(Z90_IOCTL_MAGIC, 0x48, char[64])
++#define Z90STAT_QDEPTH_MASK _IOR(Z90_IOCTL_MAGIC, 0x49, char[64])
++#define Z90STAT_PERDEV_REQCNT _IOR(Z90_IOCTL_MAGIC, 0x4a, int[64])
++
++/**
++ * local errno definitions
++ */
++#define ENOBUFF 129 // filp->private_data->...>work_elem_p->buffer is NULL
++#define EWORKPEND 130 // user issues ioctl while another pending
++#define ERELEASED 131 // user released while ioctl pending
++#define EQUIESCE 132 // z90crypt quiescing (no more work allowed)
++#define ETIMEOUT 133 // request timed out
++#define EUNKNOWN  134	// some unrecognized error occurred (retry may succeed)
++#define EGETBUFF 135 // Error getting buffer or hardware lacks capability
++ // (retry in software)
++
++/**
++ * DEPRECATED STRUCTURES
++ */
++
++/**
++ * This structure is DEPRECATED and the corresponding ioctl() has been
++ * replaced with individual ioctl()s for each piece of data!
++ * This structure will NOT survive past version 1.3.1, so switch to the
++ * new ioctl()s.
++ */
++#define MASK_LENGTH 64 // mask length
++struct ica_z90_status {
++ int totalcount;
++ int leedslitecount; // PCICA
++ int leeds2count; // PCICC
++ // int PCIXCCCount; is not in struct for backward compatibility
++ int requestqWaitCount;
++ int pendingqWaitCount;
++ int totalOpenCount;
++ int cryptoDomain;
++ // status: 0=not there, 1=PCICA, 2=PCICC, 3=PCIXCC_MCL2, 4=PCIXCC_MCL3,
++ // 5=CEX2C
++ unsigned char status[MASK_LENGTH];
++ // qdepth: # work elements waiting for each device
++ unsigned char qdepth[MASK_LENGTH];
++};
++
++#endif /* _LINUX_Z90CRYPT_H_ */
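The status calls above are plain request/response ioctls against the z90crypt character device. A minimal user-space sketch of driving them follows; the /dev/z90crypt node name and this helper program are illustrative assumptions, not part of the patch:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "z90crypt.h"		/* the header added above */

int main(void)
{
	unsigned char status[MASK_LENGTH];	/* 64 entries, see above */
	int fd, total, i;

	fd = open("/dev/z90crypt", O_RDWR);	/* assumed node name */
	if (fd < 0)
		return 1;
	if (ioctl(fd, Z90STAT_TOTALCOUNT, &total) == 0)
		printf("%d crypto device(s)\n", total);
	if (ioctl(fd, Z90STAT_STATUS_MASK, status) == 0)
		for (i = 0; i < MASK_LENGTH; i++)
			if (status[i] && status[i] != 0x0d)
				printf("slot %d: type 0x%02x\n", i, status[i]);
	close(fd);
	return 0;
}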
+=== drivers/s390/misc/Makefile
+==================================================================
+--- drivers/s390/misc/Makefile (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/misc/Makefile (/trunk/2.4.27) (revision 52)
+@@ -4,7 +4,15 @@
+
+ O_TARGET := s390-misc.o
+
+-obj-$(CONFIG_CHANDEV) += chandev.o
+ export-objs += chandev.o
+
++list-multi := z90crypt.o
++z90crypt_mod-objs := z90main.o z90hardware.o
++obj-$(CONFIG_Z90CRYPT) += z90crypt.o
++
++obj-$(CONFIG_CHANDEV) += chandev.o
++
+ include $(TOPDIR)/Rules.make
++
++z90crypt.o: $(z90crypt_mod-objs)
++ $(LD) -r -o $@ $(z90crypt_mod-objs)
+=== drivers/s390/Makefile
+==================================================================
+--- drivers/s390/Makefile (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/Makefile (/trunk/2.4.27) (revision 52)
+@@ -4,12 +4,17 @@
+
+ O_TARGET := io.o
+
+-subdir-y := block char misc net
++subdir-y := block char misc net scsi
+ subdir-m := $(subdir-y)
+
+ obj-y := s390io.o s390mach.o s390dyn.o ccwcache.o sysinfo.o
+ export-objs += ccwcache.o s390dyn.o s390io.o
++obj-$(CONFIG_QDIO) += qdio.o
++export-objs += qdio.o
+
++obj-$(CONFIG_S390_CMF) += cmf.o
++export-objs += cmf.o
++
+ obj-y += $(foreach dir,$(subdir-y),$(dir)/s390-$(dir).o)
+
+ include $(TOPDIR)/Rules.make
+=== drivers/s390/char/tape.h
+==================================================================
+--- drivers/s390/char/tape.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tape.h (/trunk/2.4.27) (revision 52)
+@@ -1,203 +1,436 @@
+-/***************************************************************************
+- *
++/*
+ * drivers/s390/char/tape.h
+- * tape device driver for 3480/3490E tapes.
++ * tape device driver for 3480/3490E/3590 tapes.
+ *
+ * S390 and zSeries version
+- * Copyright (C) 2001 IBM Corporation
++ * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Carsten Otte <cotte at de.ibm.com>
+- * Tuan Ngo-Anh <ngoanh at de.ibm.com>
+- *
+- ****************************************************************************
++ * Tuan Ngo-Anh <ngoanh at de.ibm.com>
++ * Martin Schwidefsky <schwidefsky at de.ibm.com>
++ * Stefan Bader <shbader at de.ibm.com>
+ */
+
+ #ifndef _TAPE_H
++#define _TAPE_H
+
+-#define _TAPE_H
+ #include <linux/config.h>
+ #include <linux/blkdev.h>
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/mtio.h>
++#include <linux/interrupt.h>
++#include <linux/timer.h>
++#include <asm/debug.h>
++#include <asm/idals.h>
++#include <asm/s390dyn.h>
++#ifdef CONFIG_DEVFS_FS
++#include <linux/devfs_fs_kernel.h>
++#endif
+
+-#define MAX_TAPES 7 /* Max tapes supported is 7*/
+-#define TAPE_MAGIC 0xE3C1D7C5 /* is ebcdic-"TAPE" */
++/*
++ * macros for the s390 debug feature (dbf)
++ */
++#define DBF_EVENT(d_level, d_str...) \
++do { \
++ debug_sprintf_event(TAPE_DBF_AREA, d_level, d_str); \
++} while (0)
+
+-typedef enum {
+- TS_UNUSED=0, TS_IDLE, TS_DONE, TS_FAILED,
+- TS_BLOCK_INIT,
+- TS_BSB_INIT,
+- TS_BSF_INIT,
+- TS_DSE_INIT,
+- TS_EGA_INIT,
+- TS_FSB_INIT,
+- TS_FSF_INIT,
+- TS_LDI_INIT,
+- TS_LBL_INIT,
+- TS_MSE_INIT,
+- TS_NOP_INIT,
+- TS_RBA_INIT,
+- TS_RBI_INIT,
+- TS_RBU_INIT,
+- TS_RBL_INIT,
+- TS_RDC_INIT,
+- TS_RFO_INIT,
+- TS_RSD_INIT,
+- TS_REW_INIT,
+- TS_REW_RELEASE_INIT,
+- TS_RUN_INIT,
+- TS_SEN_INIT,
+- TS_SID_INIT,
+- TS_SNP_INIT,
+- TS_SPG_INIT,
+- TS_SWI_INIT,
+- TS_SMR_INIT,
+- TS_SYN_INIT,
+- TS_TIO_INIT,
+- TS_UNA_INIT,
+- TS_WRI_INIT,
+- TS_WTM_INIT,
+- TS_NOT_OPER,
+- TS_SIZE } tape_stat;
++#define DBF_EXCEPTION(d_level, d_str...) \
++do { \
++ debug_sprintf_exception(TAPE_DBF_AREA, d_level, d_str); \
++} while (0)
+
+-struct _tape_info_t; //Forward declaration
++#define TAPE_VERSION_MAJOR 2
++#define TAPE_VERSION_MINOR 0
++#define TAPE_MAGIC "tape"
+
+-typedef enum {
+- TE_START=0, TE_DONE, TE_FAILED, TE_ERROR, TE_OTHER,
+- TE_SIZE } tape_events;
++#define TAPE_MINORS_PER_DEV 2 /* two minors per device */
++#define TAPEBLOCK_HSEC_SIZE 2048
++#define TAPEBLOCK_HSEC_S2B 2
++#define TAPEBLOCK_RETRIES 5
+
+-typedef void (*tape_disc_shutdown_t) (int);
+-typedef void (*tape_event_handler_t) (struct _tape_info_t*);
+-typedef ccw_req_t* (*tape_ccwgen_t)(struct _tape_info_t* ti,int count);
+-typedef ccw_req_t* (*tape_reqgen_t)(struct request* req,struct _tape_info_t* ti,int tapeblock_major);
+-typedef ccw_req_t* (*tape_rwblock_t)(const char* data,size_t count,struct _tape_info_t* ti);
+-typedef void (*tape_freeblock_t)(ccw_req_t* cqr,struct _tape_info_t* ti);
+-typedef void (*tape_setup_assist_t) (struct _tape_info_t*);
++/* Event types for hotplug */
++#define TAPE_HOTPLUG_CHAR_ADD 1
++#define TAPE_HOTPLUG_BLOCK_ADD 2
++#define TAPE_HOTPLUG_CHAR_REMOVE 3
++#define TAPE_HOTPLUG_BLOCK_REMOVE 4
++
++enum tape_medium_state {
++ MS_UNKNOWN,
++ MS_LOADED,
++ MS_UNLOADED,
++ MS_SIZE
++};
++
++enum tape_op {
++ TO_BLOCK, /* Block read */
++ TO_BSB, /* Backward space block */
++ TO_BSF, /* Backward space filemark */
++ TO_DSE, /* Data security erase */
++ TO_FSB, /* Forward space block */
++ TO_FSF, /* Forward space filemark */
++ TO_LBL, /* Locate block label */
++ TO_NOP, /* No operation */
++ TO_RBA, /* Read backward */
++ TO_RBI, /* Read block information */
++ TO_RFO, /* Read forward */
++ TO_REW, /* Rewind tape */
++ TO_RUN, /* Rewind and unload tape */
++ TO_WRI, /* Write block */
++ TO_WTM, /* Write tape mark */
++ TO_MSEN, /* Medium sense */
++ TO_LOAD, /* Load tape */
++ TO_READ_CONFIG, /* Read configuration data */
++ TO_READ_ATTMSG, /* Read attention message */
++ TO_DIS, /* Tape display */
++ TO_ASSIGN, /* Assign tape to channel path */
++ TO_UNASSIGN, /* Unassign tape from channel path */
++ TO_BREAKASS, /* Break the assignment of another host */
++	TO_SIZE			/* number of entries in enum tape_op */
++};
++
++/* Forward declaration */
++struct tape_device;
++
++/* The tape device list lock */
++extern rwlock_t tape_dev_lock;
++
++/* Tape CCW request */
++struct tape_request {
++ struct list_head list; /* list head for request queueing. */
++ struct tape_device *device; /* tape device of this request */
++ ccw1_t *cpaddr; /* address of the channel program. */
++ void *cpdata; /* pointer to ccw data. */
++ char status; /* status of this request */
++ int options; /* options for execution. */
++ int retries; /* retry counter for error recovery. */
++
++ /*
++ * This timer can be used to automatically cancel a request after
++	 * some time. Specifically, the assign request seems to lock up
++	 * under certain circumstances.
++ */
++ struct timer_list timeout;
++
++ enum tape_op op;
++ int rc;
++ atomic_t ref_count;
++
++ /* Callback for delivering final status. */
++ void (*callback)(struct tape_request *, void *);
++ void *callback_data;
++};
++
++/* tape_request->status can be: */
++#define TAPE_REQUEST_INIT 0x00 /* request is ready to be processed */
++#define TAPE_REQUEST_QUEUED 0x01 /* request is queued to be processed */
++#define TAPE_REQUEST_IN_IO 0x02 /* request is currently in IO */
++#define TAPE_REQUEST_DONE 0x03 /* request is completed. */
++
++/* Function type for magnetic tape commands */
++typedef int (*tape_mtop_fn)(struct tape_device *, int);
++
++/* Size of the array containing the mtops for a discipline */
++#define TAPE_NR_MTOPS (MTMKPART+1)
++
++/* Tape Discipline */
++struct tape_discipline {
++ struct list_head list;
++ struct module *owner;
++ unsigned int cu_type;
++ int (*setup_device)(struct tape_device *);
++ void (*cleanup_device)(struct tape_device *);
++ int (*assign)(struct tape_device *);
++ int (*unassign)(struct tape_device *);
++ int (*force_unassign)(struct tape_device *);
++ int (*irq)(struct tape_device *, struct tape_request *);
++ struct tape_request *(*read_block)(struct tape_device *, size_t);
++ struct tape_request *(*write_block)(struct tape_device *, size_t);
++ void (*process_eov)(struct tape_device*);
++ /* Block device stuff. */
++ struct tape_request *(*bread)(struct tape_device *, struct request *);
++ void (*check_locate)(struct tape_device *, struct tape_request *);
++ void (*free_bread)(struct tape_request *);
++ /* ioctl function for additional ioctls. */
++ int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long);
++ /* Array of tape commands with TAPE_NR_MTOPS entries */
++ tape_mtop_fn *mtop_array;
++};
++
++/*
++ * The discipline irq function either returns an error code (<0) which
++ * means that the request has failed with an error or one of the following:
++ */
++#define TAPE_IO_SUCCESS 0 /* request successful */
++#define TAPE_IO_PENDING 1 /* request still running */
++#define TAPE_IO_RETRY 2 /* retry to current request */
++#define TAPE_IO_STOP 3 /* stop the running request */
++
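To make the contract above concrete, here is a hedged sketch of what a discipline irq routine might look like; the sense-bit checks and the function name are illustrative only and not taken from this patch:

static int
example_discipline_irq(struct tape_device *device,
		       struct tape_request *request)
{
	devstat_t *stat = &device->devstat;

	if (stat->dstat & DEV_STAT_UNIT_CHECK)
		return -EIO;			/* request has failed */
	if (stat->dstat & DEV_STAT_DEV_END)
		return TAPE_IO_SUCCESS;		/* final status arrived */
	return TAPE_IO_PENDING;			/* wait for more interrupts */
}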
++/* Char Frontend Data */
++struct tape_char_data {
++ /* Idal buffer to temporaily store character data */
++ struct idal_buffer * idal_buf;
++ /* Block size (in bytes) of the character device (0=auto) */
++ int block_size;
+ #ifdef CONFIG_DEVFS_FS
+-typedef void (*tape_devfs_handler_t) (struct _tape_info_t*);
+-#endif
+-typedef tape_event_handler_t tape_event_table_t[TS_SIZE][TE_SIZE];
+-typedef struct _tape_discipline_t {
+- unsigned int cu_type;
+- tape_setup_assist_t setup_assist;
+- tape_event_handler_t error_recovery;
+- tape_reqgen_t bread;
+- tape_freeblock_t free_bread;
+- tape_rwblock_t write_block;
+- tape_freeblock_t free_write_block;
+- tape_rwblock_t read_block;
+- tape_freeblock_t free_read_block;
+- tape_ccwgen_t mtfsf;
+- tape_ccwgen_t mtbsf;
+- tape_ccwgen_t mtfsr;
+- tape_ccwgen_t mtbsr;
+- tape_ccwgen_t mtweof;
+- tape_ccwgen_t mtrew;
+- tape_ccwgen_t mtoffl;
+- tape_ccwgen_t mtnop;
+- tape_ccwgen_t mtbsfm;
+- tape_ccwgen_t mtfsfm;
+- tape_ccwgen_t mteom;
+- tape_ccwgen_t mterase;
+- tape_ccwgen_t mtsetdensity;
+- tape_ccwgen_t mtseek;
+- tape_ccwgen_t mttell;
+- tape_ccwgen_t mtsetdrvbuffer;
+- tape_ccwgen_t mtlock;
+- tape_ccwgen_t mtunlock;
+- tape_ccwgen_t mtload;
+- tape_ccwgen_t mtunload;
+- tape_ccwgen_t mtcompression;
+- tape_ccwgen_t mtsetpart;
+- tape_ccwgen_t mtmkpart;
+- tape_ccwgen_t mtiocget;
+- tape_ccwgen_t mtiocpos;
+- tape_disc_shutdown_t shutdown;
+- int (*discipline_ioctl_overload)(struct inode *,struct file*, unsigned int,unsigned long);
+- tape_event_table_t* event_table;
+- tape_event_handler_t default_handler;
+- struct _tape_info_t* tape; /* pointer for backreference */
+- void* next;
+-} tape_discipline_t __attribute__ ((aligned(8)));
++ /* tape/<DEVNO>/char subdirectory in devfs */
++ devfs_handle_t devfs_char_dir;
++ /* tape/<DEVNO>/char/nonrewinding entry in devfs */
++ devfs_handle_t devfs_nonrewinding;
++ /* tape/<DEVNO>/char/rewinding entry in devfs */
++ devfs_handle_t devfs_rewinding;
++#endif /* CONFIG_DEVFS_FS */
++};
+
+-typedef struct _tape_frontend_t {
+- tape_setup_assist_t device_setup;
++#ifdef CONFIG_S390_TAPE_BLOCK
++/* Block Frontend Data */
++struct tape_blk_data
++{
++ /* Block device request queue. */
++ request_queue_t request_queue;
++ /* Block frontend tasklet */
++ struct tasklet_struct tasklet;
++ /* Current position on the tape. */
++ unsigned int block_position;
++ /* The start of the block device image file */
++ unsigned int start_block_id;
+ #ifdef CONFIG_DEVFS_FS
+- tape_devfs_handler_t mkdevfstree;
+- tape_devfs_handler_t rmdevfstree;
++ /* tape/<DEVNO>/block subdirectory in devfs */
++ devfs_handle_t devfs_block_dir;
++ /* tape/<DEVNO>/block/disc entry in devfs */
++ devfs_handle_t devfs_disc;
++#endif /* CONFIG_DEVFS_FS */
++};
+ #endif
+- void* next;
+-} tape_frontend_t __attribute__ ((aligned(8)));
+
++#define TAPE_STATUS_INIT 0x00000001
++#define TAPE_STATUS_ASSIGN_M 0x00000002
++#define TAPE_STATUS_ASSIGN_A 0x00000004
++#define TAPE_STATUS_OPEN 0x00000008
++#define TAPE_STATUS_BLOCKDEV 0x00000010
++#define TAPE_STATUS_BOXED 0x20000000
++#define TAPE_STATUS_NOACCESS 0x40000000
++#define TAPE_STATUS_NOT_OPER 0x80000000
+
+-typedef struct _tape_info_t {
+- wait_queue_head_t wq;
+- s390_dev_info_t devinfo; /* device info from Common I/O */
+- int wanna_wakeup;
+- int rew_minor; /* minor number for the rewinding tape */
+- int nor_minor; /* minor number for the nonrewinding tape */
+- int blk_minor; /* minor number for the block device */
+- devstat_t devstat; /* contains irq, devno, status */
+- size_t block_size; /* block size of tape */
+- int drive_type; /* Code indicating type of drive */
+- struct file *rew_filp; /* backpointer to file structure */
+- struct file *nor_filp;
+- struct file *blk_filp;
+- int tape_state; /* State of the device. See tape_stat */
+- int rc; /* Return code. */
+- tape_discipline_t* discipline;
+- request_queue_t request_queue;
+- struct request* current_request;
+- int blk_retries;
+- long position;
+- int medium_is_unloaded; // Becomes true when a unload-type operation was issued, false again when medium-insert was detected
+- ccw_req_t* cqr;
+- atomic_t bh_scheduled;
+- struct tq_struct bh_tq;
++#define TAPE_SET_STATE(td,st) \
++ do { \
++ tape_state_set(td, td->tape_status | (st)); \
++ } while(0)
++#define TAPE_CLEAR_STATE(td,st) \
++ do { \
++ tape_state_set(td, td->tape_status & ~(st)); \
++ } while(0)
++
++#define TAPE_UNUSED(td) (!TAPE_OPEN(td))
++#define TAPE_INIT(td) (td->tape_status & TAPE_STATUS_INIT)
++#define TAPE_ASSIGNED(td) ( \
++ td->tape_status & ( \
++ TAPE_STATUS_ASSIGN_M | \
++ TAPE_STATUS_ASSIGN_A \
++ ) \
++ )
++#define TAPE_OPEN(td) (td->tape_status & TAPE_STATUS_OPEN)
++#define TAPE_BLOCKDEV(td) (td->tape_status & TAPE_STATUS_BLOCKDEV)
++#define TAPE_BOXED(td) (td->tape_status & TAPE_STATUS_BOXED)
++#define TAPE_NOACCESS(td) (td->tape_status & TAPE_STATUS_NOACCESS)
++#define TAPE_NOT_OPER(td) (td->tape_status & TAPE_STATUS_NOT_OPER)
++
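A small usage sketch for the state helpers above (the function is illustrative, not part of the patch):

/* illustrative: refuse to open a device that is gone or already open */
static int
example_try_open(struct tape_device *device)
{
	if (TAPE_NOT_OPER(device) || TAPE_OPEN(device))
		return -EBUSY;
	TAPE_SET_STATE(device, TAPE_STATUS_OPEN);
	return 0;
}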
++/* Tape Info */
++struct tape_device {
++ /* Device discipline information. */
++ struct tape_discipline *discipline;
++ void * discdata;
++
++ /* Generic status bits */
++ long tape_generic_status;
++ unsigned int tape_status;
++ enum tape_medium_state medium_state;
++
++ /* Number of tapemarks required for correct termination */
++ int required_tapemarks;
++
++ /* Waitqueue for state changes and device flags */
++ wait_queue_head_t state_change_wq;
++ unsigned char * modeset_byte;
++
++ /* Reference count. */
++ atomic_t ref_count;
++
++ /* For persistent assign */
++ spinlock_t assign_lock;
++
++ /* Request queue. */
++ struct list_head req_queue;
++ atomic_t bh_scheduled;
++ struct tq_struct bh_task;
++
++ /* Common i/o stuff. */
++ s390_dev_info_t devinfo;
++ devstat_t devstat;
++
++ /* each tape device has two minors */
++ int first_minor;
++
+ #ifdef CONFIG_DEVFS_FS
+- devfs_handle_t devfs_dir; /* devfs handle for tape/DEVNO directory */
+- devfs_handle_t devfs_char_dir; /* devfs handle for tape/DEVNO/char directory */
+- devfs_handle_t devfs_block_dir; /* devfs handle for tape/DEVNO/block directory */
+- devfs_handle_t devfs_nonrewinding; /* devfs handle for tape/DEVNO/char/nonrewinding device */
+- devfs_handle_t devfs_rewinding; /* devfs handle for tape/DEVNO/char/rewinding device */
+- devfs_handle_t devfs_disc; /* devfs handle for tape/DEVNO/block/disc device */
++ /* Toplevel devfs directory. */
++ devfs_handle_t devfs_dir;
++#endif /* CONFIG_DEVFS_FS */
++ /* Character device frontend data */
++ struct tape_char_data char_data;
++#ifdef CONFIG_S390_TAPE_BLOCK
++ /* Block dev frontend data */
++ struct tape_blk_data blk_data;
+ #endif
+- void* discdata;
+- void* kernbuf;
+- void* userbuf;
+- void* next;
+-} tape_info_t __attribute__ ((aligned(8)));
++};
+
++/* Externals from tape_core.c */
++struct tape_request *tape_alloc_request(int cplength, int datasize);
++struct tape_request *tape_put_request(struct tape_request *);
++struct tape_request *tape_clone_request(struct tape_request *);
++int tape_do_io(struct tape_device *, struct tape_request *);
++int tape_do_io_async(struct tape_device *, struct tape_request *);
++int tape_do_io_interruptible(struct tape_device *, struct tape_request *);
++void tape_schedule_bh(struct tape_device *);
++void tape_hotplug_event(struct tape_device *, int, int);
++
++static inline int
++tape_do_io_free(struct tape_device *device, struct tape_request *request)
++{
++ int rc;
++
++ rc = tape_do_io(device, request);
++ tape_put_request(request);
++ return rc;
++}
++
++int tape_oper_handler(int irq, devreg_t *devreg);
++void tape_noper_handler(int irq, int status);
++int tape_open(struct tape_device *);
++int tape_release(struct tape_device *);
++int tape_assign(struct tape_device *, int type);
++int tape_unassign(struct tape_device *, int type);
++int tape_mtop(struct tape_device *, int, int);
++
++/* Externals from tape_devmap.c */
++int tape_devmap_init(void);
++void tape_devmap_exit(void);
++
++struct tape_device *tape_get_device(int devindex);
++struct tape_device *tape_get_device_by_devno(int devno);
++struct tape_device *tape_clone_device(struct tape_device *);
++void tape_put_device(struct tape_device *);
++
++void tape_auto_detect(void);
++void tape_add_devices(struct tape_discipline *);
++void tape_remove_devices(struct tape_discipline *);
++
++extern int tape_max_devindex;
++
++/* Externals from tape_char.c */
++int tapechar_init(void);
++void tapechar_exit(void);
++int tapechar_setup_device(struct tape_device *);
++void tapechar_cleanup_device(struct tape_device *);
++
++/* Externals from tape_block.c */
++int tapeblock_init (void);
++void tapeblock_exit(void);
++int tapeblock_setup_device(struct tape_device *);
++void tapeblock_cleanup_device(struct tape_device *);
++void tapeblock_medium_change(struct tape_device *);
++
++/* Discipline functions */
++int tape_register_discipline(struct tape_discipline *);
++void tape_unregister_discipline(struct tape_discipline *);
++struct tape_discipline *tape_get_discipline(int cu_type);
++void tape_put_discipline(struct tape_discipline *);
++int tape_enable_device(struct tape_device *, struct tape_discipline *);
++void tape_disable_device(struct tape_device *device, int gone);
++
+ /* tape initialisation functions */
+-int tape_init(void);
+-int tape_setup (tape_info_t * ti, int irq, int minor);
++void tape_proc_init (void);
++void tape_proc_cleanup (void);
+
+-/* functoins for alloc'ing ccw stuff */
+-inline ccw_req_t * tape_alloc_ccw_req (tape_info_t* ti, int cplength, int datasize);
+-void tape_free_request (ccw_req_t * request);
+-
+ /* a function for dumping device sense info */
+-void tape_dump_sense (devstat_t * stat);
++void tape_dump_sense(struct tape_device *, struct tape_request *);
++void tape_dump_sense_dbf(struct tape_device *, struct tape_request *);
+
+-#ifdef CONFIG_S390_TAPE_DYNAMIC
+-/* functions for dyn. dev. attach/detach */
+-int tape_oper_handler ( int irq, struct _devreg *dreg);
+-#endif
+-
+ /* functions for handling the status of a device */
+-inline void tapestate_set (tape_info_t * ti, int newstate);
+-inline int tapestate_get (tape_info_t * ti);
+-void tapestate_event (tape_info_t * ti, int event);
+-extern char* state_verbose[TS_SIZE];
+-extern char* event_verbose[TE_SIZE];
++inline void tape_state_set (struct tape_device *, unsigned int status);
++inline void tape_med_state_set(struct tape_device *, enum tape_medium_state);
++const char *tape_state_string(struct tape_device *);
+
+-/****************************************************************************/
++/* Tape 3480/3490 init/exit functions. */
++int tape_34xx_init(void);
++void tape_34xx_exit(void);
+
+-/* Some linked lists for storing plugins and devices */
+-extern tape_info_t *first_tape_info;
+-extern tape_discipline_t *first_discipline;
+-extern tape_frontend_t *first_frontend;
+-
+ /* The debug area */
+-#ifdef TAPE_DEBUG
+-extern debug_info_t *tape_debug_area;
+-#endif
++extern debug_info_t *TAPE_DBF_AREA;
+
++/* functions for building ccws */
++static inline ccw1_t *
++tape_ccw_cc(ccw1_t *ccw, __u8 cmd_code, __u16 memsize, void *cda)
++{
++ ccw->cmd_code = cmd_code;
++ ccw->flags = CCW_FLAG_CC;
++ ccw->count = memsize;
++ ccw->cda = (__u32)(addr_t) cda;
++ return ccw + 1;
++}
++
++static inline ccw1_t *
++tape_ccw_end(ccw1_t *ccw, __u8 cmd_code, __u16 memsize, void *cda)
++{
++ ccw->cmd_code = cmd_code;
++ ccw->flags = 0;
++ ccw->count = memsize;
++ ccw->cda = (__u32)(addr_t) cda;
++ return ccw + 1;
++}
++
++static inline ccw1_t *
++tape_ccw_cmd(ccw1_t *ccw, __u8 cmd_code)
++{
++ ccw->cmd_code = cmd_code;
++ ccw->flags = 0;
++ ccw->count = 0;
++ ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
++ return ccw + 1;
++}
++
++static inline ccw1_t *
++tape_ccw_repeat(ccw1_t *ccw, __u8 cmd_code, int count)
++{
++ while (count-- > 0) {
++ ccw->cmd_code = cmd_code;
++ ccw->flags = CCW_FLAG_CC;
++ ccw->count = 0;
++ ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
++ ccw++;
++ }
++ return ccw;
++}
++
++extern inline ccw1_t*
++tape_ccw_cc_idal(ccw1_t *ccw, __u8 cmd_code, struct idal_buffer *idal)
++{
++ ccw->cmd_code = cmd_code;
++ ccw->flags = CCW_FLAG_CC;
++ idal_buffer_set_cda(idal, ccw);
++ return ccw++;
++}
++
++extern inline ccw1_t*
++tape_ccw_end_idal(ccw1_t *ccw, __u8 cmd_code, struct idal_buffer *idal)
++{
++ ccw->cmd_code = cmd_code;
++ ccw->flags = 0;
++ idal_buffer_set_cda(idal, ccw);
++ return ccw++;
++}
++
++/* Global vars */
++extern const char *tape_op_verbose[];
++
+ #endif /* for ifdef tape.h */
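The tape_ccw_* helpers above are meant to be chained when building a channel program, each call filling in one CCW and returning the address of the next. A sketch mirroring the pattern used by tape_std.c later in this patch; the command code is written as a literal because the NOP macro lives in tape_std.h, and the function itself is illustrative:

static int
example_nop_io(struct tape_device *device)
{
	struct tape_request *request;

	/* room for two CCWs, no extra data area needed */
	request = tape_alloc_request(2, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_NOP;

	/* the first CCW command-chains to the second, which ends the program */
	tape_ccw_cc(request->cpaddr, 0x03 /* NOP */, 0, NULL);
	tape_ccw_end(request->cpaddr + 1, 0x03 /* NOP */, 0, NULL);

	/* start the channel program and drop the request reference */
	return tape_do_io_free(device, request);
}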
+=== drivers/s390/char/sclp_con.c
+==================================================================
+--- drivers/s390/char/sclp_con.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/sclp_con.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,245 @@
++/*
++ * drivers/s390/char/sclp_con.c
++ * SCLP line mode console driver
++ *
++ * S390 version
++ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Martin Peschke <mpeschke at de.ibm.com>
++ * Martin Schwidefsky <schwidefsky at de.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/version.h>
++#include <linux/kmod.h>
++#include <linux/console.h>
++#include <linux/init.h>
++#include <linux/timer.h>
++#include <linux/sched.h>
++#include <linux/bootmem.h>
++
++#include "sclp.h"
++#include "sclp_rw.h"
++
++#define SCLP_CON_PRINT_HEADER "sclp console driver: "
++
++#define sclp_console_major 4 /* TTYAUX_MAJOR */
++#define sclp_console_minor 64
++#define sclp_console_name "ttyS"
++
++/* Lock to guard over changes to global variables */
++static spinlock_t sclp_con_lock;
++/* List of free pages that can be used for console output buffering */
++static struct list_head sclp_con_pages;
++/* List of full struct sclp_buffer structures ready for output */
++static struct list_head sclp_con_outqueue;
++/* Number of buffers currently being emitted (at most 1) plus the */
++/* number waiting on the output queue. */
++static int sclp_con_buffer_count;
++/* Pointer to current console buffer */
++static struct sclp_buffer *sclp_conbuf;
++/* Timer for delayed output of console messages */
++static struct timer_list sclp_con_timer;
++
++/* Output format for console messages */
++static unsigned short sclp_con_columns;
++static unsigned short sclp_con_width_htab;
++
++static void
++sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
++{
++ unsigned long flags;
++ struct sclp_buffer *next;
++ void *page;
++
++ /* Ignore return code - because console-writes aren't critical,
++ we do without a sophisticated error recovery mechanism. */
++ page = sclp_unmake_buffer(buffer);
++ spin_lock_irqsave(&sclp_con_lock, flags);
++ /* Remove buffer from outqueue */
++ list_del(&buffer->list);
++ sclp_con_buffer_count--;
++ list_add_tail((struct list_head *) page, &sclp_con_pages);
++ /* Check if there is a pending buffer on the out queue. */
++ next = NULL;
++ if (!list_empty(&sclp_con_outqueue))
++ next = list_entry(sclp_con_outqueue.next,
++ struct sclp_buffer, list);
++ spin_unlock_irqrestore(&sclp_con_lock, flags);
++ if (next != NULL)
++ sclp_emit_buffer(next, sclp_conbuf_callback);
++}
++
++static inline void
++sclp_conbuf_emit(void)
++{
++ struct sclp_buffer* buffer;
++ unsigned long flags;
++ int count;
++
++ spin_lock_irqsave(&sclp_con_lock, flags);
++ buffer = sclp_conbuf;
++ sclp_conbuf = NULL;
++ if (buffer == NULL) {
++ spin_unlock_irqrestore(&sclp_con_lock, flags);
++ return;
++ }
++ list_add_tail(&buffer->list, &sclp_con_outqueue);
++ count = sclp_con_buffer_count++;
++ spin_unlock_irqrestore(&sclp_con_lock, flags);
++ if (count == 0)
++ sclp_emit_buffer(buffer, sclp_conbuf_callback);
++}
++
++/*
++ * When this routine is called from the timer, we flush the
++ * temporary write buffer without further waiting on a final new line.
++ */
++static void
++sclp_console_timeout(unsigned long data)
++{
++ sclp_conbuf_emit();
++}
++
++/*
++ * Writes the given message to S390 system console
++ */
++static void
++sclp_console_write(struct console *console, const char *message,
++ unsigned int count)
++{
++ unsigned long flags;
++ void *page;
++ int written;
++
++ if (count == 0)
++ return;
++ spin_lock_irqsave(&sclp_con_lock, flags);
++ /*
++ * process escape characters, write message into buffer,
++ * send buffer to SCLP
++ */
++ do {
++ /* make sure we have a console output buffer */
++ if (sclp_conbuf == NULL) {
++ while (list_empty(&sclp_con_pages)) {
++ spin_unlock_irqrestore(&sclp_con_lock, flags);
++ sclp_sync_wait();
++ spin_lock_irqsave(&sclp_con_lock, flags);
++ }
++ page = sclp_con_pages.next;
++ list_del((struct list_head *) page);
++ sclp_conbuf = sclp_make_buffer(page, sclp_con_columns,
++ sclp_con_width_htab);
++ }
++ /* try to write the string to the current output buffer */
++ written = sclp_write(sclp_conbuf, (const unsigned char *)
++ message, count, 0);
++ if (written == -EFAULT || written == count)
++ break;
++ /*
++ * Not all characters could be written to the current
++ * output buffer. Emit the buffer, create a new buffer
++ * and then output the rest of the string.
++ */
++ spin_unlock_irqrestore(&sclp_con_lock, flags);
++ sclp_conbuf_emit();
++ spin_lock_irqsave(&sclp_con_lock, flags);
++ message += written;
++ count -= written;
++ } while (count > 0);
++ /* Setup timer to output current console buffer after 1/10 second */
++ if (sclp_conbuf != NULL && sclp_chars_in_buffer(sclp_conbuf) != 0 &&
++ !timer_pending(&sclp_con_timer)) {
++ init_timer(&sclp_con_timer);
++ sclp_con_timer.function = sclp_console_timeout;
++ sclp_con_timer.data = 0UL;
++ sclp_con_timer.expires = jiffies + HZ/10;
++ add_timer(&sclp_con_timer);
++ }
++ spin_unlock_irqrestore(&sclp_con_lock, flags);
++}
++
++/* returns the device number of the SCLP console */
++static kdev_t
++sclp_console_device(struct console *c)
++{
++ return mk_kdev(sclp_console_major, sclp_console_minor);
++}
++
++/*
++ * This routine is called from panic when the kernel
++ * is going to give up. We have to make sure that all buffers
++ * will be flushed to the SCLP.
++ */
++static void
++sclp_console_unblank(void)
++{
++ unsigned long flags;
++
++ sclp_conbuf_emit();
++ spin_lock_irqsave(&sclp_con_lock, flags);
++ if (timer_pending(&sclp_con_timer))
++ del_timer(&sclp_con_timer);
++ while (sclp_con_buffer_count > 0) {
++ spin_unlock_irqrestore(&sclp_con_lock, flags);
++ sclp_sync_wait();
++ spin_lock_irqsave(&sclp_con_lock, flags);
++ }
++ spin_unlock_irqrestore(&sclp_con_lock, flags);
++}
++
++/*
++ * used to register the SCLP console with the kernel and to
++ * give printk the necessary information
++ */
++static struct console sclp_console =
++{
++ .name = sclp_console_name,
++ .write = sclp_console_write,
++ .device = sclp_console_device,
++ .unblank = sclp_console_unblank,
++ .flags = CON_PRINTBUFFER,
++ .index = 0 /* ttyS0 */
++};
++
++/*
++ * called by console_init() in drivers/char/tty_io.c at boot-time.
++ */
++void __init
++sclp_console_init(void)
++{
++ void *page;
++ int i;
++
++ if (!CONSOLE_IS_SCLP)
++ return;
++ if (sclp_rw_init() != 0)
++ return;
++ /* Allocate pages for output buffering */
++ INIT_LIST_HEAD(&sclp_con_pages);
++ for (i = 0; i < MAX_CONSOLE_PAGES; i++) {
++ page = alloc_bootmem_low_pages(PAGE_SIZE);
++ if (page == NULL)
++ return;
++ list_add_tail((struct list_head *) page, &sclp_con_pages);
++ }
++ INIT_LIST_HEAD(&sclp_con_outqueue);
++ spin_lock_init(&sclp_con_lock);
++ sclp_con_buffer_count = 0;
++ sclp_conbuf = NULL;
++ init_timer(&sclp_con_timer);
++
++ /* Set output format */
++ if (MACHINE_IS_VM)
++ /*
++ * save 4 characters for the CPU number
++ * written at start of each line by VM/CP
++ */
++ sclp_con_columns = 76;
++ else
++ sclp_con_columns = 80;
++ sclp_con_width_htab = 8;
++
++ /* enable printk-access to this driver */
++ register_console(&sclp_console);
++}
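One idiom above is worth spelling out: the driver keeps its free output pages on a list by storing a struct list_head in the first bytes of each page itself, so the pool needs no separate bookkeeping allocations. A standalone sketch of the idiom, with illustrative names (sclp_tty.c below uses the same trick):

#include <linux/list.h>

static LIST_HEAD(example_free_pages);

/* return a page to the pool: its first bytes become the list node */
static void
example_put_page(void *page)
{
	list_add_tail((struct list_head *) page, &example_free_pages);
}

/* take a page from the pool, or NULL if the pool is empty */
static void *
example_get_page(void)
{
	struct list_head *entry;

	if (list_empty(&example_free_pages))
		return NULL;
	entry = example_free_pages.next;
	list_del(entry);
	return entry;	/* the node address is the page address */
}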
+=== drivers/s390/char/sclp_tty.c
+==================================================================
+--- drivers/s390/char/sclp_tty.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/sclp_tty.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,828 @@
++/*
++ * drivers/s390/char/sclp_tty.c
++ * SCLP line mode terminal driver.
++ *
++ * S390 version
++ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Martin Peschke <mpeschke at de.ibm.com>
++ * Martin Schwidefsky <schwidefsky at de.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/version.h>
++#include <linux/kmod.h>
++#include <linux/tty.h>
++#include <linux/tty_driver.h>
++#include <linux/sched.h>
++#include <linux/wait.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <asm/uaccess.h>
++
++#include "ctrlchar.h"
++#include "sclp.h"
++#include "sclp_rw.h"
++#include "sclp_tty.h"
++
++#define SCLP_TTY_PRINT_HEADER "sclp tty driver: "
++
++/*
++ * size of a buffer that collects single characters coming in
++ * via sclp_tty_put_char()
++ */
++#define SCLP_TTY_BUF_SIZE 512
++
++/*
++ * There is exactly one SCLP terminal, so we can keep things simple
++ * and allocate all variables statically.
++ */
++
++/* Lock to guard over changes to global variables. */
++static spinlock_t sclp_tty_lock;
++/* List of free pages that can be used for console output buffering. */
++static struct list_head sclp_tty_pages;
++/* List of full struct sclp_buffer structures ready for output. */
++static struct list_head sclp_tty_outqueue;
++/* Number of buffers currently being emitted. */
++static int sclp_tty_buffer_count;
++/* Pointer to current console buffer. */
++static struct sclp_buffer *sclp_ttybuf;
++/* Timer for delayed output of console messages. */
++static struct timer_list sclp_tty_timer;
++/* Waitqueue to wait for buffers to get empty. */
++static wait_queue_head_t sclp_tty_waitq;
++
++static struct tty_struct *sclp_tty;
++static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE];
++static unsigned short int sclp_tty_chars_count;
++
++static struct tty_driver sclp_tty_driver;
++static struct tty_struct * sclp_tty_table[1];
++static struct termios * sclp_tty_termios[1];
++static struct termios * sclp_tty_termios_locked[1];
++static int sclp_tty_refcount = 0;
++
++extern struct termios tty_std_termios;
++
++static struct sclp_ioctls sclp_ioctls;
++static struct sclp_ioctls sclp_ioctls_init =
++{
++ 8, /* 1 hor. tab. = 8 spaces */
++ 0, /* no echo of input by this driver */
++ 80, /* 80 characters/line */
++ 1, /* write after 1/10 s without final new line */
++ MAX_KMEM_PAGES, /* quick fix: avoid __alloc_pages */
++ MAX_KMEM_PAGES, /* take 32/64 pages from kernel memory, */
++ 0, /* do not convert to lower case */
++	0x6c		/* to separate upper and lower case */
++ /* ('%' in EBCDIC) */
++};
++
++/* This routine is called whenever we try to open a SCLP terminal. */
++static int
++sclp_tty_open(struct tty_struct *tty, struct file *filp)
++{
++ sclp_tty = tty;
++ tty->driver_data = NULL;
++ tty->low_latency = 0;
++ return 0;
++}
++
++/* This routine is called when the SCLP terminal is closed. */
++static void
++sclp_tty_close(struct tty_struct *tty, struct file *filp)
++{
++ if (tty->count > 1)
++ return;
++ sclp_tty = NULL;
++}
++
++/* execute commands to control the i/o behaviour of the SCLP tty at runtime */
++static int
++sclp_tty_ioctl(struct tty_struct *tty, struct file * file,
++ unsigned int cmd, unsigned long arg)
++{
++ unsigned long flags;
++ unsigned int obuf;
++ int check;
++ int rc;
++
++ if (tty->flags & (1 << TTY_IO_ERROR))
++ return -EIO;
++ rc = 0;
++ check = 0;
++ switch (cmd) {
++ case TIOCSCLPSHTAB:
++ /* set width of horizontal tab */
++ if (get_user(sclp_ioctls.htab, (unsigned short *) arg))
++ rc = -EFAULT;
++ else
++ check = 1;
++ break;
++ case TIOCSCLPGHTAB:
++ /* get width of horizontal tab */
++ if (put_user(sclp_ioctls.htab, (unsigned short *) arg))
++ rc = -EFAULT;
++ break;
++ case TIOCSCLPSECHO:
++ /* enable/disable echo of input */
++ if (get_user(sclp_ioctls.echo, (unsigned char *) arg))
++ rc = -EFAULT;
++ break;
++ case TIOCSCLPGECHO:
++ /* Is echo of input enabled ? */
++ if (put_user(sclp_ioctls.echo, (unsigned char *) arg))
++ rc = -EFAULT;
++ break;
++ case TIOCSCLPSCOLS:
++ /* set number of columns for output */
++ if (get_user(sclp_ioctls.columns, (unsigned short *) arg))
++ rc = -EFAULT;
++ else
++ check = 1;
++ break;
++ case TIOCSCLPGCOLS:
++ /* get number of columns for output */
++ if (put_user(sclp_ioctls.columns, (unsigned short *) arg))
++ rc = -EFAULT;
++ break;
++ case TIOCSCLPSNL:
++ /* enable/disable writing without final new line character */
++ if (get_user(sclp_ioctls.final_nl, (signed char *) arg))
++ rc = -EFAULT;
++ break;
++ case TIOCSCLPGNL:
++ /* Is writing without final new line character enabled ? */
++ if (put_user(sclp_ioctls.final_nl, (signed char *) arg))
++ rc = -EFAULT;
++ break;
++ case TIOCSCLPSOBUF:
++ /*
++		 * set the maximum buffer size for output; it is rounded up
++		 * to the next 4kB boundary and stored as a number of SCCBs
++		 * (4kB buffers). Limitation: 256 x 4kB
++ */
++ if (get_user(obuf, (unsigned int *) arg) == 0) {
++ if (obuf & 0xFFF)
++ sclp_ioctls.max_sccb = (obuf >> 12) + 1;
++ else
++ sclp_ioctls.max_sccb = (obuf >> 12);
++ } else
++ rc = -EFAULT;
++ break;
++ case TIOCSCLPGOBUF:
++		/* get the maximum buffer size for output */
++ obuf = sclp_ioctls.max_sccb << 12;
++ if (put_user(obuf, (unsigned int *) arg))
++ rc = -EFAULT;
++ break;
++ case TIOCSCLPGKBUF:
++		/* get the number of buffers obtained from the kernel at startup */
++ if (put_user(sclp_ioctls.kmem_sccb, (unsigned short *) arg))
++ rc = -EFAULT;
++ break;
++ case TIOCSCLPSCASE:
++ /* enable/disable conversion from upper to lower case */
++ if (get_user(sclp_ioctls.tolower, (unsigned char *) arg))
++ rc = -EFAULT;
++ break;
++ case TIOCSCLPGCASE:
++ /* Is conversion from upper to lower case of input enabled? */
++ if (put_user(sclp_ioctls.tolower, (unsigned char *) arg))
++ rc = -EFAULT;
++ break;
++ case TIOCSCLPSDELIM:
++ /*
++ * set special character used for separating upper and
++ * lower case, 0x00 disables this feature
++ */
++ if (get_user(sclp_ioctls.delim, (unsigned char *) arg))
++ rc = -EFAULT;
++ break;
++ case TIOCSCLPGDELIM:
++ /*
++ * get special character used for separating upper and
++ * lower case, 0x00 disables this feature
++ */
++ if (put_user(sclp_ioctls.delim, (unsigned char *) arg))
++ rc = -EFAULT;
++ break;
++ case TIOCSCLPSINIT:
++ /* set initial (default) sclp ioctls */
++ sclp_ioctls = sclp_ioctls_init;
++ check = 1;
++ break;
++ default:
++ rc = -ENOIOCTLCMD;
++ break;
++ }
++ if (check) {
++ spin_lock_irqsave(&sclp_tty_lock, flags);
++ if (sclp_ttybuf != NULL) {
++ sclp_set_htab(sclp_ttybuf, sclp_ioctls.htab);
++ sclp_set_columns(sclp_ttybuf, sclp_ioctls.columns);
++ }
++ spin_unlock_irqrestore(&sclp_tty_lock, flags);
++ }
++ return rc;
++}
++
++/*
++ * This routine returns the numbers of characters the tty driver
++ * will accept for queuing to be written. This number is subject
++ * to change as output buffers get emptied, or if the output flow
++ * control is acted. This is not an exact number because not every
++ * character needs the same space in the sccb. The worst case is
++ * a string of newlines. Every newlines creates a new mto which
++ * needs 8 bytes.
++ */
++static int
++sclp_tty_write_room (struct tty_struct *tty)
++{
++ unsigned long flags;
++ struct list_head *l;
++ int count;
++
++ spin_lock_irqsave(&sclp_tty_lock, flags);
++ count = 0;
++ if (sclp_ttybuf != NULL)
++ count = sclp_buffer_space(sclp_ttybuf) / sizeof(struct mto);
++ list_for_each(l, &sclp_tty_pages)
++ count += NR_EMPTY_MTO_PER_SCCB;
++ spin_unlock_irqrestore(&sclp_tty_lock, flags);
++ return count;
++}
++
++static void
++sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc)
++{
++ unsigned long flags;
++ struct sclp_buffer *next;
++ void *page;
++
++ /* Ignore return code - because tty-writes aren't critical,
++ we do without a sophisticated error recovery mechanism. */
++ page = sclp_unmake_buffer(buffer);
++ spin_lock_irqsave(&sclp_tty_lock, flags);
++ /* Remove buffer from outqueue */
++ list_del(&buffer->list);
++ sclp_tty_buffer_count--;
++ list_add_tail((struct list_head *) page, &sclp_tty_pages);
++ /* Check if there is a pending buffer on the out queue. */
++ next = NULL;
++ if (!list_empty(&sclp_tty_outqueue))
++ next = list_entry(sclp_tty_outqueue.next,
++ struct sclp_buffer, list);
++ spin_unlock_irqrestore(&sclp_tty_lock, flags);
++ if (next != NULL)
++ sclp_emit_buffer(next, sclp_ttybuf_callback);
++ wake_up(&sclp_tty_waitq);
++ /* check if the tty needs a wake up call */
++ if (sclp_tty != NULL) {
++ if ((sclp_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
++ sclp_tty->ldisc.write_wakeup)
++ (sclp_tty->ldisc.write_wakeup)(sclp_tty);
++ wake_up_interruptible(&sclp_tty->write_wait);
++ }
++}
++
++static inline void
++__sclp_ttybuf_emit(struct sclp_buffer *buffer)
++{
++ unsigned long flags;
++ int count;
++
++ spin_lock_irqsave(&sclp_tty_lock, flags);
++ list_add_tail(&buffer->list, &sclp_tty_outqueue);
++ count = sclp_tty_buffer_count++;
++ spin_unlock_irqrestore(&sclp_tty_lock, flags);
++
++ if (count == 0)
++ sclp_emit_buffer(buffer, sclp_ttybuf_callback);
++}
++
++/*
++ * When this routine is called from the timer, we flush the
++ * temporary write buffer.
++ */
++static void
++sclp_tty_timeout(unsigned long data)
++{
++ unsigned long flags;
++ struct sclp_buffer *buf;
++
++ spin_lock_irqsave(&sclp_tty_lock, flags);
++ buf = sclp_ttybuf;
++ sclp_ttybuf = NULL;
++ spin_unlock_irqrestore(&sclp_tty_lock, flags);
++
++ if (buf != NULL) {
++ __sclp_ttybuf_emit(buf);
++ }
++}
++
++/*
++ * Write a string to the sclp tty.
++ */
++static void
++sclp_tty_write_string(const unsigned char *str, int count, int from_user)
++{
++ unsigned long flags;
++ void *page;
++ int written;
++ struct sclp_buffer *buf;
++
++ if (count <= 0)
++ return;
++ spin_lock_irqsave(&sclp_tty_lock, flags);
++ do {
++ /* Create a sclp output buffer if none exists yet */
++ if (sclp_ttybuf == NULL) {
++ while (list_empty(&sclp_tty_pages)) {
++ spin_unlock_irqrestore(&sclp_tty_lock, flags);
++ if (in_interrupt())
++ sclp_sync_wait();
++ else
++ wait_event(sclp_tty_waitq,
++ !list_empty(&sclp_tty_pages));
++ spin_lock_irqsave(&sclp_tty_lock, flags);
++ }
++ page = sclp_tty_pages.next;
++ list_del((struct list_head *) page);
++ sclp_ttybuf = sclp_make_buffer(page,
++ sclp_ioctls.columns,
++ sclp_ioctls.htab);
++ }
++ /* try to write the string to the current output buffer */
++ written = sclp_write(sclp_ttybuf, str, count, from_user);
++ if (written == -EFAULT || written == count)
++ break;
++ /*
++ * Not all characters could be written to the current
++ * output buffer. Emit the buffer, create a new buffer
++ * and then output the rest of the string.
++ */
++ buf = sclp_ttybuf;
++ sclp_ttybuf = NULL;
++ spin_unlock_irqrestore(&sclp_tty_lock, flags);
++ __sclp_ttybuf_emit(buf);
++ spin_lock_irqsave(&sclp_tty_lock, flags);
++ str += written;
++ count -= written;
++ } while (count > 0);
++ /* Setup timer to output current console buffer after 1/10 second */
++ if (sclp_ioctls.final_nl) {
++ if (sclp_ttybuf != NULL &&
++ sclp_chars_in_buffer(sclp_ttybuf) != 0 &&
++ !timer_pending(&sclp_tty_timer)) {
++ init_timer(&sclp_tty_timer);
++ sclp_tty_timer.function = sclp_tty_timeout;
++ sclp_tty_timer.data = 0UL;
++ sclp_tty_timer.expires = jiffies + HZ/10;
++ add_timer(&sclp_tty_timer);
++ }
++ } else {
++ if (sclp_ttybuf != NULL &&
++ sclp_chars_in_buffer(sclp_ttybuf) != 0) {
++ buf = sclp_ttybuf;
++ sclp_ttybuf = NULL;
++ spin_unlock_irqrestore(&sclp_tty_lock, flags);
++ __sclp_ttybuf_emit(buf);
++ spin_lock_irqsave(&sclp_tty_lock, flags);
++ }
++ }
++ spin_unlock_irqrestore(&sclp_tty_lock, flags);
++}
++
++/*
++ * This routine is called by the kernel to write a series of characters to the
++ * tty device. The characters may come from user space or kernel space. This
++ * routine will return the number of characters actually accepted for writing.
++ */
++static int
++sclp_tty_write(struct tty_struct *tty, int from_user,
++ const unsigned char *buf, int count)
++{
++ if (sclp_tty_chars_count > 0) {
++ sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
++ sclp_tty_chars_count = 0;
++ }
++ sclp_tty_write_string(buf, count, from_user);
++ return count;
++}
++
++/*
++ * This routine is called by the kernel to write a single character to the tty
++ * device. If the kernel uses this routine, it must call the flush_chars()
++ * routine (if defined) when it is done stuffing characters into the driver.
++ *
++ * Characters provided to sclp_tty_put_char() are buffered by the SCLP driver.
++ * If the given character is a '\n' the contents of the SCLP write buffer
++ * - including previous characters from sclp_tty_put_char() and strings from
++ * sclp_write() without final '\n' - will be written.
++ */
++static void
++sclp_tty_put_char(struct tty_struct *tty, unsigned char ch)
++{
++ sclp_tty_chars[sclp_tty_chars_count++] = ch;
++ if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) {
++ sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
++ sclp_tty_chars_count = 0;
++ }
++}
++
++/*
++ * This routine is called by the kernel after it has written a series of
++ * characters to the tty device using put_char().
++ */
++static void
++sclp_tty_flush_chars(struct tty_struct *tty)
++{
++ if (sclp_tty_chars_count > 0) {
++ sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
++ sclp_tty_chars_count = 0;
++ }
++}
++
++/*
++ * This routine returns the number of characters in the write buffer of the
++ * SCLP driver. The provided number includes all characters that are stored
++ * in the SCCB (will be written next time the SCLP is not busy) as well as
++ * characters in the write buffer (which will not be written as long as a
++ * final line feed is missing).
++ */
++static int
++sclp_tty_chars_in_buffer(struct tty_struct *tty)
++{
++ unsigned long flags;
++ struct list_head *l;
++ struct sclp_buffer *t;
++ int count;
++
++ spin_lock_irqsave(&sclp_tty_lock, flags);
++ count = 0;
++ if (sclp_ttybuf != NULL)
++ count = sclp_chars_in_buffer(sclp_ttybuf);
++ list_for_each(l, &sclp_tty_outqueue) {
++ t = list_entry(l, struct sclp_buffer, list);
++		count += sclp_chars_in_buffer(t);
++ }
++ spin_unlock_irqrestore(&sclp_tty_lock, flags);
++ return count;
++}
++
++/*
++ * removes all content from buffers of low level driver
++ */
++static void
++sclp_tty_flush_buffer(struct tty_struct *tty)
++{
++ if (sclp_tty_chars_count > 0) {
++ sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
++ sclp_tty_chars_count = 0;
++ }
++}
++
++/*
++ * push input to tty
++ */
++static void
++sclp_tty_input(unsigned char* buf, unsigned int count)
++{
++ unsigned int cchar;
++
++ /*
++ * If this tty driver is currently closed
++ * then throw the received input away.
++ */
++ if (sclp_tty == NULL)
++ return;
++ cchar = ctrlchar_handle(buf, count, sclp_tty);
++ switch (cchar & CTRLCHAR_MASK) {
++ case CTRLCHAR_SYSRQ:
++ break;
++ case CTRLCHAR_CTRL:
++ sclp_tty->flip.count++;
++ *sclp_tty->flip.flag_buf_ptr++ = TTY_NORMAL;
++ *sclp_tty->flip.char_buf_ptr++ = cchar;
++ tty_flip_buffer_push(sclp_tty);
++ break;
++ case CTRLCHAR_NONE:
++ /* send (normal) input to line discipline */
++ memcpy(sclp_tty->flip.char_buf_ptr, buf, count);
++ if (count < 2 ||
++ (strncmp ((const char *) buf + count - 2, "^n", 2) &&
++ strncmp ((const char *) buf + count - 2, "\0252n", 2))) {
++ sclp_tty->flip.char_buf_ptr[count] = '\n';
++ count++;
++ } else
++ count -= 2;
++ memset(sclp_tty->flip.flag_buf_ptr, TTY_NORMAL, count);
++ sclp_tty->flip.char_buf_ptr += count;
++ sclp_tty->flip.flag_buf_ptr += count;
++ sclp_tty->flip.count += count;
++ tty_flip_buffer_push(sclp_tty);
++ break;
++ }
++}
++
++/*
++ * take an EBCDIC string in upper/lower case,
++ * find the characters in lower/upper case separated by a special character,
++ * modify the original string,
++ * return the length of the resulting string
++ */
++static int
++sclp_switch_cases(unsigned char *buf, int count,
++ unsigned char delim, int tolower)
++{
++ unsigned char *ip, *op;
++ int toggle;
++
++ /* initially changing case is off */
++ toggle = 0;
++ ip = op = buf;
++ while (count-- > 0) {
++ /* compare with special character */
++ if (*ip == delim) {
++ /* followed by another special character? */
++ if (count && ip[1] == delim) {
++ /*
++ * ... then put a single copy of the special
++ * character to the output string
++ */
++ *op++ = *ip++;
++ count--;
++ } else
++ /*
++				 * ... special character followed by a normal
++ * character toggles the case change behaviour
++ */
++ toggle = ~toggle;
++ /* skip special character */
++ ip++;
++ } else
++ /* not the special character */
++ if (toggle)
++ /* but case switching is on */
++ if (tolower)
++ /* switch to uppercase */
++ *op++ = _ebc_toupper[(int) *ip++];
++ else
++ /* switch to lowercase */
++ *op++ = _ebc_tolower[(int) *ip++];
++ else
++ /* no case switching, copy the character */
++ *op++ = *ip++;
++ }
++ /* return length of reformatted string. */
++ return op - buf;
++}
++
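To make the toggle semantics above concrete, here is a standalone ASCII re-implementation for illustration only; the driver itself works on EBCDIC via _ebc_toupper/_ebc_tolower:

#include <ctype.h>
#include <stdio.h>

static int
switch_cases_ascii(unsigned char *buf, int count,
		   unsigned char delim, int to_lower)
{
	unsigned char *ip = buf, *op = buf;
	int toggle = 0;

	while (count-- > 0) {
		if (*ip == delim) {
			if (count && ip[1] == delim) {
				*op++ = *ip++;	/* "%%" emits one literal '%' */
				count--;
			} else
				toggle = !toggle;
			ip++;			/* skip the delimiter */
		} else if (toggle)
			*op++ = to_lower ? toupper(*ip++) : tolower(*ip++);
		else
			*op++ = *ip++;
	}
	return op - buf;
}

int main(void)
{
	unsigned char s[] = "abc%DEF%ghi";
	int n = switch_cases_ascii(s, sizeof(s) - 1, '%', 0);
	printf("%.*s\n", n, (char *) s);	/* prints "abcdefghi" */
	return 0;
}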
++static void
++sclp_get_input(unsigned char *start, unsigned char *end)
++{
++ int count;
++
++ count = end - start;
++ /*
++ * if set in ioctl convert EBCDIC to lower case
++ * (modify original input in SCCB)
++ */
++ if (sclp_ioctls.tolower)
++ EBC_TOLOWER(start, count);
++
++ /*
++ * if set in ioctl find out characters in lower or upper case
++ * (depends on current case) separated by a special character,
++ * works on EBCDIC
++ */
++ if (sclp_ioctls.delim)
++ count = sclp_switch_cases(start, count,
++ sclp_ioctls.delim,
++ sclp_ioctls.tolower);
++
++ /* convert EBCDIC to ASCII (modify original input in SCCB) */
++ sclp_ebcasc_str(start, count);
++
++ /* if set in ioctl write operators input to console */
++ if (sclp_ioctls.echo)
++ sclp_tty_write(sclp_tty, 0, start, count);
++
++ /* transfer input to high level driver */
++ sclp_tty_input(start, count);
++}
++
++static inline struct gds_vector *
++find_gds_vector(struct gds_vector *start, struct gds_vector *end, u16 id)
++{
++ struct gds_vector *vec;
++
++ for (vec = start; vec < end; (void *) vec += vec->length)
++ if (vec->gds_id == id)
++ return vec;
++ return NULL;
++}
++
++static inline struct gds_subvector *
++find_gds_subvector(struct gds_subvector *start,
++ struct gds_subvector *end, u8 key)
++{
++ struct gds_subvector *subvec;
++
++ for (subvec = start; subvec < end; (void *) subvec += subvec->length)
++ if (subvec->key == key)
++ return subvec;
++ return NULL;
++}
++
++static inline void
++sclp_eval_selfdeftextmsg(struct gds_subvector *start,
++ struct gds_subvector *end)
++{
++ struct gds_subvector *subvec;
++
++ subvec = start;
++ while (subvec < end) {
++ subvec = find_gds_subvector(subvec, end, 0x30);
++ if (!subvec)
++ break;
++ sclp_get_input((unsigned char *)(subvec + 1),
++ (unsigned char *) subvec + subvec->length);
++ (void *) subvec += subvec->length;
++ }
++}
++
++static inline void
++sclp_eval_textcmd(struct gds_subvector *start,
++ struct gds_subvector *end)
++{
++ struct gds_subvector *subvec;
++
++ subvec = start;
++ while (subvec < end) {
++ subvec = find_gds_subvector(subvec, end,
++ GDS_KEY_SelfDefTextMsg);
++ if (!subvec)
++ break;
++ sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1),
++ (void *)subvec + subvec->length);
++ (void *) subvec += subvec->length;
++ }
++}
++
++static inline void
++sclp_eval_cpmsu(struct gds_vector *start, struct gds_vector *end)
++{
++ struct gds_vector *vec;
++
++ vec = start;
++ while (vec < end) {
++ vec = find_gds_vector(vec, end, GDS_ID_TextCmd);
++ if (!vec)
++ break;
++ sclp_eval_textcmd((struct gds_subvector *)(vec + 1),
++ (void *) vec + vec->length);
++ (void *) vec += vec->length;
++ }
++}
++
++
++static inline void
++sclp_eval_mdsmu(struct gds_vector *start, void *end)
++{
++ struct gds_vector *vec;
++
++ vec = find_gds_vector(start, end, GDS_ID_CPMSU);
++ if (vec)
++ sclp_eval_cpmsu(vec + 1, (void *) vec + vec->length);
++}
++
++static void
++sclp_tty_receiver(struct evbuf_header *evbuf)
++{
++ struct gds_vector *start, *end, *vec;
++
++ start = (struct gds_vector *)(evbuf + 1);
++ end = (void *) evbuf + evbuf->length;
++ vec = find_gds_vector(start, end, GDS_ID_MDSMU);
++ if (vec)
++ sclp_eval_mdsmu(vec + 1, (void *) vec + vec->length);
++}
++
++static void
++sclp_tty_state_change(struct sclp_register *reg)
++{
++}
++
++static struct sclp_register sclp_input_event =
++{
++ .receive_mask = EvTyp_OpCmd_Mask | EvTyp_PMsgCmd_Mask,
++ .state_change_fn = sclp_tty_state_change,
++ .receiver_fn = sclp_tty_receiver
++};
++
++void
++sclp_tty_init(void)
++{
++ void *page;
++ int i;
++ int rc;
++
++ if (!CONSOLE_IS_SCLP)
++ return;
++ rc = sclp_rw_init();
++ if (rc != 0) {
++ printk(KERN_ERR SCLP_TTY_PRINT_HEADER
++ "could not register tty - "
++ "sclp_rw_init returned %d\n", rc);
++ return;
++ }
++ /* Allocate pages for output buffering */
++ INIT_LIST_HEAD(&sclp_tty_pages);
++ for (i = 0; i < MAX_KMEM_PAGES; i++) {
++ page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
++ if (page == NULL)
++ return;
++ list_add_tail((struct list_head *) page, &sclp_tty_pages);
++ }
++ INIT_LIST_HEAD(&sclp_tty_outqueue);
++ spin_lock_init(&sclp_tty_lock);
++ init_waitqueue_head(&sclp_tty_waitq);
++ init_timer(&sclp_tty_timer);
++ sclp_ttybuf = NULL;
++ sclp_tty_buffer_count = 0;
++ if (MACHINE_IS_VM) {
++ /*
++ * save 4 characters for the CPU number
++ * written at start of each line by VM/CP
++ */
++ sclp_ioctls_init.columns = 76;
++		/* convert input lines to lowercase */
++ sclp_ioctls_init.tolower = 1;
++ }
++ sclp_ioctls = sclp_ioctls_init;
++ sclp_tty_chars_count = 0;
++ sclp_tty = NULL;
++
++ ctrlchar_init();
++
++ if (sclp_register(&sclp_input_event) != 0)
++ return;
++
++ memset (&sclp_tty_driver, 0, sizeof(struct tty_driver));
++ sclp_tty_driver.magic = TTY_DRIVER_MAGIC;
++ sclp_tty_driver.driver_name = "sclp_line";
++ sclp_tty_driver.name = "ttyS";
++ sclp_tty_driver.name_base = 0;
++ sclp_tty_driver.major = TTY_MAJOR;
++ sclp_tty_driver.minor_start = 64;
++ sclp_tty_driver.num = 1;
++ sclp_tty_driver.type = TTY_DRIVER_TYPE_SYSTEM;
++ sclp_tty_driver.subtype = SYSTEM_TYPE_TTY;
++ sclp_tty_driver.init_termios = tty_std_termios;
++ sclp_tty_driver.flags = TTY_DRIVER_REAL_RAW;
++ sclp_tty_driver.refcount = &sclp_tty_refcount;
++ /* sclp_tty_driver.proc_entry ? */
++ sclp_tty_driver.table = sclp_tty_table;
++ sclp_tty_driver.termios = sclp_tty_termios;
++ sclp_tty_driver.termios_locked = sclp_tty_termios_locked;
++ sclp_tty_driver.open = sclp_tty_open;
++ sclp_tty_driver.close = sclp_tty_close;
++ sclp_tty_driver.write = sclp_tty_write;
++ sclp_tty_driver.put_char = sclp_tty_put_char;
++ sclp_tty_driver.flush_chars = sclp_tty_flush_chars;
++ sclp_tty_driver.write_room = sclp_tty_write_room;
++ sclp_tty_driver.chars_in_buffer = sclp_tty_chars_in_buffer;
++ sclp_tty_driver.flush_buffer = sclp_tty_flush_buffer;
++ sclp_tty_driver.ioctl = sclp_tty_ioctl;
++ /*
++	 * No need for these functions because they would only be called
++	 * when the line discipline is close to full, i.e. when nearly 4kB
++	 * of input data have accumulated. It is very unlikely that the
++	 * operator can enter lines quickly enough to overrun the line
++	 * discipline. Besides, the n_tty line discipline does not try to
++	 * call such functions if the pointers are set to NULL. Finally,
++	 * there is nothing useful to do within these functions: we cannot
++	 * prevent the operator or the SCLP from delivering input. For
++	 * these reasons it does not seem worth implementing a buffering
++	 * mechanism.
++ */
++ sclp_tty_driver.throttle = NULL;
++ sclp_tty_driver.unthrottle = NULL;
++ sclp_tty_driver.send_xchar = NULL;
++ sclp_tty_driver.set_termios = NULL;
++ sclp_tty_driver.set_ldisc = NULL;
++ sclp_tty_driver.stop = NULL;
++ sclp_tty_driver.start = NULL;
++ sclp_tty_driver.hangup = NULL;
++ sclp_tty_driver.break_ctl = NULL;
++ sclp_tty_driver.wait_until_sent = NULL;
++ sclp_tty_driver.read_proc = NULL;
++ sclp_tty_driver.write_proc = NULL;
++
++ rc = tty_register_driver(&sclp_tty_driver);
++ if (rc != 0)
++ printk(KERN_ERR SCLP_TTY_PRINT_HEADER
++ "could not register tty - "
++ "sclp_drv_register returned %d\n", rc);
++}
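The runtime ioctls handled by sclp_tty_ioctl() above can be driven from user space. A hedged sketch, assuming the TIOCSCLP* constants come from sclp_tty.h and that the line is reachable as /dev/ttyS0 (major 4, minor 64, matching the registration above); the helper is illustrative:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "sclp_tty.h"		/* TIOCSCLPSCOLS and friends */

int
example_set_columns(unsigned short columns)
{
	int fd, rc;

	fd = open("/dev/ttyS0", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return -1;
	rc = ioctl(fd, TIOCSCLPSCOLS, &columns);
	close(fd);
	return rc;
}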
+=== drivers/s390/char/ctrlchar.c
+==================================================================
+--- drivers/s390/char/ctrlchar.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/ctrlchar.c (/trunk/2.4.27) (revision 52)
+@@ -9,6 +9,7 @@
+
+ #include <linux/config.h>
+ #include <linux/stddef.h>
++#include <linux/errno.h>
+ #include <linux/sysrq.h>
+ #include <linux/ctype.h>
+ #include <linux/interrupt.h>
+=== drivers/s390/char/tape_std.c
+==================================================================
+--- drivers/s390/char/tape_std.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tape_std.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,779 @@
++/*
++ * drivers/s390/char/tape_std.c
++ * standard tape device functions for ibm tapes.
++ *
++ * S390 and zSeries version
++ * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Carsten Otte <cotte at de.ibm.com>
++ * Michael Holzheu <holzheu at de.ibm.com>
++ * Tuan Ngo-Anh <ngoanh at de.ibm.com>
++ * Martin Schwidefsky <schwidefsky at de.ibm.com>
++ * Stefan Bader <shbader at de.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/version.h>
++#include <linux/stddef.h>
++#include <linux/kernel.h>
++#include <linux/timer.h>
++#ifdef CONFIG_S390_TAPE_BLOCK
++#include <linux/blkdev.h>
++#endif
++
++#include <asm/types.h>
++#include <asm/idals.h>
++#include <asm/ebcdic.h>
++#include <asm/tape390.h>
++
++#define TAPE_DBF_AREA tape_core_dbf
++
++#include "tape.h"
++#include "tape_std.h"
++
++#define PRINTK_HEADER "T3xxx:"
++#define ZLINUX_PASSWD "zLinux PWD"
++
++/*
++ * tape_std_assign
++ */
++int
++tape_std_assign(struct tape_device *device)
++{
++ struct tape_request *request;
++
++ request = tape_alloc_request(2, 11);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++
++ request->op = TO_ASSIGN;
++
++ /*
++	 * According to the documentation, assign requests should fail
++	 * with the 'assigned elsewhere' bit set if the tape is already
++	 * assigned to another host. In reality, however, the request
++	 * seems to hang forever. Therefore we just set a timeout for it.
++ */
++ init_timer(&request->timeout);
++ request->timeout.expires = jiffies + 2 * HZ;
++
++ /* Setup the CCWs */
++ tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata);
++ tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
++
++ return tape_do_io_free(device, request);
++}
++
++/*
++ * tape_std_unassign
++ */
++int
++tape_std_unassign (struct tape_device *device)
++{
++ struct tape_request *request;
++
++ request = tape_alloc_request(2, 11);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++ request->op = TO_UNASSIGN;
++ tape_ccw_cc(request->cpaddr, UNASSIGN, 11, request->cpdata);
++ tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
++ return tape_do_io_free(device, request);
++}
++
++#ifdef TAPE390_FORCE_UNASSIGN
++/*
++ * tape_std_force_unassign: forces assignment from another host.
++ * (Since we need a password this works only with other zLinux hosts!)
++ */
++int
++tape_std_force_unassign(struct tape_device *device)
++{
++ struct tape_request *request;
++ struct tape_ca_data *ca_data1;
++ struct tape_ca_data *ca_data2;
++
++ request = tape_alloc_request(2, 24);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++
++ request->op = TO_BREAKASS;
++ ca_data1 = (struct tape_ca_data *)
++ (((char *) request->cpdata));
++ ca_data2 = (struct tape_ca_data *)
++ (((char *) request->cpdata) + 12);
++
++ ca_data1->function = 0x80; /* Conditional enable */
++ strcpy(ca_data1->password, ZLINUX_PASSWD);
++ ASCEBC(ca_data1->password, 11);
++ ca_data2->function = 0x40; /* Conditional disable */
++ memcpy(ca_data2->password, ca_data1->password, 11);
++
++ tape_ccw_cc(request->cpaddr, CONTROL_ACCESS, 12, ca_data1);
++ tape_ccw_end(request->cpaddr + 1, CONTROL_ACCESS, 12, ca_data2);
++
++ return tape_do_io_free(device, request);
++}
++#endif
++
++/*
++ * TAPE390_DISPLAY: Show a string on the tape display.
++ */
++int
++tape_std_display(struct tape_device *device, struct display_struct *disp)
++{
++ struct tape_request *request;
++ int rc;
++
++ request = tape_alloc_request(2, 17);
++ if (IS_ERR(request)) {
++ DBF_EVENT(3, "TAPE: load display failed\n");
++ return PTR_ERR(request);
++ }
++
++ request->op = TO_DIS;
++ *(unsigned char *) request->cpdata = disp->cntrl;
++ DBF_EVENT(5, "TAPE: display cntrl=%04x\n", disp->cntrl);
++ memcpy(((unsigned char *) request->cpdata) + 1, disp->message1, 8);
++ memcpy(((unsigned char *) request->cpdata) + 9, disp->message2, 8);
++ ASCEBC(((unsigned char*) request->cpdata) + 1, 16);
++
++ tape_ccw_cc(request->cpaddr, LOAD_DISPLAY, 17, request->cpdata);
++ tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
++
++ rc = tape_do_io_interruptible(device, request);
++ tape_put_request(request);
++ return rc;
++}
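
The TAPE390_DISPLAY path above is driven from user space through the driver's
ioctl handler (see tape_34xx_ioctl further down in this diff). A minimal,
hedged sketch of a caller follows; the field names cntrl, message1 and
message2 are taken from the memcpy calls above, while the device node name is
an assumption:

    /* Hedged user-space sketch: show two 8-character messages on the
     * drive display via TAPE390_DISPLAY. The node /dev/ntibm0 is an
     * assumption; the real struct layout lives in asm/tape390.h. */
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <asm/tape390.h>

    int show_message(void)
    {
            struct display_struct disp;
            int fd, rc;

            fd = open("/dev/ntibm0", O_RDONLY);
            if (fd < 0)
                    return -1;
            memset(&disp, 0, sizeof(disp));
            disp.cntrl = 0;                       /* default display mode */
            memcpy(disp.message1, "BACKUP  ", 8); /* exactly 8 characters */
            memcpy(disp.message2, "RUNNING ", 8);
            rc = ioctl(fd, TAPE390_DISPLAY, &disp);
            close(fd);
            return rc;
    }
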
++
++/*
++ * Read block id.
++ */
++int
++tape_std_read_block_id(struct tape_device *device, unsigned int *bid)
++{
++ struct tape_request *request;
++ struct {
++ unsigned int channel_block_id;
++ unsigned int device_block_id;
++ } __attribute__ ((packed)) *rbi_data;
++ int rc;
++
++ request = tape_alloc_request(3, 8);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++ request->op = TO_RBI;
++
++ /* setup ccws */
++ tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
++ tape_ccw_cc(request->cpaddr + 1, READ_BLOCK_ID, 8, request->cpdata);
++ tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
++
++ /* execute it */
++ rc = tape_do_io(device, request);
++ if (rc == 0) {
++ /* Get result from read buffer. */
++ DBF_EVENT(6, "rbi_data = 0x%08x%08x\n",
++ *((unsigned int *) request->cpdata),
++ *(((unsigned int *) request->cpdata)+1));
++ rbi_data = (void *) request->cpdata;
++ *bid = rbi_data->channel_block_id;
++ }
++ tape_put_request(request);
++ return rc;
++}
++
++/* Seek block id */
++int
++tape_std_seek_block_id(struct tape_device *device, unsigned int bid)
++{
++ struct tape_request *request;
++
++ request = tape_alloc_request(3, 4);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++
++ request->op = TO_LBL;
++ *(__u32 *) request->cpdata = bid;
++
++ /* setup ccws */
++ tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
++ tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
++ tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
++
++ /* execute it */
++ return tape_do_io_free(device, request);
++}
++
++int
++tape_std_terminate_write(struct tape_device *device)
++{
++ int rc;
++
++ if(device->required_tapemarks == 0)
++ return 0;
++
++ DBF_EVENT(5, "(%04x): terminate_write %ixEOF\n",
++ device->devstat.devno, device->required_tapemarks);
++
++ rc = tape_mtop(device, MTWEOF, device->required_tapemarks);
++ if (rc)
++ return rc;
++
++ device->required_tapemarks = 0;
++ return tape_mtop(device, MTBSR, 1);
++}
++
++/*
++ * MTLOAD: Loads the tape.
++ * The default implementation just waits until the tape medium state changes
++ * to MS_LOADED.
++ */
++int
++tape_std_mtload(struct tape_device *device, int count)
++{
++ return wait_event_interruptible(device->state_change_wq,
++ (device->medium_state == MS_LOADED));
++}
++
++/*
++ * MTSETBLK: Set block size.
++ */
++int
++tape_std_mtsetblk(struct tape_device *device, int count)
++{
++ struct idal_buffer *new;
++
++ DBF_EVENT(6, "tape_std_mtsetblk(%d)\n", count);
++ if (count <= 0) {
++ /*
++ * Just set block_size to 0. tapechar_read/tapechar_write
++ * will realloc the idal buffer if a bigger one than the
++ * current is needed.
++ */
++ device->char_data.block_size = 0;
++ return 0;
++ }
++ if (device->char_data.idal_buf != NULL &&
++ device->char_data.idal_buf->size == count)
++ /* We already have an idal buffer of that size. */
++ return 0;
++
++ if (count > MAX_BLOCKSIZE) {
++ DBF_EVENT(3, "Invalid block size (%ld > %ld) given.\n",
++ count, MAX_BLOCKSIZE);
++ PRINT_ERR("Invalid block size (%ld > %ld) given.\n",
++ count, MAX_BLOCKSIZE);
++ return -EINVAL;
++ }
++
++ /* Allocate a new idal buffer. */
++ new = idal_buffer_alloc(count, 0);
++ if (new == NULL)
++ return -ENOMEM;
++ if (device->char_data.idal_buf != NULL)
++ idal_buffer_free(device->char_data.idal_buf);
++
++ device->char_data.idal_buf = new;
++ device->char_data.block_size = count;
++ DBF_EVENT(6, "new blocksize is %d\n", device->char_data.block_size);
++ return 0;
++}
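
tape_std_mtsetblk is reached through the generic magnetic-tape ioctl
interface. A hedged user-space sketch, assuming the usual MTIOCTOP
convention from sys/mtio.h:

    /* Hedged sketch: set a 32 KB fixed block size; the driver routes
     * this to tape_std_mtsetblk above. Must stay <= MAX_BLOCKSIZE. */
    #include <sys/ioctl.h>
    #include <sys/mtio.h>

    int set_blocksize(int fd)
    {
            struct mtop op;

            op.mt_op = MTSETBLK;
            op.mt_count = 32768;
            return ioctl(fd, MTIOCTOP, &op);
    }
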
++
++/*
++ * MTRESET: Set block size to 0.
++ */
++int
++tape_std_mtreset(struct tape_device *device, int count)
++{
++ DBF_EVENT(6, "TCHAR:devreset:\n");
++ device->char_data.block_size = 0;
++ return 0;
++}
++
++/*
++ * MTFSF: Forward space over 'count' file marks. The tape is positioned
++ * at the EOT (End of Tape) side of the file mark.
++ */
++int
++tape_std_mtfsf(struct tape_device *device, int mt_count)
++{
++ struct tape_request *request;
++ ccw1_t *ccw;
++
++ request = tape_alloc_request(mt_count + 2, 0);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++ request->op = TO_FSF;
++ /* setup ccws */
++ ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
++ device->modeset_byte);
++ ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
++ ccw = tape_ccw_end(ccw, NOP, 0, NULL);
++ /* execute it */
++ return tape_do_io_free(device, request);
++}
++
++/*
++ * MTFSR: Forward space over 'count' tape blocks (blocksize is set
++ * via MTSETBLK).
++ */
++int
++tape_std_mtfsr(struct tape_device *device, int mt_count)
++{
++ struct tape_request *request;
++ ccw1_t *ccw;
++
++ request = tape_alloc_request(mt_count + 2, 0);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++ request->op = TO_FSB;
++ /* setup ccws */
++ ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
++ device->modeset_byte);
++ ccw = tape_ccw_repeat(ccw, FORSPACEBLOCK, mt_count);
++ ccw = tape_ccw_end(ccw, NOP, 0, NULL);
++ /* execute it */
++ return tape_do_io_free(device, request);
++}
++
++/*
++ * MTBSR: Backward space over 'count' tape blocks.
++ * (blocksize is set via MTSETBLK).
++ */
++int
++tape_std_mtbsr(struct tape_device *device, int mt_count)
++{
++ struct tape_request *request;
++ ccw1_t *ccw;
++
++ request = tape_alloc_request(mt_count + 2, 0);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++ request->op = TO_BSB;
++ /* setup ccws */
++ ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
++ device->modeset_byte);
++ ccw = tape_ccw_repeat(ccw, BACKSPACEBLOCK, mt_count);
++ ccw = tape_ccw_end(ccw, NOP, 0, NULL);
++ /* execute it */
++ return tape_do_io_free(device, request);
++}
++
++/*
++ * MTWEOF: Write 'count' file marks at the current position.
++ */
++int
++tape_std_mtweof(struct tape_device *device, int mt_count)
++{
++ struct tape_request *request;
++ ccw1_t *ccw;
++
++ request = tape_alloc_request(mt_count + 2, 0);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++ request->op = TO_WTM;
++ /* setup ccws */
++ ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
++ device->modeset_byte);
++ ccw = tape_ccw_repeat(ccw, WRITETAPEMARK, mt_count);
++ ccw = tape_ccw_end(ccw, NOP, 0, NULL);
++ /* execute it */
++ return tape_do_io_free(device, request);
++}
++
++/*
++ * MTBSFM: Backward space over 'count' file marks.
++ * The tape is positioned at the BOT (Begin Of Tape) side of the
++ * last skipped file mark.
++ */
++int
++tape_std_mtbsfm(struct tape_device *device, int mt_count)
++{
++ struct tape_request *request;
++ ccw1_t *ccw;
++
++ request = tape_alloc_request(mt_count + 2, 0);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++ request->op = TO_BSF;
++ /* setup ccws */
++ ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
++ device->modeset_byte);
++ ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
++ ccw = tape_ccw_end(ccw, NOP, 0, NULL);
++ /* execute it */
++ return tape_do_io_free(device, request);
++}
++
++/*
++ * MTBSF: Backward space over 'count' file marks. The tape is positioned at
++ * the EOT (End of Tape) side of the last skipped file mark.
++ */
++int
++tape_std_mtbsf(struct tape_device *device, int mt_count)
++{
++ struct tape_request *request;
++ ccw1_t *ccw;
++ int rc;
++
++ request = tape_alloc_request(mt_count + 2, 0);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++ request->op = TO_BSF;
++ /* setup ccws */
++ ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
++ device->modeset_byte);
++ ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
++ ccw = tape_ccw_end(ccw, NOP, 0, NULL);
++ /* execute it */
++ rc = tape_do_io(device, request);
++ if (rc == 0) {
++ request->op = TO_FSF;
++ /* need to skip forward over the filemark. */
++ tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
++ device->modeset_byte);
++ tape_ccw_cc(request->cpaddr + 1, FORSPACEFILE, 0, NULL);
++ tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
++ /* execute it */
++ rc = tape_do_io(device, request);
++ }
++ tape_put_request(request);
++ return rc;
++}
++
++/*
++ * MTFSFM: Forward space over 'count' file marks.
++ * The tape is positioned at the BOT (Begin Of Tape) side
++ * of the last skipped file mark.
++ */
++int
++tape_std_mtfsfm(struct tape_device *device, int mt_count)
++{
++ struct tape_request *request;
++ ccw1_t *ccw;
++ int rc;
++
++ request = tape_alloc_request(mt_count + 2, 0);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++ request->op = TO_FSF;
++ /* setup ccws */
++ ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
++ device->modeset_byte);
++ ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
++ ccw = tape_ccw_end(ccw, NOP, 0, NULL);
++ /* execute it */
++ rc = tape_do_io(device, request);
++ if (rc == 0) {
++ request->op = TO_BSF;
++ /* need to skip back over the filemark. */
++ tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
++ device->modeset_byte);
++ tape_ccw_cc(request->cpaddr + 1, BACKSPACEFILE, 0, NULL);
++ tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
++ /* execute it */
++ rc = tape_do_io(device, request);
++ }
++ tape_put_request(request);
++ return rc;
++}
++
++/*
++ * MTREW: Rewind the tape.
++ */
++int
++tape_std_mtrew(struct tape_device *device, int mt_count)
++{
++ struct tape_request *request;
++
++ request = tape_alloc_request(3, 0);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++ request->op = TO_REW;
++ /* setup ccws */
++ tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
++ device->modeset_byte);
++ tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
++ tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
++ /* execute it */
++ return tape_do_io_free(device, request);
++}
++
++/*
++ * MTOFFL: Rewind the tape and put the drive off-line.
++ * Implement 'rewind unload'
++ */
++int
++tape_std_mtoffl(struct tape_device *device, int mt_count)
++{
++ struct tape_request *request;
++
++ request = tape_alloc_request(3, 0);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++ request->op = TO_RUN;
++ /* setup ccws */
++ tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
++ tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL);
++ tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
++ /* execute it */
++ return tape_do_io_free(device, request);
++}
++
++/*
++ * MTNOP: 'No operation'.
++ */
++int
++tape_std_mtnop(struct tape_device *device, int mt_count)
++{
++ struct tape_request *request;
++
++ request = tape_alloc_request(2, 0);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++ request->op = TO_NOP;
++ /* setup ccws */
++ tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
++ tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
++ /* execute it */
++ return tape_do_io_free(device, request);
++}
++
++/*
++ * MTEOM: positions at the end of the portion of the tape already used
++ * for recording data. MTEOM positions after the last file mark, ready for
++ * appending another file.
++ */
++int
++tape_std_mteom(struct tape_device *device, int mt_count)
++{
++ int rc;
++
++ /*
++ * Since there is currently no other way to seek, return to the
++ * BOT and start from there.
++ */
++ if((rc = tape_mtop(device, MTREW, 1)) < 0)
++ return rc;
++
++ do {
++ if((rc = tape_mtop(device, MTFSF, 1)) < 0)
++ return rc;
++ if((rc = tape_mtop(device, MTFSR, 1)) < 0)
++ return rc;
++ } while((device->devstat.dstat & DEV_STAT_UNIT_EXCEP) == 0);
++
++ return tape_mtop(device, MTBSR, 1);
++}
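
The rewind-and-scan loop above is what makes appending to a partially
written tape possible; a hedged user-space sketch of the typical append
sequence:

    /* Hedged sketch: seek to the end of recorded data (served by
     * tape_std_mteom above), then write a new record. */
    #include <sys/ioctl.h>
    #include <sys/mtio.h>
    #include <unistd.h>

    int append_record(int fd, const void *buf, size_t len)
    {
            struct mtop op;

            op.mt_op = MTEOM;
            op.mt_count = 1;
            if (ioctl(fd, MTIOCTOP, &op) < 0)
                    return -1;
            return (write(fd, buf, len) == (ssize_t) len) ? 0 : -1;
    }
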
++
++/*
++ * MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind.
++ */
++int
++tape_std_mtreten(struct tape_device *device, int mt_count)
++{
++ struct tape_request *request;
++ int rc;
++
++ request = tape_alloc_request(4, 0);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++ request->op = TO_FSF;
++ /* setup ccws */
++ tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
++ tape_ccw_cc(request->cpaddr + 1, FORSPACEFILE, 0, NULL);
++ tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL);
++ tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr);
++ /* execute it, MTRETEN rc gets ignored */
++ rc = tape_do_io_interruptible(device, request);
++ tape_put_request(request);
++ return tape_std_mtrew(device, 1);
++}
++
++/*
++ * MTERASE: erases the tape.
++ */
++int
++tape_std_mterase(struct tape_device *device, int mt_count)
++{
++ struct tape_request *request;
++
++ request = tape_alloc_request(5, 0);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++ request->op = TO_DSE;
++ /* setup ccws */
++ tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
++ tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
++ tape_ccw_cc(request->cpaddr + 2, ERASE_GAP, 0, NULL);
++ tape_ccw_cc(request->cpaddr + 3, DATA_SEC_ERASE, 0, NULL);
++ tape_ccw_end(request->cpaddr + 4, NOP, 0, NULL);
++ /* execute it */
++ return tape_do_io_free(device, request);
++}
++
++/*
++ * MTUNLOAD: Rewind the tape and unload it.
++ */
++int
++tape_std_mtunload(struct tape_device *device, int mt_count)
++{
++ struct tape_request *request;
++
++ request = tape_alloc_request(3, 32);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++ request->op = TO_RUN;
++ /* setup ccws */
++ tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
++ tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL);
++ tape_ccw_end(request->cpaddr + 2, SENSE, 32, request->cpdata);
++ /* execute it */
++ return tape_do_io_free(device, request);
++}
++
++/*
++ * MTCOMPRESSION: used to enable compression.
++ * Sets the IDRC on/off.
++ */
++int
++tape_std_mtcompression(struct tape_device *device, int mt_count)
++{
++ struct tape_request *request;
++
++ if (mt_count < 0 || mt_count > 1) {
++ DBF_EXCEPTION(6, "xcom parm\n");
++ if (*device->modeset_byte & 0x08)
++ PRINT_INFO("(%x) Compression is currently on\n",
++ device->devstat.devno);
++ else
++ PRINT_INFO("(%x) Compression is currently off\n",
++ device->devstat.devno);
++ PRINT_INFO("Use 1 to switch compression on, 0 to "
++ "switch it off\n");
++ return -EINVAL;
++ }
++ request = tape_alloc_request(2, 0);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++ request->op = TO_NOP;
++ /* setup ccws */
++ *device->modeset_byte = (mt_count == 0) ? 0x00 : 0x08;
++ tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
++ tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
++ /* execute it */
++ return tape_do_io_free(device, request);
++}
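
MTCOMPRESSION likewise arrives via MTIOCTOP; a hedged sketch (an mt_count of
0 or 1 maps to the modeset byte's 0x08 IDRC bit as handled above):

    /* Hedged sketch: switch IDRC compression on. Any mt_count other
     * than 0 or 1 makes the driver print the current state and fail
     * with -EINVAL, as implemented above. */
    #include <sys/ioctl.h>
    #include <sys/mtio.h>

    int enable_idrc(int fd)
    {
            struct mtop op;

            op.mt_op = MTCOMPRESSION;
            op.mt_count = 1;
            return ioctl(fd, MTIOCTOP, &op);
    }
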
++
++/*
++ * Read Block
++ */
++struct tape_request *
++tape_std_read_block(struct tape_device *device, size_t count)
++{
++ struct tape_request *request;
++
++ /*
++ * We have to alloc 4 ccws in order to be able to transform the
++ * request into a read backward request in the error case.
++ */
++ request = tape_alloc_request(4, 0);
++ if (IS_ERR(request)) {
++ DBF_EXCEPTION(6, "xrbl fail");
++ return request;
++ }
++ request->op = TO_RFO;
++ tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
++ tape_ccw_end_idal(request->cpaddr + 1, READ_FORWARD,
++ device->char_data.idal_buf);
++ DBF_EVENT(6, "xrbl ccwg\n");
++ return request;
++}
++
++/*
++ * Read Block backward transformation function.
++ */
++void
++tape_std_read_backward(struct tape_device *device, struct tape_request *request)
++{
++ /*
++ * We have allocated 4 ccws in tape_std_read, so we can now
++ * transform the request to a read backward, followed by a
++ * forward space block.
++ */
++ request->op = TO_RBA;
++ tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
++ tape_ccw_cc_idal(request->cpaddr + 1, READ_BACKWARD,
++ device->char_data.idal_buf);
++ tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL);
++ tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL);
++ DBF_EVENT(6, "xrop ccwg");}
++
++/*
++ * Write Block
++ */
++struct tape_request *
++tape_std_write_block(struct tape_device *device, size_t count)
++{
++ struct tape_request *request;
++
++ request = tape_alloc_request(2, 0);
++ if (IS_ERR(request)) {
++ DBF_EXCEPTION(6, "xwbl fail\n");
++ return request;
++ }
++ request->op = TO_WRI;
++ tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
++ tape_ccw_end_idal(request->cpaddr + 1, WRITE_CMD,
++ device->char_data.idal_buf);
++ DBF_EVENT(6, "xwbl ccwg\n");
++ return request;
++}
++
++/*
++ * This routine is called by the frontend after an ENOSPC on write
++ */
++void
++tape_std_process_eov(struct tape_device *device)
++{
++ /*
++ * End of volume: We have to backspace the last written record, then
++ * we TRY to write a tapemark and then backspace over the written TM
++ */
++ if (tape_mtop(device, MTBSR, 1) < 0)
++ return;
++ if (tape_mtop(device, MTWEOF, 1) < 0)
++ return;
++ tape_mtop(device, MTBSR, 1);
++}
++
++EXPORT_SYMBOL(tape_std_assign);
++EXPORT_SYMBOL(tape_std_unassign);
++#ifdef TAPE390_FORCE_UNASSIGN
++EXPORT_SYMBOL(tape_std_force_unassign);
++#endif
++EXPORT_SYMBOL(tape_std_display);
++EXPORT_SYMBOL(tape_std_read_block_id);
++EXPORT_SYMBOL(tape_std_seek_block_id);
++EXPORT_SYMBOL(tape_std_mtload);
++EXPORT_SYMBOL(tape_std_mtsetblk);
++EXPORT_SYMBOL(tape_std_mtreset);
++EXPORT_SYMBOL(tape_std_mtfsf);
++EXPORT_SYMBOL(tape_std_mtfsr);
++EXPORT_SYMBOL(tape_std_mtbsr);
++EXPORT_SYMBOL(tape_std_mtweof);
++EXPORT_SYMBOL(tape_std_mtbsfm);
++EXPORT_SYMBOL(tape_std_mtbsf);
++EXPORT_SYMBOL(tape_std_mtfsfm);
++EXPORT_SYMBOL(tape_std_mtrew);
++EXPORT_SYMBOL(tape_std_mtoffl);
++EXPORT_SYMBOL(tape_std_mtnop);
++EXPORT_SYMBOL(tape_std_mteom);
++EXPORT_SYMBOL(tape_std_mtreten);
++EXPORT_SYMBOL(tape_std_mterase);
++EXPORT_SYMBOL(tape_std_mtunload);
++EXPORT_SYMBOL(tape_std_mtcompression);
++EXPORT_SYMBOL(tape_std_read_block);
++EXPORT_SYMBOL(tape_std_read_backward);
++EXPORT_SYMBOL(tape_std_write_block);
++EXPORT_SYMBOL(tape_std_process_eov);
+=== drivers/s390/char/sclp_tty.h
+==================================================================
+--- drivers/s390/char/sclp_tty.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/sclp_tty.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,67 @@
++/*
++ * drivers/s390/char/sclp_tty.h
++ * interface to the SCLP-read/write driver
++ *
++ * S390 version
++ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Martin Peschke <mpeschke at de.ibm.com>
++ * Martin Schwidefsky <schwidefsky at de.ibm.com>
++ */
++
++#ifndef __SCLP_TTY_H__
++#define __SCLP_TTY_H__
++
++#include <linux/ioctl.h>
++
++/* This data structure stores the sclp ioctl settings. */
++struct sclp_ioctls {
++ unsigned short htab;
++ unsigned char echo;
++ unsigned short columns;
++ unsigned char final_nl;
++ unsigned short max_sccb;
++ unsigned short kmem_sccb; /* can't be modified at run time */
++ unsigned char tolower;
++ unsigned char delim;
++};
++
++/* must be unique, FIXME: must be added in Documentation/ioctl_number.txt */
++#define SCLP_IOCTL_LETTER 'B'
++
++/* set width of horizontal tabulator */
++#define TIOCSCLPSHTAB _IOW(SCLP_IOCTL_LETTER, 0, unsigned short)
++/* enable/disable echo of input (independent of the line discipline) */
++#define TIOCSCLPSECHO _IOW(SCLP_IOCTL_LETTER, 1, unsigned char)
++/* set number of columns for output */
++#define TIOCSCLPSCOLS _IOW(SCLP_IOCTL_LETTER, 2, unsigned short)
++/* enable/disable writing without final new line character */
++#define TIOCSCLPSNL _IOW(SCLP_IOCTL_LETTER, 4, signed char)
++/* set the maximum buffer size for output, rounded up to the next 4kB boundary */
++#define TIOCSCLPSOBUF _IOW(SCLP_IOCTL_LETTER, 5, unsigned short)
++/* set initial (default) sclp ioctls */
++#define TIOCSCLPSINIT _IO(SCLP_IOCTL_LETTER, 6)
++/* enable/disable conversion from upper to lower case of input */
++#define TIOCSCLPSCASE _IOW(SCLP_IOCTL_LETTER, 7, unsigned char)
++/* set special character used for separating upper and lower case, */
++/* 0x00 disables this feature */
++#define TIOCSCLPSDELIM _IOW(SCLP_IOCTL_LETTER, 9, unsigned char)
++
++/* get width of horizontal tabulator */
++#define TIOCSCLPGHTAB _IOR(SCLP_IOCTL_LETTER, 10, unsigned short)
++/* Is echo of input enabled? (independent of the line discipline) */
++#define TIOCSCLPGECHO _IOR(SCLP_IOCTL_LETTER, 11, unsigned char)
++/* get number of columns for output */
++#define TIOCSCLPGCOLS _IOR(SCLP_IOCTL_LETTER, 12, unsigned short)
++/* Is writing without final new line character enabled? */
++#define TIOCSCLPGNL _IOR(SCLP_IOCTL_LETTER, 14, signed char)
++/* get the maximum buffer size for output */
++#define TIOCSCLPGOBUF _IOR(SCLP_IOCTL_LETTER, 15, unsigned short)
++/* Is conversion from upper to lower case of input enabled? */
++#define TIOCSCLPGCASE _IOR(SCLP_IOCTL_LETTER, 17, unsigned char)
++/* get special character used for separating upper and lower case, */
++/* 0x00 disables this feature */
++#define TIOCSCLPGDELIM _IOR(SCLP_IOCTL_LETTER, 19, unsigned char)
++/* get the number of buffers/pages obtained from the kernel at startup */
++#define TIOCSCLPGKBUF _IOR(SCLP_IOCTL_LETTER, 20, unsigned short)
++
++#endif /* __SCLP_TTY_H__ */
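
The _IOW/_IOR pairs above follow the usual set/get convention, with the
argument passed by reference. A hedged sketch of querying and adjusting the
output width; that the SCLP tty is reachable via /dev/console is an
assumption:

    /* Hedged sketch: query and change the SCLP console line width with
     * the ioctls declared above. The device node is an assumption. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include "sclp_tty.h"

    int widen_console(void)
    {
            unsigned short cols;
            int fd, rc;

            fd = open("/dev/console", O_RDONLY);
            if (fd < 0)
                    return -1;
            if (ioctl(fd, TIOCSCLPGCOLS, &cols) == 0)
                    printf("current width: %hu columns\n", cols);
            cols = 132;
            rc = ioctl(fd, TIOCSCLPSCOLS, &cols);
            close(fd);
            return rc;
    }
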
+=== drivers/s390/char/tape_std.h
+==================================================================
+--- drivers/s390/char/tape_std.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tape_std.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,161 @@
++/*
++ * drivers/s390/char/tape_std.h
++ * standard tape device functions for ibm tapes.
++ *
++ * S390 and zSeries version
++ * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Carsten Otte <cotte at de.ibm.com>
++ * Tuan Ngo-Anh <ngoanh at de.ibm.com>
++ * Martin Schwidefsky <schwidefsky at de.ibm.com>
++ * Stefan Bader <shbader at de.ibm.com>
++ */
++
++#ifndef _TAPE_STD_H
++#define _TAPE_STD_H
++
++#include <asm/tape390.h>
++
++/*
++ * Biggest block size to handle. Currently 64K because we only build
++ * channel programs without data chaining.
++ */
++#define MAX_BLOCKSIZE 65535
++
++/*
++ * The CCW commands for the Tape type of command.
++ */
++#define INVALID_00 0x00 /* Invalid cmd */
++#define BACKSPACEBLOCK 0x27 /* Back Space block */
++#define BACKSPACEFILE 0x2f /* Back Space file */
++#define DATA_SEC_ERASE 0x97 /* Data security erase */
++#define ERASE_GAP 0x17 /* Erase Gap */
++#define FORSPACEBLOCK 0x37 /* Forward space block */
++#define FORSPACEFILE 0x3F /* Forward Space file */
++#define FORCE_STREAM_CNT 0xEB /* Forced streaming count # */
++#define NOP 0x03 /* No operation */
++#define READ_FORWARD 0x02 /* Read forward */
++#define REWIND 0x07 /* Rewind */
++#define REWIND_UNLOAD 0x0F /* Rewind and Unload */
++#define SENSE 0x04 /* Sense */
++#define NEW_MODE_SET 0xEB /* Guess it is Mode set */
++#define WRITE_CMD 0x01 /* Write */
++#define WRITETAPEMARK 0x1F /* Write Tape Mark */
++
++#define ASSIGN 0xB7 /* 3420 REJECT,3480 OK */
++#define CONTROL_ACCESS 0xE3 /* Set high speed */
++#define DIAG_MODE_SET 0x0B /* 3420 NOP, 3480 REJECT */
++#define LOAD_DISPLAY 0x9F /* 3420 REJECT,3480 OK */
++#define LOCATE 0x4F /* 3420 REJ, 3480 NOP */
++#define LOOP_WRITE_TO_READ 0x8B /* 3480 REJECT */
++#define MODE_SET_DB 0xDB /* 3420 REJECT,3480 OK */
++#define MODE_SET_C3 0xC3 /* for 3420 */
++#define MODE_SET_CB 0xCB /* for 3420 */
++#define MODE_SET_D3 0xD3 /* for 3420 */
++#define READ_BACKWARD 0x0C /* */
++#define READ_BLOCK_ID 0x22 /* 3420 REJECT,3480 OK */
++#define READ_BUFFER 0x12 /* 3420 REJECT,3480 OK */
++#define READ_BUFF_LOG 0x24 /* 3420 REJECT,3480 OK */
++#define RELEASE 0xD4 /* 3420 NOP, 3480 REJECT */
++#define REQ_TRK_IN_ERROR 0x1B /* 3420 NOP, 3480 REJECT */
++#define RESERVE 0xF4 /* 3420 NOP, 3480 REJECT */
++#define SENSE_GROUP_ID 0x34 /* 3420 REJECT,3480 OK */
++#define SENSE_ID 0xE4 /* 3420 REJECT,3480 OK */
++#define READ_DEV_CHAR 0x64 /* Read device characteristics */
++#define SET_DIAGNOSE 0x4B /* 3420 NOP, 3480 REJECT */
++#define SET_GROUP_ID 0xAF /* 3420 REJECT,3480 OK */
++#define SET_TAPE_WRITE_IMMED 0xC3 /* for 3480 */
++#define SUSPEND 0x5B /* 3420 REJ, 3480 NOP */
++#define SYNC 0x43 /* Synchronize (flush buffer) */
++#define UNASSIGN 0xC7 /* 3420 REJECT,3480 OK */
++#define PERF_SUBSYS_FUNC 0x77 /* 3490 CMD */
++#define READ_CONFIG_DATA 0xFA /* 3490 CMD */
++#define READ_MESSAGE_ID 0x4E /* 3490 CMD */
++#define READ_SUBSYS_DATA 0x3E /* 3490 CMD */
++#define SET_INTERFACE_ID 0x73 /* 3490 CMD */
++
++#define SENSE_COMMAND_REJECT 0x80
++#define SENSE_INTERVENTION_REQUIRED 0x40
++#define SENSE_BUS_OUT_CHECK 0x20
++#define SENSE_EQUIPMENT_CHECK 0x10
++#define SENSE_DATA_CHECK 0x08
++#define SENSE_OVERRUN 0x04
++#define SENSE_DEFERRED_UNIT_CHECK 0x02
++#define SENSE_ASSIGNED_ELSEWHERE 0x01
++
++#define SENSE_LOCATE_FAILURE 0x80
++#define SENSE_DRIVE_ONLINE 0x40
++#define SENSE_RESERVED 0x20
++#define SENSE_RECORD_SEQUENCE_ERR 0x10
++#define SENSE_BEGINNING_OF_TAPE 0x08
++#define SENSE_WRITE_MODE 0x04
++#define SENSE_WRITE_PROTECT 0x02
++#define SENSE_NOT_CAPABLE 0x01
++
++#define SENSE_CHANNEL_ADAPTER_CODE 0xE0
++#define SENSE_CHANNEL_ADAPTER_LOC 0x10
++#define SENSE_REPORTING_CU 0x08
++#define SENSE_AUTOMATIC_LOADER 0x04
++#define SENSE_TAPE_SYNC_MODE 0x02
++#define SENSE_TAPE_POSITIONING 0x01
++
++/* Data structure for the CONTROL_ACCESS call */
++struct tape_ca_data {
++ unsigned char function;
++ char password[11];
++} __attribute__ ((packed));
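
Since function is one byte and password eleven, the packed struct is exactly
12 bytes, which is why tape_std_force_unassign (in tape_std.c above)
allocates 24 bytes of cpdata and places the second block at offset 12. A
hedged compile-time check in the C89 style of this driver:

    /* Hedged sketch: fails to compile if the packed layout ever
     * deviates from the 12 bytes the channel program relies on. */
    typedef char tape_ca_data_size_check[
            (sizeof(struct tape_ca_data) == 12) ? 1 : -1];
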
++
++/* discipline functions */
++struct tape_request *tape_std_read_block(struct tape_device *, size_t);
++void tape_std_read_backward(struct tape_device *device,
++ struct tape_request *request);
++struct tape_request *tape_std_write_block(struct tape_device *, size_t);
++struct tape_request *tape_std_bread(struct tape_device *, struct request *);
++void tape_std_free_bread(struct tape_request *);
++void tape_std_check_locate(struct tape_device *, struct tape_request *);
++struct tape_request *tape_std_bwrite(struct request *,
++ struct tape_device *, int);
++
++/* Some non-mtop commands. */
++int tape_std_assign(struct tape_device *);
++int tape_std_unassign(struct tape_device *);
++int tape_std_force_unassign(struct tape_device *);
++int tape_std_read_block_id(struct tape_device *, unsigned int *);
++int tape_std_seek_block_id(struct tape_device *, unsigned int);
++int tape_std_display(struct tape_device *, struct display_struct *);
++int tape_std_terminate_write(struct tape_device *);
++
++/* Standard magnetic tape commands. */
++int tape_std_mtbsf(struct tape_device *, int);
++int tape_std_mtbsfm(struct tape_device *, int);
++int tape_std_mtbsr(struct tape_device *, int);
++int tape_std_mtcompression(struct tape_device *, int);
++int tape_std_mteom(struct tape_device *, int);
++int tape_std_mterase(struct tape_device *, int);
++int tape_std_mtfsf(struct tape_device *, int);
++int tape_std_mtfsfm(struct tape_device *, int);
++int tape_std_mtfsr(struct tape_device *, int);
++int tape_std_mtload(struct tape_device *, int);
++int tape_std_mtnop(struct tape_device *, int);
++int tape_std_mtoffl(struct tape_device *, int);
++int tape_std_mtreset(struct tape_device *, int);
++int tape_std_mtreten(struct tape_device *, int);
++int tape_std_mtrew(struct tape_device *, int);
++int tape_std_mtsetblk(struct tape_device *, int);
++int tape_std_mtunload(struct tape_device *, int);
++int tape_std_mtweof(struct tape_device *, int);
++
++/* Event handlers */
++void tape_std_default_handler(struct tape_device *);
++void tape_std_unexpect_uchk_handler(struct tape_device *);
++void tape_std_irq(struct tape_device *);
++void tape_std_process_eov(struct tape_device *);
++
++// the error recovery stuff:
++void tape_std_error_recovery(struct tape_device *);
++void tape_std_error_recovery_has_failed(struct tape_device *,int error_id);
++void tape_std_error_recovery_succeded(struct tape_device *);
++void tape_std_error_recovery_do_retry(struct tape_device *);
++void tape_std_error_recovery_read_opposite(struct tape_device *);
++void tape_std_error_recovery_HWBUG(struct tape_device *, int condno);
++
++#endif // _TAPE_STD_H
+=== drivers/s390/char/tape_34xx.c
+==================================================================
+--- drivers/s390/char/tape_34xx.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tape_34xx.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,1357 @@
++/*
++ * drivers/s390/char/tape_34xx.c
++ * tape device discipline for 3480/3490 tapes.
++ *
++ * S390 and zSeries version
++ * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Carsten Otte <cotte at de.ibm.com>
++ * Tuan Ngo-Anh <ngoanh at de.ibm.com>
++ * Martin Schwidefsky <schwidefsky at de.ibm.com>
++ * Stefan Bader <shbader at de.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <asm/tape390.h>
++
++#define TAPE_DBF_AREA tape_34xx_dbf
++
++#include "tape.h"
++#include "tape_std.h"
++
++#define PRINTK_HEADER "T34xx:"
++
++/*
++ * Pointer to debug area.
++ */
++debug_info_t *TAPE_DBF_AREA = NULL;
++
++/*
++ * The block ID is the complete marker for a specific tape position.
++ * It contains a physical part (wrap, segment, format) and a logical
++ * block number.
++ */
++#define TBI_FORMAT_3480 0x00
++#define TBI_FORMAT_3480_2_XF 0x01
++#define TBI_FORMAT_3480_XF 0x02
++#define TBI_FORMAT_RESERVED 0x03
++
++struct tape_34xx_block_id {
++ unsigned int tbi_wrap : 1;
++ unsigned int tbi_segment : 7;
++ unsigned int tbi_format : 2;
++ unsigned int tbi_block : 22;
++} __attribute__ ((packed));
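
On s390, a big-endian target, gcc allocates these bit-fields starting at the
most significant bit, so the struct overlays one 32-bit block id as
wrap:1 | segment:7 | format:2 | block:22. A hedged sketch of the equivalent
manual packing (bit-field layout is otherwise compiler-dependent, which is
the point of spelling it out):

    /* Hedged sketch: pack/unpack the 34xx block id by hand instead of
     * relying on bit-field layout. Matches the struct above on a
     * big-endian target. */
    static unsigned int bid_pack(unsigned int wrap, unsigned int segment,
                                 unsigned int format, unsigned int block)
    {
            return (wrap    & 0x1)      << 31 |
                   (segment & 0x7f)     << 24 |
                   (format  & 0x3)      << 22 |
                   (block   & 0x3fffff);
    }

    static unsigned int bid_block(unsigned int bid)
    {
            return bid & 0x3fffff;      /* low 22 bits: logical block */
    }
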
++
++struct sbid_entry {
++ struct list_head list;
++ struct tape_34xx_block_id bid;
++};
++
++struct tape_34xx_discdata {
++ /* A list of block id's of the tape segments (for faster seek) */
++ struct list_head sbid_list;
++};
++
++/* Internal prototypes */
++static void tape_34xx_clear_sbid_list(struct tape_device *);
++
++/* 34xx specific functions */
++static void
++__tape_34xx_medium_sense_callback(struct tape_request *request, void *data)
++{
++ unsigned char *sense = request->cpdata;
++
++ request->callback = NULL;
++
++ DBF_EVENT(5, "TO_MSEN[0]: %08x\n", *((unsigned int *) sense));
++ DBF_EVENT(5, "TO_MSEN[1]: %08x\n", *((unsigned int *) sense+1));
++ DBF_EVENT(5, "TO_MSEN[2]: %08x\n", *((unsigned int *) sense+2));
++ DBF_EVENT(5, "TO_MSEN[3]: %08x\n", *((unsigned int *) sense+3));
++
++ if(sense[0] & SENSE_INTERVENTION_REQUIRED) {
++ tape_med_state_set(request->device, MS_UNLOADED);
++ } else {
++ tape_med_state_set(request->device, MS_LOADED);
++ }
++
++ if(sense[1] & SENSE_WRITE_PROTECT) {
++ request->device->tape_generic_status |= GMT_WR_PROT(~0);
++ } else {
++ request->device->tape_generic_status &= ~GMT_WR_PROT(~0);
++ }
++
++ tape_put_request(request);
++}
++
++static int
++tape_34xx_medium_sense(struct tape_device *device)
++{
++ struct tape_request * request;
++ int rc;
++
++ tape_34xx_clear_sbid_list(device);
++
++ request = tape_alloc_request(1, 32);
++ if(IS_ERR(request)) {
++ DBF_EXCEPTION(6, "MSN fail\n");
++ return PTR_ERR(request);
++ }
++
++ request->op = TO_MSEN;
++ tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
++ request->callback = __tape_34xx_medium_sense_callback;
++
++ rc = tape_do_io_async(device, request);
++
++ return rc;
++}
++
++static void
++tape_34xx_work_handler(void *data)
++{
++ struct {
++ struct tape_device *device;
++ enum tape_op op;
++ struct tq_struct task;
++ } *p = data;
++
++ switch(p->op) {
++ case TO_MSEN:
++ tape_34xx_medium_sense(p->device);
++ break;
++ default:
++ DBF_EVENT(3, "T34XX: internal error: unknown work\n");
++ }
++
++ tape_put_device(p->device);
++ kfree(p);
++}
++
++/*
++ * This function is currently used to schedule a sense for later execution.
++ * For example whenever an unsolicited interrupt signals a new tape medium
++ * and we can't call tape_do_io from that interrupt handler.
++ */
++static int
++tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
++{
++ struct {
++ struct tape_device *device;
++ enum tape_op op;
++ struct tq_struct task;
++ } *p;
++
++ if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
++ return -ENOMEM;
++
++ memset(p, 0, sizeof(*p));
++ INIT_LIST_HEAD(&p->task.list);
++ p->task.routine = tape_34xx_work_handler;
++ p->task.data = p;
++
++ p->device = tape_clone_device(device);
++ p->op = op;
++
++ schedule_task(&p->task);
++
++ return 0;
++}
++
++/*
++ * Done Handler is called when dev stat = DEVICE-END (successful operation)
++ */
++static int
++tape_34xx_done(struct tape_device *device, struct tape_request *request)
++{
++ DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
++ // FIXME: Maybe only on assign/unassign
++ TAPE_CLEAR_STATE(device, TAPE_STATUS_BOXED);
++
++ return TAPE_IO_SUCCESS;
++}
++
++static inline int
++tape_34xx_erp_failed(struct tape_device *device,
++ struct tape_request *request, int rc)
++{
++ DBF_EVENT(3, "Error recovery failed for %s\n",
++ tape_op_verbose[request->op]);
++ return rc;
++}
++
++static inline int
++tape_34xx_erp_succeeded(struct tape_device *device,
++ struct tape_request *request)
++{
++ DBF_EVENT(3, "Error Recovery successful for %s\n",
++ tape_op_verbose[request->op]);
++ return tape_34xx_done(device, request);
++}
++
++static inline int
++tape_34xx_erp_retry(struct tape_device *device, struct tape_request *request)
++{
++ DBF_EVENT(3, "xerp retr %s\n",
++ tape_op_verbose[request->op]);
++ return TAPE_IO_RETRY;
++}
++
++/*
++ * This function is called, when no request is outstanding and we get an
++ * interrupt
++ */
++static int
++tape_34xx_unsolicited_irq(struct tape_device *device)
++{
++ if (device->devstat.dstat == 0x85 /* READY */) {
++ /* A medium was inserted in the drive. */
++ DBF_EVENT(6, "T34xx: tape load\n");
++ tape_34xx_schedule_work(device, TO_MSEN);
++ } else {
++ DBF_EVENT(3, "T34xx: unsol.irq! dev end: %x\n",
++ device->devinfo.irq);
++ PRINT_WARN("Unsolicited IRQ (Device End) caught.\n");
++ tape_dump_sense(device, NULL);
++ }
++ return TAPE_IO_SUCCESS;
++}
++
++/*
++ * Read Opposite Error Recovery Function:
++ * Used when Read Forward does not work
++ */
++static int
++tape_34xx_erp_read_opposite(struct tape_device *device,
++ struct tape_request *request)
++{
++ if (request->op == TO_RFO) {
++ /*
++ * We did read forward, but the data could not be read
++ * *correctly*. We transform the request to a read backward
++ * and try again.
++ */
++ tape_std_read_backward(device, request);
++ return tape_34xx_erp_retry(device, request);
++ }
++ if (request->op != TO_RBA)
++ PRINT_ERR("read_opposite called with state:%s\n",
++ tape_op_verbose[request->op]);
++ /*
++ * We tried to read forward and backward, but had no
++ * success -> failed.
++ */
++ return tape_34xx_erp_failed(device, request, -EIO);
++}
++
++static int
++tape_34xx_erp_bug(struct tape_device *device,
++ struct tape_request *request, int no)
++{
++ if (request->op != TO_ASSIGN) {
++ PRINT_WARN("An unexpected condition #%d was caught in "
++ "tape error recovery.\n", no);
++ PRINT_WARN("Please report this incident.\n");
++ if (request)
++ PRINT_WARN("Operation of tape:%s\n",
++ tape_op_verbose[request->op]);
++ tape_dump_sense(device, request);
++ }
++ return tape_34xx_erp_failed(device, request, -EIO);
++}
++
++/*
++ * Handle data overrun between cu and drive. The channel speed might
++ * be too slow.
++ */
++static int
++tape_34xx_erp_overrun(struct tape_device *device, struct tape_request *request)
++{
++ if (device->devstat.ii.sense.data[3] == 0x40) {
++ PRINT_WARN ("Data overrun error between control-unit "
++ "and drive. Use a faster channel connection, "
++ "if possible! \n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ }
++ return tape_34xx_erp_bug(device, request, -1);
++}
++
++/*
++ * Handle record sequence error.
++ */
++static int
++tape_34xx_erp_sequence(struct tape_device *device,
++ struct tape_request *request)
++{
++ if (device->devstat.ii.sense.data[3] == 0x41) {
++ /*
++ * cu detected incorrect block-id sequence on tape.
++ */
++ PRINT_WARN("Illegal block-id sequence found!\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ }
++ /*
++ * Record sequence error bit is set, but erpa does not
++ * show record sequence error.
++ */
++ return tape_34xx_erp_bug(device, request, -2);
++}
++
++/*
++ * This function analyses the tape's sense-data in case of a unit-check.
++ * If possible, it tries to recover from the error. Otherwise the user is
++ * informed about the problem.
++ */
++static int
++tape_34xx_unit_check(struct tape_device *device, struct tape_request *request)
++{
++ int inhibit_cu_recovery;
++ __u8* sense;
++
++ inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0;
++ sense = device->devstat.ii.sense.data;
++
++#ifdef CONFIG_S390_TAPE_BLOCK
++ if (request->op == TO_BLOCK) {
++ /*
++ * Recovery for block device requests. Set the block_position
++ * to something invalid and retry.
++ */
++ device->blk_data.block_position = -1;
++ if (request->retries-- <= 0)
++ return tape_34xx_erp_failed(device, request, -EIO);
++ else
++ return tape_34xx_erp_retry(device, request);
++ }
++#endif
++
++ if (
++ sense[0] & SENSE_COMMAND_REJECT &&
++ sense[1] & SENSE_WRITE_PROTECT
++ ) {
++ if (
++ request->op == TO_DSE ||
++ request->op == TO_WRI ||
++ request->op == TO_WTM
++ ) {
++ /* medium is write protected */
++ return tape_34xx_erp_failed(device, request, -EACCES);
++ } else {
++ return tape_34xx_erp_bug(device, request, -3);
++ }
++ }
++
++ /*
++ * special cases for various tape-states when reaching
++ * end of recorded area
++ */
++ /*
++ * FIXME: Maybe a special case of the special case:
++ * sense[0] == SENSE_EQUIPMENT_CHECK &&
++ * sense[1] == SENSE_DRIVE_ONLINE &&
++ * sense[3] == 0x47 (Volume Fenced)
++ *
++ * This was caused by continued FSF or FSR after an
++ * 'End Of Data'.
++ */
++ if ((
++ sense[0] == SENSE_DATA_CHECK ||
++ sense[0] == SENSE_EQUIPMENT_CHECK ||
++ sense[0] == SENSE_EQUIPMENT_CHECK + SENSE_DEFERRED_UNIT_CHECK
++ ) && (
++ sense[1] == SENSE_DRIVE_ONLINE ||
++ sense[1] == SENSE_BEGINNING_OF_TAPE + SENSE_WRITE_MODE
++ )) {
++ switch (request->op) {
++ /*
++ * sense[0] == SENSE_DATA_CHECK &&
++ * sense[1] == SENSE_DRIVE_ONLINE
++ * sense[3] == 0x36 (End Of Data)
++ *
++ * Further seeks might return a 'Volume Fenced'.
++ */
++ case TO_FSF:
++ case TO_FSB:
++ /* Trying to seek beyond end of recorded area */
++ return tape_34xx_erp_failed(device, request, -ENOSPC);
++ case TO_BSB:
++ return tape_34xx_erp_retry(device, request);
++ /*
++ * sense[0] == SENSE_DATA_CHECK &&
++ * sense[1] == SENSE_DRIVE_ONLINE &&
++ * sense[3] == 0x36 (End Of Data)
++ */
++ case TO_LBL:
++ /* Block could not be located. */
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case TO_RFO:
++ /* Read beyond end of recorded area -> 0 bytes read */
++ return tape_34xx_erp_failed(device, request, 0);
++ default:
++ PRINT_ERR("Invalid op %s in %s:%i\n",
++ tape_op_verbose[request->op],
++ __FUNCTION__, __LINE__);
++ return tape_34xx_erp_failed(device, request, 0);
++ }
++ }
++
++ /* Sensing special bits */
++ if (sense[0] & SENSE_BUS_OUT_CHECK)
++ return tape_34xx_erp_retry(device, request);
++
++ if (sense[0] & SENSE_DATA_CHECK) {
++ /*
++ * hardware failure, damaged tape or improper
++ * operating conditions
++ */
++ switch (sense[3]) {
++ case 0x23:
++ /* a read data check occurred */
++ if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
++ inhibit_cu_recovery)
++ // data check is not permanent, may be
++ // recovered. We always use async-mode with
++ // cu-recovery, so this should *never* happen.
++ return tape_34xx_erp_bug(device, request, -4);
++
++ /* data check is permanent, CU recovery has failed */
++ PRINT_WARN("Permanent read error\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x25:
++ // a write data check occurred
++ if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
++ inhibit_cu_recovery)
++ // data check is not permanent, may be
++ // recovered. We always use async-mode with
++ // cu-recovery, so this should *never* happen.
++ return tape_34xx_erp_bug(device, request, -5);
++
++ // data check is permanent, cu-recovery has failed
++ PRINT_WARN("Permanent write error\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x26:
++ /* Data Check (read opposite) occurred. */
++ return tape_34xx_erp_read_opposite(device, request);
++ case 0x28:
++ /* ID-Mark at tape start couldn't be written */
++ PRINT_WARN("ID-Mark could not be written.\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x31:
++ /* Tape void. Tried to read beyond end of device. */
++ PRINT_WARN("Read beyond end of recorded area.\n");
++ return tape_34xx_erp_failed(device, request, -ENOSPC);
++ case 0x41:
++ /* Record sequence error. */
++ PRINT_WARN("Invalid block-id sequence found.\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ default:
++ /* all data checks for 3480 should result in one of
++ * the above erpa-codes. For 3490, other data-check
++ * conditions do exist. */
++ if (device->discipline->cu_type == 0x3480)
++ return tape_34xx_erp_bug(device, request, -6);
++ }
++ }
++
++ if (sense[0] & SENSE_OVERRUN)
++ return tape_34xx_erp_overrun(device, request);
++
++ if (sense[1] & SENSE_RECORD_SEQUENCE_ERR)
++ return tape_34xx_erp_sequence(device, request);
++
++ /* Sensing erpa codes */
++ switch (sense[3]) {
++ case 0x00:
++ /* Unit check with erpa code 0. Report and ignore. */
++ PRINT_WARN("Non-error sense was found. "
++ "Unit-check will be ignored.\n");
++ return TAPE_IO_SUCCESS;
++ case 0x21:
++ /*
++ * Data streaming not operational. CU will switch to
++ * interlock mode. Reissue the command.
++ */
++ PRINT_WARN("Data streaming not operational. "
++ "Switching to interlock-mode.\n");
++ return tape_34xx_erp_retry(device, request);
++ case 0x22:
++ /*
++ * Path equipment check. Might be drive adapter error, buffer
++ * error on the lower interface, internal path not usable,
++ * or error during cartridge load.
++ */
++ PRINT_WARN("A path equipment check occurred. One of the "
++ "following conditions occurred:\n");
++ PRINT_WARN("drive adapter error, buffer error on the lower "
++ "interface, internal path not usable, error "
++ "during cartridge load.\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x24:
++ /*
++ * Load display check. A load display command was issued,
++ * but the drive is displaying a drive check message. Can
++ * be treated as "device end".
++ */
++ return tape_34xx_erp_succeeded(device, request);
++ case 0x27:
++ /*
++ * Command reject. May indicate illegal channel program or
++ * buffer over/underrun. Since all channel programs are
++ * issued by this driver and ought to be correct, we assume an
++ * over/underrun situation and retry the channel program.
++ */
++ return tape_34xx_erp_retry(device, request);
++ case 0x29:
++ /*
++ * Function incompatible. Either the tape is idrc compressed
++ * but the hardware isn't capable of idrc, or a perform
++ * subsystem func is issued and the CU is not on-line.
++ */
++ PRINT_WARN ("Function incompatible. Try to switch off idrc\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x2a:
++ /*
++ * Unsolicited environmental data. An internal counter
++ * overflowed; we can ignore this and reissue the cmd.
++ */
++ return tape_34xx_erp_retry(device, request);
++ case 0x2b:
++ /*
++ * Environmental data present. Indicates either unload
++ * completed ok or read buffered log command completed ok.
++ */
++ if (request->op == TO_RUN) {
++ tape_med_state_set(device, MS_UNLOADED);
++ /* Rewind unload completed ok. */
++ return tape_34xx_erp_succeeded(device, request);
++ }
++ /* tape_34xx doesn't use read buffered log commands. */
++ return tape_34xx_erp_bug(device, request, sense[3]);
++ case 0x2c:
++ /*
++ * Permanent equipment check. CU has tried recovery, but
++ * did not succeed.
++ */
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x2d:
++ /* Data security erase failure. */
++ if (request->op == TO_DSE)
++ return tape_34xx_erp_failed(device, request, -EIO);
++ /* Data security erase failure, but no such command issued. */
++ return tape_34xx_erp_bug(device, request, sense[3]);
++ case 0x2e:
++ /*
++ * Not capable. This indicates either that the drive fails
++ * reading the format id mark or that the format specified
++ * is not supported by the drive.
++ */
++ PRINT_WARN("Drive not capable processing the tape format!");
++ return tape_34xx_erp_failed(device, request, -EMEDIUMTYPE);
++ case 0x30:
++ /* The medium is write protected. */
++ PRINT_WARN("Medium is write protected!\n");
++ return tape_34xx_erp_failed(device, request, -EACCES);
++ case 0x32:
++ // Tension loss. We cannot recover this, it's an I/O error.
++ PRINT_WARN("The drive lost tape tension.\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x33:
++ /*
++ * Load Failure. The cartridge was not inserted correctly or
++ * the tape is not threaded correctly.
++ */
++ PRINT_WARN("Cartridge load failure. Reload the cartridge "
++ "and try again.\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x34:
++ /*
++ * Unload failure. The drive cannot maintain tape tension
++ * and control tape movement during an unload operation.
++ */
++ PRINT_WARN("Failure during cartridge unload. "
++ "Please try manually.\n");
++ if (request->op == TO_RUN)
++ return tape_34xx_erp_failed(device, request, -EIO);
++ return tape_34xx_erp_bug(device, request, sense[3]);
++ case 0x35:
++ /*
++ * Drive equipment check. One of the following:
++ * - cu cannot recover from a drive detected error
++ * - a check code message is shown on drive display
++ * - the cartridge loader does not respond correctly
++ * - a failure occurs during an index, load, or unload cycle
++ */
++ PRINT_WARN("Equipment check! Please check the drive and "
++ "the cartridge loader.\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x36:
++ if (device->discipline->cu_type == 0x3490)
++ /* End of data. */
++ return tape_34xx_erp_failed(device, request, -EIO);
++ /* This erpa is reserved for 3480 */
++ return tape_34xx_erp_bug(device, request, sense[3]);
++ case 0x37:
++ /*
++ * Tape length error. The tape is shorter than reported in
++ * the beginning-of-tape data.
++ */
++ PRINT_WARN("Tape length error.\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x38:
++ /*
++ * Physical end of tape. A read/write operation reached
++ * the physical end of tape.
++ */
++ if (request->op==TO_WRI ||
++ request->op==TO_DSE ||
++ request->op==TO_WTM)
++ return tape_34xx_erp_failed(device, request, -ENOSPC);
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x39:
++ /* Backward at Beginning of tape. */
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x3a:
++ /* Drive switched to not ready. */
++ PRINT_WARN("Drive not ready. Turn the ready/not ready switch "
++ "to ready position and try again.\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x3b:
++ /* Manual rewind or unload. This causes an I/O error. */
++ PRINT_WARN("Medium was rewound or unloaded manually.\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x42:
++ /*
++ * Degraded mode. A condition that can cause degraded
++ * performance is detected.
++ */
++ PRINT_WARN("Subsystem is running in degraded mode.\n");
++ return tape_34xx_erp_retry(device, request);
++ case 0x43:
++ /* Drive not ready. */
++ tape_med_state_set(device, MS_UNLOADED);
++ /* SMB: some commands do not need a tape inserted */
++ if((sense[1] & SENSE_DRIVE_ONLINE)) {
++ switch(request->op) {
++ case TO_ASSIGN:
++ case TO_UNASSIGN:
++ case TO_DIS:
++ case TO_NOP:
++ return tape_34xx_done(device, request);
++ break;
++ default:
++ break;
++ }
++ }
++ PRINT_WARN("The drive is not ready.\n");
++ return tape_34xx_erp_failed(device, request, -ENOMEDIUM);
++ case 0x44:
++ /* Locate Block unsuccessful. */
++ if (request->op != TO_BLOCK && request->op != TO_LBL)
++ /* No locate block was issued. */
++ return tape_34xx_erp_bug(device, request, sense[3]);
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x45:
++ /* The drive is assigned to a different channel path. */
++ PRINT_WARN("The drive is assigned elsewhere.\n");
++ TAPE_SET_STATE(device, TAPE_STATUS_BOXED);
++ return tape_34xx_erp_failed(device, request, -EPERM);
++ case 0x46:
++ /*
++ * Drive not on-line. Drive may be switched offline,
++ * the power supply may be switched off or
++ * the drive address may not be set correctly.
++ */
++ PRINT_WARN("The drive is not on-line.");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x47:
++ /* Volume fenced. CU reports volume integrity is lost. */
++ PRINT_WARN("Volume fenced. The volume integrity is lost because\n");
++ PRINT_WARN("assignment or tape position was lost.\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x48:
++ /* Log sense data and retry request. */
++ return tape_34xx_erp_retry(device, request);
++ case 0x49:
++ /* Bus out check. A parity check error on the bus was found. */
++ PRINT_WARN("Bus out check. A data transfer over the bus "
++ "has been corrupted.\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x4a:
++ /* Control unit erp failed. */
++ PRINT_WARN("The control unit I/O error recovery failed.\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x4b:
++ /*
++ * CU and drive incompatible. The drive requests micro-program
++ * patches, which are not available on the CU.
++ */
++ PRINT_WARN("The drive needs microprogram patches from the "
++ "control unit, which are not available.\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x4c:
++ /*
++ * Recovered Check-One failure. The CU developed a hardware
++ * error, but was able to recover.
++ */
++ return tape_34xx_erp_retry(device, request);
++ case 0x4d:
++ if (device->discipline->cu_type == 0x3490)
++ /*
++ * Resetting event received. Since the driver does
++ * not support resetting event recovery (which has to
++ * be handled by the I/O Layer), retry our command.
++ */
++ return tape_34xx_erp_retry(device, request);
++ /* This erpa is reserved for 3480. */
++ return tape_34xx_erp_bug(device, request, sense[3]);
++ case 0x4e:
++ if (device->discipline->cu_type == 0x3490) {
++ /*
++ * Maximum block size exceeded. This indicates that
++ * the block to be written is larger than allowed for
++ * buffered mode.
++ */
++ PRINT_WARN("Maximum block size for buffered "
++ "mode exceeded.\n");
++ return tape_34xx_erp_failed(device, request, -ENOBUFS);
++ }
++ /* This erpa is reserved for 3480. */
++ return tape_34xx_erp_bug(device, request, sense[3]);
++ case 0x50:
++ /*
++ * Read buffered log (Overflow). CU is running in extended
++ * buffered log mode, and a counter overflows. This should
++ * never happen, since we're never running in extended
++ * buffered log mode.
++ */
++ return tape_34xx_erp_retry(device, request);
++ case 0x51:
++ /*
++ * Read buffered log (EOV). EOF processing occurs while the
++ * CU is in extended buffered log mode. This should never
++ * happen, since we're never running in extended buffered
++ * log mode.
++ */
++ return tape_34xx_erp_retry(device, request);
++ case 0x52:
++ /* End of Volume complete. Rewind unload completed ok. */
++ if (request->op == TO_RUN) {
++ /* SMB */
++ tape_med_state_set(device, MS_UNLOADED);
++ return tape_34xx_erp_succeeded(device, request);
++ }
++ return tape_34xx_erp_bug(device, request, sense[3]);
++ case 0x53:
++ /* Global command intercept. */
++ return tape_34xx_erp_retry(device, request);
++ case 0x54:
++ /* Channel interface recovery (temporary). */
++ return tape_34xx_erp_retry(device, request);
++ case 0x55:
++ /* Channel interface recovery (permanent). */
++ PRINT_WARN("A permanent channel interface error occurred.\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x56:
++ /* Channel protocol error. */
++ PRINT_WARN("A channel protocol error occurred.\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x57:
++ if (device->discipline->cu_type == 0x3480) {
++ /* Attention intercept. */
++ PRINT_WARN("An attention intercept occurred, "
++ "which will be recovered.\n");
++ return tape_34xx_erp_retry(device, request);
++ } else {
++ /* Global status intercept. */
++ PRINT_WARN("An global status intercept was received, "
++ "which will be recovered.\n");
++ return tape_34xx_erp_retry(device, request);
++ }
++ case 0x5a:
++ /*
++ * Tape length incompatible. The tape inserted is too long,
++ * which could cause damage to the tape or the drive.
++ */
++ PRINT_WARN("Tape length incompatible [should be IBM Cartridge "
++ "System Tape]. May cause damage to drive or tape.\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x5b:
++ /* Format 3480 XF incompatible */
++ if (sense[1] & SENSE_BEGINNING_OF_TAPE)
++ /* The tape will get overwritten. */
++ return tape_34xx_erp_retry(device, request);
++ PRINT_WARN("Tape format is incompatible to the drive, "
++ "which writes 3480-2 XF.\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x5c:
++ /* Format 3480-2 XF incompatible */
++ PRINT_WARN("Tape format is incompatible to the drive. "
++ "The drive cannot access 3480-2 XF volumes.\n");
++ return tape_34xx_erp_failed(device, request, -EIO);
++ case 0x5d:
++ /* Tape length violation. */
++ PRINT_WARN("Tape length violation [should be IBM Enhanced "
++ "Capacity Cartridge System Tape]. May cause "
++ "damage to drive or tape.\n");
++ return tape_34xx_erp_failed(device, request, -EMEDIUMTYPE);
++ case 0x5e:
++ /* Compaction algorithm incompatible. */
++ PRINT_WARN("The volume is recorded using an incompatible "
++ "compaction algorithm, which is not supported by "
++ "the control unit.\n");
++ return tape_34xx_erp_failed(device, request, -EMEDIUMTYPE);
++
++ /* The following erpas should have been covered earlier. */
++ case 0x23: /* Read data check. */
++ case 0x25: /* Write data check. */
++ case 0x26: /* Data check (read opposite). */
++ case 0x28: /* Write id mark check. */
++ case 0x31: /* Tape void. */
++ case 0x40: /* Overrun error. */
++ case 0x41: /* Record sequence error. */
++ /* All other erpas are reserved for future use. */
++ default:
++ return tape_34xx_erp_bug(device, request, sense[3]);
++ }
++}
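
As a side note, the byte-0 tests scattered through this routine all use the
SENSE_* masks from tape_std.h; a hedged, purely illustrative decoder (using
the driver's PRINT_INFO convention):

    /* Hedged sketch: decode the byte-0 sense flags tested above using
     * the SENSE_* masks from tape_std.h. Purely illustrative. */
    static void dump_sense_byte0(unsigned char s0)
    {
            if (s0 & SENSE_COMMAND_REJECT)
                    PRINT_INFO("sense: command reject\n");
            if (s0 & SENSE_INTERVENTION_REQUIRED)
                    PRINT_INFO("sense: intervention required\n");
            if (s0 & SENSE_BUS_OUT_CHECK)
                    PRINT_INFO("sense: bus out check\n");
            if (s0 & SENSE_EQUIPMENT_CHECK)
                    PRINT_INFO("sense: equipment check\n");
            if (s0 & SENSE_DATA_CHECK)
                    PRINT_INFO("sense: data check\n");
            if (s0 & SENSE_OVERRUN)
                    PRINT_INFO("sense: overrun\n");
    }
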
++
++/*
++ * 3480/3490 interrupt handler
++ */
++static int
++tape_34xx_irq(struct tape_device *device, struct tape_request *request)
++{
++ if (request == NULL)
++ return tape_34xx_unsolicited_irq(device);
++
++ if ((device->devstat.dstat & DEV_STAT_UNIT_EXCEP) &&
++ (device->devstat.dstat & DEV_STAT_DEV_END) &&
++ (request->op == TO_WRI)) {
++ /* Write at end of volume */
++ PRINT_INFO("End of volume\n"); /* XXX */
++ return tape_34xx_erp_failed(device, request, -ENOSPC);
++ }
++
++ if ((device->devstat.dstat & DEV_STAT_UNIT_EXCEP) &&
++ (request->op == TO_BSB || request->op == TO_FSB))
++ DBF_EVENT(5, "Skipped over tapemark\n");
++
++ if (device->devstat.dstat & DEV_STAT_UNIT_CHECK)
++ return tape_34xx_unit_check(device, request);
++
++ if (device->devstat.dstat & DEV_STAT_DEV_END)
++ return tape_34xx_done(device, request);
++
++ DBF_EVENT(6, "xunknownirq\n");
++ PRINT_ERR("Unexpected interrupt.\n");
++ PRINT_ERR("Current op is: %s", tape_op_verbose[request->op]);
++ tape_dump_sense(device, request);
++ return TAPE_IO_STOP;
++}
++
++/*
++ * ioctl_overload
++ */
++static int
++tape_34xx_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
++{
++ if (cmd == TAPE390_DISPLAY) {
++ struct display_struct disp;
++
++ if(copy_from_user(&disp, (char *) arg, sizeof(disp)) != 0)
++ return -EFAULT;
++
++ return tape_std_display(device, &disp);
++ } else
++ return -EINVAL;
++}
++
++static int
++tape_34xx_setup_device(struct tape_device * device)
++{
++ struct tape_34xx_discdata *discdata;
++
++ DBF_EVENT(5, "tape_34xx_setup_device(%p)\n", device);
++ DBF_EVENT(6, "34xx minor1: %x\n", device->first_minor);
++ discdata = kmalloc(sizeof(struct tape_34xx_discdata), GFP_ATOMIC);
++ if(discdata) {
++ memset(discdata, 0, sizeof(struct tape_34xx_discdata));
++ INIT_LIST_HEAD(&discdata->sbid_list);
++ device->discdata = discdata;
++ }
++
++ if(!TAPE_BOXED(device))
++ tape_34xx_medium_sense(device);
++ return 0;
++}
++
++static void
++tape_34xx_cleanup_device(struct tape_device * device)
++{
++ if (device->discdata) {
++ tape_34xx_clear_sbid_list(device);
++ kfree(device->discdata);
++ device->discdata = NULL;
++ }
++}
++
++/*
++ * Build up the lookup table...
++ */
++static void
++tape_34xx_add_sbid(struct tape_device *device, struct tape_34xx_block_id bid)
++{
++ struct tape_34xx_discdata * discdata = device->discdata;
++ struct sbid_entry * new;
++ struct sbid_entry * cur;
++ struct list_head * l;
++
++ if(discdata == NULL)
++ return;
++ if((new = kmalloc(sizeof(struct sbid_entry), GFP_ATOMIC)) == NULL)
++ return;
++
++ new->bid = bid;
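++	/* The format id is irrelevant for the lookup and is cleared. */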
++ new->bid.tbi_format = 0;
++
++ /*
++	 * Search for the position at which to insert the new entry. It is
++	 * possible that no new entry is needed and only the block number of
++	 * an existing entry has to be lowered to better approximate the
++	 * logical block where a segment starts.
++ */
++ list_for_each(l, &discdata->sbid_list) {
++ cur = list_entry(l, struct sbid_entry, list);
++
++ /*
++ * If the current entry has the same segment and wrap, then
++ * there is no new entry needed. Only the block number of the
++ * current entry might be adjusted to reflect an earlier start
++ * of the segment.
++ */
++ if(
++ (cur->bid.tbi_segment == new->bid.tbi_segment) &&
++ (cur->bid.tbi_wrap == new->bid.tbi_wrap)
++ ) {
++ if(new->bid.tbi_block < cur->bid.tbi_block) {
++ cur->bid.tbi_block = new->bid.tbi_block;
++ }
++ kfree(new);
++ break;
++ }
++
++ /*
++		 * Otherwise the list is sorted by block number, which is
++		 * always ascending, while the segment number decreases on
++		 * the second wrap.
++ */
++ if(cur->bid.tbi_block > new->bid.tbi_block) {
++ list_add_tail(&new->list, l);
++ break;
++ }
++ }
++
++ /*
++	 * If the loop completed without merging or adding an entry,
++	 * append the new entry to the end of the list.
++ */
++ if(l == &discdata->sbid_list) {
++ list_add_tail(&new->list, &discdata->sbid_list);
++ }
++
++ list_for_each(l, &discdata->sbid_list) {
++ cur = list_entry(l, struct sbid_entry, list);
++
++ DBF_EVENT(3, "sbid_list(%03i:%1i:%08i)\n",
++ cur->bid.tbi_segment, cur->bid.tbi_wrap,
++ cur->bid.tbi_block);
++ }
++
++ return;
++}
++
++/*
++ * Fill hardware positioning information into the given block id. With this
++ * information seeks don't have to go back to the beginning of the tape and
++ * run at higher speed because the drive can head straight for the vicinity
++ * of the segment.
++ *
++ * The caller must have set tbi_block.
++ */
++static void
++tape_34xx_merge_sbid(
++ struct tape_device * device,
++ struct tape_34xx_block_id * bid
++) {
++ struct tape_34xx_discdata * discdata = device->discdata;
++ struct sbid_entry * cur;
++ struct list_head * l;
++
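++	/* Default to a seek from the beginning of the tape. */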
++ bid->tbi_wrap = 0;
++ bid->tbi_segment = 1;
++ bid->tbi_format = (*device->modeset_byte & 0x08) ?
++ TBI_FORMAT_3480_XF : TBI_FORMAT_3480;
++
++ if(discdata == NULL)
++ goto tape_34xx_merge_sbid_exit;
++ if(list_empty(&discdata->sbid_list))
++ goto tape_34xx_merge_sbid_exit;
++
++ list_for_each(l, &discdata->sbid_list) {
++ cur = list_entry(l, struct sbid_entry, list);
++
++ if(cur->bid.tbi_block > bid->tbi_block)
++ break;
++ }
++
++ /* If block comes before first entries block, use seek from start. */
++ if(l->prev == &discdata->sbid_list)
++ goto tape_34xx_merge_sbid_exit;
++
++ cur = list_entry(l->prev, struct sbid_entry, list);
++ bid->tbi_wrap = cur->bid.tbi_wrap;
++ bid->tbi_segment = cur->bid.tbi_segment;
++
++tape_34xx_merge_sbid_exit:
++ DBF_EVENT(6, "merged_bid = %08x\n", *((unsigned int *) bid));
++ return;
++}
++
++static void
++tape_34xx_clear_sbid_list(struct tape_device *device)
++{
++ struct list_head * l;
++ struct list_head * n;
++ struct tape_34xx_discdata * discdata;
++
++ if((discdata = device->discdata) == NULL)
++ return;
++
++ list_for_each_safe(l, n, &discdata->sbid_list) {
++ list_del(l);
++ kfree(list_entry(l, struct sbid_entry, list));
++ }
++}
++
++/*
++ * MTTELL: Tell block. Return the block number relative to the current file.
++ */
++int
++tape_34xx_mttell(struct tape_device *device, int mt_count)
++{
++ struct tape_34xx_block_id bid;
++ int rc;
++
++ rc = tape_std_read_block_id(device, (unsigned int *) &bid);
++ if (rc)
++ return rc;
++
++ /*
++	 * Build up a lookup table. The format id is ignored.
++ */
++ tape_34xx_add_sbid(device, bid);
++
++ return bid.tbi_block;
++}
++
++/*
++ * MTSEEK: seek to the specified block.
++ */
++int
++tape_34xx_mtseek(struct tape_device *device, int mt_count)
++{
++ struct tape_34xx_block_id bid;
++
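++	/* Reject block numbers that don't fit into the block id's block field. */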
++ if (mt_count > 0x400000) {
++ DBF_EXCEPTION(6, "xsee parm\n");
++ return -EINVAL;
++ }
++
++ bid.tbi_block = mt_count;
++
++ /*
++ * Set hardware seek information in the block id.
++ */
++ tape_34xx_merge_sbid(device, &bid);
++
++ return tape_std_seek_block_id(device, *((unsigned int *) &bid));
++}
++
++/*
++ * Tape block read for 34xx.
++ */
++#ifdef CONFIG_S390_TAPE_BLOCK
++struct tape_request *
++tape_34xx_bread(struct tape_device *device, struct request *req)
++{
++ struct tape_request *request;
++ struct buffer_head *bh;
++ ccw1_t *ccw;
++ int count;
++ int size;
++
++ DBF_EVENT(6, "tape_34xx_bread(sector=%u,size=%u)\n",
++ req->sector, req->nr_sectors);
++
++ /* Count the number of blocks for the request. */
++ count = 0;
++ size = 0;
++ for(bh = req->bh; bh; bh = bh->b_reqnext) {
++ for(size = 0; size < bh->b_size; size += TAPEBLOCK_HSEC_SIZE)
++ count++;
++ }
++
++ /* Allocate the ccw request. */
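++	/* One mode set ccw, two nop slots, one read per block, one final nop. */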
++ request = tape_alloc_request(3+count+1, 8);
++ if (IS_ERR(request))
++ return request;
++
++ /*
++ * Setup the tape block id to start the read from. The block number
++ * is later compared to the current position to decide whether a
++ * locate block is required. If one is needed this block id is used
++ * to locate it.
++ */
++ ((struct tape_34xx_block_id *) request->cpdata)->tbi_block =
++ req->sector >> TAPEBLOCK_HSEC_S2B;
++
++ /* Setup ccws. */
++ request->op = TO_BLOCK;
++ ccw = request->cpaddr;
++ ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte);
++
++ /*
++	 * We always set up a nop after the mode set ccw. This slot is
++ * used in tape_std_check_locate to insert a locate ccw if the
++ * current tape position doesn't match the start block to be read.
++ * The second nop will be filled with a read block id which is in
++ * turn used by tape_34xx_free_bread to populate the segment bid
++ * table.
++ */
++ ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
++ ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
++
++ for(bh = req->bh; bh; bh = bh->b_reqnext) {
++ for(size = 0; size < bh->b_size; size += TAPEBLOCK_HSEC_SIZE) {
++ ccw->flags = CCW_FLAG_CC;
++ ccw->cmd_code = READ_FORWARD;
++ ccw->count = TAPEBLOCK_HSEC_SIZE;
++ set_normalized_cda(ccw, (void *) __pa(bh->b_data+size));
++ ccw++;
++ }
++ }
++
++ ccw = tape_ccw_end(ccw, NOP, 0, NULL);
++
++ return request;
++}
++
++void
++tape_34xx_free_bread (struct tape_request *request)
++{
++ ccw1_t* ccw = request->cpaddr;
++
++ if((ccw + 2)->cmd_code == READ_BLOCK_ID) {
++ struct {
++ struct tape_34xx_block_id channel_block_id;
++ struct tape_34xx_block_id device_block_id;
++ } __attribute__ ((packed)) *rbi_data;
++
++ rbi_data = request->cpdata;
++
++ if(!request->device)
++ DBF_EVENT(6, "tape_34xx_free_bread: no device!\n");
++ DBF_EVENT(6, "tape_34xx_free_bread: update_sbid\n");
++ tape_34xx_add_sbid(
++ request->device,
++ rbi_data->channel_block_id
++ );
++ } else {
++ DBF_EVENT(3, "tape_34xx_free_bread: no block info\n");
++ }
++
++ /* Last ccw is a nop and doesn't need clear_normalized_cda */
++ for (ccw = request->cpaddr; ccw->flags & CCW_FLAG_CC; ccw++)
++ if (ccw->cmd_code == READ_FORWARD)
++ clear_normalized_cda(ccw);
++ tape_put_request(request);
++}
++
++/*
++ * check_locate is called just before the tape request is passed to
++ * the common io layer for execution. It has to check the current
++ * tape position and insert a locate ccw if it doesn't match the
++ * start block for the request.
++ */
++void
++tape_34xx_check_locate(struct tape_device *device, struct tape_request *request)
++{
++ struct tape_34xx_block_id *id;
++ struct tape_34xx_block_id *start;
++
++ id = (struct tape_34xx_block_id *) request->cpdata;
++
++ /*
++ * The tape is already at the correct position. No seek needed.
++ */
++ if (id->tbi_block == device->blk_data.block_position)
++ return;
++
++ /*
++	 * If the block device image doesn't start at the beginning of the
++	 * tape, adjust the block number for the locate request.
++ */
++ start = (struct tape_34xx_block_id *) &device->blk_data.start_block_id;
++ if(start->tbi_block)
++ id->tbi_block = id->tbi_block + start->tbi_block;
++
++ /*
++ * Merge HW positioning information to the block id. This information
++ * is used by the device for faster seeks.
++ */
++ tape_34xx_merge_sbid(device, id);
++
++ /*
++	 * Transform the first NOP into a LOCATE and the second into a
++	 * READ_BLOCK_ID entry.
++ */
++ tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
++ tape_ccw_cc(request->cpaddr + 2, READ_BLOCK_ID, 8, request->cpdata);
++
++ return;
++}
++#endif
++
++static int
++tape_34xx_mtweof(struct tape_device *device, int count)
++{
++ tape_34xx_clear_sbid_list(device);
++ return tape_std_mtweof(device, count);
++}
++
++/*
++ * List of 3480/3490 magnetic tape commands.
++ */
++static tape_mtop_fn tape_34xx_mtop[TAPE_NR_MTOPS] =
++{
++ [MTRESET] = tape_std_mtreset,
++ [MTFSF] = tape_std_mtfsf,
++ [MTBSF] = tape_std_mtbsf,
++ [MTFSR] = tape_std_mtfsr,
++ [MTBSR] = tape_std_mtbsr,
++ [MTWEOF] = tape_34xx_mtweof,
++ [MTREW] = tape_std_mtrew,
++ [MTOFFL] = tape_std_mtoffl,
++ [MTNOP] = tape_std_mtnop,
++ [MTRETEN] = tape_std_mtreten,
++ [MTBSFM] = tape_std_mtbsfm,
++ [MTFSFM] = tape_std_mtfsfm,
++ [MTEOM] = tape_std_mteom,
++ [MTERASE] = tape_std_mterase,
++ [MTRAS1] = NULL,
++ [MTRAS2] = NULL,
++ [MTRAS3] = NULL,
++ [MTSETBLK] = tape_std_mtsetblk,
++ [MTSETDENSITY] = NULL,
++ [MTSEEK] = tape_34xx_mtseek,
++ [MTTELL] = tape_34xx_mttell,
++ [MTSETDRVBUFFER] = NULL,
++ [MTFSS] = NULL,
++ [MTBSS] = NULL,
++ [MTWSM] = NULL,
++ [MTLOCK] = NULL,
++ [MTUNLOCK] = NULL,
++ [MTLOAD] = tape_std_mtload,
++ [MTUNLOAD] = tape_std_mtunload,
++ [MTCOMPRESSION] = tape_std_mtcompression,
++ [MTSETPART] = NULL,
++ [MTMKPART] = NULL
++};
++
++/*
++ * Tape discipline structures for 3480 and 3490.
++ */
++static struct tape_discipline tape_discipline_3480 = {
++ .owner = THIS_MODULE,
++ .cu_type = 0x3480,
++ .setup_device = tape_34xx_setup_device,
++ .cleanup_device = tape_34xx_cleanup_device,
++ .process_eov = tape_std_process_eov,
++ .irq = tape_34xx_irq,
++ .read_block = tape_std_read_block,
++ .write_block = tape_std_write_block,
++ .assign = tape_std_assign,
++ .unassign = tape_std_unassign,
++#ifdef TAPE390_FORCE_UNASSIGN
++ .force_unassign = tape_std_force_unassign,
++#endif
++#ifdef CONFIG_S390_TAPE_BLOCK
++ .bread = tape_34xx_bread,
++ .free_bread = tape_34xx_free_bread,
++ .check_locate = tape_34xx_check_locate,
++#endif
++ .ioctl_fn = tape_34xx_ioctl,
++ .mtop_array = tape_34xx_mtop
++};
++
++static struct tape_discipline tape_discipline_3490 = {
++ .owner = THIS_MODULE,
++ .cu_type = 0x3490,
++ .setup_device = tape_34xx_setup_device,
++ .cleanup_device = tape_34xx_cleanup_device,
++ .process_eov = tape_std_process_eov,
++ .irq = tape_34xx_irq,
++ .read_block = tape_std_read_block,
++ .write_block = tape_std_write_block,
++ .assign = tape_std_assign,
++ .unassign = tape_std_unassign,
++#ifdef TAPE390_FORCE_UNASSIGN
++ .force_unassign = tape_std_force_unassign,
++#endif
++#ifdef CONFIG_S390_TAPE_BLOCK
++ .bread = tape_34xx_bread,
++ .free_bread = tape_34xx_free_bread,
++ .check_locate = tape_34xx_check_locate,
++#endif
++ .ioctl_fn = tape_34xx_ioctl,
++ .mtop_array = tape_34xx_mtop
++};
++
++int
++tape_34xx_init (void)
++{
++ int rc;
++
++ TAPE_DBF_AREA = debug_register ( "tape_34xx", 1, 2, 4*sizeof(long));
++ debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
++
++ DBF_EVENT(3, "34xx init: $Revision: 1.9.4.5 $\n");
++ /* Register discipline. */
++ rc = tape_register_discipline(&tape_discipline_3480);
++ if (rc == 0) {
++ rc = tape_register_discipline(&tape_discipline_3490);
++ if (rc)
++ tape_unregister_discipline(&tape_discipline_3480);
++ }
++ if (rc)
++ DBF_EVENT(3, "34xx init failed\n");
++ else
++ DBF_EVENT(3, "34xx registered\n");
++ return rc;
++}
++
++void
++tape_34xx_exit(void)
++{
++ tape_unregister_discipline(&tape_discipline_3480);
++ tape_unregister_discipline(&tape_discipline_3490);
++ debug_unregister(TAPE_DBF_AREA);
++}
++
++MODULE_AUTHOR("(C) 2001-2002 IBM Deutschland Entwicklung GmbH");
++MODULE_DESCRIPTION("Linux on zSeries channel attached 3480 tape "
++ "device driver ($Revision: 1.9.4.5 $)");
++MODULE_LICENSE("GPL");
++
++module_init(tape_34xx_init);
++module_exit(tape_34xx_exit);
+=== drivers/s390/char/tuball.c
+==================================================================
+--- drivers/s390/char/tuball.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tuball.c (/trunk/2.4.27) (revision 52)
+@@ -29,10 +29,8 @@
+ MODULE_PARM(tubdebug, "i");
+ MODULE_PARM(tubscrolltime, "i");
+ MODULE_PARM(tubxcorrect, "i");
+-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,12))
+-MODULE_LICENSE ("GPL");
++MODULE_LICENSE("GPL");
+ #endif
+-#endif
+ /*
+ * Values for tubdebug and their effects:
+ * 1 - print in hex on console the first 16 bytes received
+=== drivers/s390/char/vmlogrdr.c
+==================================================================
+--- drivers/s390/char/vmlogrdr.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/vmlogrdr.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,741 @@
++/*
++ * drivers/s390/char/vmlogrdr.c
++ * character device driver for reading z/VM system service records
++ *
++ *
++ * Copyright (C) 2004 IBM Corporation
++ * character device driver for reading z/VM system service records,
++ * Version 1.0
++ * Author(s): Xenia Tkatschow <xenia at us.ibm.com>
++ * Stefan Weinhuber <wein at de.ibm.com>
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <asm/atomic.h>
++#include <asm/uaccess.h>
++#include <asm/cpcmd.h>
++#include <asm/debug.h>
++#include <asm/ebcdic.h>
++#include "../net/iucv.h"
++#include <linux/kmod.h>
++
++
++
++#define MAXSERVICES 1
++
++enum vmlogrdr_hotplug_action {
++ VMLOGRDR_HOTPLUG_ADD=0,
++ VMLOGRDR_HOTPLUG_REMOVE=1
++};
++
++MODULE_AUTHOR
++ ("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia at us.ibm.com)\n"
++ " Stefan Weinhuber (wein at de.ibm.com)");
++MODULE_DESCRIPTION ("Character device driver for reading z/VM "
++ "system service records.");
++MODULE_PARM (services, "1-"__MODULE_STRING(MAXSERVICES)"s");
++MODULE_PARM_DESC (services,
++ "Specify the system services\n"
++ "services=system_service0,system_service1,..,"
++ "system_serviceN\n");
++MODULE_LICENSE("GPL");
++
++static char *services[MAXSERVICES];
++
++char userid[9];
++
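++/* "EOR" plus its terminating zero forms the 4-byte record fence. */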
++static char FENCE[] = {"EOR"};
++static int logreader_major = 0;
++
++static int logreader_open(struct inode *, struct file *);
++static int logreader_release(struct inode *, struct file *);
++static ssize_t logreader_read (struct file *filp, char *data, size_t count,
++ loff_t * ppos);
++
++
++
++/*
++ * File operation structures for logreader devices
++ */
++static struct file_operations logreader_fops = {
++	.owner = THIS_MODULE,
++ .open = logreader_open,
++ .release = logreader_release,
++ .read = logreader_read,
++};
++
++
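++/* "0       0       " in EBCDIC: 0xf0 is '0', 0x40 is a blank. */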
++static __u8 iucvMagic[16] = {
++ 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
++ 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
++};
++
++static __u8 mask[] = {
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
++};
++
++static __u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
++
++
++static void
++logreader_iucv_ConnectionComplete(iucv_ConnectionComplete *eib,
++ void *pgm_data);
++
++static void
++logreader_iucv_ConnectionSevered(iucv_ConnectionSevered *eib,
++ void *pgm_data);
++
++static void
++logreader_iucv_MessagePending(iucv_MessagePending *eib,
++ void *pgm_data);
++
++/*
++ * Name: iucv_interrupt_ops_t
++ * Description: Defines the iucv interrupt routines that the IUCV driver
++ *              will call when there is an interrupt for the logreader
++ *              character device driver
++ * Members:
++ *   ConnectionComplete: This routine will be called by the IUCV driver
++ *              once the connection to the system service is established.
++ *   ConnectionSevered: This routine will be called by the IUCV driver
++ *              if the system service severs the iucv connection.
++ *   MessagePending: This routine will be called by the IUCV driver
++ *              when it receives a message pending interrupt.
++ * Note: The remaining routines are not defined as we do not expect to
++ * handle these situations.
++ */
++static iucv_interrupt_ops_t logreader_iucvops = {
++	.ConnectionComplete = logreader_iucv_ConnectionComplete,
++	.ConnectionSevered = logreader_iucv_ConnectionSevered,
++	.MessagePending = logreader_iucv_MessagePending,
++};
++
++
++
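++/* 4096-byte buffer minus 4 bytes of length header and 4 bytes of fence. */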
++#define buflen 4088
++
++struct logreader_priv_t {
++ char system_service[8];
++ u16 pathid;
++ int connection_established;
++ int iucv_path_severed;
++ iucv_MessagePending local_interrupt_buffer;
++ atomic_t receive_ready;
++ iucv_handle_t iucv_handle;
++ int minor_num;
++ char * buffer;
++ char * current_position;
++ int remaining;
++ ulong residual_length;
++ int buffer_free;
++	int dev_in_use; /* 1: already opened, 0: not opened */
++ spinlock_t priv_lock;
++};
++
++DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
++DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
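++/* conn_wait_queue: the connection completed or was severed.
++ * read_wait_queue: a message pending interrupt arrived. */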
++
++/*
++ * pointer to system service private structure
++ * minor number 0 --> logrec
++ * minor number 1 --> account -> not yet implemented
++ * minor number 2 --> symptom -> not yet implemented
++ */
++#define MAXMINOR 1
++static struct logreader_priv_t * sys_ser[MAXMINOR];
++
++
++static void
++logreader_iucv_ConnectionComplete (iucv_ConnectionComplete * eib,
++ void * pgm_data)
++{
++ struct logreader_priv_t * logptr =
++ (struct logreader_priv_t *) pgm_data;
++ spin_lock(&logptr->priv_lock);
++ logptr->connection_established = 1;
++ spin_unlock(&logptr->priv_lock);
++ wake_up(&conn_wait_queue);
++ return;
++}
++
++
++static void
++logreader_iucv_ConnectionSevered (iucv_ConnectionSevered * eib, void * pgm_data)
++{
++
++ u8 reason = (u8) eib->ipuser[8];
++ struct logreader_priv_t * logptr = (struct logreader_priv_t *) pgm_data;
++
++ printk (KERN_ERR "vmlogrdr: connection severed with"
++ " reason %i/n", reason);
++
++ spin_lock(&logptr->priv_lock);
++ logptr->connection_established = 0;
++ logptr->iucv_path_severed = 1;
++ spin_unlock(&logptr->priv_lock);
++
++ wake_up(&conn_wait_queue);
++ /* just in case we're sleeping waiting for a record */
++ wake_up_interruptible(&read_wait_queue);
++
++ return;
++}
++
++
++static void
++logreader_iucv_MessagePending (iucv_MessagePending * eib, void * pgm_data)
++{
++ struct logreader_priv_t * logptr = (struct logreader_priv_t *) pgm_data;
++
++ /*
++ * This function is the bottom half so it should be quick.
++ * Copy the external interrupt data into our local eib and increment
++ * the usage count
++ */
++ spin_lock(&logptr->priv_lock);
++ memcpy(&(logptr->local_interrupt_buffer), eib,
++ sizeof(iucv_MessagePending));
++ atomic_inc(&logptr->receive_ready);
++ spin_unlock(&logptr->priv_lock);
++
++ wake_up_interruptible(&read_wait_queue);
++
++ return;
++}
++
++static int
++logreader_open (struct inode *inode, struct file *filp)
++{
++ int dev_num = 0;
++ struct logreader_priv_t * logptr = NULL;
++ int connect_rc = 0;
++
++ char cp_command[80];
++ char cp_response[80];
++ unsigned long flags;
++
++ /* extract the minor number */
++ dev_num = MINOR(inode->i_rdev);
++
++
++	/* locate the private structure for dev_num */
++	if (dev_num >= MAXMINOR)
++ return -ENODEV;
++
++ logptr = sys_ser[dev_num];
++ if (logptr == NULL)
++ return -ENODEV;
++
++ /*
++	 * only allow the device to be opened for blocking reads
++ */
++ if (filp->f_flags & O_NONBLOCK)
++ return -ENOSYS;
++
++	/* Make sure this device hasn't already been opened */
++ spin_lock_irqsave(&logptr->priv_lock, flags);
++ if (logptr->dev_in_use) {
++ spin_unlock_irqrestore(&logptr->priv_lock, flags);
++ return -EBUSY;
++ } else {
++ logptr->dev_in_use = 1;
++ spin_unlock_irqrestore(&logptr->priv_lock, flags);
++ }
++
++ /* set the file options */
++ filp->private_data = logptr;
++ filp->f_op = &logreader_fops;
++
++
++ /*
++ * Purge "old" CP records that have been collected but not retrieved and
++ * request CP to begin collecting records on behalf of this virtual
++ * machine.
++ * "RECORDING EREP ON QID userid PURGE"
++ */
++ memset(cp_command, 0x00, sizeof(cp_command));
++ memset(cp_response, 0x00, sizeof(cp_response));
++
++ /* Set up CP command depending on the system service */
++ switch(dev_num) {
++ case 0:
++ sprintf(cp_command, "RECORDING EREP ON QID %s PURGE",
++ userid);
++ break;
++ default:
++ return -ENODEV;
++ }
++
++ cpcmd(cp_command, cp_response, 80);
++
++ /* Register with iucv driver */
++ logptr->iucv_handle = iucv_register_program(iucvMagic,
++ logptr->system_service, mask, &logreader_iucvops,
++ logptr);
++
++ if (logptr->iucv_handle == NULL) {
++ printk (KERN_ERR "vmlogrdr: failed to register with"
++ "iucv driver\n");
++
++ /* Set up CP command depending on the system service */
++ memset(cp_command, 0x00, sizeof(cp_command));
++ memset(cp_response, 0x00, sizeof(cp_response));
++ switch(dev_num) {
++ case 0:
++ sprintf(cp_command,"RECORDING EREP OFF QID %s ",
++ userid);
++ break;
++ default:
++ return -ENODEV;
++ }
++ cpcmd(cp_command, cp_response, 80);
++ logptr->dev_in_use = 0;
++ return -EIO;
++ }
++
++ /* create connection to the system service */
++ spin_lock_irqsave(&logptr->priv_lock, flags);
++ logptr->connection_established = 0;
++ logptr->iucv_path_severed = 0;
++ spin_unlock_irqrestore(&logptr->priv_lock, flags);
++
++ connect_rc = iucv_connect (&(logptr->pathid), 10, iucvMagic,
++ logptr->system_service, iucv_host, 0,
++ NULL, NULL,
++ logptr->iucv_handle, NULL);
++ if (connect_rc) {
++ printk (KERN_ERR "vmlogrdr: iucv connection to %s "
++ "failed with rc %i \n", logptr->system_service,
++ connect_rc);
++
++ /* Set up CP command depending on the system service */
++ memset(cp_command, 0x00, sizeof(cp_command));
++ memset(cp_response, 0x00, sizeof(cp_response));
++ switch(dev_num) {
++ case 0:
++ sprintf(cp_command, "RECORDING EREP OFF QID %s ",
++ userid);
++ break;
++ default:
++ return -ENODEV;
++ }
++ cpcmd(cp_command, cp_response, 80);
++
++ iucv_unregister_program(logptr->iucv_handle);
++ logptr->iucv_handle = NULL;
++ logptr->dev_in_use = 0;
++ return -EIO;
++ }
++
++ /* We've issued the connect and now we must wait for a
++	 * ConnectionComplete or ConnectionSevered interrupt
++ * before we can continue to process.
++ * note: When the condition in the second parameter is true,
++ * we'll wake up
++ */
++ wait_event(conn_wait_queue, (logptr->connection_established)
++ || (logptr->iucv_path_severed));
++
++
++ if (logptr->iucv_path_severed) {
++ iucv_unregister_program(logptr->iucv_handle);
++ logptr->iucv_handle = NULL;
++
++ memset(cp_command, 0x00, sizeof(cp_command));
++ memset(cp_response, 0x00, sizeof(cp_response));
++
++ switch(dev_num) {
++ case 0:
++ sprintf(cp_command, "RECORDING EREP OFF QID %s ",
++ userid);
++ break;
++ default:
++ return -ENODEV;
++ }
++
++ cpcmd(cp_command, cp_response, 80);
++
++ logptr->dev_in_use = 0;
++ return -EIO;
++ }
++
++
++ return 0;
++}
++
++static int
++logreader_release (struct inode *inode, struct file *filp)
++{
++ int dev_num = 0;
++ struct logreader_priv_t * logptr =
++ (struct logreader_priv_t *) filp->private_data;
++
++ char cp_command[80];
++ char cp_response[80];
++
++ if (logptr == NULL )
++ return -ENODEV;
++
++ if (logptr->iucv_handle == NULL)
++ return 0;
++
++
++ /* extract the minor number */
++ dev_num = MINOR(inode->i_rdev);
++
++ iucv_unregister_program(logptr->iucv_handle);
++
++ /* Set data structures back to original state
++ * 1. private struct
++ * 2. buffer pointer structure (binary, hex buffers)
++ * 3. buffer
++ */
++ logptr->iucv_handle = NULL;
++
++ memset(&(logptr->local_interrupt_buffer), 0x00,
++ sizeof(iucv_MessagePending));
++ atomic_set(&logptr->receive_ready, 0);
++ logptr->current_position = logptr->buffer;
++ logptr->remaining = 0;
++ logptr->residual_length = 0;
++ logptr->buffer_free = 1;
++
++
++ /* Purge any records remaining in CP storage and turn recording off
++ * "RECORDING record_type OFF QID userid PURGE"
++ */
++ memset(cp_command, 0x00, sizeof(cp_command));
++ memset(cp_response, 0x00, sizeof(cp_response));
++
++ /* Set up the CP command according to system service */
++ switch(dev_num) {
++ case 0:
++ sprintf(cp_command, "RECORDING EREP OFF QID %s PURGE",
++ userid);
++ break;
++ default:
++ return -ENODEV;
++ }
++ cpcmd(cp_command, cp_response, 80);
++
++ logptr->dev_in_use = 0;
++
++ return 0;
++}
++
++
++static ssize_t
++logreader_read (struct file *filp, char *data, size_t count, loff_t * ppos)
++{
++ int rc = 0;
++ struct logreader_priv_t * logptr =
++ (struct logreader_priv_t *) filp->private_data;
++ int new_record = 0;
++ int total_record_length = 0;
++ unsigned long flags;
++ while (logptr->buffer_free) {
++
++		/* assume we're not going to receive a record by
++ * setting rc != 0
++ */
++ rc = 1;
++ spin_lock_irqsave(&logptr->priv_lock, flags);
++ if (atomic_read(&logptr->receive_ready)) {
++ /*
++ * first check whether we need to receive the second
++ * half of a record (residual_length != 0).
++ */
++
++ if (logptr->residual_length){
++ /* collecting part of the record */
++ logptr->remaining = logptr->residual_length;
++ logptr->current_position = logptr->buffer;
++ } else {
++ /* beginning a new record */
++ logptr->remaining = (int)
++ logptr->local_interrupt_buffer.ln1msg2.ipbfln1f;
++ logptr->current_position = logptr->buffer + 4;
++ total_record_length = (int)
++ logptr->local_interrupt_buffer.ln1msg2.ipbfln1f;
++ new_record = 1;
++ }
++ if (logptr->remaining > buflen)
++ logptr->remaining = buflen;
++
++
++ rc = iucv_receive(logptr->pathid,
++ logptr->local_interrupt_buffer.ipmsgid,
++ logptr->local_interrupt_buffer.iptrgcls,
++ logptr->current_position,
++ logptr->remaining,
++ NULL,
++ NULL,
++ &logptr->residual_length);
++
++ if (!logptr->residual_length)
++ atomic_dec(&logptr->receive_ready);
++ }
++ spin_unlock_irqrestore(&logptr->priv_lock, flags);
++
++ if (rc) {
++ wait_event_interruptible(read_wait_queue,
++ atomic_read(&logptr->receive_ready));
++ if (atomic_read(&logptr->receive_ready) == 0)
++ return -ERESTARTSYS; /* woken up by signal */
++
++ } else {
++ /*
++ * just received some data so we must mark the
++ * buffer busy
++ */
++ logptr->buffer_free = 0;
++ if (new_record) {
++ /*
++ * if we just received a new record then we
++			 * need to prepend the length header:
++			 * the first 4 bytes of the buffer hold the
++			 * length field, which covers record + fence.
++ */
++ int * total_record_length_ptr =
++ (int *)logptr->buffer;
++
++ *total_record_length_ptr =
++ total_record_length + 4;
++
++ logptr->remaining += 4;
++ logptr->current_position = logptr->buffer;
++
++ new_record = 0;
++ }
++
++ if (logptr->residual_length == 0){
++ /* the whole record has been captured,
++ * now add the fence */
++ char * temp_position = logptr->current_position
++ + logptr->remaining;
++ memcpy(temp_position, FENCE, sizeof(FENCE));
++ logptr->remaining += 4;
++ }
++ }
++
++ }
++ /* copy only up to end of record */
++ if (count > logptr->remaining)
++ count = logptr->remaining;
++
++ if (copy_to_user(data, logptr->current_position, count))
++ return -EFAULT;
++
++ *ppos += count;
++ logptr->current_position += count;
++ logptr->remaining -= count;
++
++ /* if all data has been transferred, set buffer free */
++ if (logptr->remaining == 0)
++ logptr->buffer_free = 1;
++
++ return count;
++
++
++}
++
++
++static void
++get_vm_usrid (char * userid)
++{
++ char cp_response[81];
++ cpcmd("Q USERID", cp_response, 80);
++ cp_response[80]=0;
++ memcpy(userid, cp_response, 8);
++ printk (KERN_DEBUG "vmlogrdr: VM guest id: %s\n", userid);
++ return;
++}
++
++
++static void
++logreader_hotplug_event(int devmaj, struct logreader_priv_t *logptr,
++ enum vmlogrdr_hotplug_action action) {
++#ifdef CONFIG_HOTPLUG
++ char *argv[3];
++ char *envp[8];
++ char major[20];
++ char minor[20];
++ char service[20];
++ char *firstblank;
++
++ sprintf(major, "MAJOR=%d", devmaj);
++ sprintf(minor, "MINOR=%d", logptr->minor_num);
++	// remember: system_service starts with an '*' and is padded with blanks
++ sprintf(service, "SERVICE=%.7s", logptr->system_service+1);
++ firstblank = strchr(service, ' ');
++ if (firstblank != NULL) {
++ *firstblank=0;
++ }
++
++ argv[0] = hotplug_path;
++ argv[1] = "vmlogrdr";
++ argv[2] = NULL;
++
++ envp[0] = "HOME=/";
++ envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
++
++ switch(action) {
++ case VMLOGRDR_HOTPLUG_ADD:
++ envp[2] = "ACTION=add";
++ break;
++ case VMLOGRDR_HOTPLUG_REMOVE:
++ envp[2] = "ACTION=remove";
++ break;
++ default:
++ BUG();
++ }
++ envp[3] = major;
++ envp[4] = minor;
++ envp[5] = service;
++ envp[6] = NULL;
++
++ call_usermodehelper(argv[0], argv, envp);
++#endif
++}
++
++
++
++static int
++logreader_init(void)
++{
++ char temp_system[9];
++ struct logreader_priv_t * logptr = NULL;
++	int rc = 0;
++	int i;
++	int parm_no;
++	int minor;
++
++ if (! MACHINE_IS_VM) {
++ printk (KERN_ERR "vmlogrdr: not running under VM, "
++ "driver not loaded.\n");
++ return -ENODEV;
++ }
++
++ get_vm_usrid(userid);
++ userid[8] = '\0';
++
++ logreader_major = register_chrdev(logreader_major,"vmlogrdr",
++ &logreader_fops);
++ if (logreader_major < 0 ) {
++ printk (KERN_ERR "vmlogrdr: can't get major %d\n",
++ logreader_major);
++ return -EIO;
++ }
++
++ for (parm_no=0; parm_no < MAXSERVICES; ++parm_no ) {
++ printk (KERN_DEBUG "vmlogrdr: services[%d] == %s \n",parm_no,
++ services[parm_no]);
++		// compare to the known system services (excluding the leading *)
++ // important: the system name needs to be 8 characters,
++ // padded with blanks and not 0.
++ if (memcmp (services[parm_no], "LOGREC", 7) == 0) {
++ memcpy(temp_system, "*LOGREC ", 8);
++ minor = 0;
++ } else if (services[parm_no] == 0) {
++ break;
++ } else {
++ printk (KERN_ERR "vmlogrdr: unknown service: %s \n",
++ services[parm_no] );
++ rc=-EINVAL;
++ goto free_sys_ser;
++ }
++
++ if (sys_ser[minor] == NULL) {
++
++ /* Allocate memory for:
++ * 1 logreader_priv_t
++ * 1 buffer (4096 bytes)
++ */
++ logptr = kmalloc (sizeof(struct logreader_priv_t),
++ GFP_KERNEL);
++ if (!logptr) {
++ rc=-ENOMEM;
++ goto free_sys_ser;
++ }
++ memset(logptr, 0x00, sizeof(struct logreader_priv_t));
++ sys_ser[minor] = logptr;
++
++ /* set all structures */
++ memcpy(logptr->system_service, temp_system, 8);
++ logptr->minor_num = minor;
++ logptr->priv_lock = SPIN_LOCK_UNLOCKED;
++ logptr->buffer_free = 1;
++ logptr->buffer = kmalloc (4096, GFP_KERNEL);
++ if (!logptr->buffer) {
++ kfree(logptr);
++ sys_ser[minor]=0;
++ rc=-ENOMEM;
++ goto free_sys_ser;
++ }
++ memset(logptr->buffer, 0x00, buflen);
++ logptr->current_position = logptr->buffer;
++
++ logreader_hotplug_event(logreader_major, logptr,
++ VMLOGRDR_HOTPLUG_ADD);
++
++ } else {
++ printk (KERN_WARNING "vmlogrdr: Service %s defined more"
++ " then once -> ignore \n", services[parm_no]);
++ }
++ }
++ printk (KERN_INFO "vmlogrdr: driver loaded\n");
++
++ return 0;
++free_sys_ser:
++ for (i=0; i < MAXMINOR; ++i ) {
++ if ( sys_ser[i] != 0 ) {
++ logreader_hotplug_event(logreader_major, sys_ser[i],
++ VMLOGRDR_HOTPLUG_REMOVE);
++ kfree (sys_ser[i]->buffer);
++ kfree (sys_ser[i]);
++ }
++ }
++
++ unregister_chrdev(logreader_major, "vmlogrdr");
++ printk (KERN_ERR "vmlogrdr: driver not loaded.\n");
++
++ return rc;
++}
++
++
++static void
++logreader_exit(void)
++{
++ /* return all storage and unregister driver */
++ int index = 0;
++ struct logreader_priv_t * logptr = NULL;
++
++ for (index = 0; index < MAXMINOR; index++) {
++ logptr = sys_ser[index];
++ if (logptr) {
++			/* send the hotplug event before the data is freed */
++			logreader_hotplug_event(logreader_major, logptr,
++						VMLOGRDR_HOTPLUG_REMOVE);
++			kfree(logptr->buffer);
++			kfree(logptr);
++			sys_ser[index] = NULL;
++ }
++ }
++ unregister_chrdev(logreader_major, "vmlogrdr");
++ printk (KERN_INFO "vmlogrdr: driver unloaded\n");
++ return;
++}
++
++module_init(logreader_init);
++module_exit(logreader_exit);
++
++
++
++
++
+=== drivers/s390/char/Makefile
+==================================================================
+--- drivers/s390/char/Makefile (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/Makefile (/trunk/2.4.27) (revision 52)
+@@ -4,31 +4,39 @@
+
+ O_TARGET := s390-char.o
+
+-list-multi := tub3270.o tape390.o
+-export-objs := hwc_rw.o
++list-multi := tub3270.o \
++ tape390.o
+
++export-objs := sclp.o \
++ tape_core.o \
++ tape_devmap.o \
++ tape_std.o
++
+ tub3270-objs := tuball.o tubfs.o tubtty.o \
+ tubttyaid.o tubttybld.o tubttyscl.o \
+ tubttyrcl.o tubttysiz.o
+
+-tape390-$(CONFIG_S390_TAPE_CHAR) += tapechar.o
+-tape390-$(CONFIG_S390_TAPE_BLOCK) += tapeblock.o
+-tape390-$(CONFIG_S390_TAPE_3480) += tape3480.o tape34xx.o
+-tape390-$(CONFIG_S390_TAPE_3490) += tape3490.o tape34xx.o
+-tape390-objs := tape.o $(sort $(tape390-y))
++tape-$(CONFIG_S390_TAPE_BLOCK) += tape_block.o
++tape-objs := tape_core.o tape_devmap.o tape_proc.o tape_std.o tape_char.o \
++ $(sort $(tape-y))
++obj-$(CONFIG_S390_TAPE) += tape390.o
++obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
+
+ obj-y += ctrlchar.o
+ obj-$(CONFIG_TN3215) += con3215.o
+-obj-$(CONFIG_HWC) += hwc_con.o hwc_rw.o hwc_tty.o
+-obj-$(CONFIG_HWC_CPI) += hwc_cpi.o
++obj-$(CONFIG_SCLP) += sclp.o sclp_rw.o
++obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
++obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
++obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
++obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o
+ obj-$(CONFIG_TN3270) += tub3270.o
+-obj-$(CONFIG_S390_TAPE) += tape390.o
++obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
+
+ include $(TOPDIR)/Rules.make
+
+ tub3270.o: $(tub3270-objs)
+ $(LD) -r -o $@ $(tub3270-objs)
+
+-tape390.o: $(tape390-objs)
+- $(LD) -r -o $@ $(tape390-objs)
++tape390.o: $(tape-objs)
++ $(LD) -r -o $@ $(tape-objs)
+
+=== drivers/s390/char/sclp_cpi.c
+==================================================================
+--- drivers/s390/char/sclp_cpi.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/sclp_cpi.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,244 @@
++/*
++ * Author: Martin Peschke <mpeschke at de.ibm.com>
++ * Copyright (C) 2001 IBM Entwicklung GmbH, IBM Corporation
++ *
++ * SCLP Control-Program Identification.
++ */
++
++#include <linux/config.h>
++#include <linux/version.h>
++#include <linux/kmod.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/timer.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/slab.h>
++#include <asm/ebcdic.h>
++#include <asm/semaphore.h>
++
++#include "sclp.h"
++#include "sclp_rw.h"
++
++#define CPI_LENGTH_SYSTEM_TYPE 8
++#define CPI_LENGTH_SYSTEM_NAME 8
++#define CPI_LENGTH_SYSPLEX_NAME 8
++
++struct cpi_evbuf {
++ struct evbuf_header header;
++ u8 id_format;
++ u8 reserved0;
++ u8 system_type[CPI_LENGTH_SYSTEM_TYPE];
++ u64 reserved1;
++ u8 system_name[CPI_LENGTH_SYSTEM_NAME];
++ u64 reserved2;
++ u64 system_level;
++ u64 reserved3;
++ u8 sysplex_name[CPI_LENGTH_SYSPLEX_NAME];
++ u8 reserved4[16];
++} __attribute__((packed));
++
++struct cpi_sccb {
++ struct sccb_header header;
++ struct cpi_evbuf cpi_evbuf;
++} __attribute__((packed));
++
++/* Event type structure for write message and write priority message */
++static struct sclp_register sclp_cpi_event =
++{
++ .send_mask = EvTyp_CtlProgIdent_Mask
++};
++
++MODULE_AUTHOR(
++ "Martin Peschke, IBM Deutschland Entwicklung GmbH "
++ "<mpeschke at de.ibm.com>");
++
++MODULE_DESCRIPTION(
++ "identify this operating system instance to the S/390 "
++ "or zSeries hardware");
++
++static char *system_name = NULL;
++MODULE_PARM(system_name, "s");
++MODULE_PARM_DESC(system_name, "e.g. hostname - max. 8 characters");
++
++static char *sysplex_name = NULL;
++#ifdef ALLOW_SYSPLEX_NAME
++MODULE_PARM(sysplex_name, "s");
++MODULE_PARM_DESC(sysplex_name, "if applicable - max. 8 characters");
++#endif
++
++/* use default value for this field (as well as for system level) */
++static char *system_type = "LINUX";
++
++static int
++cpi_check_parms(void)
++{
++ /* reject if no system type specified */
++ if (!system_type) {
++ printk("cpi: bug: no system type specified\n");
++ return -EINVAL;
++ }
++
++ /* reject if system type larger than 8 characters */
++	if (strlen(system_type) > CPI_LENGTH_SYSTEM_TYPE) {
++ printk("cpi: bug: system type has length of %li characters - "
++ "only %i characters supported\n",
++ strlen(system_type), CPI_LENGTH_SYSTEM_TYPE);
++ return -EINVAL;
++ }
++
++ /* reject if no system name specified */
++ if (!system_name) {
++ printk("cpi: no system name specified\n");
++ return -EINVAL;
++ }
++
++ /* reject if system name larger than 8 characters */
++ if (strlen(system_name) > CPI_LENGTH_SYSTEM_NAME) {
++ printk("cpi: system name has length of %li characters - "
++ "only %i characters supported\n",
++ strlen(system_name), CPI_LENGTH_SYSTEM_NAME);
++ return -EINVAL;
++ }
++
++ /* reject if specified sysplex name larger than 8 characters */
++ if (sysplex_name && strlen(sysplex_name) > CPI_LENGTH_SYSPLEX_NAME) {
++ printk("cpi: sysplex name has length of %li characters"
++ " - only %i characters supported\n",
++ strlen(sysplex_name), CPI_LENGTH_SYSPLEX_NAME);
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static void
++cpi_callback(struct sclp_req *req, void *data)
++{
++ struct semaphore *sem;
++
++ sem = (struct semaphore *) data;
++ up(sem);
++}
++
++static struct sclp_req *
++cpi_prepare_req(void)
++{
++ struct sclp_req *req;
++ struct cpi_sccb *sccb;
++ struct cpi_evbuf *evb;
++
++ req = (struct sclp_req *) kmalloc(sizeof(struct sclp_req), GFP_KERNEL);
++ if (req == NULL)
++ return ERR_PTR(-ENOMEM);
++ sccb = (struct cpi_sccb *) get_free_page(GFP_KERNEL | GFP_DMA);
++ if (sccb == NULL) {
++ kfree(req);
++ return ERR_PTR(-ENOMEM);
++ }
++ memset(sccb, 0, sizeof(struct cpi_sccb));
++
++ /* setup SCCB for Control-Program Identification */
++ sccb->header.length = sizeof(struct cpi_sccb);
++ sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf);
++ sccb->cpi_evbuf.header.type = 0x0B;
++ evb = &sccb->cpi_evbuf;
++
++ /* set system type */
++ memset(evb->system_type, ' ', CPI_LENGTH_SYSTEM_TYPE);
++ memcpy(evb->system_type, system_type, strlen(system_type));
++ sclp_ascebc_str(evb->system_type, CPI_LENGTH_SYSTEM_TYPE);
++ EBC_TOUPPER(evb->system_type, CPI_LENGTH_SYSTEM_TYPE);
++
++ /* set system name */
++ memset(evb->system_name, ' ', CPI_LENGTH_SYSTEM_NAME);
++ memcpy(evb->system_name, system_name, strlen(system_name));
++ sclp_ascebc_str(evb->system_name, CPI_LENGTH_SYSTEM_NAME);
++ EBC_TOUPPER(evb->system_name, CPI_LENGTH_SYSTEM_NAME);
++
++	/* set system level */
++ evb->system_level = LINUX_VERSION_CODE;
++
++ /* set sysplex name */
++ if (sysplex_name) {
++ memset(evb->sysplex_name, ' ', CPI_LENGTH_SYSPLEX_NAME);
++ memcpy(evb->sysplex_name, sysplex_name, strlen(sysplex_name));
++ sclp_ascebc_str(evb->sysplex_name, CPI_LENGTH_SYSPLEX_NAME);
++ EBC_TOUPPER(evb->sysplex_name, CPI_LENGTH_SYSPLEX_NAME);
++ }
++
++ /* prepare request data structure presented to SCLP driver */
++ req->command = SCLP_CMDW_WRITEDATA;
++ req->sccb = sccb;
++ req->status = SCLP_REQ_FILLED;
++ req->callback = cpi_callback;
++ return req;
++}
++
++static void
++cpi_free_req(struct sclp_req *req)
++{
++ free_page((unsigned long) req->sccb);
++ kfree(req);
++}
++
++static int __init
++cpi_module_init(void)
++{
++ struct semaphore sem;
++ struct sclp_req *req;
++ int rc;
++
++ rc = cpi_check_parms();
++ if (rc)
++ return rc;
++
++ rc = sclp_register(&sclp_cpi_event);
++ if (rc) {
++ /* could not register sclp event. Die. */
++ printk("cpi: could not register to hardware console.\n");
++ return -EINVAL;
++ }
++ if (!(sclp_cpi_event.sclp_send_mask & EvTyp_CtlProgIdent_Mask)) {
++ printk("cpi: no control program identification support\n");
++ sclp_unregister(&sclp_cpi_event);
++ return -ENOTSUPP;
++ }
++
++ req = cpi_prepare_req();
++ if (IS_ERR(req)) {
++ printk("cpi: couldn't allocate request\n");
++ sclp_unregister(&sclp_cpi_event);
++ return PTR_ERR(req);
++ }
++
++ /* Prepare semaphore */
++ sema_init(&sem, 0);
++ req->callback_data = &sem;
++ /* Add request to sclp queue */
++ sclp_add_request(req);
++ /* make "insmod" sleep until callback arrives */
++ down(&sem);
++
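++	/* 0x0020 is the response code for normal completion. */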
++ rc = ((struct cpi_sccb *) req->sccb)->header.response_code;
++ if (rc != 0x0020) {
++ printk("cpi: failed with response code 0x%x\n", rc);
++ rc = -ECOMM;
++ } else
++ rc = 0;
++
++ cpi_free_req(req);
++ sclp_unregister(&sclp_cpi_event);
++
++ return rc;
++}
++
++
++static void __exit cpi_module_exit(void)
++{
++}
++
++
++/* declare driver module init/cleanup functions */
++module_init(cpi_module_init);
++module_exit(cpi_module_exit);
++
+=== drivers/s390/char/sclp_vt220.c
+==================================================================
+--- drivers/s390/char/sclp_vt220.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/sclp_vt220.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,817 @@
++/*
++ * drivers/s390/char/sclp_vt220.c
++ * SCLP VT220 terminal driver.
++ *
++ * S390 version
++ * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Peter Oberparleiter <Peter.Oberparleiter at de.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/version.h>
++#include <linux/spinlock.h>
++#include <linux/list.h>
++#include <linux/wait.h>
++#include <linux/timer.h>
++#include <linux/kernel.h>
++#include <linux/tty.h>
++#include <linux/tty_driver.h>
++#include <linux/sched.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/major.h>
++#include <linux/console.h>
++#include <linux/kdev_t.h>
++#include <linux/bootmem.h>
++#include <linux/interrupt.h>
++#include <asm/uaccess.h>
++#include "sclp.h"
++
++#define SCLP_VT220_PRINT_HEADER "sclp vt220 tty driver: "
++#define SCLP_VT220_MAJOR TTY_MAJOR
++#define SCLP_VT220_MINOR 65
++#define SCLP_VT220_DRIVER_NAME "sclp_vt220"
++#define SCLP_VT220_DEVICE_NAME "ttyS"
++#define SCLP_VT220_CONSOLE_NAME "ttyS"
++#define SCLP_VT220_CONSOLE_INDEX 1 /* console=ttyS1 */
++
++/* Representation of a single write request */
++struct sclp_vt220_request {
++ struct list_head list;
++ struct sclp_req sclp_req;
++ int retry_count;
++ struct timer_list retry_timer;
++};
++
++/* VT220 SCCB */
++struct sclp_vt220_sccb {
++ struct sccb_header header;
++ struct evbuf_header evbuf;
++};
++
++#define SCLP_VT220_MAX_CHARS_PER_BUFFER (PAGE_SIZE - \
++ sizeof(struct sclp_vt220_request) - \
++ sizeof(struct sclp_vt220_sccb))
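++/* The sccb sits at the start of each page and the request structure at
++ * its end; the space in between buffers the characters. */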
++
++/* Structures and data needed to register tty driver */
++static struct tty_driver sclp_vt220_driver;
++static int sclp_vt220_refcount;
++static struct tty_struct * sclp_vt220_table[1];
++static struct termios * sclp_vt220_termios[1];
++static struct termios * sclp_vt220_termios_locked[1];
++
++/* The tty_struct that the kernel associated with us */
++static struct tty_struct *sclp_vt220_tty;
++
++/* Lock to protect internal data from concurrent access */
++static spinlock_t sclp_vt220_lock;
++
++/* List of empty pages to be used as write request buffers */
++static struct list_head sclp_vt220_empty;
++
++/* List of pending requests */
++static struct list_head sclp_vt220_outqueue;
++
++/* Number of requests in outqueue */
++static int sclp_vt220_outqueue_count;
++
++/* Wait queue used to delay write requests while we've run out of buffers */
++static wait_queue_head_t sclp_vt220_waitq;
++
++/* Timer used for delaying write requests to merge subsequent messages into
++ * a single buffer */
++static struct timer_list sclp_vt220_timer;
++
++/* Pointer to current request buffer which has been partially filled but not
++ * yet sent */
++static struct sclp_vt220_request *sclp_vt220_current_request;
++
++/* Number of characters in current request buffer */
++static int sclp_vt220_buffered_chars;
++
++/* Flag indicating whether this driver has already been initialized */
++static int sclp_vt220_initialized = 0;
++
++/* Flag indicating that sclp_vt220_current_request should really
++ * have been already queued but wasn't because the SCLP was processing
++ * another buffer */
++static int sclp_vt220_flush_later;
++
++static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
++static void __sclp_vt220_emit(struct sclp_vt220_request *request);
++static void sclp_vt220_emit_current(void);
++
++/* Registration structure for our interest in SCLP event buffers */
++static struct sclp_register sclp_vt220_register = {
++ .send_mask = EvTyp_VT220Msg_Mask,
++ .receive_mask = EvTyp_VT220Msg_Mask,
++ .state_change_fn = NULL,
++ .receiver_fn = sclp_vt220_receiver_fn
++};
++
++
++/*
++ * Put provided request buffer back into queue and check emit pending
++ * buffers if necessary.
++ */
++static void
++sclp_vt220_process_queue(struct sclp_vt220_request* request)
++{
++ unsigned long flags;
++ struct sclp_vt220_request *next;
++ void *page;
++
++ /* Put buffer back to list of empty buffers */
++ page = request->sclp_req.sccb;
++ spin_lock_irqsave(&sclp_vt220_lock, flags);
++ /* Move request from outqueue to empty queue */
++ list_del(&request->list);
++ sclp_vt220_outqueue_count--;
++ list_add_tail((struct list_head *) page, &sclp_vt220_empty);
++ /* Check if there is a pending buffer on the out queue. */
++ next = NULL;
++ if (!list_empty(&sclp_vt220_outqueue))
++ next = list_entry(sclp_vt220_outqueue.next,
++ struct sclp_vt220_request, list);
++ spin_unlock_irqrestore(&sclp_vt220_lock, flags);
++ if (next != NULL)
++ __sclp_vt220_emit(next);
++ else if (sclp_vt220_flush_later)
++ sclp_vt220_emit_current();
++ wake_up(&sclp_vt220_waitq);
++ /* Check if the tty needs a wake up call */
++ if (sclp_vt220_tty != NULL) {
++ if ((sclp_vt220_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
++ (sclp_vt220_tty->ldisc.write_wakeup != NULL))
++ (sclp_vt220_tty->ldisc.write_wakeup)(sclp_vt220_tty);
++ wake_up_interruptible(&sclp_vt220_tty->write_wait);
++ }
++}
++
++/*
++ * Retry sclp write request after waiting some time for an sclp equipment
++ * check to pass.
++ */
++static void
++sclp_vt220_retry(unsigned long data)
++{
++ struct sclp_vt220_request *request;
++ struct sclp_vt220_sccb *sccb;
++
++ request = (struct sclp_vt220_request *) data;
++ request->sclp_req.status = SCLP_REQ_FILLED;
++ sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
++ sccb->header.response_code = 0x0000;
++ sclp_add_request(&request->sclp_req);
++}
++
++#define SCLP_BUFFER_MAX_RETRY 5
++#define SCLP_BUFFER_RETRY_INTERVAL 2
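++/* Give up after 5 retries; wait 2 seconds between attempts. */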
++
++/*
++ * Callback through which the result of a write request is reported by the
++ * SCLP.
++ */
++static void
++sclp_vt220_callback(struct sclp_req *request, void *data)
++{
++ struct sclp_vt220_request *vt220_request;
++ struct sclp_vt220_sccb *sccb;
++
++ vt220_request = (struct sclp_vt220_request *) data;
++ if (request->status == SCLP_REQ_FAILED) {
++ sclp_vt220_process_queue(vt220_request);
++ return;
++ }
++ sccb = (struct sclp_vt220_sccb *) vt220_request->sclp_req.sccb;
++
++ /* Check SCLP response code and choose suitable action */
++ switch (sccb->header.response_code) {
++	case 0x0020:
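++		/* Normal completion, the buffer is processed. */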
++ break;
++
++ case 0x05f0: /* Target resource in improper state */
++ break;
++
++ case 0x0340: /* Contained SCLP equipment check */
++ if (vt220_request->retry_count++ > SCLP_BUFFER_MAX_RETRY)
++ break;
++ /* Remove processed buffers and requeue rest */
++ if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
++ /* Not all buffers were processed */
++ sccb->header.response_code = 0x0000;
++ vt220_request->sclp_req.status = SCLP_REQ_FILLED;
++ sclp_add_request(request);
++ return;
++ }
++ break;
++
++ case 0x0040: /* SCLP equipment check */
++ if (vt220_request->retry_count++ > SCLP_BUFFER_MAX_RETRY)
++ break;
++ /* Wait some time, then retry request */
++ vt220_request->retry_timer.function = sclp_vt220_retry;
++ vt220_request->retry_timer.data =
++ (unsigned long) vt220_request;
++ vt220_request->retry_timer.expires =
++ jiffies + SCLP_BUFFER_RETRY_INTERVAL*HZ;
++ add_timer(&vt220_request->retry_timer);
++ return;
++
++ default:
++ break;
++ }
++ sclp_vt220_process_queue(vt220_request);
++}
++
++/*
++ * Emit vt220 request buffer to SCLP.
++ */
++static void
++__sclp_vt220_emit(struct sclp_vt220_request *request)
++{
++ if (!(sclp_vt220_register.sclp_send_mask & EvTyp_VT220Msg_Mask)) {
++ request->sclp_req.status = SCLP_REQ_FAILED;
++ sclp_vt220_callback(&request->sclp_req, (void *) request);
++ return;
++ }
++ request->sclp_req.command = SCLP_CMDW_WRITEDATA;
++ request->sclp_req.status = SCLP_REQ_FILLED;
++ request->sclp_req.callback = sclp_vt220_callback;
++ request->sclp_req.callback_data = (void *) request;
++
++ sclp_add_request(&request->sclp_req);
++}
++
++/*
++ * Queue and emit given request.
++ */
++static void
++sclp_vt220_emit(struct sclp_vt220_request *request)
++{
++ unsigned long flags;
++ int count;
++
++ spin_lock_irqsave(&sclp_vt220_lock, flags);
++ list_add_tail(&request->list, &sclp_vt220_outqueue);
++ count = sclp_vt220_outqueue_count++;
++ spin_unlock_irqrestore(&sclp_vt220_lock, flags);
++ /* Emit only the first buffer immediately - callback takes care of
++ * the rest */
++ if (count == 0)
++ __sclp_vt220_emit(request);
++}
++
++/*
++ * Queue and emit current request.
++ */
++static void
++sclp_vt220_emit_current(void)
++{
++ unsigned long flags;
++ struct sclp_vt220_request *request;
++ struct sclp_vt220_sccb *sccb;
++
++ spin_lock_irqsave(&sclp_vt220_lock, flags);
++ request = NULL;
++ if (sclp_vt220_current_request != NULL) {
++ sccb = (struct sclp_vt220_sccb *)
++ sclp_vt220_current_request->sclp_req.sccb;
++ /* Only emit buffers with content */
++ if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
++ request = sclp_vt220_current_request;
++ sclp_vt220_current_request = NULL;
++ if (timer_pending(&sclp_vt220_timer))
++ del_timer(&sclp_vt220_timer);
++ }
++ sclp_vt220_flush_later = 0;
++ }
++ spin_unlock_irqrestore(&sclp_vt220_lock, flags);
++ if (request != NULL)
++ sclp_vt220_emit(request);
++}
++
++#define SCLP_NORMAL_WRITE 0x00
++
++/*
++ * Helper function to initialize a page with the sclp request structure.
++ */
++static struct sclp_vt220_request *
++sclp_vt220_initialize_page(void *page)
++{
++ struct sclp_vt220_request *request;
++ struct sclp_vt220_sccb *sccb;
++
++ /* Place request structure at end of page */
++ request = ((struct sclp_vt220_request *)
++ ((addr_t) page + PAGE_SIZE)) - 1;
++ init_timer(&request->retry_timer);
++ request->retry_count = 0;
++ request->sclp_req.sccb = page;
++ /* SCCB goes at start of page */
++ sccb = (struct sclp_vt220_sccb *) page;
++ memset((void *) sccb, 0, sizeof(struct sclp_vt220_sccb));
++ sccb->header.length = sizeof(struct sclp_vt220_sccb);
++ sccb->header.function_code = SCLP_NORMAL_WRITE;
++ sccb->header.response_code = 0x0000;
++ sccb->evbuf.type = EvTyp_VT220Msg;
++ sccb->evbuf.length = sizeof(struct evbuf_header);
++
++ return request;
++}
++
++static inline unsigned int
++sclp_vt220_space_left(struct sclp_vt220_request *request)
++{
++ struct sclp_vt220_sccb *sccb;
++ sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
++ return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
++ sccb->header.length;
++}
++
++static inline unsigned int
++sclp_vt220_chars_stored(struct sclp_vt220_request *request)
++{
++ struct sclp_vt220_sccb *sccb;
++ sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
++ return sccb->evbuf.length - sizeof(struct evbuf_header);
++}
++
++/*
++ * Add msg to buffer associated with request. Return the number of characters
++ * added or -EFAULT on error.
++ */
++static int
++sclp_vt220_add_msg(struct sclp_vt220_request *request,
++ const unsigned char *msg, int count, int from_user,
++ int convertlf)
++{
++ struct sclp_vt220_sccb *sccb;
++ void *buffer;
++ unsigned char c;
++ int from;
++ int to;
++
++ if (count > sclp_vt220_space_left(request))
++ count = sclp_vt220_space_left(request);
++ if (count <= 0)
++ return 0;
++
++ sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
++ buffer = (void *) ((addr_t) sccb + sccb->header.length);
++
++ if (convertlf) {
++		/* Perform Linefeed conversion (0x0a -> 0x0a 0x0d) */
++ for (from=0, to=0;
++ (from < count) && (to < sclp_vt220_space_left(request));
++ from++) {
++ /* Retrieve character */
++ if (from_user) {
++ if (get_user(c, msg + from) != 0)
++ return -EFAULT;
++ } else
++ c = msg[from];
++ /* Perform conversion */
++ if (c == 0x0a) {
++ if (to + 1 < sclp_vt220_space_left(request)) {
++ ((unsigned char *) buffer)[to++] = c;
++ ((unsigned char *) buffer)[to++] = 0x0d;
++ } else
++ break;
++
++ } else
++ ((unsigned char *) buffer)[to++] = c;
++ }
++ sccb->header.length += to;
++ sccb->evbuf.length += to;
++ return from;
++ } else {
++ if (from_user) {
++ if (copy_from_user(buffer, (void *) msg, count) != 0)
++ return -EFAULT;
++ }
++ else
++ memcpy(buffer, (const void *) msg, count);
++ sccb->header.length += count;
++ sccb->evbuf.length += count;
++ return count;
++ }
++}
++
++/*
++ * Emit buffer after having waited long enough for more data to arrive.
++ */
++static void
++sclp_vt220_timeout(unsigned long data)
++{
++ sclp_vt220_emit_current();
++}
++
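++/* Emit a partially filled buffer after at most half a second. */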
++#define BUFFER_MAX_DELAY HZ/2
++
++/*
++ * Internal implementation of the write function. Write COUNT bytes of data
++ * from memory at BUF which may reside in user space (specified by FROM_USER)
++ * to the SCLP interface. If the data does not fit into the current
++ * write buffer, emit the current one and allocate a new one. If there are no
++ * more empty buffers available, wait until one gets emptied. If DO_SCHEDULE
++ * is non-zero, the buffer will be scheduled for emitting after a timeout -
++ * otherwise the user has to explicitly call the flush function.
++ * A non-zero CONVERTLF parameter indicates that 0x0a characters in the message
++ * buffer should be converted to 0x0a 0x0d. After completion, return the number
++ * of bytes written.
++ */
++static int
++__sclp_vt220_write(int from_user, const unsigned char *buf, int count,
++ int do_schedule, int convertlf)
++{
++ unsigned long flags;
++ void *page;
++ int written;
++ int overall_written;
++
++ if (count <= 0)
++ return 0;
++ overall_written = 0;
++ spin_lock_irqsave(&sclp_vt220_lock, flags);
++ do {
++ /* Create a sclp output buffer if none exists yet */
++ if (sclp_vt220_current_request == NULL) {
++ while (list_empty(&sclp_vt220_empty)) {
++ spin_unlock_irqrestore(&sclp_vt220_lock,
++ flags);
++ if (in_interrupt())
++ sclp_sync_wait();
++ else
++ wait_event(sclp_vt220_waitq,
++ !list_empty(&sclp_vt220_empty));
++ spin_lock_irqsave(&sclp_vt220_lock, flags);
++ }
++ page = (void *) sclp_vt220_empty.next;
++ list_del((struct list_head *) page);
++ sclp_vt220_current_request =
++ sclp_vt220_initialize_page(page);
++ }
++ /* Try to write the string to the current request buffer */
++ written = sclp_vt220_add_msg(sclp_vt220_current_request,
++ buf, count, from_user, convertlf);
++ if (written > 0)
++ overall_written += written;
++ if (written == -EFAULT || written == count)
++ break;
++ /*
++ * Not all characters could be written to the current
++ * output buffer. Emit the buffer, create a new buffer
++ * and then output the rest of the string.
++ */
++ spin_unlock_irqrestore(&sclp_vt220_lock, flags);
++ sclp_vt220_emit_current();
++ spin_lock_irqsave(&sclp_vt220_lock, flags);
++ buf += written;
++ count -= written;
++ } while (count > 0);
++ /* Setup timer to output current console buffer after some time */
++ if (sclp_vt220_current_request != NULL &&
++ !timer_pending(&sclp_vt220_timer) && do_schedule) {
++ sclp_vt220_timer.function = sclp_vt220_timeout;
++ sclp_vt220_timer.data = 0UL;
++ sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
++ add_timer(&sclp_vt220_timer);
++ }
++ spin_unlock_irqrestore(&sclp_vt220_lock, flags);
++ return overall_written;
++}
++
++/*
++ * This routine is called by the kernel to write a series of
++ * characters to the tty device. The characters may come from
++ * user space or kernel space. This routine will return the
++ * number of characters actually accepted for writing.
++ */
++static int
++sclp_vt220_write(struct tty_struct * tty, int from_user,
++ const unsigned char *buf, int count)
++{
++ return __sclp_vt220_write(from_user, buf, count, 1, 0);
++}
++
++#define SCLP_VT220_SESSION_ENDED 0x01
++#define SCLP_VT220_SESSION_STARTED 0x80
++#define SCLP_VT220_SESSION_DATA 0x00
++
++/*
++ * Called by the SCLP to report incoming event buffers.
++ */
++static void
++sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
++{
++ char *buffer;
++ unsigned int count;
++
++ /* Ignore input if device is not open */
++ if (sclp_vt220_tty == NULL)
++ return;
++
++ buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
++ count = evbuf->length - sizeof(struct evbuf_header);
++
++ switch (*buffer) {
++ case SCLP_VT220_SESSION_ENDED:
++ case SCLP_VT220_SESSION_STARTED:
++ break;
++ case SCLP_VT220_SESSION_DATA:
++ /* Send input to line discipline */
++ buffer++;
++ count--;
++ /* Prevent buffer overrun by discarding input. Note that
++ * because buffer_push works asynchronously, we cannot wait
++ * for the buffer to be emptied. */
++ if (count + sclp_vt220_tty->flip.count > TTY_FLIPBUF_SIZE)
++ count = TTY_FLIPBUF_SIZE - sclp_vt220_tty->flip.count;
++ memcpy(sclp_vt220_tty->flip.char_buf_ptr, buffer, count);
++ memset(sclp_vt220_tty->flip.flag_buf_ptr, TTY_NORMAL, count);
++ sclp_vt220_tty->flip.char_buf_ptr += count;
++ sclp_vt220_tty->flip.flag_buf_ptr += count;
++ sclp_vt220_tty->flip.count += count;
++ tty_flip_buffer_push(sclp_vt220_tty);
++ break;
++ }
++}
++
++/*
++ * This routine is called when a particular tty device is opened.
++ */
++static int
++sclp_vt220_open(struct tty_struct * tty, struct file * filp)
++{
++ sclp_vt220_tty = tty;
++ tty->driver_data = NULL;
++ tty->low_latency = 0;
++ return 0;
++}
++
++/*
++ * This routine is called when a particular tty device is closed.
++ */
++static void
++sclp_vt220_close(struct tty_struct * tty, struct file * filp)
++{
++ if (tty->count > 1)
++ return;
++ sclp_vt220_tty = NULL;
++}
++
++/*
++ * This routine is called by the kernel to write a single
++ * character to the tty device. If the kernel uses this routine,
++ * it must call the flush_chars() routine (if defined) when it is
++ * done stuffing characters into the driver.
++ *
++ * NOTE: include/linux/tty_driver.h specifies that a character should be
++ * ignored if there is no room in the queue. This driver implements different
++ * semantics in that it will block when there is no more room left.
++ */
++static void
++sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
++{
++ __sclp_vt220_write(0, &ch, 1, 0, 0);
++}
++
++/*
++ * This routine is called by the kernel after it has written a
++ * series of characters to the tty device using put_char().
++ */
++static void
++sclp_vt220_flush_chars(struct tty_struct *tty)
++{
++ if (sclp_vt220_outqueue_count == 0)
++ sclp_vt220_emit_current();
++ else
++ sclp_vt220_flush_later = 1;
++}
++
++/*
++ * This routine returns the number of characters the tty driver
++ * will accept for queuing to be written. This number is subject
++ * to change as output buffers get emptied, or if output flow
++ * control is activated.
++ */
++static int
++sclp_vt220_write_room(struct tty_struct *tty)
++{
++ unsigned long flags;
++ struct list_head *l;
++ int count;
++
++ spin_lock_irqsave(&sclp_vt220_lock, flags);
++ count = 0;
++ if (sclp_vt220_current_request != NULL)
++ count = sclp_vt220_space_left(sclp_vt220_current_request);
++ list_for_each(l, &sclp_vt220_empty)
++ count += SCLP_VT220_MAX_CHARS_PER_BUFFER;
++ spin_unlock_irqrestore(&sclp_vt220_lock, flags);
++ return count;
++}
++
++/*
++ * Return number of buffered chars.
++ */
++static int
++sclp_vt220_chars_in_buffer(struct tty_struct *tty)
++{
++ unsigned long flags;
++ struct list_head *l;
++ struct sclp_vt220_request *r;
++ int count;
++
++ spin_lock_irqsave(&sclp_vt220_lock, flags);
++ count = 0;
++ if (sclp_vt220_current_request != NULL)
++ count = sclp_vt220_chars_stored(sclp_vt220_current_request);
++ list_for_each(l, &sclp_vt220_outqueue) {
++ r = list_entry(l, struct sclp_vt220_request, list);
++ count += sclp_vt220_chars_stored(r);
++ }
++ spin_unlock_irqrestore(&sclp_vt220_lock, flags);
++ return count;
++}
++
++static void
++__sclp_vt220_flush_buffer(void)
++{
++ unsigned long flags;
++
++ sclp_vt220_emit_current();
++ spin_lock_irqsave(&sclp_vt220_lock, flags);
++ if (timer_pending(&sclp_vt220_timer))
++ del_timer(&sclp_vt220_timer);
++ while (sclp_vt220_outqueue_count > 0) {
++ spin_unlock_irqrestore(&sclp_vt220_lock, flags);
++ sclp_sync_wait();
++ spin_lock_irqsave(&sclp_vt220_lock, flags);
++ }
++ spin_unlock_irqrestore(&sclp_vt220_lock, flags);
++}
++
++/*
++ * Pass the current buffer on to the hardware. Note that this does not
++ * wait for buffers that are already queued for output.
++ */
++static void
++sclp_vt220_flush_buffer(struct tty_struct *tty)
++{
++ sclp_vt220_emit_current();
++}
++
++/*
++ * Initialize all relevant components and register driver with system.
++ */
++static void
++__sclp_vt220_init(int early)
++{
++ void *page;
++ int i;
++
++ if (sclp_vt220_initialized)
++ return;
++ sclp_vt220_initialized = 1;
++ spin_lock_init(&sclp_vt220_lock);
++ INIT_LIST_HEAD(&sclp_vt220_empty);
++ INIT_LIST_HEAD(&sclp_vt220_outqueue);
++ init_waitqueue_head(&sclp_vt220_waitq);
++ init_timer(&sclp_vt220_timer);
++ sclp_vt220_current_request = NULL;
++ sclp_vt220_buffered_chars = 0;
++ sclp_vt220_outqueue_count = 0;
++ sclp_vt220_tty = NULL;
++ sclp_vt220_refcount = 0;
++ sclp_vt220_flush_later = 0;
++
++ /* Allocate pages for output buffering */
++ for (i = 0; i < (early ? MAX_CONSOLE_PAGES : MAX_KMEM_PAGES); i++) {
++ if (early)
++ page = alloc_bootmem_low_pages(PAGE_SIZE);
++ else
++ page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
++ if (page == NULL)
++ return;
++ list_add_tail((struct list_head *) page, &sclp_vt220_empty);
++ }
++}
++
++/*
++ * Register driver with SCLP and Linux and initialize internal tty structures.
++ */
++void __init
++sclp_vt220_tty_init(void)
++{
++ int rc;
++
++ /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
++ * symmetry between VM and LPAR systems regarding ttyS1. */
++ __sclp_vt220_init(0);
++ rc = sclp_register(&sclp_vt220_register);
++ if (rc != 0) {
++ printk(KERN_ERR SCLP_VT220_PRINT_HEADER
++ "could not register tty - "
++ "sclp_register returned %d\n", rc);
++ return;
++ }
++
++ memset (&sclp_vt220_driver, 0, sizeof(struct tty_driver));
++ sclp_vt220_driver.magic = TTY_DRIVER_MAGIC;
++ sclp_vt220_driver.driver_name = SCLP_VT220_DRIVER_NAME;
++ sclp_vt220_driver.name = SCLP_VT220_DEVICE_NAME;
++ sclp_vt220_driver.name_base = 0;
++ sclp_vt220_driver.major = SCLP_VT220_MAJOR;
++ sclp_vt220_driver.minor_start = SCLP_VT220_MINOR;
++ sclp_vt220_driver.num = 1;
++ sclp_vt220_driver.type = TTY_DRIVER_TYPE_SYSTEM;
++ sclp_vt220_driver.subtype = SYSTEM_TYPE_TTY;
++ sclp_vt220_driver.init_termios = tty_std_termios;
++ sclp_vt220_driver.flags = TTY_DRIVER_REAL_RAW;
++ sclp_vt220_driver.refcount = &sclp_vt220_refcount;
++ sclp_vt220_driver.table = sclp_vt220_table;
++ sclp_vt220_driver.termios = sclp_vt220_termios;
++ sclp_vt220_driver.termios_locked = sclp_vt220_termios_locked;
++
++ /* Required callbacks */
++ sclp_vt220_driver.open = sclp_vt220_open;
++ sclp_vt220_driver.close = sclp_vt220_close;
++ sclp_vt220_driver.write = sclp_vt220_write;
++ sclp_vt220_driver.put_char = sclp_vt220_put_char;
++ sclp_vt220_driver.flush_chars = sclp_vt220_flush_chars;
++ sclp_vt220_driver.write_room = sclp_vt220_write_room;
++ sclp_vt220_driver.chars_in_buffer = sclp_vt220_chars_in_buffer;
++ sclp_vt220_driver.flush_buffer = sclp_vt220_flush_buffer;
++
++ /* Unsupported callbacks */
++ sclp_vt220_driver.ioctl = NULL;
++ sclp_vt220_driver.throttle = NULL;
++ sclp_vt220_driver.unthrottle = NULL;
++ sclp_vt220_driver.send_xchar = NULL;
++ sclp_vt220_driver.set_termios = NULL;
++ sclp_vt220_driver.set_ldisc = NULL;
++ sclp_vt220_driver.stop = NULL;
++ sclp_vt220_driver.start = NULL;
++ sclp_vt220_driver.hangup = NULL;
++ sclp_vt220_driver.break_ctl = NULL;
++ sclp_vt220_driver.wait_until_sent = NULL;
++ sclp_vt220_driver.read_proc = NULL;
++ sclp_vt220_driver.write_proc = NULL;
++
++ rc = tty_register_driver(&sclp_vt220_driver);
++ if (rc != 0)
++ printk(KERN_ERR SCLP_VT220_PRINT_HEADER
++ "could not register tty - "
++		       "tty_register_driver returned %d\n", rc);
++}
++
++#ifdef CONFIG_SCLP_VT220_CONSOLE
++
++static void
++sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
++{
++ __sclp_vt220_write(0, (const unsigned char *) buf, count, 1, 1);
++}
++
++static kdev_t
++sclp_vt220_con_device(struct console *c)
++{
++ return mk_kdev(SCLP_VT220_MAJOR, SCLP_VT220_MINOR);
++}
++
++/*
++ * This routine is called from panic when the kernel is going to give up.
++ * We have to make sure that all buffers will be flushed to the SCLP.
++ * Note that this function may be called from within an interrupt context.
++ */
++static void
++sclp_vt220_con_unblank(void)
++{
++ __sclp_vt220_flush_buffer();
++}
++
++/* Structure needed to register with printk */
++static struct console sclp_vt220_console =
++{
++ .name = SCLP_VT220_CONSOLE_NAME,
++ .write = sclp_vt220_con_write,
++ .device = sclp_vt220_con_device,
++ .unblank = sclp_vt220_con_unblank,
++ .flags = CON_PRINTBUFFER,
++ .index = SCLP_VT220_CONSOLE_INDEX
++};
++
++void
++sclp_vt220_con_init(void)
++{
++ if (!CONSOLE_IS_SCLP)
++ return;
++ __sclp_vt220_init(1);
++ /* Attach linux console */
++ register_console(&sclp_vt220_console);
++}
++
++#endif /* CONFIG_SCLP_VT220_CONSOLE */
++
+=== drivers/s390/char/tape_core.c
+==================================================================
+--- drivers/s390/char/tape_core.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tape_core.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,1434 @@
++/*
++ * drivers/s390/char/tape_core.c
++ * basic function of the tape device driver
++ *
++ * S390 and zSeries version
++ * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Carsten Otte <cotte at de.ibm.com>
++ * Michael Holzheu <holzheu at de.ibm.com>
++ * Tuan Ngo-Anh <ngoanh at de.ibm.com>
++ * Martin Schwidefsky <schwidefsky at de.ibm.com>
++ * Stefan Bader <shbader at de.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/init.h> // for kernel parameters
++#include <linux/kmod.h> // for requesting modules
++#include <linux/spinlock.h> // for locks
++#include <linux/vmalloc.h>
++
++#include <asm/types.h> // for variable types
++#include <asm/irq.h>
++#include <asm/s390io.h>
++#include <asm/s390dyn.h>
++
++#define TAPE_DBF_AREA tape_core_dbf
++
++#include "tape.h"
++#include "tape_std.h"
++
++#ifdef CONFIG_S390_TAPE_3590
++#include "tape_3590.h"
++#endif
++
++#define PRINTK_HEADER "T390:"
++
++/*
++ * Prototypes for some static functions.
++ */
++static void __tape_do_irq (int, void *, struct pt_regs *);
++static void __tape_remove_request(struct tape_device *, struct tape_request *);
++static void tape_timeout_io (unsigned long);
++
++/*
++ * List of tape disciplines guarded by tape_discipline_lock.
++ */
++static struct list_head tape_disciplines = LIST_HEAD_INIT(tape_disciplines);
++static spinlock_t tape_discipline_lock = SPIN_LOCK_UNLOCKED;
++
++/*
++ * Pointer to debug area.
++ */
++debug_info_t *TAPE_DBF_AREA = NULL;
++
++const char *tape_op_verbose[TO_SIZE] =
++{
++ [TO_BLOCK] = "BLK",
++ [TO_BSB] = "BSB",
++ [TO_BSF] = "BSF",
++ [TO_DSE] = "DSE",
++ [TO_FSB] = "FSB",
++ [TO_FSF] = "FSF",
++ [TO_LBL] = "LBL",
++ [TO_NOP] = "NOP",
++ [TO_RBA] = "RBA",
++ [TO_RBI] = "RBI",
++ [TO_RFO] = "RFO",
++ [TO_REW] = "REW",
++ [TO_RUN] = "RUN",
++ [TO_WRI] = "WRI",
++ [TO_WTM] = "WTM",
++ [TO_MSEN] = "MSN",
++ [TO_LOAD] = "LOA",
++ [TO_READ_CONFIG] = "RCF",
++ [TO_READ_ATTMSG] = "RAT",
++ [TO_DIS] = "DIS",
++ [TO_ASSIGN] = "ASS",
++ [TO_UNASSIGN] = "UAS",
++ [TO_BREAKASS] = "BRK"
++};
++
++/*
++ * Inline functions that have to be defined.
++ */
++static inline struct tape_request *
++tape_get_next_request(struct tape_device *device) {
++ if(list_empty(&device->req_queue))
++ return NULL;
++ return list_entry(device->req_queue.next, struct tape_request, list);
++}
++
++/*
++ * I/O helper function. Adds the request to the request queue
++ * and starts it if the tape is idle. Has to be called with
++ * the device lock held.
++ */
++static inline int
++__do_IO(struct tape_device *device, struct tape_request *request)
++{
++ int rc = 0;
++
++ if(request->cpaddr == NULL)
++ BUG();
++
++ if(request->timeout.expires > 0) {
++ /* Init should be done by caller */
++ DBF_EVENT(6, "(%04x): starting timed request\n",
++ device->devstat.devno);
++
++ request->timeout.function = tape_timeout_io;
++ request->timeout.data = (unsigned long)
++ tape_clone_request(request);
++ add_timer(&request->timeout);
++ }
++
++ rc = do_IO(device->devinfo.irq, request->cpaddr,
++ (unsigned long) request, 0x00, request->options);
++
++ return rc;
++}
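++
++/*
++ * Usage sketch (illustrative; the timeout value is an assumed example):
++ * a discipline prepares a timed request by initializing the timer
++ * before queueing it, e.g.
++ *
++ *	init_timer(&request->timeout);
++ *	request->timeout.expires = jiffies + 5 * HZ;
++ *	rc = tape_do_io(device, request);
++ *
++ * __do_IO() then arms the timer with tape_timeout_io() as its handler.
++ */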
++
++static void
++__tape_process_queue(void *data)
++{
++ struct tape_device *device = (struct tape_device *) data;
++ struct list_head *l, *n;
++ struct tape_request *request;
++ int rc;
++
++ DBF_EVENT(6, "tape_process_queue(%p)\n", device);
++
++ /*
++ * We were told to be quiet. Do nothing for now.
++ */
++ if (TAPE_NOACCESS(device)) {
++ return;
++ }
++
++ /*
++	 * Try to start each request on the request queue until one is
++	 * started successfully.
++ */
++ list_for_each_safe(l, n, &device->req_queue) {
++ request = list_entry(l, struct tape_request, list);
++
++		/* Happens when new requests arrive while one is still in progress. */
++ if (request->status == TAPE_REQUEST_IN_IO)
++ break;
++
++#ifdef CONFIG_S390_TAPE_BLOCK
++ if (request->op == TO_BLOCK)
++ device->discipline->check_locate(device, request);
++#endif
++ switch(request->op) {
++ case TO_MSEN:
++ case TO_ASSIGN:
++ case TO_UNASSIGN:
++ case TO_BREAKASS:
++ break;
++ default:
++ if (TAPE_OPEN(device))
++ break;
++ DBF_EVENT(3,
++ "TAPE(%04x): REQ in UNUSED state\n",
++ device->devstat.devno);
++ }
++
++ rc = __do_IO(device, request);
++ if (rc == 0) {
++ DBF_EVENT(6, "tape: do_IO success\n");
++ request->status = TAPE_REQUEST_IN_IO;
++ break;
++ }
++ /* Start failed. Remove request and indicate failure. */
++ if(rc == -EBUSY) {
++ DBF_EVENT(1, "tape: DOIO request on busy tape\n");
++ break;
++ }
++ DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc);
++
++ /* Set final status and remove. */
++ request->rc = rc;
++ __tape_remove_request(device, request);
++ }
++}
++
++static void
++tape_process_queue(void *data)
++{
++ unsigned long flags;
++ struct tape_device * device;
++
++ device = (struct tape_device *) data;
++ spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags);
++ atomic_set(&device->bh_scheduled, 0);
++ __tape_process_queue(device);
++ spin_unlock_irqrestore(get_irq_lock(device->devinfo.irq), flags);
++}
++
++void
++tape_schedule_bh(struct tape_device *device)
++{
++ /* Protect against rescheduling, when already running. */
++ if (atomic_compare_and_swap(0, 1, &device->bh_scheduled))
++ return;
++
++ INIT_LIST_HEAD(&device->bh_task.list);
++ device->bh_task.sync = 0;
++ device->bh_task.routine = tape_process_queue;
++ device->bh_task.data = device;
++
++ queue_task(&device->bh_task, &tq_immediate);
++ mark_bh(IMMEDIATE_BH);
++
++ return;
++}
++
++/*
++ * Stop running ccw. Has to be called with the device lock held.
++ */
++static inline int
++__tape_halt_io(struct tape_device *device, struct tape_request *request)
++{
++ int retries;
++ int rc;
++
++ /* SMB: This should never happen */
++ if(request->cpaddr == NULL)
++ BUG();
++
++ /* Check if interrupt has already been processed */
++ if (request->callback == NULL)
++ return 0;
++
++ /* Stop a possibly running timer */
++ if(request->timeout.expires) {
++ if(del_timer(&request->timeout) > 0) {
++ tape_put_request(request);
++ request->timeout.data = 0L;
++ }
++ }
++
++ rc = 0;
++ for (retries = 0; retries < 5; retries++) {
++ if (retries < 2)
++ rc = halt_IO(device->devinfo.irq,
++ (long) request, request->options);
++ else
++ rc = clear_IO(device->devinfo.irq,
++ (long) request, request->options);
++ if (rc == 0)
++ break; /* termination successful */
++ if (rc == -ENODEV)
++ DBF_EXCEPTION(2, "device gone, retry\n");
++ else if (rc == -EIO)
++ DBF_EXCEPTION(2, "I/O error, retry\n");
++ else if (rc == -EBUSY)
++ DBF_EXCEPTION(2, "device busy, retry later\n");
++ else
++ BUG();
++ }
++ if (rc == 0) {
++ request->rc = -EIO;
++ request->status = TAPE_REQUEST_DONE;
++ }
++ return rc;
++}
++
++static void
++__tape_remove_request(struct tape_device *device, struct tape_request *request)
++{
++ /* First remove the request from the queue. */
++ list_del(&request->list);
++
++ /* This request isn't processed any further. */
++ request->status = TAPE_REQUEST_DONE;
++
++ /* Finally, if the callback hasn't been called, do it now. */
++ if (request->callback != NULL) {
++ request->callback(request, request->callback_data);
++ request->callback = NULL;
++ }
++}
++
++/*
++ * Tape state functions
++ */
++/*
++ * Printable strings for tape enumerations.
++ */
++const char *
++tape_state_string(struct tape_device *device)
++{
++ char *s = " ???? ";
++
++ if (TAPE_NOT_OPER(device)) {
++ s = "NOT_OP";
++ } else if (TAPE_NOACCESS(device)) {
++ s = "NO_ACC";
++ } else if (TAPE_BOXED(device)) {
++ s = "BOXED ";
++ } else if (TAPE_OPEN(device)) {
++ s = "IN_USE";
++ } else if (TAPE_ASSIGNED(device)) {
++ s = "ASSIGN";
++ } else if (TAPE_INIT(device)) {
++ s = "INIT ";
++ } else if (TAPE_UNUSED(device)) {
++ s = "UNUSED";
++ }
++
++ return s;
++}
++
++void
++tape_state_set(struct tape_device *device, unsigned int status)
++{
++ const char *str;
++
++ /* Maybe nothing changed. */
++ if (device->tape_status == status)
++ return;
++
++ DBF_EVENT(4, "ts. dev: %x\n", device->first_minor);
++ str = tape_state_string(device);
++ DBF_EVENT(4, "old ts: 0x%08x %s\n", device->tape_status, str);
++
++ device->tape_status = status;
++
++ str = tape_state_string(device);
++ DBF_EVENT(4, "new ts: 0x%08x %s\n", status, str);
++
++ wake_up(&device->state_change_wq);
++}
++
++void
++tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
++{
++ if (device->medium_state == newstate)
++ return;
++
++ switch(newstate){
++ case MS_UNLOADED:
++ device->tape_generic_status |= GMT_DR_OPEN(~0);
++ PRINT_INFO("(%04x): Tape is unloaded\n",
++ device->devstat.devno);
++ break;
++ case MS_LOADED:
++ device->tape_generic_status &= ~GMT_DR_OPEN(~0);
++ PRINT_INFO("(%04x): Tape has been mounted\n",
++ device->devstat.devno);
++ break;
++ default:
++ // print nothing
++ break;
++ }
++#ifdef CONFIG_S390_TAPE_BLOCK
++ tapeblock_medium_change(device);
++#endif
++ device->medium_state = newstate;
++ wake_up(&device->state_change_wq);
++}
++
++static void
++tape_timeout_io(unsigned long data)
++{
++ struct tape_request *request;
++ struct tape_device *device;
++ unsigned long flags;
++
++ request = (struct tape_request *) data;
++ device = request->device;
++
++ spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags);
++ request->timeout.expires = 0;
++
++ if(request->callback != NULL) {
++ DBF_EVENT(3, "TAPE(%04x): %s timeout\n",
++ device->devstat.devno, tape_op_verbose[request->op]);
++ PRINT_ERR("TAPE(%04x): %s timeout\n",
++ device->devstat.devno, tape_op_verbose[request->op]);
++
++ if(__tape_halt_io(device, request) == 0)
++ DBF_EVENT(6, "tape_timeout_io: success\n");
++ else {
++ DBF_EVENT(2, "tape_timeout_io: halt_io failed\n");
++ PRINT_ERR("tape_timeout_io: halt_io failed\n");
++ }
++ }
++ spin_unlock_irqrestore(get_irq_lock(device->devinfo.irq), flags);
++ tape_put_request(request);
++}
++
++/*
++ * DEVFS Functions
++ */
++#ifdef CONFIG_DEVFS_FS
++devfs_handle_t tape_devfs_root_entry;
++
++/*
++ * Create devfs root entry (devno in hex) for device td
++ */
++static int
++tape_mkdevfsroot (struct tape_device* device)
++{
++ char devno [5];
++
++ sprintf(devno, "%04x", device->devinfo.devno);
++ device->devfs_dir = devfs_mk_dir(tape_devfs_root_entry, devno, device);
++ return (device->devfs_dir == NULL) ? -ENOMEM : 0;
++}
++
++/*
++ * Remove devfs root entry for a device
++ */
++static void
++tape_rmdevfsroot (struct tape_device *device)
++{
++ if (device->devfs_dir) {
++ devfs_unregister(device->devfs_dir);
++ device->devfs_dir = NULL;
++ }
++}
++#endif
++
++/*
++ * Enable tape device
++ */
++int
++tape_request_irq(struct tape_device *device)
++{
++ int rc;
++
++ if (device->devinfo.status & DEVINFO_UNFRIENDLY_DEV) {
++ s390_trigger_resense(device->devinfo.irq);
++ rc = get_dev_info_by_devno(
++ device->devinfo.devno,
++ &device->devinfo
++ );
++ if (rc) {
++ DBF_EVENT(3, "get_dev_info_by_devno returned %d\n", rc);
++ if (rc == -EUSERS) {
++ device->devinfo.status |=
++ DEVINFO_UNFRIENDLY_DEV;
++ TAPE_SET_STATE(device, TAPE_STATUS_BOXED);
++ }
++ return rc;
++ }
++ }
++
++ /* Register IRQ. */
++ rc = s390_request_irq_special(
++ device->devinfo.irq,
++ __tape_do_irq,
++ tape_noper_handler,
++ SA_DOPATHGROUP,
++ TAPE_MAGIC,
++ &device->devstat
++ );
++ if (rc) {
++ DBF_EVENT(3, "s390_request_irq_special returned %d\n", rc);
++ if (rc == -EUSERS) {
++ TAPE_SET_STATE(device, TAPE_STATUS_BOXED);
++ device->devinfo.status |= DEVINFO_UNFRIENDLY_DEV;
++ }
++ } else {
++ s390_set_private_data(
++ device->devinfo.irq,
++ tape_clone_device(device)
++ );
++ TAPE_CLEAR_STATE(device, TAPE_STATUS_BOXED);
++ }
++
++ DBF_EVENT(3, "tape_request_irq returns %d\n", rc);
++ return rc;
++}
++
++void
++tape_free_irq(struct tape_device *device)
++{
++ if(!(device->devinfo.status & DEVINFO_UNFRIENDLY_DEV)) {
++ s390_set_private_data(device->devinfo.irq, NULL);
++ tape_put_device(device);
++ free_irq(device->devinfo.irq, &device->devstat);
++ }
++}
++
++int
++tape_enable_device(struct tape_device *device,
++ struct tape_discipline *discipline)
++{
++ int rc;
++
++ device->discipline = discipline;
++ if (!TAPE_INIT(device))
++ return -EINVAL;
++
++ rc = tape_request_irq(device);
++ if(rc && rc != -EUSERS)
++ return rc;
++
++ /* Let the discipline have a go at the device. */
++ rc = discipline->setup_device(device);
++ if (rc) {
++ DBF_EVENT(3, "discipline->setup_device returned %d\n", rc);
++ tape_free_irq(device);
++ return rc;
++ }
++
++#ifdef CONFIG_DEVFS_FS
++ /* Create devfs entries */
++ rc = tape_mkdevfsroot(device);
++ if (rc){
++ DBF_EVENT(3, "tape_mkdevfsroot returned %d\n", rc);
++ PRINT_WARN ("Cannot create a devfs directory for "
++ "device %04x\n", device->devinfo.devno);
++ device->discipline->cleanup_device(device);
++ tape_free_irq(device);
++ return rc;
++ }
++#endif
++ rc = tapechar_setup_device(device);
++ if (rc) {
++ DBF_EVENT(3, "tapechar_setup_device returned %d\n", rc);
++#ifdef CONFIG_DEVFS_FS
++ tape_rmdevfsroot(device);
++#endif
++ device->discipline->cleanup_device(device);
++ tape_free_irq(device);
++ return rc;
++ }
++#ifdef CONFIG_S390_TAPE_BLOCK
++ rc = tapeblock_setup_device(device);
++ if (rc) {
++ DBF_EVENT(3, "tapeblock_setup_device returned %d\n", rc);
++ tapechar_cleanup_device(device);
++#ifdef CONFIG_DEVFS_FS
++ tape_rmdevfsroot(device);
++#endif
++ device->discipline->cleanup_device(device);
++ tape_free_irq(device);
++ return rc;
++ }
++#endif
++
++ if(!TAPE_BOXED(device))
++ TAPE_CLEAR_STATE(device, TAPE_STATUS_INIT);
++
++ return 0;
++}
++
++/*
++ * Disable tape device. Check if there is a running request and
++ * terminate it. Post all queued requests with -EIO.
++ */
++void
++tape_disable_device(struct tape_device *device, int gone)
++{
++ struct list_head * l, *n;
++ struct tape_request * request;
++ unsigned long flags;
++
++ spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags);
++ /* Post remaining requests with -EIO */
++ list_for_each_safe(l, n, &device->req_queue) {
++ request = list_entry(l, struct tape_request, list);
++ if (request->status == TAPE_REQUEST_IN_IO && !gone) {
++ __tape_halt_io(device, request);
++ }
++
++ request->rc = -EIO;
++ request->status = TAPE_REQUEST_DONE;
++ __tape_remove_request(device, request);
++ }
++
++ if (TAPE_ASSIGNED(device) && !gone) {
++ spin_unlock(get_irq_lock(device->devinfo.irq));
++ if(
++ tape_unassign(
++ device,
++ TAPE_STATUS_ASSIGN_M|TAPE_STATUS_ASSIGN_A
++ ) == 0
++ ) {
++ printk(KERN_WARNING "%04x: automatically unassigned\n",
++ device->devinfo.devno);
++ }
++ spin_lock(get_irq_lock(device->devinfo.irq));
++ }
++
++ TAPE_SET_STATE(device, TAPE_STATUS_NOT_OPER);
++ spin_unlock_irqrestore(get_irq_lock(device->devinfo.irq), flags);
++
++#ifdef CONFIG_S390_TAPE_BLOCK
++ tapeblock_cleanup_device(device);
++#endif
++ tapechar_cleanup_device(device);
++#ifdef CONFIG_DEVFS_FS
++ tape_rmdevfsroot(device);
++#endif
++ device->discipline->cleanup_device(device);
++ tape_free_irq(device);
++}
++
++/*
++ * Find discipline by cu_type.
++ */
++struct tape_discipline *
++tape_get_discipline(int cu_type)
++{
++ struct list_head *l;
++ struct tape_discipline *discipline, *tmp;
++
++ discipline = NULL;
++ spin_lock(&tape_discipline_lock);
++ list_for_each(l, &tape_disciplines) {
++ tmp = list_entry(l, struct tape_discipline, list);
++ if (tmp->cu_type == cu_type) {
++ discipline = tmp;
++ break;
++ }
++ }
++	if (discipline != NULL && discipline->owner != NULL) {
++ if (!try_inc_mod_count(discipline->owner))
++ /* Discipline is currently unloaded! */
++ discipline = NULL;
++ }
++ spin_unlock(&tape_discipline_lock);
++ return discipline;
++}
++
++/*
++ * Decrement usage count for discipline.
++ */
++void
++tape_put_discipline(struct tape_discipline *discipline)
++{
++ spin_lock(&tape_discipline_lock);
++ if (discipline->owner)
++ __MOD_DEC_USE_COUNT(discipline->owner);
++ spin_unlock(&tape_discipline_lock);
++}
++
++/*
++ * Register backend discipline
++ */
++int
++tape_register_discipline(struct tape_discipline *discipline)
++{
++ if (!try_inc_mod_count(THIS_MODULE))
++ /* Tape module is currently unloaded! */
++ return -ENOSYS;
++ spin_lock(&tape_discipline_lock);
++ list_add_tail(&discipline->list, &tape_disciplines);
++ spin_unlock(&tape_discipline_lock);
++ /* Now add the tape devices with matching cu_type. */
++ tape_add_devices(discipline);
++ return 0;
++}
++
++/*
++ * Unregister backend discipline
++ */
++void
++__tape_unregister_discipline(struct tape_discipline *discipline)
++{
++ list_del(&discipline->list);
++ /* Remove tape devices with matching cu_type. */
++ tape_remove_devices(discipline);
++ MOD_DEC_USE_COUNT;
++}
++
++void
++tape_unregister_discipline(struct tape_discipline *discipline)
++{
++ struct list_head *l;
++
++ spin_lock(&tape_discipline_lock);
++ list_for_each(l, &tape_disciplines) {
++ if (list_entry(l, struct tape_discipline, list) == discipline){
++ __tape_unregister_discipline(discipline);
++ break;
++ }
++ }
++ spin_unlock(&tape_discipline_lock);
++}
++
++/*
++ * Allocate a new tape ccw request
++ */
++struct tape_request *
++tape_alloc_request(int cplength, int datasize)
++{
++ struct tape_request *request;
++
++ if (datasize > PAGE_SIZE || (cplength*sizeof(ccw1_t)) > PAGE_SIZE)
++ BUG();
++
++ DBF_EVENT(5, "tape_alloc_request(%d,%d)\n", cplength, datasize);
++
++ request = (struct tape_request *)
++ kmalloc(sizeof(struct tape_request), GFP_KERNEL);
++ if (request == NULL) {
++ DBF_EXCEPTION(1, "cqra nomem\n");
++ return ERR_PTR(-ENOMEM);
++ }
++ memset(request, 0, sizeof(struct tape_request));
++ INIT_LIST_HEAD(&request->list);
++ atomic_set(&request->ref_count, 1);
++
++ /* allocate channel program */
++ if (cplength > 0) {
++ request->cpaddr =
++ kmalloc(cplength*sizeof(ccw1_t), GFP_ATOMIC | GFP_DMA);
++ if (request->cpaddr == NULL) {
++ DBF_EXCEPTION(1, "cqra nomem\n");
++ kfree(request);
++ return ERR_PTR(-ENOMEM);
++ }
++ memset(request->cpaddr, 0, cplength*sizeof(ccw1_t));
++ }
++ /* alloc small kernel buffer */
++ if (datasize > 0) {
++ request->cpdata = kmalloc(datasize, GFP_KERNEL | GFP_DMA);
++ if (request->cpdata == NULL) {
++ DBF_EXCEPTION(1, "cqra nomem\n");
++ if (request->cpaddr != NULL)
++ kfree(request->cpaddr);
++ kfree(request);
++ return ERR_PTR(-ENOMEM);
++ }
++ memset(request->cpdata, 0, datasize);
++ }
++
++ DBF_EVENT(5, "request=%p(%p/%p)\n", request, request->cpaddr,
++ request->cpdata);
++
++ return request;
++}
++
++/*
++ * Free tape ccw request
++ */
++void
++tape_free_request (struct tape_request * request)
++{
++ DBF_EVENT(5, "tape_free_request(%p)\n", request);
++
++ if (request->device != NULL) {
++ tape_put_device(request->device);
++ request->device = NULL;
++ }
++ if (request->cpdata != NULL) {
++ kfree(request->cpdata);
++ }
++ if (request->cpaddr != NULL) {
++ kfree(request->cpaddr);
++ }
++ kfree(request);
++}
++
++struct tape_request *
++tape_clone_request(struct tape_request *request)
++{
++ DBF_EVENT(5, "tape_clone_request(%p) = %i\n", request,
++ atomic_inc_return(&request->ref_count));
++ return request;
++}
++
++struct tape_request *
++tape_put_request(struct tape_request *request)
++{
++ int remain;
++
++ DBF_EVENT(4, "tape_put_request(%p)\n", request);
++ if((remain = atomic_dec_return(&request->ref_count)) > 0) {
++ DBF_EVENT(5, "remaining = %i\n", remain);
++ } else {
++ tape_free_request(request);
++ }
++
++ return NULL;
++}
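++
++/*
++ * Reference counting note (editorial): tape_alloc_request() returns a
++ * request with ref_count == 1. Every tape_clone_request() increments
++ * the count and must be balanced by a tape_put_request(); the put that
++ * drops the count to zero frees the request via tape_free_request().
++ */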
++
++/*
++ * Write sense data to console/dbf
++ */
++void
++tape_dump_sense(struct tape_device* device, struct tape_request *request)
++{
++ devstat_t *stat;
++ unsigned int *sptr;
++
++ stat = &device->devstat;
++ PRINT_INFO("-------------------------------------------------\n");
++ PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n",
++ stat->dstat, stat->cstat, stat->cpa);
++ PRINT_INFO("DEVICE: %04x\n", device->devinfo.devno);
++ if (request != NULL)
++ PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]);
++
++ sptr = (unsigned int *) stat->ii.sense.data;
++ PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
++ sptr[0], sptr[1], sptr[2], sptr[3]);
++ PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
++ sptr[4], sptr[5], sptr[6], sptr[7]);
++ PRINT_INFO("--------------------------------------------------\n");
++}
++
++/*
++ * Write sense data to dbf
++ */
++void
++tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request)
++{
++ devstat_t *stat = &device->devstat;
++ unsigned int *sptr;
++ const char* op;
++
++ if (request != NULL)
++ op = tape_op_verbose[request->op];
++ else
++ op = "---";
++ DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n", stat->dstat,stat->cstat);
++ DBF_EVENT(3, "DEVICE: %04x OP\t: %s\n", device->devinfo.devno,op);
++ sptr = (unsigned int *) stat->ii.sense.data;
++ DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
++ DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
++ DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
++ DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
++}
++
++static inline int
++__tape_do_io(struct tape_device *device, struct tape_request *request)
++{
++ if(TAPE_NOT_OPER(device))
++ return -ENODEV;
++
++ /* Some operations may happen even on an unused tape device */
++ switch(request->op) {
++ case TO_MSEN:
++ case TO_ASSIGN:
++ case TO_UNASSIGN:
++ case TO_BREAKASS:
++ break;
++ default:
++ if (!TAPE_OPEN(device))
++ return -ENODEV;
++ }
++
++	/* Add a reference to the device to the request. This increases the
++	   device's reference count. */
++ request->device = tape_clone_device(device);
++ request->status = TAPE_REQUEST_QUEUED;
++
++ list_add_tail(&request->list, &device->req_queue);
++ __tape_process_queue(device);
++
++ return 0;
++}
++
++/*
++ * Add the request to the request queue, try to start it if the
++ * tape is idle. Return without waiting for end of i/o.
++ */
++int
++tape_do_io_async(struct tape_device *device, struct tape_request *request)
++{
++ int rc;
++ long flags;
++
++ spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags);
++ /* Add request to request queue and try to start it. */
++ rc = __tape_do_io(device, request);
++ spin_unlock_irqrestore(get_irq_lock(device->devinfo.irq), flags);
++ return rc;
++}
++
++/*
++ * tape_do_io/__tape_wake_up
++ * Add the request to the request queue, try to start it if the
++ * tape is idle and wait uninterruptibly for its completion.
++ */
++static void
++__tape_wake_up(struct tape_request *request, void *data)
++{
++ request->callback = NULL;
++ wake_up((wait_queue_head_t *) data);
++}
++
++int
++tape_do_io(struct tape_device *device, struct tape_request *request)
++{
++ wait_queue_head_t wq;
++ long flags;
++ int rc;
++
++ DBF_EVENT(5, "tape: tape_do_io(%p, %p)\n", device, request);
++
++ init_waitqueue_head(&wq);
++ spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags);
++ /* Setup callback */
++ request->callback = __tape_wake_up;
++ request->callback_data = &wq;
++ /* Add request to request queue and try to start it. */
++ rc = __tape_do_io(device, request);
++ spin_unlock_irqrestore(get_irq_lock(device->devinfo.irq), flags);
++ if (rc)
++ return rc;
++ /* Request added to the queue. Wait for its completion. */
++ wait_event(wq, (request->callback == NULL));
++ /* Get rc from request */
++ return request->rc;
++}
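++
++/*
++ * Usage sketch (illustrative; operation and sizes are assumed values):
++ * issue a synchronous request and release it afterwards.
++ *
++ *	struct tape_request *request;
++ *	int rc;
++ *
++ *	request = tape_alloc_request(1, 0);
++ *	if (IS_ERR(request))
++ *		return PTR_ERR(request);
++ *	request->op = TO_NOP;
++ *	(fill in the channel program at request->cpaddr)
++ *	rc = tape_do_io(device, request);
++ *	tape_put_request(request);
++ */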
++
++/*
++ * tape_do_io_interruptible/__tape_wake_up_interruptible
++ * Add the request to the request queue, try to start it if the
++ * tape is idle and wait interruptibly for its completion.
++ */
++static void
++__tape_wake_up_interruptible(struct tape_request *request, void *data)
++{
++ request->callback = NULL;
++ wake_up_interruptible((wait_queue_head_t *) data);
++}
++
++int
++tape_do_io_interruptible(struct tape_device *device,
++ struct tape_request *request)
++{
++ wait_queue_head_t wq;
++ long flags;
++ int rc;
++
++ DBF_EVENT(5, "tape: tape_do_io_int(%p, %p)\n", device, request);
++
++ init_waitqueue_head(&wq);
++ // debug paranoia
++ if(!device) BUG();
++ if(!request) BUG();
++
++ spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags);
++ /* Setup callback */
++ request->callback = __tape_wake_up_interruptible;
++ request->callback_data = &wq;
++ rc = __tape_do_io(device, request);
++ spin_unlock_irqrestore(get_irq_lock(device->devinfo.irq), flags);
++ if (rc)
++ return rc;
++ /* Request added to the queue. Wait for its completion. */
++ rc = wait_event_interruptible(wq, (request->callback == NULL));
++ if (rc != -ERESTARTSYS)
++ /* Request finished normally. */
++ return request->rc;
++ /* Interrupted by a signal. We have to stop the current request. */
++ spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags);
++ rc = __tape_halt_io(device, request);
++ if (rc == 0) {
++ DBF_EVENT(3, "IO stopped on irq %d\n", device->devinfo.irq);
++ rc = -ERESTARTSYS;
++ }
++ if(request->callback != NULL)
++ request->callback = __tape_wake_up;
++ spin_unlock_irqrestore(get_irq_lock(device->devinfo.irq), flags);
++ wait_event(wq, (request->callback == NULL));
++
++ return rc;
++}
++
++
++/*
++ * Tape interrupt routine, called from the s390 common I/O layer
++ */
++static void
++__tape_do_irq (int irq, void *ds, struct pt_regs *regs)
++{
++ struct tape_device *device;
++ struct tape_request *request;
++ devstat_t *devstat;
++ int final;
++ int rc;
++
++ devstat = (devstat_t *) ds;
++ device = (struct tape_device *) s390_get_private_data(irq);
++ if (device == NULL) {
++ PRINT_ERR("could not get device structure for irq %d "
++ "in interrupt\n", irq);
++ return;
++ }
++ request = (struct tape_request *) devstat->intparm;
++
++ DBF_EVENT(5, "tape: __tape_do_irq(%p, %p)\n", device, request);
++
++ if(request != NULL) {
++ if(request->status == TAPE_REQUEST_DONE) {
++ DBF_EVENT(3, "tape: IO stopped successfully\n");
++ __tape_remove_request(device, request);
++
++ /* Start next request. */
++ if (!list_empty(&device->req_queue))
++ tape_schedule_bh(device);
++ return;
++ }
++
++ /* Interrupt on a canceled request */
++ if(request != tape_get_next_request(device)) {
++			DBF_EVENT(3, "tape: late interrupt ignored\n");
++ return;
++ }
++
++ if(request->timeout.expires) {
++ /*
++			 * If the timer has not yet expired, the reference
++			 * to the request has to be dropped here. Otherwise
++			 * it will be dropped by the timeout handler.
++ */
++ if(del_timer(&request->timeout) > 0)
++ request->timeout.data = (unsigned long)
++ tape_put_request(request);
++ }
++ }
++
++ if (device->devstat.cstat & SCHN_STAT_INCORR_LEN)
++ DBF_EVENT(4, "tape: incorrect blocksize\n");
++
++ if (device->devstat.dstat != 0x0c){
++ /*
++ * Any request that does not come back with channel end
++ * and device end is unusual. Log the sense data.
++ */
++ DBF_EVENT(3,"-- Tape Interrupthandler --\n");
++ tape_dump_sense_dbf(device, request);
++ }
++ if (TAPE_NOT_OPER(device)) {
++		DBF_EVENT(6, "tape: device is not operational\n");
++ return;
++ }
++
++ /* Some status handling */
++ if(devstat && devstat->dstat & DEV_STAT_UNIT_CHECK) {
++ unsigned char *sense = devstat->ii.sense.data;
++
++ if(!(sense[1] & SENSE_DRIVE_ONLINE))
++ device->tape_generic_status &= ~GMT_ONLINE(~0);
++ } else {
++ device->tape_generic_status |= GMT_ONLINE(~0);
++ }
++
++ rc = device->discipline->irq(device, request);
++ /*
++ * rc < 0 : request finished unsuccessfully.
++ * rc == TAPE_IO_SUCCESS: request finished successfully.
++ * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
++ * rc == TAPE_IO_RETRY: request finished but needs another go.
++ * rc == TAPE_IO_STOP: request needs to get terminated.
++ */
++ final = 0;
++ switch (rc) {
++ case TAPE_IO_SUCCESS:
++ final = 1;
++ break;
++ case TAPE_IO_PENDING:
++ break;
++ case TAPE_IO_RETRY:
++#ifdef CONFIG_S390_TAPE_BLOCK
++ if (request->op == TO_BLOCK)
++ device->discipline->check_locate(device, request);
++#endif
++ rc = __do_IO(device, request);
++ if (rc) {
++ DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc);
++ final = 1;
++ }
++ break;
++ case TAPE_IO_STOP:
++ __tape_halt_io(device, request);
++ rc = -EIO;
++ break;
++ default:
++ if (rc > 0) {
++ DBF_EVENT(6, "xunknownrc\n");
++ PRINT_ERR("Invalid return code from discipline "
++ "interrupt function.\n");
++ rc = -EIO;
++ }
++ final = 1;
++ break;
++ }
++ if (final) {
++ /* This might be an unsolicited interrupt (no request) */
++ if(request != NULL) {
++ /* Set ending status. */
++ request->rc = rc;
++ __tape_remove_request(device, request);
++ }
++ /* Start next request. */
++ if (!list_empty(&device->req_queue))
++ tape_schedule_bh(device);
++ }
++}
++
++/*
++ * Lock a shared tape for our exclusive use.
++ */
++int
++tape_assign(struct tape_device *device, int type)
++{
++ int rc;
++
++ spin_lock_irq(&device->assign_lock);
++
++ /* The device is already assigned */
++ rc = 0;
++ if (!TAPE_ASSIGNED(device)) {
++ rc = device->discipline->assign(device);
++
++ spin_lock(get_irq_lock(device->devinfo.irq));
++ if (rc) {
++ PRINT_WARN(
++ "(%04x): assign failed - "
++ "device might be busy\n",
++ device->devstat.devno);
++ DBF_EVENT(3,
++ "(%04x): assign failed "
++ "- device might be busy\n",
++ device->devstat.devno);
++ TAPE_SET_STATE(device, TAPE_STATUS_BOXED);
++ } else {
++ DBF_EVENT(3, "(%04x): assign lpum = %02x\n",
++ device->devstat.devno, device->devstat.lpum);
++ tape_state_set(
++ device,
++ (device->tape_status | type) &
++ (~TAPE_STATUS_BOXED)
++ );
++ }
++ } else {
++ spin_lock(get_irq_lock(device->devinfo.irq));
++ TAPE_SET_STATE(device, type);
++ }
++ spin_unlock(get_irq_lock(device->devinfo.irq));
++ spin_unlock_irq(&device->assign_lock);
++
++ return rc;
++}
++
++/*
++ * Unlock a shared tape.
++ */
++int
++tape_unassign(struct tape_device *device, int type)
++{
++ int rc;
++
++ spin_lock_irq(&device->assign_lock);
++
++ rc = 0;
++ spin_lock(get_irq_lock(device->devinfo.irq));
++ if (!TAPE_ASSIGNED(device)) {
++ spin_unlock(get_irq_lock(device->devinfo.irq));
++ spin_unlock_irq(&device->assign_lock);
++ return 0;
++ }
++ TAPE_CLEAR_STATE(device, type);
++ spin_unlock(get_irq_lock(device->devinfo.irq));
++
++ if (!TAPE_ASSIGNED(device)) {
++ rc = device->discipline->unassign(device);
++ if (rc) {
++ PRINT_WARN("(%04x): unassign failed\n",
++ device->devstat.devno);
++ DBF_EVENT(3, "(%04x): unassign failed\n",
++ device->devstat.devno);
++ } else {
++ DBF_EVENT(3, "(%04x): unassign lpum = %02x\n",
++ device->devstat.devno, device->devstat.lpum);
++ }
++ }
++
++ spin_unlock_irq(&device->assign_lock);
++ return rc;
++}
++
++/*
++ * Tape device open function used by tape_char & tape_block frontends.
++ */
++int
++tape_open(struct tape_device *device)
++{
++ int rc;
++
++ if(TAPE_INIT(device) && TAPE_BOXED(device)) {
++ rc = tape_request_irq(device);
++ if (rc) {
++ if(rc == -EUSERS)
++ return -EPERM;
++ return rc;
++ } else {
++ TAPE_CLEAR_STATE(device, TAPE_STATUS_INIT);
++ }
++ }
++
++ spin_lock_irq(&tape_discipline_lock);
++ spin_lock(get_irq_lock(device->devinfo.irq));
++ if (TAPE_NOT_OPER(device)) {
++ DBF_EVENT(6, "TAPE:nodev\n");
++ rc = -ENODEV;
++ } else if (TAPE_OPEN(device)) {
++ DBF_EVENT(6, "TAPE:dbusy\n");
++ rc = -EBUSY;
++ } else if (device->discipline != NULL &&
++ !try_inc_mod_count(device->discipline->owner)) {
++ DBF_EVENT(6, "TAPE:nodisc\n");
++ rc = -ENODEV;
++ } else {
++ TAPE_SET_STATE(device, TAPE_STATUS_OPEN);
++ rc = 0;
++ }
++ spin_unlock(get_irq_lock(device->devinfo.irq));
++ spin_unlock_irq(&tape_discipline_lock);
++ return rc;
++}
++
++/*
++ * Tape device release function used by tape_char & tape_block frontends.
++ */
++int
++tape_release(struct tape_device *device)
++{
++ spin_lock_irq(&tape_discipline_lock);
++ spin_lock(get_irq_lock(device->devinfo.irq));
++
++ if (TAPE_OPEN(device)) {
++ TAPE_CLEAR_STATE(device, TAPE_STATUS_OPEN);
++
++ if (device->discipline->owner)
++ __MOD_DEC_USE_COUNT(device->discipline->owner);
++ }
++ spin_unlock(get_irq_lock(device->devinfo.irq));
++ spin_unlock_irq(&tape_discipline_lock);
++
++ return 0;
++}
++
++/*
++ * Execute a magnetic tape command a number of times.
++ */
++int
++tape_mtop(struct tape_device *device, int mt_op, int mt_count)
++{
++ tape_mtop_fn fn;
++ int rc;
++
++ DBF_EVENT(6, "TAPE:mtio\n");
++ DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
++ DBF_EVENT(6, "TAPE:arg: %x\n", mt_count);
++
++ if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
++ return -EINVAL;
++ fn = device->discipline->mtop_array[mt_op];
++ if(fn == NULL)
++ return -EINVAL;
++
++ /* We assume that the backends can handle count up to 500. */
++ if (mt_op == MTBSR || mt_op == MTFSR || mt_op == MTFSF ||
++ mt_op == MTBSF || mt_op == MTFSFM || mt_op == MTBSFM) {
++ rc = 0;
++ for (; mt_count > 500; mt_count -= 500)
++ if ((rc = fn(device, 500)) != 0)
++ break;
++ if (rc == 0)
++ rc = fn(device, mt_count);
++ } else
++ rc = fn(device, mt_count);
++ return rc;
++
++}
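++
++/*
++ * Example (editorial): an MTFSF with mt_count == 1200 is executed as
++ * fn(device, 500), fn(device, 500), fn(device, 200); the loop stops
++ * early as soon as one chunk fails.
++ */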
++
++void
++tape_init_disciplines(void)
++{
++#ifdef CONFIG_S390_TAPE_34XX
++ tape_34xx_init();
++#endif
++#ifdef CONFIG_S390_TAPE_34XX_MODULE
++ request_module("tape_34xx");
++#endif
++
++#ifdef CONFIG_S390_TAPE_3590
++	tape_3590_init();
++#endif
++#ifdef CONFIG_S390_TAPE_3590_MODULE
++	request_module("tape_3590");
++#endif
++ tape_auto_detect();
++}
++
++/*
++ * Tape init function.
++ */
++static int
++tape_init (void)
++{
++ TAPE_DBF_AREA = debug_register ( "tape", 1, 2, 4*sizeof(long));
++ debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
++ DBF_EVENT(3, "tape init: ($Revision: 1.7.4.8 $)\n");
++#ifdef CONFIG_DEVFS_FS
++ tape_devfs_root_entry = devfs_mk_dir (NULL, "tape", NULL);
++#endif /* CONFIG_DEVFS_FS */
++ DBF_EVENT(3, "dev detect\n");
++ /* Parse the parameters. */
++ tape_devmap_init();
++#ifdef CONFIG_PROC_FS
++ tape_proc_init();
++#endif /* CONFIG_PROC_FS */
++ tapechar_init();
++#ifdef CONFIG_S390_TAPE_BLOCK
++ tapeblock_init();
++#endif
++ tape_init_disciplines();
++ return 0;
++}
++
++/*
++ * Tape exit function.
++ */
++void
++tape_exit(void)
++{
++ struct list_head *l, *n;
++ struct tape_discipline *discipline;
++
++ DBF_EVENT(6, "tape exit\n");
++
++ /* Cleanup registered disciplines. */
++ spin_lock(&tape_discipline_lock);
++ list_for_each_safe(l, n, &tape_disciplines) {
++ discipline = list_entry(l, struct tape_discipline, list);
++ __tape_unregister_discipline(discipline);
++ }
++ spin_unlock(&tape_discipline_lock);
++
++ /* Get rid of the frontends */
++ tapechar_exit();
++#ifdef CONFIG_S390_TAPE_BLOCK
++ tapeblock_exit();
++#endif
++#ifdef CONFIG_PROC_FS
++ tape_proc_cleanup();
++#endif
++ tape_devmap_exit();
++#ifdef CONFIG_DEVFS_FS
++ devfs_unregister (tape_devfs_root_entry); /* devfs checks for NULL */
++#endif /* CONFIG_DEVFS_FS */
++ debug_unregister (TAPE_DBF_AREA);
++}
++
++/*
++ * Issue a hotplug event
++ */
++void
++tape_hotplug_event(struct tape_device *device, int devmaj, int action)
++{
++#ifdef CONFIG_HOTPLUG
++ char *argv[3];
++ char *envp[8];
++ char devno[20];
++ char major[20];
++ char minor[20];
++
++ sprintf(devno, "DEVNO=%04x", device->devinfo.devno);
++ sprintf(major, "MAJOR=%d", devmaj);
++ sprintf(minor, "MINOR=%d", device->first_minor);
++
++ argv[0] = hotplug_path;
++ argv[1] = "tape";
++ argv[2] = NULL;
++
++ envp[0] = "HOME=/";
++ envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
++
++ switch(action) {
++ case TAPE_HOTPLUG_CHAR_ADD:
++ case TAPE_HOTPLUG_BLOCK_ADD:
++ envp[2] = "ACTION=add";
++ break;
++ case TAPE_HOTPLUG_CHAR_REMOVE:
++ case TAPE_HOTPLUG_BLOCK_REMOVE:
++ envp[2] = "ACTION=remove";
++ break;
++ default:
++ BUG();
++ }
++ switch(action) {
++ case TAPE_HOTPLUG_CHAR_ADD:
++ case TAPE_HOTPLUG_CHAR_REMOVE:
++ envp[3] = "INTERFACE=char";
++ break;
++ case TAPE_HOTPLUG_BLOCK_ADD:
++ case TAPE_HOTPLUG_BLOCK_REMOVE:
++ envp[3] = "INTERFACE=block";
++ break;
++ }
++ envp[4] = devno;
++ envp[5] = major;
++ envp[6] = minor;
++ envp[7] = NULL;
++
++ call_usermodehelper(argv[0], argv, envp);
++#endif
++}
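++
++/*
++ * Example (illustrative; devno, major and minor are assumed values):
++ * adding the character device for devno 0x0181 with major 254, minor 0
++ * runs hotplug_path as "hotplug tape" with ACTION=add, INTERFACE=char,
++ * DEVNO=0181, MAJOR=254 and MINOR=0 in the environment.
++ */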
++
++MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
++ "Michael Holzheu (cotte at de.ibm.com,holzheu at de.ibm.com)");
++MODULE_DESCRIPTION("Linux on zSeries channel attached "
++ "tape device driver ($Revision: 1.7.4.8 $)");
++
++module_init(tape_init);
++module_exit(tape_exit);
++
++EXPORT_SYMBOL(tape_state_string);
++EXPORT_SYMBOL(tape_op_verbose);
++EXPORT_SYMBOL(tape_state_set);
++EXPORT_SYMBOL(tape_med_state_set);
++EXPORT_SYMBOL(tape_register_discipline);
++EXPORT_SYMBOL(tape_unregister_discipline);
++EXPORT_SYMBOL(tape_alloc_request);
++EXPORT_SYMBOL(tape_put_request);
++EXPORT_SYMBOL(tape_clone_request);
++EXPORT_SYMBOL(tape_dump_sense);
++EXPORT_SYMBOL(tape_dump_sense_dbf);
++EXPORT_SYMBOL(tape_do_io);
++EXPORT_SYMBOL(tape_do_io_free);
++EXPORT_SYMBOL(tape_do_io_async);
++EXPORT_SYMBOL(tape_do_io_interruptible);
++EXPORT_SYMBOL(tape_mtop);
++EXPORT_SYMBOL(tape_hotplug_event);
++
+=== drivers/s390/char/sclp.c
+==================================================================
+--- drivers/s390/char/sclp.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/sclp.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,854 @@
++/*
++ * drivers/s390/char/sclp.c
++ * core function to access sclp interface
++ *
++ * S390 version
++ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Martin Peschke <mpeschke at de.ibm.com>
++ * Martin Schwidefsky <schwidefsky at de.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/kmod.h>
++#include <linux/bootmem.h>
++#include <linux/errno.h>
++#include <linux/ptrace.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/interrupt.h>
++#include <linux/timer.h>
++#include <linux/init.h>
++#include <linux/reboot.h>
++#include <asm/irq.h>
++#include <asm/s390_ext.h>
++#include <asm/processor.h>
++
++#include "sclp.h"
++
++#define SCLP_CORE_PRINT_HEADER "sclp low level driver: "
++
++/* Structure for register_early_external_interrupt. */
++static ext_int_info_t ext_int_info_hwc;
++
++/* spinlock to protect global variables of sclp_core */
++static spinlock_t sclp_lock;
++
++/* Mask of valid sclp events */
++static sccb_mask_t sclp_receive_mask;
++static sccb_mask_t sclp_send_mask;
++
++/* List of registered event types */
++static struct list_head sclp_reg_list;
++
++/* sccb queue */
++static struct list_head sclp_req_queue;
++
++/* sccb for unconditional read */
++static struct sclp_req sclp_read_req;
++static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
++/* sccb for write mask sccb */
++static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
++
++/* Timer for init mask retries. */
++static struct timer_list retry_timer;
++
++/* Timer for busy retries. */
++static struct timer_list sclp_busy_timer;
++
++static volatile unsigned long sclp_status = 0;
++/* some status flags */
++#define SCLP_INIT 0
++#define SCLP_RUNNING 1
++#define SCLP_READING 2
++#define SCLP_SHUTDOWN 3
++
++#define SCLP_INIT_POLL_INTERVAL 1
++#define SCLP_BUSY_POLL_INTERVAL 1
++
++#define SCLP_COMMAND_INITIATED 0
++#define SCLP_BUSY 2
++#define SCLP_NOT_OPERATIONAL 3
++
++/*
++ * assembler instruction for Service Call
++ */
++static int
++__service_call(sclp_cmdw_t command, void *sccb)
++{
++ int cc;
++
++ /*
++ * Mnemonic: SERVC Rx, Ry [RRE]
++ *
++ * Rx: SCLP command word
++ * Ry: address of SCCB
++ */
++ __asm__ __volatile__(
++ " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */
++ " ipm %0\n"
++ " srl %0,28"
++ : "=&d" (cc)
++ : "d" (command), "a" (__pa(sccb))
++ : "cc", "memory" );
++ /*
++	 * cc == 0: Service Call successfully initiated
++ * cc == 2: SCLP busy, new Service Call not initiated,
++ * new SCCB unchanged
++ * cc == 3: SCLP function not operational
++ */
++ if (cc == SCLP_NOT_OPERATIONAL)
++ return -EIO;
++ if (cc == SCLP_BUSY)
++ return -EBUSY;
++ return 0;
++}
++
++static void
++sclp_start_request(void)
++{
++ struct sclp_req *req;
++ int rc;
++ unsigned long flags;
++
++ spin_lock_irqsave(&sclp_lock, flags);
++ /* quick exit if sclp is already in use */
++ if (test_bit(SCLP_RUNNING, &sclp_status)) {
++ spin_unlock_irqrestore(&sclp_lock, flags);
++ return;
++ }
++ /* Try to start requests from the request queue. */
++ while (!list_empty(&sclp_req_queue)) {
++ req = list_entry(sclp_req_queue.next, struct sclp_req, list);
++ rc = __service_call(req->command, req->sccb);
++ if (rc == 0) {
++			/* Successfully started request. */
++ req->status = SCLP_REQ_RUNNING;
++ /* Request active. Set running indication. */
++ set_bit(SCLP_RUNNING, &sclp_status);
++ break;
++ }
++ if (rc == -EBUSY) {
++			/*
++ * SCLP is busy but no request is running.
++ * Try again later.
++ */
++ if (!timer_pending(&sclp_busy_timer) ||
++ !mod_timer(&sclp_busy_timer,
++ jiffies + SCLP_BUSY_POLL_INTERVAL*HZ)) {
++ sclp_busy_timer.function =
++ (void *) sclp_start_request;
++ sclp_busy_timer.expires =
++ jiffies + SCLP_BUSY_POLL_INTERVAL*HZ;
++ add_timer(&sclp_busy_timer);
++ }
++ break;
++ }
++ /* Request failed. */
++ req->status = SCLP_REQ_FAILED;
++ list_del(&req->list);
++ if (req->callback) {
++ spin_unlock_irqrestore(&sclp_lock, flags);
++ req->callback(req, req->callback_data);
++ spin_lock_irqsave(&sclp_lock, flags);
++ }
++ }
++ spin_unlock_irqrestore(&sclp_lock, flags);
++}
++
++static int
++sclp_process_evbufs(struct sccb_header *sccb)
++{
++ int result;
++ unsigned long flags;
++ struct evbuf_header *evbuf;
++ struct list_head *l;
++ struct sclp_register *t;
++
++ spin_lock_irqsave(&sclp_lock, flags);
++ evbuf = (struct evbuf_header *) (sccb + 1);
++ result = 0;
++ while ((addr_t) evbuf < (addr_t) sccb + sccb->length) {
++ /* check registered event */
++ t = NULL;
++ list_for_each(l, &sclp_reg_list) {
++ t = list_entry(l, struct sclp_register, list);
++ if (t->receive_mask & (1 << (32 - evbuf->type))) {
++ if (t->receiver_fn != NULL) {
++ spin_unlock_irqrestore(&sclp_lock,
++ flags);
++ t->receiver_fn(evbuf);
++ spin_lock_irqsave(&sclp_lock, flags);
++ }
++ break;
++ }
++ else
++ t = NULL;
++ }
++ /* Check for unrequested event buffer */
++ if (t == NULL)
++ result = -ENOSYS;
++ evbuf = (struct evbuf_header *)
++ ((addr_t) evbuf + evbuf->length);
++ }
++ spin_unlock_irqrestore(&sclp_lock, flags);
++ return result;
++}
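++
++/*
++ * Example (editorial): for evbuf->type == 2 the mask test above checks
++ * receive_mask against 1 << 30 (0x40000000); event type n thus
++ * corresponds to the n-th most significant bit of the 32-bit mask.
++ */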
++
++char *
++sclp_error_message(u16 rc)
++{
++ static struct {
++ u16 code; char *msg;
++ } sclp_errors[] = {
++ { 0x0000, "No response code stored (machine malfunction)" },
++ { 0x0020, "Normal Completion" },
++ { 0x0040, "SCLP equipment check" },
++ { 0x0100, "SCCB boundary violation" },
++ { 0x01f0, "Invalid command" },
++ { 0x0220, "Normal Completion; suppressed buffers pending" },
++ { 0x0300, "Insufficient SCCB length" },
++ { 0x0340, "Contained SCLP equipment check" },
++ { 0x05f0, "Target resource in improper state" },
++ { 0x40f0, "Invalid function code/not installed" },
++ { 0x60f0, "No buffers stored" },
++ { 0x62f0, "No buffers stored; suppressed buffers pending" },
++ { 0x70f0, "Invalid selection mask" },
++ { 0x71f0, "Event buffer exceeds available space" },
++ { 0x72f0, "Inconsistent lengths" },
++ { 0x73f0, "Event buffer syntax error" }
++ };
++ int i;
++ for (i = 0; i < sizeof(sclp_errors)/sizeof(sclp_errors[0]); i++)
++ if (rc == sclp_errors[i].code)
++ return sclp_errors[i].msg;
++ return "Invalid response code";
++}
++
++/*
++ * postprocessing of unconditional read service call
++ */
++static void
++sclp_unconditional_read_cb(struct sclp_req *read_req, void *data)
++{
++ struct sccb_header *sccb;
++
++ sccb = read_req->sccb;
++ if (sccb->response_code == 0x0020 ||
++ sccb->response_code == 0x0220) {
++ if (sclp_process_evbufs(sccb) != 0)
++ printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
++ "unconditional read: "
++ "unrequested event buffer received.\n");
++ }
++
++ if (sccb->response_code != 0x0020)
++ printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
++ "unconditional read: %s (response code=0x%x).\n",
++ sclp_error_message(sccb->response_code),
++ sccb->response_code);
++
++ clear_bit(SCLP_READING, &sclp_status);
++}
++
++/*
++ * Function to queue Read Event Data/Unconditional Read
++ */
++static void
++__sclp_unconditional_read(void)
++{
++ struct sccb_header *sccb;
++ struct sclp_req *read_req;
++
++ /*
++ * Don't try to initiate Unconditional Read if we are not able to
++ * receive anything
++ */
++ if (sclp_receive_mask == 0)
++ return;
++ /* Don't try reading if a read is already outstanding */
++ if (test_and_set_bit(SCLP_READING, &sclp_status))
++ return;
++ /* Initialize read sccb */
++ sccb = (struct sccb_header *) sclp_read_sccb;
++ clear_page(sccb);
++ sccb->length = PAGE_SIZE;
++ sccb->function_code = 0; /* unconditional read */
++ sccb->control_mask[2] = 0x80; /* variable length response */
++ /* Initialize request structure */
++ read_req = &sclp_read_req;
++ read_req->command = SCLP_CMDW_READDATA;
++ read_req->status = SCLP_REQ_QUEUED;
++ read_req->callback = sclp_unconditional_read_cb;
++ read_req->sccb = sccb;
++ /* Add read request to the head of queue */
++ list_add(&read_req->list, &sclp_req_queue);
++}
++
++/* Bit masks to interpret external interruption parameter contents. */
++#define EXT_INT_SCCB_MASK 0xfffffff8
++#define EXT_INT_STATECHANGE_PENDING 0x00000002
++#define EXT_INT_EVBUF_PENDING 0x00000001
++
++/*
++ * Handler for service-signal external interruptions
++ */
++static void
++sclp_interrupt_handler(struct pt_regs *regs, __u16 code)
++{
++ u32 ext_int_param, finished_sccb, evbuf_pending;
++ struct list_head *l;
++ struct sclp_req *req, *tmp;
++ int cpu;
++
++ spin_lock(&sclp_lock);
++ /*
++ * Only process interrupt if sclp is initialized.
++ * This avoids strange effects for a pending request
++ * from before the last re-ipl.
++ */
++ if (!test_bit(SCLP_INIT, &sclp_status)) {
++ /* Now clear the running bit */
++ clear_bit(SCLP_RUNNING, &sclp_status);
++ spin_unlock(&sclp_lock);
++ return;
++ }
++ ext_int_param = S390_lowcore.ext_params;
++ finished_sccb = ext_int_param & EXT_INT_SCCB_MASK;
++ evbuf_pending = ext_int_param & (EXT_INT_EVBUF_PENDING |
++ EXT_INT_STATECHANGE_PENDING);
++ cpu = smp_processor_id();
++ irq_enter(cpu, 0x2401);
++ req = NULL;
++ if (finished_sccb != 0U) {
++ list_for_each(l, &sclp_req_queue) {
++ tmp = list_entry(l, struct sclp_req, list);
++ if (finished_sccb == (u32)(addr_t) tmp->sccb) {
++ list_del(&tmp->list);
++ req = tmp;
++ break;
++ }
++ }
++ }
++ spin_unlock(&sclp_lock);
++ /* Perform callback */
++ if (req != NULL) {
++ req->status = SCLP_REQ_DONE;
++ if (req->callback != NULL)
++ req->callback(req, req->callback_data);
++ }
++ spin_lock(&sclp_lock);
++	/* Queue a read sccb at the head of the queue if an event buffer is pending */
++ if (evbuf_pending)
++ __sclp_unconditional_read();
++ /* Now clear the running bit if SCLP indicated a finished SCCB */
++ if (finished_sccb != 0U)
++ clear_bit(SCLP_RUNNING, &sclp_status);
++ spin_unlock(&sclp_lock);
++ /* and start next request on the queue */
++ sclp_start_request();
++ irq_exit(cpu, 0x2401);
++}
++
++/*
++ * Wait synchronously for the external interrupt of the sclp. Since we
++ * must not handle any other external interrupt meanwhile, all other
++ * external interrupts are disabled in control register 0.
++ */
++void
++sclp_sync_wait(void)
++{
++ unsigned long psw_mask;
++ unsigned long cr0, cr0_sync;
++
++ /* Prevent BH from executing. */
++ local_bh_disable();
++ /*
++ * save cr0
++ * enable service signal external interruption (cr0.22)
++ * disable cr0.20-21, cr0.25, cr0.27, cr0.30-31
++ * don't touch any other bit in cr0
++ */
++ __ctl_store(cr0, 0, 0);
++ cr0_sync = cr0;
++ cr0_sync |= 0x00000200;
++ cr0_sync &= 0xFFFFF3AC;
++ __ctl_load(cr0_sync, 0, 0);
++
++ /* enable external interruptions (PSW-mask.7) */
++ asm volatile ("STOSM 0(%1),0x01"
++ : "=m" (psw_mask) : "a" (&psw_mask) : "memory");
++
++ /* wait until ISR signals receipt of interrupt */
++ while (test_bit(SCLP_RUNNING, &sclp_status)) {
++ barrier();
++ cpu_relax();
++ }
++
++ /* disable external interruptions */
++ asm volatile ("SSM 0(%0)"
++ : : "a" (&psw_mask) : "memory");
++
++ /* restore cr0 */
++ __ctl_load(cr0, 0, 0);
++ __local_bh_enable();
++}
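++
++/*
++ * A worked derivation of the masks used above. ESA/390 numbers control
++ * register bits from the most significant end, so bit n of the 32-bit
++ * register corresponds to the value 1 << (31 - n):
++ *
++ *    cr0.22 (service signal)           -> 1 << 9 = 0x00000200
++ *    cr0.20-21, 25, 27, 30-31 (masked) -> 0x800|0x400|0x40|0x10|0x2|0x1
++ *                                         = 0x00000C53, ~0x00000C53 = 0xFFFFF3AC
++ */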
++
++/*
++ * Queue an SCLP request. Request will immediately be processed if queue is
++ * empty.
++ */
++void
++sclp_add_request(struct sclp_req *req)
++{
++ unsigned long flags;
++
++ if (!test_bit(SCLP_INIT, &sclp_status)) {
++ req->status = SCLP_REQ_FAILED;
++ if (req->callback != NULL)
++ req->callback(req, req->callback_data);
++ return;
++ }
++ spin_lock_irqsave(&sclp_lock, flags);
++ /* queue the request */
++ req->status = SCLP_REQ_QUEUED;
++ list_add_tail(&req->list, &sclp_req_queue);
++ spin_unlock_irqrestore(&sclp_lock, flags);
++ /* try to start the first request on the queue */
++ sclp_start_request();
++}
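++
++/*
++ * A minimal usage sketch for sclp_add_request(); the command word, sccb
++ * page and callback are illustrative placeholders only:
++ *
++ *    static void my_callback(struct sclp_req *req, void *data)
++ *    {
++ *            // req->status is SCLP_REQ_DONE or SCLP_REQ_FAILED here
++ *    }
++ *
++ *    req->command = SCLP_CMDW_WRITEDATA;
++ *    req->sccb = sccb;                 // a prepared 4K SCCB page
++ *    req->status = SCLP_REQ_FILLED;
++ *    req->callback = my_callback;
++ *    req->callback_data = NULL;
++ *    sclp_add_request(req);
++ */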
++
++/* state change notification */
++struct sclp_statechangebuf {
++ struct evbuf_header header;
++ u8 validity_sclp_active_facility_mask : 1;
++ u8 validity_sclp_receive_mask : 1;
++ u8 validity_sclp_send_mask : 1;
++ u8 validity_read_data_function_mask : 1;
++ u16 _zeros : 12;
++ u16 mask_length;
++ u64 sclp_active_facility_mask;
++ sccb_mask_t sclp_receive_mask;
++ sccb_mask_t sclp_send_mask;
++ u32 read_data_function_mask;
++} __attribute__((packed));
++
++static inline void
++__sclp_notify_state_change(void)
++{
++ struct list_head *l;
++ struct sclp_register *t;
++ sccb_mask_t receive_mask, send_mask;
++
++ list_for_each(l, &sclp_reg_list) {
++ t = list_entry(l, struct sclp_register, list);
++ receive_mask = t->receive_mask & sclp_receive_mask;
++ send_mask = t->send_mask & sclp_send_mask;
++ if (t->sclp_receive_mask != receive_mask ||
++ t->sclp_send_mask != send_mask) {
++ t->sclp_receive_mask = receive_mask;
++ t->sclp_send_mask = send_mask;
++ if (t->state_change_fn != NULL)
++ t->state_change_fn(t);
++ }
++ }
++}
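++
++/*
++ * Example of the mask intersection above (hypothetical values): a listener
++ * registered with receive_mask = 0x40000000 while SCLP currently offers
++ * sclp_receive_mask = 0xC0000000 ends up with the effective mask
++ * 0x40000000 & 0xC0000000 = 0x40000000. state_change_fn is only invoked
++ * when one of the effective masks actually changed.
++ */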
++
++static void
++sclp_state_change(struct evbuf_header *evbuf)
++{
++ unsigned long flags;
++ struct sclp_statechangebuf *scbuf;
++
++ spin_lock_irqsave(&sclp_lock, flags);
++ scbuf = (struct sclp_statechangebuf *) evbuf;
++
++ if (scbuf->validity_sclp_receive_mask) {
++ if (scbuf->mask_length != sizeof(sccb_mask_t))
++ printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
++ "state change event with mask length %i\n",
++ scbuf->mask_length);
++ else
++ /* set new receive mask */
++ sclp_receive_mask = scbuf->sclp_receive_mask;
++ }
++
++ if (scbuf->validity_sclp_send_mask) {
++ if (scbuf->mask_length != sizeof(sccb_mask_t))
++ printk(KERN_WARNING SCLP_CORE_PRINT_HEADER
++ "state change event with mask length %i\n",
++ scbuf->mask_length);
++ else
++ /* set new send mask */
++ sclp_send_mask = scbuf->sclp_send_mask;
++ }
++
++ __sclp_notify_state_change();
++ spin_unlock_irqrestore(&sclp_lock, flags);
++}
++
++static struct sclp_register sclp_state_change_event = {
++ .receive_mask = EvTyp_StateChange_Mask,
++ .receiver_fn = sclp_state_change
++};
++
++
++/*
++ * SCLP quiesce event handler
++ */
++#ifdef CONFIG_SMP
++static void
++do_load_quiesce_psw(void * __unused)
++{
++ psw_t quiesce_psw;
++ unsigned long status;
++ int i;
++
++ if (smp_processor_id() != 0)
++ signal_processor(smp_processor_id(), sigp_stop);
++ /* Wait for all other cpus to enter stopped state */
++ i = 1;
++ while (i < smp_num_cpus) {
++ switch (signal_processor_ps(&status, 0, i, sigp_sense)) {
++ case sigp_order_code_accepted:
++ case sigp_status_stored:
++ /* Check for stopped and check stop state */
++ if (test_bit(6, &status) || test_bit(4, &status))
++ i++;
++ break;
++ case sigp_busy:
++ break;
++ case sigp_not_operational:
++ i++;
++ break;
++ }
++ }
++ /* Quiesce the last cpu with the special psw */
++ quiesce_psw.mask = _DW_PSW_MASK;
++ quiesce_psw.addr = 0xfff;
++ __load_psw(quiesce_psw);
++}
++
++static void
++do_machine_quiesce(void)
++{
++ smp_call_function(do_load_quiesce_psw, NULL, 0, 0);
++ do_load_quiesce_psw(NULL);
++}
++#else
++static void
++do_machine_quiesce(void)
++{
++ psw_t quiesce_psw;
++
++ quiesce_psw.mask = _DW_PSW_MASK;
++ quiesce_psw.addr = 0xfff;
++ __load_psw(quiesce_psw);
++}
++#endif
++
++extern void ctrl_alt_del(void);
++
++static void
++sclp_quiesce(struct evbuf_header *evbuf)
++{
++ /*
++ * We got a "shutdown" request.
++ * Add a call to an appropriate "shutdown" routine here. This
++ * routine should set all PSWs to 'disabled-wait', 'stopped'
++ * or 'check-stopped' - except 1 PSW which needs to carry a
++ * special bit pattern called 'quiesce PSW'.
++ */
++ _machine_restart = (void *) do_machine_quiesce;
++ _machine_halt = do_machine_quiesce;
++ _machine_power_off = do_machine_quiesce;
++ ctrl_alt_del();
++}
++
++static struct sclp_register sclp_quiesce_event = {
++ .receive_mask = EvTyp_SigQuiesce_Mask,
++ .receiver_fn = sclp_quiesce
++};
++
++/* initialisation of SCLP */
++struct init_sccb {
++ struct sccb_header header;
++ u16 _reserved;
++ u16 mask_length;
++ sccb_mask_t receive_mask;
++ sccb_mask_t send_mask;
++ sccb_mask_t sclp_send_mask;
++ sccb_mask_t sclp_receive_mask;
++} __attribute__((packed));
++
++static void sclp_init_mask_retry(unsigned long);
++
++static int
++sclp_init_mask(void)
++{
++ unsigned long flags;
++ struct init_sccb *sccb;
++ struct sclp_req *req;
++ struct list_head *l;
++ struct sclp_register *t;
++ int rc;
++
++ sccb = (struct init_sccb *) sclp_init_sccb;
++ /* stick the request structure to the end of the init sccb page */
++ req = (struct sclp_req *) ((addr_t) sccb + PAGE_SIZE) - 1;
++
++ /* SCLP setup concerning receiving and sending Event Buffers */
++ req->command = SCLP_CMDW_WRITEMASK;
++ req->status = SCLP_REQ_QUEUED;
++ req->callback = NULL;
++ req->sccb = sccb;
++ /* setup sccb for writemask command */
++ memset(sccb, 0, sizeof(struct init_sccb));
++ sccb->header.length = sizeof(struct init_sccb);
++ sccb->mask_length = sizeof(sccb_mask_t);
++ /* copy in the sccb mask of the registered event types */
++ spin_lock_irqsave(&sclp_lock, flags);
++ if (!test_bit(SCLP_SHUTDOWN, &sclp_status)) {
++ list_for_each(l, &sclp_reg_list) {
++ t = list_entry(l, struct sclp_register, list);
++ sccb->receive_mask |= t->receive_mask;
++ sccb->send_mask |= t->send_mask;
++ }
++ }
++ sccb->sclp_receive_mask = 0;
++ sccb->sclp_send_mask = 0;
++ if (test_bit(SCLP_INIT, &sclp_status)) {
++ /* add request to sclp queue */
++ list_add_tail(&req->list, &sclp_req_queue);
++ spin_unlock_irqrestore(&sclp_lock, flags);
++ /* and start if SCLP is idle */
++ sclp_start_request();
++ /* now wait for completion */
++ while (req->status != SCLP_REQ_DONE &&
++ req->status != SCLP_REQ_FAILED)
++ sclp_sync_wait();
++ spin_lock_irqsave(&sclp_lock, flags);
++ } else {
++ /*
++ * Special case for the very first write mask command.
++ * The interrupt handler does not remove requests from
++ * the request queue and does not call callbacks yet,
++ * because there might be a pending old interrupt
++ * after a Re-IPL. We have to receive and ignore it.
++ */
++ do {
++ rc = __service_call(req->command, req->sccb);
++ if (rc == 0)
++ set_bit(SCLP_RUNNING, &sclp_status);
++ spin_unlock_irqrestore(&sclp_lock, flags);
++ if (rc == -EIO)
++ return -ENOSYS;
++ sclp_sync_wait();
++ spin_lock_irqsave(&sclp_lock, flags);
++ } while (rc == -EBUSY);
++ }
++ if (sccb->header.response_code != 0x0020) {
++ /* WRITEMASK failed - we cannot rely on receiving a state
++ change event, so initially, polling is the only alternative
++ for us to ever become operational. */
++ if (!test_bit(SCLP_SHUTDOWN, &sclp_status) &&
++ (!timer_pending(&retry_timer) ||
++ !mod_timer(&retry_timer,
++ jiffies + SCLP_INIT_POLL_INTERVAL*HZ))) {
++ retry_timer.function = sclp_init_mask_retry;
++ retry_timer.data = 0;
++ retry_timer.expires = jiffies +
++ SCLP_INIT_POLL_INTERVAL*HZ;
++ add_timer(&retry_timer);
++ }
++ } else {
++ sclp_receive_mask = sccb->sclp_receive_mask;
++ sclp_send_mask = sccb->sclp_send_mask;
++ __sclp_notify_state_change();
++ }
++ spin_unlock_irqrestore(&sclp_lock, flags);
++ return 0;
++}
++
++static void
++sclp_init_mask_retry(unsigned long data)
++{
++ sclp_init_mask();
++}
++
++/* Reboot event handler - reset send and receive mask to prevent pending SCLP
++ * events from interfering with rebooted system. */
++static int
++sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
++{
++ unsigned long flags;
++
++ /* Note: need spinlock to maintain atomicity when accessing global
++ * variables. */
++ spin_lock_irqsave(&sclp_lock, flags);
++ set_bit(SCLP_SHUTDOWN, &sclp_status);
++ spin_unlock_irqrestore(&sclp_lock, flags);
++ sclp_init_mask();
++ return NOTIFY_DONE;
++}
++
++static struct notifier_block sclp_reboot_notifier = {
++ .notifier_call = sclp_reboot_event
++};
++
++/*
++ * sclp setup function. Called early (no kmalloc!) from sclp_console_init().
++ */
++static int
++sclp_init(void)
++{
++ int rc;
++
++ if (test_bit(SCLP_INIT, &sclp_status))
++ /* Already initialized. */
++ return 0;
++
++ spin_lock_init(&sclp_lock);
++ INIT_LIST_HEAD(&sclp_req_queue);
++
++ /* init event list */
++ INIT_LIST_HEAD(&sclp_reg_list);
++ list_add(&sclp_state_change_event.list, &sclp_reg_list);
++ list_add(&sclp_quiesce_event.list, &sclp_reg_list);
++
++ rc = register_reboot_notifier(&sclp_reboot_notifier);
++ if (rc)
++ return rc;
++
++ /*
++ * request the 0x2401 external interrupt
++ * The sclp driver is initialized early (before kmalloc works). We
++ * need to use register_early_external_interrupt.
++ */
++ if (register_early_external_interrupt(0x2401, sclp_interrupt_handler,
++ &ext_int_info_hwc) != 0)
++ return -EBUSY;
++
++ /* enable service-signal external interruptions,
++ * Control Register 0 bit 22 := 1
++ * (in addition, PSW bit 7 must be set to 1 for external
++ * interruptions to be delivered)
++ */
++ ctl_set_bit(0, 9);
++
++ init_timer(&retry_timer);
++ init_timer(&sclp_busy_timer);
++ /* do the initial write event mask */
++ rc = sclp_init_mask();
++ if (rc == 0) {
++ /* Ok, now everything is setup right. */
++ set_bit(SCLP_INIT, &sclp_status);
++ return 0;
++ }
++
++ /* The sclp_init_mask failed. SCLP is broken, unregister and exit. */
++ ctl_clear_bit(0,9);
++ unregister_early_external_interrupt(0x2401, sclp_interrupt_handler,
++ &ext_int_info_hwc);
++
++ return rc;
++}
++
++/*
++ * Register the SCLP event listener identified by REG. Return 0 on success.
++ * Some error codes and their meaning:
++ *
++ * -ENODEV = SCLP interface is not supported on this machine
++ * -EBUSY = there is already a listener registered for the requested
++ * event type
++ * -EIO = SCLP interface is currently not operational
++ */
++int
++sclp_register(struct sclp_register *reg)
++{
++ unsigned long flags;
++ struct list_head *l;
++ struct sclp_register *t;
++
++ if (!MACHINE_HAS_SCLP)
++ return -ENODEV;
++
++ if (!test_bit(SCLP_INIT, &sclp_status))
++ sclp_init();
++ spin_lock_irqsave(&sclp_lock, flags);
++ /* check already registered event masks for collisions */
++ list_for_each(l, &sclp_reg_list) {
++ t = list_entry(l, struct sclp_register, list);
++ if (t->receive_mask & reg->receive_mask ||
++ t->send_mask & reg->send_mask) {
++ spin_unlock_irqrestore(&sclp_lock, flags);
++ return -EBUSY;
++ }
++ }
++ /*
++ * set present mask to 0 to trigger state change
++ * callback in sclp_init_mask
++ */
++ reg->sclp_receive_mask = 0;
++ reg->sclp_send_mask = 0;
++ list_add(&reg->list, &sclp_reg_list);
++ spin_unlock_irqrestore(&sclp_lock, flags);
++ sclp_init_mask();
++ return 0;
++}
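++
++/*
++ * Registration sketch (illustrative only; mask and handlers are
++ * placeholders -- sclp_state_change_event above is a real instance):
++ *
++ *    static struct sclp_register my_listener = {
++ *            .receive_mask = EvTyp_Msg_Mask,
++ *            .receiver_fn = my_receiver,        // called per event buffer
++ *            .state_change_fn = my_state_change // called on mask changes
++ *    };
++ *
++ *    rc = sclp_register(&my_listener);  // 0, -ENODEV, -EBUSY or -EIO
++ */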
++
++/*
++ * Unregister the SCLP event listener identified by REG.
++ */
++void
++sclp_unregister(struct sclp_register *reg)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&sclp_lock, flags);
++ list_del(&reg->list);
++ spin_unlock_irqrestore(&sclp_lock, flags);
++ sclp_init_mask();
++}
++
++#define SCLP_EVBUF_PROCESSED 0x80
++
++/*
++ * Traverse array of event buffers contained in SCCB and remove all buffers
++ * with a set "processed" flag. Return the number of unprocessed buffers.
++ */
++int
++sclp_remove_processed(struct sccb_header *sccb)
++{
++ struct evbuf_header *evbuf;
++ int unprocessed;
++ u16 remaining;
++
++ evbuf = (struct evbuf_header *) (sccb + 1);
++ unprocessed = 0;
++ remaining = sccb->length - sizeof(struct sccb_header);
++ while (remaining > 0) {
++ remaining -= evbuf->length;
++ if (evbuf->flags & SCLP_EVBUF_PROCESSED) {
++ sccb->length -= evbuf->length;
++ memcpy((void *) evbuf,
++ (void *) ((addr_t) evbuf + evbuf->length),
++ remaining);
++ } else {
++ unprocessed++;
++ evbuf = (struct evbuf_header *)
++ ((addr_t) evbuf + evbuf->length);
++ }
++ }
++
++ return unprocessed;
++}
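++
++/*
++ * Illustration of the in-place compaction above (hypothetical lengths):
++ * an SCCB holding the event buffers [A:processed][B][C:processed] is
++ * rewritten to [B]; sccb->length shrinks by length(A) + length(C) and
++ * the function returns 1 for the single unprocessed buffer B.
++ */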
++
++module_init(sclp_init);
++
++EXPORT_SYMBOL(sclp_add_request);
++EXPORT_SYMBOL(sclp_sync_wait);
++EXPORT_SYMBOL(sclp_register);
++EXPORT_SYMBOL(sclp_unregister);
++EXPORT_SYMBOL(sclp_error_message);
+=== drivers/s390/char/tape_block.c
+==================================================================
+--- drivers/s390/char/tape_block.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tape_block.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,678 @@
++/*
++ * drivers/s390/char/tape_block.c
++ * block device frontend for tape device driver
++ *
++ * S390 and zSeries version
++ * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Carsten Otte <cotte at de.ibm.com>
++ * Tuan Ngo-Anh <ngoanh at de.ibm.com>
++ * Martin Schwidefsky <schwidefsky at de.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/blkdev.h>
++#include <linux/blk.h>
++#include <linux/interrupt.h>
++#include <linux/cdrom.h>
++
++#include <asm/debug.h>
++#include <asm/irq.h>
++#include <asm/s390dyn.h>
++
++#define TAPE_DBF_AREA tape_core_dbf
++
++#include "tape.h"
++#include "tape_std.h"
++
++#define PRINTK_HEADER "TBLOCK:"
++
++#define TAPEBLOCK_DEVFSMODE 0060644 /* brw-r--r-- */
++#define TAPEBLOCK_MAX_SEC 100
++#define TAPEBLOCK_MIN_REQUEUE 3
++
++/*
++ * file operation structure for tape block frontend
++ */
++static int tapeblock_open(struct inode *, struct file *);
++static int tapeblock_release(struct inode *, struct file *);
++static int tapeblock_ioctl(
++ struct inode *, struct file *, unsigned int, unsigned long);
++
++static struct block_device_operations tapeblock_bdops = {
++ .owner = THIS_MODULE,
++ .open = tapeblock_open,
++ .release = tapeblock_release,
++ .ioctl = tapeblock_ioctl,
++};
++
++int tapeblock_major = 0;
++
++/*
++ * Some helper inlines
++ */
++static inline int tapeblock_size(int minor) {
++ return blk_size[tapeblock_major][minor];
++}
++static inline int tapeblock_ssize(int minor) {
++ return blksize_size[tapeblock_major][minor];
++}
++static inline int tapeblock_hw_ssize(int minor) {
++ return hardsect_size[tapeblock_major][minor];
++}
++
++/*
++ * Post finished request.
++ */
++static inline void
++tapeblock_end_request(struct request *req, int uptodate)
++{
++ if (end_that_request_first(req, uptodate, "tBLK"))
++ BUG();
++ end_that_request_last(req);
++}
++
++static void
++__tapeblock_end_request(struct tape_request *ccw_req, void *data)
++{
++ struct tape_device *device;
++ struct request *req;
++
++ device = ccw_req->device;
++ req = (struct request *) data;
++ if(!device || !req)
++ BUG();
++
++ tapeblock_end_request(req, ccw_req->rc == 0);
++ if (ccw_req->rc == 0)
++ /* Update position. */
++ device->blk_data.block_position =
++ (req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B;
++ else
++ /* We lost the position information due to an error. */
++ device->blk_data.block_position = -1;
++
++ device->discipline->free_bread(ccw_req);
++
++ if (!list_empty(&device->req_queue) ||
++ !list_empty(&device->blk_data.request_queue.queue_head))
++ tasklet_schedule(&device->blk_data.tasklet);
++}
++
++/*
++ * Fetch requests from block device queue.
++ */
++static inline void
++__tape_process_blk_queue(struct tape_device *device, struct list_head *new_req)
++{
++ request_queue_t *queue;
++ struct list_head *l;
++ struct request *req;
++ struct tape_request *ccw_req;
++ int nr_queued;
++
++ if (!TAPE_BLOCKDEV(device)) {
++ PRINT_WARN("can't process queue. Not a tape blockdevice.\n");
++ return;
++ }
++
++ nr_queued = 0;
++ queue = &device->blk_data.request_queue;
++
++ /* Count number of requests on ccw queue. */
++ list_for_each(l, &device->req_queue)
++ nr_queued++;
++
++ while (
++ !queue->plugged &&
++ !list_empty(&queue->queue_head) &&
++ nr_queued < TAPEBLOCK_MIN_REQUEUE
++ ) {
++ /* tape_block_next_request(queue); */
++ req = blkdev_entry_next_request(&queue->queue_head);
++
++ if (req->cmd == WRITE) {
++ DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
++ blkdev_dequeue_request(req);
++ tapeblock_end_request(req, 0);
++ continue;
++ }
++ ccw_req = device->discipline->bread(device, req);
++ if (IS_ERR(ccw_req)) {
++ if (PTR_ERR(ccw_req) == -ENOMEM)
++ break; /* don't try again */
++ DBF_EVENT(1, "TBLOCK: bread failed\n");
++ blkdev_dequeue_request(req);
++ tapeblock_end_request(req, 0);
++ continue;
++ }
++ blkdev_dequeue_request(req);
++ ccw_req->callback = __tapeblock_end_request;
++ ccw_req->callback_data = (void *) req;
++ ccw_req->retries = TAPEBLOCK_RETRIES;
++
++ list_add_tail(&ccw_req->list, new_req);
++ nr_queued++;
++ }
++}
++
++/*
++ * Feed requests to the tape device.
++ */
++static inline int
++tape_queue_requests(struct tape_device *device, struct list_head *new_req)
++{
++ struct list_head *l, *n;
++ struct tape_request *ccw_req;
++ struct request *req;
++ int rc, fail;
++
++ fail = 0;
++ list_for_each_safe(l, n, new_req) {
++ ccw_req = list_entry(l, struct tape_request, list);
++ list_del(&ccw_req->list);
++
++ rc = tape_do_io_async(device, ccw_req);
++ if (rc) {
++ /*
++ * Start/enqueueing failed. No retries in
++ * this case.
++ */
++ DBF_EVENT(5, "enqueueing failed\n");
++ req = (struct request *) ccw_req->callback_data;
++ tapeblock_end_request(req, 0);
++ device->discipline->free_bread(ccw_req);
++ fail = 1;
++ }
++ }
++ return fail;
++}
++
++/*
++ * Tape request queue function. Called from ll_rw_blk.c
++ */
++static void
++tapeblock_request_fn(request_queue_t *queue)
++{
++ struct list_head new_req;
++ struct tape_device *device;
++
++ device = (struct tape_device *) queue->queuedata;
++ if(device == NULL)
++ BUG();
++
++ while (!list_empty(&queue->queue_head)) {
++ INIT_LIST_HEAD(&new_req);
++ spin_lock(get_irq_lock(device->devinfo.irq));
++ __tape_process_blk_queue(device, &new_req);
++ spin_unlock(get_irq_lock(device->devinfo.irq));
++ /*
++ * Now queue the new request to the tape. This needs to be
++ * done without the device lock held.
++ */
++ if (tape_queue_requests(device, &new_req) == 0)
++ /* All requests queued. That's enough for now. */
++ break;
++ }
++}
++
++/*
++ * Returns block frontend request queue for a tape device.
++ * FIXME: on shutdown make sure ll_rw_blk can put requests on a dead queue.
++ */
++static request_queue_t *
++tapeblock_get_queue(kdev_t kdev)
++{
++ struct tape_device *device;
++ request_queue_t *queue;
++
++ if (major(kdev) != tapeblock_major)
++ return NULL;
++
++ device = tape_get_device(minor(kdev) >> 1);
++ if (IS_ERR(device))
++ return NULL;
++
++ queue = &device->blk_data.request_queue;
++ tape_put_device(device);
++ return queue;
++}
++
++/*
++ * Acquire the device lock and process queues for the device.
++ */
++static void
++tapeblock_tasklet(unsigned long data)
++{
++ struct list_head new_req;
++ struct tape_device *device;
++
++ device = (struct tape_device *) data;
++ while (!list_empty(&device->blk_data.request_queue.queue_head)) {
++ INIT_LIST_HEAD(&new_req);
++ spin_lock_irq(get_irq_lock(device->devinfo.irq));
++ __tape_process_blk_queue(device, &new_req);
++ spin_unlock_irq(get_irq_lock(device->devinfo.irq));
++ /*
++ * Now queue the new request to the tape. This needs to be
++ * done without the device lock held.
++ */
++ if (tape_queue_requests(device, &new_req) == 0)
++ /* All requests queued. That's enough for now. */
++ break;
++ }
++}
++
++/*
++ * Create block directory with disc entries
++ */
++static int
++tapeblock_mkdevfstree (struct tape_device *device)
++{
++#ifdef CONFIG_DEVFS_FS
++ device->blk_data.devfs_block_dir =
++ devfs_mk_dir (device->devfs_dir, "block", device);
++ if (device->blk_data.devfs_block_dir == 0)
++ return -ENOENT;
++ device->blk_data.devfs_disc =
++ devfs_register(device->blk_data.devfs_block_dir,
++ "disc", DEVFS_FL_DEFAULT,
++ tapeblock_major, device->first_minor,
++ TAPEBLOCK_DEVFSMODE, &tapeblock_bdops, device);
++ if (device->blk_data.devfs_disc == NULL) {
++ devfs_unregister(device->blk_data.devfs_block_dir);
++ return -ENOENT;
++ }
++#endif
++ return 0;
++}
++
++/*
++ * Remove devfs entries
++ */
++static void
++tapeblock_rmdevfstree (struct tape_device *device)
++{
++#ifdef CONFIG_DEVFS_FS
++ if (device->blk_data.devfs_disc)
++ devfs_unregister(device->blk_data.devfs_disc);
++ if (device->blk_data.devfs_block_dir)
++ devfs_unregister(device->blk_data.devfs_block_dir);
++#endif
++}
++
++/*
++ * This function is called for every new tapedevice
++ */
++int
++tapeblock_setup_device(struct tape_device * device)
++{
++ int rc;
++
++ /* FIXME: We should be able to sense the sector size */
++ blk_size[tapeblock_major][device->first_minor] = 0;
++ blksize_size[tapeblock_major][device->first_minor] =
++ hardsect_size[tapeblock_major][device->first_minor] =
++ TAPEBLOCK_HSEC_SIZE;
++
++ /* Create devfs entries. */
++ rc = tapeblock_mkdevfstree(device);
++ if (rc)
++ return rc;
++
++ /* Setup request queue and initialize gendisk for this device. */
++ device->blk_data.request_queue.queuedata = tape_clone_device(device);
++
++
++ /* As long as the tasklet is running it may access the device */
++ tasklet_init(&device->blk_data.tasklet, tapeblock_tasklet,
++ (unsigned long) tape_clone_device(device));
++
++ blk_init_queue(&device->blk_data.request_queue, tapeblock_request_fn);
++ blk_queue_headactive(&device->blk_data.request_queue, 0);
++
++ tape_hotplug_event(device, tapeblock_major, TAPE_HOTPLUG_BLOCK_ADD);
++
++ set_device_ro(mk_kdev(tapeblock_major, device->first_minor), 1);
++ return 0;
++}
++
++void
++tapeblock_cleanup_device(struct tape_device *device)
++{
++ /* Prevent further requests to the block request queue. */
++ blk_size[tapeblock_major][device->first_minor] = 0;
++
++ tapeblock_rmdevfstree(device);
++
++ /* With the tasklet gone the reference is gone as well. */
++ tasklet_kill(&device->blk_data.tasklet);
++ tape_put_device(device);
++
++ /* Cleanup the request queue. */
++ blk_cleanup_queue(&device->blk_data.request_queue);
++
++ /* Remove reference in private data */
++ device->blk_data.request_queue.queuedata = NULL;
++ tape_put_device(device);
++
++ tape_hotplug_event(device, tapeblock_major, TAPE_HOTPLUG_BLOCK_REMOVE);
++}
++
++/*
++ * Detect number of blocks of the tape.
++ * FIXME: can we extend this to detect the block size as well ?
++ * FIXME: (minor) On 34xx the block id also contains a format specification
++ * which is unknown before the block was skipped or read at
++ * least once. So detection is sometimes done a second time.
++ */
++int tapeblock_mediumdetect(struct tape_device *device)
++{
++ unsigned int bid;
++ unsigned int nr_of_blks;
++ int rc;
++
++ /*
++ * Identify the first record's format
++ */
++ if((rc = tape_mtop(device, MTFSR, 1)) < 0)
++ return rc;
++ if((rc = tape_mtop(device, MTBSR, 1)) < 0)
++ return rc;
++
++ device->blk_data.block_position = 0;
++ if (tape_std_read_block_id(device, &bid)) {
++ rc = tape_mtop(device, MTREW, 1);
++ if (rc) {
++ device->blk_data.block_position = -1;
++ blk_size[tapeblock_major][device->first_minor] = 0;
++ return rc;
++ }
++ bid = 0;
++ }
++
++ if(bid != device->blk_data.start_block_id) {
++ device->blk_data.start_block_id = bid;
++ blk_size[tapeblock_major][device->first_minor] = 0;
++ }
++
++ if(blk_size[tapeblock_major][device->first_minor] > 0)
++ return 0;
++
++ PRINT_INFO("Detecting media size...\n");
++ blk_size[tapeblock_major][device->first_minor] = 0;
++
++ rc = tape_mtop(device, MTFSF, 1);
++ if (rc)
++ return rc;
++
++ rc = tape_mtop(device, MTTELL, 1);
++ if (rc < 0)
++ return rc;
++ nr_of_blks = rc - 1; /* don't count FM */
++
++ if (device->blk_data.start_block_id) {
++ rc = tape_std_seek_block_id(
++ device,
++ device->blk_data.start_block_id);
++ } else {
++ rc = tape_mtop(device, MTREW, 1);
++ }
++ if (rc)
++ return rc;
++
++ rc = tape_mtop(device, MTTELL, 1);
++ if (rc < 0)
++ return rc;
++
++ /* Don't include start offset */
++ nr_of_blks -= rc;
++
++ PRINT_INFO("Found %i blocks on media\n", nr_of_blks);
++ if (tapeblock_hw_ssize(device->first_minor) > 1024) {
++ nr_of_blks *= tapeblock_hw_ssize(device->first_minor) / 1024;
++ } else {
++ nr_of_blks /= 1024 / tapeblock_hw_ssize(device->first_minor);
++ }
++ PRINT_INFO("Tape block device size is %i KB\n", nr_of_blks);
++ blk_size[tapeblock_major][device->first_minor] = nr_of_blks;
++
++ return 0;
++}
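++
++/*
++ * Worked example for the size conversion above (hypothetical values):
++ * with a hardware sector size of 2048 bytes and 500 detected blocks the
++ * reported size is 500 * (2048 / 1024) = 1000 KB; with 512-byte sectors
++ * it would be 500 / (1024 / 512) = 250 KB.
++ */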
++
++/*
++ * This function has to be called whenever a new medium has been inserted
++ * into the drive.
++ */
++void
++tapeblock_medium_change(struct tape_device *device) {
++ device->blk_data.start_block_id = 0;
++ blk_size[tapeblock_major][device->first_minor] = 0;
++}
++
++/*
++ * Block frontend tape device open function.
++ */
++int
++tapeblock_open(struct inode *inode, struct file *filp) {
++ struct tape_device *device;
++ int rc;
++
++ if (major(filp->f_dentry->d_inode->i_rdev) != tapeblock_major)
++ return -ENODEV;
++
++ MOD_INC_USE_COUNT;
++ device = tape_get_device(minor(filp->f_dentry->d_inode->i_rdev) >> 1);
++ if (IS_ERR(device)) {
++ MOD_DEC_USE_COUNT;
++ return PTR_ERR(device);
++ }
++
++ DBF_EVENT(6, "TBLOCK: open: %x\n", device->first_minor);
++
++ if(device->required_tapemarks) {
++ DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
++ PRINT_ERR("TBLOCK: Refusing to open tape with missing"
++ " end of file marks.\n");
++ tape_put_device(device);
++ MOD_DEC_USE_COUNT;
++ return -EPERM;
++ }
++
++ rc = tape_open(device);
++ if (rc == 0) {
++ rc = tape_assign(device, TAPE_STATUS_ASSIGN_A);
++ if (rc == 0) {
++ rc = tapeblock_mediumdetect(device);
++ if (rc == 0) {
++ TAPE_SET_STATE(device, TAPE_STATUS_BLOCKDEV);
++ tape_put_device(device);
++ return 0;
++ }
++ tape_unassign(device, TAPE_STATUS_ASSIGN_A);
++ }
++ tape_release(device);
++ }
++ tape_put_device(device);
++ MOD_DEC_USE_COUNT;
++ return rc;
++}
++
++/*
++ * Block frontend tape device release function.
++ */
++int
++tapeblock_release(struct inode *inode, struct file *filp) {
++ struct tape_device *device;
++
++ device = tape_get_device(minor(inode->i_rdev) >> 1);
++
++ DBF_EVENT(4, "TBLOCK: release %i\n", device->first_minor);
++
++ /* Remove all buffers at device close. */
++ /* FIXME: can we do that at tape unload ? */
++ invalidate_buffers(inode->i_rdev);
++
++ if (device->blk_data.start_block_id) {
++ tape_std_seek_block_id(device, device->blk_data.start_block_id);
++ } else {
++ tape_mtop(device, MTREW, 1);
++ }
++ TAPE_CLEAR_STATE(device, TAPE_STATUS_BLOCKDEV);
++ tape_unassign(device, TAPE_STATUS_ASSIGN_A);
++ tape_release(device);
++ tape_put_device(device);
++ MOD_DEC_USE_COUNT;
++
++ return 0;
++}
++
++int
++tapeblock_ioctl(
++ struct inode *inode,
++ struct file *file,
++ unsigned int command,
++ unsigned long arg
++) {
++ int rc = 0;
++ int minor = minor(inode->i_rdev);
++
++ DBF_EVENT(6, "tapeblock_ioctl(%x)\n", command);
++
++ switch(command) {
++ case BLKSSZGET:
++ if(put_user(tapeblock_ssize(minor), (int *) arg))
++ rc = -EFAULT;
++ break;
++ case BLKGETSIZE:
++ if(
++ put_user(
++ tapeblock_size(minor),
++ (unsigned long *) arg
++ )
++ )
++ rc = -EFAULT;
++ break;
++#ifdef BLKGETSIZE64
++ case BLKGETSIZE64:
++ if(put_user(tapeblock_size(minor) << 9, (u64 *) arg))
++ rc = -EFAULT;
++ break;
++#endif
++ case CDROMMULTISESSION:
++ case CDROMREADTOCENTRY:
++ /* No message for these... */
++ rc = -EINVAL;
++ break;
++ default:
++ PRINT_WARN("invalid ioctl 0x%x\n", command);
++ rc = -EINVAL;
++ }
++ return rc;
++}
++
++/*
++ * Initialize block device frontend.
++ */
++int
++tapeblock_init(void)
++{
++ int rc;
++
++ /* Register the tape major number to the kernel */
++#ifdef CONFIG_DEVFS_FS
++ if (tapeblock_major == 0)
++ tapeblock_major = devfs_alloc_major(DEVFS_SPECIAL_BLK);
++#endif
++ rc = register_blkdev(tapeblock_major, "tBLK", &tapeblock_bdops);
++ if (rc < 0) {
++ PRINT_ERR("can't get major %d for block device\n",
++ tapeblock_major);
++ return rc;
++ }
++ if(tapeblock_major == 0)
++ tapeblock_major = rc;
++
++ /* Allocate memory for kernel block device tables */
++ rc = -ENOMEM;
++ blk_size[tapeblock_major] = kmalloc(256*sizeof(int), GFP_KERNEL);
++ if(blk_size[tapeblock_major] == NULL)
++ goto tapeblock_init_fail;
++ memset(blk_size[tapeblock_major], 0, 256*sizeof(int));
++ blksize_size[tapeblock_major] = kmalloc(256*sizeof(int), GFP_KERNEL);
++ if(blksize_size[tapeblock_major] == NULL)
++ goto tapeblock_init_fail;
++ memset(blksize_size[tapeblock_major], 0, 256*sizeof(int));
++ hardsect_size[tapeblock_major] = kmalloc(256*sizeof(int), GFP_KERNEL);
++ if(hardsect_size[tapeblock_major] == NULL)
++ goto tapeblock_init_fail;
++ memset(hardsect_size[tapeblock_major], 0, 256*sizeof(int));
++ max_sectors[tapeblock_major] = kmalloc(256*sizeof(int), GFP_KERNEL);
++ if(max_sectors[tapeblock_major] == NULL)
++ goto tapeblock_init_fail;
++ memset(max_sectors[tapeblock_major], 0, 256*sizeof(int));
++
++ blk_dev[tapeblock_major].queue = tapeblock_get_queue;
++
++ PRINT_INFO("tape gets major %d for block device\n", tapeblock_major);
++ DBF_EVENT(3, "TBLOCK: major = %d\n", tapeblock_major);
++ DBF_EVENT(3, "TBLOCK: init ok\n");
++
++ return 0;
++
++tapeblock_init_fail:
++ if(tapeblock_major > 0) {
++ if(blk_size[tapeblock_major]) {
++ kfree(blk_size[tapeblock_major]);
++ blk_size[tapeblock_major] = NULL;
++ }
++ if(blksize_size[tapeblock_major]) {
++ kfree(blksize_size[tapeblock_major]);
++ blksize_size[tapeblock_major] = NULL;
++ }
++ if(hardsect_size[tapeblock_major]) {
++ kfree(hardsect_size[tapeblock_major]);
++ hardsect_size[tapeblock_major] = NULL;
++ }
++ if(max_sectors[tapeblock_major]) {
++ kfree(max_sectors[tapeblock_major]);
++ max_sectors[tapeblock_major] = NULL;
++ }
++#ifdef CONFIG_DEVFS_FS
++ devfs_unregister_blkdev(tapeblock_major, "tBLK");
++#else
++ unregister_blkdev(tapeblock_major, "tBLK");
++#endif
++ tapeblock_major = -1;
++ }
++
++ DBF_EVENT(3, "TBLOCK: init failed(%d)\n", rc);
++ return rc;
++}
++
++/*
++ * Deregister major for block device frontend
++ */
++void
++tapeblock_exit(void)
++{
++ if(blk_size[tapeblock_major]) {
++ kfree(blk_size[tapeblock_major]);
++ blk_size[tapeblock_major] = NULL;
++ }
++ if(blksize_size[tapeblock_major]) {
++ kfree(blksize_size[tapeblock_major]);
++ blksize_size[tapeblock_major] = NULL;
++ }
++ if(hardsect_size[tapeblock_major]) {
++ kfree(hardsect_size[tapeblock_major]);
++ hardsect_size[tapeblock_major] = NULL;
++ }
++ if(max_sectors[tapeblock_major]) {
++ kfree(max_sectors[tapeblock_major]);
++ max_sectors[tapeblock_major] = NULL;
++ }
++ blk_dev[tapeblock_major].queue = NULL;
++ unregister_blkdev(tapeblock_major, "tBLK");
++}
+=== drivers/s390/char/sclp.h
+==================================================================
+--- drivers/s390/char/sclp.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/sclp.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,157 @@
++/*
++ * drivers/s390/char/sclp.h
++ *
++ * S390 version
++ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Martin Peschke <mpeschke at de.ibm.com>
++ * Martin Schwidefsky <schwidefsky at de.ibm.com>
++ */
++
++#ifndef __SCLP_H__
++#define __SCLP_H__
++
++#include <linux/types.h>
++#include <linux/list.h>
++
++#include <asm/ebcdic.h>
++
++/* maximum number of pages concerning our own memory management */
++#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
++#define MAX_CONSOLE_PAGES 4
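++
++/*
++ * Note: sizeof(unsigned long) << 3 is the number of bits in an unsigned
++ * long, i.e. 32 on s390 and 64 on s390x; presumably this lets the pages
++ * be tracked in a single one-word bitmap.
++ */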
++
++#define EvTyp_OpCmd 0x01
++#define EvTyp_Msg 0x02
++#define EvTyp_StateChange 0x08
++#define EvTyp_PMsgCmd 0x09
++#define EvTyp_CntlProgOpCmd 0x20
++#define EvTyp_CntlProgIdent 0x0B
++#define EvTyp_SigQuiesce 0x1D
++#define EvTyp_VT220Msg 0x1A
++
++#define EvTyp_OpCmd_Mask 0x80000000
++#define EvTyp_Msg_Mask 0x40000000
++#define EvTyp_StateChange_Mask 0x01000000
++#define EvTyp_PMsgCmd_Mask 0x00800000
++#define EvTyp_CtlProgOpCmd_Mask 0x00000001
++#define EvTyp_CtlProgIdent_Mask 0x00200000
++#define EvTyp_SigQuiesce_Mask 0x00000008
++#define EvTyp_VT220Msg_Mask 0x00000040
++
++#define GnrlMsgFlgs_DOM 0x8000
++#define GnrlMsgFlgs_SndAlrm 0x4000
++#define GnrlMsgFlgs_HoldMsg 0x2000
++
++#define LnTpFlgs_CntlText 0x8000
++#define LnTpFlgs_LabelText 0x4000
++#define LnTpFlgs_DataText 0x2000
++#define LnTpFlgs_EndText 0x1000
++#define LnTpFlgs_PromptText 0x0800
++
++typedef unsigned int sclp_cmdw_t;
++
++#define SCLP_CMDW_READDATA 0x00770005
++#define SCLP_CMDW_WRITEDATA 0x00760005
++#define SCLP_CMDW_WRITEMASK 0x00780005
++
++#define GDS_ID_MDSMU 0x1310
++#define GDS_ID_MDSRouteInfo 0x1311
++#define GDS_ID_AgUnWrkCorr 0x1549
++#define GDS_ID_SNACondReport 0x1532
++#define GDS_ID_CPMSU 0x1212
++#define GDS_ID_RoutTargInstr 0x154D
++#define GDS_ID_OpReq 0x8070
++#define GDS_ID_TextCmd 0x1320
++
++#define GDS_KEY_SelfDefTextMsg 0x31
++
++typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */
++
++struct sccb_header {
++ u16 length;
++ u8 function_code;
++ u8 control_mask[3];
++ u16 response_code;
++} __attribute__((packed));
++
++struct gds_subvector {
++ u8 length;
++ u8 key;
++} __attribute__((packed));
++
++struct gds_vector {
++ u16 length;
++ u16 gds_id;
++} __attribute__((packed));
++
++struct evbuf_header {
++ u16 length;
++ u8 type;
++ u8 flags;
++ u16 _reserved;
++} __attribute__((packed));
++
++struct sclp_req {
++ struct list_head list; /* list_head for request queueing. */
++ sclp_cmdw_t command; /* sclp command to execute */
++ void *sccb; /* pointer to the sccb to execute */
++ char status; /* status of this request */
++ /* Callback that is called after reaching final status. */
++ void (*callback)(struct sclp_req *, void *data);
++ void *callback_data;
++};
++
++#define SCLP_REQ_FILLED 0x00 /* request is ready to be processed */
++#define SCLP_REQ_QUEUED 0x01 /* request is queued to be processed */
++#define SCLP_REQ_RUNNING 0x02 /* request is currently running */
++#define SCLP_REQ_DONE 0x03 /* request is completed successfully */
++#define SCLP_REQ_FAILED 0x05 /* request is finally failed */
++
++/* function pointers that a high level driver has to use for registration */
++/* of some routines it wants to be called from the low level driver */
++struct sclp_register {
++ struct list_head list;
++ /* event masks this user is registered for */
++ sccb_mask_t receive_mask;
++ sccb_mask_t send_mask;
++ /* actually present events */
++ sccb_mask_t sclp_receive_mask;
++ sccb_mask_t sclp_send_mask;
++ /* called if event type availability changes */
++ void (*state_change_fn)(struct sclp_register *);
++ /* called for events in cp_receive_mask/sclp_receive_mask */
++ void (*receiver_fn)(struct evbuf_header *);
++};
++
++/* externals from sclp.c */
++void sclp_add_request(struct sclp_req *req);
++void sclp_sync_wait(void);
++int sclp_register(struct sclp_register *reg);
++void sclp_unregister(struct sclp_register *reg);
++char *sclp_error_message(u16 response_code);
++int sclp_remove_processed(struct sccb_header *sccb);
++
++/* useful inlines */
++
++/* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */
++/* translate single character from ASCII to EBCDIC */
++static inline unsigned char
++sclp_ascebc(unsigned char ch)
++{
++ return (MACHINE_IS_VM) ? _ascebc[ch] : _ascebc_500[ch];
++}
++
++/* translate string from EBCDIC to ASCII */
++static inline void
++sclp_ebcasc_str(unsigned char *str, int nr)
++{
++ (MACHINE_IS_VM) ? EBCASC(str, nr) : EBCASC_500(str, nr);
++}
++
++/* translate string from ASCII to EBCDIC */
++static inline void
++sclp_ascebc_str(unsigned char *str, int nr)
++{
++ (MACHINE_IS_VM) ? ASCEBC(str, nr) : ASCEBC_500(str, nr);
++}
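++
++/*
++ * Example of why the codepage distinction matters: alphanumerics map the
++ * same in both codepages ('A' -> 0xC1), but e.g. '[' is 0xBA in EBCDIC
++ * 037 (VM) and 0x4A in EBCDIC 500 (LPAR/native).
++ */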
++
++#endif /* __SCLP_H__ */
+=== drivers/s390/char/sclp_rw.c
+==================================================================
+--- drivers/s390/char/sclp_rw.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/sclp_rw.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,496 @@
++/*
++ * drivers/s390/char/sclp_rw.c
++ * driver: reading from and writing to system console on S/390 via SCLP
++ *
++ * S390 version
++ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Martin Peschke <mpeschke at de.ibm.com>
++ * Martin Schwidefsky <schwidefsky at de.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/version.h>
++#include <linux/kmod.h>
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/spinlock.h>
++#include <linux/ctype.h>
++#include <asm/uaccess.h>
++
++#include "sclp.h"
++#include "sclp_rw.h"
++
++#define SCLP_RW_PRINT_HEADER "sclp low level driver: "
++
++/*
++ * The room for the SCCB (only for writing) is not equal to a page's size
++ * (which is specified as the maximum size in the SCLP documentation),
++ * because of the additional data structure described above.
++ */
++#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
++
++/* Event type structure for write message and write priority message */
++static struct sclp_register sclp_rw_event = {
++ .send_mask = EvTyp_Msg_Mask | EvTyp_PMsgCmd_Mask
++};
++
++/*
++ * Setup a sclp write buffer. Gets a page as input (4K) and returns
++ * a pointer to a struct sclp_buffer structure that is located at the
++ * end of the input page. This reduces the buffer space by a few
++ * bytes but simplifies things.
++ */
++struct sclp_buffer *
++sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
++{
++ struct sclp_buffer *buffer;
++ struct write_sccb *sccb;
++
++ sccb = (struct write_sccb *) page;
++ /*
++ * We keep the struct sclp_buffer structure at the end
++ * of the sccb page.
++ */
++ buffer = ((struct sclp_buffer *) ((addr_t) sccb + PAGE_SIZE)) - 1;
++ buffer->sccb = sccb;
++ buffer->retry_count = 0;
++ init_timer(&buffer->retry_timer);
++ buffer->mto_number = 0;
++ buffer->mto_char_sum = 0;
++ buffer->current_line = NULL;
++ buffer->current_length = 0;
++ buffer->columns = columns;
++ buffer->htab = htab;
++
++ /* initialize sccb */
++ memset(sccb, 0, sizeof(struct write_sccb));
++ sccb->header.length = sizeof(struct write_sccb);
++ sccb->msg_buf.header.length = sizeof(struct msg_buf);
++ sccb->msg_buf.header.type = EvTyp_Msg;
++ sccb->msg_buf.mdb.header.length = sizeof(struct mdb);
++ sccb->msg_buf.mdb.header.type = 1;
++ sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */
++ sccb->msg_buf.mdb.header.revision_code = 1;
++ sccb->msg_buf.mdb.go.length = sizeof(struct go);
++ sccb->msg_buf.mdb.go.type = 1;
++
++ return buffer;
++}
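++
++/*
++ * Resulting layout of the 4K input page (not to scale):
++ *
++ *    +---------------------------------+-------------------------+
++ *    | struct write_sccb, followed by  | struct sclp_buffer      |
++ *    | MTOs appended by sclp_write()   | (bookkeeping)           |
++ *    +---------------------------------+-------------------------+
++ *    page                                                page + 4K
++ */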
++
++/*
++ * Return a pointer to the original page that has been used to create
++ * the buffer.
++ */
++void *
++sclp_unmake_buffer(struct sclp_buffer *buffer)
++{
++ return buffer->sccb;
++}
++
++/*
++ * Initialize a new Message Text Object (MTO) at the end of the provided buffer
++ * with enough room for max_len characters. Return 0 on success.
++ */
++static int
++sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
++{
++ struct write_sccb *sccb;
++ struct mto *mto;
++ int mto_size;
++
++ /* max size of new Message Text Object including message text */
++ mto_size = sizeof(struct mto) + max_len;
++
++ /* check if current buffer sccb can contain the mto */
++ sccb = buffer->sccb;
++ if ((MAX_SCCB_ROOM - sccb->header.length) < mto_size)
++ return -ENOMEM;
++
++ /* find address of new message text object */
++ mto = (struct mto *)(((addr_t) sccb) + sccb->header.length);
++
++ /*
++ * fill the new Message-Text Object,
++ * starting behind the former last byte of the SCCB
++ */
++ memset(mto, 0, sizeof(struct mto));
++ mto->length = sizeof(struct mto);
++ mto->type = 4; /* message text object */
++ mto->line_type_flags = LnTpFlgs_EndText; /* end text */
++
++ /* set pointer to first byte after struct mto. */
++ buffer->current_line = (char *) (mto + 1);
++ buffer->current_length = 0;
++
++ return 0;
++}
++
++/*
++ * Finalize MTO initialized by sclp_initialize_mto(), updating the sizes of
++ * MTO, enclosing MDB, event buffer and SCCB.
++ */
++static void
++sclp_finalize_mto(struct sclp_buffer *buffer)
++{
++ struct write_sccb *sccb;
++ struct mto *mto;
++ int str_len, mto_size;
++
++ str_len = buffer->current_length;
++ buffer->current_line = NULL;
++ buffer->current_length = 0;
++
++ /* real size of new Message Text Object including message text */
++ mto_size = sizeof(struct mto) + str_len;
++
++ /* find address of new message text object */
++ sccb = buffer->sccb;
++ mto = (struct mto *)(((addr_t) sccb) + sccb->header.length);
++
++ /* set size of message text object */
++ mto->length = mto_size;
++
++ /*
++ * update values of sizes
++ * (SCCB, Event(Message) Buffer, Message Data Block)
++ */
++ sccb->header.length += mto_size;
++ sccb->msg_buf.header.length += mto_size;
++ sccb->msg_buf.mdb.header.length += mto_size;
++
++ /*
++ * count number of buffered messages (= number of Message Text
++ * Objects) and number of buffered characters
++ * for the SCCB currently used for buffering and at all
++ */
++ buffer->mto_number++;
++ buffer->mto_char_sum += str_len;
++}
++
++/*
++ * processing of a message including escape characters,
++ * returns number of characters written to the output sccb
++ * ("processed" means that is not guaranteed that the character have already
++ * been sent to the SCLP but that it will be done at least next time the SCLP
++ * is not busy)
++ */
++int
++sclp_write(struct sclp_buffer *buffer,
++ const unsigned char *msg, int count, int from_user)
++{
++ int spaces, i_msg;
++ char ch;
++ int rc;
++
++ /*
++ * parse msg for escape sequences (\t, \v ...) and put the formatted
++ * msg into an mto (created by sclp_initialize_mto).
++ *
++ * We have to do this work ourselves because there is no support for
++ * these characters on the native machine and only partial support
++ * under VM (Why does VM interpret \n but the native machine doesn't ?)
++ *
++ * Depending on i/o-control setting the message is always written
++ * immediately or we wait for a final new line maybe coming with the
++ * next message. Besides we avoid a buffer overrun by writing its
++ * content.
++ *
++ * RESTRICTIONS:
++ *
++ * \r and \b work within one line because we are not able to modify
++ * previous output that has already been accepted by the SCLP.
++ *
++ * \t combined with following \r is not correctly represented because
++ * \t is expanded to some spaces but \r does not know about a
++ * previous \t and decreases the current position by one column.
++ * This keeps the implementation slim and quick.
++ */
++ for (i_msg = 0; i_msg < count; i_msg++) {
++ if (from_user) {
++ if (get_user(ch, msg + i_msg) != 0)
++ return -EFAULT;
++ } else
++ ch = msg[i_msg];
++
++ switch (ch) {
++ case '\n': /* new line, line feed (ASCII) */
++ /* check if new mto needs to be created */
++ if (buffer->current_line == NULL) {
++ rc = sclp_initialize_mto(buffer, 0);
++ if (rc)
++ return i_msg;
++ }
++ sclp_finalize_mto(buffer);
++ break;
++ case '\a': /* bell, one for several times */
++ /* set SCLP sound alarm bit in General Object */
++ buffer->sccb->msg_buf.mdb.go.general_msg_flags |=
++ GnrlMsgFlgs_SndAlrm;
++ break;
++ case '\t': /* horizontal tabulator */
++ /* check if new mto needs to be created */
++ if (buffer->current_line == NULL) {
++ rc = sclp_initialize_mto(buffer,
++ buffer->columns);
++ if (rc)
++ return i_msg;
++ }
++ /* "go to (next htab-boundary + 1, same line)" */
++ do {
++ if (buffer->current_length >= buffer->columns)
++ break;
++ /* ok, add a blank */
++ *buffer->current_line++ = 0x40;
++ buffer->current_length++;
++ } while (buffer->current_length % buffer->htab);
++ break;
++ case '\f': /* form feed */
++ case '\v': /* vertical tabulator */
++ /* "go to (actual column, actual line + 1)" */
++ /* = new line, leading spaces */
++ if (buffer->current_line != NULL) {
++ spaces = buffer->current_length;
++ sclp_finalize_mto(buffer);
++ rc = sclp_initialize_mto(buffer,
++ buffer->columns);
++ if (rc)
++ return i_msg;
++ memset(buffer->current_line, 0x40, spaces);
++ buffer->current_line += spaces;
++ buffer->current_length = spaces;
++ } else {
++ /* on an empty line this is the same as \n */
++ rc = sclp_initialize_mto(buffer,
++ buffer->columns);
++ if (rc)
++ return i_msg;
++ sclp_finalize_mto(buffer);
++ }
++ break;
++ case '\b': /* backspace */
++ /* "go to (actual column - 1, actual line)" */
++ /* decrement counter indicating position, */
++ /* do not remove last character */
++ if (buffer->current_line != NULL &&
++ buffer->current_length > 0) {
++ buffer->current_length--;
++ buffer->current_line--;
++ }
++ break;
++ case 0x00: /* end of string */
++ /* transfer current line to SCCB */
++ if (buffer->current_line != NULL)
++ sclp_finalize_mto(buffer);
++ /* skip the rest of the message including the 0 byte */
++ i_msg = count - 1;
++ break;
++ default: /* no escape character */
++ /* do not output unprintable characters */
++ if (!isprint(ch))
++ break;
++ /* check if new mto needs to be created */
++ if (buffer->current_line == NULL) {
++ rc = sclp_initialize_mto(buffer,
++ buffer->columns);
++ if (rc)
++ return i_msg;
++ }
++ *buffer->current_line++ = sclp_ascebc(ch);
++ buffer->current_length++;
++ break;
++ }
++ /* check if current mto is full */
++ if (buffer->current_line != NULL &&
++ buffer->current_length >= buffer->columns)
++ sclp_finalize_mto(buffer);
++ }
++
++ /* return number of processed characters */
++ return i_msg;
++}
++
++/*
++ * Return the number of free bytes in the sccb
++ */
++int
++sclp_buffer_space(struct sclp_buffer *buffer)
++{
++ int count;
++
++ count = MAX_SCCB_ROOM - buffer->sccb->header.length;
++ if (buffer->current_line != NULL)
++ count -= sizeof(struct mto) + buffer->current_length;
++ return count;
++}
++
++/*
++ * Return number of characters in buffer
++ */
++int
++sclp_chars_in_buffer(struct sclp_buffer *buffer)
++{
++ int count;
++
++ count = buffer->mto_char_sum;
++ if (buffer->current_line != NULL)
++ count += buffer->current_length;
++ return count;
++}
++
++/*
++ * sets or provides some values that influence the driver's behaviour
++ */
++void
++sclp_set_columns(struct sclp_buffer *buffer, unsigned short columns)
++{
++ buffer->columns = columns;
++ if (buffer->current_line != NULL &&
++ buffer->current_length > buffer->columns)
++ sclp_finalize_mto(buffer);
++}
++
++void
++sclp_set_htab(struct sclp_buffer *buffer, unsigned short htab)
++{
++ buffer->htab = htab;
++}
++
++/*
++ * called by sclp_console_init and/or sclp_tty_init
++ */
++int
++sclp_rw_init(void)
++{
++ static int init_done = 0;
++ int rc;
++
++ if (init_done)
++ return 0;
++
++ rc = sclp_register(&sclp_rw_event);
++ if (rc == 0)
++ init_done = 1;
++ return rc;
++}
++
++static void
++sclp_buffer_retry(unsigned long data)
++{
++ struct sclp_buffer *buffer = (struct sclp_buffer *) data;
++ buffer->request.status = SCLP_REQ_FILLED;
++ buffer->sccb->header.response_code = 0x0000;
++ sclp_add_request(&buffer->request);
++}
++
++#define SCLP_BUFFER_MAX_RETRY 5
++#define SCLP_BUFFER_RETRY_INTERVAL 2
++
++/*
++ * second half of Write Event Data-function that has to be done after
++ * interruption indicating completion of Service Call.
++ */
++static void
++sclp_writedata_callback(struct sclp_req *request, void *data)
++{
++ int rc;
++ struct sclp_buffer *buffer;
++ struct write_sccb *sccb;
++
++ buffer = (struct sclp_buffer *) data;
++ sccb = buffer->sccb;
++
++ if (request->status == SCLP_REQ_FAILED) {
++ if (buffer->callback != NULL)
++ buffer->callback(buffer, -EIO);
++ return;
++ }
++ /* check SCLP response code and choose suitable action */
++ switch (sccb->header.response_code) {
++ case 0x0020 :
++ /* Normal completion, buffer processed, message(s) sent */
++ rc = 0;
++ break;
++
++ case 0x0340: /* Contained SCLP equipment check */
++ if (buffer->retry_count++ > SCLP_BUFFER_MAX_RETRY) {
++ rc = -EIO;
++ break;
++ }
++ /* remove processed buffers and requeue rest */
++ if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
++ /* not all buffers were processed */
++ sccb->header.response_code = 0x0000;
++ buffer->request.status = SCLP_REQ_FILLED;
++ sclp_add_request(request);
++ return;
++ }
++ rc = 0;
++ break;
++
++ case 0x0040: /* SCLP equipment check */
++ case 0x05f0: /* Target resource in improper state */
++ if (buffer->retry_count++ > SCLP_BUFFER_MAX_RETRY) {
++ rc = -EIO;
++ break;
++ }
++ /* wait some time, then retry request */
++ buffer->retry_timer.function = sclp_buffer_retry;
++ buffer->retry_timer.data = (unsigned long) buffer;
++ buffer->retry_timer.expires = jiffies +
++ SCLP_BUFFER_RETRY_INTERVAL*HZ;
++ add_timer(&buffer->retry_timer);
++ return;
++
++ default:
++ if (sccb->header.response_code == 0x71f0)
++ rc = -ENOMEM;
++ else
++ rc = -EINVAL;
++ break;
++ }
++ if (buffer->callback != NULL)
++ buffer->callback(buffer, rc);
++}
++
++/*
++ * Setup the request structure in the struct sclp_buffer to do SCLP Write
++ * Event Data and pass the request to the core SCLP loop.
++ */
++void
++sclp_emit_buffer(struct sclp_buffer *buffer,
++ void (*callback)(struct sclp_buffer *, int))
++{
++ struct write_sccb *sccb;
++
++ /* add current line if there is one */
++ if (buffer->current_line != NULL)
++ sclp_finalize_mto(buffer);
++
++ /* Are there messages in the output buffer ? */
++ if (buffer->mto_number == 0) {
++ if (callback != NULL)
++ callback(buffer, 0);
++ return;
++ }
++
++ sccb = buffer->sccb;
++ if (sclp_rw_event.sclp_send_mask & EvTyp_Msg_Mask)
++ /* Use normal write message */
++ sccb->msg_buf.header.type = EvTyp_Msg;
++ else if (sclp_rw_event.sclp_send_mask & EvTyp_PMsgCmd_Mask)
++ /* Use write priority message */
++ sccb->msg_buf.header.type = EvTyp_PMsgCmd;
++ else {
++ if (callback != NULL)
++ callback(buffer, -ENOSYS);
++ return;
++ }
++ buffer->request.command = SCLP_CMDW_WRITEDATA;
++ buffer->request.status = SCLP_REQ_FILLED;
++ buffer->request.callback = sclp_writedata_callback;
++ buffer->request.callback_data = buffer;
++ buffer->request.sccb = sccb;
++ buffer->callback = callback;
++ sclp_add_request(&buffer->request);
++}
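++
++/*
++ * End-to-end usage sketch of this file's interface (illustrative; page
++ * allocation and the completion callback are placeholders):
++ *
++ *    void *page = (void *) __get_free_page(GFP_ATOMIC);
++ *    struct sclp_buffer *buf = sclp_make_buffer(page, 80, 8);
++ *
++ *    sclp_write(buf, "hello world\n", 12, 0);
++ *    sclp_emit_buffer(buf, done_callback);
++ *    // once done_callback ran, sclp_unmake_buffer(buf) recovers the page
++ */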
+=== drivers/s390/char/tape_proc.c
+==================================================================
+--- drivers/s390/char/tape_proc.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tape_proc.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,385 @@
++/*
++ * drivers/s390/char/tape.c
++ * tape device driver for S/390 and zSeries tapes.
++ *
++ * S390 and zSeries version
++ * Copyright (C) 2001 IBM Corporation
++ * Author(s): Carsten Otte <cotte at de.ibm.com>
++ * Michael Holzheu <holzheu at de.ibm.com>
++ * Tuan Ngo-Anh <ngoanh at de.ibm.com>
++ *
++ * PROCFS Functions
++ */
++
++#include <linux/config.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/seq_file.h>
++#include <asm/irq.h>
++#include <asm/s390io.h>
++
++#define TAPE_DBF_AREA tape_core_dbf
++
++#include "tape.h"
++
++#define PRINTK_HEADER "T390:"
++
++static const char *tape_med_st_verbose[MS_SIZE] =
++{
++ [MS_UNKNOWN] = "UNKNOWN ",
++ [MS_LOADED] = "LOADED ",
++ [MS_UNLOADED] = "UNLOADED"
++};
++
++/* our proc tapedevices entry */
++static struct proc_dir_entry *tape_proc_devices;
++
++static int tape_proc_show(struct seq_file *m, void *v) {
++ struct tape_device *device;
++ struct tape_request *request;
++ unsigned long n;
++
++ n = ((unsigned long) v - 1);
++
++ if(n == 0) {
++ seq_printf(m,
++ "TapeNo\tDevNo\tCuType\tCuModel\tDevType\t"
++ "DevMod\tBlkSize\tState\tOp\tMedState\n"
++ );
++ }
++
++ device = tape_get_device(n);
++ if(IS_ERR(device))
++ return 0;
++
++ spin_lock_irq(get_irq_lock(device->devinfo.irq));
++
++ seq_printf(m,
++ "%d\t%04X\t%04X\t%02X\t%04X\t%02X\t",
++ device->first_minor/TAPE_MINORS_PER_DEV,
++ device->devinfo.devno,
++ device->devinfo.sid_data.cu_type,
++ device->devinfo.sid_data.cu_model,
++ device->devinfo.sid_data.dev_type,
++ device->devinfo.sid_data.dev_model
++ );
++
++ /*
++ * the blocksize is either 'auto' or the blocksize as a decimal number
++ */
++ if(device->char_data.block_size == 0)
++ seq_printf(m, "auto\t");
++ else
++ seq_printf(m, "%i\t", device->char_data.block_size);
++
++ seq_printf(m, "%s\t", tape_state_string(device));
++
++ /*
++ * verbose description of current tape operation
++ */
++ if(!list_empty(&device->req_queue)) {
++ request = list_entry(
++ device->req_queue.next, struct tape_request, list
++ );
++
++ seq_printf(m, "%s\t", tape_op_verbose[request->op]);
++ } else {
++ seq_printf(m, "---\t");
++ }
++
++ seq_printf(m, "%s\n", tape_med_st_verbose[device->medium_state]);
++
++ spin_unlock_irq(get_irq_lock(device->devinfo.irq));
++ tape_put_device(device);
++
++ return 0;
++}
++
++static void *tape_proc_start(struct seq_file *m, loff_t *pos) {
++ if(*pos < tape_max_devindex)
++ return (void *) ((unsigned long) (*pos) + 1);
++ return NULL;
++}
++
++static void tape_proc_stop(struct seq_file *m, void *v) {
++}
++
++static void *tape_proc_next(struct seq_file *m, void *v, loff_t *pos) {
++ (*pos)++;
++ return tape_proc_start(m, pos);
++}
++
++static struct seq_operations tape_proc_seq = {
++ .start = tape_proc_start,
++ .next = tape_proc_next,
++ .stop = tape_proc_stop,
++ .show = tape_proc_show,
++};
++
++static int tape_proc_open(struct inode *inode, struct file *file) {
++ return seq_open(file, &tape_proc_seq);
++}
++
++static int
++tape_proc_assign(int devno)
++{
++ int rc;
++ struct tape_device *device;
++
++ if(IS_ERR(device = tape_get_device_by_devno(devno))) {
++ DBF_EVENT(3, "TAPE(%04x): assign invalid device\n", devno);
++ PRINT_ERR("TAPE(%04x): assign invalid device\n", devno);
++ return PTR_ERR(device);
++ }
++
++ rc = tape_assign(device, TAPE_STATUS_ASSIGN_M);
++
++ tape_put_device(device);
++
++ return rc;
++}
++
++static int
++tape_proc_unassign(int devno)
++{
++ int rc;
++ struct tape_device *device;
++
++ if(IS_ERR(device = tape_get_device_by_devno(devno))) {
++ DBF_EVENT(3, "TAPE(%04x): unassign invalid device\n", devno);
++ PRINT_ERR("TAPE(%04x): unassign invalid device\n", devno);
++ return PTR_ERR(device);
++ }
++
++ rc = tape_unassign(device, TAPE_STATUS_ASSIGN_M);
++
++ tape_put_device(device);
++
++ return rc;
++}
++
++#ifdef SMB_DEBUG_BOX
++static int
++tape_proc_put_into_box(int devno)
++{
++ struct tape_device *device;
++
++ if(IS_ERR(device = tape_get_device_by_devno(devno))) {
++ DBF_EVENT(3, "TAPE(%04x): invalid device\n", devno);
++ PRINT_ERR("TAPE(%04x): invalid device\n", devno);
++ return PTR_ERR(device);
++ }
++
++ TAPE_SET_STATE(device, TAPE_STATUS_BOXED);
++
++ tape_put_device(device);
++
++ return 0;
++}
++#endif
++
++#ifdef TAPE390_FORCE_UNASSIGN
++static int
++tape_proc_force_unassign(int devno)
++{
++ int rc;
++ struct tape_device *device;
++
++ if(IS_ERR(device = tape_get_device_by_devno(devno))) {
++ DBF_EVENT(3, "TAPE(%04x): force unassign invalid device\n",
++ devno);
++ PRINT_ERR("TAPE(%04x): force unassign invalid device\n",
++ devno);
++ return PTR_ERR(device);
++ }
++
++ if (!TAPE_BOXED(device)) {
++ DBF_EVENT(3, "TAPE(%04x): forced unassignment only allowed for"
++ " boxed device\n", devno);
++ PRINT_ERR("TAPE(%04x): forced unassignment only allowed for"
++ " boxed device\n", devno);
++ rc = -EPERM;
++ } else if(device->discipline->force_unassign == NULL) {
++ DBF_EVENT(3, "TAPE(%04x: force unassign is not supported on"
++ " this device\n", devno);
++ PRINT_ERR("TAPE(%04x: force unassign is not supported on"
++ " this device\n", devno);
++ rc = -EPERM;
++ } else {
++ rc = device->discipline->force_unassign(device);
++ if(rc == 0) {
++ spin_lock_irq(get_irq_lock(device->devinfo.irq));
++ /* Clear states only after a successful forced unassign. */
++ TAPE_CLEAR_STATE(
++ device,
++ TAPE_STATUS_BOXED
++ | TAPE_STATUS_ASSIGN_A
++ | TAPE_STATUS_ASSIGN_M
++ );
++ spin_unlock_irq(get_irq_lock(device->devinfo.irq));
++ }
++ }
++
++ tape_put_device(device);
++ return rc;
++}
++#endif
++
++/*
++ * Skips over all characters to the position after a newline or beyond the
++ * last character of the string.
++ * Returns the number of characters skipped.
++ */
++static size_t
++tape_proc_skip_eol(const char *buf, size_t len, loff_t *off)
++{
++ loff_t start = *off;
++
++ while((*off - start) < len) {
++ if(*(buf+*off) == '\n') {
++ *off += 1;
++ break;
++ }
++ *off += 1;
++ }
++
++ return (size_t) (*off - start);
++}
++
++/*
++ * Skips over whitespace characters and returns the number of characters
++ * that were skipped.
++ */
++static size_t
++tape_proc_skip_ws(const char *buf, size_t len, loff_t *off)
++{
++ loff_t start = *off;
++
++ while((*off - start) < len) {
++ if(*(buf + *off) != ' ' && *(buf + *off) != '\t')
++ break;
++ *off += 1;
++ }
++
++ return (size_t) (*off - start);
++}
++
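++/*
++ * Parse a hexadecimal value from buf, skipping leading whitespace and
++ * an optional 0x/0X prefix (e.g. " 0x01a0"). The value is stored in
++ * *hex; the return value is the number of characters consumed.
++ */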
++static size_t
++tape_proc_get_hexvalue(char *buf, size_t len, loff_t *off, unsigned int *hex)
++{
++ int hexdigit;
++ loff_t start = *off;
++
++ /* Skip possible space characters */
++ tape_proc_skip_ws(buf, len, off);
++
++ /* The hexvalue might start with '0x' or '0X' */
++ if((*off - start)+1 < len && *(buf + *off) == '0')
++ if(*(buf + *off + 1) == 'x' || *(buf + *off + 1) == 'X')
++ *off += 2;
++
++ *hex = 0;
++ while((*off - start) < len) {
++ if(*(buf + *off) >= '0' && *(buf + *off) <= '9') {
++ hexdigit = *(buf + *off) - '0';
++ } else if(*(buf + *off) >= 'a' && *(buf + *off) <= 'f') {
++ hexdigit = *(buf + *off) - 'a' + 10;
++ } else if(*(buf + *off) >= 'A' && *(buf + *off) <= 'F') {
++ hexdigit = *(buf + *off) - 'A' + 10;
++ } else {
++ break;
++ }
++ *hex = (*hex << 4) + hexdigit;
++ *off += 1;
++ }
++
++ return (size_t) (*off - start);
++}
++
++static ssize_t tape_proc_write(
++ struct file *file,
++ const char *buf,
++ size_t len,
++ loff_t *off
++) {
++ loff_t start = *off;
++ int devno;
++ char *s;
++
++ if(PAGE_SIZE < len)
++ return -EINVAL;
++
++ if((s = kmalloc(len, GFP_KERNEL)) == NULL)
++ return -ENOMEM;
++
++ if(copy_from_user(s, buf, len) != 0) {
++ kfree(s);
++ return -EFAULT;
++ }
++
++ if(strncmp(s+*off, "assign", 6) == 0) {
++ (*off) += 6;
++ tape_proc_get_hexvalue(s, len - (*off - start), off, &devno);
++ if(devno > 0)
++ tape_proc_assign(devno);
++ } else if(strncmp(s+*off, "unassign", 8) == 0) {
++ (*off) += 8;
++ tape_proc_get_hexvalue(s, len - (*off - start), off, &devno);
++ if(devno > 0)
++ tape_proc_unassign(devno);
++#ifdef TAPE390_FORCE_UNASSIGN
++ } else if(strncmp(s+*off, "forceunassign", 13) == 0) {
++ (*off) += 13;
++ tape_proc_get_hexvalue(s, len - (*off - start), off, &devno);
++ if(devno > 0)
++ tape_proc_force_unassign(devno);
++#endif
++#ifdef SMB_DEBUG_BOX
++ } else if(strncmp(s+*off, "putintobox", 10) == 0) {
++ (*off) += 10;
++ tape_proc_get_hexvalue(s, len - (*off - start), off, &devno);
++ if(devno > 0)
++ tape_proc_put_into_box(devno);
++#endif
++ } else {
++ DBF_EVENT(3, "tape_proc_write() parse error\n");
++ PRINT_ERR("Invalid /proc/tapedevices command.\n");
++ }
++ tape_proc_skip_eol(s, len - (*off - start), off);
++
++ kfree(s);
++
++ /* Just pretend to have processed all the stuff */
++ return len;
++}
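++
++/*
++ * Illustrative usage from user space (the device number is
++ * hypothetical):
++ * echo "assign 0x0181" > /proc/tapedevices
++ * echo "unassign 0x0181" > /proc/tapedevices
++ */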
++
++static struct file_operations tape_proc_ops =
++{
++ .open = tape_proc_open,
++ .read = seq_read,
++ .write = tape_proc_write,
++ .llseek = seq_lseek,
++ .release = seq_release,
++};
++
++/*
++ * Initialize procfs stuff on startup
++ */
++void tape_proc_init(void) {
++ tape_proc_devices = create_proc_entry(
++ "tapedevices", S_IFREG | S_IRUGO | S_IWUSR, &proc_root);
++
++ if (tape_proc_devices == NULL) {
++ PRINT_WARN("tape: Cannot register procfs entry tapedevices\n");
++ return;
++ }
++ tape_proc_devices->proc_fops = &tape_proc_ops;
++ tape_proc_devices->owner = THIS_MODULE;
++}
++
++/*
++ * Cleanup all stuff registered to the procfs
++ */
++void tape_proc_cleanup(void) {
++ if(tape_proc_devices != NULL)
++ remove_proc_entry ("tapedevices", &proc_root);
++}
+=== drivers/s390/char/sclp_rw.h
+==================================================================
+--- drivers/s390/char/sclp_rw.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/sclp_rw.h (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,98 @@
++/*
++ * drivers/s390/char/sclp_rw.h
++ * interface to the SCLP-read/write driver
++ *
++ * S390 version
++ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Martin Peschke <mpeschke at de.ibm.com>
++ * Martin Schwidefsky <schwidefsky at de.ibm.com>
++ */
++
++#ifndef __SCLP_RW_H__
++#define __SCLP_RW_H__
++
++#include <linux/list.h>
++#include <linux/timer.h>
++
++struct mto {
++ u16 length;
++ u16 type;
++ u16 line_type_flags;
++ u8 alarm_control;
++ u8 _reserved[3];
++} __attribute__((packed));
++
++struct go {
++ u16 length;
++ u16 type;
++ u32 domid;
++ u8 hhmmss_time[8];
++ u8 th_time[3];
++ u8 reserved_0;
++ u8 dddyyyy_date[7];
++ u8 _reserved_1;
++ u16 general_msg_flags;
++ u8 _reserved_2[10];
++ u8 originating_system_name[8];
++ u8 job_guest_name[8];
++} __attribute__((packed));
++
++struct mdb_header {
++ u16 length;
++ u16 type;
++ u32 tag;
++ u32 revision_code;
++} __attribute__((packed));
++
++struct mdb {
++ struct mdb_header header;
++ struct go go;
++} __attribute__((packed));
++
++struct msg_buf {
++ struct evbuf_header header;
++ struct mdb mdb;
++} __attribute__((packed));
++
++struct write_sccb {
++ struct sccb_header header;
++ struct msg_buf msg_buf;
++} __attribute__((packed));
++
++/* The number of empty mto buffers that can be contained in a single sccb. */
++#define NR_EMPTY_MTO_PER_SCCB ((PAGE_SIZE - sizeof(struct sclp_buffer) - \
++ sizeof(struct write_sccb)) / sizeof(struct mto))
++
++/*
++ * data structure for information about a list of SCCBs (only for
++ * writing); it will be located at the end of an SCCB's page
++ */
++struct sclp_buffer {
++ struct list_head list; /* list_head for sccb_info chain */
++ struct sclp_req request;
++ struct write_sccb *sccb;
++ char *current_line;
++ int current_length;
++ int retry_count;
++ struct timer_list retry_timer;
++ /* output format settings */
++ unsigned short columns;
++ unsigned short htab;
++ /* statistics about this buffer */
++ unsigned int mto_char_sum; /* # chars in sccb */
++ unsigned int mto_number; /* # mtos in sccb */
++ /* Callback that is called after reaching final status. */
++ void (*callback)(struct sclp_buffer *, int);
++};
++
++int sclp_rw_init(void);
++struct sclp_buffer *sclp_make_buffer(void *, unsigned short, unsigned short);
++void *sclp_unmake_buffer(struct sclp_buffer *);
++int sclp_buffer_space(struct sclp_buffer *);
++int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int, int);
++void sclp_emit_buffer(struct sclp_buffer *,void (*)(struct sclp_buffer *,int));
++void sclp_set_columns(struct sclp_buffer *, unsigned short);
++void sclp_set_htab(struct sclp_buffer *, unsigned short);
++int sclp_chars_in_buffer(struct sclp_buffer *);
++
++#endif /* __SCLP_RW_H__ */
+=== drivers/s390/char/tape_devmap.c
+==================================================================
+--- drivers/s390/char/tape_devmap.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tape_devmap.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,949 @@
++/*
++ * drivers/s390/char/tape_devmap.c
++ * device mapping for tape device driver
++ *
++ * S390 and zSeries version
++ * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Carsten Otte <cotte at de.ibm.com>
++ * Michael Holzheu <holzheu at de.ibm.com>
++ * Tuan Ngo-Anh <ngoanh at de.ibm.com>
++ * Martin Schwidefsky <schwidefsky at de.ibm.com>
++ * Stefan Bader <shbader at de.ibm.com>
++ *
++ * Device mapping and tape= parameter parsing functions. None of the
++ * devmap functions may be called from interrupt context; in
++ * particular, tape_get_device is a no-no from interrupt context.
++ */
++
++#include <linux/config.h>
++#include <linux/version.h>
++#include <linux/ctype.h>
++#include <linux/init.h>
++
++#include <asm/debug.h>
++#include <asm/irq.h>
++#include <asm/uaccess.h>
++
++/* This is ugly... */
++#define PRINTK_HEADER "tape_devmap:"
++
++#define TAPE_DBF_AREA tape_core_dbf
++#include "tape.h"
++
++struct tape_devmap {
++ struct list_head list;
++ int devindex;
++ unsigned short devno;
++ devreg_t devreg;
++ struct tape_device *device;
++};
++
++struct tape_discmap {
++ struct list_head list;
++ devreg_t devreg;
++ struct tape_discipline *discipline;
++};
++
++/*
++ * List of all registered tapes and disciplines.
++ */
++static struct list_head tape_devreg_list = LIST_HEAD_INIT(tape_devreg_list);
++static struct list_head tape_disc_devreg_list = LIST_HEAD_INIT(tape_disc_devreg_list);
++int tape_max_devindex = 0;
++
++/*
++ * Single spinlock to protect devmap structures and lists.
++ */
++static spinlock_t tape_devmap_lock = SPIN_LOCK_UNLOCKED;
++
++/*
++ * Module/Kernel Parameter Handling. The syntax of tape= is:
++ * <devno> : (0x)?[0-9a-fA-F]+
++ * <range> : <devno>(-<devno>)?
++ * <tape> : <range>(,<range>)*
++ */
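++/*
++ * For example (with hypothetical device numbers):
++ * tape=0x0181,0x0190-0x0193
++ */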
++int tape_autodetect = 0; /* true when autodetection is active */
++
++/*
++ * char *tape[] is intended to hold the ranges supplied by the tape=
++ * statement. It is named 'tape' so that it is filled directly by insmod
++ * with the comma-separated strings when running as a module.
++ */
++static char *tape[256];
++MODULE_PARM (tape, "1-" __MODULE_STRING (256) "s");
++
++#ifndef MODULE
++/*
++ * The parameter parsing functions for builtin-drivers are called
++ * before kmalloc works. Store the pointers to the parameters strings
++ * into tape[] for later processing.
++ */
++static int __init
++tape_call_setup (char *str)
++{
++ static int count = 0;
++
++ if (count < 256)
++ tape[count++] = str;
++ return 1;
++}
++
++__setup("tape=", tape_call_setup);
++#endif /* not defined MODULE */
++
++/*
++ * Add a range of devices and create the corresponding devreg_t
++ * structures. The order of the ranges added by this function
++ * will define the kdevs for the individual devices.
++ */
++int
++tape_add_range(int from, int to)
++{
++ struct tape_devmap *devmap, *tmp;
++ struct list_head *l;
++ int devno;
++ int rc;
++
++ if (from > to) {
++ PRINT_ERR("Invalid device range %04x-%04x", from, to);
++ return -EINVAL;
++ }
++
++ rc = 0;
++ spin_lock(&tape_devmap_lock);
++ for (devno = from; devno <= to; devno++) {
++ devmap = NULL;
++ list_for_each(l, &tape_devreg_list) {
++ tmp = list_entry(l, struct tape_devmap, list);
++ if (tmp->devno == devno) {
++ devmap = tmp;
++ break;
++ }
++ }
++ if (devmap == NULL) {
++ if(tape_max_devindex >= 256/TAPE_MINORS_PER_DEV) {
++ PRINT_ERR(" No more device slots available."
++ " Range %04x-%04x ignored\n",
++ devno, to);
++ rc = -E2BIG;
++ break;
++ }
++ /* This devno is new. */
++ devmap = (struct tape_devmap *)
++ kmalloc(sizeof(struct tape_devmap),
++ GFP_KERNEL);
++ if (devmap == NULL) {
++ rc = -ENOMEM;
++ break;
++ }
++ memset(devmap, 0, sizeof(struct tape_devmap));
++ devmap->devno = devno;
++ devmap->devindex = tape_max_devindex++;
++ list_add(&devmap->list, &tape_devreg_list);
++ devmap->devreg.ci.devno = devno;
++ devmap->devreg.flag = DEVREG_TYPE_DEVNO;
++ devmap->devreg.oper_func = tape_oper_handler;
++ s390_device_register(&devmap->devreg);
++ }
++ }
++ spin_unlock(&tape_devmap_lock);
++
++ return rc;
++}
++
++/*
++ * Read a device number from the string. The number is always in hex;
++ * a leading 0x is accepted (and has to be removed for simple_strtoul
++ * to work).
++ */
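++/* For example, "0x0181" and "0181" both parse to devno 0x0181. */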
++static inline int
++tape_devno(char *str, char **endp)
++{
++ int devno;
++
++ /* remove leading '0x' */
++ if (*str == '0' && (*(str+1) == 'x' || *(str+1) == 'X'))
++ str += 2;
++
++ if (!isxdigit(*str))
++ return -EINVAL;
++
++ devno = simple_strtoul(str, endp, 16); /* interpret anything as hex */
++
++ if(devno < 0 || devno > 0xffff) {
++ PRINT_ERR(" Invalid devno(0x%04x) specified\n", devno);
++ return -EINVAL;
++ }
++
++ return devno;
++}
++
++/*
++ * Parse Kernel/Module Parameters and create devregs for dynamic attach/detach
++ */
++static int
++tape_parm_parse (char *str)
++{
++ int from, to, rc;
++
++ while (1) {
++ to = from = tape_devno(str, &str);
++ if (*str == '-') {
++ str++;
++ to = tape_devno(str, &str);
++ }
++ /* Negative numbers in from/to indicate errors. */
++ if (from >= 0 && to >= 0) {
++ rc = tape_add_range(from, to);
++ if (rc)
++ return rc;
++ }
++ if (*str != ',')
++ break;
++ str++;
++ }
++ if (*str != '\0') {
++ PRINT_WARN(" Junk at end of tape parameter string: %s\n", str);
++ return -EINVAL;
++ }
++ return 0;
++}
++
++/*
++ * Parse parameters stored in tape[].
++ */
++static int
++tape_parse(void)
++{
++ int rc, i;
++
++ if (*tape == NULL) {
++ /* No parameters present */
++ PRINT_INFO ("No parameters supplied, enabling auto detect "
++ "mode for all supported devices.\n");
++ tape_autodetect = 1;
++ return 0;
++ }
++ PRINT_INFO("Using ranges supplied in parameters, "
++ "disabling auto detect mode.\n");
++ rc = 0;
++ for (i = 0; i < 256; i++) {
++ if (tape[i] == NULL)
++ break;
++ rc = tape_parm_parse(tape[i]);
++ if (rc) {
++ PRINT_ERR(" Error while parsing parameters. "
++ "Setup may be incomplete.\n");
++ break;
++ }
++ }
++ return rc;
++}
++
++/*
++ * Create a devreg for a discipline. This is only done if no explicit
++ * tape range is given. The tape_oper_handler will call tape_add_range
++ * for each device that appears.
++ */
++static int
++tape_add_disc_devreg(struct tape_discipline *discipline)
++{
++ struct tape_discmap *discmap;
++
++ discmap = (struct tape_discmap *) kmalloc(sizeof(struct tape_discmap),
++ GFP_KERNEL);
++ if (discmap == NULL) {
++ PRINT_WARN("Could not alloc devreg: Out of memory\n"
++ "Dynamic attach/detach will not work!\n");
++ return -ENOMEM;
++ }
++ spin_lock(&tape_devmap_lock);
++ discmap->devreg.ci.hc.ctype = discipline->cu_type;
++ discmap->devreg.flag = DEVREG_MATCH_CU_TYPE | DEVREG_TYPE_DEVCHARS;
++ discmap->devreg.oper_func = tape_oper_handler;
++ s390_device_register(&discmap->devreg);
++ list_add(&discmap->list, &tape_disc_devreg_list);
++ spin_unlock(&tape_devmap_lock);
++ return 0;
++}
++
++/*
++ * Free devregs for a discipline.
++ */
++static void
++tape_del_disc_devreg(struct tape_discipline *discipline)
++{
++ struct list_head *l;
++ struct tape_discmap *discmap;
++
++ spin_lock(&tape_devmap_lock);
++ list_for_each(l, &tape_disc_devreg_list) {
++ discmap = list_entry(l, struct tape_discmap, list);
++ if (discmap->discipline == discipline) {
++ s390_device_unregister(&discmap->devreg);
++ list_del(&discmap->list);
++ kfree(discmap);
++ break;
++ }
++ }
++ spin_unlock(&tape_devmap_lock);
++}
++
++
++/*
++ * Forget all about device numbers and disciplines.
++ * This may only be called at module unload or system shutdown.
++ */
++static void
++tape_forget_devregs(void)
++{
++ struct list_head *l, *n;
++ struct tape_devmap *devmap;
++ struct tape_discmap *discmap;
++
++ spin_lock(&tape_devmap_lock);
++ list_for_each_safe(l, n, &tape_devreg_list) {
++ devmap = list_entry(l, struct tape_devmap, list);
++ if (devmap->device != NULL)
++ BUG();
++ s390_device_unregister(&devmap->devreg);
++ list_del(&devmap->list);
++ kfree(devmap);
++ }
++ list_for_each_safe(l, n, &tape_disc_devreg_list) {
++ discmap = list_entry(l, struct tape_discmap, list);
++ s390_device_unregister(&discmap->devreg);
++ list_del(&discmap->list);
++ kfree(discmap);
++ }
++ spin_unlock(&tape_devmap_lock);
++}
++
++/*
++ * Allocate memory for a new device structure.
++ */
++static struct tape_device *
++tape_alloc_device(void)
++{
++ struct tape_device *device;
++
++ device = (struct tape_device *)
++ kmalloc(sizeof(struct tape_device), GFP_KERNEL);
++ if (device == NULL) {
++ DBF_EXCEPTION(2, "ti:no mem\n");
++ PRINT_INFO ("can't allocate memory for "
++ "tape info structure\n");
++ return ERR_PTR(-ENOMEM);
++ }
++ memset(device, 0, sizeof(struct tape_device));
++ device->modeset_byte = (char *) kmalloc(1, GFP_KERNEL | GFP_DMA);
++ if (device->modeset_byte == NULL) {
++ DBF_EXCEPTION(2, "ti:no mem\n");
++ PRINT_INFO("can't allocate memory for modeset byte\n");
++ kfree(device);
++ return ERR_PTR(-ENOMEM);
++ }
++ INIT_LIST_HEAD(&device->req_queue);
++ init_waitqueue_head(&device->state_change_wq);
++ spin_lock_init(&device->assign_lock);
++ atomic_set(&device->ref_count, 1);
++ TAPE_SET_STATE(device, TAPE_STATUS_INIT);
++ device->medium_state = MS_UNKNOWN;
++ *device->modeset_byte = 0;
++
++ return device;
++}
++
++/*
++ * Create a device structure.
++ */
++static struct tape_device *
++tape_create_device(int devno)
++{
++ struct list_head *l;
++ struct tape_devmap *devmap, *tmp;
++ struct tape_device *device;
++ int rc;
++
++ DBF_EVENT(4, "tape_create_device(0x%04x)\n", devno);
++
++ device = tape_alloc_device();
++ if (IS_ERR(device))
++ return device;
++ /* Get devinfo from the common io layer. */
++ rc = get_dev_info_by_devno(devno, &device->devinfo);
++ if (rc) {
++ DBF_EVENT(3, "get_dev_info_by_devno returned %d\n", rc);
++ if (rc == -EUSERS) {
++ device->devinfo.status |= DEVSTAT_UNFRIENDLY_DEV;
++ } else {
++ tape_put_device(device);
++ return ERR_PTR(rc);
++ }
++ }
++
++ spin_lock(&tape_devmap_lock);
++ devmap = NULL;
++ list_for_each(l, &tape_devreg_list) {
++ tmp = list_entry(l, struct tape_devmap, list);
++ if (tmp->devno == devno) {
++ devmap = tmp;
++ break;
++ }
++ }
++ if (devmap != NULL && devmap->device == NULL) {
++ devmap->device = tape_clone_device(device);
++ device->first_minor = devmap->devindex * TAPE_MINORS_PER_DEV;
++ } else if (devmap == NULL) {
++ /* devno not in tape range. */
++ DBF_EVENT(4, "No devmap for entry 0x%04x\n", devno);
++ tape_put_device(device);
++ device = ERR_PTR(-ENODEV);
++ } else {
++ /* Should not happen. */
++ DBF_EVENT(4, "A devmap entry for 0x%04x already exists\n",
++ devno);
++ tape_put_device(device);
++ device = ERR_PTR(-EEXIST);
++ }
++ spin_unlock(&tape_devmap_lock);
++
++ return device;
++}
++
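++/*
++ * Take an additional reference on a device structure and return the
++ * pointer for convenience. The reference is dropped again with
++ * tape_put_device.
++ */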
++struct tape_device *
++tape_clone_device(struct tape_device *device)
++{
++ DBF_EVENT(4, "tape_clone_device(%p) = %i\n", device,
++ atomic_inc_return(&device->ref_count));
++ return device;
++}
++
++/*
++ * Find tape device by a device index.
++ */
++struct tape_device *
++tape_get_device(int devindex)
++{
++ struct list_head *l;
++ struct tape_devmap *devmap;
++ struct tape_device *device;
++
++ DBF_EVENT(5, "tape_get_device(%i)\n", devindex);
++
++ device = ERR_PTR(-ENODEV);
++ spin_lock(&tape_devmap_lock);
++ /* Find the devmap with device index devindex. */
++ list_for_each(l, &tape_devreg_list) {
++ devmap = list_entry(l, struct tape_devmap, list);
++ if (devmap->devindex == devindex) {
++ if (devmap->device != NULL) {
++ device = tape_clone_device(devmap->device);
++ }
++ break;
++ }
++ }
++ spin_unlock(&tape_devmap_lock);
++ return device;
++}
++
++/*
++ * Find tape handle by a devno.
++ */
++struct tape_device *
++tape_get_device_by_devno(int devno)
++{
++ struct list_head *l;
++ struct tape_devmap *devmap;
++ struct tape_device *device;
++
++ DBF_EVENT(5, "tape_get_device_by_devno(0x%04x)\n", devno);
++
++ device = ERR_PTR(-ENODEV);
++ spin_lock(&tape_devmap_lock);
++
++ list_for_each(l, &tape_devreg_list) {
++ devmap = list_entry(l, struct tape_devmap, list);
++ if(devmap->device != NULL && devmap->devno == devno) {
++ device = tape_clone_device(devmap->device);
++ break;
++ }
++ }
++ spin_unlock(&tape_devmap_lock);
++
++ return device;
++}
++
++/*
++ * Find tape handle by a device irq.
++ */
++struct tape_device *
++tape_get_device_by_irq(int irq)
++{
++ struct list_head *l;
++ struct tape_devmap *devmap;
++ struct tape_device *device;
++
++ DBF_EVENT(5, "tape_get_device_by_irq(0x%02x)\n", irq);
++
++ device = ERR_PTR(-ENODEV);
++ spin_lock(&tape_devmap_lock);
++ /* Find the devmap whose device is registered on this irq. */
++ list_for_each(l, &tape_devreg_list) {
++ devmap = list_entry(l, struct tape_devmap, list);
++ if (devmap->device != NULL &&
++ devmap->device->devinfo.irq == irq) {
++ device = tape_clone_device(devmap->device);
++ break;
++ }
++ }
++ spin_unlock(&tape_devmap_lock);
++ return device;
++}
++
++/*
++ * Decrease the reference counter of a devices structure. If the
++ * reference counter reaches zero free the device structure and
++ * wake up sleepers.
++ */
++void
++tape_put_device(struct tape_device *device)
++{
++ int remain;
++
++ DBF_EVENT(4, "tape_put_device(%p)\n", device);
++
++ if ((remain = atomic_dec_return(&device->ref_count)) > 0) {
++ DBF_EVENT(5, "remaining = %i\n", remain);
++ return;
++ }
++
++ /*
++ * Reference counter dropped to zero. This means
++ * that the device is deleted and the last user
++ * of the device structure is gone; that is what
++ * tape_delete_device is waiting for.
++ */
++ if(remain < 0) {
++ PRINT_ERR("put device without reference\n");
++ return;
++ }
++
++ /*
++ * Free memory of a device structure.
++ */
++ kfree(device->modeset_byte);
++ kfree(device);
++}
++
++void
++tape_devmap_remove_device(struct tape_device *device) {
++ struct tape_devmap * devmap;
++ struct list_head * l;
++ unsigned long flags;
++
++ spin_lock_irqsave(&tape_devmap_lock, flags);
++ list_for_each(l, &tape_devreg_list) {
++ devmap = list_entry(l, struct tape_devmap, list);
++
++ if(device->devinfo.devno == devmap->devno) {
++ devmap->device = NULL;
++ tape_put_device(device);
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&tape_devmap_lock, flags);
++}
++
++/*
++ * Scan the device range for devices with matching cu_type, create
++ * their device structures and enable them.
++ */
++void
++tape_add_devices(struct tape_discipline *discipline)
++{
++ struct list_head *l;
++ struct tape_devmap *devmap;
++ struct tape_device *device;
++
++ /*
++ * Scan tape devices for matching cu type.
++ */
++ list_for_each(l, &tape_devreg_list) {
++ devmap = list_entry(l, struct tape_devmap, list);
++ device = tape_create_device(devmap->devno);
++ if (IS_ERR(device))
++ continue;
++
++ if (device->devinfo.sid_data.cu_type == discipline->cu_type) {
++ DBF_EVENT(4, "tape_add_devices(%p)\n", discipline);
++ DBF_EVENT(4, "det irq: %x\n", device->devinfo.irq);
++ DBF_EVENT(4, "cu : %x\n", discipline->cu_type);
++
++ if(tape_enable_device(device, discipline) < 0) {
++ devmap->device = NULL;
++ tape_put_device(device);
++ }
++ } else {
++ devmap->device = NULL;
++ tape_put_device(device);
++ }
++ tape_put_device(device);
++ }
++ if (tape_autodetect)
++ tape_add_disc_devreg(discipline);
++}
++
++/*
++ * Scan the device range for devices with matching cu_type, disable them
++ * and remove their device structures.
++ */
++void
++tape_remove_devices(struct tape_discipline *discipline)
++{
++ struct list_head *l;
++ struct tape_devmap *devmap;
++ struct tape_device *device;
++
++ if (tape_autodetect)
++ tape_del_disc_devreg(discipline);
++ /*
++ * Go through our tape info list and disable, dequeue and free
++ * all devices with a matching discipline.
++ */
++ list_for_each(l, &tape_devreg_list) {
++ devmap = list_entry(l, struct tape_devmap, list);
++ device = devmap->device;
++ if (device == NULL)
++ continue;
++ if (device->discipline == discipline) {
++ tape_disable_device(device, 0);
++ tape_put_device(device);
++ devmap->device = NULL;
++ }
++ }
++}
++
++/*
++ * Auto detect tape devices.
++ */
++void
++tape_auto_detect(void)
++{
++ struct tape_device *device;
++ struct tape_discipline *discipline;
++ s390_dev_info_t dinfo;
++ int irq, devno;
++
++ if (!tape_autodetect)
++ return;
++ for (irq = get_irq_first(); irq != -ENODEV; irq = get_irq_next(irq)) {
++ /* Get device info block. */
++ devno = get_devno_by_irq(irq);
++ if (get_dev_info_by_irq(irq, &dinfo) < 0)
++ continue;
++ /* Search discipline with matching cu_type */
++ discipline = tape_get_discipline(dinfo.sid_data.cu_type);
++ if (discipline == NULL)
++ continue;
++ DBF_EVENT(4, "tape_auto_detect()\n");
++ DBF_EVENT(4, "det irq: %x\n", irq);
++ DBF_EVENT(4, "cu : %x\n", dinfo.sid_data.cu_type);
++ if (tape_add_range(dinfo.devno, dinfo.devno) == 0) {
++ device = tape_create_device(devno);
++ if (!IS_ERR(device)) {
++ if(tape_enable_device(device, discipline) < 0)
++ tape_devmap_remove_device(device);
++ tape_put_device(device);
++ }
++ }
++ tape_put_discipline(discipline);
++ }
++}
++
++/*
++ * Private task queue for oper/noper handling...
++ */
++static DECLARE_TASK_QUEUE(tape_cio_tasks);
++
++/*
++ * Oper Handler is called from Ingo's I/O layer when a new tape device is
++ * attached.
++ */
++static void
++do_tape_oper_handler(void *data)
++{
++ struct {
++ int devno;
++ int cu_type;
++ struct tq_struct task;
++ } *p;
++ struct tape_device *device;
++ struct tape_discipline *discipline;
++ unsigned long flags;
++
++ p = (void *) data;
++
++ /*
++ * Handling the path revalidation scheme or common IO. Devices that
++ * were detected before will be reactivated.
++ */
++ if(!IS_ERR(device = tape_get_device_by_devno(p->devno))) {
++ spin_lock_irqsave(get_irq_lock(device->devinfo.irq), flags);
++ if (!TAPE_NOACCESS(device)) {
++ PRINT_ERR(
++ "Oper handler for irq %d called, "
++ "which is (still) internally used.\n",
++ device->devinfo.irq);
++ } else {
++ DBF_EVENT(3,
++ "T390(%04x): resume processing\n",
++ p->devno);
++ TAPE_CLEAR_STATE(device, TAPE_STATUS_NOACCESS);
++ tape_schedule_bh(device);
++ }
++ spin_unlock_irqrestore(
++ get_irq_lock(device->devinfo.irq), flags);
++
++ tape_put_device(device);
++ kfree(p);
++ return;
++ }
++
++ /* If we get here, no device with this devno exists yet. */
++ if (tape_autodetect && tape_add_range(p->devno, p->devno) != 0) {
++ kfree(p);
++ return;
++ }
++
++ /* Find discipline for this device. */
++ discipline = tape_get_discipline(p->cu_type);
++ if (discipline == NULL) {
++ /* Strange. Should not happen. */
++ kfree(p);
++ return;
++ }
++
++ device = tape_create_device(p->devno);
++ if (IS_ERR(device)) {
++ tape_put_discipline(discipline);
++ kfree(p);
++ return;
++ }
++ if(tape_enable_device(device, discipline) < 0)
++ tape_devmap_remove_device(device);
++ tape_put_device(device);
++ tape_put_discipline(discipline);
++ kfree(p);
++}
++
++int
++tape_oper_handler(int irq, devreg_t *devreg)
++{
++ struct {
++ int devno;
++ int cu_type;
++ struct tq_struct task;
++ } *p;
++ s390_dev_info_t dinfo;
++ int rc;
++
++ rc = get_dev_info_by_irq (irq, &dinfo);
++ if (rc < 0)
++ return rc;
++
++ /* No memory, we lose. */
++ if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
++ return -ENOMEM;
++
++ p->devno = dinfo.devno;
++ p->cu_type = dinfo.sid_data.cu_type;
++ memset(&p->task, 0, sizeof(struct tq_struct));
++ p->task.routine = do_tape_oper_handler;
++ p->task.data = p;
++
++ /* Queue call to do_tape_oper_handler. */
++ queue_task(&p->task, &tape_cio_tasks);
++ run_task_queue(&tape_cio_tasks);
++
++ return 0;
++}
++
++
++/*
++ * Not Oper Handler is called from Ingo's I/O layer when a tape device
++ * is detached.
++ */
++static void
++do_tape_noper_handler(void *data)
++{
++ struct {
++ int irq;
++ int status;
++ struct tq_struct task;
++ } *p;
++ struct tape_device *device;
++ struct list_head *l;
++ struct tape_devmap *devmap;
++ unsigned long flags;
++
++ p = data;
++
++ /*
++ * Find out the devno of the leaving device: CIO has already deleted
++ * this information so we need to find it by irq!
++ */
++ device = tape_get_device_by_irq(p->irq);
++ if (IS_ERR(device)) {
++ kfree(p);
++ return;
++ }
++
++ /*
++ * Handle the new path revalidation scheme of the common IO layer.
++ */
++ switch(p->status) {
++ case DEVSTAT_DEVICE_GONE:
++ case DEVSTAT_REVALIDATE: /* FIXME: What to do? */
++ tape_disable_device(device, 1);
++
++ /*
++ * Remove the device reference from the device map.
++ */
++ spin_lock_irqsave(&tape_devmap_lock, flags);
++ list_for_each(l, &tape_devreg_list) {
++ devmap = list_entry(
++ l, struct tape_devmap, list
++ );
++ if (devmap->device == device) {
++ tape_put_device(device);
++ devmap->device = NULL;
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&tape_devmap_lock, flags);
++ break;
++ case DEVSTAT_NOT_ACC:
++ /*
++ * Device shouldn't be accessed at the moment. The
++ * currently running request will complete.
++ */
++ spin_lock_irqsave(
++ get_irq_lock(device->devinfo.irq), flags
++ );
++ DBF_EVENT(3, "T390(%04x): suspend processing\n",
++ device->devinfo.devno);
++ TAPE_SET_STATE(device, TAPE_STATUS_NOACCESS);
++ spin_unlock_irqrestore(
++ get_irq_lock(device->devinfo.irq), flags
++ );
++ break;
++ case DEVSTAT_NOT_ACC_ERR: {
++ struct tape_request *request;
++
++ /*
++ * Device shouldn't be accessed at the moment. The
++ * request that was running is lost.
++ */
++ spin_lock_irqsave(
++ get_irq_lock(device->devinfo.irq), flags
++ );
++
++ request = list_entry(device->req_queue.next,
++ struct tape_request, list);
++ if (!list_empty(&device->req_queue) &&
++ request->status == TAPE_REQUEST_IN_IO) {
++ /* Argh! Might better belong to tape_core.c */
++ list_del(&request->list);
++ request->rc = -EIO;
++ request->status = TAPE_REQUEST_DONE;
++ if (request->callback != NULL) {
++ request->callback(
++ request,
++ request->callback_data
++ );
++ request->callback = NULL;
++ }
++ }
++ DBF_EVENT(3, "T390(%04x): suspend processing\n",
++ device->devinfo.devno);
++ DBF_EVENT(3, "T390(%04x): request lost\n",
++ device->devinfo.devno);
++ TAPE_SET_STATE(device, TAPE_STATUS_NOACCESS);
++ spin_unlock_irqrestore(
++ get_irq_lock(device->devinfo.irq), flags
++ );
++ break;
++ }
++ default:
++ PRINT_WARN("T390(%04x): no operation handler called "
++ "with unknown status(0x%x)\n",
++ device->devinfo.devno, p->status);
++ tape_disable_device(device, 1);
++
++ /*
++ * Remove the device reference from the device map.
++ */
++ spin_lock_irqsave(&tape_devmap_lock, flags);
++ list_for_each(l, &tape_devreg_list) {
++ devmap = list_entry(
++ l, struct tape_devmap, list
++ );
++ if (devmap->device == device) {
++ tape_put_device(device);
++ devmap->device = NULL;
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&tape_devmap_lock, flags);
++ }
++
++ tape_put_device(device);
++ kfree(p);
++}
++
++void
++tape_noper_handler(int irq, int status)
++{
++ struct {
++ int irq;
++ int status;
++ struct tq_struct task;
++ } *p;
++
++ /* No memory, we lose. */
++ if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
++ return;
++
++ p->irq = irq;
++ p->status = status;
++ memset(&p->task, 0, sizeof(struct tq_struct));
++ p->task.routine = do_tape_noper_handler;
++ p->task.data = p;
++
++ /* Queue call to do_tape_noper_handler. */
++ queue_task(&p->task, &tape_cio_tasks);
++ run_task_queue(&tape_cio_tasks);
++}
++
++
++int
++tape_devmap_init(void)
++{
++ return tape_parse();
++}
++
++void
++tape_devmap_exit(void)
++{
++ tape_forget_devregs();
++}
++
++EXPORT_SYMBOL(tape_get_device);
++EXPORT_SYMBOL(tape_get_device_by_irq);
++EXPORT_SYMBOL(tape_get_device_by_devno);
++EXPORT_SYMBOL(tape_put_device);
++EXPORT_SYMBOL(tape_clone_device);
+=== drivers/s390/char/tape_char.c
+==================================================================
+--- drivers/s390/char/tape_char.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tape_char.c (/trunk/2.4.27) (revision 52)
+@@ -0,0 +1,534 @@
++/*
++ * drivers/s390/char/tape_char.c
++ * character device frontend for tape device driver
++ *
++ * S390 and zSeries version
++ * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
++ * Author(s): Carsten Otte <cotte at de.ibm.com>
++ * Michael Holzheu <holzheu at de.ibm.com>
++ * Tuan Ngo-Anh <ngoanh at de.ibm.com>
++ * Martin Schwidefsky <schwidefsky at de.ibm.com>
++ * Stefan Bader <shbader at de.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/proc_fs.h>
++#include <linux/mtio.h>
++
++#include <asm/irq.h>
++#include <asm/s390dyn.h>
++#include <asm/uaccess.h>
++
++#define TAPE_DBF_AREA tape_core_dbf
++
++#include "tape.h"
++#include "tape_std.h"
++
++#define PRINTK_HEADER "TCHAR:"
++
++#define TAPECHAR_DEVFSMODE 0020644 /* crw-r--r-- */
++#define TAPECHAR_MAJOR 0 /* get dynamic major */
++
++int tapechar_major = TAPECHAR_MAJOR;
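++
++/*
++ * Each device owns TAPE_MINORS_PER_DEV consecutive minors starting at
++ * first_minor; the node at first_minor is the non-rewinding one, the
++ * node at first_minor + 1 rewinds on close (see the devfs registration
++ * and tapechar_release below).
++ */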
++
++/*
++ * Prototypes for file operation functions
++ */
++static ssize_t tapechar_read(struct file *, char *, size_t, loff_t *);
++static ssize_t tapechar_write(struct file *, const char *, size_t, loff_t *);
++static int tapechar_open(struct inode *,struct file *);
++static int tapechar_release(struct inode *,struct file *);
++static int tapechar_ioctl(struct inode *, struct file *, unsigned int,
++ unsigned long);
++
++/*
++ * File operation structure for tape character frontend
++ */
++static struct file_operations tape_fops =
++{
++ .read = tapechar_read,
++ .write = tapechar_write,
++ .ioctl = tapechar_ioctl,
++ .open = tapechar_open,
++ .release = tapechar_release,
++};
++
++#ifdef CONFIG_DEVFS_FS
++/*
++ * Create Char directory with (non)rewinding entries
++ */
++static int
++tapechar_mkdevfstree(struct tape_device *device)
++{
++ device->char_data.devfs_char_dir =
++ devfs_mk_dir(device->devfs_dir, "char", device);
++ if (device->char_data.devfs_char_dir == NULL)
++ return -ENOENT;
++ device->char_data.devfs_nonrewinding =
++ devfs_register(device->char_data.devfs_char_dir,
++ "nonrewinding", DEVFS_FL_DEFAULT,
++ tapechar_major, device->first_minor,
++ TAPECHAR_DEVFSMODE, &tape_fops, device);
++ if (device->char_data.devfs_nonrewinding == NULL) {
++ devfs_unregister(device->char_data.devfs_char_dir);
++ return -ENOENT;
++ }
++ device->char_data.devfs_rewinding =
++ devfs_register(device->char_data.devfs_char_dir,
++ "rewinding", DEVFS_FL_DEFAULT,
++ tapechar_major, device->first_minor + 1,
++ TAPECHAR_DEVFSMODE, &tape_fops, device);
++ if (device->char_data.devfs_rewinding == NULL) {
++ devfs_unregister(device->char_data.devfs_nonrewinding);
++ devfs_unregister(device->char_data.devfs_char_dir);
++ return -ENOENT;
++ }
++ return 0;
++}
++
++/*
++ * Remove devfs entries
++ */
++static void
++tapechar_rmdevfstree (struct tape_device *device)
++{
++ if (device->char_data.devfs_nonrewinding)
++ devfs_unregister(device->char_data.devfs_nonrewinding);
++ if (device->char_data.devfs_rewinding)
++ devfs_unregister(device->char_data.devfs_rewinding);
++ if (device->char_data.devfs_char_dir)
++ devfs_unregister(device->char_data.devfs_char_dir);
++}
++#endif
++
++/*
++ * This function is called for every new tapedevice
++ */
++int
++tapechar_setup_device(struct tape_device * device)
++{
++#ifdef CONFIG_DEVFS_FS
++ int rc;
++
++ rc = tapechar_mkdevfstree(device);
++ if (rc)
++ return rc;
++#endif
++
++ tape_hotplug_event(device, tapechar_major, TAPE_HOTPLUG_CHAR_ADD);
++ return 0;
++
++}
++
++void
++tapechar_cleanup_device(struct tape_device* device)
++{
++#ifdef CONFIG_DEVFS_FS
++ tapechar_rmdevfstree(device);
++#endif
++ tape_hotplug_event(device, tapechar_major, TAPE_HOTPLUG_CHAR_REMOVE);
++}
++
++static inline int
++tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
++{
++ struct idal_buffer *new;
++
++ /* Idal buffer must be the same size as the requested block size! */
++ if (device->char_data.idal_buf != NULL &&
++ device->char_data.idal_buf->size == block_size)
++ return 0;
++
++ if(block_size > MAX_BLOCKSIZE) {
++ DBF_EVENT(3, "Invalid blocksize (%ld > %ld)\n",
++ block_size, MAX_BLOCKSIZE);
++ PRINT_ERR("Invalid blocksize (%ld > %ld)\n",
++ block_size, MAX_BLOCKSIZE);
++ return -EINVAL;
++ }
++
++ /* The current idal buffer is not big enough. Allocate a new one. */
++ new = idal_buffer_alloc(block_size, 0);
++ if (new == NULL)
++ return -ENOMEM;
++ if (device->char_data.idal_buf != NULL)
++ idal_buffer_free(device->char_data.idal_buf);
++ device->char_data.idal_buf = new;
++ return 0;
++}
++
++/*
++ * Tape device read function
++ */
++ssize_t
++tapechar_read (struct file *filp, char *data, size_t count, loff_t *ppos)
++{
++ struct tape_device *device;
++ struct tape_request *request;
++ size_t block_size;
++ int rc;
++
++ DBF_EVENT(6, "TCHAR:read\n");
++ device = (struct tape_device *) filp->private_data;
++
++ /* Check position. */
++ if (ppos != &filp->f_pos) {
++ /*
++ * "A request was outside the capabilities of the device."
++ * This check uses internal knowledge about how pread and
++ * read work...
++ */
++ DBF_EVENT(6, "TCHAR:ppos wrong\n");
++ return -EOVERFLOW;
++ }
++
++ /*
++ * If the tape isn't terminated yet, do it now. And since we then
++ * are at the end of the tape there wouldn't be anything to read
++ * anyway, so we return immediately.
++ */
++ if(device->required_tapemarks) {
++ return tape_std_terminate_write(device);
++ }
++
++ /* Find out block size to use */
++ if (device->char_data.block_size != 0) {
++ if (count < device->char_data.block_size) {
++ DBF_EVENT(3, "TCHAR:read smaller than block "
++ "size was requested\n");
++ return -EINVAL;
++ }
++ block_size = device->char_data.block_size;
++ } else {
++ block_size = count;
++ }
++
++ /*
++ * Set the idal buffer to the correct size. The fixed block size
++ * could have been set some time ago, and the idal buffer is
++ * released when the device is closed!
++ */
++ rc = tapechar_check_idalbuffer(device, block_size);
++ if (rc)
++ return rc;
++
++ DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size);
++ /* Let the discipline build the ccw chain. */
++ request = device->discipline->read_block(device, block_size);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++ /* Execute it. */
++ rc = tape_do_io(device, request);
++ if (rc == 0) {
++ rc = block_size - device->devstat.rescnt;
++ DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc);
++ filp->f_pos += rc;
++ /* Copy data from idal buffer to user space. */
++ if (idal_buffer_to_user(device->char_data.idal_buf,
++ data, rc) != 0)
++ rc = -EFAULT;
++ }
++ tape_put_request(request);
++ return rc;
++}
++
++/*
++ * Tape device write function
++ */
++ssize_t
++tapechar_write(struct file *filp, const char *data, size_t count, loff_t *ppos)
++{
++ struct tape_device *device;
++ struct tape_request *request;
++ size_t block_size;
++ size_t written;
++ int nblocks;
++ int i, rc;
++
++ DBF_EVENT(6, "TCHAR:write\n");
++ device = (struct tape_device *) filp->private_data;
++ /* Check position */
++ if (ppos != &filp->f_pos) {
++ /* "A request was outside the capabilities of the device." */
++ DBF_EVENT(6, "TCHAR:ppos wrong\n");
++ return -EOVERFLOW;
++ }
++ /* Find out block size and number of blocks */
++ if (device->char_data.block_size != 0) {
++ if (count < device->char_data.block_size) {
++ DBF_EVENT(3, "TCHAR:write smaller than block "
++ "size was requested\n");
++ return -EINVAL;
++ }
++ block_size = device->char_data.block_size;
++ nblocks = count / block_size;
++ } else {
++ block_size = count;
++ nblocks = 1;
++ }
++
++ /* Set the idal buffer to the correct size. */
++ rc = tapechar_check_idalbuffer(device, block_size);
++ if (rc)
++ return rc;
++
++ DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size);
++ DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks);
++ /* Let the discipline build the ccw chain. */
++ request = device->discipline->write_block(device, block_size);
++ if (IS_ERR(request))
++ return PTR_ERR(request);
++ rc = 0;
++ written = 0;
++ for (i = 0; i < nblocks; i++) {
++ /* Copy data from user space to idal buffer. */
++ if (idal_buffer_from_user(device->char_data.idal_buf,
++ data, block_size)) {
++ rc = -EFAULT;
++ break;
++ }
++ rc = tape_do_io(device, request);
++ if (rc)
++ break;
++ DBF_EVENT(6, "TCHAR:wbytes: %lx\n",
++ block_size - device->devstat.rescnt);
++ filp->f_pos += block_size - device->devstat.rescnt;
++ written += block_size - device->devstat.rescnt;
++ if (device->devstat.rescnt != 0)
++ break;
++ data += block_size;
++ }
++ tape_put_request(request);
++
++ if (rc == -ENOSPC) {
++ /*
++ * Ok, the device has no more space. It has NOT written
++ * the block.
++ */
++ if (device->discipline->process_eov)
++ device->discipline->process_eov(device);
++ if (written > 0)
++ rc = 0;
++ }
++
++ /*
++ * After doing a write we always need two tapemarks to correctly
++ * terminate the tape (one to terminate the file, the second to
++ * flag the end of recorded data).
++ * Since process_eov positions the tape in front of the written
++ * tapemark it doesn't hurt to write two marks again.
++ */
++ if(!rc)
++ device->required_tapemarks = 2;
++
++ return rc ? rc : written;
++}
++
++/*
++ * Character frontend tape device open function.
++ */
++int
++tapechar_open (struct inode *inode, struct file *filp)
++{
++ struct tape_device *device;
++ int minor, rc;
++
++ MOD_INC_USE_COUNT;
++ if (major(filp->f_dentry->d_inode->i_rdev) != tapechar_major) {
++ MOD_DEC_USE_COUNT;
++ return -ENODEV;
++ }
++ minor = minor(filp->f_dentry->d_inode->i_rdev);
++ device = tape_get_device(minor / TAPE_MINORS_PER_DEV);
++ if (IS_ERR(device)) {
++ MOD_DEC_USE_COUNT;
++ return PTR_ERR(device);
++ }
++ DBF_EVENT(6, "TCHAR:open: %x\n", minor(inode->i_rdev));
++ rc = tape_open(device);
++ if (rc == 0) {
++ rc = tape_assign(device, TAPE_STATUS_ASSIGN_A);
++ if (rc == 0) {
++ filp->private_data = device;
++ return 0;
++ }
++ tape_release(device);
++ }
++ tape_put_device(device);
++ MOD_DEC_USE_COUNT;
++ return rc;
++}
++
++/*
++ * Character frontend tape device release function.
++ */
++
++int
++tapechar_release(struct inode *inode, struct file *filp)
++{
++ struct tape_device *device;
++
++ device = (struct tape_device *) filp->private_data;
++ DBF_EVENT(6, "TCHAR:release: %x\n", minor(inode->i_rdev));
++
++ /*
++ * If this is the rewinding tape minor then rewind. In that case we
++ * write all required tapemarks. Otherwise only one to terminate the
++ * file.
++ */
++ if ((minor(inode->i_rdev) & 1) != 0) {
++ if(device->required_tapemarks)
++ tape_std_terminate_write(device);
++ tape_mtop(device, MTREW, 1);
++ } else {
++ if(device->required_tapemarks > 1) {
++ if(tape_mtop(device, MTWEOF, 1) == 0)
++ device->required_tapemarks--;
++ }
++ }
++
++ if (device->char_data.idal_buf != NULL) {
++ idal_buffer_free(device->char_data.idal_buf);
++ device->char_data.idal_buf = NULL;
++ }
++ tape_unassign(device, TAPE_STATUS_ASSIGN_A);
++ tape_release(device);
++ filp->private_data = NULL;
++ tape_put_device(device);
++ MOD_DEC_USE_COUNT;
++ return 0;
++}
++
++/*
++ * Tape device io controls.
++ */
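++/*
++ * An illustrative MTIOCTOP call from user space:
++ * struct mtop op = { .mt_op = MTREW, .mt_count = 1 };
++ * ioctl(fd, MTIOCTOP, &op);
++ */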
++static int
++tapechar_ioctl(struct inode *inp, struct file *filp,
++ unsigned int no, unsigned long data)
++{
++ struct tape_device *device;
++ int rc;
++
++ DBF_EVENT(6, "TCHAR:ioct(%x)\n", no);
++
++ device = (struct tape_device *) filp->private_data;
++
++ if (no == MTIOCTOP) {
++ struct mtop op;
++
++ if (copy_from_user(&op, (char *) data, sizeof(op)) != 0)
++ return -EFAULT;
++ if (op.mt_count < 0)
++ return -EINVAL;
++
++ /*
++ * Operations that change tape position should write final
++ * tapemarks
++ */
++ switch(op.mt_op) {
++ case MTFSF:
++ case MTBSF:
++ case MTFSR:
++ case MTBSR:
++ case MTREW:
++ case MTOFFL:
++ case MTEOM:
++ case MTRETEN:
++ case MTBSFM:
++ case MTFSFM:
++ case MTSEEK:
++ if(device->required_tapemarks)
++ tape_std_terminate_write(device);
++ default:
++ ;
++ }
++ rc = tape_mtop(device, op.mt_op, op.mt_count);
++
++ if(op.mt_op == MTWEOF && rc == 0) {
++ if(op.mt_count > device->required_tapemarks)
++ device->required_tapemarks = 0;
++ else
++ device->required_tapemarks -= op.mt_count;
++ }
++ return rc;
++ }
++ if (no == MTIOCPOS) {
++ /* MTIOCPOS: query the tape position. */
++ struct mtpos pos;
++
++ rc = tape_mtop(device, MTTELL, 1);
++ if (rc < 0)
++ return rc;
++ pos.mt_blkno = rc;
++ if (copy_to_user((char *) data, &pos, sizeof(pos)) != 0)
++ return -EFAULT;
++ return 0;
++ }
++ if (no == MTIOCGET) {
++ /* MTIOCGET: query the tape drive status. */
++ struct mtget get;
++
++ memset(&get, 0, sizeof(get));
++ get.mt_type = MT_ISUNKNOWN;
++ get.mt_resid = device->devstat.rescnt;
++ get.mt_dsreg = device->tape_status;
++ /* FIXME: mt_erreg, mt_fileno */
++ get.mt_gstat = device->tape_generic_status;
++
++ if(device->medium_state == MS_LOADED) {
++ rc = tape_mtop(device, MTTELL, 1);
++
++ if(rc < 0)
++ return rc;
++
++ if(rc == 0)
++ get.mt_gstat |= GMT_BOT(~0);
++
++ get.mt_blkno = rc;
++ }
++ get.mt_erreg = 0;
++ if (copy_to_user((char *) data, &get, sizeof(get)) != 0)
++ return -EFAULT;
++ return 0;
++ }
++ /* Try the discipline ioctl function. */
++ if (device->discipline->ioctl_fn == NULL)
++ return -EINVAL;
++ return device->discipline->ioctl_fn(device, no, data);
++}
++
++/*
++ * Initialize character device frontend.
++ */
++int
++tapechar_init (void)
++{
++ int rc;
++
++ /* Register the tape major number to the kernel */
++#ifdef CONFIG_DEVFS_FS
++ if (tapechar_major == 0)
++ tapechar_major = devfs_alloc_major(DEVFS_SPECIAL_CHR);
++#endif
++ rc = register_chrdev(tapechar_major, "tape", &tape_fops);
++ if (rc < 0) {
++ PRINT_ERR("can't get major %d\n", tapechar_major);
++ DBF_EVENT(3, "TCHAR:initfail\n");
++ return rc;
++ }
++ if (tapechar_major == 0)
++ tapechar_major = rc; /* accept dynamic major number */
++ PRINT_INFO("Tape gets major %d for char device\n", tapechar_major);
++ DBF_EVENT(3, "Tape gets major %d for char device\n", rc);
++ DBF_EVENT(3, "TCHAR:init ok\n");
++ return 0;
++}
++
++/*
++ * cleanup
++ */
++void
++tapechar_exit(void)
++{
++ unregister_chrdev (tapechar_major, "tape");
++}
+=== drivers/s390/char/hwc_rw.h
+==================================================================
+--- drivers/s390/char/hwc_rw.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/hwc_rw.h (/trunk/2.4.27) (revision 52)
+@@ -1,132 +0,0 @@
+-/*
+- * drivers/s390/char/hwc_rw.h
+- * interface to the HWC-read/write driver
+- *
+- * S390 version
+- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+- * Author(s): Martin Peschke <mpeschke at de.ibm.com>
+- */
+-
+-#ifndef __HWC_RW_H__
+-#define __HWC_RW_H__
+-
+-#include <linux/ioctl.h>
+-
+-typedef struct {
+-
+- void (*move_input) (unsigned char *, unsigned int);
+-
+- void (*wake_up) (void);
+-} hwc_high_level_calls_t;
+-
+-struct _hwc_request;
+-
+-typedef void hwc_callback_t (struct _hwc_request *);
+-
+-typedef struct _hwc_request {
+- void *block;
+- u32 word;
+- hwc_callback_t *callback;
+- void *data;
+-} __attribute__ ((packed))
+-
+-hwc_request_t;
+-
+-#define HWC_ASCEBC(x) ((MACHINE_IS_VM ? _ascebc[x] : _ascebc_500[x]))
+-
+-#define HWC_EBCASC_STR(s,c) ((MACHINE_IS_VM ? EBCASC(s,c) : EBCASC_500(s,c)))
+-
+-#define HWC_ASCEBC_STR(s,c) ((MACHINE_IS_VM ? ASCEBC(s,c) : ASCEBC_500(s,c)))
+-
+-#define IN_HWCB 1
+-#define IN_WRITE_BUF 2
+-#define IN_BUFS_TOTAL (IN_HWCB | IN_WRITE_BUF)
+-
+-typedef unsigned short int ioctl_htab_t;
+-typedef unsigned char ioctl_echo_t;
+-typedef unsigned short int ioctl_cols_t;
+-typedef signed char ioctl_nl_t;
+-typedef unsigned short int ioctl_obuf_t;
+-typedef unsigned char ioctl_case_t;
+-typedef unsigned char ioctl_delim_t;
+-
+-typedef struct {
+- ioctl_htab_t width_htab;
+- ioctl_echo_t echo;
+- ioctl_cols_t columns;
+- ioctl_nl_t final_nl;
+- ioctl_obuf_t max_hwcb;
+- ioctl_obuf_t kmem_hwcb;
+- ioctl_case_t tolower;
+- ioctl_delim_t delim;
+-} hwc_ioctls_t;
+-
+-static hwc_ioctls_t _hwc_ioctls;
+-
+-#define HWC_IOCTL_LETTER 'B'
+-
+-#define TIOCHWCSHTAB _IOW(HWC_IOCTL_LETTER, 0, _hwc_ioctls.width_htab)
+-
+-#define TIOCHWCSECHO _IOW(HWC_IOCTL_LETTER, 1, _hwc_ioctls.echo)
+-
+-#define TIOCHWCSCOLS _IOW(HWC_IOCTL_LETTER, 2, _hwc_ioctls.columns)
+-
+-#define TIOCHWCSNL _IOW(HWC_IOCTL_LETTER, 4, _hwc_ioctls.final_nl)
+-
+-#define TIOCHWCSOBUF _IOW(HWC_IOCTL_LETTER, 5, _hwc_ioctls.max_hwcb)
+-
+-#define TIOCHWCSINIT _IO(HWC_IOCTL_LETTER, 6)
+-
+-#define TIOCHWCSCASE _IOW(HWC_IOCTL_LETTER, 7, _hwc_ioctls.tolower)
+-
+-#define TIOCHWCSDELIM _IOW(HWC_IOCTL_LETTER, 9, _hwc_ioctls.delim)
+-
+-#define TIOCHWCGHTAB _IOR(HWC_IOCTL_LETTER, 10, _hwc_ioctls.width_htab)
+-
+-#define TIOCHWCGECHO _IOR(HWC_IOCTL_LETTER, 11, _hwc_ioctls.echo)
+-
+-#define TIOCHWCGCOLS _IOR(HWC_IOCTL_LETTER, 12, _hwc_ioctls.columns)
+-
+-#define TIOCHWCGNL _IOR(HWC_IOCTL_LETTER, 14, _hwc_ioctls.final_nl)
+-
+-#define TIOCHWCGOBUF _IOR(HWC_IOCTL_LETTER, 15, _hwc_ioctls.max_hwcb)
+-
+-#define TIOCHWCGINIT _IOR(HWC_IOCTL_LETTER, 16, _hwc_ioctls)
+-
+-#define TIOCHWCGCASE _IOR(HWC_IOCTL_LETTER, 17, _hwc_ioctls.tolower)
+-
+-#define TIOCHWCGDELIM _IOR(HWC_IOCTL_LETTER, 19, _hwc_ioctls.delim)
+-
+-#define TIOCHWCGKBUF _IOR(HWC_IOCTL_LETTER, 20, _hwc_ioctls.max_hwcb)
+-
+-#define TIOCHWCGCURR _IOR(HWC_IOCTL_LETTER, 21, _hwc_ioctls)
+-
+-#ifndef __HWC_RW_C__
+-
+-extern int hwc_init (void);
+-
+-extern int hwc_write (int from_user, const unsigned char *, unsigned int);
+-
+-extern unsigned int hwc_chars_in_buffer (unsigned char);
+-
+-extern unsigned int hwc_write_room (unsigned char);
+-
+-extern void hwc_flush_buffer (unsigned char);
+-
+-extern void hwc_unblank (void);
+-
+-extern signed int hwc_ioctl (unsigned int, unsigned long);
+-
+-extern void do_hwc_interrupt (void);
+-
+-extern int hwc_printk (const char *,...);
+-
+-extern signed int hwc_register_calls (hwc_high_level_calls_t *);
+-
+-extern signed int hwc_unregister_calls (hwc_high_level_calls_t *);
+-
+-extern int hwc_send (hwc_request_t *);
+-
+-#endif
+-
+-#endif
+=== drivers/s390/char/tape3490.c
+==================================================================
+--- drivers/s390/char/tape3490.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tape3490.c (/trunk/2.4.27) (revision 52)
+@@ -1,156 +0,0 @@
+-/***************************************************************************
+- *
+- * drivers/s390/char/tape3490.c
+- * tape device discipline for 3490E tapes.
+- *
+- * S390 and zSeries version
+- * Copyright (C) 2001 IBM Corporation
+- * Author(s): Carsten Otte <cotte at de.ibm.com>
+- * Tuan Ngo-Anh <ngoanh at de.ibm.com>
+- *
+- ****************************************************************************
+- */
+-
+-#include "tapedefs.h"
+-#include <linux/version.h>
+-#include <asm/ccwcache.h> /* CCW allocations */
+-#include <asm/s390dyn.h>
+-#include <asm/debug.h>
+-#include <linux/compatmac.h>
+-#include "tape.h"
+-#include "tape34xx.h"
+-#include "tape3490.h"
+-
+-tape_event_handler_t tape3490_event_handler_table[TS_SIZE][TE_SIZE] =
+-{
+- /* {START , DONE, FAILED, ERROR, OTHER } */
+- {NULL, tape34xx_unused_done, NULL, NULL, NULL}, /* TS_UNUSED */
+- {NULL, tape34xx_idle_done, NULL, NULL, NULL}, /* TS_IDLE */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_DONE */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_FAILED */
+- {NULL, tape34xx_block_done, NULL, NULL, NULL}, /* TS_BLOCK_INIT */
+- {NULL, tape34xx_bsb_init_done, NULL, NULL, NULL}, /* TS_BSB_INIT */
+- {NULL, tape34xx_bsf_init_done, NULL, NULL, NULL}, /* TS_BSF_INIT */
+- {NULL, tape34xx_dse_init_done, NULL, NULL, NULL}, /* TS_DSE_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_EGA_INIT */
+- {NULL, tape34xx_fsb_init_done, NULL, NULL, NULL}, /* TS_FSB_INIT */
+- {NULL, tape34xx_fsf_init_done, NULL, NULL, NULL}, /* TS_FSF_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_LDI_INIT */
+- {NULL, tape34xx_lbl_init_done, NULL, NULL, NULL}, /* TS_LBL_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_MSE_INIT */
+- {NULL, tape34xx_nop_init_done, NULL, NULL, NULL}, /* TS_NOP_INIT */
+- {NULL, tape34xx_rfo_init_done, NULL, NULL, NULL}, /* TS_RBA_INIT */
+- {NULL, tape34xx_rbi_init_done, NULL, NULL, NULL}, /* TS_RBI_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_RBU_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_RBL_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_RDC_INIT */
+- {NULL, tape34xx_rfo_init_done, NULL, NULL, NULL}, /* TS_RFO_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_RSD_INIT */
+- {NULL, tape34xx_rew_init_done, NULL, NULL, NULL}, /* TS_REW_INIT */
+- {NULL, tape34xx_rew_release_init_done, NULL, NULL, NULL}, /* TS_REW_RELEASE_IMIT */
+- {NULL, tape34xx_run_init_done, NULL, NULL, NULL}, /* TS_RUN_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SEN_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SID_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SNP_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SPG_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SWI_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SMR_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SYN_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_TIO_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_UNA_INIT */
+- {NULL, tape34xx_wri_init_done, NULL, NULL, NULL}, /* TS_WRI_INIT */
+- {NULL, tape34xx_wtm_init_done, NULL, NULL, NULL}, /* TS_WTM_INIT */
+- {NULL, NULL, NULL, NULL, NULL}}; /* TS_NOT_OPER */
+-
+-devreg_t tape3490_devreg = {
+- ci:
+- {hc:
+- {ctype:0x3490}},
+- flag:DEVREG_MATCH_CU_TYPE | DEVREG_TYPE_DEVCHARS,
+- oper_func:tape_oper_handler
+-};
+-
+-void
+-tape3490_setup_assist (tape_info_t * ti)
+-{
+- tape3490_disc_data_t *data = NULL;
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"3490 dsetu");
+- debug_text_event (tape_debug_area,6,"dev:");
+- debug_int_event (tape_debug_area,6,ti->blk_minor);
+-#endif /* TAPE_DEBUG */
+- while (data == NULL)
+- data = kmalloc (sizeof (tape3490_disc_data_t), GFP_KERNEL);
+- data->modeset_byte = 0x00;
+- ti->discdata = (void *) data;
+-}
+-
+-
+-void
+-tape3490_shutdown (int autoprobe) {
+- if (autoprobe)
+- s390_device_unregister(&tape3490_devreg);
+-}
+-
+-
+-tape_discipline_t *
+-tape3490_init (int autoprobe)
+-{
+- tape_discipline_t *disc;
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"3490 init");
+-#endif /* TAPE_DEBUG */
+- disc = kmalloc (sizeof (tape_discipline_t), GFP_KERNEL);
+- if (disc == NULL) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,3,"disc:nomem");
+-#endif /* TAPE_DEBUG */
+- return disc;
+- }
+- disc->cu_type = 0x3490;
+- disc->setup_assist = tape3490_setup_assist;
+- disc->error_recovery = tape34xx_error_recovery;
+- disc->write_block = tape34xx_write_block;
+- disc->free_write_block = tape34xx_free_write_block;
+- disc->read_block = tape34xx_read_block;
+- disc->free_read_block = tape34xx_free_read_block;
+- disc->mtfsf = tape34xx_mtfsf;
+- disc->mtbsf = tape34xx_mtbsf;
+- disc->mtfsr = tape34xx_mtfsr;
+- disc->mtbsr = tape34xx_mtbsr;
+- disc->mtweof = tape34xx_mtweof;
+- disc->mtrew = tape34xx_mtrew;
+- disc->mtoffl = tape34xx_mtoffl;
+- disc->mtnop = tape34xx_mtnop;
+- disc->mtbsfm = tape34xx_mtbsfm;
+- disc->mtfsfm = tape34xx_mtfsfm;
+- disc->mteom = tape34xx_mteom;
+- disc->mterase = tape34xx_mterase;
+- disc->mtsetdensity = tape34xx_mtsetdensity;
+- disc->mtseek = tape34xx_mtseek;
+- disc->mttell = tape34xx_mttell;
+- disc->mtsetdrvbuffer = tape34xx_mtsetdrvbuffer;
+- disc->mtlock = tape34xx_mtlock;
+- disc->mtunlock = tape34xx_mtunlock;
+- disc->mtload = tape34xx_mtload;
+- disc->mtunload = tape34xx_mtunload;
+- disc->mtcompression = tape34xx_mtcompression;
+- disc->mtsetpart = tape34xx_mtsetpart;
+- disc->mtmkpart = tape34xx_mtmkpart;
+- disc->mtiocget = tape34xx_mtiocget;
+- disc->mtiocpos = tape34xx_mtiocpos;
+- disc->shutdown = tape3490_shutdown;
+- disc->discipline_ioctl_overload = tape34xx_ioctl_overload;
+- disc->event_table = &tape3490_event_handler_table;
+- disc->default_handler = tape34xx_default_handler;
+- disc->bread = tape34xx_bread;
+- disc->free_bread = tape34xx_free_bread;
+- disc->tape = NULL; /* pointer for backreference */
+- disc->next = NULL;
+- if (autoprobe)
+- s390_device_register(&tape3490_devreg);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"3490 regis");
+-#endif /* TAPE_DEBUG */
+- return disc;
+-}
+=== drivers/s390/char/tape3590.c
+==================================================================
+--- drivers/s390/char/tape3590.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tape3590.c (/trunk/2.4.27) (revision 52)
+@@ -1 +0,0 @@
+-// tbd
+=== drivers/s390/char/tape3490.h
+==================================================================
+--- drivers/s390/char/tape3490.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tape3490.h (/trunk/2.4.27) (revision 52)
+@@ -1,24 +0,0 @@
+-
+-/***************************************************************************
+- *
+- * drivers/s390/char/tape3490.h
+- * tape device discipline for 3490E tapes.
+- *
+- * S390 and zSeries version
+- * Copyright (C) 2001 IBM Corporation
+- * Author(s): Carsten Otte <cotte at de.ibm.com>
+- * Tuan Ngo-Anh <ngoanh at de.ibm.com>
+- *
+- ****************************************************************************
+- */
+-
+-#ifndef _TAPE3490_H
+-
+-#define _TAPE3490_H
+-
+-
+-typedef struct _tape3490_disc_data_t {
+- __u8 modeset_byte;
+-} tape3490_disc_data_t __attribute__ ((packed, aligned(8)));
+-tape_discipline_t * tape3490_init (int);
+-#endif // _TAPE3490_H
+=== drivers/s390/char/tape3590.h
+==================================================================
+--- drivers/s390/char/tape3590.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tape3590.h (/trunk/2.4.27) (revision 52)
+@@ -1 +0,0 @@
+-// tbd
+=== drivers/s390/char/hwc_con.c
+==================================================================
+--- drivers/s390/char/hwc_con.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/hwc_con.c (/trunk/2.4.27) (revision 52)
+@@ -1,89 +0,0 @@
+-/*
+- * drivers/s390/char/hwc_con.c
+- * HWC line mode console driver
+- *
+- * S390 version
+- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+- * Author(s): Martin Peschke <mpeschke at de.ibm.com>
+- */
+-
+-#include <linux/config.h>
+-#include <linux/kernel.h>
+-#include <linux/major.h>
+-#include <linux/errno.h>
+-#include <linux/kdev_t.h>
+-#include <linux/string.h>
+-#include <linux/console.h>
+-#include <linux/fs.h>
+-#include <linux/init.h>
+-
+-#include "hwc_rw.h"
+-
+-#ifdef CONFIG_HWC_CONSOLE
+-
+-#define hwc_console_major 4
+-#define hwc_console_minor 64
+-#define hwc_console_name "console"
+-
+-void hwc_console_write (struct console *, const char *, unsigned int);
+-kdev_t hwc_console_device (struct console *);
+-void hwc_console_unblank (void);
+-
+-#define HWC_CON_PRINT_HEADER "hwc console driver: "
+-
+-struct console hwc_console = {
+- name: hwc_console_name,
+- write: hwc_console_write,
+- device: hwc_console_device,
+- unblank:hwc_console_unblank,
+- flags: CON_PRINTBUFFER,
+-};
+-
+-void
+-hwc_console_write (
+- struct console *console,
+- const char *message,
+- unsigned int count)
+-{
+-
+- if (console->device (console) != hwc_console.device (&hwc_console)) {
+-
+- hwc_printk (KERN_WARNING HWC_CON_PRINT_HEADER
+- "hwc_console_write() called with wrong "
+- "device number");
+- return;
+- }
+- hwc_write (0, message, count);
+-}
+-
+-kdev_t
+-hwc_console_device (struct console * c)
+-{
+- return MKDEV (hwc_console_major, hwc_console_minor);
+-}
+-
+-void
+-hwc_console_unblank (void)
+-{
+- hwc_unblank ();
+-}
+-
+-#endif
+-
+-void __init
+-hwc_console_init (void)
+-{
+- if (!MACHINE_HAS_HWC)
+- return;
+-
+- if (hwc_init () == 0) {
+-#ifdef CONFIG_HWC_CONSOLE
+-
+- if (CONSOLE_IS_HWC)
+- register_console (&hwc_console);
+-#endif
+- } else
+- panic (HWC_CON_PRINT_HEADER "hwc initialisation failed !");
+-
+- return;
+-}
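hwc_console_device() above identifies the console as major 4, minor 64 via MKDEV. Under the 2.4-era kdev_t layout that packing is an 8:8 split, so it reduces to shifts and masks; a small stand-alone model (macro names invented to avoid clashing with the kernel's own):

    #include <stdio.h>

    /* Assumes the 2.4-era 8-bit major / 8-bit minor encoding. */
    #define MODEL_MKDEV(ma, mi) (((ma) << 8) | (mi))
    #define MODEL_MAJOR(dev)    (((dev) >> 8) & 0xff)
    #define MODEL_MINOR(dev)    ((dev) & 0xff)

    int main(void)
    {
        unsigned int dev = MODEL_MKDEV(4, 64);  /* TTY major, minor 64 */

        printf("dev=0x%04x major=%u minor=%u\n",
               dev, MODEL_MAJOR(dev), MODEL_MINOR(dev));
        return 0;
    }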
+=== drivers/s390/char/hwc_tty.c
+==================================================================
+--- drivers/s390/char/hwc_tty.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/hwc_tty.c (/trunk/2.4.27) (revision 52)
+@@ -1,273 +0,0 @@
+-/*
+- * drivers/s390/char/hwc_tty.c
+- * HWC line mode terminal driver.
+- *
+- * S390 version
+- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+- * Author(s): Martin Peschke <mpeschke at de.ibm.com>
+- *
+- * Thanks to Martin Schwidefsky.
+- */
+-
+-#include <linux/config.h>
+-#include <linux/major.h>
+-#include <linux/termios.h>
+-#include <linux/tty.h>
+-#include <linux/tty_driver.h>
+-#include <linux/sched.h>
+-#include <linux/mm.h>
+-#include <linux/devfs_fs_kernel.h>
+-#include <linux/init.h>
+-
+-#include <asm/uaccess.h>
+-
+-#include "hwc_rw.h"
+-#include "ctrlchar.h"
+-
+-#define HWC_TTY_PRINT_HEADER "hwc tty driver: "
+-
+-#define HWC_TTY_BUF_SIZE 512
+-
+-typedef struct {
+-
+- struct tty_struct *tty;
+-
+- unsigned char buf[HWC_TTY_BUF_SIZE];
+-
+- unsigned short int buf_count;
+-
+- spinlock_t lock;
+-
+- hwc_high_level_calls_t calls;
+-} hwc_tty_data_struct;
+-
+-static hwc_tty_data_struct hwc_tty_data =
+-{ /* NULL/0 */ };
+-static struct tty_driver hwc_tty_driver;
+-static struct tty_struct *hwc_tty_table[1];
+-static struct termios *hwc_tty_termios[1];
+-static struct termios *hwc_tty_termios_locked[1];
+-static int hwc_tty_refcount = 0;
+-
+-extern struct termios tty_std_termios;
+-
+-void hwc_tty_wake_up (void);
+-void hwc_tty_input (unsigned char *, unsigned int);
+-
+-static int
+-hwc_tty_open (struct tty_struct *tty,
+- struct file *filp)
+-{
+-
+- if (MINOR (tty->device) - tty->driver.minor_start)
+- return -ENODEV;
+-
+- tty->driver_data = &hwc_tty_data;
+- hwc_tty_data.buf_count = 0;
+- hwc_tty_data.tty = tty;
+- tty->low_latency = 0;
+-
+- hwc_tty_data.calls.wake_up = hwc_tty_wake_up;
+- hwc_tty_data.calls.move_input = hwc_tty_input;
+- hwc_register_calls (&(hwc_tty_data.calls));
+-
+- return 0;
+-}
+-
+-static void
+-hwc_tty_close (struct tty_struct *tty,
+- struct file *filp)
+-{
+- if (MINOR (tty->device) != tty->driver.minor_start) {
+- printk (KERN_WARNING HWC_TTY_PRINT_HEADER
+- "do not close hwc tty because of wrong device number");
+- return;
+- }
+- if (tty->count > 1)
+- return;
+-
+- hwc_tty_data.tty = NULL;
+-
+- hwc_unregister_calls (&(hwc_tty_data.calls));
+-}
+-
+-static int
+-hwc_tty_write_room (struct tty_struct *tty)
+-{
+- int retval;
+-
+- retval = hwc_write_room (IN_BUFS_TOTAL);
+- return retval;
+-}
+-
+-static int
+-hwc_tty_write (struct tty_struct *tty,
+- int from_user,
+- const unsigned char *buf,
+- int count)
+-{
+- int retval;
+-
+- if (hwc_tty_data.buf_count > 0) {
+- hwc_write (0, hwc_tty_data.buf, hwc_tty_data.buf_count);
+- hwc_tty_data.buf_count = 0;
+- }
+- retval = hwc_write (from_user, buf, count);
+- return retval;
+-}
+-
+-static void
+-hwc_tty_put_char (struct tty_struct *tty,
+- unsigned char ch)
+-{
+- unsigned long flags;
+-
+- spin_lock_irqsave (&hwc_tty_data.lock, flags);
+- if (hwc_tty_data.buf_count >= HWC_TTY_BUF_SIZE) {
+- hwc_write (0, hwc_tty_data.buf, hwc_tty_data.buf_count);
+- hwc_tty_data.buf_count = 0;
+- }
+- hwc_tty_data.buf[hwc_tty_data.buf_count] = ch;
+- hwc_tty_data.buf_count++;
+- spin_unlock_irqrestore (&hwc_tty_data.lock, flags);
+-}
+-
+-static void
+-hwc_tty_flush_chars (struct tty_struct *tty)
+-{
+- unsigned long flags;
+-
+- spin_lock_irqsave (&hwc_tty_data.lock, flags);
+- hwc_write (0, hwc_tty_data.buf, hwc_tty_data.buf_count);
+- hwc_tty_data.buf_count = 0;
+- spin_unlock_irqrestore (&hwc_tty_data.lock, flags);
+-}
+-
+-static int
+-hwc_tty_chars_in_buffer (struct tty_struct *tty)
+-{
+- int retval;
+-
+- retval = hwc_chars_in_buffer (IN_BUFS_TOTAL);
+- return retval;
+-}
+-
+-static void
+-hwc_tty_flush_buffer (struct tty_struct *tty)
+-{
+- hwc_tty_wake_up ();
+-}
+-
+-static int
+-hwc_tty_ioctl (
+- struct tty_struct *tty,
+- struct file *file,
+- unsigned int cmd,
+- unsigned long arg)
+-{
+- if (tty->flags & (1 << TTY_IO_ERROR))
+- return -EIO;
+-
+- return hwc_ioctl (cmd, arg);
+-}
+-
+-void
+-hwc_tty_wake_up (void)
+-{
+- if (hwc_tty_data.tty == NULL)
+- return;
+- if ((hwc_tty_data.tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+- hwc_tty_data.tty->ldisc.write_wakeup)
+- (hwc_tty_data.tty->ldisc.write_wakeup) (hwc_tty_data.tty);
+- wake_up_interruptible (&hwc_tty_data.tty->write_wait);
+-}
+-
+-void
+-hwc_tty_input (unsigned char *buf, unsigned int count)
+-{
+- struct tty_struct *tty = hwc_tty_data.tty;
+-
+- if (tty != NULL) {
+- char *cchar;
+- if ((cchar = ctrlchar_handle (buf, count, tty))) {
+- if (cchar == (char *) -1)
+- return;
+- tty->flip.count++;
+- *tty->flip.flag_buf_ptr++ = TTY_NORMAL;
+- *tty->flip.char_buf_ptr++ = *cchar;
+- } else {
+-
+- memcpy (tty->flip.char_buf_ptr, buf, count);
+- if (count < 2 || (
+- strncmp (buf + count - 2, "^n", 2) &&
+- strncmp (buf + count - 2, "\0252n", 2))) {
+- tty->flip.char_buf_ptr[count] = '\n';
+- count++;
+- } else
+- count -= 2;
+- memset (tty->flip.flag_buf_ptr, TTY_NORMAL, count);
+- tty->flip.char_buf_ptr += count;
+- tty->flip.flag_buf_ptr += count;
+- tty->flip.count += count;
+- }
+- tty_flip_buffer_push (tty);
+- hwc_tty_wake_up ();
+- }
+-}
+-
+-void
+-hwc_tty_init (void)
+-{
+- if (!CONSOLE_IS_HWC)
+- return;
+-
+- ctrlchar_init ();
+-
+- memset (&hwc_tty_driver, 0, sizeof (struct tty_driver));
+- memset (&hwc_tty_data, 0, sizeof (hwc_tty_data_struct));
+- hwc_tty_driver.magic = TTY_DRIVER_MAGIC;
+- hwc_tty_driver.driver_name = "tty_hwc";
+- hwc_tty_driver.name = "ttyS";
+- hwc_tty_driver.name_base = 0;
+- hwc_tty_driver.major = TTY_MAJOR;
+- hwc_tty_driver.minor_start = 64;
+- hwc_tty_driver.num = 1;
+- hwc_tty_driver.type = TTY_DRIVER_TYPE_SYSTEM;
+- hwc_tty_driver.subtype = SYSTEM_TYPE_TTY;
+- hwc_tty_driver.init_termios = tty_std_termios;
+- hwc_tty_driver.init_termios.c_iflag = IGNBRK | IGNPAR;
+- hwc_tty_driver.init_termios.c_oflag = ONLCR;
+- hwc_tty_driver.init_termios.c_lflag = ISIG | ECHO;
+- hwc_tty_driver.flags = TTY_DRIVER_REAL_RAW;
+- hwc_tty_driver.refcount = &hwc_tty_refcount;
+-
+- hwc_tty_driver.table = hwc_tty_table;
+- hwc_tty_driver.termios = hwc_tty_termios;
+- hwc_tty_driver.termios_locked = hwc_tty_termios_locked;
+-
+- hwc_tty_driver.open = hwc_tty_open;
+- hwc_tty_driver.close = hwc_tty_close;
+- hwc_tty_driver.write = hwc_tty_write;
+- hwc_tty_driver.put_char = hwc_tty_put_char;
+- hwc_tty_driver.flush_chars = hwc_tty_flush_chars;
+- hwc_tty_driver.write_room = hwc_tty_write_room;
+- hwc_tty_driver.chars_in_buffer = hwc_tty_chars_in_buffer;
+- hwc_tty_driver.flush_buffer = hwc_tty_flush_buffer;
+- hwc_tty_driver.ioctl = hwc_tty_ioctl;
+-
+- hwc_tty_driver.throttle = NULL;
+- hwc_tty_driver.unthrottle = NULL;
+- hwc_tty_driver.send_xchar = NULL;
+- hwc_tty_driver.set_termios = NULL;
+- hwc_tty_driver.set_ldisc = NULL;
+- hwc_tty_driver.stop = NULL;
+- hwc_tty_driver.start = NULL;
+- hwc_tty_driver.hangup = NULL;
+- hwc_tty_driver.break_ctl = NULL;
+- hwc_tty_driver.wait_until_sent = NULL;
+- hwc_tty_driver.read_proc = NULL;
+- hwc_tty_driver.write_proc = NULL;
+-
+- if (tty_register_driver (&hwc_tty_driver))
+- panic ("Couldn't register hwc_tty driver\n");
+-}
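hwc_tty_put_char() and hwc_tty_flush_chars() above coalesce writes: single characters accumulate in a 512-byte buffer under a spinlock and are handed to hwc_write() in one call when the buffer fills or a flush is requested. A user-space model of that locking and flush logic, with a pthread mutex standing in for the spinlock and invented names throughout:

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    #define BUF_SIZE 512

    static char buf[BUF_SIZE];
    static size_t buf_count;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void backend_write(const char *data, size_t len)
    {
        fwrite(data, 1, len, stdout);  /* stands in for hwc_write() */
    }

    static void model_put_char(char ch)
    {
        pthread_mutex_lock(&lock);
        if (buf_count >= BUF_SIZE) {   /* buffer full: flush before storing */
            backend_write(buf, buf_count);
            buf_count = 0;
        }
        buf[buf_count++] = ch;
        pthread_mutex_unlock(&lock);
    }

    static void model_flush_chars(void)
    {
        pthread_mutex_lock(&lock);
        backend_write(buf, buf_count);
        buf_count = 0;
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        const char *msg = "hello, line-mode console\n";
        for (size_t i = 0; i < strlen(msg); i++)
            model_put_char(msg[i]);
        model_flush_chars();
        return 0;
    }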
+=== drivers/s390/char/tapechar.c
+==================================================================
+--- drivers/s390/char/tapechar.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tapechar.c (/trunk/2.4.27) (revision 52)
+@@ -1,764 +0,0 @@
+-
+-/***************************************************************************
+- *
+- * drivers/s390/char/tapechar.c
+- * character device frontend for tape device driver
+- *
+- * S390 and zSeries version
+- * Copyright (C) 2001 IBM Corporation
+- * Author(s): Carsten Otte <cotte at de.ibm.com>
+- * Tuan Ngo-Anh <ngoanh at de.ibm.com>
+- *
+- *
+- ****************************************************************************
+- */
+-
+-#include "tapedefs.h"
+-#include <linux/config.h>
+-#include <linux/version.h>
+-#include <linux/types.h>
+-#include <linux/proc_fs.h>
+-#include <asm/ccwcache.h> /* CCW allocations */
+-#include <asm/s390dyn.h>
+-#include <asm/debug.h>
+-#include <linux/mtio.h>
+-#include <asm/uaccess.h>
+-#include <linux/compatmac.h>
+-#ifdef MODULE
+-#define __NO_VERSION__
+-#include <linux/module.h>
+-#endif
+-#include "tape.h"
+-#include "tapechar.h"
+-
+-#define PRINTK_HEADER "TCHAR:"
+-
+-/*
+- * file operation structure for tape devices
+- */
+-static struct file_operations tape_fops =
+-{
+- // owner : THIS_MODULE,
+- llseek:NULL, /* lseek - default */
+- read:tape_read, /* read */
+- write:tape_write, /* write */
+- readdir:NULL, /* readdir - bad */
+- poll:NULL, /* poll */
+- ioctl:tape_ioctl, /* ioctl */
+- mmap:NULL, /* mmap */
+- open:tape_open, /* open */
+- flush:NULL, /* flush */
+- release:tape_release, /* release */
+- fsync:NULL, /* fsync */
+- fasync:NULL, /* fasync */
+- lock:NULL,
+-};
+-
+-int tape_major = TAPE_MAJOR;
+-
+-#ifdef CONFIG_DEVFS_FS
+-void
+-tapechar_mkdevfstree (tape_info_t* ti) {
+- ti->devfs_char_dir=devfs_mk_dir (ti->devfs_dir, "char", ti);
+- ti->devfs_nonrewinding=devfs_register(ti->devfs_char_dir, "nonrewinding",
+- DEVFS_FL_DEFAULT,tape_major,
+- ti->nor_minor, TAPECHAR_DEFAULTMODE,
+- &tape_fops, ti);
+- ti->devfs_rewinding=devfs_register(ti->devfs_char_dir, "rewinding",
+- DEVFS_FL_DEFAULT, tape_major, ti->rew_minor,
+- TAPECHAR_DEFAULTMODE, &tape_fops, ti);
+-}
+-
+-void
+-tapechar_rmdevfstree (tape_info_t* ti) {
+- devfs_unregister(ti->devfs_nonrewinding);
+- devfs_unregister(ti->devfs_rewinding);
+- devfs_unregister(ti->devfs_char_dir);
+-}
+-#endif
+-
+-void
+-tapechar_setup (tape_info_t * ti)
+-{
+-#ifdef CONFIG_DEVFS_FS
+- tapechar_mkdevfstree(ti);
+-#endif
+-}
+-
+-void
+-tapechar_init (void)
+-{
+- int result;
+- tape_frontend_t *charfront,*temp;
+- tape_info_t* ti;
+-
+- tape_init();
+-
+- /* Register the tape major number to the kernel */
+-#ifdef CONFIG_DEVFS_FS
+- result = devfs_register_chrdev (tape_major, "tape", &tape_fops);
+-#else
+- result = register_chrdev (tape_major, "tape", &tape_fops);
+-#endif
+-
+- if (result < 0) {
+- PRINT_WARN (KERN_ERR "tape: can't get major %d\n", tape_major);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"c:initfail");
+- debug_text_event (tape_debug_area,3,"regchrfail");
+-#endif /* TAPE_DEBUG */
+- panic ("no major number available for tape char device");
+- }
+- if (tape_major == 0)
+- tape_major = result; /* accept dynamic major number */
+- PRINT_WARN (KERN_ERR " tape gets major %d for character device\n", result);
+- charfront = kmalloc (sizeof (tape_frontend_t), GFP_KERNEL);
+- if (charfront == NULL) {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"c:initfail");
+- debug_text_event (tape_debug_area,3,"no mem");
+-#endif /* TAPE_DEBUG */
+- panic ("no memory available for tape char device frontend");
+- }
+- charfront->device_setup = tapechar_setup;
+-#ifdef CONFIG_DEVFS_FS
+- charfront->mkdevfstree = tapechar_mkdevfstree;
+- charfront->rmdevfstree = tapechar_rmdevfstree;
+-#endif
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"c:init ok");
+-#endif /* TAPE_DEBUG */
+- charfront->next=NULL;
+- if (first_frontend==NULL) {
+- first_frontend=charfront;
+- } else {
+- temp=first_frontend;
+- while (temp->next!=NULL)
+- temp=temp->next;
+- temp->next=charfront;
+- }
+- ti=first_tape_info;
+- while (ti!=NULL) {
+- tapechar_setup(ti);
+- ti=ti->next;
+- }
+-}
+-
+-void
+-tapechar_uninit (void)
+-{
+- unregister_chrdev (tape_major, "tape");
+-}
+-
+-/*
+- * Tape device read function
+- */
+-ssize_t
+-tape_read (struct file *filp, char *data, size_t count, loff_t * ppos)
+-{
+- long lockflags;
+- tape_info_t *ti;
+- size_t block_size;
+- ccw_req_t *cqr;
+- int rc;
+- loff_t pos = *ppos;
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:read");
+-#endif /* TAPE_DEBUG */
+- ti = first_tape_info;
+- while ((ti != NULL) && (ti->rew_filp != filp) && (ti->nor_filp != filp))
+- ti = (tape_info_t *) ti->next;
+- if (ti == NULL) {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:nodev");
+-#endif /* TAPE_DEBUG */
+- return -ENODEV;
+- }
+- if (ppos != &filp->f_pos) {
+- /* "A request was outside the capabilities of the device." */
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:ppos wrong");
+-#endif /* TAPE_DEBUG */
+- return -EOVERFLOW; /* errno=75 Value too large for def. data type */
+- }
+- if (ti->block_size == 0) {
+- block_size = count;
+- } else {
+- block_size = ti->block_size;
+- }
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:nbytes:");
+- debug_int_event (tape_debug_area,6,block_size);
+-#endif
+- cqr = ti->discipline->read_block (data, block_size, ti);
+- if (!cqr) {
+- return -ENOBUFS;
+- }
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->cqr = cqr;
+- ti->wanna_wakeup=0;
+- rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options);
+- if (rc) {
+- tapestate_set(ti,TS_IDLE);
+- kfree (cqr);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- return rc;
+- }
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- wait_event (ti->wq,ti->wanna_wakeup);
+- ti->cqr = NULL;
+- ti->discipline->free_read_block (cqr, ti);
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- if (tapestate_get (ti) == TS_FAILED) {
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- return ti->rc;
+- }
+- if (tapestate_get (ti) == TS_NOT_OPER) {
+- ti->blk_minor=ti->rew_minor=ti->nor_minor=-1;
+- ti->devinfo.irq=-1;
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq,lockflags);
+- return -ENODEV;
+- }
+- if (tapestate_get (ti) != TS_DONE) {
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- return -EIO;
+- }
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:rbytes:");
+- debug_int_event (tape_debug_area,6,block_size - ti->devstat.rescnt);
+-#endif /* TAPE_DEBUG */
+- *ppos = pos + (block_size - ti->devstat.rescnt);
+- return block_size - ti->devstat.rescnt;
+-}
+-
+-/*
+- * Tape device write function
+- */
+-ssize_t
+-tape_write (struct file *filp, const char *data, size_t count, loff_t * ppos)
+-{
+- long lockflags;
+- tape_info_t *ti;
+- size_t block_size;
+- ccw_req_t *cqr;
+- int nblocks, i, rc;
+- size_t written = 0;
+- loff_t pos = *ppos;
+-
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:write");
+-#endif
+- ti = first_tape_info;
+- while ((ti != NULL) && (ti->nor_filp != filp) && (ti->rew_filp != filp))
+- ti = (tape_info_t *) ti->next;
+- if (ti == NULL)
+- return -ENODEV;
+- if (ppos != &filp->f_pos) {
+- /* "A request was outside the capabilities of the device." */
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:ppos wrong");
+-#endif
+- return -EOVERFLOW; /* errno=75 Value too large for def. data type */
+- }
+- if ((ti->block_size != 0) && (count % ti->block_size != 0))
+- return -EIO;
+- if (ti->block_size == 0) {
+- block_size = count;
+- nblocks = 1;
+- } else {
+- block_size = ti->block_size;
+- nblocks = count / (ti->block_size);
+- }
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:nbytes:");
+- debug_int_event (tape_debug_area,6,block_size);
+- debug_text_event (tape_debug_area,6,"c:nblocks:");
+- debug_int_event (tape_debug_area,6,nblocks);
+-#endif
+- for (i = 0; i < nblocks; i++) {
+- cqr = ti->discipline->write_block (data + i * block_size, block_size, ti);
+- if (!cqr) {
+- return -ENOBUFS;
+- }
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->cqr = cqr;
+- ti->wanna_wakeup=0;
+- rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- wait_event_interruptible (ti->wq,ti->wanna_wakeup);
+- ti->cqr = NULL;
+- ti->discipline->free_write_block (cqr, ti);
+- if (signal_pending (current)) {
+- tapestate_set (ti, TS_IDLE);
+- return -ERESTARTSYS;
+- }
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- if (tapestate_get (ti) == TS_FAILED) {
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- if ((ti->rc==-ENOSPC) && (i!=0))
+- return i*block_size;
+- return ti->rc;
+- }
+- if (tapestate_get (ti) == TS_NOT_OPER) {
+- ti->blk_minor=ti->rew_minor=ti->nor_minor=-1;
+- ti->devinfo.irq=-1;
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq,lockflags);
+- return -ENODEV;
+- }
+- if (tapestate_get (ti) != TS_DONE) {
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- return -EIO;
+- }
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:wbytes:");
+- debug_int_event (tape_debug_area,6,block_size - ti->devstat.rescnt);
+-#endif
+- written += block_size - ti->devstat.rescnt;
+- if (ti->devstat.rescnt > 0) {
+- *ppos = pos + written;
+- return written;
+- }
+- }
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:wtotal:");
+- debug_int_event (tape_debug_area,6,written);
+-#endif
+- *ppos = pos + written;
+- return written;
+-}
+-
+-static int
+-tape_mtioctop (struct file *filp, short mt_op, int mt_count)
+-{
+- tape_info_t *ti;
+- ccw_req_t *cqr = NULL;
+- int rc;
+- long lockflags;
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:mtio");
+- debug_text_event (tape_debug_area,6,"c:ioop:");
+- debug_int_event (tape_debug_area,6,mt_op);
+- debug_text_event (tape_debug_area,6,"c:arg:");
+- debug_int_event (tape_debug_area,6,mt_count);
+-#endif
+- ti = first_tape_info;
+- while ((ti != NULL) && (ti->rew_filp != filp) && (ti->nor_filp != filp))
+- ti = (tape_info_t *) ti->next;
+- if (ti == NULL)
+- return -ENODEV;
+- switch (mt_op) {
+- case MTREW: // rewind
+-
+- cqr = ti->discipline->mtrew (ti, mt_count);
+- break;
+- case MTOFFL: // put drive offline
+-
+- cqr = ti->discipline->mtoffl (ti, mt_count);
+- break;
+- case MTUNLOAD: // unload the tape
+-
+- cqr = ti->discipline->mtunload (ti, mt_count);
+- break;
+- case MTWEOF: // write tapemark
+-
+- cqr = ti->discipline->mtweof (ti, mt_count);
+- break;
+- case MTFSF: // forward space file
+-
+- cqr = ti->discipline->mtfsf (ti, mt_count);
+- break;
+- case MTBSF: // backward space file
+-
+- cqr = ti->discipline->mtbsf (ti, mt_count);
+- break;
+- case MTFSFM: // forward space file, stop at BOT side
+-
+- cqr = ti->discipline->mtfsfm (ti, mt_count);
+- break;
+- case MTBSFM: // backward space file, stop at BOT side
+-
+- cqr = ti->discipline->mtbsfm (ti, mt_count);
+- break;
+- case MTFSR: // forward space record
+-
+- cqr = ti->discipline->mtfsr (ti, mt_count);
+- break;
+- case MTBSR: // backward space record
+-
+- cqr = ti->discipline->mtbsr (ti, mt_count);
+- break;
+- case MTNOP:
+- cqr = ti->discipline->mtnop (ti, mt_count);
+- break;
+- case MTEOM: // position at the end of medium
+-
+- case MTRETEN: // retension the tape
+-
+- cqr = ti->discipline->mteom (ti, mt_count);
+- break;
+- case MTERASE:
+- cqr = ti->discipline->mterase (ti, mt_count);
+- break;
+- case MTSETDENSITY:
+- cqr = ti->discipline->mtsetdensity (ti, mt_count);
+- break;
+- case MTSEEK:
+- cqr = ti->discipline->mtseek (ti, mt_count);
+- break;
+- case MTSETDRVBUFFER:
+- cqr = ti->discipline->mtsetdrvbuffer (ti, mt_count);
+- break;
+- case MTLOCK:
+- cqr = ti->discipline->mtlock (ti, mt_count);
+- break;
+- case MTUNLOCK:
+- cqr = ti->discipline->mtunlock (ti, mt_count);
+- break;
+- case MTLOAD:
+- cqr = ti->discipline->mtload (ti, mt_count);
+- if (cqr!=NULL) break; // if the backend driver has a load function -> use it
+- // if no medium is present, wait until one is inserted
+- if (ti->medium_is_unloaded) {
+- wait_event_interruptible (ti->wq,ti->medium_is_unloaded==0);
+- }
+- return 0;
+- case MTCOMPRESSION:
+- cqr = ti->discipline->mtcompression (ti, mt_count);
+- break;
+- case MTSETPART:
+- cqr = ti->discipline->mtsetpart (ti, mt_count);
+- break;
+- case MTMKPART:
+- cqr = ti->discipline->mtmkpart (ti, mt_count);
+- break;
+- case MTTELL: // return the current block number
+-
+- cqr = ti->discipline->mttell (ti, mt_count);
+- break;
+- case MTSETBLK:
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->block_size = mt_count;
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:setblk:");
+- debug_int_event (tape_debug_area,6,mt_count);
+-#endif
+- return 0;
+- case MTRESET:
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = ti->userbuf = NULL;
+- tapestate_set (ti, TS_IDLE);
+- ti->block_size = 0;
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:devreset:");
+- debug_int_event (tape_debug_area,6,ti->blk_minor);
+-#endif
+- return 0;
+- default:
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:inv.mtio");
+-#endif
+- return -EINVAL;
+- }
+- if (cqr == NULL) {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:ccwg fail");
+-#endif
+- return -ENOSPC;
+- }
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->cqr = cqr;
+- ti->wanna_wakeup=0;
+- rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- wait_event_interruptible (ti->wq,ti->wanna_wakeup);
+- ti->cqr = NULL;
+- if (ti->kernbuf != NULL) {
+- kfree (ti->kernbuf);
+- ti->kernbuf = NULL;
+- }
+- tape_free_request (cqr);
+- // if medium was unloaded, update the corresponding variable.
+- switch (mt_op) {
+- case MTOFFL:
+- case MTUNLOAD:
+- ti->medium_is_unloaded=1;
+- }
+- if (signal_pending (current)) {
+- tapestate_set (ti, TS_IDLE);
+- return -ERESTARTSYS;
+- }
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- if (((mt_op == MTEOM) || (mt_op == MTRETEN)) && (tapestate_get (ti) == TS_FAILED))
+- tapestate_set (ti, TS_DONE);
+- if (tapestate_get (ti) == TS_FAILED) {
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- return ti->rc;
+- }
+- if (tapestate_get (ti) == TS_NOT_OPER) {
+- ti->blk_minor=ti->rew_minor=ti->nor_minor=-1;
+- ti->devinfo.irq=-1;
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq,lockflags);
+- return -ENODEV;
+- }
+- if (tapestate_get (ti) != TS_DONE) {
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- return -EIO;
+- }
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- switch (mt_op) {
+- case MTRETEN: //need to rewind the tape after moving to eom
+-
+- return tape_mtioctop (filp, MTREW, 1);
+- case MTFSFM: //need to skip back over the filemark
+-
+- return tape_mtioctop (filp, MTBSFM, 1);
+- case MTBSF: //need to skip forward over the filemark
+-
+- return tape_mtioctop (filp, MTFSF, 1);
+- }
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:mtio done");
+-#endif
+- return 0;
+-}
+-
+-/*
+- * Tape device io controls.
+- */
+-int
+-tape_ioctl (struct inode *inode, struct file *filp,
+- unsigned int cmd, unsigned long arg)
+-{
+- long lockflags;
+- tape_info_t *ti;
+- ccw_req_t *cqr;
+- struct mtop op; /* structure for MTIOCTOP */
+- struct mtpos pos; /* structure for MTIOCPOS */
+- struct mtget get;
+-
+- int rc;
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:ioct");
+-#endif
+- ti = first_tape_info;
+- while ((ti != NULL) &&
+- (ti->rew_minor != MINOR (inode->i_rdev)) &&
+- (ti->nor_minor != MINOR (inode->i_rdev)))
+- ti = (tape_info_t *) ti->next;
+- if (ti == NULL) {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:nodev");
+-#endif
+- return -ENODEV;
+- }
+- // check for discipline ioctl overloading
+- if ((rc = ti->discipline->discipline_ioctl_overload (inode, filp, cmd, arg))
+- != -EINVAL) {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:ioverloa");
+-#endif
+- return rc;
+- }
+-
+- switch (cmd) {
+- case MTIOCTOP: /* tape op command */
+- if (copy_from_user (&op, (char *) arg, sizeof (struct mtop))) {
+- return -EFAULT;
+- }
+- return (tape_mtioctop (filp, op.mt_op, op.mt_count));
+- case MTIOCPOS: /* query tape position */
+- cqr = ti->discipline->mttell (ti, 0);
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->cqr = cqr;
+- ti->wanna_wakeup=0;
+- do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- wait_event_interruptible (ti->wq,ti->wanna_wakeup);
+- pos.mt_blkno = ti->rc;
+- ti->cqr = NULL;
+- if (ti->kernbuf != NULL) {
+- kfree (ti->kernbuf);
+- ti->kernbuf = NULL;
+- }
+- tape_free_request (cqr);
+- if (signal_pending (current)) {
+- tapestate_set (ti, TS_IDLE);
+- return -ERESTARTSYS;
+- }
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- if (copy_to_user ((char *) arg, &pos, sizeof (struct mtpos)))
+- return -EFAULT;
+- return 0;
+- case MTIOCGET:
+- get.mt_erreg = ti->rc;
+- cqr = ti->discipline->mttell (ti, 0);
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->cqr = cqr;
+- ti->wanna_wakeup=0;
+- do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- wait_event_interruptible (ti->wq,ti->wanna_wakeup);
+- get.mt_blkno = ti->rc;
+- get.mt_fileno = 0;
+- get.mt_type = MT_ISUNKNOWN;
+- get.mt_resid = ti->devstat.rescnt;
+- get.mt_dsreg = ti->devstat.ii.sense.data[3];
+- get.mt_gstat = 0;
+- if (ti->devstat.ii.sense.data[1] & 0x08)
+- get.mt_gstat |= GMT_BOT (1); // BOT
+-
+- if (ti->devstat.ii.sense.data[1] & 0x02)
+- get.mt_gstat |= GMT_WR_PROT (1); // write protected
+-
+- if (ti->devstat.ii.sense.data[1] & 0x40)
+- get.mt_gstat |= GMT_ONLINE (1); // drive online
+-
+- ti->cqr = NULL;
+- if (ti->kernbuf != NULL) {
+- kfree (ti->kernbuf);
+- ti->kernbuf = NULL;
+- }
+- tape_free_request (cqr);
+- if (signal_pending (current)) {
+- tapestate_set (ti, TS_IDLE);
+- return -ERESTARTSYS;
+- }
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- if (copy_to_user ((char *) arg, &get, sizeof (struct mtget)))
+- return -EFAULT;
+- return 0;
+- default:
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"c:ioct inv");
+-#endif
+- return -EINVAL;
+- }
+-}
+-
+-/*
+- * Tape device open function.
+- */
+-int
+-tape_open (struct inode *inode, struct file *filp)
+-{
+- tape_info_t *ti;
+- kdev_t dev;
+- long lockflags;
+-
+- inode = filp->f_dentry->d_inode;
+- ti = first_tape_info;
+- while ((ti != NULL) &&
+- (ti->rew_minor != MINOR (inode->i_rdev)) &&
+- (ti->nor_minor != MINOR (inode->i_rdev)))
+- ti = (tape_info_t *) ti->next;
+- if (ti == NULL)
+- return -ENODEV;
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:open:");
+- debug_int_event (tape_debug_area,6,ti->blk_minor);
+-#endif
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- if (tapestate_get (ti) != TS_UNUSED) {
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:dbusy");
+-#endif
+- return -EBUSY;
+- }
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-
+- dev = MKDEV (tape_major, MINOR (inode->i_rdev)); /* Get the device */
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- if (ti->rew_minor == MINOR (inode->i_rdev))
+- ti->rew_filp = filp; /* save for later reference */
+- else
+- ti->nor_filp = filp;
+- filp->private_data = ti; /* save the dev.info for later reference */
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-
+-#ifdef MODULE
+- MOD_INC_USE_COUNT;
+-#endif /* MODULE */
+- return 0;
+-}
+-
+-/*
+- * Tape device release function.
+- */
+-int
+-tape_release (struct inode *inode, struct file *filp)
+-{
+- long lockflags;
+- tape_info_t *ti,*lastti;
+- ccw_req_t *cqr = NULL;
+- int rc = 0;
+-
+- ti = first_tape_info;
+- while ((ti != NULL) && (ti->rew_minor != MINOR (inode->i_rdev)) && (ti->nor_minor != MINOR (inode->i_rdev)))
+- ti = (tape_info_t *) ti->next;
+- if ((ti != NULL) && (tapestate_get (ti) == TS_NOT_OPER)) {
+- if (ti==first_tape_info) {
+- first_tape_info=ti->next;
+- } else {
+- lastti=first_tape_info;
+- while (lastti->next!=ti) lastti=lastti->next;
+- lastti->next=ti->next;
+- }
+- kfree(ti);
+- goto out;
+- }
+- if ((ti == NULL) || (tapestate_get (ti) != TS_IDLE)) {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:notidle!");
+-#endif
+- rc = -ENXIO; /* error in tape_release */
+- goto out;
+- }
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:release:");
+- debug_int_event (tape_debug_area,6,ti->blk_minor);
+-#endif
+- if (ti->rew_minor == MINOR (inode->i_rdev)) {
+- cqr = ti->discipline->mtrew (ti, 1);
+- if (cqr != NULL) {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"c:rewrelea");
+-#endif
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- tapestate_set (ti, TS_REW_RELEASE_INIT);
+- ti->cqr = cqr;
+- ti->wanna_wakeup=0;
+- rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- wait_event (ti->wq,ti->wanna_wakeup);
+- ti->cqr = NULL;
+- tape_free_request (cqr);
+- }
+- }
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- tapestate_set (ti, TS_UNUSED);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-out:
+-#ifdef MODULE
+- MOD_DEC_USE_COUNT;
+-#endif /* MODULE */
+- return rc;
+-}
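Nearly every operation in tapechar.c above uses one synchronous handshake: take the IRQ lock, clear ti->wanna_wakeup, start the channel program with do_IO(), drop the lock, then sleep in wait_event() until the interrupt path sets the flag. Modelled in user space with a mutex and condition variable (invented names, not a kernel API):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
    static int wanna_wakeup;

    /* Plays the role of the channel-program interrupt handler. */
    static void *completion_path(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        wanna_wakeup = 1;
        pthread_cond_signal(&done);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t irq;

        pthread_mutex_lock(&lock);
        wanna_wakeup = 0;                 /* arm before starting the I/O */
        pthread_create(&irq, NULL, completion_path, NULL);
        while (!wanna_wakeup)             /* wait_event(ti->wq, ...) */
            pthread_cond_wait(&done, &lock);
        pthread_mutex_unlock(&lock);
        pthread_join(irq, NULL);
        puts("request complete");
        return 0;
    }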
+=== drivers/s390/char/tapechar.h
+==================================================================
+--- drivers/s390/char/tapechar.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tapechar.h (/trunk/2.4.27) (revision 52)
+@@ -1,34 +0,0 @@
+-
+-/***************************************************************************
+- *
+- * drivers/s390/char/tapechar.h
+- * character device frontend for tape device driver
+- *
+- * S390 and zSeries version
+- * Copyright (C) 2001 IBM Corporation
+- * Author(s): Carsten Otte <cotte at de.ibm.com>
+- * Tuan Ngo-Anh <ngoanh at de.ibm.com>
+- *
+- *
+- ****************************************************************************
+- */
+-
+-#ifndef TAPECHAR_H
+-#define TAPECHAR_H
+-#include <linux/config.h>
+-#define TAPECHAR_DEFAULTMODE 0020644
+-#define TAPE_MAJOR 0 /* get dynamic major since no major is officially defined for tape */
+-/*
+- * Prototypes for tape_fops
+- */
+-ssize_t tape_read(struct file *, char *, size_t, loff_t *);
+-ssize_t tape_write(struct file *, const char *, size_t, loff_t *);
+-int tape_ioctl(struct inode *,struct file *,unsigned int,unsigned long);
+-int tape_open (struct inode *,struct file *);
+-int tape_release (struct inode *,struct file *);
+-#ifdef CONFIG_DEVFS_FS
+-void tapechar_mkdevfstree (tape_info_t* ti);
+-#endif
+-void tapechar_init (void);
+-void tapechar_uninit (void);
+-#endif /* TAPECHAR_H */
+=== drivers/s390/char/tapedefs.h
+==================================================================
+--- drivers/s390/char/tapedefs.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tapedefs.h (/trunk/2.4.27) (revision 52)
+@@ -1,76 +0,0 @@
+-/***********************************************************************
+- * drivers/s390/char/tapedefs.h
+- * tape device driver for S/390 and zSeries tapes.
+- *
+- * S390 and zSeries version
+- * Copyright (C) 2001 IBM Corporation
+- * Author(s): Carsten Otte <cotte at de.ibm.com>
+- * Tuan Ngo-Anh <ngoanh at de.ibm.com>
+- *
+- *
+- ***********************************************************************
+- */
+-
+-/* Kernel Version Compatibility section */
+-#include <linux/version.h>
+-#include <linux/blkdev.h>
+-#include <linux/blk.h>
+-#include <asm/irq.h>
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,2,17))
+-#define TAPE_DEBUG // use s390 debug feature
+-#else
+-#undef TAPE_DEBUG // debug feature not supported by our 2.2.16 code
+-static inline void set_normalized_cda ( ccw1_t * cp, unsigned long address ) {
+- cp -> cda = address;
+-}
+-static inline void clear_normalized_cda ( ccw1_t * ccw ) {
+- ccw -> cda = 0;
+-}
+-#define BUG() PRINT_FATAL("tape390: CRITICAL INTERNAL ERROR OCCURRED. REPORT THIS BACK TO LINUX390 at DE.IBM.COM\n")
+-#endif
+-#define CONFIG_S390_TAPE_DYNAMIC // allow devices to be attached or detached on the fly
+-#define TAPEBLOCK_RETRIES 20 // number of retries when a block-dev request fails.
+-
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98))
+-#define INIT_BLK_DEV(d_major,d_request_fn,d_queue_fn,d_current) \
+-do { \
+- blk_dev[d_major].queue = d_queue_fn; \
+-} while(0)
+-static inline struct request *
+-tape_next_request( request_queue_t *queue )
+-{
+- return blkdev_entry_next_request(&queue->queue_head);
+-}
+-static inline void
+-tape_dequeue_request( request_queue_t * q, struct request *req )
+-{
+- blkdev_dequeue_request (req);
+-}
+-#else
+-#define s390_dev_info_t dev_info_t
+-typedef struct request *request_queue_t;
+-#ifndef init_waitqueue_head
+-#define init_waitqueue_head(x) do { *x = NULL; } while(0)
+-#endif
+-#define blk_init_queue(x,y) do {} while(0)
+-#define blk_queue_headactive(x,y) do {} while(0)
+-#define INIT_BLK_DEV(d_major,d_request_fn,d_queue_fn,d_current) \
+-do { \
+- blk_dev[d_major].request_fn = d_request_fn; \
+- blk_dev[d_major].queue = d_queue_fn; \
+- blk_dev[d_major].current_request = d_current; \
+-} while(0)
+-static inline struct request *
+-tape_next_request( request_queue_t *queue )
+-{
+- return *queue;
+-}
+-static inline void
+-tape_dequeue_request( request_queue_t * q, struct request *req )
+-{
+- *q = req->next;
+- req->next = NULL;
+-}
+-#endif
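tapedefs.h above switches between 2.2 and 2.4 interfaces by comparing LINUX_VERSION_CODE against KERNEL_VERSION(a,b,c), which packs the release triple into one integer so that plain integer comparison orders kernel versions. A stand-alone illustration (the macro body matches the familiar kernel definition; everything else is invented):

    #include <stdio.h>

    #define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

    int main(void)
    {
        unsigned int code = KERNEL_VERSION(2, 4, 27);

        if (code > KERNEL_VERSION(2, 2, 17))
            puts("newer interface: s390 debug feature available");
        else
            puts("older interface: fall back to local helpers");
        printf("version code: 0x%06x\n", code);
        return 0;
    }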
+=== drivers/s390/char/tapeblock.c
+==================================================================
+--- drivers/s390/char/tapeblock.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tapeblock.c (/trunk/2.4.27) (revision 52)
+@@ -1,593 +0,0 @@
+-
+-/***************************************************************************
+- *
+- * drivers/s390/char/tapeblock.c
+- * block device frontend for tape device driver
+- *
+- * S390 and zSeries version
+- * Copyright (C) 2001 IBM Corporation
+- * Author(s): Carsten Otte <cotte at de.ibm.com>
+- * Tuan Ngo-Anh <ngoanh at de.ibm.com>
+- *
+- *
+- ****************************************************************************
+- */
+-
+-#include "tapedefs.h"
+-#include <linux/config.h>
+-#include <linux/blkdev.h>
+-#include <linux/blk.h>
+-#include <linux/version.h>
+-#include <linux/interrupt.h>
+-#include <asm/ccwcache.h> /* CCW allocations */
+-#include <asm/debug.h>
+-#include <asm/s390dyn.h>
+-#include <linux/compatmac.h>
+-#ifdef MODULE
+-#define __NO_VERSION__
+-#include <linux/module.h>
+-#endif
+-#include "tape.h"
+-#include "tapeblock.h"
+-
+-#define PRINTK_HEADER "TBLOCK:"
+-
+-/*
+- * file operation structure for tape devices
+- */
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98))
+-static struct block_device_operations tapeblock_fops = {
+-#else
+-static struct file_operations tapeblock_fops = {
+-#endif
+- owner : THIS_MODULE,
+- open : tapeblock_open, /* open */
+- release : tapeblock_release, /* release */
+- };
+-
+-int tapeblock_major = 0;
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98))
+-static void tape_request_fn (request_queue_t * queue);
+-#else
+-static void tape_request_fn (void);
+-#endif
+-
+-static request_queue_t* tapeblock_getqueue (kdev_t kdev);
+-
+-#ifdef CONFIG_DEVFS_FS
+-void
+-tapeblock_mkdevfstree (tape_info_t* ti) {
+- ti->devfs_block_dir=devfs_mk_dir (ti->devfs_dir, "block", ti);
+- ti->devfs_disc=devfs_register(ti->devfs_block_dir, "disc",DEVFS_FL_DEFAULT,
+- tapeblock_major, ti->blk_minor,
+- TAPEBLOCK_DEFAULTMODE, &tapeblock_fops, ti);
+-}
+-
+-void
+-tapeblock_rmdevfstree (tape_info_t* ti) {
+- devfs_unregister(ti->devfs_disc);
+- devfs_unregister(ti->devfs_block_dir);
+-}
+-#endif
+-
+-void
+-tapeblock_setup(tape_info_t* ti) {
+- blk_size[tapeblock_major][ti->blk_minor]=0; // this will be detected
+- blksize_size[tapeblock_major][ti->blk_minor]=2048; // blocks are 2k by default.
+- hardsect_size[tapeblock_major][ti->blk_minor]=512;
+- blk_init_queue (&ti->request_queue, tape_request_fn);
+- blk_queue_headactive (&ti->request_queue, 0);
+-#ifdef CONFIG_DEVFS_FS
+- tapeblock_mkdevfstree(ti);
+-#endif
+-}
+-
+-int
+-tapeblock_init(void) {
+- int result;
+- tape_frontend_t* blkfront,*temp;
+- tape_info_t* ti;
+-
+- tape_init();
+- /* Register the tape major number to the kernel */
+-#ifdef CONFIG_DEVFS_FS
+- result = devfs_register_blkdev(tapeblock_major, "tBLK", &tapeblock_fops);
+-#else
+- result = register_blkdev(tapeblock_major, "tBLK", &tapeblock_fops);
+-#endif
+- if (result < 0) {
+- PRINT_WARN(KERN_ERR "tape: can't get major %d for block device\n", tapeblock_major);
+- panic ("cannot get major number for tape block device");
+- }
+- if (tapeblock_major == 0) tapeblock_major = result; /* accept dynamic major number*/
+- INIT_BLK_DEV(tapeblock_major,tape_request_fn,tapeblock_getqueue,NULL);
+- read_ahead[tapeblock_major]=TAPEBLOCK_READAHEAD;
+- PRINT_WARN(KERN_ERR " tape gets major %d for block device\n", result);
+- blk_size[tapeblock_major] = (int*) kmalloc (256*sizeof(int),GFP_ATOMIC);
+- memset(blk_size[tapeblock_major],0,256*sizeof(int));
+- blksize_size[tapeblock_major] = (int*) kmalloc (256*sizeof(int),GFP_ATOMIC);
+- memset(blksize_size[tapeblock_major],0,256*sizeof(int));
+- hardsect_size[tapeblock_major] = (int*) kmalloc (256*sizeof(int),GFP_ATOMIC);
+- memset(hardsect_size[tapeblock_major],0,256*sizeof(int));
+- max_sectors[tapeblock_major] = (int*) kmalloc (256*sizeof(int),GFP_ATOMIC);
+- memset(max_sectors[tapeblock_major],0,256*sizeof(int));
+- blkfront = kmalloc(sizeof(tape_frontend_t),GFP_KERNEL);
+- if (blkfront==NULL) panic ("no mem for tape block device structure");
+- blkfront->device_setup=tapeblock_setup;
+-#ifdef CONFIG_DEVFS_FS
+- blkfront->mkdevfstree = tapeblock_mkdevfstree;
+- blkfront->rmdevfstree = tapeblock_rmdevfstree;
+-#endif
+- blkfront->next=NULL;
+- if (first_frontend==NULL) {
+- first_frontend=blkfront;
+- } else {
+- temp=first_frontend;
+- while (temp->next!=NULL)
+- temp=temp->next;
+- temp->next=blkfront;
+- }
+- ti=first_tape_info;
+- while (ti!=NULL) {
+- tapeblock_setup(ti);
+- ti=ti->next;
+- }
+- return 0;
+-}
+-
+-
+-void
+-tapeblock_uninit(void) {
+- unregister_blkdev(tapeblock_major, "tBLK");
+-}
+-
+-int
+-tapeblock_open(struct inode *inode, struct file *filp) {
+- tape_info_t *ti;
+- kdev_t dev;
+- int rc;
+- long lockflags;
+-
+- inode = filp->f_dentry->d_inode;
+- ti = first_tape_info;
+- while ((ti != NULL) && (ti->blk_minor != MINOR (inode->i_rdev)))
+- ti = (tape_info_t *) ti->next;
+- if (ti == NULL)
+- return -ENODEV;
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"b:open:");
+- debug_int_event (tape_debug_area,6,ti->blk_minor);
+-#endif
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- if (tapestate_get (ti) != TS_UNUSED) {
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"b:dbusy");
+-#endif
+- return -EBUSY;
+- }
+- tapestate_set (ti, TS_IDLE);
+- ti->position=-1;
+-
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- rc=tapeblock_mediumdetect(ti);
+- if (rc) {
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- tapestate_set (ti, TS_UNUSED);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- return rc; // on error we don't know the size of the medium
+- }
+- dev = MKDEV (tapeblock_major, MINOR (inode->i_rdev)); /* Get the device */
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->blk_filp = filp;
+- filp->private_data = ti; /* save the dev.info for later reference */
+- ti->cqr=NULL;
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-
+- return 0;
+-}
+-
+-int
+-tapeblock_release(struct inode *inode, struct file *filp) {
+- long lockflags;
+- tape_info_t *ti,*lastti;
+- ti = first_tape_info;
+- while ((ti != NULL) && (ti->blk_minor != MINOR (inode->i_rdev)))
+- ti = (tape_info_t *) ti->next;
+- if ((ti != NULL) && (tapestate_get (ti) == TS_NOT_OPER)) {
+- if (ti==first_tape_info) {
+- first_tape_info=ti->next;
+- } else {
+- lastti=first_tape_info;
+- while (lastti->next!=ti) lastti=lastti->next;
+- lastti->next=ti->next;
+- }
+- kfree(ti);
+- return 0;
+- }
+- if ((ti == NULL) || (tapestate_get (ti) != TS_IDLE)) {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"b:notidle!");
+-#endif
+- return -ENXIO; /* error in tape_release */
+- }
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"b:release:");
+- debug_int_event (tape_debug_area,6,ti->blk_minor);
+-#endif
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- tapestate_set (ti, TS_UNUSED);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- invalidate_buffers(inode->i_rdev);
+- return 0;
+-}
+-
+-static void
+-tapeblock_end_request(tape_info_t* ti) {
+- struct buffer_head *bh;
+- int uptodate;
+- if ((tapestate_get(ti)!=TS_FAILED) &&
+- (tapestate_get(ti)!=TS_DONE))
+- BUG(); // A request has to be completed to end it
+- uptodate=(tapestate_get(ti)==TS_DONE); // is the buffer up to date?
+-#ifdef TAPE_DEBUG
+- if (uptodate) {
+- debug_text_event (tape_debug_area,6,"b:done:");
+- debug_int_event (tape_debug_area,6,(long)ti->cqr);
+- } else {
+- debug_text_event (tape_debug_area,3,"b:failed:");
+- debug_int_event (tape_debug_area,3,(long)ti->cqr);
+- }
+-#endif
+- // now inform ll_rw_block about the request status
+- while ((bh = ti->current_request->bh) != NULL) {
+- ti->current_request->bh = bh->b_reqnext;
+- bh->b_reqnext = NULL;
+- bh->b_end_io (bh, uptodate);
+- }
+- if (!end_that_request_first (ti->current_request, uptodate, "tBLK")) {
+-#ifndef DEVICE_NO_RANDOM
+- add_blkdev_randomness (MAJOR (ti->current_request->rq_dev));
+-#endif
+- end_that_request_last (ti->current_request);
+- }
+- ti->discipline->free_bread(ti->cqr,ti);
+- ti->cqr=NULL;
+- ti->current_request=NULL;
+- if (tapestate_get(ti)!=TS_NOT_OPER) tapestate_set(ti,TS_IDLE);
+- return;
+-}
+-
+-static void
+-tapeblock_exec_IO (tape_info_t* ti) {
+- int rc;
+- struct request* req;
+- if (ti->cqr) { // process done/failed request
+- while ((tapestate_get(ti)==TS_FAILED) &&
+- ti->blk_retries>0) {
+- ti->blk_retries--;
+- ti->position=-1;
+- tapestate_set(ti,TS_BLOCK_INIT);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"b:retryreq:");
+- debug_int_event (tape_debug_area,3,(long)ti->cqr);
+-#endif
+- rc = do_IO (ti->devinfo.irq, ti->cqr->cpaddr, (unsigned long) ti->cqr,
+- 0x00, ti->cqr->options);
+- if (rc) {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"b:doIOfail:");
+- debug_int_event (tape_debug_area,3,(long)ti->cqr);
+-#endif
+- continue; // one retry lost because do_IO failed
+- }
+- return;
+- }
+- tapeblock_end_request (ti); // check state, inform user, free memory, set device idle
+- }
+- if (ti->cqr!=NULL) BUG(); // tape should be idle now, request should be freed!
+- if (tapestate_get (ti) == TS_NOT_OPER) {
+- ti->blk_minor=ti->rew_minor=ti->nor_minor=-1;
+- ti->devinfo.irq=-1;
+- return;
+- }
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98))
+- if (list_empty (&ti->request_queue.queue_head)) {
+-#else
+- if (ti->request_queue==NULL) {
+-#endif
+- // nothing more to do, or the device has disappeared
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"b:Qempty");
+-#endif
+- tapestate_set(ti,TS_IDLE);
+- return;
+- }
+- // queue is not empty, fetch a request and start IO!
+- req=ti->current_request=tape_next_request(&ti->request_queue);
+- if (req==NULL) {
+- BUG(); // the queue was not reported empty, but no request was found. This is bad.
+- }
+- if (req->cmd!=READ) { // we only support reading
+- tapestate_set(ti,TS_FAILED);
+- tapeblock_end_request (ti); // check state, inform user, free memory, set device idle
+- tapestate_set(ti,TS_BLOCK_INIT);
+- schedule_tapeblock_exec_IO(ti);
+- return;
+- }
+- ti->cqr=ti->discipline->bread(req,ti,tapeblock_major); //build channel program from request
+- if (!ti->cqr) {
+- // CCW generation failed. We try again later.
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"b:cqrNULL");
+-#endif
+- schedule_tapeblock_exec_IO(ti);
+- ti->current_request=NULL;
+- return;
+- }
+- ti->blk_retries = TAPEBLOCK_RETRIES;
+- rc= do_IO (ti->devinfo.irq, ti->cqr->cpaddr,
+- (unsigned long) ti->cqr, 0x00, ti->cqr->options);
+- if (rc) {
+- // okay, the SSCH failed. We try again later.
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"b:doIOfail");
+-#endif
+- ti->discipline->free_bread(ti->cqr,ti);
+- ti->cqr=NULL;
+- ti->current_request=NULL;
+- schedule_tapeblock_exec_IO(ti);
+- return;
+- }
+- // our request is in IO. we remove it from the queue and exit
+- tape_dequeue_request (&ti->request_queue,req);
+-}
+-
+-static void
+-do_tape_request (request_queue_t * queue) {
+- tape_info_t* ti;
+- long lockflags;
+- for (ti=first_tape_info;
+- ((ti!=NULL) && ((&ti->request_queue)!=queue));
+- ti=ti->next);
+- if (ti==NULL) BUG();
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- if (tapestate_get(ti)!=TS_IDLE) {
+- s390irq_spin_unlock_irqrestore(ti->devinfo.irq,lockflags);
+- return;
+- }
+- if (tapestate_get(ti)!=TS_IDLE) BUG();
+- tapestate_set(ti,TS_BLOCK_INIT);
+- tapeblock_exec_IO(ti);
+- s390irq_spin_unlock_irqrestore(ti->devinfo.irq,lockflags);
+-}
+-
+-static void
+-run_tapeblock_exec_IO (tape_info_t* ti) {
+- long flags_390irq,flags_ior;
+- spin_lock_irqsave (&io_request_lock, flags_ior);
+- s390irq_spin_lock_irqsave(ti->devinfo.irq,flags_390irq);
+- atomic_set(&ti->bh_scheduled,0);
+- tapeblock_exec_IO(ti);
+- s390irq_spin_unlock_irqrestore(ti->devinfo.irq,flags_390irq);
+- spin_unlock_irqrestore (&io_request_lock, flags_ior);
+-}
+-
+-void
+-schedule_tapeblock_exec_IO (tape_info_t *ti)
+-{
+- /* Protect against rescheduling, when already running */
+- if (atomic_compare_and_swap(0,1,&ti->bh_scheduled)) {
+- return;
+- }
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98))
+- INIT_LIST_HEAD(&ti->bh_tq.list);
+-#endif
+- ti->bh_tq.sync = 0;
+- ti->bh_tq.routine = (void *) (void *) run_tapeblock_exec_IO;
+- ti->bh_tq.data = ti;
+-
+- queue_task (&ti->bh_tq, &tq_immediate);
+- mark_bh (IMMEDIATE_BH);
+- return;
+-}
+-
+-/* wrappers around do_tape_request for different kernel versions */
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,3,98))
+-static void tape_request_fn (void) {
+- tape_info_t* ti=first_tape_info;
+- while (ti!=NULL) {
+- do_tape_request(&ti->request_queue);
+- ti=ti->next;
+- }
+-}
+-#else
+-static void tape_request_fn (request_queue_t* queue) {
+- do_tape_request(queue);
+-}
+-#endif
+-
+-static request_queue_t* tapeblock_getqueue (kdev_t kdev) {
+- tape_info_t* ti=first_tape_info;
+- while ((ti!=NULL) && (MINOR(kdev)!=ti->blk_minor))
+- ti=ti->next;
+- if (ti!=NULL) return &ti->request_queue;
+- return NULL;
+-}
+-
+-int tapeblock_mediumdetect(tape_info_t* ti) {
+- ccw_req_t* cqr;
+- int losize=1,hisize=1,rc;
+- long lockflags;
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"b:medDet");
+-#endif
+- PRINT_WARN("Detecting media size. This will take _long_, so get yourself a coffee...\n");
+- while (1) { // loop is terminated by break
+- hisize=hisize << 1; // try twice the previously tested size
+- cqr=ti->discipline->mtseek (ti, hisize);
+- if (cqr == NULL) {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"b:ccwg fail");
+-#endif
+- return -ENOSPC;
+- }
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->cqr = cqr;
+- ti->wanna_wakeup=0;
+- rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- if (rc) return -EIO;
+- wait_event_interruptible (ti->wq,ti->wanna_wakeup);
+- ti->cqr = NULL;
+- tape_free_request (cqr);
+- if (ti->kernbuf) {
+- kfree (ti->kernbuf);
+- ti->kernbuf=NULL;
+- }
+- if (signal_pending (current)) {
+- tapestate_set (ti, TS_IDLE);
+- return -ERESTARTSYS;
+- }
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- if (tapestate_get (ti) == TS_FAILED) {
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- break;
+- }
+- if (tapestate_get (ti) == TS_NOT_OPER) {
+- ti->blk_minor=ti->rew_minor=ti->nor_minor=-1;
+- ti->devinfo.irq=-1;
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq,lockflags);
+- return -ENODEV;
+- }
+- if (tapestate_get (ti) != TS_DONE) {
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- return -EIO;
+- }
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- losize=hisize;
+- }
+- cqr = ti->discipline->mtrew (ti, 1);
+- if (cqr == NULL) {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"b:ccwg fail");
+-#endif
+- return -ENOSPC;
+- }
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->cqr = cqr;
+- ti->wanna_wakeup=0;
+- rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- wait_event_interruptible (ti->wq,ti->wanna_wakeup);
+- ti->cqr = NULL;
+- tape_free_request (cqr);
+- if (signal_pending (current)) {
+- tapestate_set (ti, TS_IDLE);
+- return -ERESTARTSYS;
+- }
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- if (tapestate_get (ti) == TS_FAILED) {
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- return -EIO;
+- }
+- if (tapestate_get (ti) == TS_NOT_OPER) {
+- ti->blk_minor=ti->rew_minor=ti->nor_minor=-1;
+- ti->devinfo.irq=-1;
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq,lockflags);
+- return -ENODEV;
+- }
+- if (tapestate_get (ti) != TS_DONE) {
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- return -EIO;
+- }
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- while (losize!=hisize) {
+- cqr=ti->discipline->mtseek (ti, (hisize+losize)/2+1);
+- if (cqr == NULL) {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"b:ccwg fail");
+-#endif
+- return -ENOSPC;
+- }
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->cqr = cqr;
+- ti->wanna_wakeup=0;
+- rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- if (rc) return -EIO;
+- wait_event_interruptible (ti->wq,ti->wanna_wakeup);
+- ti->cqr = NULL;
+- tape_free_request (cqr);
+- if (ti->kernbuf) {
+- kfree (ti->kernbuf);
+- ti->kernbuf=NULL;
+- }
+- if (signal_pending (current)) {
+- tapestate_set (ti, TS_IDLE);
+- return -ERESTARTSYS;
+- }
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- if (tapestate_get (ti) == TS_NOT_OPER) {
+- ti->blk_minor=ti->rew_minor=ti->nor_minor=-1;
+- ti->devinfo.irq=-1;
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq,lockflags);
+- return -ENODEV;
+- }
+- if (tapestate_get (ti) == TS_FAILED) {
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- hisize=(hisize+losize)/2;
+- cqr = ti->discipline->mtrew (ti, 1);
+- if (cqr == NULL) {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"b:ccwg fail");
+-#endif
+- return -ENOSPC;
+- }
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->cqr = cqr;
+- ti->wanna_wakeup=0;
+- rc = do_IO (ti->devinfo.irq, cqr->cpaddr, (unsigned long) cqr, 0x00, cqr->options);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- wait_event_interruptible (ti->wq,ti->wanna_wakeup);
+- ti->cqr = NULL;
+- tape_free_request (cqr);
+- if (signal_pending (current)) {
+- tapestate_set (ti, TS_IDLE);
+- return -ERESTARTSYS;
+- }
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- if (tapestate_get (ti) == TS_FAILED) {
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- return -EIO;
+- }
+- if (tapestate_get (ti) != TS_DONE) {
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- return -EIO;
+- }
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- continue;
+- }
+- if (tapestate_get (ti) != TS_DONE) {
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- return -EIO;
+- }
+- tapestate_set (ti, TS_IDLE);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- losize=(hisize+losize)/2+1;
+- }
+- blk_size[tapeblock_major][ti->blk_minor]=(losize)*(blksize_size[tapeblock_major][ti->blk_minor]/1024);
+- return 0;
+-}
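tapeblock_mediumdetect() above sizes the medium using only seeks: it doubles the target block number until a seek fails, then binary-searches between the last good and first failed positions (rewinding after each failed probe). Stripped of the channel I/O and state machinery, the search reduces to this user-space model, where the probe function and medium size are invented stand-ins:

    #include <stdio.h>

    static const long MEDIUM_BLOCKS = 123456;  /* stands in for the real drive */

    static int seek_ok(long block)             /* models mtseek succeeding */
    {
        return block <= MEDIUM_BLOCKS;
    }

    static long detect_size(void)
    {
        long lo = 1, hi = 1;

        for (;;) {                  /* double until a seek fails */
            hi <<= 1;
            if (!seek_ok(hi))
                break;
            lo = hi;
        }
        while (lo != hi) {          /* binary-search the boundary */
            long mid = (hi + lo) / 2 + 1;
            if (seek_ok(mid))
                lo = mid;           /* mid is still on the medium */
            else
                hi = mid - 1;       /* mid is past the end */
        }
        return lo;
    }

    int main(void)
    {
        printf("detected %ld blocks\n", detect_size());
        return 0;
    }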
+=== drivers/s390/char/tape3480.c
+==================================================================
+--- drivers/s390/char/tape3480.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tape3480.c (/trunk/2.4.27) (revision 52)
+@@ -1,156 +0,0 @@
+-/***************************************************************************
+- *
+- * drivers/s390/char/tape3480.c
+- * tape device discipline for 3480 tapes.
+- *
+- * S390 and zSeries version
+- * Copyright (C) 2001 IBM Corporation
+- * Author(s): Carsten Otte <cotte at de.ibm.com>
+- * Tuan Ngo-Anh <ngoanh at de.ibm.com>
+- *
+- ****************************************************************************
+- */
+-
+-#include "tapedefs.h"
+-#include <linux/version.h>
+-#include <asm/ccwcache.h> /* CCW allocations */
+-#include <asm/s390dyn.h>
+-#include <asm/debug.h>
+-#include <linux/compatmac.h>
+-#include "tape.h"
+-#include "tape34xx.h"
+-#include "tape3480.h"
+-
+-tape_event_handler_t tape3480_event_handler_table[TS_SIZE][TE_SIZE] =
+-{
+- /* {START , DONE, FAILED, ERROR, OTHER } */
+- {NULL, tape34xx_unused_done, NULL, NULL, NULL}, /* TS_UNUSED */
+- {NULL, tape34xx_idle_done, NULL, NULL, NULL}, /* TS_IDLE */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_DONE */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_FAILED */
+- {NULL, tape34xx_block_done, NULL, NULL, NULL}, /* TS_BLOCK_INIT */
+- {NULL, tape34xx_bsb_init_done, NULL, NULL, NULL}, /* TS_BSB_INIT */
+- {NULL, tape34xx_bsf_init_done, NULL, NULL, NULL}, /* TS_BSF_INIT */
+- {NULL, tape34xx_dse_init_done, NULL, NULL, NULL}, /* TS_DSE_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_EGA_INIT */
+- {NULL, tape34xx_fsb_init_done, NULL, NULL, NULL}, /* TS_FSB_INIT */
+- {NULL, tape34xx_fsf_init_done, NULL, NULL, NULL}, /* TS_FSF_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_LDI_INIT */
+- {NULL, tape34xx_lbl_init_done, NULL, NULL, NULL}, /* TS_LBL_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_MSE_INIT */
+- {NULL, tape34xx_nop_init_done, NULL, NULL, NULL}, /* TS_NOP_INIT */
+- {NULL, tape34xx_rfo_init_done, NULL, NULL, NULL}, /* TS_RBA_INIT */
+- {NULL, tape34xx_rbi_init_done, NULL, NULL, NULL}, /* TS_RBI_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_RBU_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_RBL_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_RDC_INIT */
+- {NULL, tape34xx_rfo_init_done, NULL, NULL, NULL}, /* TS_RFO_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_RSD_INIT */
+- {NULL, tape34xx_rew_init_done, NULL, NULL, NULL}, /* TS_REW_INIT */
+- {NULL, tape34xx_rew_release_init_done, NULL, NULL, NULL}, /* TS_REW_RELEASE_INIT */
+- {NULL, tape34xx_run_init_done, NULL, NULL, NULL}, /* TS_RUN_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SEN_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SID_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SNP_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SPG_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SWI_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SMR_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SYN_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_TIO_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_UNA_INIT */
+- {NULL, tape34xx_wri_init_done, NULL, NULL, NULL}, /* TS_WRI_INIT */
+- {NULL, tape34xx_wtm_init_done, NULL, NULL, NULL}, /* TS_WTM_INIT */
+- {NULL, NULL, NULL, NULL, NULL}}; /* TS_NOT_OPER */
+-
+-devreg_t tape3480_devreg = {
+- ci:
+- {hc:
+- {ctype:0x3480}},
+- flag:DEVREG_MATCH_CU_TYPE | DEVREG_TYPE_DEVCHARS,
+- oper_func:tape_oper_handler
+-};
+-
+-
+-void
+-tape3480_setup_assist (tape_info_t * ti)
+-{
+- tape3480_disc_data_t *data = NULL;
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"3480 dsetu");
+- debug_text_event (tape_debug_area,6,"dev:");
+- debug_int_event (tape_debug_area,6,ti->blk_minor);
+-#endif /* TAPE_DEBUG */
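+- /* Retry until the small discipline-data allocation succeeds. */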
+- while (data == NULL)
+- data = kmalloc (sizeof (tape3480_disc_data_t), GFP_KERNEL);
+- data->modeset_byte = 0x00;
+- ti->discdata = (void *) data;
+-}
+-
+-
+-void
+-tape3480_shutdown (int autoprobe) {
+- if (autoprobe)
+- s390_device_unregister(&tape3480_devreg);
+-}
+-
+-tape_discipline_t *
+-tape3480_init (int autoprobe)
+-{
+- tape_discipline_t *disc;
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"3480 init");
+-#endif /* TAPE_DEBUG */
+- disc = kmalloc (sizeof (tape_discipline_t), GFP_KERNEL);
+- if (disc == NULL) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,3,"disc:nomem");
+-#endif /* TAPE_DEBUG */
+- return disc;
+- }
+- disc->cu_type = 0x3480;
+- disc->setup_assist = tape3480_setup_assist;
+- disc->error_recovery = tape34xx_error_recovery;
+- disc->write_block = tape34xx_write_block;
+- disc->free_write_block = tape34xx_free_write_block;
+- disc->read_block = tape34xx_read_block;
+- disc->free_read_block = tape34xx_free_read_block;
+- disc->mtfsf = tape34xx_mtfsf;
+- disc->mtbsf = tape34xx_mtbsf;
+- disc->mtfsr = tape34xx_mtfsr;
+- disc->mtbsr = tape34xx_mtbsr;
+- disc->mtweof = tape34xx_mtweof;
+- disc->mtrew = tape34xx_mtrew;
+- disc->mtoffl = tape34xx_mtoffl;
+- disc->mtnop = tape34xx_mtnop;
+- disc->mtbsfm = tape34xx_mtbsfm;
+- disc->mtfsfm = tape34xx_mtfsfm;
+- disc->mteom = tape34xx_mteom;
+- disc->mterase = tape34xx_mterase;
+- disc->mtsetdensity = tape34xx_mtsetdensity;
+- disc->mtseek = tape34xx_mtseek;
+- disc->mttell = tape34xx_mttell;
+- disc->mtsetdrvbuffer = tape34xx_mtsetdrvbuffer;
+- disc->mtlock = tape34xx_mtlock;
+- disc->mtunlock = tape34xx_mtunlock;
+- disc->mtload = tape34xx_mtload;
+- disc->mtunload = tape34xx_mtunload;
+- disc->mtcompression = tape34xx_mtcompression;
+- disc->mtsetpart = tape34xx_mtsetpart;
+- disc->mtmkpart = tape34xx_mtmkpart;
+- disc->mtiocget = tape34xx_mtiocget;
+- disc->mtiocpos = tape34xx_mtiocpos;
+- disc->shutdown = tape3480_shutdown;
+- disc->discipline_ioctl_overload = tape34xx_ioctl_overload;
+- disc->event_table = &tape3480_event_handler_table;
+- disc->default_handler = tape34xx_default_handler;
+- disc->bread = tape34xx_bread;
+- disc->free_bread = tape34xx_free_bread;
+- disc->tape = NULL; /* pointer for backreference */
+- disc->next = NULL;
+- if (autoprobe)
+- s390_device_register(&tape3480_devreg);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"3480 regis");
+-#endif /* TAPE_DEBUG */
+- return disc;
+-}
+=== drivers/s390/char/tapeblock.h
+==================================================================
+--- drivers/s390/char/tapeblock.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tapeblock.h (/trunk/2.4.27) (revision 52)
+@@ -1,36 +0,0 @@
+-
+-/***************************************************************************
+- *
+- * drivers/s390/char/tapeblock.h
+- * block device frontend for tape device driver
+- *
+- * S390 and zSeries version
+- * Copyright (C) 2001 IBM Corporation
+- * Author(s): Carsten Otte <cotte at de.ibm.com>
+- * Tuan Ngo-Anh <ngoanh at de.ibm.com>
+- *
+- *
+- ****************************************************************************
+- */
+-
+-#ifndef TAPEBLOCK_H
+-#define TAPEBLOCK_H
+-#include <linux/config.h>
+-#define PARTN_BITS 0
+-
+-#define TAPEBLOCK_READAHEAD 30
+-#define TAPEBLOCK_MAJOR 0
+-
+-#define TAPEBLOCK_DEFAULTMODE 0060644
+-
+-int tapeblock_open(struct inode *, struct file *);
+-int tapeblock_release(struct inode *, struct file *);
+-void tapeblock_setup(tape_info_t* ti);
+-void schedule_tapeblock_exec_IO (tape_info_t *ti);
+-int tapeblock_mediumdetect(tape_info_t* ti);
+-#ifdef CONFIG_DEVFS_FS
+-void tapeblock_mkdevfstree (tape_info_t* ti);
+-#endif
+-int tapeblock_init (void);
+-void tapeblock_uninit (void);
+-#endif
+=== drivers/s390/char/hwc_cpi.c
+==================================================================
+--- drivers/s390/char/hwc_cpi.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/hwc_cpi.c (/trunk/2.4.27) (revision 52)
+@@ -1,211 +0,0 @@
+-
+-/*
+- * Author: Martin Peschke <mpeschke at de.ibm.com>
+- * Copyright (C) 2001 IBM Entwicklung GmbH, IBM Corporation
+- */
+-
+-#include <linux/string.h>
+-#include <linux/ctype.h>
+-#include <linux/module.h>
+-#include <linux/init.h>
+-#include <linux/errno.h>
+-#include <linux/slab.h>
+-#include <linux/version.h>
+-#include <asm/semaphore.h>
+-#include <asm/ebcdic.h>
+-#include "hwc_rw.h"
+-#include "hwc.h"
+-
+-#define CPI_RETRIES 3
+-#define CPI_SLEEP_TICKS 50
+-
+-#define CPI_LENGTH_SYSTEM_TYPE 8
+-#define CPI_LENGTH_SYSTEM_NAME 8
+-#define CPI_LENGTH_SYSPLEX_NAME 8
+-
+-typedef struct {
+- _EBUF_HEADER
+- u8 id_format;
+- u8 reserved0;
+- u8 system_type[CPI_LENGTH_SYSTEM_TYPE];
+- u64 reserved1;
+- u8 system_name[CPI_LENGTH_SYSTEM_NAME];
+- u64 reserved2;
+- u64 system_level;
+- u64 reserved3;
+- u8 sysplex_name[CPI_LENGTH_SYSPLEX_NAME];
+- u8 reserved4[16];
+-} __attribute__ ((packed)) cpi_evbuf_t;
+-
+-typedef struct _cpi_hwcb_t {
+- _HWCB_HEADER
+- cpi_evbuf_t cpi_evbuf;
+-} __attribute__ ((packed)) cpi_hwcb_t;
+-
+-cpi_hwcb_t *cpi_hwcb;
+-
+-static int __init cpi_module_init (void);
+-static void __exit cpi_module_exit (void);
+-
+-module_init (cpi_module_init);
+-module_exit (cpi_module_exit);
+-
+-MODULE_AUTHOR (
+- "Martin Peschke, IBM Deutschland Entwicklung GmbH "
+- "<mpeschke at de.ibm.com>");
+-
+-MODULE_DESCRIPTION (
+- "identify this operating system instance to the S/390 or zSeries hardware");
+-
+-static char *system_name = NULL;
+-MODULE_PARM (system_name, "s");
+-MODULE_PARM_DESC (system_name, "e.g. hostname - max. 8 characters");
+-
+-static char *sysplex_name = NULL;
+-#ifdef ALLOW_SYSPLEX_NAME
+-MODULE_PARM (sysplex_name, "s");
+-MODULE_PARM_DESC (sysplex_name, "if applicable - max. 8 characters");
+-#endif
+-
+-static char *system_type = "LINUX";
+-
+-hwc_request_t cpi_request =
+-{};
+-
+-hwc_callback_t cpi_callback;
+-
+-static DECLARE_MUTEX_LOCKED (sem);
+-
+-static int __init
+-cpi_module_init (void)
+-{
+- int retval;
+- int system_type_length;
+- int system_name_length;
+- int sysplex_name_length = 0;
+- int retries;
+-
+- if (!MACHINE_HAS_HWC) {
+- printk ("cpi: bug: hardware console not present\n");
+- retval = -EINVAL;
+- goto out;
+- }
+- if (!system_type) {
+- printk ("cpi: bug: no system type specified\n");
+- retval = -EINVAL;
+- goto out;
+- }
+- system_type_length = strlen (system_type);
+-	if (system_type_length > CPI_LENGTH_SYSTEM_TYPE) {
+- printk ("cpi: bug: system type has length of %i characters - "
+- "only %i characters supported\n",
+- system_type_length,
+- CPI_LENGTH_SYSTEM_TYPE);
+- retval = -EINVAL;
+- goto out;
+- }
+- if (!system_name) {
+- printk ("cpi: no system name specified\n");
+- retval = -EINVAL;
+- goto out;
+- }
+- system_name_length = strlen (system_name);
+- if (system_name_length > CPI_LENGTH_SYSTEM_NAME) {
+- printk ("cpi: system name has length of %i characters - "
+- "only %i characters supported\n",
+- system_name_length,
+- CPI_LENGTH_SYSTEM_NAME);
+- retval = -EINVAL;
+- goto out;
+- }
+- if (sysplex_name) {
+- sysplex_name_length = strlen (sysplex_name);
+- if (sysplex_name_length > CPI_LENGTH_SYSPLEX_NAME) {
+- printk ("cpi: sysplex name has length of %i characters - "
+- "only %i characters supported\n",
+- sysplex_name_length,
+- CPI_LENGTH_SYSPLEX_NAME);
+- retval = -EINVAL;
+- goto out;
+- }
+- }
+- cpi_hwcb = kmalloc (sizeof (cpi_hwcb_t), GFP_KERNEL);
+- if (!cpi_hwcb) {
+- printk ("cpi: no storage to fulfill request\n");
+- retval = -ENOMEM;
+- goto out;
+- }
+- memset (cpi_hwcb, 0, sizeof (cpi_hwcb_t));
+-
+- cpi_hwcb->length = sizeof (cpi_hwcb_t);
+- cpi_hwcb->cpi_evbuf.length = sizeof (cpi_evbuf_t);
+- cpi_hwcb->cpi_evbuf.type = 0x0B;
+-
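+- /* For each identifier: pad with blanks, copy the ASCII value in,
+-  * convert to EBCDIC, and fold to uppercase for the service element. */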
+- memset (cpi_hwcb->cpi_evbuf.system_type, ' ', CPI_LENGTH_SYSTEM_TYPE);
+- memcpy (cpi_hwcb->cpi_evbuf.system_type, system_type, system_type_length);
+- HWC_ASCEBC_STR (cpi_hwcb->cpi_evbuf.system_type, CPI_LENGTH_SYSTEM_TYPE);
+- EBC_TOUPPER (cpi_hwcb->cpi_evbuf.system_type, CPI_LENGTH_SYSTEM_TYPE);
+-
+- memset (cpi_hwcb->cpi_evbuf.system_name, ' ', CPI_LENGTH_SYSTEM_NAME);
+- memcpy (cpi_hwcb->cpi_evbuf.system_name, system_name, system_name_length);
+- HWC_ASCEBC_STR (cpi_hwcb->cpi_evbuf.system_name, CPI_LENGTH_SYSTEM_NAME);
+- EBC_TOUPPER (cpi_hwcb->cpi_evbuf.system_name, CPI_LENGTH_SYSTEM_NAME);
+-
+- cpi_hwcb->cpi_evbuf.system_level = LINUX_VERSION_CODE;
+-
+- if (sysplex_name) {
+- memset (cpi_hwcb->cpi_evbuf.sysplex_name, ' ', CPI_LENGTH_SYSPLEX_NAME);
+- memcpy (cpi_hwcb->cpi_evbuf.sysplex_name, sysplex_name, sysplex_name_length);
+- HWC_ASCEBC_STR (cpi_hwcb->cpi_evbuf.sysplex_name, CPI_LENGTH_SYSPLEX_NAME);
+- EBC_TOUPPER (cpi_hwcb->cpi_evbuf.sysplex_name, CPI_LENGTH_SYSPLEX_NAME);
+- }
+- cpi_request.block = cpi_hwcb;
+- cpi_request.word = HWC_CMDW_WRITEDATA;
+- cpi_request.callback = cpi_callback;
+-
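+- /* Try the write a few times; if it fails, sleep and retry. On
+-  * success, wait for cpi_callback to post the semaphore before
+-  * inspecting the response code. */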
+- for (retries = CPI_RETRIES; retries; retries--) {
+- retval = hwc_send (&cpi_request);
+- if (retval) {
+-
+- set_current_state (TASK_INTERRUPTIBLE);
+- schedule_timeout (CPI_SLEEP_TICKS);
+- } else {
+-
+- down (&sem);
+-
+- switch (cpi_hwcb->response_code) {
+- case 0x0020:
+- printk ("cpi: succeeded\n");
+- break;
+- default:
+- printk ("cpi: failed with response code 0x%x\n",
+- cpi_hwcb->response_code);
+- }
+- goto free;
+- }
+- }
+-
+- printk ("cpi: failed (%i)\n", retval);
+-
+- free:
+- kfree (cpi_hwcb);
+-
+- out:
+- return retval;
+-}
+-
+-static void __exit
+-cpi_module_exit (void)
+-{
+- printk ("cpi: exit\n");
+-}
+-
+-void
+-cpi_callback (hwc_request_t * req)
+-{
+- up (&sem);
+-}
+=== drivers/s390/char/tape3480.h
+==================================================================
+--- drivers/s390/char/tape3480.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tape3480.h (/trunk/2.4.27) (revision 52)
+@@ -1,23 +0,0 @@
+-/***************************************************************************
+- *
+- * drivers/s390/char/tape3480.h
+- * tape device discipline for 3480 tapes.
+- *
+- * S390 and zSeries version
+- * Copyright (C) 2001 IBM Corporation
+- * Author(s): Carsten Otte <cotte at de.ibm.com>
+- * Tuan Ngo-Anh <ngoanh at de.ibm.com>
+- *
+- ****************************************************************************
+- */
+-
+-#ifndef _TAPE3480_H
+-
+-#define _TAPE3480_H
+-
+-
+-typedef struct _tape3480_disc_data_t {
+- __u8 modeset_byte;
+-} tape3480_disc_data_t __attribute__ ((packed, aligned(8)));
+-tape_discipline_t * tape3480_init (int);
+-#endif // _TAPE3480_H
+=== drivers/s390/char/tape34xx.c
+==================================================================
+--- drivers/s390/char/tape34xx.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tape34xx.c (/trunk/2.4.27) (revision 52)
+@@ -1,2389 +0,0 @@
+-/***************************************************************************
+- *
+- * drivers/s390/char/tape34xx.c
+- * common tape device discipline for 34xx tapes.
+- *
+- * S390 and zSeries version
+- * Copyright (C) 2001 IBM Corporation
+- * Author(s): Carsten Otte <cotte at de.ibm.com>
+- * Tuan Ngo-Anh <ngoanh at de.ibm.com>
+- *
+- ****************************************************************************
+- */
+-
+-#include "tapedefs.h"
+-#include <linux/config.h>
+-#include <linux/version.h>
+-#include <linux/stddef.h>
+-#include <linux/kernel.h>
+-#include <asm/types.h>
+-#include <asm/uaccess.h>
+-#include <linux/stat.h>
+-#include <linux/proc_fs.h>
+-#include <asm/ccwcache.h>
+-#include <asm/idals.h>
+-#ifdef CONFIG_S390_TAPE_DYNAMIC
+-#include <asm/s390dyn.h>
+-#endif
+-#include <asm/debug.h>
+-#include <linux/compatmac.h>
+-#include "tape.h"
+-#include "tape34xx.h"
+-
+-#define PRINTK_HEADER "T34xx:"
+-
+-tape_event_handler_t tape34xx_event_handler_table[TS_SIZE][TE_SIZE] =
+-{
+- /* {START , DONE, FAILED, ERROR, OTHER } */
+- {NULL, tape34xx_unused_done, NULL, NULL, NULL}, /* TS_UNUSED */
+- {NULL, tape34xx_idle_done, NULL, NULL, NULL}, /* TS_IDLE */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_DONE */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_FAILED */
+- {NULL, tape34xx_block_done, NULL, NULL, NULL}, /* TS_BLOCK_INIT */
+- {NULL, tape34xx_bsb_init_done, NULL, NULL, NULL}, /* TS_BSB_INIT */
+- {NULL, tape34xx_bsf_init_done, NULL, NULL, NULL}, /* TS_BSF_INIT */
+- {NULL, tape34xx_dse_init_done, NULL, NULL, NULL}, /* TS_DSE_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_EGA_INIT */
+- {NULL, tape34xx_fsb_init_done, NULL, NULL, NULL}, /* TS_FSB_INIT */
+- {NULL, tape34xx_fsf_init_done, NULL, NULL, NULL}, /* TS_FSF_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_LDI_INIT */
+- {NULL, tape34xx_lbl_init_done, NULL, NULL, NULL}, /* TS_LBL_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_MSE_INIT */
+- {NULL, tape34xx_nop_init_done, NULL, NULL, NULL}, /* TS_NOP_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_RBA_INIT */
+- {NULL, tape34xx_rbi_init_done, NULL, NULL, NULL}, /* TS_RBI_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_RBU_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_RBL_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_RDC_INIT */
+- {NULL, tape34xx_rfo_init_done, NULL, NULL, NULL}, /* TS_RFO_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_RSD_INIT */
+- {NULL, tape34xx_rew_init_done, NULL, NULL, NULL}, /* TS_REW_INIT */
+- {NULL, tape34xx_rew_release_init_done, NULL, NULL, NULL}, /* TS_REW_RELEASE_INIT */
+- {NULL, tape34xx_run_init_done, NULL, NULL, NULL}, /* TS_RUN_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SEN_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SID_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SNP_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SPG_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SWI_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SMR_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_SYN_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_TIO_INIT */
+- {NULL, NULL, NULL, NULL, NULL}, /* TS_UNA_INIT */
+- {NULL, tape34xx_wri_init_done, NULL, NULL, NULL}, /* TS_WRI_INIT */
+- {NULL, tape34xx_wtm_init_done, NULL, NULL, NULL}, /* TS_WTM_INIT */
+- {NULL, NULL, NULL, NULL, NULL}}; /* TS_NOT_OPER */
+-
+-
+-int
+-tape34xx_ioctl_overload (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+-{
+- return -EINVAL; // no additional ioctls
+-
+-}
+-
+-ccw_req_t *
+-tape34xx_write_block (const char *data, size_t count, tape_info_t * ti)
+-{
+- long lockflags;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- void *mem;
+- cqr = tape_alloc_ccw_req (ti, 2, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xwbl nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- mem = kmalloc (count, GFP_KERNEL);
+- if (!mem) {
+- tape_free_request (cqr);
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xwbl nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- if (copy_from_user (mem, data, count)) {
+- kfree (mem);
+- tape_free_request (cqr);
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xwbl segf.");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
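+- /* Channel program: MODE SET (selects density/IDRC via the
+-  * discipline's modeset byte) command-chained to the WRITE CCW. */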
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+-
+- ccw->cmd_code = WRITE_CMD;
+- ccw->flags = 0;
+- ccw->count = count;
+- set_normalized_cda (ccw, (unsigned long) mem);
+- if ((ccw->cda) == 0) {
+- kfree (mem);
+- tape_free_request (cqr);
+- return NULL;
+- }
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = mem;
+- ti->userbuf = (void *) data;
+- tapestate_set (ti, TS_WRI_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xwbl ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-void
+-tape34xx_free_write_block (ccw_req_t * cqr, tape_info_t * ti)
+-{
+- unsigned long lockflags;
+- ccw1_t *ccw;
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ccw = cqr->cpaddr;
+- ccw++;
+- clear_normalized_cda (ccw);
+- kfree (ti->kernbuf);
+- tape_free_request (cqr);
+- ti->kernbuf = ti->userbuf = NULL;
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xfwb free");
+-#endif /* TAPE_DEBUG */
+-}
+-
+-ccw_req_t *
+-tape34xx_read_block (const char *data, size_t count, tape_info_t * ti)
+-{
+- long lockflags;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- void *mem;
+- cqr = tape_alloc_ccw_req (ti, 2, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xrbl nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- mem = kmalloc (count, GFP_KERNEL);
+- if (!mem) {
+- tape_free_request (cqr);
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xrbl nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+-
+- ccw->cmd_code = READ_FORWARD;
+- ccw->flags = 0;
+- ccw->count = count;
+- set_normalized_cda (ccw, (unsigned long) mem);
+- if ((ccw->cda) == 0) {
+- kfree (mem);
+- tape_free_request (cqr);
+- return NULL;
+- }
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = mem;
+- ti->userbuf = (void *) data;
+- tapestate_set (ti, TS_RFO_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xrbl ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-ccw_req_t *
+-tape34xx_read_opposite (tape_info_t * ti,int novalue)
+-{
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- size_t count;
+- // first, retrieve the count from the old cqr.
+- cqr = ti->cqr;
+- ccw = cqr->cpaddr;
+- ccw++;
+- count=ccw->count;
+- // free old cqr.
+- clear_normalized_cda (ccw);
+- tape_free_request (cqr);
+- // build new cqr
+- cqr = tape_alloc_ccw_req (ti, 3, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xrop nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+-
+- ccw->cmd_code = READ_BACKWARD;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = count;
+- set_normalized_cda (ccw, (unsigned long) ti->kernbuf);
+- if ((ccw->cda) == 0) {
+- tape_free_request (cqr);
+- return NULL;
+- }
+- ccw++;
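+- /* Space forward over the block that was just read backward, so
+-  * the tape position ends up after the block again. */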
+- ccw->cmd_code = FORSPACEBLOCK;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- ccw->cda = (unsigned long)ccw;
+- ccw++;
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 1;
+- ccw->cda = (unsigned long)ccw;
+- tapestate_set (ti, TS_RBA_INIT);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xrop ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-void
+-tape34xx_free_read_block (ccw_req_t * cqr, tape_info_t * ti)
+-{
+- unsigned long lockflags;
+- size_t cpysize;
+- ccw1_t *ccw;
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ccw = cqr->cpaddr;
+- ccw++;
+- cpysize = ccw->count - ti->devstat.rescnt;
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+- if (copy_to_user (ti->userbuf, ti->kernbuf, cpysize)) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xfrb segf.");
+-#endif /* TAPE_DEBUG */
+- }
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- clear_normalized_cda (ccw);
+- kfree (ti->kernbuf);
+- tape_free_request (cqr);
+- ti->kernbuf = ti->userbuf = NULL;
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xfrb free");
+-#endif /* TAPE_DEBUG */
+-}
+-
+-/*
+- * The IOCTL interface is implemented in the following section,
+- * except for MTRESET and MTSETBLK, which are handled by tapechar.c.
+- */
+-/*
+- * MTFSF: Forward space over 'count' file marks. The tape is positioned
+- * at the EOT (End of Tape) side of the file mark.
+- */
+-ccw_req_t *
+-tape34xx_mtfsf (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- int i;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- if ((count == 0) || (count > 510)) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xfsf parm");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- cqr = tape_alloc_ccw_req (ti, 2 + count, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xfsf nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- for (i = 0; i < count; i++) {
+- ccw->cmd_code = FORSPACEFILE;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- ccw++;
+- }
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_FSF_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xfsf ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTBSF: Backward space over 'count' file marks. The tape is positioned at
+- * the EOT (End of Tape) side of the last skipped file mark.
+- */
+-ccw_req_t *
+-tape34xx_mtbsf (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- int i;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- if ((count == 0) || (count > 510)) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xbsf parm");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- cqr = tape_alloc_ccw_req (ti, 2 + count, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xbsf nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- for (i = 0; i < count; i++) {
+- ccw->cmd_code = BACKSPACEFILE;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- ccw++;
+- }
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_BSF_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xbsf ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTFSR: Forward space over 'count' tape blocks (the block size is
+- * set via MTSETBLK).
+- */
+-ccw_req_t *
+-tape34xx_mtfsr (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- int i;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- if ((count == 0) || (count > 510)) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xfsr parm");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- cqr = tape_alloc_ccw_req (ti, 2 + count, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xfsr nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- for (i = 0; i < count; i++) {
+- ccw->cmd_code = FORSPACEBLOCK;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- ccw++;
+- }
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_FSB_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xfsr ccwgen");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTBSR: Backward space over 'count' tape blocks
+- * (the block size is set via MTSETBLK).
+- */
+-ccw_req_t *
+-tape34xx_mtbsr (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- int i;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- if ((count == 0) || (count > 510)) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xbsr parm");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- cqr = tape_alloc_ccw_req (ti, 2 + count, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xbsr nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- for (i = 0; i < count; i++) {
+- ccw->cmd_code = BACKSPACEBLOCK;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- ccw++;
+- }
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_BSB_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xbsr ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTWEOF: Write 'count' file marks at the current position.
+- */
+-ccw_req_t *
+-tape34xx_mtweof (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- int i;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- if ((count == 0) || (count > 510)) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xweo parm");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- cqr = tape_alloc_ccw_req (ti, 2 + count, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xweo nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- for (i = 0; i < count; i++) {
+- ccw->cmd_code = WRITETAPEMARK;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- ccw++;
+- }
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- ccw++;
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_WTM_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xweo ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTREW: Rewind the tape.
+- */
+-ccw_req_t *
+-tape34xx_mtrew (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- cqr = tape_alloc_ccw_req (ti, 3, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xrew nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- ccw->cmd_code = REWIND;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- ccw++;
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_REW_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xrew ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTOFFL: Rewind the tape and put the drive off-line.
+- * Implemented as 'rewind unload'.
+- */
+-ccw_req_t *
+-tape34xx_mtoffl (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- cqr = tape_alloc_ccw_req (ti, 3, 32);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xoff nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- ccw->cmd_code = REWIND_UNLOAD;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- ccw++;
+- ccw->cmd_code = SENSE;
+- ccw->flags = 0;
+- ccw->count = 32;
+- ccw->cda = (unsigned long) cqr->cpaddr;
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_RUN_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xoff ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTNOP: 'No operation'.
+- */
+-ccw_req_t *
+-tape34xx_mtnop (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- cqr = tape_alloc_ccw_req (ti, 1, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xnop nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_NOP_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xnop ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTBSFM: Backward space over 'count' file marks.
+- * The tape is positioned at the BOT (Begin Of Tape) side of the
+- * last skipped file mark.
+- */
+-ccw_req_t *
+-tape34xx_mtbsfm (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- int i;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- if ((count == 0) || (count > 510)) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xbsm parm");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- cqr = tape_alloc_ccw_req (ti, 2 + count, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xbsm nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- for (i = 0; i < count; i++) {
+- ccw->cmd_code = BACKSPACEFILE;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- ccw++;
+- }
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_BSF_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xbsm ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTFSFM: Forward space over 'count' file marks.
+- * The tape is positioned at the BOT (Begin Of Tape) side
+- * of the last skipped file mark.
+- */
+-ccw_req_t *
+-tape34xx_mtfsfm (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- int i;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- if ((count == 0) || (count > 510)) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xfsm parm");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- cqr = tape_alloc_ccw_req (ti, 2 + count, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xfsm nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- for (i = 0; i < count; i++) {
+- ccw->cmd_code = FORSPACEFILE;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- ccw++;
+- }
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_FSF_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xfsm ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTEOM: position at the end of the portion of the tape already used
+- * for recording data. MTEOM positions after the last file mark, ready for
+- * appending another file.
+- * MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind.
+- */
+-ccw_req_t *
+-tape34xx_mteom (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- cqr = tape_alloc_ccw_req (ti, 4, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xeom nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
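+- /* The trailing TIC branches back to the start of the channel
+-  * program, so the drive keeps spacing over file marks until it
+-  * reaches the end of the recorded area. */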
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- ccw->cmd_code = FORSPACEFILE;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- ccw++;
+- ccw->cmd_code = NOP;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- ccw++;
+- ccw->cmd_code = CCW_CMD_TIC;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (cqr->cpaddr);
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_FSF_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xeom ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTERASE: erases the tape.
+- */
+-ccw_req_t *
+-tape34xx_mterase (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- cqr = tape_alloc_ccw_req (ti, 5, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xera nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- ccw->cmd_code = REWIND;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- ccw++;
+- ccw->cmd_code = ERASE_GAP;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- ccw++;
+- ccw->cmd_code = DATA_SEC_ERASE;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- ccw++;
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_DSE_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xera ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTSETDENSITY: set tape density.
+- */
+-ccw_req_t *
+-tape34xx_mtsetdensity (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- cqr = tape_alloc_ccw_req (ti, 2, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xden nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_NOP_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xden ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTSEEK: seek to the specified block.
+- */
+-ccw_req_t *
+-tape34xx_mtseek (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- __u8 *data;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- if ((data = kmalloc (4 * sizeof (__u8), GFP_KERNEL)) == NULL) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xsee nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
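+- /* Build the 4-byte LOCATE block ID: byte 0 is the format byte,
+-  * bytes 1-3 carry the 22-bit block number; bit 0x80 of byte 1
+-  * marks IDRC-compacted data. */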
+- data[0] = 0x01;
+- data[1] = data[2] = data[3] = 0x00;
+- if (count >= 4194304) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xsee parm");
+-#endif /* TAPE_DEBUG */
+- kfree(data);
+- return NULL;
+- }
+- if (((tape34xx_disc_data_t *) ti->discdata)->modeset_byte & 0x08) /* IDRC on */
+- data[1] |= 0x80;
+- data[3] += count % 256;
+- data[2] += (count / 256) % 256;
+- data[1] += (count / 65536);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xsee id:");
+- debug_int_event (tape_debug_area,6,count);
+-#endif /* TAPE_DEBUG */
+- cqr = tape_alloc_ccw_req (ti, 3, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xsee nomem");
+-#endif /* TAPE_DEBUG */
+- kfree (data);
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- ccw->cmd_code = LOCATE;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 4;
+- set_normalized_cda (ccw, (unsigned long) data);
+- ccw++;
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = data;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_LBL_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xsee ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTTELL: Tell block. Return the block number relative to the current file.
+- */
+-ccw_req_t *
+-tape34xx_mttell (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- void *mem;
+- cqr = tape_alloc_ccw_req (ti, 2, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xtel nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- mem = kmalloc (8, GFP_KERNEL);
+- if (!mem) {
+- tape_free_request (cqr);
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xtel nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+-
+- ccw->cmd_code = READ_BLOCK_ID;
+- ccw->flags = 0;
+- ccw->count = 8;
+- set_normalized_cda (ccw, (unsigned long) mem);
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = mem;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_RBI_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xtel ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTSETDRVBUFFER: Set the tape drive buffer code to 'count'.
+- * Implemented as a NOP.
+- */
+-ccw_req_t *
+-tape34xx_mtsetdrvbuffer (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- cqr = tape_alloc_ccw_req (ti, 2, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xbuf nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_NOP_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xbuf ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTLOCK: Lock the tape drive door.
+- * Implemented as a NOP CCW.
+- */
+-ccw_req_t *
+-tape34xx_mtlock (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- cqr = tape_alloc_ccw_req (ti, 2, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xloc nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_NOP_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xloc ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTUNLOCK: Unlock the tape drive door.
+- * Implemented as a NOP CCW.
+- */
+-ccw_req_t *
+-tape34xx_mtunlock (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- cqr = tape_alloc_ccw_req (ti, 2, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xulk nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_NOP_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xulk ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTLOAD: Load the tape.
+- * Not implemented here; returning NULL makes the frontend wait for
+- * a medium to be loaded. 3480/3490-type tapes have no load command.
+- */
+-ccw_req_t *
+-tape34xx_mtload (tape_info_t * ti, int count)
+-{
+- return NULL;
+-}
+-
+-/*
+- * MTUNLOAD: Rewind the tape and unload it.
+- */
+-ccw_req_t *
+-tape34xx_mtunload (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- cqr = tape_alloc_ccw_req (ti, 3, 32);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xunl nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- ccw->cmd_code = REWIND_UNLOAD;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- ccw++;
+- ccw->cmd_code = SENSE;
+- ccw->flags = 0;
+- ccw->count = 32;
+- ccw->cda = (unsigned long) cqr->cpaddr;
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_RUN_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xunl ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTCOMPRESSION: enable or disable compression.
+- * Switches IDRC on/off.
+- */
+-ccw_req_t *
+-tape34xx_mtcompression (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- if ((count < 0) || (count > 1)) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xcom parm");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- if (count == 0)
+- ((tape34xx_disc_data_t *) ti->discdata)->modeset_byte = 0x00; /* IDRC off */
+- else
+- ((tape34xx_disc_data_t *) ti->discdata)->modeset_byte = 0x08; /* IDRC on */
+-
+- cqr = tape_alloc_ccw_req (ti, 2, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xcom nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_NOP_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xcom ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTSETPART: Move the tape head to the partition numbered 'count'.
+- * Implemented as a NOP CCW.
+- */
+-ccw_req_t *
+-tape34xx_mtsetpart (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- cqr = tape_alloc_ccw_req (ti, 2, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xspa nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_NOP_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xspa ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTMKPART: dummy operation.
+- * Implemented as a NOP CCW.
+- */
+-ccw_req_t *
+-tape34xx_mtmkpart (tape_info_t * ti, int count)
+-{
+- long lockflags;
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- cqr = tape_alloc_ccw_req (ti, 2, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xnpa nomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- ccw++;
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- s390irq_spin_lock_irqsave (ti->devinfo.irq, lockflags);
+- ti->kernbuf = NULL;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_NOP_INIT);
+- s390irq_spin_unlock_irqrestore (ti->devinfo.irq, lockflags);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xnpa ccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-
+-/*
+- * MTIOCGET: query the tape drive status.
+- */
+-ccw_req_t *
+-tape34xx_mtiocget (tape_info_t * ti, int count)
+-{
+- return NULL;
+-}
+-
+-/*
+- * MTIOCPOS: query the tape position.
+- */
+-ccw_req_t *
+-tape34xx_mtiocpos (tape_info_t * ti, int count)
+-{
+- return NULL;
+-}
+-
+-ccw_req_t *
+-tape34xx_bread (struct request *req, tape_info_t *ti, int tapeblock_major)
+-{
+- ccw_req_t *cqr;
+- ccw1_t *ccw;
+- __u8 *data;
+- int s2b = blksize_size[tapeblock_major][ti->blk_minor]/hardsect_size[tapeblock_major][ti->blk_minor];
+- int realcount;
+- int size,bhct = 0;
+- struct buffer_head* bh;
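+- /* Count the buffer heads, splitting oversized ones, to size the
+-  * channel program: one READ_FORWARD CCW per tape block. */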
+- for (bh = req->bh; bh; bh = bh->b_reqnext) {
+- if (bh->b_size > blksize_size[tapeblock_major][ti->blk_minor])
+- for (size = 0; size < bh->b_size; size += blksize_size[tapeblock_major][ti->blk_minor])
+- bhct++;
+- else
+- bhct++;
+- }
+- if ((data = kmalloc (4 * sizeof (__u8), GFP_ATOMIC)) == NULL) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,3,"xBREDnomem");
+-#endif /* TAPE_DEBUG */
+- return NULL;
+- }
+- data[0] = 0x01;
+- data[1] = data[2] = data[3] = 0x00;
+- realcount=req->sector/s2b;
+- if (((tape34xx_disc_data_t *) ti->discdata)->modeset_byte & 0x08) /* IDRC on */
+- data[1] |= 0x80;
+- data[3] += realcount % 256;
+- data[2] += (realcount / 256) % 256;
+- data[1] += (realcount / 65536);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xBREDid:");
+- debug_int_event (tape_debug_area,6,realcount);
+-#endif /* TAPE_DEBUG */
+- cqr = tape_alloc_ccw_req (ti, 2+bhct+1, 0);
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,6,"xBREDnomem");
+-#endif /* TAPE_DEBUG */
+- kfree(data);
+- return NULL;
+- }
+- ccw = cqr->cpaddr;
+- ccw->cmd_code = MODE_SET_DB;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 1;
+- set_normalized_cda (ccw, (unsigned long) (&(((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)));
+- if (realcount!=ti->position) {
+- ccw++;
+- ccw->cmd_code = LOCATE;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->count = 4;
+- set_normalized_cda (ccw, (unsigned long) data);
+- }
+- ti->position=realcount+req->nr_sectors/s2b;
+- for (bh=req->bh;bh!=NULL;) {
+- ccw->flags = CCW_FLAG_CC;
+- if (bh->b_size >= blksize_size[tapeblock_major][ti->blk_minor]) {
+- for (size = 0; size < bh->b_size; size += blksize_size[tapeblock_major][ti->blk_minor]) {
+- ccw++;
+- ccw->flags = CCW_FLAG_CC;
+- ccw->cmd_code = READ_FORWARD;
+- ccw->count = blksize_size[tapeblock_major][ti->blk_minor];
+- set_normalized_cda (ccw, __pa (bh->b_data + size));
+- }
+- bh = bh->b_reqnext;
+- } else { /* group N buffer heads to fill one tape block */
+- for (size = 0; bh != NULL && size < blksize_size[tapeblock_major][ti->blk_minor];) {
+- ccw++;
+- ccw->flags = CCW_FLAG_DC;
+- ccw->cmd_code = READ_FORWARD;
+- ccw->count = bh->b_size;
+- set_normalized_cda (ccw, __pa (bh->b_data));
+- size += bh->b_size;
+- bh = bh->b_reqnext;
+- }
+- if (size != blksize_size[tapeblock_major][ti->blk_minor]) {
+- PRINT_WARN ("Cannot fulfill small request %d vs. %d (%ld sects)\n",
+- size,
+- blksize_size[tapeblock_major][ti->blk_minor],
+- req->nr_sectors);
+- kfree(data);
+- tape_free_request (cqr);
+- return NULL;
+- }
+- }
+- }
+- ccw->flags &= ~CCW_FLAG_DC;
+- ccw->flags |= CCW_FLAG_CC;
+- ccw++;
+- ccw->cmd_code = NOP;
+- ccw->flags = 0;
+- ccw->count = 0;
+- ccw->cda = (unsigned long) (&(ccw->cmd_code));
+- ti->kernbuf = data;
+- ti->userbuf = NULL;
+- tapestate_set (ti, TS_BLOCK_INIT);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xBREDccwg");
+-#endif /* TAPE_DEBUG */
+- return cqr;
+-}
+-void
+-tape34xx_free_bread (ccw_req_t *cqr, struct _tape_info_t *ti)
+-{
+- ccw1_t* ccw;
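+- /* Walk the channel program while the chaining flags are set and
+-  * free the IDAL buffers allocated via set_normalized_cda(). */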
+- for (ccw=(ccw1_t*)cqr->cpaddr;(ccw->flags & CCW_FLAG_CC)||(ccw->flags & CCW_FLAG_DC);ccw++)
+- if ((ccw->cmd_code == MODE_SET_DB) ||
+- (ccw->cmd_code == LOCATE) ||
+- (ccw->cmd_code == READ_FORWARD))
+- clear_normalized_cda(ccw);
+- tape_free_request(cqr);
+- kfree(ti->kernbuf);
+- ti->kernbuf=NULL;
+-}
+-
+-/* event handlers */
+-void
+-tape34xx_default_handler (tape_info_t * ti)
+-{
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xdefhandle");
+-#endif /* TAPE_DEBUG */
+- PRINT_ERR ("TAPE34XX: An unexpected Unit Check occurred.\n");
+- PRINT_ERR ("TAPE34XX: Please read Documentation/s390/TAPE and report it!\n");
+- PRINT_ERR ("TAPE34XX: Current state is: %s",
+- (((tapestate_get (ti) < TS_SIZE) && (tapestate_get (ti) >= 0)) ?
+- state_verbose[tapestate_get (ti)] : "->UNKNOWN STATE<-"));
+- tape_dump_sense (&ti->devstat);
+- ti->rc = -EIO;
+- ti->wanna_wakeup=1;
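+- /* Hand the failure back to whoever is waiting: block requests
+-  * are finished via the block I/O bottom half, everything else
+-  * via the wait queue. */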
+- switch (tapestate_get(ti)) {
+- case TS_REW_RELEASE_INIT:
+- tapestate_set(ti,TS_FAILED);
+- wake_up (&ti->wq);
+- break;
+- case TS_BLOCK_INIT:
+- tapestate_set(ti,TS_FAILED);
+- schedule_tapeblock_exec_IO(ti);
+- break;
+- default:
+- tapestate_set(ti,TS_FAILED);
+- wake_up_interruptible (&ti->wq);
+- }
+-}
+-
+-void
+-tape34xx_unexpect_uchk_handler (tape_info_t * ti)
+-{
+- if ((ti->devstat.ii.sense.data[0] == 0x40) &&
+- (ti->devstat.ii.sense.data[1] == 0x40) &&
+- (ti->devstat.ii.sense.data[3] == 0x43)) {
+- // no tape in the drive
+- PRINT_INFO ("Drive %d not ready. No volume loaded.\n", ti->rew_minor / 2);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"xuuh nomed");
+-#endif /* TAPE_DEBUG */
+- tapestate_set (ti, TS_FAILED);
+- ti->rc = -ENOMEDIUM;
+- ti->wanna_wakeup=1;
+- wake_up_interruptible (&ti->wq);
+- } else if ((ti->devstat.ii.sense.data[0] == 0x42) &&
+- (ti->devstat.ii.sense.data[1] == 0x44) &&
+- (ti->devstat.ii.sense.data[3] == 0x3b)) {
+- PRINT_INFO ("Media in drive %d was changed!\n",
+- ti->rew_minor / 2);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"xuuh medchg");
+-#endif
+- /* nothing to do. chan end & dev end will be reported when io is finished */
+- } else {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"xuuh unexp");
+- debug_text_event (tape_debug_area,3,"state:");
+- debug_text_event (tape_debug_area,3,((tapestate_get (ti) < TS_SIZE) &&
+- (tapestate_get (ti) >= 0)) ?
+- state_verbose[tapestate_get (ti)] :
+- "TS UNKNOWN");
+-#endif /* TAPE_DEBUG */
+- tape34xx_default_handler (ti);
+- }
+-}
+-
+-void
+-tape34xx_unused_done (tape_info_t * ti)
+-{
+- if (ti->medium_is_unloaded) {
+- // A medium was inserted in the drive!
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xuui med");
+-#endif /* TAPE_DEBUG */
+- PRINT_WARN ("A medium was inserted into the tape.\n");
+- ti->medium_is_unloaded=0;
+- } else {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"unsol.irq!");
+- debug_text_event (tape_debug_area,3,"dev end");
+- debug_int_exception (tape_debug_area,3,ti->devinfo.irq);
+-#endif /* TAPE_DEBUG */
+- PRINT_WARN ("Unsolicited IRQ (Device End) caught in unused state.\n");
+- tape_dump_sense (&ti->devstat);
+- }
+-}
+-
+-
+-void
+-tape34xx_idle_done (tape_info_t * ti)
+-{
+- if (ti->medium_is_unloaded) {
+- // A medium was inserted in the drive!
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"xuud med");
+-#endif /* TAPE_DEBUG */
+- PRINT_WARN ("A medium was inserted into the tape.\n");
+- ti->medium_is_unloaded=0;
+- wake_up_interruptible (&ti->wq);
+- } else {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"unsol.irq!");
+- debug_text_event (tape_debug_area,3,"dev end");
+- debug_int_exception (tape_debug_area,3,ti->devinfo.irq);
+-#endif /* TAPE_DEBUG */
+- PRINT_WARN ("Unsolicited IRQ (Device End) caught in idle state.\n");
+- tape_dump_sense (&ti->devstat);
+- }
+-}
+-
+-void
+-tape34xx_block_done (tape_info_t * ti)
+-{
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"x:bREQdone");
+-#endif /* TAPE_DEBUG */
+- tapestate_set(ti,TS_DONE);
+- schedule_tapeblock_exec_IO(ti);
+-}
+-
+-void
+-tape34xx_bsf_init_done (tape_info_t * ti)
+-{
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"bsf done");
+-#endif
+- tapestate_set (ti, TS_DONE);
+- ti->rc = 0;
+- ti->wanna_wakeup=1;
+- wake_up_interruptible (&ti->wq);
+-}
+-
+-void
+-tape34xx_dse_init_done (tape_info_t * ti)
+-{
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"dse done");
+-#endif
+- tapestate_set (ti, TS_DONE);
+- ti->rc = 0;
+- ti->wanna_wakeup=1;
+- wake_up_interruptible (&ti->wq);
+-}
+-
+-void
+-tape34xx_fsf_init_done (tape_info_t * ti)
+-{
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"fsf done");
+-#endif
+- tapestate_set (ti, TS_DONE);
+- ti->rc = 0;
+- ti->wanna_wakeup=1;
+- wake_up_interruptible (&ti->wq);
+-}
+-
+-void
+-tape34xx_fsb_init_done (tape_info_t * ti)
+-{
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"fsb done");
+-#endif
+- tapestate_set (ti, TS_DONE);
+- ti->rc = 0;
+- ti->wanna_wakeup=1;
+- wake_up_interruptible (&ti->wq);
+-}
+-
+-void
+-tape34xx_bsb_init_done (tape_info_t * ti)
+-{
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"bsb done");
+-#endif
+- tapestate_set (ti, TS_DONE);
+- ti->rc = 0;
+- ti->wanna_wakeup=1;
+- wake_up (&ti->wq);
+-}
+-
+-void
+-tape34xx_lbl_init_done (tape_info_t * ti)
+-{
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"lbl done");
+-#endif
+- tapestate_set (ti, TS_DONE);
+- ti->rc = 0;
+- //s390irq_spin_unlock(tape->devinfo.irq);
+- ti->wanna_wakeup=1;
+- wake_up (&ti->wq);
+-}
+-
+-void
+-tape34xx_nop_init_done (tape_info_t * ti)
+-{
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"nop done..");
+- debug_text_exception (tape_debug_area,6,"or rew/rel");
+-#endif
+- tapestate_set (ti, TS_DONE);
+- ti->rc = 0;
+- //s390irq_spin_unlock(tape->devinfo.irq);
+- ti->wanna_wakeup=1;
+- wake_up (&ti->wq);
+-}
+-
+-void
+-tape34xx_rfo_init_done (tape_info_t * ti)
+-{
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"rfo done");
+-#endif
+- tapestate_set (ti, TS_DONE);
+- ti->rc = 0;
+- ti->wanna_wakeup=1;
+- wake_up (&ti->wq);
+-}
+-
+-void
+-tape34xx_rbi_init_done (tape_info_t * ti)
+-{
+- __u8 *data;
+-#ifdef TAPE_DEBUG
+- int i;
+-#endif
+- tapestate_set (ti, TS_FAILED);
+- data = ti->kernbuf;
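+-	// The buffer holds the returned channel block ID; bytes 1-3 carry
+-	// the block number (22 bits, top two bits of byte 1 masked off).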
+- ti->rc = data[3];
+- ti->rc += 256 * data[2];
+- ti->rc += 65536 * (data[1] & 0x3F);
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"rbi done");
+- debug_text_event (tape_debug_area,6,"data:");
+- for (i=0;i<8;i++)
+- debug_int_event (tape_debug_area,6,data[i]);
+-#endif
+- ti->wanna_wakeup=1;
+- wake_up_interruptible (&ti->wq);
+-}
+-
+-void
+-tape34xx_rew_init_done (tape_info_t * ti)
+-{
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"rew done");
+-#endif
+- //BH: use irqsave
+- //s390irq_spin_lock(tape->devinfo.irq);
+- tapestate_set (ti, TS_DONE);
+- ti->rc = 0;
+- //s390irq_spin_unlock(tape->devinfo.irq);
+- ti->wanna_wakeup=1;
+- wake_up_interruptible (&ti->wq);
+-}
+-
+-void
+-tape34xx_rew_release_init_done (tape_info_t * ti)
+-{
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"rewR done");
+-#endif
+- tapestate_set (ti, TS_DONE);
+- ti->rc = 0;
+- //s390irq_spin_unlock(tape->devinfo.irq);
+- ti->wanna_wakeup=1;
+- wake_up (&ti->wq);
+-}
+-
+-void
+-tape34xx_run_init_done (tape_info_t * ti)
+-{
+-#ifdef TAPE_DEBUG
+-	debug_text_event (tape_debug_area,6,"run done");
+-#endif
+- tapestate_set (ti, TS_DONE);
+- ti->rc = 0;
+- ti->wanna_wakeup=1;
+- wake_up_interruptible (&ti->wq);
+-}
+-
+-void
+-tape34xx_wri_init_done (tape_info_t * ti)
+-{
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"wri done");
+-#endif
+- //BH: use irqsave
+- //s390irq_spin_lock(ti->devinfo.irq);
+- tapestate_set (ti, TS_DONE);
+- ti->rc = 0;
+- //s390irq_spin_unlock(ti->devinfo.irq);
+- ti->wanna_wakeup=1;
+- wake_up_interruptible (&ti->wq);
+-}
+-
+-void
+-tape34xx_wtm_init_done (tape_info_t * ti)
+-{
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"wtm done");
+-#endif
+- tapestate_set (ti, TS_DONE);
+- ti->rc = 0;
+- ti->wanna_wakeup=1;
+- wake_up_interruptible (&ti->wq);
+-}
+-
+-/* This function analyses the tape's sense data in case of a unit check. If possible,
+-   it tries to recover from the error. Otherwise the user is informed about the problem. */
+-void
+-tape34xx_error_recovery (tape_info_t* ti)
+-{
+- __u8* sense=ti->devstat.ii.sense.data;
+- int inhibit_cu_recovery=0;
+- int cu_type=ti->discipline->cu_type;
+- if ((((tape34xx_disc_data_t *) ti->discdata)->modeset_byte)&0x80) inhibit_cu_recovery=1;
+- if (tapestate_get(ti)==TS_BLOCK_INIT) {
+- // no recovery for block device, bottom half will retry...
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- }
+- if (sense[0]&SENSE_COMMAND_REJECT)
+- switch (tapestate_get(ti)) {
+- case TS_BLOCK_INIT:
+- case TS_DSE_INIT:
+- case TS_EGA_INIT:
+- case TS_WRI_INIT:
+- case TS_WTM_INIT:
+- if (sense[1]&SENSE_WRITE_PROTECT) {
+- // trying to write, but medium is write protected
+- tape34xx_error_recovery_has_failed(ti,EACCES);
+- return;
+- }
+- default:
+- tape34xx_error_recovery_HWBUG(ti,1);
+- return;
+- }
+- // special cases for various tape-states when reaching end of recorded area
+- if (((sense[0]==0x08) || (sense[0]==0x10) || (sense[0]==0x12)) &&
+- ((sense[1]==0x40) || (sense[1]==0x0c)))
+- switch (tapestate_get(ti)) {
+- case TS_FSF_INIT:
+- // Trying to seek beyond end of recorded area
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case TS_LBL_INIT:
+- // Block could not be located.
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case TS_RFO_INIT:
+- // Try to read beyond end of recorded area -> 0 bytes read
+- tape34xx_error_recovery_has_failed(ti,0);
+- return;
+- }
+- // Sensing special bits
+- if (sense[0]&SENSE_BUS_OUT_CHECK) {
+- tape34xx_error_recovery_do_retry(ti);
+- return;
+- }
+- if (sense[0]&SENSE_DATA_CHECK) {
+- // hardware failure, damaged tape or improper operating conditions
+- switch (sense[3]) {
+- case 0x23:
+- // a read data check occurred
+- if ((sense[2]&SENSE_TAPE_SYNC_MODE) ||
+- (inhibit_cu_recovery)) {
+- // data check is not permanent, may be recovered.
+- // We always use async-mode with cu-recovery, so this should *never* happen.
+- tape34xx_error_recovery_HWBUG(ti,2);
+- return;
+- } else {
+- // data check is permanent, CU recovery has failed
+- PRINT_WARN("Permanent read error, recovery failed!\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- }
+- case 0x25:
+- // a write data check occurred
+- if ((sense[2]&SENSE_TAPE_SYNC_MODE) ||
+- (inhibit_cu_recovery)) {
+- // data check is not permanent, may be recovered.
+- // We always use async-mode with cu-recovery, so this should *never* happen.
+- tape34xx_error_recovery_HWBUG(ti,3);
+- return;
+- } else {
+- // data check is permanent, cu-recovery has failed
+- PRINT_WARN("Permanent write error, recovery failed!\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- }
+- case 0x26:
+- // Data Check (read opposite) occurred. We'll recover this.
+- tape34xx_error_recovery_read_opposite(ti);
+- return;
+- case 0x28:
+- // The ID-Mark at the beginning of the tape could not be written. This is fatal, we'll report and exit.
+- PRINT_WARN("ID-Mark could not be written. Check your hardware!\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x31:
+- // Tape void. Tried to read beyond end of device. We'll report and exit.
+-		PRINT_WARN("Tried to read beyond end of recorded area!\n");
+- tape34xx_error_recovery_has_failed(ti,ENOSPC);
+- return;
+- case 0x41:
+- // Record sequence error. cu detected incorrect block-id sequence on tape. We'll report and exit.
+- PRINT_WARN("Illegal block-id sequence found!\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- default:
+-			// Well, all data checks for 3480 should result in one of the above erpa codes. If not -> bug.
+-			// On 3490, other data-check conditions do exist.
+- if (cu_type==0x3480) {
+- tape34xx_error_recovery_HWBUG(ti,4);
+- return;
+- }
+- }
+- }
+- if (sense[0]&SENSE_OVERRUN) {
+-		// A data overrun between cu and drive occurred. The channel speed is too slow! We'll report this and exit!
+- switch (sense[3]) {
+- case 0x40: // overrun error
+- PRINT_WARN ("Data overrun error between control-unit and drive. Use a faster channel connection, if possible! \n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- default:
+- // Overrun bit is set, but erpa does not show overrun error. This is a bug.
+- tape34xx_error_recovery_HWBUG(ti,5);
+- return;
+- }
+- }
+- if (sense[1]&SENSE_RECORD_SEQUENCE_ERR) {
+- switch (sense[3]) {
+- case 0x41:
+- // Record sequence error. cu detected incorrect block-id sequence on tape. We'll report and exit.
+- PRINT_WARN("Illegal block-id sequence found!\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- default:
+- // Record sequence error bit is set, but erpa does not show record sequence error. This is a bug.
+- tape34xx_error_recovery_HWBUG(ti,6);
+- return;
+- }
+- }
+- // Sensing erpa codes
+- switch (sense[3]) {
+- case 0x00:
+- // Everything is fine, but we got a unit check. Report and ignore!
+- PRINT_WARN ("Non-error sense was found. Unit-check will be ignored, expect errors...\n");
+- return;
+- case 0x21:
+-		// Data streaming not operational. The cu switches to interlock mode; we reissue the command.
+-		PRINT_WARN ("Data streaming not operational. Switching to interlock mode!\n");
+- tape34xx_error_recovery_do_retry(ti);
+- return;
+- case 0x22:
+-		// Path equipment check. Might be a drive adapter error, a buffer error on the lower interface, an internal path not usable, or an error during cartridge load.
+-		// None of the above is recoverable.
+-		PRINT_WARN ("A path equipment check occurred. One of the following conditions occurred:\n");
+-		PRINT_WARN ("drive adapter error, buffer error on the lower interface, internal path not usable, error during cartridge load.\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x23:
+-		// Read data check. Should have been covered earlier -> Bug!
+- tape34xx_error_recovery_HWBUG(ti,7);
+- return;
+- case 0x24:
+-		// Load display check. A load display command was issued, but the drive is displaying a drive check message. Can be treated as "device end".
+- tape34xx_error_recovery_succeded(ti);
+- return;
+- case 0x25:
+- // Write data check. Should have been covered earlier -> Bug!
+- tape34xx_error_recovery_HWBUG(ti,8);
+- return;
+- case 0x26:
+- // Data check (read opposite). Should have been covered earlier -> Bug!
+- tape34xx_error_recovery_HWBUG(ti,9);
+- return;
+- case 0x27:
+-		// Command reject. May indicate an illegal channel program or a buffer over/underrun.
+-		// Since all channel programs are issued by this driver and ought to be correct,
+-		// we assume an over/underrun situation and retry the channel program.
+- tape34xx_error_recovery_do_retry(ti);
+- return;
+- case 0x28:
+-		// Write id mark check. Should have been covered earlier -> bug!
+- tape34xx_error_recovery_HWBUG(ti,10);
+- return;
+- case 0x29:
+-		// Function incompatible. Either idrc is on but the hardware is not capable of doing idrc,
+-		// or a perform subsystem function was issued while the cu is not online. Anyway, this
+-		// cannot be recovered and is an I/O error.
+-		PRINT_WARN ("Function incompatible. Try to switch off idrc!\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x2a:
+-		// Unsolicited environmental data. An internal counter overflowed; we can ignore
+-		// this and reissue the command.
+- tape34xx_error_recovery_do_retry(ti);
+- return;
+- case 0x2b:
+- // Environmental data present. Indicates either unload completed ok or read buffered
+- // log command completed ok.
+- if (tapestate_get(ti)==TS_RUN_INIT) {
+- // Rewind unload completed ok.
+- tape34xx_error_recovery_succeded(ti);
+- return;
+- }
+- // Since we do not issue read buffered log commands, this should never occur -> bug.
+- tape34xx_error_recovery_HWBUG(ti,11);
+- return;
+- case 0x2c:
+- // Permanent equipment check. cu has tried recovery, but did not succeed. This is an
+- // I/O error.
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x2d:
+- // Data security erase failure.
+- if (tapestate_get(ti)==TS_DSE_INIT) {
+- // report an I/O error
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- }
+- // Data security erase failure, but no such command issued. This is a bug.
+- tape34xx_error_recovery_HWBUG(ti,12);
+- return;
+- case 0x2e:
+-		// Not capable. This indicates either that the drive fails reading the format id mark
+-		// or that the format specified is not supported by the drive. We write a message and
+-		// return an I/O error.
+-		PRINT_WARN("Drive not capable of processing the tape format!\n");
+- tape34xx_error_recovery_has_failed(ti,EMEDIUMTYPE);
+- return;
+- case 0x2f:
+- // This erpa is reserved. This is a bug.
+- tape34xx_error_recovery_HWBUG(ti,13);
+- return;
+- case 0x30:
+-		// The medium is write protected while we are trying to write on it. We'll report this.
+- PRINT_WARN("Medium is write protected!\n");
+- tape34xx_error_recovery_has_failed(ti,EACCES);
+- return;
+- case 0x31:
+-		// Tape void. Should have been covered earlier -> bug
+- tape34xx_error_recovery_HWBUG(ti,14);
+- return;
+- case 0x32:
+- // Tension loss. We cannot recover this, it's an I/O error.
+- PRINT_WARN("The drive lost tape tension.\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x33:
+-		// Load Failure. The cartridge was not inserted correctly or the tape is not threaded
+-		// correctly. We cannot recover this; the user has to reload the cartridge.
+- PRINT_WARN("Cartridge load failure. Reload the cartridge and try again.\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x34:
+- // Unload failure. The drive cannot maintain tape tension and control tape movement
+- // during an unload operation.
+- PRINT_WARN("Failure during cartridge unload. Please try manually.\n");
+- if (tapestate_get(ti)!=TS_RUN_INIT) {
+- tape34xx_error_recovery_HWBUG(ti,15);
+- return;
+- }
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x35:
+- // Drive equipment check. One of the following:
+- // - cu cannot recover from a drive detected error
+- // - a check code message is displayed on drive message/load displays
+- // - the cartridge loader does not respond correctly
+- // - a failure occurs during an index, load, or unload cycle
+- PRINT_WARN("Equipment check! Please check the drive and the cartridge loader.\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x36:
+- switch (cu_type) {
+- case 0x3480:
+- // This erpa is reserved for 3480 -> BUG
+- tape34xx_error_recovery_HWBUG(ti,16);
+- return;
+- case 0x3490:
+- // End of data. This is a permanent I/O error, which cannot be recovered.
+- // A read-type command has reached the end-of-data mark.
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- }
+- case 0x37:
+- // Tape length error. The tape is shorter than reported in the beginning-of-tape data.
+- PRINT_WARN("Tape length error.\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x38:
+- // Physical end of tape. A read/write operation reached the physical end of tape.
+- if (tapestate_get(ti)==TS_WRI_INIT ||
+- tapestate_get(ti)==TS_DSE_INIT ||
+- tapestate_get(ti)==TS_EGA_INIT ||
+- tapestate_get(ti)==TS_WTM_INIT){
+- tape34xx_error_recovery_has_failed(ti,ENOSPC);
+- } else {
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- }
+- return;
+- case 0x39:
+-		// Backward at BOT. The drive is at BOT and is requested to move backward.
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x3a:
+- // Drive switched not ready, but the command needs the drive to be ready.
+- PRINT_WARN("Drive not ready. Turn the ready/not ready switch to ready position and try again.\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x3b:
+- // Manual rewind or unload. This causes an I/O error.
+-		PRINT_WARN("Medium was rewound or unloaded manually. Expect errors! Please use only the mtoffl and mtrew ioctls to unload or rewind tapes.\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x3c:
+- case 0x3d:
+- case 0x3e:
+- case 0x3f:
+- // These erpas are reserved -> BUG
+- tape34xx_error_recovery_HWBUG(ti,17);
+- return;
+- case 0x40:
+- // Overrun error. This should have been covered earlier -> bug.
+- tape34xx_error_recovery_HWBUG(ti,18);
+- return;
+- case 0x41:
+- // Record sequence error. This should have been covered earlier -> bug.
+- tape34xx_error_recovery_HWBUG(ti,19);
+- return;
+- case 0x42:
+-		// Degraded mode. A condition that can cause degraded performance was detected.
+-		PRINT_WARN("Subsystem is running in degraded mode. This may compromise your performance.\n");
+- tape34xx_error_recovery_do_retry(ti);
+- return;
+- case 0x43:
+-		// Drive not ready. Probably switch the ready/not ready switch to ready?
+- PRINT_WARN("The drive is not ready. Maybe no medium in?\n");
+- tape34xx_error_recovery_has_failed(ti,ENOMEDIUM);
+- return;
+- case 0x44:
+-		// Locate Block unsuccessful. We'll report this.
+- if ((tapestate_get(ti)!=TS_BLOCK_INIT) &&
+- (tapestate_get(ti)!=TS_LBL_INIT)) {
+- tape34xx_error_recovery_HWBUG(ti,20); // No locate block was issued...
+- return;
+- }
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x45:
+- // The drive is assigned elsewhere [to a different channel path/computer].
+- PRINT_WARN("The drive is assigned elsewhere.\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x46:
+- // Drive not online. Drive may be switched offline, the power supply may be switched off
+- // or the drive address may not be set correctly.
+- PRINT_WARN("The drive is not online.");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x47:
+- // Volume fenced. cu reports volume integrity is lost!
+- PRINT_WARN("Volume fenced. The volume integrity is lost! \n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x48:
+- // Log sense data and retry request. We'll do so...
+- tape34xx_error_recovery_do_retry(ti);
+- return;
+- case 0x49:
+-		// Bus out check. A parity check error on the bus was found.
+-		PRINT_WARN("Bus out check. A data transfer over the bus was corrupted.\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x4a:
+- // Control unit erp failed. We'll report this.
+- PRINT_WARN("The control unit failed recovering an I/O error.\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x4b:
+- // Cu and drive incompatible. The drive requests micro-program patches, which are not available on the cu.
+- PRINT_WARN("The drive needs microprogram patches from the control unit, which are not available.\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x4c:
+- // Recovered Check-One failure. Cu develops a hardware error, but is able to recover. We'll reissue the command.
+- tape34xx_error_recovery_do_retry(ti);
+- return;
+- case 0x4d:
+- switch (cu_type) {
+- case 0x3480:
+- // This erpa is reserved for 3480 -> bug
+- tape34xx_error_recovery_HWBUG(ti,21);
+- return;
+- case 0x3490:
+- // Resetting event received. Since the driver does not support resetting event recovery
+- // (which has to be handled by the I/O Layer), we'll report and retry our command.
+- tape34xx_error_recovery_do_retry(ti);
+- return;
+- }
+- case 0x4e:
+- switch (cu_type) {
+- case 0x3480:
+- // This erpa is reserved for 3480 -> bug.
+- tape34xx_error_recovery_HWBUG(ti,22);
+- return;
+- case 0x3490:
+-			// Maximum block size exceeded. This indicates that the block to be written is larger
+- // than allowed for buffered mode. We'll report this...
+- PRINT_WARN("Maximum block size for buffered mode exceeded.\n");
+- tape34xx_error_recovery_has_failed(ti,ENOBUFS);
+- return;
+- }
+- case 0x4f:
+- // These erpas are reserved -> bug
+- tape34xx_error_recovery_HWBUG(ti,23);
+- return;
+- case 0x50:
+-		// Read buffered log (Overflow). Cu is running in extended buffered log mode, and a counter overflows.
+- // This should never happen, since we're never running in extended buffered log mode -> bug.
+- tape34xx_error_recovery_do_retry(ti);
+- return;
+- case 0x51:
+- // Read buffered log (EOV). EOF processing occurs while the cu is in extended buffered log mode.
+- // This should never happen, since we're never running in extended buffered log mode -> bug.
+- tape34xx_error_recovery_do_retry(ti);
+- return;
+- case 0x52:
+- // End of Volume complete. Rewind unload completed ok. We'll report to the user...
+- if (tapestate_get(ti)!=TS_RUN_INIT) {
+- tape34xx_error_recovery_HWBUG(ti,24);
+- return;
+- }
+- tape34xx_error_recovery_succeded(ti);
+- return;
+- case 0x53:
+- // Global command intercept. We'll have to reissue our command.
+- tape34xx_error_recovery_do_retry(ti);
+- return;
+- case 0x54:
+- // Channel interface recovery (temporary). This can be recovered by reissuing the command.
+- tape34xx_error_recovery_do_retry(ti);
+- return;
+- case 0x55:
+- // Channel interface recovery (permanent). This cannot be recovered, we'll inform the user.
+- PRINT_WARN("A permanent channel interface error occurred.\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x56:
+- // Channel protocol error. This cannot be recovered.
+- PRINT_WARN("A channel protocol error occurred.\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x57:
+- switch (cu_type) {
+- case 0x3480:
+- // Attention intercept. We have to reissue the command.
+- PRINT_WARN("An attention intercept occurred, which will be recovered.\n");
+- tape34xx_error_recovery_do_retry(ti);
+- return;
+- case 0x3490:
+- // Global status intercept. We have to reissue the command.
+-			PRINT_WARN("A global status intercept was received, which will be recovered.\n");
+- tape34xx_error_recovery_do_retry(ti);
+- return;
+- }
+- case 0x58:
+- case 0x59:
+- // These erpas are reserved -> bug.
+- tape34xx_error_recovery_HWBUG(ti,25);
+- return;
+- case 0x5a:
+- // Tape length incompatible. The tape inserted is too long,
+- // which could cause damage to the tape or the drive.
+-		PRINT_WARN("Tape length incompatible [should be IBM Cartridge System Tape]. May cause damage to drive or tape.\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x5b:
+- // Format 3480 XF incompatible
+- if (sense[1]&SENSE_BEGINNING_OF_TAPE) {
+- // Everything is fine. The tape will be overwritten in a different format.
+- tape34xx_error_recovery_do_retry(ti);
+- return;
+- }
+-		PRINT_WARN("Tape format is incompatible with the drive, which writes 3480-2 XF.\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x5c:
+- // Format 3480-2 XF incompatible
+-		PRINT_WARN("Tape format is incompatible with the drive. The drive cannot access 3480-2 XF volumes.\n");
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- return;
+- case 0x5d:
+- // Tape length violation.
+- PRINT_WARN("Tape length violation [should be IBM Enhanced Capacity Cartridge System Tape]. May cause damage to drive or tape.\n");
+- tape34xx_error_recovery_has_failed(ti,EMEDIUMTYPE);
+- return;
+- case 0x5e:
+- // Compaction algorithm incompatible.
+-		PRINT_WARN("The volume is recorded using an incompatible compaction algorithm, which is not supported by the control unit.\n");
+- tape34xx_error_recovery_has_failed(ti,EMEDIUMTYPE);
+- return;
+- default:
+- // Reserved erpas -> bug
+- tape34xx_error_recovery_HWBUG(ti,26);
+- return;
+- }
+-}
+-
+-void tape34xx_error_recovery_has_failed (tape_info_t* ti,int error_id) {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"xerp fail");
+- debug_text_event (tape_debug_area,3,(((tapestate_get (ti) < TS_SIZE) &&
+- (tapestate_get (ti) >= 0)) ?
+- state_verbose[tapestate_get (ti)] : "UNKNOWN"));
+-#endif
+- if ((tapestate_get(ti)!=TS_UNUSED) && (tapestate_get(ti)!=TS_IDLE)) {
+- tape_dump_sense(&ti->devstat);
+- ti->rc = -error_id;
+- ti->wanna_wakeup=1;
+- switch (tapestate_get(ti)) {
+- case TS_REW_RELEASE_INIT:
+- case TS_RFO_INIT:
+- case TS_RBA_INIT:
+- tapestate_set(ti,TS_FAILED);
+- wake_up (&ti->wq);
+- break;
+- case TS_BLOCK_INIT:
+- tapestate_set(ti,TS_FAILED);
+- schedule_tapeblock_exec_IO(ti);
+- break;
+- default:
+- tapestate_set(ti,TS_FAILED);
+- wake_up_interruptible (&ti->wq);
+- }
+- } else {
+-		PRINT_WARN("Received an unsolicited IRQ.\n");
+- tape_dump_sense(&ti->devstat);
+- }
+-}
+-
+-void tape34xx_error_recovery_succeded(tape_info_t* ti) {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"xerp done");
+- debug_text_event (tape_debug_area,3,(((tapestate_get (ti) < TS_SIZE) &&
+- (tapestate_get (ti) >= 0)) ?
+- state_verbose[tapestate_get (ti)] : "UNKNOWN"));
+-#endif
+- if ((tapestate_get(ti)!=TS_UNUSED) && (tapestate_get(ti)!=TS_DONE)) {
+- tapestate_event (ti, TE_DONE);
+- } else {
+-		PRINT_WARN("Received an unsolicited IRQ.\n");
+- tape_dump_sense(&ti->devstat);
+- }
+-}
+-
+-void tape34xx_error_recovery_do_retry(tape_info_t* ti) {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"xerp retr");
+- debug_text_event (tape_debug_area,3,(((tapestate_get (ti) < TS_SIZE) &&
+- (tapestate_get (ti) >= 0)) ?
+- state_verbose[tapestate_get (ti)] : "UNKNOWN"));
+-#endif
+- if ((tapestate_get(ti)!=TS_UNUSED) && (tapestate_get(ti)!=TS_IDLE)) {
+- tape_dump_sense(&ti->devstat);
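+-	// Reissue the failed channel program; spin until do_IO accepts
+-	// the request.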
+- while (do_IO (ti->devinfo.irq, ti->cqr->cpaddr, (unsigned long) ti->cqr, 0x00, ti->cqr->options));
+- } else {
+-		PRINT_WARN("Received an unsolicited IRQ.\n");
+- tape_dump_sense(&ti->devstat);
+- }
+-}
+-
+-void
+-tape34xx_error_recovery_read_opposite (tape_info_t* ti) {
+- switch (tapestate_get(ti)) {
+- case TS_RFO_INIT:
+- // We did read forward, but the data could not be read *correctly*.
+- // We will read backward and then skip forward again.
+- ti->cqr=tape34xx_read_opposite(ti,0);
+- if (ti->cqr==NULL)
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- else
+- tape34xx_error_recovery_do_retry(ti);
+- break;
+- case TS_RBA_INIT:
+-		// We tried to read forward and backward, but had no success -> failed.
+- tape34xx_error_recovery_has_failed(ti,EIO);
+- break;
+- case TS_BLOCK_INIT:
+- tape34xx_error_recovery_do_retry(ti);
+- break;
+- default:
+- PRINT_WARN("read_opposite_recovery_called_with_state:%s\n",
+- (((tapestate_get (ti) < TS_SIZE) &&
+- (tapestate_get (ti) >= 0)) ?
+- state_verbose[tapestate_get (ti)] : "UNKNOWN"));
+- }
+-}
+-
+-void
+-tape34xx_error_recovery_HWBUG (tape_info_t* ti,int condno) {
+- devstat_t* stat=&ti->devstat;
+- PRINT_WARN("An unexpected condition #%d was caught in tape error recovery.\n",condno);
+- PRINT_WARN("Please report this incident.\n");
+- PRINT_WARN("State of the tape:%s\n",
+- (((tapestate_get (ti) < TS_SIZE) &&
+- (tapestate_get (ti) >= 0)) ?
+- state_verbose[tapestate_get (ti)] : "UNKNOWN"));
+- PRINT_INFO ("Sense data: %02X%02X%02X%02X %02X%02X%02X%02X "
+- " %02X%02X%02X%02X %02X%02X%02X%02X \n",
+- stat->ii.sense.data[0], stat->ii.sense.data[1],
+- stat->ii.sense.data[2], stat->ii.sense.data[3],
+- stat->ii.sense.data[4], stat->ii.sense.data[5],
+- stat->ii.sense.data[6], stat->ii.sense.data[7],
+- stat->ii.sense.data[8], stat->ii.sense.data[9],
+- stat->ii.sense.data[10], stat->ii.sense.data[11],
+- stat->ii.sense.data[12], stat->ii.sense.data[13],
+- stat->ii.sense.data[14], stat->ii.sense.data[15]);
+- PRINT_INFO ("Sense data: %02X%02X%02X%02X %02X%02X%02X%02X "
+- " %02X%02X%02X%02X %02X%02X%02X%02X \n",
+- stat->ii.sense.data[16], stat->ii.sense.data[17],
+- stat->ii.sense.data[18], stat->ii.sense.data[19],
+- stat->ii.sense.data[20], stat->ii.sense.data[21],
+- stat->ii.sense.data[22], stat->ii.sense.data[23],
+- stat->ii.sense.data[24], stat->ii.sense.data[25],
+- stat->ii.sense.data[26], stat->ii.sense.data[27],
+- stat->ii.sense.data[28], stat->ii.sense.data[29],
+- stat->ii.sense.data[30], stat->ii.sense.data[31]);
+- tape34xx_error_recovery_has_failed(ti,EIO);
+-}
+=== drivers/s390/char/tape34xx.h
+==================================================================
+--- drivers/s390/char/tape34xx.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tape34xx.h (/trunk/2.4.27) (revision 52)
+@@ -1,183 +0,0 @@
+-
+-/***************************************************************************
+- *
+- * drivers/s390/char/tape34xx.h
+- * common tape device discipline for 34xx tapes.
+- *
+- * S390 and zSeries version
+- * Copyright (C) 2001 IBM Corporation
+- * Author(s): Carsten Otte <cotte at de.ibm.com>
+- * Tuan Ngo-Anh <ngoanh at de.ibm.com>
+- *
+- ****************************************************************************
+- */
+-
+-#ifndef _TAPE34XX_H
+-
+-#define _TAPE34XX_H
+-
+-/*
+- * The CCW commands for the Tape type of command.
+- */
+-
+-#define INVALID_00 0x00 /* Invalid cmd */
+-#define BACKSPACEBLOCK 0x27 /* Back Space block */
+-#define BACKSPACEFILE 0x2f /* Back Space file */
+-#define DATA_SEC_ERASE 0x97 /* Data security erase */
+-#define ERASE_GAP 0x17 /* Erase Gap */
+-#define FORSPACEBLOCK 0x37 /* Forward space block */
+-#define FORSPACEFILE 0x3F /* Forward Space file */
+-#define FORCE_STREAM_CNT 0xEB /* Forced streaming count # */
+-#define NOP 0x03 /* No operation */
+-#define READ_FORWARD 0x02 /* Read forward */
+-#define REWIND 0x07 /* Rewind */
+-#define REWIND_UNLOAD 0x0F /* Rewind and Unload */
+-#define SENSE 0x04 /* Sense */
+-#define NEW_MODE_SET 0xEB /* Guess it is Mode set */
+-#define WRITE_CMD 0x01 /* Write */
+-#define WRITETAPEMARK 0x1F /* Write Tape Mark */
+-
+-#define ASSIGN 0xB7 /* 3420 REJECT,3480 OK */
+-#define CONTROL_ACCESS 0xE3 /* Set high speed */
+-#define DIAG_MODE_SET 0x0B /* 3420 NOP, 3480 REJECT*/
+-#define LOAD_DISPLAY 0x9F /* 3420 REJECT,3480 OK */
+-#define LOCATE 0x4F /* 3420 REJ, 3480 NOP */
+-#define LOOP_WRITE_TO_READ 0x8B /* 3480 REJECT */
+-#define MODE_SET_DB 0xDB /* 3420 REJECT,3480 OK */
+-#define MODE_SET_C3 0xC3 /* for 3420 */
+-#define MODE_SET_CB 0xCB /* for 3420 */
+-#define MODE_SET_D3 0xD3 /* for 3420 */
+-#define READ_BACKWARD 0x0C /* */
+-#define READ_BLOCK_ID 0x22 /* 3420 REJECT,3480 OK */
+-#define READ_BUFFER 0x12 /* 3420 REJECT,3480 OK */
+-#define READ_BUFF_LOG 0x24 /* 3420 REJECT,3480 OK */
+-#define RELEASE 0xD4 /* 3420 NOP, 3480 REJECT*/
+-#define REQ_TRK_IN_ERROR 0x1B /* 3420 NOP, 3480 REJECT*/
+-#define RESERVE 0xF4 /* 3420 NOP, 3480 REJECT*/
+-#define SENSE_GROUP_ID 0x34 /* 3420 REJECT,3480 OK */
+-#define SENSE_ID 0xE4 /* 3420 REJECT,3480 OK */
+-#define READ_DEV_CHAR 0x64 /* Read device characteristics */
+-#define SET_DIAGNOSE 0x4B /* 3420 NOP, 3480 REJECT*/
+-#define SET_GROUP_ID 0xAF /* 3420 REJECT,3480 OK */
+-#define SET_TAPE_WRITE_IMMED 0xC3 /* for 3480 */
+-#define SUSPEND 0x5B /* 3420 REJ, 3480 NOP */
+-#define SYNC 0x43 /* Synchronize (flush buffer) */
+-#define UNASSIGN 0xC7 /* 3420 REJECT,3480 OK */
+-#define PERF_SUBSYS_FUNC 0x77 /* 3490 CMD */
+-#define READ_CONFIG_DATA 0xFA /* 3490 CMD */
+-#define READ_MESSAGE_ID 0x4E /* 3490 CMD */
+-#define READ_SUBSYS_DATA 0x3E /* 3490 CMD */
+-#define SET_INTERFACE_ID 0x73 /* 3490 CMD */
+-
+-#ifndef MIN
+-#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
+-#endif
+-
+-
+-#define BLOCKSIZE 4096 /* size of the tape rcds */
+-
+-#define COMMAND_CHAIN CCW_FLAG_CC /* redefine from irq.h */
+-#define CHANNEL_END DEV_STAT_CHN_END /* redefine from irq.h */
+-#define DEVICE_END DEV_STAT_DEV_END /* redefine from irq.h */
+-#define UNIT_CHECK DEV_STAT_UNIT_CHECK /* redefine from irq.h */
+-#define UNIT_EXCEPTION DEV_STAT_UNIT_EXCEP /* redefine from irq.h */
+-#define CONTROL_UNIT_END DEV_STAT_CU_END /* redefine from irq.h */
+-#define INCORR_LEN SCHN_STAT_INCORR_LEN /* redefine from irq.h */
+-
+-#define SENSE_COMMAND_REJECT 0x80
+-#define SENSE_INTERVENTION_REQUIRED 0x40
+-#define SENSE_BUS_OUT_CHECK 0x20
+-#define SENSE_EQUIPMENT_CHECK 0x10
+-#define SENSE_DATA_CHECK 0x08
+-#define SENSE_OVERRUN 0x04
+-#define SENSE_DEFERRED_UNIT_CHECK 0x02
+-#define SENSE_ASSIGNED_ELSEWHERE 0x01
+-
+-#define SENSE_LOCATE_FAILURE 0x80
+-#define SENSE_DRIVE_ONLINE 0x40
+-#define SENSE_RESERVED 0x20
+-#define SENSE_RECORD_SEQUENCE_ERR 0x10
+-#define SENSE_BEGINNING_OF_TAPE 0x08
+-#define SENSE_WRITE_MODE 0x04
+-#define SENSE_WRITE_PROTECT 0x02
+-#define SENSE_NOT_CAPABLE 0x01
+-
+-#define SENSE_CHANNEL_ADAPTER_CODE 0xE0
+-#define SENSE_CHANNEL_ADAPTER_LOC 0x10
+-#define SENSE_REPORTING_CU 0x08
+-#define SENSE_AUTOMATIC_LOADER 0x04
+-#define SENSE_TAPE_SYNC_MODE 0x02
+-#define SENSE_TAPE_POSITIONING 0x01
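+-
+-/* The three groups of bits above decode sense bytes 0, 1 and 2, respectively. */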
+-
+-typedef struct _tape34xx_disc_data_t {
+- __u8 modeset_byte;
+-} tape34xx_disc_data_t __attribute__ ((packed, aligned(8)));
+-
+-/* discipline functions */
+-int tape34xx_ioctl_overload (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+-ccw_req_t * tape34xx_write_block (const char *data, size_t count, tape_info_t * ti);
+-void tape34xx_free_write_block (ccw_req_t * cqr, tape_info_t * ti);
+-ccw_req_t * tape34xx_read_block (const char *data, size_t count, tape_info_t * ti);
+-void tape34xx_free_read_block (ccw_req_t * cqr, tape_info_t * ti);
+-void tape34xx_clear_read_block (ccw_req_t * cqr, tape_info_t * ti);
+-ccw_req_t * tape34xx_mtfsf (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtbsf (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtfsr (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtbsr (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtweof (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtrew (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtoffl (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtnop (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtbsfm (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtfsfm (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mteom (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mterase (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtsetdensity (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtseek (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mttell (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtsetdrvbuffer (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtlock (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtunlock (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtload (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtunload (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtcompression (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtsetpart (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtmkpart (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtiocget (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_mtiocpos (tape_info_t * ti, int count);
+-ccw_req_t * tape34xx_bread (struct request *req, tape_info_t* ti,int tapeblock_major);
+-ccw_req_t * tape34xx_bwrite (struct request *req, tape_info_t* ti,int tapeblock_major);
+-void tape34xx_free_bread (ccw_req_t*,struct _tape_info_t*);
+-void tape34xx_free_bwrite (ccw_req_t*,struct _tape_info_t*);
+-
+-/* Event handlers */
+-void tape34xx_default_handler (tape_info_t * ti);
+-void tape34xx_unexpect_uchk_handler (tape_info_t * ti);
+-void tape34xx_unused_done(tape_info_t* ti);
+-void tape34xx_idle_done(tape_info_t* ti);
+-void tape34xx_block_done(tape_info_t* ti);
+-void tape34xx_bsf_init_done(tape_info_t* ti);
+-void tape34xx_dse_init_done(tape_info_t* ti);
+-void tape34xx_fsf_init_done(tape_info_t* ti);
+-void tape34xx_bsb_init_done(tape_info_t* ti);
+-void tape34xx_fsb_init_done(tape_info_t* ti);
+-void tape34xx_lbl_init_done(tape_info_t* ti);
+-void tape34xx_nop_init_done(tape_info_t* ti);
+-void tape34xx_rfo_init_done(tape_info_t* ti);
+-void tape34xx_rbi_init_done(tape_info_t* ti);
+-void tape34xx_rew_init_done(tape_info_t* ti);
+-void tape34xx_rew_release_init_done(tape_info_t* ti);
+-void tape34xx_run_init_done(tape_info_t* ti);
+-void tape34xx_wri_init_done(tape_info_t* ti);
+-void tape34xx_wtm_init_done(tape_info_t* ti);
+-
+-extern void schedule_tapeblock_exec_IO (tape_info_t *ti);
+-
+-// the error recovery stuff:
+-void tape34xx_error_recovery (tape_info_t* ti);
+-void tape34xx_error_recovery_has_failed (tape_info_t* ti,int error_id);
+-void tape34xx_error_recovery_succeded(tape_info_t* ti);
+-void tape34xx_error_recovery_do_retry(tape_info_t* ti);
+-void tape34xx_error_recovery_read_opposite (tape_info_t* ti);
+-void tape34xx_error_recovery_HWBUG (tape_info_t* ti,int condno);
+-#endif // _TAPE34XX_H
+=== drivers/s390/char/hwc.h
+==================================================================
+--- drivers/s390/char/hwc.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/hwc.h (/trunk/2.4.27) (revision 52)
+@@ -1,275 +0,0 @@
+-/*
+- * drivers/s390/char/hwc.h
+- *
+- *
+- * S390 version
+- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+- * Author(s): Martin Peschke <mpeschke at de.ibm.com>
+- *
+- *
+- *
+- */
+-
+-#ifndef __HWC_H__
+-#define __HWC_H__
+-
+-#define HWC_EXT_INT_PARAM_ADDR 0xFFFFFFF8
+-#define HWC_EXT_INT_PARAM_PEND 0x00000001
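+-
+-/* The external interrupt parameter word carries the HWCB address in its
+-   upper bits and a "pending" flag in bit 0. */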
+-
+-#define ET_OpCmd 0x01
+-#define ET_Msg 0x02
+-#define ET_StateChange 0x08
+-#define ET_PMsgCmd 0x09
+-#define ET_CntlProgOpCmd 0x20
+-#define ET_CntlProgIdent 0x0B
+-#define ET_SigQuiesce 0x1D
+-
+-#define ET_OpCmd_Mask 0x80000000
+-#define ET_Msg_Mask 0x40000000
+-#define ET_StateChange_Mask 0x01000000
+-#define ET_PMsgCmd_Mask 0x00800000
+-#define ET_CtlProgOpCmd_Mask 0x00000001
+-#define ET_CtlProgIdent_Mask 0x00200000
+-#define ET_SigQuiesce_Mask 0x00000008
+-
+-#define GMF_DOM 0x8000
+-#define GMF_SndAlrm 0x4000
+-#define GMF_HoldMsg 0x2000
+-
+-#define LTF_CntlText 0x8000
+-#define LTF_LabelText 0x4000
+-#define LTF_DataText 0x2000
+-#define LTF_EndText 0x1000
+-#define LTF_PromptText 0x0800
+-
+-#define HWC_COMMAND_INITIATED 0
+-#define HWC_BUSY 2
+-#define HWC_NOT_OPERATIONAL 3
+-
+-#define hwc_cmdw_t u32
+-
+-#define HWC_CMDW_READDATA 0x00770005
+-
+-#define HWC_CMDW_WRITEDATA 0x00760005
+-
+-#define HWC_CMDW_WRITEMASK 0x00780005
+-
+-#define GDS_ID_MDSMU 0x1310
+-
+-#define GDS_ID_MDSRouteInfo 0x1311
+-
+-#define GDS_ID_AgUnWrkCorr 0x1549
+-
+-#define GDS_ID_SNACondReport 0x1532
+-
+-#define GDS_ID_CPMSU 0x1212
+-
+-#define GDS_ID_RoutTargInstr 0x154D
+-
+-#define GDS_ID_OpReq 0x8070
+-
+-#define GDS_ID_TextCmd 0x1320
+-
+-#define GDS_KEY_SelfDefTextMsg 0x31
+-
+-#define _HWCB_HEADER u16 length; \
+- u8 function_code; \
+- u8 control_mask[3]; \
+- u16 response_code;
+-
+-#define _EBUF_HEADER u16 length; \
+- u8 type; \
+- u8 flags; \
+- u16 _reserved;
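+-
+-/* _HWCB_HEADER is the common header of every HWC control block;
+-   _EBUF_HEADER heads each event buffer contained in one. */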
+-
+-typedef struct {
+- _EBUF_HEADER
+-} __attribute__ ((packed))
+-
+-evbuf_t;
+-
+-#define _MDB_HEADER u16 length; \
+- u16 type; \
+- u32 tag; \
+- u32 revision_code;
+-
+-#define _GO_HEADER u16 length; \
+- u16 type; \
+- u32 domid; \
+- u8 hhmmss_time[8]; \
+- u8 th_time[3]; \
+- u8 _reserved_0; \
+- u8 dddyyyy_date[7]; \
+- u8 _reserved_1; \
+- u16 general_msg_flags; \
+- u8 _reserved_2[10]; \
+- u8 originating_system_name[8]; \
+- u8 job_guest_name[8];
+-
+-#define _MTO_HEADER u16 length; \
+- u16 type; \
+- u16 line_type_flags; \
+- u8 alarm_control; \
+- u8 _reserved[3];
+-
+-typedef struct {
+- _GO_HEADER
+-} __attribute__ ((packed))
+-
+-go_t;
+-
+-typedef struct {
+- go_t go;
+-} __attribute__ ((packed))
+-
+-mdb_body_t;
+-
+-typedef struct {
+- _MDB_HEADER
+- mdb_body_t mdb_body;
+-} __attribute__ ((packed))
+-
+-mdb_t;
+-
+-typedef struct {
+- _EBUF_HEADER
+- mdb_t mdb;
+-} __attribute__ ((packed))
+-
+-msgbuf_t;
+-
+-typedef struct {
+- _HWCB_HEADER
+- msgbuf_t msgbuf;
+-} __attribute__ ((packed))
+-
+-write_hwcb_t;
+-
+-typedef struct {
+- _MTO_HEADER
+-} __attribute__ ((packed))
+-
+-mto_t;
+-
+-static write_hwcb_t write_hwcb_template =
+-{
+- sizeof (write_hwcb_t),
+- 0x00,
+- {
+- 0x00,
+- 0x00,
+- 0x00
+- },
+- 0x0000,
+- {
+- sizeof (msgbuf_t),
+- ET_Msg,
+- 0x00,
+- 0x0000,
+- {
+- sizeof (mdb_t),
+- 0x0001,
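+-			/* tag: "MDB " in EBCDIC */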
+- 0xD4C4C240,
+- 0x00000001,
+- {
+- {
+- sizeof (go_t),
+- 0x0001
+-
+- }
+- }
+- }
+- }
+-};
+-
+-static mto_t mto_template =
+-{
+- sizeof (mto_t),
+- 0x0004,
+- LTF_EndText,
+- 0x00
+-};
+-
+-typedef u32 _hwcb_mask_t;
+-
+-typedef struct {
+- _HWCB_HEADER
+- u16 _reserved;
+- u16 mask_length;
+- _hwcb_mask_t cp_receive_mask;
+- _hwcb_mask_t cp_send_mask;
+- _hwcb_mask_t hwc_receive_mask;
+- _hwcb_mask_t hwc_send_mask;
+-} __attribute__ ((packed))
+-
+-init_hwcb_t;
+-
+-static init_hwcb_t init_hwcb_template =
+-{
+- sizeof (init_hwcb_t),
+- 0x00,
+- {
+- 0x00,
+- 0x00,
+- 0x00
+- },
+- 0x0000,
+- 0x0000,
+- sizeof (_hwcb_mask_t),
+- ET_OpCmd_Mask | ET_PMsgCmd_Mask |
+- ET_StateChange_Mask | ET_SigQuiesce_Mask,
+- ET_Msg_Mask | ET_PMsgCmd_Mask | ET_CtlProgIdent_Mask
+-};
+-
+-typedef struct {
+- _EBUF_HEADER
+- u8 validity_hwc_active_facility_mask:1;
+- u8 validity_hwc_receive_mask:1;
+- u8 validity_hwc_send_mask:1;
+- u8 validity_read_data_function_mask:1;
+- u16 _zeros:12;
+- u16 mask_length;
+- u64 hwc_active_facility_mask;
+- _hwcb_mask_t hwc_receive_mask;
+- _hwcb_mask_t hwc_send_mask;
+- u32 read_data_function_mask;
+-} __attribute__ ((packed))
+-
+-statechangebuf_t;
+-
+-#define _GDS_VECTOR_HEADER u16 length; \
+- u16 gds_id;
+-
+-#define _GDS_SUBVECTOR_HEADER u8 length; \
+- u8 key;
+-
+-typedef struct {
+- _GDS_VECTOR_HEADER
+-} __attribute__ ((packed))
+-
+-gds_vector_t;
+-
+-typedef struct {
+- _GDS_SUBVECTOR_HEADER
+-} __attribute__ ((packed))
+-
+-gds_subvector_t;
+-
+-typedef struct {
+- _HWCB_HEADER
+-} __attribute__ ((packed))
+-
+-read_hwcb_t;
+-
+-static read_hwcb_t read_hwcb_template =
+-{
+- PAGE_SIZE,
+- 0x00,
+- {
+- 0x00,
+- 0x00,
+- 0x80
+- }
+-};
+-
+-#endif /* __HWC_H__ */
+=== drivers/s390/char/hwc_rw.c
+==================================================================
+--- drivers/s390/char/hwc_rw.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/hwc_rw.c (/trunk/2.4.27) (revision 52)
+@@ -1,2458 +0,0 @@
+-/*
+- * drivers/s390/char/hwc_rw.c
+- * driver: reading from and writing to system console on S/390 via HWC
+- *
+- * S390 version
+- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+- * Author(s): Martin Peschke <mpeschke at de.ibm.com>
+- *
+- *
+- *
+- *
+- *
+- *
+- */
+-
+-#include <linux/kernel.h>
+-#include <linux/string.h>
+-#include <linux/errno.h>
+-#include <linux/ctype.h>
+-#include <linux/mm.h>
+-#include <linux/timer.h>
+-#include <linux/bootmem.h>
+-#include <linux/module.h>
+-
+-#include <asm/ebcdic.h>
+-#include <asm/uaccess.h>
+-#include <asm/types.h>
+-#include <asm/bitops.h>
+-#include <asm/setup.h>
+-#include <asm/page.h>
+-#include <asm/s390_ext.h>
+-#include <asm/irq.h>
+-
+-#ifndef MIN
+-#define MIN(a,b) (((a<b) ? a : b))
+-#endif
+-
+-extern void ctrl_alt_del (void);
+-
+-#define HWC_RW_PRINT_HEADER "hwc low level driver: "
+-
+-#define USE_VM_DETECTION
+-
+-#define DEFAULT_CASE_DELIMITER '%'
+-
+-#undef DUMP_HWC_INIT_ERROR
+-
+-#undef DUMP_HWC_WRITE_ERROR
+-
+-#undef DUMP_HWC_WRITE_LIST_ERROR
+-
+-#undef DUMP_HWC_READ_ERROR
+-
+-#undef DUMP_HWCB_INPUT
+-
+-#undef BUFFER_STRESS_TEST
+-
+-typedef struct {
+- unsigned char *next;
+- unsigned short int mto_char_sum;
+- unsigned char mto_number;
+- unsigned char times_lost;
+- unsigned short int mto_number_lost;
+- unsigned long int mto_char_sum_lost;
+-} __attribute__ ((packed))
+-
+-hwcb_list_t;
+-
+-#define MAX_HWCB_ROOM (PAGE_SIZE - sizeof(hwcb_list_t))
+-
+-#define MAX_MESSAGE_SIZE (MAX_HWCB_ROOM - sizeof(write_hwcb_t))
+-
+-#define BUF_HWCB hwc_data.hwcb_list_tail
+-#define OUT_HWCB hwc_data.hwcb_list_head
+-#define ALL_HWCB_MTO hwc_data.mto_number
+-#define ALL_HWCB_CHAR hwc_data.mto_char_sum
+-
+-#define _LIST(hwcb) ((hwcb_list_t*)(&(hwcb)[PAGE_SIZE-sizeof(hwcb_list_t)]))
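+-
+-/* Each HWCB occupies one page; the hwcb_list_t bookkeeping record sits
+-   in the last bytes of the page, everything before it is usable buffer
+-   room (MAX_HWCB_ROOM). */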
+-
+-#define _HWCB_CHAR(hwcb) (_LIST(hwcb)->mto_char_sum)
+-
+-#define _HWCB_MTO(hwcb) (_LIST(hwcb)->mto_number)
+-
+-#define _HWCB_CHAR_LOST(hwcb) (_LIST(hwcb)->mto_char_sum_lost)
+-
+-#define _HWCB_MTO_LOST(hwcb) (_LIST(hwcb)->mto_number_lost)
+-
+-#define _HWCB_TIMES_LOST(hwcb) (_LIST(hwcb)->times_lost)
+-
+-#define _HWCB_NEXT(hwcb) (_LIST(hwcb)->next)
+-
+-#define BUF_HWCB_CHAR _HWCB_CHAR(BUF_HWCB)
+-
+-#define BUF_HWCB_MTO _HWCB_MTO(BUF_HWCB)
+-
+-#define BUF_HWCB_NEXT _HWCB_NEXT(BUF_HWCB)
+-
+-#define OUT_HWCB_CHAR _HWCB_CHAR(OUT_HWCB)
+-
+-#define OUT_HWCB_MTO _HWCB_MTO(OUT_HWCB)
+-
+-#define OUT_HWCB_NEXT _HWCB_NEXT(OUT_HWCB)
+-
+-#define BUF_HWCB_CHAR_LOST _HWCB_CHAR_LOST(BUF_HWCB)
+-
+-#define BUF_HWCB_MTO_LOST _HWCB_MTO_LOST(BUF_HWCB)
+-
+-#define OUT_HWCB_CHAR_LOST _HWCB_CHAR_LOST(OUT_HWCB)
+-
+-#define OUT_HWCB_MTO_LOST _HWCB_MTO_LOST(OUT_HWCB)
+-
+-#define BUF_HWCB_TIMES_LOST _HWCB_TIMES_LOST(BUF_HWCB)
+-
+-#include "hwc.h"
+-
+-#define __HWC_RW_C__
+-#include "hwc_rw.h"
+-#undef __HWC_RW_C__
+-
+-static unsigned char _obuf[MAX_HWCB_ROOM];
+-
+-static unsigned char
+- _page[PAGE_SIZE] __attribute__ ((aligned (PAGE_SIZE)));
+-
+-typedef unsigned long kmem_pages_t;
+-
+-#define MAX_KMEM_PAGES (sizeof(kmem_pages_t) << 3)
+-
+-#define HWC_WTIMER_RUNS 1
+-#define HWC_FLUSH 2
+-#define HWC_INIT 4
+-#define HWC_BROKEN 8
+-#define HWC_INTERRUPT 16
+-#define HWC_PTIMER_RUNS 32
+-
+-static struct {
+-
+- hwc_ioctls_t ioctls;
+-
+- hwc_ioctls_t init_ioctls;
+-
+- unsigned char *hwcb_list_head;
+-
+- unsigned char *hwcb_list_tail;
+-
+- unsigned short int mto_number;
+-
+- unsigned int mto_char_sum;
+-
+- unsigned char hwcb_count;
+-
+- unsigned long kmem_start;
+-
+- unsigned long kmem_end;
+-
+- kmem_pages_t kmem_pages;
+-
+- unsigned char *obuf;
+-
+- unsigned short int obuf_cursor;
+-
+- unsigned short int obuf_count;
+-
+- unsigned short int obuf_start;
+-
+- unsigned char *page;
+-
+- u32 current_servc;
+-
+- unsigned char *current_hwcb;
+-
+- unsigned char write_nonprio:1;
+- unsigned char write_prio:1;
+- unsigned char read_nonprio:1;
+- unsigned char read_prio:1;
+- unsigned char read_statechange:1;
+- unsigned char sig_quiesce:1;
+-
+- unsigned char flags;
+-
+- hwc_high_level_calls_t *calls;
+-
+- hwc_request_t *request;
+-
+- spinlock_t lock;
+-
+- struct timer_list write_timer;
+-
+- struct timer_list poll_timer;
+-} hwc_data =
+-{
+- {
+- },
+- {
+- 8,
+- 0,
+- 80,
+- 1,
+- MAX_KMEM_PAGES,
+- MAX_KMEM_PAGES,
+-
+- 0,
+-
+- 0x6c
+-
+- },
+- NULL,
+- NULL,
+- 0,
+- 0,
+- 0,
+- 0,
+- 0,
+- 0,
+- _obuf,
+- 0,
+- 0,
+- 0,
+- _page,
+- 0,
+- NULL,
+- 0,
+- 0,
+- 0,
+- 0,
+- 0,
+- 0,
+- 0,
+- NULL,
+- NULL
+-
+-};
+-
+-static unsigned long cr0 __attribute__ ((aligned (8)));
+-static unsigned long cr0_save __attribute__ ((aligned (8)));
+-static unsigned char psw_mask __attribute__ ((aligned (8)));
+-
+-static ext_int_info_t ext_int_info_hwc;
+-
+-#define DELAYED_WRITE 0
+-#define IMMEDIATE_WRITE 1
+-
+-static signed int do_hwc_write (int from_user, unsigned char *,
+- unsigned int,
+- unsigned char);
+-
+-unsigned char hwc_ip_buf[512];
+-
+-static asmlinkage int
+-internal_print (char write_time, char *fmt,...)
+-{
+- va_list args;
+- int i;
+-
+- va_start (args, fmt);
+- i = vsprintf (hwc_ip_buf, fmt, args);
+- va_end (args);
+- return do_hwc_write (0, hwc_ip_buf, i, write_time);
+-}
+-
+-int
+-hwc_printk (const char *fmt,...)
+-{
+- va_list args;
+- int i;
+- unsigned long flags;
+- int retval;
+-
+- spin_lock_irqsave (&hwc_data.lock, flags);
+-
+-	va_start (args, fmt);
+-	i = vsprintf (hwc_ip_buf, fmt, args);
+- va_end (args);
+- retval = do_hwc_write (0, hwc_ip_buf, i, IMMEDIATE_WRITE);
+-
+- spin_unlock_irqrestore (&hwc_data.lock, flags);
+-
+- return retval;
+-}
+-
+-#ifdef DUMP_HWCB_INPUT
+-
+-static void
+-dump_storage_area (unsigned char *area, unsigned short int count)
+-{
+- unsigned short int index;
+- ioctl_nl_t old_final_nl;
+-
+- if (!area || !count)
+- return;
+-
+- old_final_nl = hwc_data.ioctls.final_nl;
+- hwc_data.ioctls.final_nl = 1;
+-
+- internal_print (DELAYED_WRITE, "\n%8x ", area);
+-
+- for (index = 0; index < count; index++) {
+-
+- if (area[index] <= 0xF)
+- internal_print (DELAYED_WRITE, "0%x", area[index]);
+- else
+- internal_print (DELAYED_WRITE, "%x", area[index]);
+-
+- if ((index & 0xF) == 0xF)
+- internal_print (DELAYED_WRITE, "\n%8x ",
+- &area[index + 1]);
+- else if ((index & 3) == 3)
+- internal_print (DELAYED_WRITE, " ");
+- }
+-
+- internal_print (IMMEDIATE_WRITE, "\n");
+-
+- hwc_data.ioctls.final_nl = old_final_nl;
+-}
+-#endif
+-
+-static inline u32
+-service_call (
+- u32 hwc_command_word,
+- unsigned char hwcb[])
+-{
+- unsigned int condition_code = 1;
+-
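+-	/* 0xB2200012 is SERVC 1,2: issue the service call with the command
+-	   word in general register 1 and the real address of the HWCB
+-	   (obtained via LRA) in register 2; the IPM/SRL pair below extracts
+-	   the resulting condition code. */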
+- __asm__ __volatile__ ("L 1, 0(%0) \n\t"
+- "LRA 2, 0(%1) \n\t"
+- ".long 0xB2200012 \n\t"
+- :
+- :"a" (&hwc_command_word), "a" (hwcb)
+- :"1", "2", "memory");
+-
+- __asm__ __volatile__ ("IPM %0 \n\t"
+- "SRL %0, 28 \n\t"
+- :"=r" (condition_code));
+-
+- return condition_code;
+-}
+-
+-static inline unsigned long
+-hwc_ext_int_param (void)
+-{
+- u32 param;
+-
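+-	/* The external interruption parameter is stored by the hardware at
+-	   real address 128 (0x80) in the lowcore. */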
+- __asm__ __volatile__ ("L %0,128\n\t"
+- :"=r" (param));
+-
+- return (unsigned long) param;
+-}
+-
+-static int
+-prepare_write_hwcb (void)
+-{
+- write_hwcb_t *hwcb;
+-
+- if (!BUF_HWCB)
+- return -ENOMEM;
+-
+- BUF_HWCB_MTO = 0;
+- BUF_HWCB_CHAR = 0;
+-
+- hwcb = (write_hwcb_t *) BUF_HWCB;
+-
+- memcpy (hwcb, &write_hwcb_template, sizeof (write_hwcb_t));
+-
+- return 0;
+-}
+-
+-static int
+-sane_write_hwcb (void)
+-{
+- unsigned short int lost_msg;
+- unsigned int lost_char;
+- unsigned char lost_hwcb;
+- unsigned char *bad_addr;
+- unsigned long page;
+- int page_nr;
+-
+- if (!OUT_HWCB)
+- return -ENOMEM;
+-
+- if ((unsigned long) OUT_HWCB & 0xFFF) {
+-
+- bad_addr = OUT_HWCB;
+-
+-#ifdef DUMP_HWC_WRITE_LIST_ERROR
+- __asm__ ("LHI 1,0xe30\n\t"
+- "LRA 2,0(%0) \n\t"
+- "J .+0 \n\t"
+- :
+- : "a" (bad_addr)
+- : "1", "2");
+-#endif
+-
+- hwc_data.kmem_pages = 0;
+- if ((unsigned long) BUF_HWCB & 0xFFF) {
+-
+- lost_hwcb = hwc_data.hwcb_count;
+- lost_msg = ALL_HWCB_MTO;
+- lost_char = ALL_HWCB_CHAR;
+-
+- OUT_HWCB = NULL;
+- BUF_HWCB = NULL;
+- ALL_HWCB_MTO = 0;
+- ALL_HWCB_CHAR = 0;
+- hwc_data.hwcb_count = 0;
+- } else {
+-
+- lost_hwcb = hwc_data.hwcb_count - 1;
+- lost_msg = ALL_HWCB_MTO - BUF_HWCB_MTO;
+- lost_char = ALL_HWCB_CHAR - BUF_HWCB_CHAR;
+- OUT_HWCB = BUF_HWCB;
+- ALL_HWCB_MTO = BUF_HWCB_MTO;
+- ALL_HWCB_CHAR = BUF_HWCB_CHAR;
+- hwc_data.hwcb_count = 1;
+- page = (unsigned long) BUF_HWCB;
+-
+- if (page >= hwc_data.kmem_start &&
+- page <= hwc_data.kmem_end) {
+-
+- page_nr = (int)
+- ((page - hwc_data.kmem_start) >> 12);
+- set_bit (page_nr, &hwc_data.kmem_pages);
+- }
+- }
+-
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "found invalid HWCB at address 0x%lx. List corrupted. "
+- "Lost %i HWCBs with %i characters within up to %i "
+-			"messages. Saved %i HWCB with last %i characters "
+- "within up to %i messages.\n",
+- (unsigned long) bad_addr,
+- lost_hwcb, lost_char, lost_msg,
+- hwc_data.hwcb_count,
+- ALL_HWCB_CHAR, ALL_HWCB_MTO);
+- }
+- return 0;
+-}
+-
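+-/* No further buffer page may be allocated: recycle the oldest HWCB for
+-   new output and account for the lines and characters lost with its
+-   previous contents. */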
+-static int
+-reuse_write_hwcb (void)
+-{
+- int retval;
+-
+- if (hwc_data.hwcb_count < 2)
+-#ifdef DUMP_HWC_WRITE_LIST_ERROR
+- __asm__ ("LHI 1,0xe31\n\t"
+- "LRA 2,0(%0)\n\t"
+- "LRA 3,0(%1)\n\t"
+- "J .+0 \n\t"
+- :
+- : "a" (BUF_HWCB), "a" (OUT_HWCB)
+- : "1", "2", "3");
+-#else
+- return -EPERM;
+-#endif
+-
+- if (hwc_data.current_hwcb == OUT_HWCB) {
+-
+- if (hwc_data.hwcb_count > 2) {
+-
+- BUF_HWCB_NEXT = OUT_HWCB_NEXT;
+-
+- BUF_HWCB = OUT_HWCB_NEXT;
+-
+- OUT_HWCB_NEXT = BUF_HWCB_NEXT;
+-
+- BUF_HWCB_NEXT = NULL;
+- }
+- } else {
+-
+- BUF_HWCB_NEXT = OUT_HWCB;
+-
+- BUF_HWCB = OUT_HWCB;
+-
+- OUT_HWCB = OUT_HWCB_NEXT;
+-
+- BUF_HWCB_NEXT = NULL;
+- }
+-
+- BUF_HWCB_TIMES_LOST += 1;
+- BUF_HWCB_CHAR_LOST += BUF_HWCB_CHAR;
+- BUF_HWCB_MTO_LOST += BUF_HWCB_MTO;
+- ALL_HWCB_MTO -= BUF_HWCB_MTO;
+- ALL_HWCB_CHAR -= BUF_HWCB_CHAR;
+-
+- retval = prepare_write_hwcb ();
+-
+- if (hwc_data.hwcb_count == hwc_data.ioctls.max_hwcb)
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "reached my own limit of "
+- "allowed buffer space for output (%i HWCBs = %li "
+- "bytes), skipped content of oldest HWCB %i time(s) "
+- "(%i lines = %i characters)\n",
+- hwc_data.ioctls.max_hwcb,
+- hwc_data.ioctls.max_hwcb * PAGE_SIZE,
+- BUF_HWCB_TIMES_LOST,
+- BUF_HWCB_MTO_LOST,
+- BUF_HWCB_CHAR_LOST);
+- else
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "page allocation failed, "
+- "could not expand buffer for output (currently in "
+- "use: %i HWCBs = %li bytes), skipped content of "
+- "oldest HWCB %i time(s) (%i lines = %i characters)\n",
+- hwc_data.hwcb_count,
+- hwc_data.hwcb_count * PAGE_SIZE,
+- BUF_HWCB_TIMES_LOST,
+- BUF_HWCB_MTO_LOST,
+- BUF_HWCB_CHAR_LOST);
+-
+- return retval;
+-}
+-
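+-/* Get a fresh page for output buffering: prefer the preallocated kmem
+-   pool tracked in the kmem_pages bitmap, otherwise fall back to
+-   __get_free_page (GFP_ATOMIC | GFP_DMA). */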
+-static int
+-allocate_write_hwcb (void)
+-{
+- unsigned char *page;
+- int page_nr;
+-
+- if (hwc_data.hwcb_count == hwc_data.ioctls.max_hwcb)
+- return -ENOMEM;
+-
+- page_nr = find_first_zero_bit (&hwc_data.kmem_pages, MAX_KMEM_PAGES);
+- if (page_nr < hwc_data.ioctls.kmem_hwcb) {
+-
+- page = (unsigned char *)
+- (hwc_data.kmem_start + (page_nr << 12));
+- set_bit (page_nr, &hwc_data.kmem_pages);
+- } else
+- page = (unsigned char *) __get_free_page (GFP_ATOMIC | GFP_DMA);
+-
+- if (!page)
+- return -ENOMEM;
+-
+- if (!OUT_HWCB)
+- OUT_HWCB = page;
+- else
+- BUF_HWCB_NEXT = page;
+-
+- BUF_HWCB = page;
+-
+- BUF_HWCB_NEXT = NULL;
+-
+- hwc_data.hwcb_count++;
+-
+- prepare_write_hwcb ();
+-
+- BUF_HWCB_TIMES_LOST = 0;
+- BUF_HWCB_MTO_LOST = 0;
+- BUF_HWCB_CHAR_LOST = 0;
+-
+-#ifdef BUFFER_STRESS_TEST
+-
+- internal_print (
+- DELAYED_WRITE,
+- "*** " HWC_RW_PRINT_HEADER
+- "page #%i at 0x%x for buffering allocated. ***\n",
+- hwc_data.hwcb_count, page);
+-
+-#endif
+-
+- return 0;
+-}
+-
+-static int
+-release_write_hwcb (void)
+-{
+- unsigned long page;
+- int page_nr;
+-
+- if (!hwc_data.hwcb_count)
+- return -ENODATA;
+-
+- if (hwc_data.hwcb_count == 1) {
+-
+- prepare_write_hwcb ();
+-
+- ALL_HWCB_CHAR = 0;
+- ALL_HWCB_MTO = 0;
+- BUF_HWCB_TIMES_LOST = 0;
+- BUF_HWCB_MTO_LOST = 0;
+- BUF_HWCB_CHAR_LOST = 0;
+- } else {
+- page = (unsigned long) OUT_HWCB;
+-
+- ALL_HWCB_MTO -= OUT_HWCB_MTO;
+- ALL_HWCB_CHAR -= OUT_HWCB_CHAR;
+- hwc_data.hwcb_count--;
+-
+- OUT_HWCB = OUT_HWCB_NEXT;
+-
+- if (page >= hwc_data.kmem_start &&
+- page <= hwc_data.kmem_end) {
+- /*memset((void *) page, 0, PAGE_SIZE); */
+-
+- page_nr = (int) ((page - hwc_data.kmem_start) >> 12);
+- clear_bit (page_nr, &hwc_data.kmem_pages);
+- } else
+- free_page (page);
+-#ifdef BUFFER_STRESS_TEST
+-
+- internal_print (
+- DELAYED_WRITE,
+- "*** " HWC_RW_PRINT_HEADER
+- "page at 0x%x released, %i pages still in use ***\n",
+- page, hwc_data.hwcb_count);
+-
+-#endif
+- }
+- return 0;
+-}
+-
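+-/* Append one message text object (MTO) holding `count' bytes of
+-   `message' to the current output HWCB and update the length fields of
+-   the enclosing hwcb, msgbuf and mdb structures. */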
+-static int
+-add_mto (
+- unsigned char *message,
+- unsigned short int count)
+-{
+- unsigned short int mto_size;
+- write_hwcb_t *hwcb;
+- mto_t *mto;
+- void *dest;
+-
+- if (!BUF_HWCB)
+- return -ENOMEM;
+-
+- if (BUF_HWCB == hwc_data.current_hwcb)
+- return -ENOMEM;
+-
+- mto_size = sizeof (mto_t) + count;
+-
+- hwcb = (write_hwcb_t *) BUF_HWCB;
+-
+- if ((MAX_HWCB_ROOM - hwcb->length) < mto_size)
+- return -ENOMEM;
+-
+- mto = (mto_t *) (((unsigned long) hwcb) + hwcb->length);
+-
+- memcpy (mto, &mto_template, sizeof (mto_t));
+-
+- dest = (void *) (((unsigned long) mto) + sizeof (mto_t));
+-
+- memcpy (dest, message, count);
+-
+- mto->length += count;
+-
+- hwcb->length += mto_size;
+- hwcb->msgbuf.length += mto_size;
+- hwcb->msgbuf.mdb.length += mto_size;
+-
+- BUF_HWCB_MTO++;
+- ALL_HWCB_MTO++;
+- BUF_HWCB_CHAR += count;
+- ALL_HWCB_CHAR += count;
+-
+- return count;
+-}
+-
+-static int write_event_data_1 (void);
+-
+-static void
+-do_poll_hwc (unsigned long data)
+-{
+- unsigned long flags;
+-
+- spin_lock_irqsave (&hwc_data.lock, flags);
+-
+- write_event_data_1 ();
+-
+- spin_unlock_irqrestore (&hwc_data.lock, flags);
+-}
+-
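+-/* The HWC is currently not operational: arm a one-shot timer that
+-   retries the pending write after two seconds; write_event_data_1
+-   re-arms it for as long as the HWC stays unavailable. */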
+-void
+-start_poll_hwc (void)
+-{
+- init_timer (&hwc_data.poll_timer);
+- hwc_data.poll_timer.function = do_poll_hwc;
+- hwc_data.poll_timer.data = (unsigned long) NULL;
+- hwc_data.poll_timer.expires = jiffies + 2 * HZ;
+- add_timer (&hwc_data.poll_timer);
+- hwc_data.flags |= HWC_PTIMER_RUNS;
+-}
+-
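+-/* Start a write service call for the oldest HWCB in the output list.
+-   Returns the service call condition code on success, a negative errno
+-   otherwise. */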
+-static int
+-write_event_data_1 (void)
+-{
+- unsigned short int condition_code;
+- int retval;
+- write_hwcb_t *hwcb = (write_hwcb_t *) OUT_HWCB;
+-
+- if ((!hwc_data.write_prio) &&
+- (!hwc_data.write_nonprio) &&
+- hwc_data.read_statechange)
+- return -EOPNOTSUPP;
+-
+- if (hwc_data.current_servc)
+- return -EBUSY;
+-
+- retval = sane_write_hwcb ();
+- if (retval < 0)
+- return -EIO;
+-
+- if (!OUT_HWCB_MTO)
+- return -ENODATA;
+-
+- if (!hwc_data.write_nonprio && hwc_data.write_prio)
+- hwcb->msgbuf.type = ET_PMsgCmd;
+- else
+- hwcb->msgbuf.type = ET_Msg;
+-
+- condition_code = service_call (HWC_CMDW_WRITEDATA, OUT_HWCB);
+-
+-#ifdef DUMP_HWC_WRITE_ERROR
+- if (condition_code != HWC_COMMAND_INITIATED)
+- __asm__ ("LHI 1,0xe20\n\t"
+- "L 2,0(%0)\n\t"
+- "LRA 3,0(%1)\n\t"
+- "J .+0 \n\t"
+- :
+- : "a" (&condition_code), "a" (OUT_HWCB)
+- : "1", "2", "3");
+-#endif
+-
+- switch (condition_code) {
+- case HWC_COMMAND_INITIATED:
+- hwc_data.current_servc = HWC_CMDW_WRITEDATA;
+- hwc_data.current_hwcb = OUT_HWCB;
+- retval = condition_code;
+- break;
+- case HWC_BUSY:
+- retval = -EBUSY;
+- break;
+- case HWC_NOT_OPERATIONAL:
+- start_poll_hwc ();
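+-		/* fall through: not operational also ends in -EIO */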
+- default:
+- retval = -EIO;
+- }
+-
+- return retval;
+-}
+-
+-static void
+-flush_hwcbs (void)
+-{
+- while (hwc_data.hwcb_count > 1)
+- release_write_hwcb ();
+-
+- release_write_hwcb ();
+-
+- hwc_data.flags &= ~HWC_FLUSH;
+-}
+-
+-static int
+-write_event_data_2 (u32 ext_int_param)
+-{
+- write_hwcb_t *hwcb;
+- int retval = 0;
+-
+-#ifdef DUMP_HWC_WRITE_ERROR
+- if ((ext_int_param & HWC_EXT_INT_PARAM_ADDR)
+- != (unsigned long) hwc_data.current_hwcb) {
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "write_event_data_2 : "
+- "HWCB address does not fit "
+- "(expected: 0x%lx, got: 0x%lx).\n",
+- (unsigned long) hwc_data.current_hwcb,
+- ext_int_param);
+- return -EINVAL;
+- }
+-#endif
+-
+- hwcb = (write_hwcb_t *) OUT_HWCB;
+-
+-#ifdef DUMP_HWC_WRITE_LIST_ERROR
+- if (((unsigned char *) hwcb) != hwc_data.current_hwcb) {
+- __asm__ ("LHI 1,0xe22\n\t"
+- "LRA 2,0(%0)\n\t"
+- "LRA 3,0(%1)\n\t"
+- "LRA 4,0(%2)\n\t"
+- "LRA 5,0(%3)\n\t"
+- "J .+0 \n\t"
+- :
+- : "a" (OUT_HWCB),
+- "a" (hwc_data.current_hwcb),
+- "a" (BUF_HWCB),
+- "a" (hwcb)
+- : "1", "2", "3", "4", "5");
+- }
+-#endif
+-
+-#ifdef DUMP_HWC_WRITE_ERROR
+- if (hwcb->response_code != 0x0020) {
+- __asm__ ("LHI 1,0xe21\n\t"
+- "LRA 2,0(%0)\n\t"
+- "LRA 3,0(%1)\n\t"
+- "LRA 4,0(%2)\n\t"
+- "LH 5,0(%3)\n\t"
+- "SRL 5,8\n\t"
+- "J .+0 \n\t"
+- :
+- : "a" (OUT_HWCB), "a" (hwc_data.current_hwcb),
+- "a" (BUF_HWCB),
+- "a" (&(hwc_data.hwcb_count))
+- : "1", "2", "3", "4", "5");
+- }
+-#endif
+-
+- switch (hwcb->response_code) {
+- case 0x0020:
+-
+- retval = OUT_HWCB_CHAR;
+- release_write_hwcb ();
+- break;
+- case 0x0040:
+- case 0x0340:
+- case 0x40F0:
+- if (!hwc_data.read_statechange) {
+- hwcb->response_code = 0;
+- start_poll_hwc ();
+- }
+- retval = -EIO;
+- break;
+- default:
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "write_event_data_2 : "
+- "failed operation "
+- "(response code: 0x%x "
+- "HWCB address: 0x%x).\n",
+- hwcb->response_code,
+- hwcb);
+- retval = -EIO;
+- }
+-
+- if (retval == -EIO) {
+-
+- hwcb->control_mask[0] = 0;
+- hwcb->control_mask[1] = 0;
+- hwcb->control_mask[2] = 0;
+- hwcb->response_code = 0;
+- }
+- hwc_data.current_servc = 0;
+- hwc_data.current_hwcb = NULL;
+-
+- if (hwc_data.flags & HWC_FLUSH)
+- flush_hwcbs ();
+-
+- return retval;
+-}
+-
+-static void
+-do_put_line (
+- unsigned char *message,
+- unsigned short count)
+-{
+-
+- if (add_mto (message, count) != count) {
+-
+- if (allocate_write_hwcb () < 0)
+- reuse_write_hwcb ();
+-
+-#ifdef DUMP_HWC_WRITE_LIST_ERROR
+- if (add_mto (message, count) != count)
+- __asm__ ("LHI 1,0xe32\n\t"
+- "LRA 2,0(%0)\n\t"
+- "L 3,0(%1)\n\t"
+- "LRA 4,0(%2)\n\t"
+- "LRA 5,0(%3)\n\t"
+- "J .+0 \n\t"
+- :
+- : "a" (message), "a" (&hwc_data.kmem_pages),
+- "a" (BUF_HWCB), "a" (OUT_HWCB)
+- : "1", "2", "3", "4", "5");
+-#else
+- add_mto (message, count);
+-#endif
+- }
+-}
+-
+-static void
+-put_line (
+- unsigned char *message,
+- unsigned short count)
+-{
+-
+- if ((!hwc_data.obuf_start) && (hwc_data.flags & HWC_WTIMER_RUNS)) {
+- del_timer (&hwc_data.write_timer);
+- hwc_data.flags &= ~HWC_WTIMER_RUNS;
+- }
+- hwc_data.obuf_start += count;
+-
+- do_put_line (message, count);
+-
+- hwc_data.obuf_start -= count;
+-}
+-
+-static void
+-set_alarm (void)
+-{
+- write_hwcb_t *hwcb;
+-
+- if ((!BUF_HWCB) || (BUF_HWCB == hwc_data.current_hwcb))
+- allocate_write_hwcb ();
+-
+- hwcb = (write_hwcb_t *) BUF_HWCB;
+- hwcb->msgbuf.mdb.mdb_body.go.general_msg_flags |= GMF_SndAlrm;
+-}
+-
+-static void
+-hwc_write_timeout (unsigned long data)
+-{
+- unsigned long flags;
+-
+- spin_lock_irqsave (&hwc_data.lock, flags);
+-
+- hwc_data.obuf_start = hwc_data.obuf_count;
+- if (hwc_data.obuf_count)
+- put_line (hwc_data.obuf, hwc_data.obuf_count);
+- hwc_data.obuf_start = 0;
+-
+- hwc_data.obuf_cursor = 0;
+- hwc_data.obuf_count = 0;
+-
+- write_event_data_1 ();
+-
+- spin_unlock_irqrestore (&hwc_data.lock, flags);
+-}
+-
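+-/*
+- * Core output routine: consume 'count' characters, expanding tabs and
+- * interpreting the usual control characters, and flush each completed
+- * line with put_line().  Returns the number of characters consumed.
+- */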
+-static int
+-do_hwc_write (
+- int from_user,
+- unsigned char *msg,
+- unsigned int count,
+- unsigned char write_time)
+-{
+- unsigned int i_msg = 0;
+- unsigned short int spaces = 0;
+- unsigned int processed_characters = 0;
+- unsigned char ch;
+- unsigned short int obuf_count;
+- unsigned short int obuf_cursor;
+- unsigned short int obuf_columns;
+-
+- if (hwc_data.obuf_start) {
+- obuf_cursor = 0;
+- obuf_count = 0;
+- obuf_columns = MIN (hwc_data.ioctls.columns,
+- MAX_MESSAGE_SIZE - hwc_data.obuf_start);
+- } else {
+- obuf_cursor = hwc_data.obuf_cursor;
+- obuf_count = hwc_data.obuf_count;
+- obuf_columns = hwc_data.ioctls.columns;
+- }
+-
+- for (i_msg = 0; i_msg < count; i_msg++) {
+- if (from_user)
+- get_user (ch, msg + i_msg);
+- else
+- ch = msg[i_msg];
+-
+- processed_characters++;
+-
+- if ((obuf_cursor == obuf_columns) &&
+-
+- (ch != '\n') &&
+-
+- (ch != '\t')) {
+- put_line (&hwc_data.obuf[hwc_data.obuf_start],
+- obuf_columns);
+- obuf_cursor = 0;
+- obuf_count = 0;
+- }
+- switch (ch) {
+-
+- case '\n':
+-
+- put_line (&hwc_data.obuf[hwc_data.obuf_start],
+- obuf_count);
+- obuf_cursor = 0;
+- obuf_count = 0;
+- break;
+-
+- case '\a':
+-
+- hwc_data.obuf_start += obuf_count;
+- set_alarm ();
+- hwc_data.obuf_start -= obuf_count;
+-
+- break;
+-
+- case '\t':
+-
+- do {
+- if (obuf_cursor < obuf_columns) {
+- hwc_data.obuf[hwc_data.obuf_start +
+- obuf_cursor]
+- = HWC_ASCEBC (' ');
+- obuf_cursor++;
+- } else
+- break;
+- } while (obuf_cursor % hwc_data.ioctls.width_htab);
+-
+- break;
+-
+- case '\f':
+- case '\v':
+-
+- spaces = obuf_cursor;
+- put_line (&hwc_data.obuf[hwc_data.obuf_start],
+- obuf_count);
+- obuf_count = obuf_cursor;
+- while (spaces) {
+- hwc_data.obuf[hwc_data.obuf_start +
+- obuf_cursor - spaces]
+- = HWC_ASCEBC (' ');
+- spaces--;
+- }
+-
+- break;
+-
+- case '\b':
+-
+- if (obuf_cursor)
+- obuf_cursor--;
+- break;
+-
+- case '\r':
+-
+- obuf_cursor = 0;
+- break;
+-
+- case 0x00:
+-
+- put_line (&hwc_data.obuf[hwc_data.obuf_start],
+- obuf_count);
+- obuf_cursor = 0;
+- obuf_count = 0;
+- goto out;
+-
+- default:
+-
+- if (isprint (ch))
+- hwc_data.obuf[hwc_data.obuf_start +
+- obuf_cursor++]
+- = HWC_ASCEBC (ch);
+- }
+- if (obuf_cursor > obuf_count)
+- obuf_count = obuf_cursor;
+- }
+-
+- if (obuf_cursor) {
+-
+- if (hwc_data.obuf_start ||
+- (hwc_data.ioctls.final_nl == 0)) {
+-
+- put_line (&hwc_data.obuf[hwc_data.obuf_start],
+- obuf_count);
+- obuf_cursor = 0;
+- obuf_count = 0;
+- } else {
+-
+- if (hwc_data.ioctls.final_nl > 0) {
+-
+- if (hwc_data.flags & HWC_WTIMER_RUNS) {
+-
+- mod_timer (&hwc_data.write_timer,
+- jiffies + hwc_data.ioctls.final_nl * HZ / 10);
+- } else {
+-
+- init_timer (&hwc_data.write_timer);
+- hwc_data.write_timer.function =
+- hwc_write_timeout;
+- hwc_data.write_timer.data =
+- (unsigned long) NULL;
+- hwc_data.write_timer.expires =
+- jiffies +
+- hwc_data.ioctls.final_nl * HZ / 10;
+- add_timer (&hwc_data.write_timer);
+- hwc_data.flags |= HWC_WTIMER_RUNS;
+- }
+-			}
+-
+-		}
+-	}
+-
+- out:
+-
+- if (!hwc_data.obuf_start) {
+- hwc_data.obuf_cursor = obuf_cursor;
+- hwc_data.obuf_count = obuf_count;
+- }
+- if (write_time == IMMEDIATE_WRITE)
+- write_event_data_1 ();
+-
+- return processed_characters;
+-}
+-
+-signed int
+-hwc_write (int from_user, const unsigned char *msg, unsigned int count)
+-{
+- unsigned long flags;
+- int retval;
+-
+- spin_lock_irqsave (&hwc_data.lock, flags);
+-
+- retval = do_hwc_write (from_user, (unsigned char *) msg,
+- count, IMMEDIATE_WRITE);
+-
+- spin_unlock_irqrestore (&hwc_data.lock, flags);
+-
+- return retval;
+-}
+-
+-unsigned int
+-hwc_chars_in_buffer (unsigned char flag)
+-{
+- unsigned short int number = 0;
+- unsigned long flags;
+-
+- spin_lock_irqsave (&hwc_data.lock, flags);
+-
+- if (flag & IN_HWCB)
+- number += ALL_HWCB_CHAR;
+-
+- if (flag & IN_WRITE_BUF)
+- number += hwc_data.obuf_cursor;
+-
+- spin_unlock_irqrestore (&hwc_data.lock, flags);
+-
+- return number;
+-}
+-
+-static inline int
+-nr_setbits (kmem_pages_t arg)
+-{
+- int i;
+- int nr = 0;
+-
+- for (i = 0; i < (sizeof (arg) << 3); i++) {
+- if (arg & 1)
+- nr++;
+- arg >>= 1;
+- }
+-
+- return nr;
+-}
+-
+-unsigned int
+-hwc_write_room (unsigned char flag)
+-{
+- unsigned int number = 0;
+- unsigned long flags;
+- write_hwcb_t *hwcb;
+-
+- spin_lock_irqsave (&hwc_data.lock, flags);
+-
+- if (flag & IN_HWCB) {
+-
+- if (BUF_HWCB) {
+- hwcb = (write_hwcb_t *) BUF_HWCB;
+- number += MAX_HWCB_ROOM - hwcb->length;
+- }
+- number += (hwc_data.ioctls.kmem_hwcb -
+- nr_setbits (hwc_data.kmem_pages)) *
+- (MAX_HWCB_ROOM -
+- (sizeof (write_hwcb_t) + sizeof (mto_t)));
+- }
+- if (flag & IN_WRITE_BUF)
+- number += MAX_HWCB_ROOM - hwc_data.obuf_cursor;
+-
+- spin_unlock_irqrestore (&hwc_data.lock, flags);
+-
+- return number;
+-}
+-
+-void
+-hwc_flush_buffer (unsigned char flag)
+-{
+- unsigned long flags;
+-
+- spin_lock_irqsave (&hwc_data.lock, flags);
+-
+- if (flag & IN_HWCB) {
+- if (hwc_data.current_servc != HWC_CMDW_WRITEDATA)
+- flush_hwcbs ();
+- else
+- hwc_data.flags |= HWC_FLUSH;
+- }
+- if (flag & IN_WRITE_BUF) {
+- hwc_data.obuf_cursor = 0;
+- hwc_data.obuf_count = 0;
+- }
+- spin_unlock_irqrestore (&hwc_data.lock, flags);
+-}
+-
+-unsigned short int
+-seperate_cases (unsigned char *buf, unsigned short int count)
+-{
+-
+- unsigned short int i_in;
+-
+- unsigned short int i_out = 0;
+-
+- unsigned char _case = 0;
+-
+- for (i_in = 0; i_in < count; i_in++) {
+-
+- if (buf[i_in] == hwc_data.ioctls.delim) {
+-
+- if ((i_in + 1 < count) &&
+- (buf[i_in + 1] == hwc_data.ioctls.delim)) {
+-
+- buf[i_out] = hwc_data.ioctls.delim;
+-
+- i_out++;
+-
+- i_in++;
+-
+- } else
+- _case = ~_case;
+-
+- } else {
+-
+- if (_case) {
+-
+- if (hwc_data.ioctls.tolower)
+- buf[i_out] = _ebc_toupper[buf[i_in]];
+-
+- else
+- buf[i_out] = _ebc_tolower[buf[i_in]];
+-
+- } else
+- buf[i_out] = buf[i_in];
+-
+- i_out++;
+- }
+- }
+-
+- return i_out;
+-}
+-
+-#ifdef DUMP_HWCB_INPUT
+-
+-static int
+-gds_vector_name (u16 id, unsigned char **name)
+-{
+-	int retval = 0;
+-
+-	switch (id) {
+-	case GDS_ID_MDSMU:
+-		*name = "Multiple Domain Support Message Unit";
+-		break;
+-	case GDS_ID_MDSRouteInfo:
+-		*name = "MDS Routing Information";
+-		break;
+-	case GDS_ID_AgUnWrkCorr:
+-		*name = "Agent Unit of Work Correlator";
+-		break;
+-	case GDS_ID_SNACondReport:
+-		*name = "SNA Condition Report";
+-		break;
+-	case GDS_ID_CPMSU:
+-		*name = "CP Management Services Unit";
+-		break;
+-	case GDS_ID_RoutTargInstr:
+-		*name = "Routing and Targeting Instructions";
+-		break;
+-	case GDS_ID_OpReq:
+-		*name = "Operate Request";
+-		break;
+-	case GDS_ID_TextCmd:
+-		*name = "Text Command";
+-		break;
+-
+-	default:
+-		*name = "unknown GDS variable";
+-		retval = -EINVAL;
+-	}
+-
+- return retval;
+-}
+-#endif
+-
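+-/*
+- * Scan the self-describing GDS vectors between 'start' and 'end' and
+- * return the first one whose id matches, or NULL if none does.
+- */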
+-inline static gds_vector_t *
+-find_gds_vector (
+- gds_vector_t * start, void *end, u16 id)
+-{
+- gds_vector_t *vec;
+- gds_vector_t *retval = NULL;
+-
+- vec = start;
+-
+- while (((void *) vec) < end) {
+- if (vec->gds_id == id) {
+-
+-#ifdef DUMP_HWCB_INPUT
+-			int retval_name;
+-			unsigned char *name;
+-
+-			retval_name = gds_vector_name (id, &name);
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "%s at 0x%x up to 0x%x, length: %d",
+- name,
+- (unsigned long) vec,
+- ((unsigned long) vec) + vec->length - 1,
+- vec->length);
+- if (retval_name < 0)
+- internal_print (
+- IMMEDIATE_WRITE,
+- ", id: 0x%x\n",
+- vec->gds_id);
+- else
+- internal_print (
+- IMMEDIATE_WRITE,
+- "\n");
+-#endif
+-
+- retval = vec;
+- break;
+- }
+- vec = (gds_vector_t *) (((unsigned long) vec) + vec->length);
+- }
+-
+- return retval;
+-}
+-
+-inline static gds_subvector_t *
+-find_gds_subvector (
+- gds_subvector_t * start, void *end, u8 key)
+-{
+- gds_subvector_t *subvec;
+- gds_subvector_t *retval = NULL;
+-
+- subvec = start;
+-
+- while (((void *) subvec) < end) {
+- if (subvec->key == key) {
+- retval = subvec;
+- break;
+- }
+- subvec = (gds_subvector_t *)
+- (((unsigned long) subvec) + subvec->length);
+- }
+-
+- return retval;
+-}
+-
+-inline static int
+-get_input (void *start, void *end)
+-{
+- int count;
+-
+- count = ((unsigned long) end) - ((unsigned long) start);
+-
+- if (hwc_data.ioctls.tolower)
+- EBC_TOLOWER (start, count);
+-
+- if (hwc_data.ioctls.delim)
+- count = seperate_cases (start, count);
+-
+- HWC_EBCASC_STR (start, count);
+-
+- if (hwc_data.ioctls.echo)
+- do_hwc_write (0, start, count, IMMEDIATE_WRITE);
+-
+- if (hwc_data.calls != NULL)
+- if (hwc_data.calls->move_input != NULL)
+- (hwc_data.calls->move_input) (start, count);
+-
+- return count;
+-}
+-
+-inline static int
+-eval_selfdeftextmsg (gds_subvector_t * start, void *end)
+-{
+- gds_subvector_t *subvec;
+- void *subvec_data;
+- void *subvec_end;
+- int retval = 0;
+-
+- subvec = start;
+-
+- while (((void *) subvec) < end) {
+- subvec = find_gds_subvector (subvec, end, 0x30);
+- if (!subvec)
+- break;
+- subvec_data = (void *)
+- (((unsigned long) subvec) +
+- sizeof (gds_subvector_t));
+- subvec_end = (void *)
+- (((unsigned long) subvec) + subvec->length);
+- retval += get_input (subvec_data, subvec_end);
+- subvec = (gds_subvector_t *) subvec_end;
+- }
+-
+- return retval;
+-}
+-
+-inline static int
+-eval_textcmd (gds_subvector_t * start, void *end)
+-{
+- gds_subvector_t *subvec;
+- gds_subvector_t *subvec_data;
+- void *subvec_end;
+- int retval = 0;
+-
+- subvec = start;
+-
+- while (((void *) subvec) < end) {
+- subvec = find_gds_subvector (
+- subvec, end, GDS_KEY_SelfDefTextMsg);
+- if (!subvec)
+- break;
+- subvec_data = (gds_subvector_t *)
+- (((unsigned long) subvec) +
+- sizeof (gds_subvector_t));
+- subvec_end = (void *)
+- (((unsigned long) subvec) + subvec->length);
+- retval += eval_selfdeftextmsg (subvec_data, subvec_end);
+- subvec = (gds_subvector_t *) subvec_end;
+- }
+-
+- return retval;
+-}
+-
+-inline static int
+-eval_cpmsu (gds_vector_t * start, void *end)
+-{
+- gds_vector_t *vec;
+- gds_subvector_t *vec_data;
+- void *vec_end;
+- int retval = 0;
+-
+- vec = start;
+-
+- while (((void *) vec) < end) {
+- vec = find_gds_vector (vec, end, GDS_ID_TextCmd);
+- if (!vec)
+- break;
+- vec_data = (gds_subvector_t *)
+- (((unsigned long) vec) + sizeof (gds_vector_t));
+- vec_end = (void *) (((unsigned long) vec) + vec->length);
+- retval += eval_textcmd (vec_data, vec_end);
+- vec = (gds_vector_t *) vec_end;
+- }
+-
+- return retval;
+-}
+-
+-inline static int
+-eval_mdsmu (gds_vector_t * start, void *end)
+-{
+- gds_vector_t *vec;
+- gds_vector_t *vec_data;
+- void *vec_end;
+- int retval = 0;
+-
+- vec = find_gds_vector (start, end, GDS_ID_CPMSU);
+- if (vec) {
+- vec_data = (gds_vector_t *)
+- (((unsigned long) vec) + sizeof (gds_vector_t));
+- vec_end = (void *) (((unsigned long) vec) + vec->length);
+- retval = eval_cpmsu (vec_data, vec_end);
+- }
+- return retval;
+-}
+-
+-static int
+-eval_evbuf (gds_vector_t * start, void *end)
+-{
+- gds_vector_t *vec;
+- gds_vector_t *vec_data;
+- void *vec_end;
+- int retval = 0;
+-
+- vec = find_gds_vector (start, end, GDS_ID_MDSMU);
+- if (vec) {
+- vec_data = (gds_vector_t *)
+- (((unsigned long) vec) + sizeof (gds_vector_t));
+- vec_end = (void *) (((unsigned long) vec) + vec->length);
+- retval = eval_mdsmu (vec_data, vec_end);
+- }
+- return retval;
+-}
+-
+-static inline int
+-eval_hwc_receive_mask (_hwcb_mask_t mask)
+-{
+-
+- hwc_data.write_nonprio
+- = ((mask & ET_Msg_Mask) == ET_Msg_Mask);
+-
+- hwc_data.write_prio
+- = ((mask & ET_PMsgCmd_Mask) == ET_PMsgCmd_Mask);
+-
+- if (hwc_data.write_prio || hwc_data.write_nonprio) {
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "can write messages\n");
+- return 0;
+- } else {
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "can not write messages\n");
+- return -1;
+- }
+-}
+-
+-static inline int
+-eval_hwc_send_mask (_hwcb_mask_t mask)
+-{
+-
+- hwc_data.read_statechange
+- = ((mask & ET_StateChange_Mask) == ET_StateChange_Mask);
+- if (hwc_data.read_statechange)
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "can read state change notifications\n");
+- else
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "can not read state change notifications\n");
+-
+- hwc_data.sig_quiesce
+- = ((mask & ET_SigQuiesce_Mask) == ET_SigQuiesce_Mask);
+- if (hwc_data.sig_quiesce)
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "can receive signal quiesce\n");
+- else
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "can not receive signal quiesce\n");
+-
+- hwc_data.read_nonprio
+- = ((mask & ET_OpCmd_Mask) == ET_OpCmd_Mask);
+- if (hwc_data.read_nonprio)
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "can read commands\n");
+-
+- hwc_data.read_prio
+- = ((mask & ET_PMsgCmd_Mask) == ET_PMsgCmd_Mask);
+- if (hwc_data.read_prio)
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "can read priority commands\n");
+-
+- if (hwc_data.read_prio || hwc_data.read_nonprio) {
+- return 0;
+- } else {
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "can not read commands from operator\n");
+- return -1;
+- }
+-}
+-
+-static int
+-eval_statechangebuf (statechangebuf_t * scbuf)
+-{
+- int retval = 0;
+-
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "HWC state change detected\n");
+-
+- if (scbuf->validity_hwc_active_facility_mask) {
+-
+- }
+- if (scbuf->validity_hwc_receive_mask) {
+-
+- if (scbuf->mask_length != 4) {
+-#ifdef DUMP_HWC_INIT_ERROR
+- __asm__ ("LHI 1,0xe50\n\t"
+- "LRA 2,0(%0)\n\t"
+- "J .+0 \n\t"
+- :
+- : "a" (scbuf)
+- : "1", "2");
+-#endif
+- } else {
+-
+- retval += eval_hwc_receive_mask
+- (scbuf->hwc_receive_mask);
+- }
+- }
+- if (scbuf->validity_hwc_send_mask) {
+-
+- if (scbuf->mask_length != 4) {
+-#ifdef DUMP_HWC_INIT_ERROR
+- __asm__ ("LHI 1,0xe51\n\t"
+- "LRA 2,0(%0)\n\t"
+- "J .+0 \n\t"
+- :
+- : "a" (scbuf)
+- : "1", "2");
+-#endif
+- } else {
+-
+- retval += eval_hwc_send_mask
+- (scbuf->hwc_send_mask);
+- }
+- }
+- if (scbuf->validity_read_data_function_mask) {
+-
+- }
+- return retval;
+-}
+-
+-#ifdef CONFIG_SMP
+-extern unsigned long cpu_online_map;
+-static volatile unsigned long cpu_quiesce_map;
+-
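+-/*
+- * Quiesce the machine: every CPU checks out of cpu_quiesce_map, CPU 0
+- * waits for the others and loads a disabled wait PSW, all other CPUs
+- * stop themselves via SIGP stop.
+- */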
+-static void
+-do_load_quiesce_psw (void)
+-{
+- psw_t quiesce_psw;
+-
+- clear_bit (smp_processor_id (), &cpu_quiesce_map);
+- if (smp_processor_id () == 0) {
+-
+- while (cpu_quiesce_map != 0) ;
+-
+- quiesce_psw.mask = _DW_PSW_MASK;
+- quiesce_psw.addr = 0xfff;
+- __load_psw (quiesce_psw);
+- }
+- signal_processor (smp_processor_id (), sigp_stop);
+-}
+-
+-static void
+-do_machine_quiesce (void)
+-{
+- cpu_quiesce_map = cpu_online_map;
+- smp_call_function (do_load_quiesce_psw, NULL, 0, 0);
+- do_load_quiesce_psw ();
+-}
+-
+-#else
+-static void
+-do_machine_quiesce (void)
+-{
+- psw_t quiesce_psw;
+-
+- quiesce_psw.mask = _DW_PSW_MASK;
+-	quiesce_psw.addr = 0xfff;
+- __load_psw (quiesce_psw);
+-}
+-
+-#endif
+-
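+-/*
+- * Walk all event buffers of a read HWCB: operator commands are passed
+- * to the input evaluation chain, state changes update the send/receive
+- * masks, and a signal-quiesce event redirects halt/reboot/power-off to
+- * do_machine_quiesce().
+- */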
+-static int
+-process_evbufs (void *start, void *end)
+-{
+- int retval = 0;
+- evbuf_t *evbuf;
+- void *evbuf_end;
+- gds_vector_t *evbuf_data;
+-
+- evbuf = (evbuf_t *) start;
+- while (((void *) evbuf) < end) {
+- evbuf_data = (gds_vector_t *)
+- (((unsigned long) evbuf) + sizeof (evbuf_t));
+- evbuf_end = (void *) (((unsigned long) evbuf) + evbuf->length);
+- switch (evbuf->type) {
+- case ET_OpCmd:
+- case ET_CntlProgOpCmd:
+- case ET_PMsgCmd:
+-#ifdef DUMP_HWCB_INPUT
+-
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "event buffer "
+- "at 0x%x up to 0x%x, length: %d\n",
+- (unsigned long) evbuf,
+- (unsigned long) (evbuf_end - 1),
+- evbuf->length);
+- dump_storage_area ((void *) evbuf, evbuf->length);
+-#endif
+- retval += eval_evbuf (evbuf_data, evbuf_end);
+- break;
+- case ET_StateChange:
+- retval += eval_statechangebuf
+- ((statechangebuf_t *) evbuf);
+- break;
+- case ET_SigQuiesce:
+-
+- _machine_restart = do_machine_quiesce;
+- _machine_halt = do_machine_quiesce;
+- _machine_power_off = do_machine_quiesce;
+- ctrl_alt_del ();
+- break;
+- default:
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "unconditional read: "
+- "unknown event buffer found, "
+- "type 0x%x",
+- evbuf->type);
+- retval = -ENOSYS;
+- }
+- evbuf = (evbuf_t *) evbuf_end;
+- }
+- return retval;
+-}
+-
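+-/*
+- * First half of an unconditional read: set up a read HWCB on
+- * hwc_data.page and issue HWC_CMDW_READDATA; the answer is evaluated
+- * in unconditional_read_2() after the external interrupt.
+- */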
+-static int
+-unconditional_read_1 (void)
+-{
+- unsigned short int condition_code;
+- read_hwcb_t *hwcb = (read_hwcb_t *) hwc_data.page;
+- int retval;
+-
+-#if 0
+-
+- if ((!hwc_data.read_prio) && (!hwc_data.read_nonprio))
+- return -EOPNOTSUPP;
+-
+- if (hwc_data.current_servc)
+- return -EBUSY;
+-#endif
+-
+- memset (hwcb, 0x00, PAGE_SIZE);
+- memcpy (hwcb, &read_hwcb_template, sizeof (read_hwcb_t));
+-
+- condition_code = service_call (HWC_CMDW_READDATA, hwc_data.page);
+-
+-#ifdef DUMP_HWC_READ_ERROR
+- if (condition_code == HWC_NOT_OPERATIONAL)
+- __asm__ ("LHI 1,0xe40\n\t"
+- "L 2,0(%0)\n\t"
+- "LRA 3,0(%1)\n\t"
+- "J .+0 \n\t"
+- :
+- : "a" (&condition_code), "a" (hwc_data.page)
+- : "1", "2", "3");
+-#endif
+-
+- switch (condition_code) {
+- case HWC_COMMAND_INITIATED:
+- hwc_data.current_servc = HWC_CMDW_READDATA;
+- hwc_data.current_hwcb = hwc_data.page;
+- retval = condition_code;
+- break;
+- case HWC_BUSY:
+- retval = -EBUSY;
+- break;
+- default:
+- retval = -EIO;
+- }
+-
+- return retval;
+-}
+-
+-static int
+-unconditional_read_2 (u32 ext_int_param)
+-{
+- read_hwcb_t *hwcb = (read_hwcb_t *) hwc_data.page;
+-
+-#ifdef DUMP_HWC_READ_ERROR
+- if ((hwcb->response_code != 0x0020) &&
+- (hwcb->response_code != 0x0220) &&
+- (hwcb->response_code != 0x60F0) &&
+- (hwcb->response_code != 0x62F0))
+- __asm__ ("LHI 1,0xe41\n\t"
+- "LRA 2,0(%0)\n\t"
+- "L 3,0(%1)\n\t"
+- "J .+0\n\t"
+- :
+- : "a" (hwc_data.page), "a" (&(hwcb->response_code))
+- : "1", "2", "3");
+-#endif
+-
+- hwc_data.current_servc = 0;
+- hwc_data.current_hwcb = NULL;
+-
+- switch (hwcb->response_code) {
+-
+- case 0x0020:
+- case 0x0220:
+- return process_evbufs (
+- (void *) (((unsigned long) hwcb) + sizeof (read_hwcb_t)),
+- (void *) (((unsigned long) hwcb) + hwcb->length));
+-
+- case 0x60F0:
+- case 0x62F0:
+- internal_print (
+- IMMEDIATE_WRITE,
+- HWC_RW_PRINT_HEADER
+- "unconditional read: "
+- "got interrupt and tried to read input, "
+- "but nothing found (response code=0x%x).\n",
+- hwcb->response_code);
+- return 0;
+-
+- case 0x0100:
+- internal_print (
+- IMMEDIATE_WRITE,
+- HWC_RW_PRINT_HEADER
+- "unconditional read: HWCB boundary violation - this "
+- "must not occur in a correct driver, please contact "
+- "author\n");
+- return -EIO;
+-
+- case 0x0300:
+- internal_print (
+- IMMEDIATE_WRITE,
+- HWC_RW_PRINT_HEADER
+- "unconditional read: "
+- "insufficient HWCB length - this must not occur in a "
+- "correct driver, please contact author\n");
+- return -EIO;
+-
+- case 0x01F0:
+- internal_print (
+- IMMEDIATE_WRITE,
+- HWC_RW_PRINT_HEADER
+- "unconditional read: "
+- "invalid command - this must not occur in a correct "
+- "driver, please contact author\n");
+- return -EIO;
+-
+- case 0x40F0:
+- internal_print (
+- IMMEDIATE_WRITE,
+- HWC_RW_PRINT_HEADER
+- "unconditional read: invalid function code\n");
+- return -EIO;
+-
+- case 0x70F0:
+- internal_print (
+- IMMEDIATE_WRITE,
+- HWC_RW_PRINT_HEADER
+- "unconditional read: invalid selection mask\n");
+- return -EIO;
+-
+- case 0x0040:
+- internal_print (
+- IMMEDIATE_WRITE,
+- HWC_RW_PRINT_HEADER
+- "unconditional read: HWC equipment check\n");
+- return -EIO;
+-
+- default:
+- internal_print (
+- IMMEDIATE_WRITE,
+- HWC_RW_PRINT_HEADER
+- "unconditional read: invalid response code %x - this "
+- "must not occur in a correct driver, please contact "
+- "author\n",
+- hwcb->response_code);
+- return -EIO;
+- }
+-}
+-
+-static int
+-write_event_mask_1 (void)
+-{
+- unsigned int condition_code;
+- int retval;
+-
+- condition_code = service_call (HWC_CMDW_WRITEMASK, hwc_data.page);
+-
+-#ifdef DUMP_HWC_INIT_ERROR
+-
+- if (condition_code == HWC_NOT_OPERATIONAL)
+- __asm__ ("LHI 1,0xe10\n\t"
+- "L 2,0(%0)\n\t"
+- "LRA 3,0(%1)\n\t"
+- "J .+0\n\t"
+- :
+- : "a" (&condition_code), "a" (hwc_data.page)
+- : "1", "2", "3");
+-#endif
+-
+- switch (condition_code) {
+- case HWC_COMMAND_INITIATED:
+- hwc_data.current_servc = HWC_CMDW_WRITEMASK;
+- hwc_data.current_hwcb = hwc_data.page;
+- retval = condition_code;
+- break;
+- case HWC_BUSY:
+- retval = -EBUSY;
+- break;
+- default:
+- retval = -EIO;
+- }
+-
+- return retval;
+-}
+-
+-static int
+-write_event_mask_2 (u32 ext_int_param)
+-{
+- init_hwcb_t *hwcb = (init_hwcb_t *) hwc_data.page;
+- int retval = 0;
+-
+- if (hwcb->response_code != 0x0020) {
+-#ifdef DUMP_HWC_INIT_ERROR
+- __asm__ ("LHI 1,0xe11\n\t"
+- "LRA 2,0(%0)\n\t"
+- "L 3,0(%1)\n\t"
+- "J .+0\n\t"
+- :
+- : "a" (hwcb), "a" (&(hwcb->response_code))
+- : "1", "2", "3");
+-#else
+- retval = -1;
+-#endif
+- } else {
+- if (hwcb->mask_length != 4) {
+-#ifdef DUMP_HWC_INIT_ERROR
+- __asm__ ("LHI 1,0xe52\n\t"
+- "LRA 2,0(%0)\n\t"
+- "J .+0 \n\t"
+- :
+- : "a" (hwcb)
+- : "1", "2");
+-#endif
+- } else {
+- retval += eval_hwc_receive_mask
+- (hwcb->hwc_receive_mask);
+- retval += eval_hwc_send_mask (hwcb->hwc_send_mask);
+- }
+- }
+-
+- hwc_data.current_servc = 0;
+- hwc_data.current_hwcb = NULL;
+-
+- return retval;
+-}
+-
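+-/*
+- * Validate and install a set of ioctl parameters.  With 'correct' set,
+- * out-of-range values are clamped; otherwise they yield -EINVAL.  The
+- * values are committed to hwc_data.ioctls only on success.
+- */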
+-static int
+-set_hwc_ioctls (hwc_ioctls_t * ioctls, char correct)
+-{
+- int retval = 0;
+- hwc_ioctls_t tmp;
+-
+- if (ioctls->width_htab > MAX_MESSAGE_SIZE) {
+- if (correct)
+- tmp.width_htab = MAX_MESSAGE_SIZE;
+- else
+- retval = -EINVAL;
+- } else
+- tmp.width_htab = ioctls->width_htab;
+-
+- tmp.echo = ioctls->echo;
+-
+- if (ioctls->columns > MAX_MESSAGE_SIZE) {
+- if (correct)
+- tmp.columns = MAX_MESSAGE_SIZE;
+- else
+- retval = -EINVAL;
+- } else
+- tmp.columns = ioctls->columns;
+-
+- tmp.final_nl = ioctls->final_nl;
+-
+- if (ioctls->max_hwcb < 2) {
+- if (correct)
+- tmp.max_hwcb = 2;
+- else
+- retval = -EINVAL;
+- } else
+- tmp.max_hwcb = ioctls->max_hwcb;
+-
+- tmp.tolower = ioctls->tolower;
+-
+- if (ioctls->kmem_hwcb > ioctls->max_hwcb) {
+- if (correct)
+- tmp.kmem_hwcb = ioctls->max_hwcb;
+- else
+- retval = -EINVAL;
+- } else
+- tmp.kmem_hwcb = ioctls->kmem_hwcb;
+-
+- if (ioctls->kmem_hwcb > MAX_KMEM_PAGES) {
+- if (correct)
+- ioctls->kmem_hwcb = MAX_KMEM_PAGES;
+- else
+- retval = -EINVAL;
+- }
+- if (ioctls->kmem_hwcb < 2) {
+- if (correct)
+- ioctls->kmem_hwcb = 2;
+- else
+- retval = -EINVAL;
+- }
+- tmp.delim = ioctls->delim;
+-
+- if (!(retval < 0))
+- hwc_data.ioctls = tmp;
+-
+- return retval;
+-}
+-
+-int
+-do_hwc_init (void)
+-{
+- int retval;
+-
+- memcpy (hwc_data.page, &init_hwcb_template, sizeof (init_hwcb_t));
+-
+- do {
+-
+- retval = write_event_mask_1 ();
+-
+- if (retval == -EBUSY) {
+-
+- hwc_data.flags |= HWC_INIT;
+-
+- __ctl_store (cr0, 0, 0);
+- cr0_save = cr0;
+- cr0 |= 0x00000200;
+- cr0 &= 0xFFFFF3AC;
+- __ctl_load (cr0, 0, 0);
+-
+- asm volatile ("STOSM %0,0x01"
+- :"=m" (psw_mask)::"memory");
+-
+- while (!(hwc_data.flags & HWC_INTERRUPT))
+- barrier ();
+-
+- asm volatile ("STNSM %0,0xFE"
+- :"=m" (psw_mask)::"memory");
+-
+- __ctl_load (cr0_save, 0, 0);
+-
+- hwc_data.flags &= ~HWC_INIT;
+- }
+- } while (retval == -EBUSY);
+-
+- if (retval == -EIO) {
+- hwc_data.flags |= HWC_BROKEN;
+- printk (HWC_RW_PRINT_HEADER "HWC not operational\n");
+- }
+- return retval;
+-}
+-
+-void hwc_interrupt_handler (struct pt_regs *regs, __u16 code);
+-
+-int
+-hwc_init (void)
+-{
+- int retval;
+-
+-#ifdef BUFFER_STRESS_TEST
+-
+- init_hwcb_t *hwcb;
+- int i;
+-
+-#endif
+-
+- if (register_early_external_interrupt (0x2401, hwc_interrupt_handler,
+- &ext_int_info_hwc) != 0)
+- panic ("Couldn't request external interrupts 0x2401");
+-
+- spin_lock_init (&hwc_data.lock);
+-
+-#ifdef USE_VM_DETECTION
+-
+- if (MACHINE_IS_VM) {
+-
+- if (hwc_data.init_ioctls.columns > 76)
+- hwc_data.init_ioctls.columns = 76;
+- hwc_data.init_ioctls.tolower = 1;
+- if (!hwc_data.init_ioctls.delim)
+- hwc_data.init_ioctls.delim = DEFAULT_CASE_DELIMITER;
+- } else {
+- hwc_data.init_ioctls.tolower = 0;
+- hwc_data.init_ioctls.delim = 0;
+- }
+-#endif
+- retval = set_hwc_ioctls (&hwc_data.init_ioctls, 1);
+-
+- hwc_data.kmem_start = (unsigned long)
+- alloc_bootmem_low_pages (hwc_data.ioctls.kmem_hwcb * PAGE_SIZE);
+- hwc_data.kmem_end = hwc_data.kmem_start +
+- hwc_data.ioctls.kmem_hwcb * PAGE_SIZE - 1;
+-
+- retval = do_hwc_init ();
+-
+- ctl_set_bit (0, 9);
+-
+-#ifdef BUFFER_STRESS_TEST
+-
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "use %i bytes for buffering.\n",
+- hwc_data.ioctls.kmem_hwcb * PAGE_SIZE);
+- for (i = 0; i < 500; i++) {
+- hwcb = (init_hwcb_t *) BUF_HWCB;
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "This is stress test message #%i, free: %i bytes\n",
+- i,
+- MAX_HWCB_ROOM - (hwcb->length + sizeof (mto_t)));
+- }
+-
+-#endif
+-
+- return /*retval */ 0;
+-}
+-
+-signed int
+-hwc_register_calls (hwc_high_level_calls_t * calls)
+-{
+- if (calls == NULL)
+- return -EINVAL;
+-
+- if (hwc_data.calls != NULL)
+- return -EBUSY;
+-
+- hwc_data.calls = calls;
+- return 0;
+-}
+-
+-signed int
+-hwc_unregister_calls (hwc_high_level_calls_t * calls)
+-{
+- if (hwc_data.calls == NULL)
+- return -EINVAL;
+-
+- if (calls != hwc_data.calls)
+- return -EINVAL;
+-
+- hwc_data.calls = NULL;
+- return 0;
+-}
+-
+-int
+-hwc_send (hwc_request_t * req)
+-{
+- unsigned long flags;
+- int retval;
+- int cc;
+-
+- spin_lock_irqsave (&hwc_data.lock, flags);
+- if (!req || !req->callback || !req->block) {
+- retval = -EINVAL;
+- goto unlock;
+- }
+- if (hwc_data.request) {
+- retval = -ENOTSUPP;
+- goto unlock;
+- }
+- cc = service_call (req->word, req->block);
+- switch (cc) {
+- case 0:
+- hwc_data.request = req;
+- hwc_data.current_servc = req->word;
+- hwc_data.current_hwcb = req->block;
+- retval = 0;
+- break;
+- case 2:
+- retval = -EBUSY;
+- break;
+- default:
+- retval = -ENOSYS;
+-
+- }
+- unlock:
+- spin_unlock_irqrestore (&hwc_data.lock, flags);
+- return retval;
+-}
+-
+-EXPORT_SYMBOL (hwc_send);
+-
+-void
+-do_hwc_callback (u32 ext_int_param)
+-{
+- if (!hwc_data.request || !hwc_data.request->callback)
+- return;
+- if ((ext_int_param & HWC_EXT_INT_PARAM_ADDR)
+- != (unsigned long) hwc_data.request->block)
+- return;
+- hwc_data.request->callback (hwc_data.request);
+- hwc_data.request = NULL;
+- hwc_data.current_hwcb = NULL;
+- hwc_data.current_servc = 0;
+-}
+-
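+-/*
+- * Interrupt bottom half, called with hwc_data.lock held: match the
+- * finished HWCB against the outstanding service call, run the fitting
+- * second-half handler, then start a pending read or the next write.
+- */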
+-void
+-hwc_do_interrupt (u32 ext_int_param)
+-{
+- u32 finished_hwcb = ext_int_param & HWC_EXT_INT_PARAM_ADDR;
+- u32 evbuf_pending = ext_int_param & HWC_EXT_INT_PARAM_PEND;
+-
+- if (hwc_data.flags & HWC_PTIMER_RUNS) {
+- del_timer (&hwc_data.poll_timer);
+- hwc_data.flags &= ~HWC_PTIMER_RUNS;
+- }
+- if (finished_hwcb) {
+-
+- if ((unsigned long) hwc_data.current_hwcb != finished_hwcb) {
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "interrupt: mismatch: "
+- "ext. int param. (0x%x) vs. "
+- "current HWCB (0x%x)\n",
+- ext_int_param,
+- hwc_data.current_hwcb);
+- } else {
+- if (hwc_data.request) {
+-
+- do_hwc_callback (ext_int_param);
+- } else {
+-
+- switch (hwc_data.current_servc) {
+-
+- case HWC_CMDW_WRITEMASK:
+-
+- write_event_mask_2 (ext_int_param);
+- break;
+-
+- case HWC_CMDW_WRITEDATA:
+-
+- write_event_data_2 (ext_int_param);
+- break;
+-
+- case HWC_CMDW_READDATA:
+-
+- unconditional_read_2 (ext_int_param);
+- break;
+-				default:
+-					break;
+- }
+- }
+- }
+- } else {
+-
+- if (hwc_data.current_hwcb) {
+- internal_print (
+- DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "interrupt: mismatch: "
+- "ext. int. param. (0x%x) vs. "
+- "current HWCB (0x%x)\n",
+- ext_int_param,
+- hwc_data.current_hwcb);
+- }
+- }
+-
+- if (evbuf_pending) {
+-
+- unconditional_read_1 ();
+- } else {
+-
+- write_event_data_1 ();
+- }
+-
+- if (!hwc_data.calls || !hwc_data.calls->wake_up)
+- return;
+- (hwc_data.calls->wake_up) ();
+-}
+-
+-void
+-hwc_interrupt_handler (struct pt_regs *regs, __u16 code)
+-{
+- int cpu = smp_processor_id ();
+-
+- u32 ext_int_param = hwc_ext_int_param ();
+-
+- irq_enter (cpu, 0x2401);
+-
+- if (hwc_data.flags & HWC_INIT) {
+-
+- hwc_data.flags |= HWC_INTERRUPT;
+- } else if (hwc_data.flags & HWC_BROKEN) {
+-
+- if (!do_hwc_init ()) {
+- hwc_data.flags &= ~HWC_BROKEN;
+- internal_print (DELAYED_WRITE,
+- HWC_RW_PRINT_HEADER
+- "delayed HWC setup after"
+- " temporary breakdown"
+- " (ext. int. parameter=0x%x)\n",
+- ext_int_param);
+- }
+- } else {
+- spin_lock (&hwc_data.lock);
+- hwc_do_interrupt (ext_int_param);
+- spin_unlock (&hwc_data.lock);
+- }
+- irq_exit (cpu, 0x2401);
+-}
+-
+-void
+-hwc_unblank (void)
+-{
+-
+- spin_lock (&hwc_data.lock);
+- spin_unlock (&hwc_data.lock);
+-
+- __ctl_store (cr0, 0, 0);
+- cr0_save = cr0;
+- cr0 |= 0x00000200;
+- cr0 &= 0xFFFFF3AC;
+- __ctl_load (cr0, 0, 0);
+-
+- asm volatile ("STOSM %0,0x01":"=m" (psw_mask)::"memory");
+-
+- while (ALL_HWCB_CHAR)
+- barrier ();
+-
+- asm volatile ("STNSM %0,0xFE":"=m" (psw_mask)::"memory");
+-
+- __ctl_load (cr0_save, 0, 0);
+-}
+-
+-int
+-hwc_ioctl (unsigned int cmd, unsigned long arg)
+-{
+- hwc_ioctls_t tmp = hwc_data.ioctls;
+- int retval = 0;
+- unsigned long flags;
+- unsigned int obuf;
+-
+- spin_lock_irqsave (&hwc_data.lock, flags);
+-
+- switch (cmd) {
+-
+- case TIOCHWCSHTAB:
+- if (get_user (tmp.width_htab, (ioctl_htab_t *) arg))
+- goto fault;
+- break;
+-
+- case TIOCHWCSECHO:
+- if (get_user (tmp.echo, (ioctl_echo_t *) arg))
+- goto fault;
+- break;
+-
+- case TIOCHWCSCOLS:
+- if (get_user (tmp.columns, (ioctl_cols_t *) arg))
+- goto fault;
+- break;
+-
+- case TIOCHWCSNL:
+- if (get_user (tmp.final_nl, (ioctl_nl_t *) arg))
+- goto fault;
+- break;
+-
+- case TIOCHWCSOBUF:
+- if (get_user (obuf, (unsigned int *) arg))
+- goto fault;
+- if (obuf & 0xFFF)
+- tmp.max_hwcb = (((obuf | 0xFFF) + 1) >> 12);
+- else
+- tmp.max_hwcb = (obuf >> 12);
+- break;
+-
+- case TIOCHWCSCASE:
+- if (get_user (tmp.tolower, (ioctl_case_t *) arg))
+- goto fault;
+- break;
+-
+- case TIOCHWCSDELIM:
+- if (get_user (tmp.delim, (ioctl_delim_t *) arg))
+- goto fault;
+- break;
+-
+- case TIOCHWCSINIT:
+- retval = set_hwc_ioctls (&hwc_data.init_ioctls, 1);
+- break;
+-
+- case TIOCHWCGHTAB:
+- if (put_user (tmp.width_htab, (ioctl_htab_t *) arg))
+- goto fault;
+- break;
+-
+- case TIOCHWCGECHO:
+- if (put_user (tmp.echo, (ioctl_echo_t *) arg))
+- goto fault;
+- break;
+-
+- case TIOCHWCGCOLS:
+- if (put_user (tmp.columns, (ioctl_cols_t *) arg))
+- goto fault;
+- break;
+-
+- case TIOCHWCGNL:
+- if (put_user (tmp.final_nl, (ioctl_nl_t *) arg))
+- goto fault;
+- break;
+-
+- case TIOCHWCGOBUF:
+- if (put_user (tmp.max_hwcb, (ioctl_obuf_t *) arg))
+- goto fault;
+- break;
+-
+- case TIOCHWCGKBUF:
+- if (put_user (tmp.kmem_hwcb, (ioctl_obuf_t *) arg))
+- goto fault;
+- break;
+-
+- case TIOCHWCGCASE:
+- if (put_user (tmp.tolower, (ioctl_case_t *) arg))
+- goto fault;
+- break;
+-
+- case TIOCHWCGDELIM:
+- if (put_user (tmp.delim, (ioctl_delim_t *) arg))
+- goto fault;
+- break;
+-#if 0
+-
+- case TIOCHWCGINIT:
+- if (put_user (&hwc_data.init_ioctls, (hwc_ioctls_t *) arg))
+- goto fault;
+- break;
+-
+- case TIOCHWCGCURR:
+- if (put_user (&hwc_data.ioctls, (hwc_ioctls_t *) arg))
+- goto fault;
+- break;
+-#endif
+-
+- default:
+- goto noioctlcmd;
+- }
+-
+- if (_IOC_DIR (cmd) == _IOC_WRITE)
+- retval = set_hwc_ioctls (&tmp, 0);
+-
+- goto out;
+-
+- fault:
+- retval = -EFAULT;
+- goto out;
+- noioctlcmd:
+- retval = -ENOIOCTLCMD;
+- out:
+- spin_unlock_irqrestore (&hwc_data.lock, flags);
+- return retval;
+-}
+=== drivers/s390/char/tape.c
+==================================================================
+--- drivers/s390/char/tape.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/char/tape.c (/trunk/2.4.27) (revision 52)
+@@ -1,1120 +0,0 @@
+-
+-/***********************************************************************
+- * drivers/s390/char/tape.c
+- * tape device driver for S/390 and zSeries tapes.
+- *
+- * S390 and zSeries version
+- * Copyright (C) 2001 IBM Corporation
+- * Author(s): Carsten Otte <cotte at de.ibm.com>
+- * Tuan Ngo-Anh <ngoanh at de.ibm.com>
+- *
+- ***********************************************************************
+- */
+-
+-#include "tapedefs.h"
+-
+-#include <linux/config.h>
+-#include <linux/stddef.h>
+-#include <linux/kernel.h>
+-#include <linux/version.h>
+-#include <linux/proc_fs.h>
+-#include <linux/init.h>
+-#include <asm/types.h>
+-#include <asm/ccwcache.h>
+-#include <asm/idals.h>
+-#include <asm/ebcdic.h>
+-#include <linux/compatmac.h>
+-#ifdef MODULE
+-#include <linux/module.h>
+-#endif
+-#include <asm/debug.h>
+-#ifdef CONFIG_S390_TAPE_DYNAMIC
+-#include <asm/s390dyn.h>
+-#endif
+-#include "tape.h"
+-#ifdef CONFIG_S390_TAPE_3490
+-#include "tape3490.h"
+-#endif
+-#ifdef CONFIG_S390_TAPE_3480
+-#include "tape3480.h"
+-#endif
+-#ifdef CONFIG_S390_TAPE_BLOCK
+-#include "tapeblock.h"
+-#endif
+-#ifdef CONFIG_S390_TAPE_CHAR
+-#include "tapechar.h"
+-#endif
+-#ifdef CONFIG_PROC_FS
+-#include <linux/vmalloc.h>
+-#endif
+-#define PRINTK_HEADER "T390:"
+-
+-
+-/* state handling routines */
+-inline void tapestate_set (tape_info_t * ti, int newstate);
+-inline int tapestate_get (tape_info_t * ti);
+-void tapestate_event (tape_info_t * ti, int event);
+-
+-/* our globals */
+-tape_info_t *first_tape_info = NULL;
+-tape_discipline_t *first_discipline = NULL;
+-tape_frontend_t *first_frontend = NULL;
+-devreg_t* tape_devreg[128];
+-int devregct=0;
+-
+-#ifdef TAPE_DEBUG
+-debug_info_t *tape_debug_area = NULL;
+-#endif
+-
+-char* state_verbose[TS_SIZE]={
+- "TS_UNUSED", "TS_IDLE", "TS_DONE", "TS_FAILED",
+- "TS_BLOCK_INIT",
+- "TS_BSB_INIT",
+- "TS_BSF_INIT",
+- "TS_DSE_INIT",
+- "TS_EGA_INIT",
+- "TS_FSB_INIT",
+- "TS_FSF_INIT",
+- "TS_LDI_INIT",
+- "TS_LBL_INIT",
+- "TS_MSE_INIT",
+- "TS_NOP_INIT",
+- "TS_RBA_INIT",
+- "TS_RBI_INIT",
+- "TS_RBU_INIT",
+- "TS_RBL_INIT",
+- "TS_RDC_INIT",
+- "TS_RFO_INIT",
+- "TS_RSD_INIT",
+- "TS_REW_INIT",
+- "TS_REW_RELEASE_INIT",
+- "TS_RUN_INIT",
+- "TS_SEN_INIT",
+- "TS_SID_INIT",
+- "TS_SNP_INIT",
+- "TS_SPG_INIT",
+- "TS_SWI_INIT",
+- "TS_SMR_INIT",
+- "TS_SYN_INIT",
+- "TS_TIO_INIT",
+- "TS_UNA_INIT",
+- "TS_WRI_INIT",
+- "TS_WTM_INIT",
+- "TS_NOT_OPER"};
+-
+-char* event_verbose[TE_SIZE]= {
+- "TE_START", "TE_DONE", "TE_FAILED", "TE_ERROR", "TE_OTHER"};
+-
+-/* our root devfs handle */
+-#ifdef CONFIG_DEVFS_FS
+-devfs_handle_t tape_devfs_root_entry;
+-
+-inline void
+-tape_mkdevfsroots (tape_info_t* ti)
+-{
+- char devno [5];
+- sprintf (devno,"%04x",ti->devinfo.devno);
+- ti->devfs_dir=devfs_mk_dir (tape_devfs_root_entry, devno, ti);
+-}
+-
+-inline void
+-tape_rmdevfsroots (tape_info_t* ti)
+-{
+- devfs_unregister (ti->devfs_dir);
+-}
+-#endif
+-
+-#ifdef CONFIG_PROC_FS
+-/* our proc tapedevices entry */
+-static struct proc_dir_entry *tape_devices_entry;
+-
+-typedef struct {
+- char *data;
+- int len;
+-} tempinfo_t;
+-
+-
+-static int
+-tape_devices_open (struct inode *inode, struct file *file)
+-{
+- int size=80;
+- tape_info_t* ti;
+- tempinfo_t* tempinfo;
+- char* data;
+- int pos=0;
+- tempinfo = kmalloc (sizeof(tempinfo_t),GFP_KERNEL);
+- if (!tempinfo)
+- return -ENOMEM;
+- for (ti=first_tape_info;ti!=NULL;ti=ti->next)
+- size+=80; // FIXME: Guess better!
+- data=vmalloc(size);
+- if (!data) {
+- kfree (tempinfo);
+- return -ENOMEM;
+- }
+- pos+=sprintf(data+pos,"TapeNo\tDevNo\tCuType\tCuModel\tDevType\tDevModel\tState\n");
+- for (ti=first_tape_info;ti!=NULL;ti=ti->next) {
+- pos+=sprintf(data+pos,"%d\t%04X\t%04X\t%02X\t%04X\t%02X\t\t%s\n",ti->rew_minor/2,
+- ti->devinfo.devno,ti->devinfo.sid_data.cu_type,
+- ti->devinfo.sid_data.cu_model,ti->devinfo.sid_data.dev_type,
+- ti->devinfo.sid_data.dev_model,((tapestate_get(ti) >= 0) &&
+- (tapestate_get(ti) < TS_SIZE)) ?
+- state_verbose[tapestate_get (ti)] : "TS UNKNOWN");
+- }
+- tempinfo->len=pos;
+- tempinfo->data=data;
+- file->private_data= (void*) tempinfo;
+-#ifdef MODULE
+- MOD_INC_USE_COUNT;
+-#endif
+- return 0;
+-}
+-
+-static ssize_t
+-tape_devices_read (struct file *file, char *user_buf, size_t user_len, loff_t * offset)
+-{
+- loff_t len;
+- tempinfo_t *p_info = (tempinfo_t *) file->private_data;
+-
+- if (*offset >= p_info->len) {
+- return 0; /* EOF */
+- } else {
+- len = user_len<(p_info->len - *offset)?user_len:(p_info->len - *offset);
+- if (copy_to_user (user_buf, &(p_info->data[*offset]), len))
+- return -EFAULT;
+- (*offset) += len;
+- return len; /* number of bytes "read" */
+- }
+-}
+-
+-static int
+-tape_devices_release (struct inode *inode, struct file *file)
+-{
+- int rc = 0;
+- tempinfo_t *p_info = (tempinfo_t *) file->private_data;
+- if (p_info) {
+- if (p_info->data)
+- vfree (p_info->data);
+- kfree (p_info);
+- }
+-#ifdef MODULE
+- MOD_DEC_USE_COUNT;
+-#endif
+- return rc;
+-}
+-
+-static struct file_operations tape_devices_file_ops =
+-{
+- read:tape_devices_read, /* read */
+- open:tape_devices_open, /* open */
+- release:tape_devices_release, /* close */
+-};
+-
+-static struct inode_operations tape_devices_inode_ops =
+-{
+-#if !(LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98))
+- default_file_ops:&tape_devices_file_ops /* file ops */
+-#endif /* kernel < 2.4 */
+-};
+-#endif /* CONFIG_PROC_FS */
+-
+-/* SECTION: Parameters for tape */
+-char *tape[256] = { NULL, };
+-
+-#ifndef MODULE
+-static char tape_parm_string[1024] __initdata = { 0, };
+-static void
+-tape_split_parm_string (char *str)
+-{
+- char *tmp = str;
+- int count = 0;
+- while (tmp != NULL && *tmp != '\0') {
+- char *end;
+- int len;
+- end = strchr (tmp, ',');
+- if (end == NULL) {
+- len = strlen (tmp) + 1;
+- } else {
+- len = (long) end - (long) tmp + 1;
+- *end = '\0';
+- end++;
+- }
+- tape[count] = kmalloc (len * sizeof (char), GFP_ATOMIC);
+- if (tape[count] == NULL) {
+- printk (KERN_WARNING PRINTK_HEADER
+- "can't store tape= parameter no %d\n",
+- count + 1);
+- break;
+- }
+- memset (tape[count], 0, len * sizeof (char));
+- memcpy (tape[count], tmp, len * sizeof (char));
+- count++;
+- tmp = end;
+- };
+-}
+-
+-void __init
+-tape_parm_setup (char *str, int *ints)
+-{
+- int len = strlen (tape_parm_string);
+- if (len != 0) {
+- strcat (tape_parm_string, ",");
+- }
+- strcat (tape_parm_string, str);
+-}
+-
+-int __init
+-tape_parm_call_setup (char *str)
+-{
+- int dummy;
+- tape_parm_setup (str, &dummy);
+- return 1;
+-}
+-
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,2,16))
+-__setup("tape=", tape_parm_call_setup);
+-#endif /* kernel > 2.2.16 */
+-#endif /* not defined MODULE */
+-
+-static inline int
+-tape_parm_strtoul (char *str, char **stra)
+-{
+- char *temp = str;
+- int val;
+- if (*temp == '0') {
+- temp++; /* strip leading zero */
+- if (*temp == 'x')
+- temp++; /* strip leading x */
+- }
+- val = simple_strtoul (temp, &temp, 16); /* interpret anything as hex */
+- *stra = temp;
+- return val;
+-}
+-
+-static inline devreg_t *
+-tape_create_devreg (int devno)
+-{
+- devreg_t *devreg = kmalloc (sizeof (devreg_t), GFP_KERNEL);
+- if (devreg != NULL) {
+- memset (devreg, 0, sizeof (devreg_t));
+- devreg->ci.devno = devno;
+- devreg->flag = DEVREG_TYPE_DEVNO;
+- devreg->oper_func = tape_oper_handler;
+- }
+- return devreg;
+-}
+-
+-static inline void
+-tape_parm_parse (char **str)
+-{
+- char *temp;
+- int from, to,i,irq=0,rc,retries=0,tape_num=0;
+- s390_dev_info_t dinfo;
+- tape_info_t* ti,*tempti;
+- tape_discipline_t* disc;
+- long lockflags;
+- if (*str==NULL) {
+- /* no params present -> leave */
+- return;
+- }
+- while (*str) {
+- temp = *str;
+- from = 0;
+- to = 0;
+-
+- /* turn off autodetect mode, if any range is present */
+- from = tape_parm_strtoul (temp, &temp);
+- to = from;
+- if (*temp == '-') {
+- temp++;
+- to = tape_parm_strtoul (temp, &temp);
+- }
+- for (i=from;i<=to;i++) {
+- retries=0;
+- // register for attch/detach of a devno
+- tape_devreg[devregct]=tape_create_devreg(i);
+- if (tape_devreg[devregct]==NULL) {
+- PRINT_WARN ("Could not create devreg for devno %04x, dyn. attach for this devno deactivated.\n",i);
+- } else {
+- s390_device_register (tape_devreg[devregct++]);
+- }
+- // we are activating a device if it is present
+- for (irq = get_irq_first(); irq!=-ENODEV; irq=get_irq_next(irq)) {
+- rc = get_dev_info_by_irq (irq, &dinfo);
+-
+- disc = first_discipline;
+- while ((dinfo.devno == i) && (disc != NULL) && (disc->cu_type != dinfo.sid_data.cu_type))
+- disc = (tape_discipline_t *) (disc->next);
+- if ((disc == NULL) || (rc == -ENODEV) || (i!=dinfo.devno)) {
+- continue;
+- }
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"det irq: ");
+- debug_int_event (tape_debug_area,3,irq);
+- debug_text_event (tape_debug_area,3,"cu: ");
+- debug_int_event (tape_debug_area,3,disc->cu_type);
+-#endif /* TAPE_DEBUG */
+- PRINT_INFO ("using devno %04x with discipline %04x on irq %d as tape device %d\n",dinfo.devno,dinfo.sid_data.cu_type,irq,tape_num/2);
+- /* Allocate tape structure */
+- ti = kmalloc (sizeof (tape_info_t), GFP_ATOMIC);
+- if (ti == NULL) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,3,"ti:no mem ");
+-#endif /* TAPE_DEBUG */
+- PRINT_INFO ("tape: can't allocate memory for "
+- "tape info structure\n");
+- continue;
+- }
+- memset(ti,0,sizeof(tape_info_t));
+- ti->discipline = disc;
+- disc->tape = ti;
+- rc = tape_setup (ti, irq, tape_num);
+- if (rc) {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"tsetup err");
+- debug_int_exception (tape_debug_area,3,rc);
+-#endif /* TAPE_DEBUG */
+- kfree (ti);
+- } else {
+- s390irq_spin_lock_irqsave (irq, lockflags);
+- if (first_tape_info == NULL) {
+- first_tape_info = ti;
+- } else {
+- tempti = first_tape_info;
+- while (tempti->next != NULL)
+- tempti = tempti->next;
+- tempti->next = ti;
+- }
+- s390irq_spin_unlock_irqrestore (irq, lockflags);
+- }
+- }
+- tape_num+=2;
+- }
+- str++;
+- }
+-}
+-
+-
+-/* SECTION: Managing wrappers for ccwcache */
+-
+-#define TAPE_EMERGENCY_REQUESTS 16
+-
+-static ccw_req_t *tape_emergency_req[TAPE_EMERGENCY_REQUESTS] =
+-{NULL,};
+-static spinlock_t tape_emergency_req_lock = SPIN_LOCK_UNLOCKED;
+-
+-static void
+-tape_init_emergency_req (void)
+-{
+- int i;
+- for (i = 0; i < TAPE_EMERGENCY_REQUESTS; i++) {
+- tape_emergency_req[i] = (ccw_req_t *) get_free_page (GFP_KERNEL);
+- }
+-}
+-
+-#ifdef MODULE // We only cleanup the emergency requests on module unload.
+-static void
+-tape_cleanup_emergency_req (void)
+-{
+- int i;
+- for (i = 0; i < TAPE_EMERGENCY_REQUESTS; i++) {
+- if (tape_emergency_req[i])
+- free_page ((long) (tape_emergency_req[i]));
+- else
+- printk (KERN_WARNING PRINTK_HEADER "losing one page for 'in-use' emergency request\n");
+- }
+-}
+-#endif
+-
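+-/*
+- * Allocate a ccw request; if ccw_alloc_request() fails, fall back to
+- * one of the statically reserved emergency pages.  Such requests are
+- * recognised in tape_free_request() by their 'cache' pointer into
+- * tape_emergency_req[].
+- */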
+-ccw_req_t *
+-tape_alloc_request (char *magic, int cplength, int datasize)
+-{
+- ccw_req_t *rv = NULL;
+- int i;
+- if ((rv = ccw_alloc_request (magic, cplength, datasize)) != NULL) {
+- return rv;
+- }
+- if (cplength * sizeof (ccw1_t) + datasize + sizeof (ccw_req_t) > PAGE_SIZE) {
+- return NULL;
+- }
+- spin_lock (&tape_emergency_req_lock);
+- for (i = 0; i < TAPE_EMERGENCY_REQUESTS; i++) {
+- if (tape_emergency_req[i] != NULL) {
+- rv = tape_emergency_req[i];
+- tape_emergency_req[i] = NULL;
+-			tape_emergency_req[i] = NULL;
+-			break;	/* take the first free request */
+- }
+- spin_unlock (&tape_emergency_req_lock);
+- if (rv) {
+- memset (rv, 0, PAGE_SIZE);
+- rv->cache = (kmem_cache_t *) (tape_emergency_req + i);
+- strncpy ((char *) (&rv->magic), magic, 4);
+- ASCEBC ((char *) (&rv->magic), 4);
+- rv->cplength = cplength;
+- rv->datasize = datasize;
+- rv->data = (void *) ((long) rv + PAGE_SIZE - datasize);
+- rv->cpaddr = (ccw1_t *) ((long) rv + sizeof (ccw_req_t));
+- }
+- return rv;
+-}
+-
+-void
+-tape_free_request (ccw_req_t * request)
+-{
+- if (request->cache >= (kmem_cache_t *) tape_emergency_req &&
+- request->cache <= (kmem_cache_t *) (tape_emergency_req + TAPE_EMERGENCY_REQUESTS)) {
+- *((ccw_req_t **) (request->cache)) = request;
+- } else {
+- clear_normalized_cda ((ccw1_t *) (request->cpaddr)); // avoid memory leak caused by modeset_byte
+- ccw_free_request (request);
+- }
+-}
+-
+-/*
+- * Allocate a ccw request and reserve it for tape driver
+- */
+-inline
+- ccw_req_t *
+-tape_alloc_ccw_req (tape_info_t * ti, int cplength, int datasize)
+-{
+- char tape_magic_id[] = "tape";
+- ccw_req_t *cqr = NULL;
+-
+- if (!ti)
+- return NULL;
+- cqr = tape_alloc_request (tape_magic_id, cplength, datasize);
+-
+- if (!cqr) {
+-#ifdef TAPE_DEBUG
+- PRINT_WARN ("empty CQR generated\n");
+-#endif
+-		return NULL;
+-	}
+- cqr->magic = TAPE_MAGIC; /* sets an identifier for tape driver */
+- cqr->device = ti; /* save pointer to tape info */
+- return cqr;
+-}
+-
+-/*
+- * Find the tape_info_t structure associated with irq
+- */
+-static inline tape_info_t *
+-tapedev_find_info (int irq)
+-{
+- tape_info_t *ti;
+-
+- ti = first_tape_info;
+- if (ti != NULL)
+- do {
+- if (ti->devinfo.irq == irq)
+- break;
+- } while ((ti = (tape_info_t *) ti->next) != NULL);
+- return ti;
+-}
+-
+-#define QUEUE_THRESHOLD 5
+-
+-/*
+- * Tape interrupt routine, called from Ingo's I/O layer
+- */
+-void
+-tape_irq (int irq, void *int_parm, struct pt_regs *regs)
+-{
+- tape_info_t *ti = tapedev_find_info (irq);
+-
+-	if (ti == NULL)
+-		return;
+-
+-	/* analyse devstat and fire event */
+- if (ti->devstat.dstat & DEV_STAT_UNIT_CHECK) {
+- tapestate_event (ti, TE_ERROR);
+- } else if (ti->devstat.dstat & (DEV_STAT_DEV_END)) {
+- tapestate_event (ti, TE_DONE);
+- } else
+- tapestate_event (ti, TE_OTHER);
+-}
+-
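+-/*
+- * Called on dynamic attach of a tape device: look up a discipline by
+- * control unit type, choose a minor number (configured or first free)
+- * and append the new tape_info_t to the global list.
+- */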
+-int
+-tape_oper_handler ( int irq, struct _devreg *dreg) {
+- tape_info_t* ti=first_tape_info;
+- tape_info_t* newtape;
+- int rc,tape_num,retries=0,i;
+- s390_dev_info_t dinfo;
+- tape_discipline_t* disc;
+-#ifdef CONFIG_DEVFS_FS
+- tape_frontend_t* frontend;
+-#endif
+- long lockflags;
+- while ((ti!=NULL) && (ti->devinfo.irq!=irq))
+- ti=ti->next;
+- if (ti!=NULL) {
+- // irq is (still) used by tape. tell ingo to try again later
+-		PRINT_WARN ("Oper handler for irq %d called while irq still (internally?) used.\n",irq);
+- return -EAGAIN;
+- }
+- // irq is not used by tape
+- rc = get_dev_info_by_irq (irq, &dinfo);
+- if (rc == -ENODEV) {
+- retries++;
+- rc = get_dev_info_by_irq (irq, &dinfo);
+- if (retries > 5) {
+-			PRINT_WARN ("Could not retrieve device information for the new device.\n");
+- return -ENODEV;
+- }
+- }
+- disc = first_discipline;
+- while ((disc != NULL) && (disc->cu_type != dinfo.sid_data.cu_type))
+- disc = (tape_discipline_t *) (disc->next);
+- if (disc == NULL)
+- PRINT_WARN ("No matching discipline for cu_type %x found, ignoring device %04x.\n",dinfo.sid_data.cu_type,dinfo.devno);
+- if (rc == -ENODEV)
+-		PRINT_WARN ("Could not retrieve device information for the new device.\n");
+- if ((disc == NULL) || (rc == -ENODEV))
+- return -ENODEV;
+-
+- /* Allocate tape structure */
+- ti = kmalloc (sizeof (tape_info_t), GFP_ATOMIC);
+- if (ti == NULL) {
+- PRINT_INFO ( "tape: can't allocate memory for "
+- "tape info structure\n");
+- return -ENOBUFS;
+- }
+- memset(ti,0,sizeof(tape_info_t));
+- ti->discipline = disc;
+- disc->tape = ti;
+- tape_num=0;
+- if (*tape) {
+- // we have static device ranges, so fingure out the tape_num of the attached tape
+- for (i=0;i<devregct;i++)
+- if (tape_devreg[i]->ci.devno==dinfo.devno) {
+- tape_num=2*i;
+- break;
+- }
+- } else {
+- // we are running in autoprobe mode, find a free tape_num
+- newtape=first_tape_info;
+- while (newtape!=NULL) {
+- if (newtape->rew_minor==tape_num) {
+- // tape num in use. try next one
+- tape_num+=2;
+- newtape=first_tape_info;
+- } else {
+- // tape num not used by newtape. look at next tape info
+- newtape=newtape->next;
+- }
+- }
+- }
+- rc = tape_setup (ti, irq, tape_num);
+- if (rc) {
+- kfree (ti);
+- return -ENOBUFS;
+- }
+-#ifdef CONFIG_DEVFS_FS
+- for (frontend=first_frontend;frontend!=NULL;frontend=frontend->next)
+- frontend->mkdevfstree(ti);
+-#endif
+- s390irq_spin_lock_irqsave (irq,lockflags);
+- if (first_tape_info == NULL) {
+- first_tape_info = ti;
+- } else {
+- newtape = first_tape_info;
+- while (newtape->next != NULL)
+- newtape = newtape->next;
+- newtape->next = ti;
+- }
+- s390irq_spin_unlock_irqrestore (irq, lockflags);
+- return 0;
+-}
+-
+-
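+-/*
+- * Called on detach of a tape device: a busy device is marked
+- * TS_NOT_OPER and its waiters are woken up; an unused one is unlinked
+- * and freed.
+- */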
+-static void
+-tape_noper_handler ( int irq, int status ) {
+- tape_info_t *ti=first_tape_info;
+- tape_info_t *lastti;
+-#ifdef CONFIG_DEVFS_FS
+- tape_frontend_t *frontend;
+-#endif
+- long lockflags;
+- s390irq_spin_lock_irqsave(irq,lockflags);
+- while (ti!=NULL && ti->devinfo.irq!=irq) ti=ti->next;
+- if (ti==NULL) return;
+- if (tapestate_get(ti)!=TS_UNUSED) {
+- // device is in use!
+-		PRINT_WARN ("Tape #%d was detached while it was busy. Expect errors!\n",ti->blk_minor/2);
+- tapestate_set(ti,TS_NOT_OPER);
+- ti->rc=-ENODEV;
+- ti->wanna_wakeup=1;
+- switch (tapestate_get(ti)) {
+- case TS_REW_RELEASE_INIT:
+- tapestate_set(ti,TS_NOT_OPER);
+- wake_up (&ti->wq);
+- break;
+-#ifdef CONFIG_S390_TAPE_BLOCK
+- case TS_BLOCK_INIT:
+- tapestate_set(ti,TS_NOT_OPER);
+- schedule_tapeblock_exec_IO(ti);
+- break;
+-#endif
+- default:
+- tapestate_set(ti,TS_NOT_OPER);
+- wake_up_interruptible (&ti->wq);
+- }
+- } else {
+- // device is unused!
+- PRINT_WARN ("Tape #%d was detached.\n",ti->blk_minor/2);
+- if (ti==first_tape_info) {
+- first_tape_info=ti->next;
+- } else {
+- lastti=first_tape_info;
+- while (lastti->next!=ti) lastti=lastti->next;
+- lastti->next=ti->next;
+- }
+-#ifdef CONFIG_DEVFS_FS
+- for (frontend=first_frontend;frontend!=NULL;frontend=frontend->next)
+- frontend->rmdevfstree(ti);
+- tape_rmdevfsroots(ti);
+-#endif
+- kfree(ti);
+- }
+- s390irq_spin_unlock_irqrestore(irq,lockflags);
+- return;
+-}
+-
+-
+-void
+-tape_dump_sense (devstat_t * stat)
+-{
+-#ifdef TAPE_DEBUG
+- int sl;
+-#endif
+-#if 0
+-
+- PRINT_WARN ("------------I/O resulted in unit check:-----------\n");
+- for (sl = 0; sl < 4; sl++) {
+- PRINT_WARN ("Sense:");
+- for (sct = 0; sct < 8; sct++) {
+- PRINT_WARN (" %2d:0x%02X", 8 * sl + sct,
+- stat->ii.sense.data[8 * sl + sct]);
+- }
+- PRINT_WARN ("\n");
+- }
+- PRINT_INFO ("Sense data: %02X%02X%02X%02X %02X%02X%02X%02X "
+- " %02X%02X%02X%02X %02X%02X%02X%02X \n",
+- stat->ii.sense.data[0], stat->ii.sense.data[1],
+- stat->ii.sense.data[2], stat->ii.sense.data[3],
+- stat->ii.sense.data[4], stat->ii.sense.data[5],
+- stat->ii.sense.data[6], stat->ii.sense.data[7],
+- stat->ii.sense.data[8], stat->ii.sense.data[9],
+- stat->ii.sense.data[10], stat->ii.sense.data[11],
+- stat->ii.sense.data[12], stat->ii.sense.data[13],
+- stat->ii.sense.data[14], stat->ii.sense.data[15]);
+- PRINT_INFO ("Sense data: %02X%02X%02X%02X %02X%02X%02X%02X "
+- " %02X%02X%02X%02X %02X%02X%02X%02X \n",
+- stat->ii.sense.data[16], stat->ii.sense.data[17],
+- stat->ii.sense.data[18], stat->ii.sense.data[19],
+- stat->ii.sense.data[20], stat->ii.sense.data[21],
+- stat->ii.sense.data[22], stat->ii.sense.data[23],
+- stat->ii.sense.data[24], stat->ii.sense.data[25],
+- stat->ii.sense.data[26], stat->ii.sense.data[27],
+- stat->ii.sense.data[28], stat->ii.sense.data[29],
+- stat->ii.sense.data[30], stat->ii.sense.data[31]);
+-#endif
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"SENSE:");
+- for (sl=0;sl<31;sl++) {
+- debug_int_event (tape_debug_area,3,stat->ii.sense.data[sl]);
+- }
+- debug_int_exception (tape_debug_area,3,stat->ii.sense.data[31]);
+-#endif
+-}
+-
+-/*
+- * Setup tape_info_t structure of a tape device
+- */
+-int
+-tape_setup (tape_info_t * ti, int irq, int minor)
+-{
+- long lockflags;
+- int rc = 0;
+-
+- if (minor>254) {
+- PRINT_WARN ("Device id %d on irq %d will not be accessible since this driver is restricted to 128 devices.\n",minor/2,irq);
+- return -EINVAL;
+- }
+- rc = get_dev_info_by_irq (irq, &(ti->devinfo));
+- if (rc == -ENODEV) { /* end of device list */
+- return rc;
+- }
+- ti->rew_minor = minor;
+- ti->nor_minor = minor + 1;
+- ti->blk_minor = minor;
+-#ifdef CONFIG_DEVFS_FS
+- tape_mkdevfsroots(ti);
+-#endif
+- /* Register IRQ */
+-#ifdef CONFIG_S390_TAPE_DYNAMIC
+- rc = s390_request_irq_special (irq, tape_irq, tape_noper_handler,0, "tape", &(ti->devstat));
+-#else
+- rc = s390_request_irq (irq, tape_irq, 0, "tape", &(ti->devstat));
+-#endif
+- s390irq_spin_lock_irqsave (irq, lockflags);
+- ti->next = NULL;
+- if (rc)
+- PRINT_WARN ("Cannot register irq %d, rc=%d\n", irq, rc);
+- init_waitqueue_head (&ti->wq);
+- ti->kernbuf = ti->userbuf = ti->discdata = NULL;
+- tapestate_set (ti, TS_UNUSED);
+- ti->discdata=NULL;
+- ti->discipline->setup_assist (ti);
+- ti->wanna_wakeup=0;
+- s390irq_spin_unlock_irqrestore (irq, lockflags);
+- return rc;
+-}
+-
+-/*
+- * tape_init will register the driver for each tape.
+- */
+-int
+-tape_init (void)
+-{
+- long lockflags;
+- s390_dev_info_t dinfo;
+- tape_discipline_t *disc;
+- tape_info_t *ti = NULL, *tempti = NULL;
+- char *opt_char,*opt_block,*opt_3490,*opt_3480;
+- int irq = 0, rc, retries = 0, tape_num = 0;
+- static int initialized=0;
+-
+- if (initialized) // Only init the devices once
+- return 0;
+- initialized=1;
+-
+-#ifdef TAPE_DEBUG
+- tape_debug_area = debug_register ( "tape", 3, 2, 10);
+- debug_register_view(tape_debug_area,&debug_hex_ascii_view);
+- debug_text_event (tape_debug_area,3,"begin init");
+-#endif /* TAPE_DEBUG */
+-
+- /* print banner */
+- PRINT_WARN ("IBM S/390 Tape Device Driver (v1.01).\n");
+- PRINT_WARN ("(C) IBM Deutschland Entwicklung GmbH, 2000\n");
+- opt_char=opt_block=opt_3480=opt_3490="not present";
+-#ifdef CONFIG_S390_TAPE_CHAR
+- opt_char="built in";
+-#endif
+-#ifdef CONFIG_S390_TAPE_BLOCK
+- opt_block="built in";
+-#endif
+-#ifdef CONFIG_S390_TAPE_3480
+- opt_3480="built in";
+-#endif
+-#ifdef CONFIG_S390_TAPE_3490
+- opt_3490="built in";
+-#endif
+- /* print feature info */
+- PRINT_WARN ("character device frontend : %s\n",opt_char);
+- PRINT_WARN ("block device frontend : %s\n",opt_block);
+- PRINT_WARN ("support for 3480 compatible : %s\n",opt_3480);
+- PRINT_WARN ("support for 3490 compatible : %s\n",opt_3490);
+-
+-#ifndef MODULE
+- tape_split_parm_string(tape_parm_string);
+-#endif
+- if (*tape)
+- PRINT_INFO ("Using ranges supplied in parameters, disabling autoprobe mode.\n");
+- else
+- PRINT_INFO ("No parameters supplied, enabling autoprobe mode for all supported devices.\n");
+-#ifdef CONFIG_S390_TAPE_3490
+- if (*tape)
+- first_discipline = tape3490_init (0); // no autoprobe for devices
+- else
+- first_discipline = tape3490_init (1); // do autoprobe since no parm specified
+- first_discipline->next = NULL;
+-#endif
+-
+-#ifdef CONFIG_S390_TAPE_3480
+- if (first_discipline == NULL) {
+- if (*tape)
+- first_discipline = tape3480_init (0); // no autoprobe for devices
+- else
+- first_discipline = tape3480_init (1); // do autoprobe since no parm specified
+- first_discipline->next = NULL;
+- } else {
+- if (*tape)
+- first_discipline->next = tape3480_init (0); // no autoprobe for devices
+- else
+- first_discipline->next = tape3480_init (1); // do autoprobe since no parm specified
+- ((tape_discipline_t*) (first_discipline->next))->next=NULL;
+- }
+-#endif
+-#ifdef CONFIG_DEVFS_FS
+- tape_devfs_root_entry=devfs_mk_dir (NULL, "tape", NULL);
+-#endif CONFIG_DEVFS_FS
+-
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"dev detect");
+-#endif /* TAPE_DEBUG */
+- /* Allocate the tape structures */
+- if (*tape!=NULL) {
+- // we have parameters, continue with parsing the parameters and set the devices online
+- tape_parm_parse (tape);
+- } else {
+- // we are running in autodetect mode, search all devices for compatibles
+- for (irq = get_irq_first(); irq!=-ENODEV; irq=get_irq_next(irq)) {
+- rc = get_dev_info_by_irq (irq, &dinfo);
+- disc = first_discipline;
+- while ((disc != NULL) && (disc->cu_type != dinfo.sid_data.cu_type))
+- disc = (tape_discipline_t *) (disc->next);
+- if ((disc == NULL) || (rc == -ENODEV))
+- continue;
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"det irq: ");
+- debug_int_event (tape_debug_area,3,irq);
+- debug_text_event (tape_debug_area,3,"cu: ");
+- debug_int_event (tape_debug_area,3,disc->cu_type);
+-#endif /* TAPE_DEBUG */
+- PRINT_INFO ("using devno %04x with discipline %04x on irq %d as tape device %d\n",dinfo.devno,dinfo.sid_data.cu_type,irq,tape_num/2);
+- /* Allocate tape structure */
+- ti = kmalloc (sizeof (tape_info_t), GFP_ATOMIC);
+- if (ti == NULL) {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,3,"ti:no mem ");
+-#endif /* TAPE_DEBUG */
+- PRINT_INFO ("tape: can't allocate memory for "
+- "tape info structure\n");
+- continue;
+- }
+- memset(ti,0,sizeof(tape_info_t));
+- ti->discipline = disc;
+- disc->tape = ti;
+- rc = tape_setup (ti, irq, tape_num);
+- if (rc) {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"tsetup err");
+- debug_int_exception (tape_debug_area,3,rc);
+-#endif /* TAPE_DEBUG */
+- kfree (ti);
+- } else {
+- s390irq_spin_lock_irqsave (irq, lockflags);
+- if (first_tape_info == NULL) {
+- first_tape_info = ti;
+- } else {
+- tempti = first_tape_info;
+- while (tempti->next != NULL)
+- tempti = tempti->next;
+- tempti->next = ti;
+- }
+- tape_num += 2;
+- s390irq_spin_unlock_irqrestore (irq, lockflags);
+- }
+- }
+- }
+-
+- /* Allocate local buffer for the ccwcache */
+- tape_init_emergency_req ();
+-#ifdef CONFIG_PROC_FS
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98))
+- tape_devices_entry = create_proc_entry ("tapedevices",
+- S_IFREG | S_IRUGO | S_IWUSR,
+- &proc_root);
+- tape_devices_entry->proc_fops = &tape_devices_file_ops;
+- tape_devices_entry->proc_iops = &tape_devices_inode_ops;
+-#else
+- tape_devices_entry = (struct proc_dir_entry *) kmalloc
+- (sizeof (struct proc_dir_entry), GFP_ATOMIC);
+- if (tape_devices_entry) {
+- memset (tape_devices_entry, 0, sizeof (struct proc_dir_entry));
+- tape_devices_entry->name = "tapedevices";
+- tape_devices_entry->namelen = strlen ("tapedevices");
+- tape_devices_entry->low_ino = 0;
+- tape_devices_entry->mode = (S_IFREG | S_IRUGO | S_IWUSR);
+- tape_devices_entry->nlink = 1;
+- tape_devices_entry->uid = 0;
+- tape_devices_entry->gid = 0;
+- tape_devices_entry->size = 0;
+- tape_devices_entry->get_info = NULL;
+- tape_devices_entry->ops = &tape_devices_inode_ops;
+- proc_register (&proc_root, tape_devices_entry);
+- }
+-#endif
+-#endif /* CONFIG_PROC_FS */
+-
+- return 0;
+-}
+-
+-#ifdef MODULE
+-MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte (cotte at de.ibm.com)");
+-MODULE_DESCRIPTION("Linux for S/390 channel attached tape device driver");
+-MODULE_PARM (tape, "1-" __MODULE_STRING (256) "s");
+-
+-int
+-init_module (void)
+-{
+-#ifdef CONFIG_S390_TAPE_CHAR
+- tapechar_init ();
+-#endif
+-#ifdef CONFIG_S390_TAPE_BLOCK
+- tapeblock_init ();
+-#endif
+- return 0;
+-}
+-
+-void
+-cleanup_module (void)
+-{
+- tape_info_t *ti ,*temp;
+- tape_frontend_t* frontend, *tempfe;
+- tape_discipline_t* disc ,*tempdi;
+- int i;
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"cleaup mod");
+-#endif /* TAPE_DEBUG */
+-
+- if (*tape) {
+- // we are running with parameters. we'll now deregister from our devno's
+- for (i=0;i<devregct;i++) {
+- s390_device_unregister(tape_devreg[devregct]);
+- }
+- }
+- ti = first_tape_info;
+- while (ti != NULL) {
+- temp = ti;
+- ti = ti->next;
+- //cleanup a device
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"free irq:");
+- debug_int_event (tape_debug_area,6,temp->devinfo.irq);
+-#endif /* TAPE_DEBUG */
+- free_irq (temp->devinfo.irq, &(temp->devstat));
+- if (temp->discdata) kfree (temp->discdata);
+- if (temp->kernbuf) kfree (temp->kernbuf);
+- if (temp->cqr) tape_free_request(temp->cqr);
+-#ifdef CONFIG_DEVFS_FS
+- for (frontend=first_frontend;frontend!=NULL;frontend=frontend->next)
+- frontend->rmdevfstree(temp);
+- tape_rmdevfsroots(temp);
+-#endif
+- kfree (temp);
+- }
+-#ifdef CONFIG_DEVFS_FS
+- devfs_unregister (tape_devfs_root_entry);
+-#endif CONFIG_DEVFS_FS
+-#ifdef CONFIG_PROC_FS
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,3,98))
+- remove_proc_entry ("tapedevices", &proc_root);
+-#else
+- proc_unregister (&proc_root, tape_devices_entry->low_ino);
+- kfree (tape_devices_entry);
+-#endif /* LINUX_IS_24 */
+-#endif
+-#ifdef CONFIG_S390_TAPE_CHAR
+- tapechar_uninit();
+-#endif
+-#ifdef CONFIG_S390_TAPE_BLOCK
+- tapeblock_uninit();
+-#endif
+- frontend=first_frontend;
+- while (frontend != NULL) {
+- tempfe = frontend;
+- frontend = frontend->next;
+- kfree (tempfe);
+- }
+- disc=first_discipline;
+- while (disc != NULL) {
+- if (*tape)
+- disc->shutdown(0);
+- else
+- disc->shutdown(1);
+- tempdi = disc;
+- disc = disc->next;
+- kfree (tempdi);
+- }
+- /* Deallocate the local buffer for the ccwcache */
+- tape_cleanup_emergency_req ();
+-#ifdef TAPE_DEBUG
+- debug_unregister (tape_debug_area);
+-#endif /* TAPE_DEBUG */
+-}
+-#endif /* MODULE */
+-
+-inline void
+-tapestate_set (tape_info_t * ti, int newstate)
+-{
+- if (ti->tape_state == TS_NOT_OPER) {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,3,"ts_set err");
+- debug_text_exception (tape_debug_area,3,"dev n.oper");
+-#endif /* TAPE_DEBUG */
+- } else {
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,4,"ts. dev: ");
+- debug_int_event (tape_debug_area,4,ti->blk_minor);
+- debug_text_event (tape_debug_area,4,"old ts: ");
+- debug_text_event (tape_debug_area,4,(((tapestate_get (ti) < TS_SIZE) &&
+- (tapestate_get (ti) >=0 )) ?
+- state_verbose[tapestate_get (ti)] :
+- "UNKNOWN TS"));
+- debug_text_event (tape_debug_area,4,"new ts: ");
+- debug_text_event (tape_debug_area,4,(((newstate < TS_SIZE) &&
+- (newstate >= 0)) ?
+- state_verbose[newstate] :
+- "UNKNOWN TS"));
+-#endif /* TAPE_DEBUG */
+- ti->tape_state = newstate;
+- }
+-}
+-
+-inline int
+-tapestate_get (tape_info_t * ti)
+-{
+- return (ti->tape_state);
+-}
+-
+-void
+-tapestate_event (tape_info_t * ti, int event)
+-{
+-#ifdef TAPE_DEBUG
+- debug_text_event (tape_debug_area,6,"te! dev: ");
+- debug_int_event (tape_debug_area,6,ti->blk_minor);
+- debug_text_event (tape_debug_area,6,"event:");
+- debug_text_event (tape_debug_area,6,((event >=0) &&
+- (event < TE_SIZE)) ?
+- event_verbose[event] : "TE UNKNOWN");
+- debug_text_event (tape_debug_area,6,"state:");
+- debug_text_event (tape_debug_area,6,((tapestate_get(ti) >= 0) &&
+- (tapestate_get(ti) < TS_SIZE)) ?
+- state_verbose[tapestate_get (ti)] :
+- "TS UNKNOWN");
+-#endif /* TAPE_DEBUG */
+- if (event == TE_ERROR) {
+- ti->discipline->error_recovery(ti);
+- } else {
+- if ((event >= 0) &&
+- (event < TE_SIZE) &&
+- (tapestate_get (ti) >= 0) &&
+- (tapestate_get (ti) < TS_SIZE) &&
+- ((*(ti->discipline->event_table))[tapestate_get (ti)][event] != NULL))
+- ((*(ti->discipline->event_table))[tapestate_get (ti)][event]) (ti);
+- else {
+-#ifdef TAPE_DEBUG
+- debug_text_exception (tape_debug_area,3,"TE UNEXPEC");
+-#endif /* TAPE_DEBUG */
+- ti->discipline->default_handler (ti);
+- }
+- }
+-}
+-
+-/*
+- * Overrides for Emacs so that we follow Linus's tabbing style.
+- * Emacs will notice this stuff at the end of the file and automatically
+- * adjust the settings for this buffer only. This must remain at the end
+- * of the file.
+- * ---------------------------------------------------------------------------
+- * Local variables:
+- * c-indent-level: 4
+- * c-brace-imaginary-offset: 0
+- * c-brace-offset: -4
+- * c-argdecl-indent: 4
+- * c-label-offset: -4
+- * c-continued-statement-offset: 4
+- * c-continued-brace-offset: 0
+- * indent-tabs-mode: nil
+- * tab-width: 8
+- * End:
+- */
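The tapestate_event code removed above dispatches through a per-discipline
table of function pointers indexed by [state][event]. A minimal sketch of
that dispatch pattern (the state/event counts and handler names are made
up, and the real driver additionally dereferences a pointer to the table
via discipline->event_table):

  #include <stddef.h>

  #define TS_SIZE 2                     /* illustrative state count */
  #define TE_SIZE 2                     /* illustrative event count */

  typedef struct tape_info tape_info_t;
  typedef void (*tape_event_handler_t)(tape_info_t *);

  struct tape_info {
          int tape_state;
  };

  static void handle_start(tape_info_t *ti) { /* kick off the next I/O */ }
  static void handle_done(tape_info_t *ti)  { /* advance the state machine */ }

  /* one handler per (state, event) pair; NULL marks "unexpected here" */
  static tape_event_handler_t event_table[TS_SIZE][TE_SIZE] = {
          { handle_start, NULL },         /* handlers for state 0 */
          { NULL,         handle_done },  /* handlers for state 1 */
  };

  static void dispatch(tape_info_t *ti, int event)
  {
          tape_event_handler_t h = event_table[ti->tape_state][event];

          if (h)
                  h(ti);
          /* else the driver falls back to discipline->default_handler */
  }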
+=== drivers/s390/qdio.c
+==================================================================
+--- drivers/s390/qdio.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/s390/qdio.c (/trunk/2.4.27) (revision 52)
+@@ -57,7 +57,7 @@
+
+ #include <asm/qdio.h>
+
+-#define VERSION_QDIO_C "$Revision: 1.145 $"
++#define VERSION_QDIO_C "$Revision: 1.145.4.11 $"
+
+ /****************** MODULE PARAMETER VARIABLES ********************/
+ MODULE_AUTHOR("Utz Bacher <utz.bacher at de.ibm.com>");
+@@ -97,10 +97,12 @@
+ #endif /* QDIO_PERFORMANCE_STATS */
+
+ static int hydra_thinints=0;
++static int omit_svs=0;
+
+ static int indicator_used[INDICATORS_PER_CACHELINE];
+ static __u32 * volatile indicators;
+ static __u32 volatile spare_indicator;
++static atomic_t spare_indicator_usecount;
+
+ static debug_info_t *qdio_dbf_setup=NULL;
+ static debug_info_t *qdio_dbf_sbal=NULL;
+@@ -121,6 +123,7 @@
+ static struct semaphore init_sema;
+
+ static qdio_chsc_area_t *chsc_area;
++static spinlock_t chsc_area_lock=SPIN_LOCK_UNLOCKED;
+ /* iQDIO stuff: */
+ static volatile qdio_q_t *tiq_list=NULL; /* volatile as it could change
+ during a while loop */
+@@ -198,7 +201,7 @@
+ }
+ static inline unsigned long qdio_get_millis(void)
+ {
+- return (unsigned long)(qdio_get_micros()>>12);
++ return (unsigned long)(qdio_get_micros()>>10);
+ }
+
+ static __inline__ int atomic_return_add (int i, atomic_t *v)
+@@ -518,8 +521,10 @@
+
+ if (found)
+ return indicators+i;
+- else
++ else {
++ atomic_inc(&spare_indicator_usecount);
+ return (__u32 * volatile) &spare_indicator;
++ }
+ }
+
+ /* locked by the locks in qdio_activate and qdio_cleanup */
+@@ -531,6 +536,9 @@
+ i=addr-indicators;
+ indicator_used[i]=0;
+ }
++ if (addr==&spare_indicator) {
++ atomic_dec(&spare_indicator_usecount);
++ }
+ }
+
+ static inline volatile void tiqdio_clear_summary_bit(__u32 *location)
+@@ -621,8 +629,8 @@
+ set_slsb(&q->slsb.acc.val[(gsf+QDIO_MAX_BUFFERS_PER_Q-1)&
+ (QDIO_MAX_BUFFERS_PER_Q-1)],SLSB_P_INPUT_NOT_INIT);
+ /* we don't issue this SYNC_MEMORY, as we trust Rick T and
+- * moreover will not use the PROCESSING state, so q->polling
+- * was 0
++ * moreover will not use the PROCESSING state under VM,
++ * so q->polling was 0 anyway.
+ SYNC_MEMORY;*/
+ if (q->slsb.acc.val[gsf]==SLSB_P_INPUT_PRIMED) {
+ /* set our summary bit again, as otherwise there is a
+@@ -655,6 +663,11 @@
+ if ((q->is_thinint_q)&&(q->is_input_q)) {
+ /* iQDIO */
+ spin_lock_irqsave(&ttiq_list_lock,flags);
++	/* in case cleanup has done this already and simultaneously
++ * qdio_unmark_q is called from the interrupt handler, we've
++ * got to check this in this specific case again */
++ if ((!q->list_prev)||(!q->list_next))
++ goto out;
+ if (q->list_next==q) {
+ /* q was the only interesting q */
+ tiq_list=NULL;
+@@ -667,6 +680,7 @@
+ q->list_next=NULL;
+ q->list_prev=NULL;
+ }
++out:
+ spin_unlock_irqrestore(&ttiq_list_lock,flags);
+ }
+ }
+@@ -710,7 +724,7 @@
+ (void*)q->sbal[bufno],SBAL_SIZE);
+ }
+
+-inline static int qdio_get_outbound_buffer_frontier(qdio_q_t *q)
++static inline int qdio_get_outbound_buffer_frontier(qdio_q_t *q)
+ {
+ int f,f_mod_no;
+ volatile char *slsb;
+@@ -739,7 +753,7 @@
+ if (f==first_not_to_check) goto out;
+ slsbyte=slsb[f_mod_no];
+
+- /* the hydra has not fetched the output yet */
++ /* the card has not fetched the output yet */
+ if (slsbyte==SLSB_CU_OUTPUT_PRIMED) {
+ #ifdef QDIO_DBF_LIKE_HELL
+ QDIO_DBF_TEXT5(0,trace,"outpprim");
+@@ -747,7 +761,7 @@
+ goto out;
+ }
+
+- /* the hydra got it */
++ /* the card got it */
+ if (slsbyte==SLSB_P_OUTPUT_EMPTY) {
+ atomic_dec(&q->number_of_buffers_used);
+ f++;
+@@ -797,7 +811,7 @@
+ }
+
+ /* all buffers are processed */
+-inline static int qdio_is_outbound_q_done(qdio_q_t *q)
++static inline int qdio_is_outbound_q_done(qdio_q_t *q)
+ {
+ int no_used;
+ #ifdef QDIO_DBF_LIKE_HELL
+@@ -818,7 +832,7 @@
+ return (no_used==0);
+ }
+
+-inline static int qdio_has_outbound_q_moved(qdio_q_t *q)
++static inline int qdio_has_outbound_q_moved(qdio_q_t *q)
+ {
+ int i;
+
+@@ -827,7 +841,6 @@
+ if ( (i!=GET_SAVED_FRONTIER(q)) ||
+ (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
+ SAVE_FRONTIER(q,i);
+- SAVE_TIMESTAMP(q);
+ #ifdef QDIO_DBF_LIKE_HELL
+ QDIO_DBF_TEXT4(0,trace,"oqhasmvd");
+ QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+@@ -842,9 +855,10 @@
+ }
+ }
+
+-inline static void qdio_kick_outbound_q(qdio_q_t *q)
++static inline void qdio_kick_outbound_q(qdio_q_t *q)
+ {
+ int result;
++ char dbf_text[15];
+ #ifdef QDIO_DBF_LIKE_HELL
+ QDIO_DBF_TEXT4(0,trace,"kickoutq");
+ QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+@@ -852,9 +866,68 @@
+
+ if (!q->siga_out) return;
+
++ /* here's the story with cc=2 and busy bit set (thanks, Rick):
++	 * VM's CP could present us with cc=2 and busy bit set on SIGA-write
++ * during reconfiguration of their Guest LAN (only in HIPERS mode,
++ * QDIO mode is asynchronous -- cc=2 and busy bit there will take
++ * the queues down immediately; and not being under VM we have a
++ * problem on cc=2 and busy bit set right away).
++ *
++ * Therefore qdio_siga_output will try for a short time constantly,
++ * if such a condition occurs. If it doesn't change, it will
++ * increase the busy_siga_counter and save the timestamp, and
++ * schedule the queue for later processing (via mark_q, using the
++ * queue tasklet). __qdio_outbound_processing will check out the
++ * counter. If non-zero, it will call qdio_kick_outbound_q as often
++ * as the value of the counter. This will attempt further SIGA
++ * instructions.
++ * Every successful SIGA instruction will decrease the counter.
++ * After some time of no movement, qdio_kick_outbound_q will
++ * finally fail and reflect corresponding error codes to call
++ * the upper layer module and have it take the queues down.
++ *
++ * Note that this is a change from the original HiperSockets design
++ * (saying cc=2 and busy bit means take the queues down), but in
++	 * those days Guest LAN didn't exist... excessive cc=2 with busy bit
++ * conditions will still take the queues down, but the threshold is
++ * higher due to the Guest LAN environment.
++ */
++
+ result=qdio_siga_output(q);
+
+- if (result) {
++ switch (result) {
++ case 0:
++ /* went smooth this time, reset timestamp */
++#ifdef QDIO_DBF_LIKE_HELL
++ QDIO_DBF_TEXT3(0,trace,"sigawsuc");
++ sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
++ atomic_read(&q->busy_siga_counter));
++ QDIO_DBF_TEXT3(0,trace,dbf_text);
++#endif /* QDIO_DBF_LIKE_HELL */
++ q->timing.busy_start=0;
++ break;
++ case (2|QDIO_SIGA_ERROR_B_BIT_SET):
++ /* cc=2 and busy bit: */
++ atomic_inc(&q->busy_siga_counter);
++
++ /* if the last siga was successful, save
++ * timestamp here */
++ if (!q->timing.busy_start)
++ q->timing.busy_start=NOW;
++
++ /* if we're in time, don't touch error_status_flags
++ * and siga_error */
++ if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
++ qdio_mark_q(q);
++ break;
++ }
++ QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
++ sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
++ atomic_read(&q->busy_siga_counter));
++ QDIO_DBF_TEXT3(0,trace,dbf_text);
++ /* else fallthrough and report error */
++ default:
++ /* for plain cc=1, 2 or 3: */
+ if (q->siga_error)
+ q->error_status_flags|=
+ QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
+@@ -864,7 +937,7 @@
+ }
+ }
+
+-inline static void qdio_kick_outbound_handler(qdio_q_t *q)
++static inline void qdio_kick_outbound_handler(qdio_q_t *q)
+ {
+ #ifdef QDIO_DBF_LIKE_HELL
+ char dbf_text[15];
+@@ -901,8 +974,9 @@
+ q->error_status_flags=0;
+ }
+
+-static void qdio_outbound_processing(qdio_q_t *q)
++static inline void __qdio_outbound_processing(qdio_q_t *q)
+ {
++ int siga_attempts;
+ #ifdef QDIO_DBF_LIKE_HELL
+ QDIO_DBF_TEXT4(0,trace,"qoutproc");
+ QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+@@ -926,6 +1000,14 @@
+ o_p_nc++;
+ #endif /* QDIO_PERFORMANCE_STATS */
+
++ /* see comment in qdio_kick_outbound_q */
++ siga_attempts=atomic_read(&q->busy_siga_counter);
++ while (siga_attempts) {
++ atomic_dec(&q->busy_siga_counter);
++ qdio_kick_outbound_q(q);
++ siga_attempts--;
++ }
++
+ #ifdef QDIO_PERFORMANCE_STATS
+ perf_stats.tl_runs++;
+ #endif /* QDIO_PERFORMANCE_STATS */
+@@ -936,7 +1018,8 @@
+
+ if (q->is_iqdio_q) {
+ /* for asynchronous queues, we better check, if the fill
+- * level is too high */
++ * level is too high. for synchronous queues, the fill
++ * level will never be that high. */
+ if (atomic_read(&q->number_of_buffers_used)>
+ IQDIO_FILL_LEVEL_TO_POLL) {
+ qdio_mark_q(q);
+@@ -950,16 +1033,24 @@
+ qdio_release_q(q);
+ }
+
++static void qdio_outbound_processing(qdio_q_t *q)
++{
++ __qdio_outbound_processing(q);
++}
++
+ /************************* INBOUND ROUTINES *******************************/
+
+
+-inline static int qdio_get_inbound_buffer_frontier(qdio_q_t *q)
++static inline int qdio_get_inbound_buffer_frontier(qdio_q_t *q)
+ {
+ int f,f_mod_no;
+ volatile char *slsb;
+ char slsbyte;
+ int first_not_to_check;
+ char dbf_text[15];
++#ifdef QDIO_USE_PROCESSING_STATE
++ int last_position=-1;
++#endif /* QDIO_USE_PROCESSING_STATE */
+
+ #ifdef QDIO_DBF_LIKE_HELL
+ QDIO_DBF_TEXT4(0,trace,"getibfro");
+@@ -1002,8 +1093,14 @@
+ if (q->siga_sync) {
+ set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
+ } else {
+- set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_PROCESSING);
++ /* set the previous buffer to NOT_INIT. The current
++ * buffer will be set to PROCESSING at the end of
++ * this function to avoid further interrupts. */
++ if (last_position>=0)
++ set_slsb(&slsb[last_position],
++ SLSB_P_INPUT_NOT_INIT);
+ atomic_set(&q->polling,1);
++ last_position=f_mod_no;
+ }
+ #else /* QDIO_USE_PROCESSING_STATE */
+ set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
+@@ -1044,6 +1141,10 @@
+ f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
+ atomic_dec(&q->number_of_buffers_used);
+
++#ifdef QDIO_USE_PROCESSING_STATE
++ last_position=-1;
++#endif /* QDIO_USE_PROCESSING_STATE */
++
+ goto out;
+ }
+
+@@ -1051,6 +1152,11 @@
+ out:
+ q->first_to_check=f_mod_no;
+
++#ifdef QDIO_USE_PROCESSING_STATE
++ if (last_position>=0)
++ set_slsb(&slsb[last_position],SLSB_P_INPUT_PROCESSING);
++#endif /* QDIO_USE_PROCESSING_STATE */
++
+ #ifdef QDIO_DBF_LIKE_HELL
+ QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
+ #endif /* QDIO_DBF_LIKE_HELL */
+@@ -1058,7 +1164,7 @@
+ return q->first_to_check;
+ }
+
+-inline static int qdio_has_inbound_q_moved(qdio_q_t *q)
++static inline int qdio_has_inbound_q_moved(qdio_q_t *q)
+ {
+ int i;
+
+@@ -1095,7 +1201,7 @@
+ }
+
+ /* means, no more buffers to be filled */
+-inline static int iqdio_is_inbound_q_done(qdio_q_t *q)
++static inline int iqdio_is_inbound_q_done(qdio_q_t *q)
+ {
+ int no_used;
+ #ifdef QDIO_DBF_LIKE_HELL
+@@ -1148,7 +1254,7 @@
+ return 0;
+ }
+
+-inline static int qdio_is_inbound_q_done(qdio_q_t *q)
++static inline int qdio_is_inbound_q_done(qdio_q_t *q)
+ {
+ int no_used;
+ #ifdef QDIO_DBF_LIKE_HELL
+@@ -1157,7 +1263,7 @@
+
+ no_used=atomic_read(&q->number_of_buffers_used);
+
+- /* we need that one for synchronization with Hydra, as Hydra
++ /* we need that one for synchronization with the OSA/FCP card, as it
+ * does a kind of PCI avoidance */
+ SYNC_MEMORY;
+
+@@ -1204,7 +1310,7 @@
+ }
+ }
+
+-inline static void qdio_kick_inbound_handler(qdio_q_t *q)
++static inline void qdio_kick_inbound_handler(qdio_q_t *q)
+ {
+ int count=0;
+ int start,end,real_end,i;
+@@ -1250,7 +1356,8 @@
+ #endif /* QDIO_PERFORMANCE_STATS */
+ }
+
+-static inline void tiqdio_inbound_processing(qdio_q_t *q)
++static inline void __tiqdio_inbound_processing(qdio_q_t *q,
++ int spare_ind_was_set)
+ {
+ qdio_irq_t *irq_ptr;
+ qdio_q_t *oq;
+@@ -1282,9 +1389,19 @@
+ goto out;
+ }
+
+- if (*(q->dev_st_chg_ind)) {
+- tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);
++	/* we reset spare_ind_was_set when the queue does not use the
++ * spare indicator */
++ if (spare_ind_was_set) {
++ spare_ind_was_set = (q->dev_st_chg_ind==&spare_indicator);
++ }
+
++ if ( (*(q->dev_st_chg_ind)) || (spare_ind_was_set) ) {
++ /* q->dev_st_chg_ind is the indicator, be it shared or not.
++		 * only clear it if the indicator is non-shared */
++ if (!spare_ind_was_set) {
++ tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);
++ }
++
+ if (q->hydra_gives_outbound_pcis) {
+ if (!q->siga_sync_done_on_thinints) {
+ SYNC_MEMORY_ALL;
+@@ -1297,7 +1414,7 @@
+ }
+
+ /* maybe we have to do work on our outbound queues... at least
+- * we have to check Hydra outbound-int-capable thinint-capable
++ * we have to check for outbound-int-capable thinint-capable
+ * queues */
+ if (q->hydra_gives_outbound_pcis) {
+ irq_ptr=(qdio_irq_t*)q->irq_ptr;
+@@ -1307,7 +1424,7 @@
+ perf_stats.tl_runs--;
+ #endif /* QDIO_PERFORMANCE_STATS */
+ if (!qdio_is_outbound_q_done(oq)) {
+- qdio_outbound_processing(oq);
++ __qdio_outbound_processing(oq);
+ }
+ }
+ }
+@@ -1330,8 +1447,13 @@
+ qdio_release_q(q);
+ }
+
+-static void qdio_inbound_processing(qdio_q_t *q)
++static void tiqdio_inbound_processing(qdio_q_t *q)
+ {
++ __tiqdio_inbound_processing(q,atomic_read(&spare_indicator_usecount));
++}
++
++static inline void __qdio_inbound_processing(qdio_q_t *q)
++{
+ int q_laps=0;
+
+ #ifdef QDIO_DBF_LIKE_HELL
+@@ -1375,11 +1497,17 @@
+ qdio_release_q(q);
+ }
+
++static void qdio_inbound_processing(qdio_q_t *q)
++{
++ __qdio_inbound_processing(q);
++}
++
+ /************************* MAIN ROUTINES *******************************/
+
+ static inline void tiqdio_inbound_checks(void)
+ {
+ qdio_q_t *q;
++ int spare_ind_was_set=0;
+ #ifdef QDIO_USE_PROCESSING_STATE
+ int q_laps=0;
+ #endif /* QDIO_USE_PROCESSING_STATE */
+@@ -1398,15 +1526,22 @@
+ again:
+ #endif /* QDIO_USE_PROCESSING_STATE */
+
++ /* when the spare indicator is used and set, save that and clear it */
++ if ( (atomic_read(&spare_indicator_usecount)) && (spare_indicator) ) {
++ spare_ind_was_set=1;
++ tiqdio_clear_summary_bit((__u32*)&spare_indicator);
++ }
++
+ q=(qdio_q_t*)tiq_list;
+ /* switch all active queues to processing state */
+ do {
+ if (!q) break;
+- tiqdio_inbound_processing(q);
++ __tiqdio_inbound_processing(q,spare_ind_was_set);
+ q=(qdio_q_t*)q->list_next;
+ } while (q!=(qdio_q_t*)tiq_list);
+
+- /* switch off all queues' processing state */
++ /* switch off all queues' processing state, see comments in
++ * qdio_get_inbound_buffer_frontier */
+ #ifdef QDIO_USE_PROCESSING_STATE
+ q=(qdio_q_t*)tiq_list;
+ do {
+@@ -1589,7 +1724,7 @@
+ kfree(irq_ptr->output_qs[i]);
+
+ }
+- if (irq_ptr->qdr) kfree(irq_ptr->qdr);
++ kfree(irq_ptr->qdr);
+ kfree(irq_ptr);
+ }
+
+@@ -1758,6 +1893,10 @@
+ ((irq_ptr->is_thinint_irq)?&tiqdio_inbound_processing:
+ &qdio_inbound_processing);
+
++ /* actually this is not used for inbound queues. yet. */
++ atomic_set(&q->busy_siga_counter,0);
++ q->timing.busy_start=0;
++
+ /* for (j=0;j<QDIO_STATS_NUMBER;j++)
+ q->timing.last_transfer_times[j]=(qdio_get_micros()/
+ QDIO_STATS_NUMBER)*j;
+@@ -1849,6 +1988,9 @@
+ q->tasklet.func=(void(*)(unsigned long))
+ &qdio_outbound_processing;
+
++ atomic_set(&q->busy_siga_counter,0);
++ q->timing.busy_start=0;
++
+ /* fill in slib */
+ if (i>0) irq_ptr->output_qs[i-1]->slib->nsliba=
+ QDIO_PFIX_GET_ADDR(q->slib);
+@@ -1928,9 +2070,10 @@
+ perf_stats.start_time_inbound=NOW;
+ #endif /* QDIO_PERFORMANCE_STATS */
+
+- /* VM will do the SVS for us
+- * issue SVS to benefit from iqdio interrupt avoidance (SVS clears AISOI)*/
+- if (!MACHINE_IS_VM) {
++ /* SVS only when needed:
++ * issue SVS to benefit from iqdio interrupt avoidance
++ * (SVS clears AISOI)*/
++ if (!omit_svs) {
+ tiqdio_clear_global_summary();
+ }
+
+@@ -2014,7 +2157,7 @@
+ #ifdef QDIO_PERFORMANCE_STATS
+ perf_stats.tl_runs--;
+ #endif /* QDIO_PERFORMANCE_STATS */
+- qdio_inbound_processing(q);
++ __qdio_inbound_processing(q);
+ }
+ }
+ if (irq_ptr->hydra_gives_outbound_pcis) {
+@@ -2027,7 +2170,7 @@
+ if (!irq_ptr->sync_done_on_outb_pcis) {
+ SYNC_MEMORY;
+ }
+- qdio_outbound_processing(q);
++ __qdio_outbound_processing(q);
+ }
+ }
+ }
+@@ -2206,7 +2349,10 @@
+ static unsigned char qdio_check_siga_needs(int sch)
+ {
+ int resp_code,result;
++ unsigned long flags;
+
++ spin_lock_irqsave(&chsc_area_lock,flags);
++
+ memset(chsc_area,0,sizeof(qdio_chsc_area_t));
+ chsc_area->request_block.command_code1=0x0010; /* length */
+ chsc_area->request_block.command_code2=0x0024; /* op code */
+@@ -2219,7 +2365,8 @@
+ QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
+ "SIGAs for sch x%x.\n",
+ result,sch);
+- return -1; /* all flags set */
++ result=-1; /* all flags set */
++ goto out;
+ }
+
+ resp_code=chsc_area->request_block.operation_data_area.
+@@ -2228,7 +2375,8 @@
+ QDIO_PRINT_WARN("response upon checking SIGA needs " \
+ "is 0x%x. Using all SIGAs for sch x%x.\n",
+ resp_code,sch);
+- return -1; /* all flags set */
++ result=-1; /* all flags set */
++ goto out;
+ }
+ if (
+ (!(chsc_area->request_block.operation_data_area.
+@@ -2240,19 +2388,26 @@
+ ) {
+ QDIO_PRINT_WARN("huh? problems checking out sch x%x... " \
+ "using all SIGAs.\n",sch);
+- return CHSC_FLAG_SIGA_INPUT_NECESSARY |
++ result=CHSC_FLAG_SIGA_INPUT_NECESSARY |
+ CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
+ CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */
++ goto out;
+ }
+
+- return chsc_area->request_block.operation_data_area.
++ result=chsc_area->request_block.operation_data_area.
+ store_qdio_data_response.qdioac;
++out:
++ spin_unlock_irqrestore(&chsc_area_lock,flags);
++ return result;
+ }
+
+-static int qdio_check_for_hydra_thinints(void)
++static void qdio_check_for_machine_features(void)
+ {
+ int i,result;
++ unsigned long flags;
+
++ spin_lock_irqsave(&chsc_area_lock,flags);
++
+ memset(chsc_area,0,sizeof(qdio_chsc_area_t));
+ chsc_area->request_block.command_code1=0x0010;
+ chsc_area->request_block.command_code2=0x0010;
+@@ -2260,16 +2415,19 @@
+
+ if (result) {
+ QDIO_PRINT_WARN("CHSC returned cc %i. Won't use adapter " \
+- "interrupts for any Hydra.\n",result);
+- return 0;
++ "interrupts for any QDIO device.\n",result);
++ result=0;
++ goto out;
+ }
+
+ i=chsc_area->request_block.operation_data_area.
+ store_qdio_data_response.response_code;
+ if (i!=1) {
+ QDIO_PRINT_WARN("Was not able to determine general " \
+- "characteristics of all Hydras aboard.\n");
+- return 0;
++ "characteristics of all QDIO devices " \
++ "aboard.\n");
++ result=0;
++ goto out;
+ }
+
+ /* 4: request block
+@@ -2277,17 +2435,30 @@
+ * 512: chsc char */
+ /* check for bit 67 */
+ if ( (*(((unsigned int*)(chsc_area))+4+2+2)&0x10000000)!=0x10000000) {
+- return 0;
++ hydra_thinints=0;
+ } else {
+- return 1;
++ hydra_thinints=1;
+ }
++
++ /* check for bit 56: if aif time delay disablement fac installed,
++ * omit svs even under lpar (good point by rick again) */
++ if ( (*(((unsigned int*)(chsc_area))+4+2+1)&0x00000080)!=0x00000080) {
++ omit_svs=1;
++ } else {
++ omit_svs=0;
++ }
++out:
++ spin_unlock_irqrestore(&chsc_area_lock,flags);
+ }
+
+ /* the chsc_area is locked by the lock in qdio_activate */
+ static unsigned int tiqdio_check_chsc_availability(void) {
+ int result;
+ int i;
++ unsigned long flags;
+
++ spin_lock_irqsave(&chsc_area_lock,flags);
++
+ memset(chsc_area,0,sizeof(qdio_chsc_area_t));
+ chsc_area->request_block.command_code1=0x0010;
+ chsc_area->request_block.command_code2=0x0010;
+@@ -2327,6 +2498,7 @@
+ goto exit;
+ }
+ exit:
++ spin_unlock_irqrestore(&chsc_area_lock,flags);
+ return result;
+ }
+
+@@ -2337,6 +2509,7 @@
+ unsigned long real_addr_local_summary_bit;
+ unsigned long real_addr_dev_st_chg_ind;
+ void *ptr;
++ unsigned long flags;
+ char dbf_text[15];
+
+ unsigned int resp_code;
+@@ -2354,6 +2527,8 @@
+ virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind);
+ }
+
++ spin_lock_irqsave(&chsc_area_lock,flags);
++
+ memset(chsc_area,0,sizeof(qdio_chsc_area_t));
+ chsc_area->request_block.command_code1=0x0fe0;
+ chsc_area->request_block.command_code2=0x0021;
+@@ -2372,12 +2547,19 @@
+ isc=TIQDIO_THININT_ISC;
+ chsc_area->request_block.operation_data_area.set_chsc.
+ subsystem_id=(1<<16)+irq_ptr->irq;
++ /* enables the time delay disablement facility. Don't care
++ * whether it is really there (i.e. we haven't checked for
++ * it) */
++ chsc_area->request_block.operation_data_area.set_chsc.
++ word_with_d_bit=0x10000000;
+
++
+ result=qdio_chsc(chsc_area);
+ if (result) {
+ QDIO_PRINT_WARN("could not set indicators on irq x%x, " \
+ "cc=%i.\n",irq_ptr->irq,result);
+- return -EIO;
++ result=-EIO;
++ goto out;
+ }
+
+ resp_code=chsc_area->response_block.response_code;
+@@ -2389,14 +2571,18 @@
+ QDIO_DBF_TEXT1(0,setup,dbf_text);
+ ptr=&chsc_area->response_block;
+ QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN);
+- return -EIO;
++ result=-EIO;
++ goto out;
+ }
+
+ QDIO_DBF_TEXT2(0,setup,"setscind");
+ QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit,
+ sizeof(unsigned long));
+ QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long));
+- return 0;
++ result=0;
++out:
++ spin_unlock_irqrestore(&chsc_area_lock,flags);
++ return result;
+ }
+
+ /* chsc_area would have to be locked if called from outside qdio_activate */
+@@ -2406,10 +2592,13 @@
+ unsigned int resp_code;
+ int result;
+ void *ptr;
++ unsigned long flags;
+ char dbf_text[15];
+
+ if (!irq_ptr->is_thinint_irq) return -ENODEV;
+
++ spin_lock_irqsave(&chsc_area_lock,flags);
++
+ memset(chsc_area,0,sizeof(qdio_chsc_area_t));
+ chsc_area->request_block.command_code1=0x0fe0;
+ chsc_area->request_block.command_code2=0x1027;
+@@ -2420,7 +2609,8 @@
+ if (result) {
+ QDIO_PRINT_WARN("could not set delay target on irq x%x, " \
+ "cc=%i. Continuing.\n",irq_ptr->irq,result);
+- return -EIO;
++ result=-EIO;
++ goto out;
+ }
+
+ resp_code=chsc_area->response_block.response_code;
+@@ -2435,7 +2625,10 @@
+ }
+ QDIO_DBF_TEXT2(0,trace,"delytrgt");
+ QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long));
+- return 0;
++ result=0;
++out:
++ spin_unlock_irqrestore(&chsc_area_lock,flags);
++ return result;
+ }
+
+ int qdio_cleanup(int irq,int how)
+@@ -2445,7 +2638,7 @@
+ int do_an_irqrestore=0;
+ unsigned long flags;
+ int timeout;
+- char dbf_text[15]="12345678";
++ char dbf_text[15];
+
+ result=0;
+ sprintf(dbf_text,"qcln%4x",irq);
+@@ -2455,7 +2648,7 @@
+ irq_ptr=qdio_get_irq_ptr(irq);
+ if (!irq_ptr) return -ENODEV;
+
+- spin_lock(&irq_ptr->setting_up_lock);
++ down(&irq_ptr->setting_up_lock);
+
+ /* mark all qs as uninteresting */
+ for (i=0;i<irq_ptr->no_input_qs;i++) {
+@@ -2527,7 +2720,7 @@
+ if (do_an_irqrestore)
+ s390irq_spin_unlock_irqrestore(irq,flags);
+
+- spin_unlock(&irq_ptr->setting_up_lock);
++ up(&irq_ptr->setting_up_lock);
+
+ qdio_remove_irq_ptr(irq_ptr);
+ qdio_release_irq_memory(irq_ptr);
+@@ -2574,16 +2767,15 @@
+ int result,result2;
+ int found;
+ unsigned long flags;
+- char dbf_text[20]; /* if a printf would print out more than 8 chars */
++	char dbf_text[20]; /* in case a printf prints out more than 8 chars */
+
+- down_interruptible(&init_sema);
++ down(&init_sema);
+
+ sprintf(dbf_text,"qini%4x",init_data->irq);
+ QDIO_DBF_TEXT0(0,setup,dbf_text);
+ QDIO_DBF_TEXT0(0,trace,dbf_text);
+ sprintf(dbf_text,"qfmt:%x",init_data->q_format);
+ QDIO_DBF_TEXT0(0,setup,dbf_text);
+- QDIO_DBF_TEXT0(0,setup,init_data->adapter_name);
+ QDIO_DBF_HEX0(0,setup,init_data->adapter_name,8);
+ sprintf(dbf_text,"qpff%4x",init_data->qib_param_field_format);
+ QDIO_DBF_TEXT0(0,setup,dbf_text);
+@@ -2672,7 +2864,6 @@
+
+ irq_ptr->qdr=kmalloc(sizeof(qdr_t),GFP_DMA);
+ if (!(irq_ptr->qdr)) {
+- kfree(irq_ptr->qdr);
+ kfree(irq_ptr);
+ QDIO_PRINT_ERR("kmalloc of irq_ptr->qdr failed!\n");
+ result=-ENOMEM;
+@@ -2736,12 +2927,12 @@
+
+ qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE);
+
+- irq_ptr->setting_up_lock=SPIN_LOCK_UNLOCKED;
++ sema_init(&irq_ptr->setting_up_lock,1);
+
+ MOD_INC_USE_COUNT;
+ QDIO_DBF_TEXT3(0,setup,"MOD_INC_");
+
+- spin_lock(&irq_ptr->setting_up_lock);
++ down(&irq_ptr->setting_up_lock);
+
+ qdio_insert_irq_ptr(irq_ptr);
+
+@@ -2867,10 +3058,10 @@
+ }
+ result=tiqdio_set_subchannel_ind(irq_ptr,0);
+ if (result) {
+- spin_unlock(&irq_ptr->setting_up_lock);
++ up(&irq_ptr->setting_up_lock);
+ qdio_cleanup(irq_ptr->irq,
+ QDIO_FLAG_CLEANUP_USING_CLEAR);
+- goto out2;
++ goto out;
+ }
+ tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
+ }
+@@ -2906,9 +3097,9 @@
+ s390irq_spin_unlock_irqrestore(irq_ptr->irq,saveflags);
+
+ if (result) {
+- spin_unlock(&irq_ptr->setting_up_lock);
++ up(&irq_ptr->setting_up_lock);
+ qdio_cleanup(irq_ptr->irq,QDIO_FLAG_CLEANUP_USING_CLEAR);
+- goto out2;
++ goto out;
+ }
+
+ result=qdio_sleepon(&irq_ptr->interrupt_has_arrived,
+@@ -2918,9 +3109,9 @@
+ QDIO_PRINT_ERR("establish queues on irq %04x: timed out\n",
+ irq_ptr->irq);
+ QDIO_DBF_TEXT2(1,setup,"eq:timeo");
+- spin_unlock(&irq_ptr->setting_up_lock);
++ up(&irq_ptr->setting_up_lock);
+ qdio_cleanup(irq_ptr->irq,QDIO_FLAG_CLEANUP_USING_CLEAR);
+- goto out2;
++ goto out;
+ }
+
+ if (!(irq_ptr->io_result_dstat & DEV_STAT_DEV_END)) {
+@@ -2937,10 +3128,10 @@
+ irq_ptr->irq,irq_ptr->io_result_dstat,
+ irq_ptr->io_result_cstat,
+ irq_ptr->io_result_flags);
+- spin_unlock(&irq_ptr->setting_up_lock);
++ up(&irq_ptr->setting_up_lock);
+ qdio_cleanup(irq_ptr->irq,QDIO_FLAG_CLEANUP_USING_CLEAR);
+ result=-EIO;
+- goto out2;
++ goto out;
+ }
+
+ if (irq_ptr->io_result_dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) {
+@@ -2958,18 +3149,19 @@
+ irq_ptr->io_result_cstat,
+ irq_ptr->io_result_flags);
+ result=-EIO;
++ up(&irq_ptr->setting_up_lock);
+ goto out;
+ }
+
+- if (MACHINE_IS_VM)
+ irq_ptr->qdioac=qdio_check_siga_needs(irq_ptr->irq);
+- else {
+- irq_ptr->qdioac=CHSC_FLAG_SIGA_INPUT_NECESSARY
+- | CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
+- }
+ sprintf(dbf_text,"qdioac%2x",irq_ptr->qdioac);
+ QDIO_DBF_TEXT2(0,setup,dbf_text);
+
++ /* if this gets set once, we're running under VM and can omit SVSes */
++ if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY) {
++ omit_svs=1;
++ }
++
+ sprintf(dbf_text,"qib ac%2x",irq_ptr->qib.ac);
+ QDIO_DBF_TEXT2(0,setup,dbf_text);
+
+@@ -3026,11 +3218,10 @@
+
+ qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED);
+
+- out:
+ if (irq_ptr) {
+- spin_unlock(&irq_ptr->setting_up_lock);
++ up(&irq_ptr->setting_up_lock);
+ }
+- out2:
++ out:
+ up(&init_sema);
+
+ return result;
+@@ -3046,7 +3237,7 @@
+ irq_ptr=qdio_get_irq_ptr(irq);
+ if (!irq_ptr) return -ENODEV;
+
+- spin_lock(&irq_ptr->setting_up_lock);
++ down(&irq_ptr->setting_up_lock);
+ if (irq_ptr->state==QDIO_IRQ_STATE_INACTIVE) {
+ result=-EBUSY;
+ goto out;
+@@ -3141,14 +3332,15 @@
+ qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ACTIVE);
+
+ out:
+- spin_unlock(&irq_ptr->setting_up_lock);
++ up(&irq_ptr->setting_up_lock);
+
+ return result;
+ }
+
+ /* buffers filled forwards again to make Rick happy */
+-static void qdio_do_qdio_fill_input(qdio_q_t *q,unsigned int qidx,
+- unsigned int count,qdio_buffer_t *buffers)
++static inline void qdio_do_qdio_fill_input(qdio_q_t *q,unsigned int qidx,
++ unsigned int count,
++ qdio_buffer_t *buffers)
+ {
+ for (;;) {
+ if (!q->is_0copy_sbals_q) {
+@@ -3281,7 +3473,7 @@
+ qdio_kick_outbound_q(q);
+ }
+
+- qdio_outbound_processing(q);
++ __qdio_outbound_processing(q);
+ } else {
+ /* under VM, we do a SIGA sync
+ * unconditionally */
+@@ -3309,7 +3501,7 @@
+ * too long, the upper layer
+ * module could do a lot of
+ * traffic in that time */
+- qdio_outbound_processing(q);
++ __qdio_outbound_processing(q);
+ }
+ }
+
+@@ -3349,7 +3541,7 @@
+ perf_stats.siga_ins);
+ _OUTP_IT("Number of SIGA out's issued : %u\n",
+ perf_stats.siga_outs);
+- _OUTP_IT("Number of PCI's caught : %u\n",
++ _OUTP_IT("Number of PCIs caught : %u\n",
+ perf_stats.pcis);
+ _OUTP_IT("Number of adapter interrupts caught : %u\n",
+ perf_stats.thinints);
+@@ -3576,10 +3768,12 @@
+
+ qdio_add_procfs_entry();
+
+- hydra_thinints=qdio_check_for_hydra_thinints();
++ qdio_check_for_machine_features();
+
+ sprintf(dbf_text,"hydrati%1x",hydra_thinints);
+ QDIO_DBF_TEXT0(0,setup,dbf_text);
++ sprintf(dbf_text,"omitsvs%1x",omit_svs);
++ QDIO_DBF_TEXT0(0,setup,dbf_text);
+
+ tiqdio_register_thinints();
+
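The cc=2/busy-bit handling introduced into qdio_kick_outbound_q above
boils down to a bounded retry policy: count unsuccessful SIGAs, remember
when the busy condition first appeared, and only report the error once a
give-up threshold is exceeded. A standalone sketch of that policy; the
threshold value and the now parameter are stand-ins for the driver's
QDIO_BUSY_BIT_GIVE_UP and NOW macros:

  #define BUSY_BIT_GIVE_UP 2000000ULL   /* illustrative: 2s, in microseconds */

  struct out_queue {
          unsigned long long busy_start;  /* 0 means the last SIGA succeeded */
          int busy_siga_counter;          /* deferred retries still to do */
  };

  /* 0 = went through, 1 = busy but within budget (reschedule via the
   * queue tasklet), -1 = budget exhausted, report the error upstream */
  static int kick_outbound(struct out_queue *q, unsigned long long now,
                           int siga_busy)
  {
          if (!siga_busy) {
                  q->busy_start = 0;      /* went smooth, reset the clock */
                  return 0;
          }
          q->busy_siga_counter++;         /* replayed by the tasklet later */
          if (!q->busy_start)
                  q->busy_start = now;    /* first busy hit starts the clock */
          if (now - q->busy_start < BUSY_BIT_GIVE_UP)
                  return 1;
          return -1;
  }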
+=== drivers/char/tty_io.c
+==================================================================
+--- drivers/char/tty_io.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/char/tty_io.c (/trunk/2.4.27) (revision 52)
+@@ -145,8 +145,10 @@
+ extern void au1x00_serial_console_init(void);
+ extern int rs_8xx_init(void);
+ extern void mac_scc_console_init(void);
+-extern void hwc_console_init(void);
+-extern void hwc_tty_init(void);
++extern void sclp_console_init(void);
++extern void sclp_tty_init(void);
++extern void sclp_vt220_con_init(void);
++extern void sclp_vt220_tty_init(void);
+ extern void con3215_init(void);
+ extern void tty3215_init(void);
+ extern void tub3270_con_init(void);
+@@ -2110,8 +2112,63 @@
+ #endif /* CONFIG_DEVFS_FS */
+ }
+
++/*
++ * Register a tty device described by <driver>, with minor number <minor>,
++ * device name <name> and in the /dev directory given by <dir>.
++ */
++void tty_register_devfs_name (struct tty_driver *driver, unsigned int flags,
++ unsigned minor, devfs_handle_t dir,
++ const char *name)
++{
++#ifdef CONFIG_DEVFS_FS
++ umode_t mode = S_IFCHR | S_IRUSR | S_IWUSR;
++ kdev_t device = MKDEV (driver->major, minor);
++
++ switch (device) {
++ case TTY_DEV:
++ case PTMX_DEV:
++ mode |= S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
++ break;
++ default:
++ if (driver->major == PTY_MASTER_MAJOR)
++ flags |= DEVFS_FL_AUTO_OWNER;
++ break;
++ }
++ if ( (minor < driver->minor_start) ||
++ (minor >= driver->minor_start + driver->num) ) {
++ printk(KERN_ERR "Attempt to register invalid minor number "
++ "with devfs (%d:%d).\n", (int)driver->major,(int)minor);
++ return;
++ }
++# ifdef CONFIG_UNIX98_PTYS
++ if ( (driver->major >= UNIX98_PTY_SLAVE_MAJOR) &&
++ (driver->major < UNIX98_PTY_SLAVE_MAJOR + UNIX98_NR_MAJORS) )
++ flags |= DEVFS_FL_CURRENT_OWNER;
++# endif
++ devfs_register (dir, name, flags | DEVFS_FL_DEFAULT,
++ driver->major, minor, mode, &tty_fops, NULL);
++#endif /* CONFIG_DEVFS_FS */
++}
++
++void tty_unregister_devfs_name (struct tty_driver *driver, unsigned minor,
++ devfs_handle_t dir, const char *name)
++{
++#ifdef CONFIG_DEVFS_FS
++ void * handle;
++
++ handle = devfs_find_handle (dir, name, driver->major, minor,
++ DEVFS_SPECIAL_CHR, 0);
++ devfs_unregister (handle);
++#endif /* CONFIG_DEVFS_FS */
++}
++
++extern void tty_unregister_devfs_name (struct tty_driver *driver,
++ unsigned minor, devfs_handle_t dir,
++ const char *name);
+ EXPORT_SYMBOL(tty_register_devfs);
+ EXPORT_SYMBOL(tty_unregister_devfs);
++EXPORT_SYMBOL(tty_register_devfs_name);
++EXPORT_SYMBOL(tty_unregister_devfs_name);
+
+ /*
+ * Called by a tty driver to register itself.
+@@ -2282,9 +2339,12 @@
+ #ifdef CONFIG_TN3215
+ con3215_init();
+ #endif
+-#ifdef CONFIG_HWC
+- hwc_console_init();
++#ifdef CONFIG_SCLP_CONSOLE
++ sclp_console_init();
+ #endif
++#ifdef CONFIG_SCLP_VT220_CONSOLE
++ sclp_vt220_con_init();
++#endif
+ #ifdef CONFIG_STDIO_CONSOLE
+ stdio_console_init();
+ #endif
+@@ -2454,9 +2514,12 @@
+ #ifdef CONFIG_TN3215
+ tty3215_init();
+ #endif
+-#ifdef CONFIG_HWC
+- hwc_tty_init();
++#ifdef CONFIG_SCLP_TTY
++ sclp_tty_init();
+ #endif
++#ifdef CONFIG_SCLP_VT220_TTY
++ sclp_vt220_tty_init();
++#endif
+ #ifdef CONFIG_A2232
+ a2232board_init();
+ #endif
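tty_register_devfs_name, added above, lets a driver place its devfs node
in a directory of its own choosing instead of the default location. A
hypothetical caller, following the devfs_mk_dir call pattern visible in
the tape driver code earlier in this diff (my_tty_driver and all names
here are made up):

  #ifdef CONFIG_DEVFS_FS
          devfs_handle_t dir;

          /* create /dev/mytty/ and register minor_start as /dev/mytty/0 */
          dir = devfs_mk_dir(NULL, "mytty", NULL);
          tty_register_devfs_name(&my_tty_driver, 0 /* no extra flags */,
                                  my_tty_driver.minor_start, dir, "0");

          /* and on teardown: */
          tty_unregister_devfs_name(&my_tty_driver,
                                    my_tty_driver.minor_start, dir, "0");
  #endif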
+=== drivers/block/Makefile
+==================================================================
+--- drivers/block/Makefile (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/block/Makefile (/trunk/2.4.27) (revision 52)
+@@ -10,7 +10,7 @@
+
+ O_TARGET := block.o
+
+-export-objs := ll_rw_blk.o blkpg.o loop.o DAC960.o genhd.o acsi.o
++export-objs := ll_rw_blk.o blkpg.o elevator.o loop.o DAC960.o genhd.o acsi.o
+
+ obj-y := ll_rw_blk.o blkpg.o genhd.o elevator.o
+
+=== drivers/block/elevator.c
+==================================================================
+--- drivers/block/elevator.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/block/elevator.c (/trunk/2.4.27) (revision 52)
+@@ -219,3 +219,9 @@
+ *elevator = type;
+ elevator->queue_ID = queue_ID++;
+ }
++
++EXPORT_SYMBOL(elevator_init);
++EXPORT_SYMBOL(elevator_linus_merge);
++EXPORT_SYMBOL(elevator_linus_merge_req);
++EXPORT_SYMBOL(elevator_noop_merge);
++EXPORT_SYMBOL(elevator_noop_merge_req);
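The new exports let modular block drivers run elevator_init on queues they
manage themselves; until now these symbols were reachable only from
built-in code. A hypothetical modular user, assuming the stock 2.4
ELEVATOR_NOOP policy macro and the elevator member of request_queue_t:

  #include <linux/blkdev.h>
  #include <linux/elevator.h>

  static void my_queue_setup(request_queue_t *q, request_fn_proc *rfn)
  {
          blk_init_queue(q, rfn);         /* installs the default policy */
          /* switch the queue to no-op sorting, now legal from a module */
          elevator_init(&q->elevator, ELEVATOR_NOOP);
  }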
+=== drivers/scsi/scsi_lib.c
+==================================================================
+--- drivers/scsi/scsi_lib.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/scsi/scsi_lib.c (/trunk/2.4.27) (revision 52)
+@@ -256,12 +256,32 @@
+ if (SCpnt != NULL) {
+
+ /*
++	 * This is a workaround for a case where this Scsi_Cmnd
++ * may have been through the busy retry paths already. We
++ * clear the special flag and try to restore the
++ * read/write request cmd value.
++ */
++ if (SCpnt->request.cmd == SPECIAL)
++ SCpnt->request.cmd =
++ (SCpnt->sc_data_direction ==
++ SCSI_DATA_WRITE) ? WRITE : READ;
++
++ /*
+ * For some reason, we are not done with this request.
+ * This happens for I/O errors in the middle of the request,
+ * in which case we need to request the blocks that come after
+ * the bad sector.
+ */
+ SCpnt->request.special = (void *) SCpnt;
++ /*
++ * We need to recount the number of
++ * scatter-gather segments here - the
++ * normal case code assumes this to be
++ * correct, as it would be a performance
++ * loss to always recount. Handling
++ * errors is always unusual, of course.
++ */
++ recount_segments(SCpnt);
+ list_add(&SCpnt->request.queue, &q->queue_head);
+ }
+
+=== drivers/scsi/scsi_proc.c
+==================================================================
+--- drivers/scsi/scsi_proc.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/scsi/scsi_proc.c (/trunk/2.4.27) (revision 52)
+@@ -120,35 +120,34 @@
+ return(ret);
+ }
+
+-void build_proc_dir_entries(Scsi_Host_Template * tpnt)
+-{
+- struct Scsi_Host *hpnt;
+- char name[10]; /* see scsi_unregister_host() */
++void build_proc_dir_entry(struct Scsi_Host *shpnt) {
++ char name[10]; /* host_no>=10^9? I don't think so. */
++ struct proc_dir_entry *p;
+
+- tpnt->proc_dir = proc_mkdir(tpnt->proc_name, proc_scsi);
+- if (!tpnt->proc_dir) {
+- printk(KERN_ERR "Unable to proc_mkdir in scsi.c/build_proc_dir_entries");
+- return;
+- }
+- tpnt->proc_dir->owner = tpnt->module;
++ if(shpnt->hostt->proc_dir) {
++ sprintf(name, "%d", shpnt->host_no);
++ p = create_proc_read_entry(
++ name,
++ S_IFREG | S_IRUGO | S_IWUSR,
++ shpnt->hostt->proc_dir,
++ proc_scsi_read,
++ (void *) shpnt
++ );
++ if (!p)
++ panic("Not enough memory to register SCSI HBA in /proc/scsi !\n");
++ p->write_proc=proc_scsi_write;
++ p->owner = shpnt->hostt->module;
++ }
++}
+
+- hpnt = scsi_hostlist;
+- while (hpnt) {
+- if (tpnt == hpnt->hostt) {
+- struct proc_dir_entry *p;
+- sprintf(name,"%d",hpnt->host_no);
+- p = create_proc_read_entry(name,
+- S_IFREG | S_IRUGO | S_IWUSR,
+- tpnt->proc_dir,
+- proc_scsi_read,
+- (void *)hpnt);
+- if (!p)
+- panic("Not enough memory to register SCSI HBA in /proc/scsi !\n");
+- p->write_proc=proc_scsi_write;
+- p->owner = tpnt->module;
+- }
+- hpnt = hpnt->next;
++void build_proc_dir(Scsi_Host_Template * tpnt)
++{
++ tpnt->proc_dir = proc_mkdir(tpnt->proc_name, proc_scsi);
++ if (!tpnt->proc_dir) {
++		printk(KERN_ERR "Unable to proc_mkdir in scsi.c/build_proc_dir");
++ return;
+ }
++ tpnt->proc_dir->owner = tpnt->module;
+ }
+
+ /*
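The old build_proc_dir_entries walked the whole host list for one
template; the split lets the template directory and the per-host file be
created independently. Schematically (in this patch, scsi.c calls the
first from scsi_register_host and hosts.c calls the second from
scsi_register):

  /* sketch only; tpnt and shpnt come from the two registration paths */
  static void proc_setup(Scsi_Host_Template *tpnt, struct Scsi_Host *shpnt)
  {
          build_proc_dir(tpnt);           /* /proc/scsi/<proc_name>/ */
          build_proc_dir_entry(shpnt);    /* /proc/scsi/<proc_name>/<host_no> */
  }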
+=== drivers/scsi/scsi.c
+==================================================================
+--- drivers/scsi/scsi.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/scsi/scsi.c (/trunk/2.4.27) (revision 52)
+@@ -537,23 +537,11 @@
+ SCpnt->target,
+ atomic_read(&SCpnt->host->host_active),
+ SCpnt->host->host_failed));
+- if (SCpnt->host->host_failed != 0) {
+- SCSI_LOG_ERROR_RECOVERY(5, printk("Error handler thread %d %d\n",
+- SCpnt->host->in_recovery,
+- SCpnt->host->eh_active));
+- }
+- /*
+- * If the host is having troubles, then look to see if this was the last
+- * command that might have failed. If so, wake up the error handler.
+- */
+- if (SCpnt->host->in_recovery
+- && !SCpnt->host->eh_active
+- && SCpnt->host->host_busy == SCpnt->host->host_failed) {
+- SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
+- atomic_read(&SCpnt->host->eh_wait->count)));
+- up(SCpnt->host->eh_wait);
+- }
+
++	/* Note: The eh_thread is now woken in scsi_bottom_half_handler for
++ * all cases except command timeout
++ */
++
+ spin_unlock_irqrestore(&device_request_lock, flags);
+
+ /*
+@@ -1300,26 +1288,38 @@
+ SCpnt->owner = SCSI_OWNER_ERROR_HANDLER;
+ SCpnt->state = SCSI_STATE_FAILED;
+ SCpnt->host->in_recovery = 1;
+- /*
+- * If the host is having troubles, then look to see if this was the last
+- * command that might have failed. If so, wake up the error handler.
+- */
+- if (SCpnt->host->host_busy == SCpnt->host->host_failed) {
+- SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
+- atomic_read(&SCpnt->host->eh_wait->count)));
+- up(SCpnt->host->eh_wait);
+- }
+- } else {
+- /*
+- * We only get here if the error recovery thread has died.
+- */
++ } else {
++ /* eh not present....trying to continue anyway */
+ scsi_finish_command(SCpnt);
+- }
++ }
++ break;
++ } // switch
++ if (SCpnt->host->eh_wait != NULL) {
++ /*
++ * If the host is having troubles, then look to see if this was the last
++ * command that might have failed. If so, wake up the error handler.
++ */
++ if (SCpnt->host->in_recovery &&
++ !SCpnt->host->eh_active &&
++ (SCpnt->host->host_busy == SCpnt->host->host_failed)) {
++ SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
++ atomic_read(&SCpnt->host->eh_wait->count)));
++ printk("(in_recovery=%d, host_busy=%d, host_failed=%d) "
++ "Waking error handler thread bh(%d)\n",
++ SCpnt->host->in_recovery,
++ SCpnt->host->host_busy,
++ SCpnt->host->host_failed,
++ atomic_read(&SCpnt->host->eh_wait->count));
++ up(SCpnt->host->eh_wait);
++ }
++ } else {
++ SCSI_LOG_ERROR_RECOVERY(5, printk("Warning: eh_thread not present\n"));
+ }
++
+ } /* for(; SCpnt...) */
+-
++
+ } /* while(1==1) */
+-
++
+ }
+
+ /*
+@@ -1868,7 +1868,6 @@
+ struct Scsi_Host *shpnt;
+ Scsi_Device *SDpnt;
+ struct Scsi_Device_Template *sdtpnt;
+- const char *name;
+ unsigned long flags;
+ int out_of_space = 0;
+
+@@ -1895,11 +1894,18 @@
+
+ if (tpnt->use_new_eh_code) {
+ spin_lock_irqsave(&io_request_lock, flags);
+- tpnt->present = tpnt->detect(tpnt);
++ tpnt->detect(tpnt);
+ spin_unlock_irqrestore(&io_request_lock, flags);
+- } else
+- tpnt->present = tpnt->detect(tpnt);
++ } else tpnt->detect(tpnt);
+
++ /* Add the new driver to /proc/scsi (directory only) */
++#ifdef CONFIG_PROC_FS
++ build_proc_dir(tpnt);
++#endif
++
++ tpnt->next = scsi_hosts; /* Add to the linked list */
++ scsi_hosts = tpnt;
++
+ if (tpnt->present) {
+ if (pcount == next_scsi_host) {
+ if (tpnt->present > 1) {
+@@ -1918,48 +1924,7 @@
+ return 1;
+ }
+ }
+- tpnt->next = scsi_hosts; /* Add to the linked list */
+- scsi_hosts = tpnt;
+
+- /* Add the new driver to /proc/scsi */
+-#ifdef CONFIG_PROC_FS
+- build_proc_dir_entries(tpnt);
+-#endif
+-
+-
+- /*
+- * Add the kernel threads for each host adapter that will
+- * handle error correction.
+- */
+- for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
+- if (shpnt->hostt == tpnt && shpnt->hostt->use_new_eh_code) {
+- DECLARE_MUTEX_LOCKED(sem);
+-
+- shpnt->eh_notify = &sem;
+- kernel_thread((int (*)(void *)) scsi_error_handler,
+- (void *) shpnt, 0);
+-
+- /*
+- * Now wait for the kernel error thread to initialize itself
+- * as it might be needed when we scan the bus.
+- */
+- down(&sem);
+- shpnt->eh_notify = NULL;
+- }
+- }
+-
+- for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
+- if (shpnt->hostt == tpnt) {
+- if (tpnt->info) {
+- name = tpnt->info(shpnt);
+- } else {
+- name = tpnt->name;
+- }
+- printk(KERN_INFO "scsi%d : %s\n", /* And print a little message */
+- shpnt->host_no, name);
+- }
+- }
+-
+ /* The next step is to call scan_scsis here. This generates the
+ * Scsi_Devices entries
+ */
+@@ -2036,7 +2001,6 @@
+ struct Scsi_Device_Template *sdtpnt;
+ struct Scsi_Host *sh1;
+ struct Scsi_Host *shpnt;
+- char name[10]; /* host_no>=10^9? I don't think so. */
+
+ /* get the big kernel lock, so we don't race with open() */
+ lock_kernel();
+@@ -2138,19 +2102,7 @@
+ /*
+ * Next, kill the kernel error recovery thread for this host.
+ */
+- for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
+- if (shpnt->hostt == tpnt
+- && shpnt->hostt->use_new_eh_code
+- && shpnt->ehandler != NULL) {
+- DECLARE_MUTEX_LOCKED(sem);
+
+- shpnt->eh_notify = &sem;
+- send_sig(SIGHUP, shpnt->ehandler, 1);
+- down(&sem);
+- shpnt->eh_notify = NULL;
+- }
+- }
+-
+ /* Next we free up the Scsi_Cmnd structures for this host */
+
+ for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
+@@ -2178,9 +2130,6 @@
+ if (shpnt->hostt != tpnt)
+ continue;
+ pcount = next_scsi_host;
+- /* Remove the /proc/scsi directory entry */
+- sprintf(name,"%d",shpnt->host_no);
+- remove_proc_entry(name, tpnt->proc_dir);
+ if (tpnt->release)
+ (*tpnt->release) (shpnt);
+ else {
+@@ -2197,7 +2146,6 @@
+ }
+ if (pcount == next_scsi_host)
+ scsi_unregister(shpnt);
+- tpnt->present--;
+ }
+
+ /*
+=== drivers/scsi/scsi.h
+==================================================================
+--- drivers/scsi/scsi.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/scsi/scsi.h (/trunk/2.4.27) (revision 52)
+@@ -18,6 +18,7 @@
+ #include <linux/config.h> /* for CONFIG_SCSI_LOGGING */
+ #include <linux/devfs_fs_kernel.h>
+ #include <linux/proc_fs.h>
++#include <linux/blkdev.h>
+
+ /*
+ * Some of the public constants are being moved to this file.
+=== drivers/scsi/scsi_queue.c
+==================================================================
+--- drivers/scsi/scsi_queue.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/scsi/scsi_queue.c (/trunk/2.4.27) (revision 52)
+@@ -103,7 +103,7 @@
+ * If a host is inactive and cannot queue any commands, I don't see
+ * how things could possibly work anyways.
+ */
+- if (host->host_busy == 0) {
++ if (host->host_busy == 1) {
+ if (scsi_retry_command(cmd) == 0) {
+ return 0;
+ }
+@@ -118,7 +118,7 @@
+ * If a host is inactive and cannot queue any commands, I don't see
+ * how things could possibly work anyways.
+ */
+- if (cmd->device->device_busy == 0) {
++ if (cmd->device->device_busy == 1) {
+ if (scsi_retry_command(cmd) == 0) {
+ return 0;
+ }
+=== drivers/scsi/Config.in
+==================================================================
+--- drivers/scsi/Config.in (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/scsi/Config.in (/trunk/2.4.27) (revision 52)
+@@ -43,6 +43,8 @@
+ if [ "$CONFIG_PCI" = "y" ]; then
+ dep_tristate '3ware Hardware ATA-RAID support' CONFIG_BLK_DEV_3W_XXXX_RAID $CONFIG_SCSI
+ fi
++
++if [ "$CONFIG_ARCH_S390" != "y" ]; then
+ dep_tristate '7000FASST SCSI support' CONFIG_SCSI_7000FASST $CONFIG_SCSI
+ dep_tristate 'ACARD SCSI support' CONFIG_SCSI_ACARD $CONFIG_SCSI
+ dep_tristate 'Adaptec AHA152X/2825 support' CONFIG_SCSI_AHA152X $CONFIG_SCSI
+@@ -262,6 +264,13 @@
+ fi
+ fi
+
++fi
++
++if [ "$CONFIG_ARCH_S390" = "y" ]; then
++ dep_tristate 'FCP host bus adapter driver for IBM z800, z900, z990 (GA2)' CONFIG_ZFCP $CONFIG_QDIO
++ dep_tristate 'HBA API support for FCP host bus adapter driver for IBM z990 (GA2)' CONFIG_ZFCP_HBAAPI $CONFIG_ZFCP
++fi
++
+ endmenu
+
+ if [ "$CONFIG_HOTPLUG" = "y" -a "$CONFIG_PCMCIA" != "n" ]; then
+=== drivers/scsi/hosts.c
+==================================================================
+--- drivers/scsi/hosts.c (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/scsi/hosts.c (/trunk/2.4.27) (revision 52)
+@@ -88,6 +88,24 @@
+ scsi_unregister(struct Scsi_Host * sh){
+ struct Scsi_Host * shpnt;
+ Scsi_Host_Name *shn;
++ char name[10];
++
++ /* kill error handling thread */
++ if (sh->hostt->use_new_eh_code
++ && sh->ehandler != NULL) {
++ DECLARE_MUTEX_LOCKED(sem);
++
++ sh->eh_notify = &sem;
++ send_sig(SIGHUP, sh->ehandler, 1);
++ down(&sem);
++ sh->eh_notify = NULL;
++ }
++
++ /* remove proc entry */
++#ifdef CONFIG_PROC_FS
++ sprintf(name, "%d", sh->host_no);
++ remove_proc_entry(name, sh->hostt->proc_dir);
++#endif
+
+ if(scsi_hostlist == sh)
+ scsi_hostlist = sh->next;
+@@ -107,7 +125,35 @@
+ if (shn) shn->host_registered = 0;
+ /* else {} : This should not happen, we should panic here... */
+
++#if 1
++	/* We should not decrement max_scsi_hosts (and make this value
++ * candidate for re-allocation by a different driver).
++ * Reason: the device is _still_ on the
++ * scsi_host_no_list and it's identified by its name. When the same
++ * device is re-registered it will get the same host_no again while
++ * new devices may use the allocation scheme and get this very same
++ * host_no.
++ * It's OK to have "holes" in the allocation but it does not mean
++ * "leaks".
++ */
++#else // if 0
++ /* If we are removing the last host registered, it is safe to reuse
++ * its host number (this avoids "holes" at boot time) (DB)
++ * It is also safe to reuse those of numbers directly below which have
++ * been released earlier (to avoid some holes in numbering).
++ */
++ if(sh->host_no == max_scsi_hosts - 1) {
++ while(--max_scsi_hosts >= next_scsi_host) {
++ shpnt = scsi_hostlist;
++ while(shpnt && shpnt->host_no != max_scsi_hosts - 1)
++ shpnt = shpnt->next;
++ if(shpnt)
++ break;
++ }
++ }
++#endif
+ next_scsi_host--;
++ sh->hostt->present--;
+
+ kfree((char *) sh);
+ }
+@@ -122,6 +168,7 @@
+ Scsi_Host_Name *shn, *shn2;
+ int flag_new = 1;
+ const char * hname;
++ char *name;
+ size_t hname_len;
+ retval = (struct Scsi_Host *)kmalloc(sizeof(struct Scsi_Host) + j,
+ (tpnt->unchecked_isa_dma && j ?
+@@ -252,6 +299,37 @@
+ }
+ }
+
++#ifdef CONFIG_PROC_FS
++ build_proc_dir_entry(retval);
++#endif
++
++ /* Start error handling thread */
++ if (retval->hostt->use_new_eh_code) {
++ DECLARE_MUTEX_LOCKED(sem);
++
++ retval->eh_notify = &sem;
++ kernel_thread((int (*)(void *)) scsi_error_handler,
++ (void *) retval, 0);
++
++ /*
++ * Now wait for the kernel error thread to initialize itself
++ * as it might be needed when we scan the bus.
++ */
++ down(&sem);
++ retval->eh_notify = NULL;
++ }
++
++ tpnt->present++;
++
++ if (tpnt->info) {
++ name = (char *)tpnt->info(retval);
++ } else {
++ name = (char *)tpnt->name;
++ }
++ printk(KERN_INFO "scsi%d : %s\n", /* And print a little message */
++ retval->host_no, name);
++
++
+ return retval;
+ }
+
+=== drivers/scsi/hosts.h
+==================================================================
+--- drivers/scsi/hosts.h (/upstream/vanilla/2.4.27) (revision 52)
++++ drivers/scsi/hosts.h (/trunk/2.4.27) (revision 52)
+@@ -471,7 +471,10 @@
+
+ extern Scsi_Host_Template * scsi_hosts;
+
+-extern void build_proc_dir_entries(Scsi_Host_Template *);
++#ifdef CONFIG_PROC_FS
++extern void build_proc_dir(Scsi_Host_Template *);
++extern void build_proc_dir_entry(struct Scsi_Host *);
++#endif
+
+ /*
+ * scsi_init initializes the scsi hosts.
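Taken together, the hosts.c changes make scsi_register and scsi_unregister
self-contained: the error-handler thread, the /proc/scsi entry, the
present count and the announcement printk all move into the per-host
paths, so a hot-plugged adapter no longer depends on the template-wide
loops that scsi_register_host used to run. A minimal detect for a
hypothetical driver under the new flow (its return value is no longer
used to set tpnt->present):

  static int my_detect(Scsi_Host_Template *tpnt)
  {
          struct Scsi_Host *sh = scsi_register(tpnt, 0 /* no hostdata */);

          if (!sh)
                  return 0;
          /* the eh thread is already running and the proc entry
           * /proc/scsi/<proc_name>/<host_no> already exists here;
           * tpnt->present was incremented by scsi_register itself */
          sh->irq = 0;                    /* fill in adapter specifics */
          return 1;
  }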