[kernel] r15203 - in dists/lenny/linux-2.6/debian: . config patches/features/all patches/series
Dann Frazier
dannf at alioth.debian.org
Thu Feb 18 23:11:32 UTC 2010
Author: dannf
Date: Thu Feb 18 23:11:29 2010
New Revision: 15203
Log:
Add be2net driver (Closes: #570428)
Added:
dists/lenny/linux-2.6/debian/patches/features/all/add-be2net.patch
Modified:
dists/lenny/linux-2.6/debian/changelog
dists/lenny/linux-2.6/debian/config/config
dists/lenny/linux-2.6/debian/patches/series/22
Modified: dists/lenny/linux-2.6/debian/changelog
==============================================================================
--- dists/lenny/linux-2.6/debian/changelog Thu Feb 18 22:24:49 2010 (r15202)
+++ dists/lenny/linux-2.6/debian/changelog Thu Feb 18 23:11:29 2010 (r15203)
@@ -31,6 +31,9 @@
* [xen][i386] Fix kernel logging via userspace (Closes: #568561)
(regression due to fix for #510478)
+ [ dann frazier ]
+ * Add be2net driver (Closes: #570428)
+
-- maximilian attems <maks at debian.org> Mon, 28 Dec 2009 23:44:19 +0100
linux-2.6 (2.6.26-21lenny3) stable-security; urgency=high
Modified: dists/lenny/linux-2.6/debian/config/config
==============================================================================
--- dists/lenny/linux-2.6/debian/config/config Thu Feb 18 22:24:49 2010 (r15202)
+++ dists/lenny/linux-2.6/debian/config/config Thu Feb 18 23:11:29 2010 (r15203)
@@ -1031,6 +1031,7 @@
CONFIG_NIU=m
CONFIG_MLX4_DEBUG=y
CONFIG_TEHUTI=m
+CONFIG_BE2NET=m
CONFIG_PPP=m
CONFIG_PPP_MULTILINK=y
CONFIG_PPP_FILTER=y
Added: dists/lenny/linux-2.6/debian/patches/features/all/add-be2net.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/lenny/linux-2.6/debian/patches/features/all/add-be2net.patch Thu Feb 18 23:11:29 2010 (r15203)
@@ -0,0 +1,5779 @@
+Backport of the following commits to Debian's 2.6.26:
+
+6b7c5b947c671a96e39f9526a5fd70c178b8dfd1
+ea1dae11e0baca5d633207fe50fc3cd30a5d68ee
+1ab1ab7543de53c945ea24140409ef67ed173eb4
+4097f663cbe9e58de7ebed222f8af33267f297a8
+65f71b8bd2651e6d6ca9b09fe53a8db2da22b85c
+728a9972d1f290608e730b9ccec2061aa81f6609
+03a980d162eb48a79ce21d47f45b9ec7d9db20e9
+c4ca2374312b4de819dd700e72a68395eddb5fcb
+ebc8d2ab61dde6cf775ae7bb1ed9e38dfe12ca65
+b305be78a044c5f6a9d146229a280d08db0c630a
+6811086899f2740c08d0ade26f8b9d705708e0cc
+76fbb42919396b9bf68c9a03ceb037c971a02e2f
+a7a0ef31def6b6badd94fc96c8f17c2e18d91513
+934037bc2ed29a94bbde72aa6a2e66bdc5861b98
+5fb379ee67a7ec55ff65b467b472f3d69b60ba16
+6ac7b687cb3acc437a586794949a43f5249956bb
+24307eef74bd38e3fc6a6df8f8a1bfc48967f9f6
+a8f447bda3ee00e3a3ab080c48db40078ea65221
+bd46cb6cf11867130a41ea9546dd65688b71f3c2
+c001c213b109c8baeeb6d012b422bf059b18368f
+fa77406aee9d33f35c7202dcd83436feb12d9fc3
+8788fdc2a53cf012a43808877eaa6ac7e3c923b4
+eec368fb3ce3ee9e7bb042bbafb03f297d96e55e
+5f0b849eb35d09cd2f332d5031051c1a8976c30b
+efd2e40a8cc891e8f90e0bdde000006bd6201530
+d9509ac1295ce2ec121333d29b8a85a9e564f817
+14074eab8dcaa7f66d8f52612b2dcec51222bb5f
+a8e9179a7de196d37410fd3e9528081f22c70a4e
+cdab23b7017693c00dd69fa28bcdf5b0434b3838
+859b1e4ec86840b0d0980f82b626d687be682eb9
+9b0365f1954b0b54a896171b4438ed42ad7ef02f
+a65027e4d80ece5a5a3bd4fc4808a83208430929
+b628bde2b5390776efc30837798d016ec1aa3ebe
+1a8887d81ac4bbee6153b4bc9b9f9e099fb5f07e
+b31c50a7f9e93a61d14740dedcbbf2c376998bc7
+dcb9b5648a04d9178f9af9d8b684831a8ea59b9f
+c5b9b92e07e4973b299537c5c684037349dc7e5c
+49643848f9ec8182cf04a83115f58d43854bbdc6
+583e3f34ebf421e51bf15beb9df84ef70f7dd3f9
+78122a52b39c9527fa3a32afbb6572964c17c651
+fad9ab2cefd3a3b4754f49eb41e2f43ea314cdce
+73d540f282c0d8ce48fafd7fcc844e91f31d4103
+43a04fdc369ce4fb6718b95e1c930ff8661e65c1
+12d7ea2c5a5c87834daf9fcd920aab80ff6248b1
+0388f251a33ea60937564ad1f27cf77243409f06
+59fd5d87a4243a992f3a3e69f3627cf4c509608e
+f5209b4446d185cc95f46363f8043a743530c15a
+9e90c961134929678022aee0c68b16c1ed520614
+4f2aa89cd263932d61f286307771996df76bf63e
+e7b909a68cfb83e4bafdadac39534969ce260518
+01ed30da5d2e718df458f1680fd97751a769c1a2
+2243e2e95e24f4c4b1c6575b874ebe0b837d2208
+8d56ff11708e5809c644a6d687a5dff4551043b4
+713d039426a80ed78e71294cfb5d0a009bb20b42
+51c59870f324805ed30eaa2c0089b4cb5f9f7c71
+35a652859ad76d8bd989025952ecb80d7c5304a4
+ca9e4988ccbde3b11116679f1b023eb75df8017e
+ca66ef500b874de4bf58e05f9d18ccdce64eabbc
+b7b83ac39a177741a378d728d82e87de9b0a01a5
+0dffc83e5b831df1df83dfe32a0c267347f9950b
+d744b44e21a2c908aae23a60da1b4ce35cd925ef
+71d8d1b58aa4025ea73a66a130a98d0ed077f9b1
+16c02145902d8597841a25e8443cfb082898a2d7
+26d92f9276a56d55511a427fb70bd70886af647a
+fa4281bbbcb44d1f8bdac894ad0696535272cc43
+7b139c83c590d4965259aad8889cbb08104b2891
+d291b9af1a1a12f59a464494900c6e0db26e2ec3
+55bdeed9f67a92f184e1ddcdd722e622d9dd10c6
+07793d33b4fba00f5bd1dac78fa038bb0e23fa5c
+
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 56a2f67..1c0bd41 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -3623,6 +3623,15 @@ L: linux-ide at vger.kernel.org
+ T: git kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev.git
+ S: Supported
+
++SERVER ENGINES 10Gbps NIC - BladeEngine 2 DRIVER
++P: Sathya Perla
++M: sathyap at serverengines.com
++P: Subbu Seetharaman
++M: subbus at serverengines.com
++L: netdev at vger.kernel.org
++W: http://www.serverengines.com
++S: Supported
++
+ SGI SN-IA64 (Altix) SERIAL CONSOLE DRIVER
+ P: Pat Gefre
+ M: pfg at sgi.com
+diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
+index f4182cf..04e05e1 100644
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -2581,6 +2581,8 @@ config BNX2X
+
+ source "drivers/net/sfc/Kconfig"
+
++source "drivers/net/benet/Kconfig"
++
+ endif # NETDEV_10000
+
+ source "drivers/net/tokenring/Kconfig"
+diff --git a/drivers/net/Makefile b/drivers/net/Makefile
+index dcbfe84..032a745 100644
+--- a/drivers/net/Makefile
++++ b/drivers/net/Makefile
+@@ -18,6 +18,7 @@ obj-$(CONFIG_BONDING) += bonding/
+ obj-$(CONFIG_ATL1) += atlx/
+ obj-$(CONFIG_GIANFAR) += gianfar_driver.o
+ obj-$(CONFIG_TEHUTI) += tehuti.o
++obj-$(CONFIG_BE2NET) += benet/
+
+ gianfar_driver-objs := gianfar.o \
+ gianfar_ethtool.o \
+diff --git a/drivers/net/benet/Kconfig b/drivers/net/benet/Kconfig
+new file mode 100644
+index 0000000..c6934f1
+--- /dev/null
++++ b/drivers/net/benet/Kconfig
+@@ -0,0 +1,7 @@
++config BE2NET
++ tristate "ServerEngines' 10Gbps NIC - BladeEngine 2"
++ depends on PCI && INET
++ select INET_LRO
++ help
++ This driver implements the NIC functionality for ServerEngines'
++ 10Gbps network adapter - BladeEngine 2.
+diff --git a/drivers/net/benet/Makefile b/drivers/net/benet/Makefile
+new file mode 100644
+index 0000000..a60cd80
+--- /dev/null
++++ b/drivers/net/benet/Makefile
+@@ -0,0 +1,7 @@
++#
++# Makefile to build the network driver for ServerEngine's BladeEngine.
++#
++
++obj-$(CONFIG_BE2NET) += be2net.o
++
++be2net-y := be_main.o be_cmds.o be_ethtool.o
+diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
+new file mode 100644
+index 0000000..f63730f
+--- /dev/null
++++ b/drivers/net/benet/be.h
+@@ -0,0 +1,394 @@
++/*
++ * Copyright (C) 2005 - 2009 ServerEngines
++ * All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation. The full GNU General
++ * Public License is included in this distribution in the file called COPYING.
++ *
++ * Contact Information:
++ * linux-drivers at serverengines.com
++ *
++ * ServerEngines
++ * 209 N. Fair Oaks Ave
++ * Sunnyvale, CA 94085
++ */
++
++#ifndef BE_H
++#define BE_H
++
++#include <linux/pci.h>
++#include <linux/etherdevice.h>
++#include <linux/version.h>
++#include <linux/delay.h>
++#include <net/tcp.h>
++#include <net/ip.h>
++#include <net/ipv6.h>
++#include <linux/if_vlan.h>
++#include <linux/workqueue.h>
++#include <linux/interrupt.h>
++#include <linux/inet_lro.h>
++
++#include "be_hw.h"
++
++#define DRV_VER "2.0.348"
++#define DRV_NAME "be2net"
++#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
++#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
++#define OC_NAME "Emulex OneConnect 10Gbps NIC"
++#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)"
++#define DRV_DESC BE_NAME " Driver"
++
++#define BE_VENDOR_ID 0x19a2
++#define BE_DEVICE_ID1 0x211
++#define BE_DEVICE_ID2 0x221
++#define OC_DEVICE_ID1 0x700
++#define OC_DEVICE_ID2 0x701
++#define OC_DEVICE_ID3 0x710
++
++static inline char *nic_name(struct pci_dev *pdev)
++{
++ switch (pdev->device) {
++ case OC_DEVICE_ID1:
++ case OC_DEVICE_ID2:
++ return OC_NAME;
++ case OC_DEVICE_ID3:
++ return OC_NAME1;
++ case BE_DEVICE_ID2:
++ return BE3_NAME;
++ default:
++ return BE_NAME;
++ }
++}
++
++/* Number of bytes of an RX frame that are copied to skb->data */
++#define BE_HDR_LEN 64
++#define BE_MAX_JUMBO_FRAME_SIZE 9018
++#define BE_MIN_MTU 256
++
++#define BE_NUM_VLANS_SUPPORTED 64
++#define BE_MAX_EQD 96
++#define BE_MAX_TX_FRAG_COUNT 30
++
++#define EVNT_Q_LEN 1024
++#define TX_Q_LEN 2048
++#define TX_CQ_LEN 1024
++#define RX_Q_LEN 1024 /* Does not support any other value */
++#define RX_CQ_LEN 1024
++#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
++#define MCC_CQ_LEN 256
++
++#define BE_NAPI_WEIGHT 64
++#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
++#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
++
++#define BE_MAX_LRO_DESCRIPTORS 16
++#define BE_MAX_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS))
++
++#define FW_VER_LEN 32
++
++struct be_dma_mem {
++ void *va;
++ dma_addr_t dma;
++ u32 size;
++};
++
++struct be_queue_info {
++ struct be_dma_mem dma_mem;
++ u16 len;
++ u16 entry_size; /* Size of an element in the queue */
++ u16 id;
++ u16 tail, head;
++ bool created;
++ atomic_t used; /* Number of valid elements in the queue */
++};
++
++static inline u32 MODULO(u16 val, u16 limit)
++{
++ BUG_ON(limit & (limit - 1));
++ return val & (limit - 1);
++}
++
++static inline void index_adv(u16 *index, u16 val, u16 limit)
++{
++ *index = MODULO((*index + val), limit);
++}
++
++static inline void index_inc(u16 *index, u16 limit)
++{
++ *index = MODULO((*index + 1), limit);
++}
++
++static inline void *queue_head_node(struct be_queue_info *q)
++{
++ return q->dma_mem.va + q->head * q->entry_size;
++}
++
++static inline void *queue_tail_node(struct be_queue_info *q)
++{
++ return q->dma_mem.va + q->tail * q->entry_size;
++}
++
++static inline void queue_head_inc(struct be_queue_info *q)
++{
++ index_inc(&q->head, q->len);
++}
++
++static inline void queue_tail_inc(struct be_queue_info *q)
++{
++ index_inc(&q->tail, q->len);
++}
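++
++/* Example: with a ring of len 1024 (a power of two), the helpers above
++ * wrap indices with a bitwise AND instead of a modulo:
++ *
++ * u16 idx = 1023;
++ * index_inc(&idx, 1024); -> MODULO(1024, 1024) = 1024 & 1023 = 0
++ * index_adv(&idx, 5, 1024); -> idx = 5
++ *
++ * The BUG_ON(limit & (limit - 1)) in MODULO() rejects non-power-of-two
++ * lengths, for which the AND trick would give a wrong result. */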
++
++struct be_eq_obj {
++ struct be_queue_info q;
++ char desc[32];
++
++ /* Adaptive interrupt coalescing (AIC) info */
++ bool enable_aic;
++ u16 min_eqd; /* in usecs */
++ u16 max_eqd; /* in usecs */
++ u16 cur_eqd; /* in usecs */
++
++ struct napi_struct napi;
++};
++
++struct be_mcc_obj {
++ struct be_queue_info q;
++ struct be_queue_info cq;
++};
++
++struct be_drvr_stats {
++ u32 be_tx_reqs; /* number of TX requests initiated */
++ u32 be_tx_stops; /* number of times TX Q was stopped */
++ u32 be_fwd_reqs; /* number of send reqs through forwarding i/f */
++ u32 be_tx_wrbs; /* number of tx WRBs used */
++ u32 be_tx_events; /* number of tx completion events */
++ u32 be_tx_compl; /* number of tx completion entries processed */
++ ulong be_tx_jiffies;
++ u64 be_tx_bytes;
++ u64 be_tx_bytes_prev;
++ u32 be_tx_rate;
++
++ u32 cache_barrier[16];
++
++ u32 be_ethrx_post_fail; /* number of ethrx buffer alloc failures */
++ u32 be_rx_polls; /* number of times NAPI called poll function */
++ u32 be_rx_events; /* number of ucast rx completion events */
++ u32 be_rx_compl; /* number of rx completion entries processed */
++ u32 be_lro_hgram_data[8]; /* histogram of LRO data packets */
++ u32 be_lro_hgram_ack[8]; /* histogram of LRO ACKs */
++ ulong be_rx_jiffies;
++ u64 be_rx_bytes;
++ u64 be_rx_bytes_prev;
++ u32 be_rx_rate;
++ /* number of non-Ethernet-II frames dropped because
++ * frame len > length field of MAC hdr */
++ u32 be_802_3_dropped_frames;
++ /* number of malformed non-Ethernet-II frames where
++ * frame len < length field of MAC hdr */
++ u32 be_802_3_malformed_frames;
++ u32 be_rxcp_err; /* Num rx completion entries w/ err set. */
++ ulong rx_fps_jiffies; /* jiffies at last FPS calc */
++ u32 be_rx_frags;
++ u32 be_prev_rx_frags;
++ u32 be_rx_fps; /* Rx frags per second */
++};
++
++struct be_stats_obj {
++ struct be_drvr_stats drvr_stats;
++ struct be_dma_mem cmd;
++};
++
++struct be_tx_obj {
++ struct be_queue_info q;
++ struct be_queue_info cq;
++ /* Remember the skbs that were transmitted */
++ struct sk_buff *sent_skb_list[TX_Q_LEN];
++};
++
++/* Struct to remember the pages posted for rx frags */
++struct be_rx_page_info {
++ struct page *page;
++ dma_addr_t bus;
++ u16 page_offset;
++ bool last_page_user;
++};
++
++struct be_rx_obj {
++ struct be_queue_info q;
++ struct be_queue_info cq;
++ struct be_rx_page_info page_info_tbl[RX_Q_LEN];
++ struct net_lro_mgr lro_mgr;
++ struct net_lro_desc lro_desc[BE_MAX_LRO_DESCRIPTORS];
++};
++
++#define BE_NUM_MSIX_VECTORS 2 /* 1 each for Tx and Rx */
++struct be_adapter {
++ struct pci_dev *pdev;
++ struct net_device *netdev;
++
++ u8 __iomem *csr;
++ u8 __iomem *db; /* Door Bell */
++ u8 __iomem *pcicfg; /* PCI config space */
++
++ spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
++ struct be_dma_mem mbox_mem;
++ /* Mbox mem is adjusted to align to 16 bytes. The originally
++ * allocated address is kept so the memory can be freed later */
++ struct be_dma_mem mbox_mem_alloced;
++
++ struct be_mcc_obj mcc_obj;
++ spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
++ spinlock_t mcc_cq_lock;
++
++ struct msix_entry msix_entries[BE_NUM_MSIX_VECTORS];
++ bool msix_enabled;
++ bool isr_registered;
++
++ /* TX Rings */
++ struct be_eq_obj tx_eq;
++ struct be_tx_obj tx_obj;
++
++ u32 cache_line_break[8];
++
++ /* Rx rings */
++ struct be_eq_obj rx_eq;
++ struct be_rx_obj rx_obj;
++ u32 big_page_size; /* Compounded page size shared by rx wrbs */
++ bool rx_post_starved; /* Zero rx frags have been posted to BE */
++
++ struct vlan_group *vlan_grp;
++ u16 num_vlans;
++ u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];
++ struct be_dma_mem mc_cmd_mem;
++
++ struct be_stats_obj stats;
++ /* Work queue used to perform periodic tasks like getting statistics */
++ struct delayed_work work;
++
++ /* Ethtool knobs and info */
++ bool rx_csum; /* BE card must perform rx-checksumming */
++ u32 max_rx_coal;
++ char fw_ver[FW_VER_LEN];
++ u32 if_handle; /* Used to configure filtering */
++ u32 pmac_id; /* MAC addr handle used by BE card */
++
++ bool link_up;
++ u32 port_num;
++ bool promiscuous;
++ bool wol;
++ u32 rx_fc; /* Rx flow control */
++ u32 tx_fc; /* Tx flow control */
++ int link_speed;
++ u8 port_type;
++ u8 transceiver;
++ u8 generation; /* BladeEngine ASIC generation */
++};
++
++/* BladeEngine Generation numbers */
++#define BE_GEN2 2
++#define BE_GEN3 3
++
++extern struct ethtool_ops be_ethtool_ops;
++
++#define drvr_stats(adapter) (&adapter->stats.drvr_stats)
++
++static inline unsigned int be_pci_func(struct be_adapter *adapter)
++{
++ return PCI_FUNC(adapter->pdev->devfn);
++}
++
++#define PAGE_SHIFT_4K 12
++#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
++
++/* Returns number of pages spanned by the data starting at the given addr */
++#define PAGES_4K_SPANNED(_address, size) \
++ ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
++ (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
++
++/* Byte offset into the page corresponding to given address */
++#define OFFSET_IN_PAGE(addr) \
++ ((size_t)(addr) & (PAGE_SIZE_4K-1))
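++
++/* Example: a 100-byte buffer starting at the last byte of a 4K page
++ * (offset 0xfff) spans two pages:
++ * PAGES_4K_SPANNED(0xfff, 100) = (0xfff + 100 + 0xfff) >> 12
++ * = 0x2062 >> 12 = 2 */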
++
++/* Returns bit offset within a DWORD of a bitfield */
++#define AMAP_BIT_OFFSET(_struct, field) \
++ (((size_t)&(((_struct *)0)->field))%32)
++
++/* Returns the bit mask of the field that is NOT shifted into location. */
++static inline u32 amap_mask(u32 bitsize)
++{
++ return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
++}
++
++static inline void
++amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
++{
++ u32 *dw = (u32 *) ptr + dw_offset;
++ *dw &= ~(mask << offset);
++ *dw |= (mask & value) << offset;
++}
++
++#define AMAP_SET_BITS(_struct, field, ptr, val) \
++ amap_set(ptr, \
++ offsetof(_struct, field)/32, \
++ amap_mask(sizeof(((_struct *)0)->field)), \
++ AMAP_BIT_OFFSET(_struct, field), \
++ val)
++
++static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
++{
++ u32 *dw = (u32 *) ptr;
++ return mask & (*(dw + dw_offset) >> offset);
++}
++
++#define AMAP_GET_BITS(_struct, field, ptr) \
++ amap_get(ptr, \
++ offsetof(_struct, field)/32, \
++ amap_mask(sizeof(((_struct *)0)->field)), \
++ AMAP_BIT_OFFSET(_struct, field))
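++
++/* The amap_* helpers treat a struct as a bit map: each u8 member stands
++ * for one *bit*, so offsetof() yields a bit offset and sizeof() a field
++ * width in bits. Illustration with a made-up layout (not one of the real
++ * hardware contexts from be_hw.h):
++ *
++ * struct amap_demo {
++ * u8 ring_id[16]; (bits 0 - 15)
++ * u8 rsvd[15]; (bits 16 - 30)
++ * u8 valid; (bit 31)
++ * };
++ *
++ * AMAP_SET_BITS(struct amap_demo, valid, ctxt, 1) ORs (0x1 & 1) << 31
++ * into dword 0 of ctxt, and AMAP_GET_BITS(struct amap_demo, ring_id, ctxt)
++ * reads back (dword 0 >> 0) & 0xFFFF. */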
++
++#define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len)
++#define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len)
++static inline void swap_dws(void *wrb, int len)
++{
++#ifdef __BIG_ENDIAN
++ u32 *dw = wrb;
++ BUG_ON(len % 4);
++ do {
++ *dw = cpu_to_le32(*dw);
++ dw++;
++ len -= 4;
++ } while (len);
++#endif /* __BIG_ENDIAN */
++}
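++
++/* On little-endian hosts the #ifdef above leaves swap_dws() empty, so
++ * be_dws_cpu_to_le()/be_dws_le_to_cpu() are no-ops there; only big-endian
++ * hosts byte-swap each 32-bit word of the wrb in place. */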
++
++static inline u8 is_tcp_pkt(struct sk_buff *skb)
++{
++ u8 val = 0;
++
++ if (ip_hdr(skb)->version == 4)
++ val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
++ else if (ip_hdr(skb)->version == 6)
++ val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);
++
++ return val;
++}
++
++static inline u8 is_udp_pkt(struct sk_buff *skb)
++{
++ u8 val = 0;
++
++ if (ip_hdr(skb)->version == 4)
++ val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
++ else if (ip_hdr(skb)->version == 6)
++ val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);
++
++ return val;
++}
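++
++/* Reading ip_hdr(skb)->version before knowing the family is safe because
++ * IPv4 and IPv6 headers both begin with the 4-bit version field; only
++ * after that check do the helpers touch family-specific fields. */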
++
++extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
++ u16 num_popped);
++extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
++extern void netdev_stats_update(struct be_adapter *adapter);
++#endif /* BE_H */
+diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
+new file mode 100644
+index 0000000..f3ba2e8
+--- /dev/null
++++ b/drivers/net/benet/be_cmds.c
+@@ -0,0 +1,1407 @@
++/*
++ * Copyright (C) 2005 - 2009 ServerEngines
++ * All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation. The full GNU General
++ * Public License is included in this distribution in the file called COPYING.
++ *
++ * Contact Information:
++ * linux-drivers at serverengines.com
++ *
++ * ServerEngines
++ * 209 N. Fair Oaks Ave
++ * Sunnyvale, CA 94085
++ */
++
++#include "be.h"
++#include "be_cmds.h"
++
++static void be_mcc_notify(struct be_adapter *adapter)
++{
++ struct be_queue_info *mccq = &adapter->mcc_obj.q;
++ u32 val = 0;
++
++ val |= mccq->id & DB_MCCQ_RING_ID_MASK;
++ val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
++ iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
++}
++
++/* To check if valid bit is set, check the entire word as we don't know
++ * the endianness of the data (old entry is host endian while a new entry is
++ * little endian) */
++static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
++{
++ if (compl->flags != 0) {
++ compl->flags = le32_to_cpu(compl->flags);
++ BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
++ return true;
++ } else {
++ return false;
++ }
++}
++
++/* Need to reset the entire word that houses the valid bit */
++static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
++{
++ compl->flags = 0;
++}
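++
++/* Valid-bit protocol: the hardware sets a bit in "flags" when it writes a
++ * new completion; be_mcc_compl_is_new() detects it and be_mcc_compl_use()
++ * zeroes the word so the slot reads as stale when the ring wraps back. */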
++
++static int be_mcc_compl_process(struct be_adapter *adapter,
++ struct be_mcc_compl *compl)
++{
++ u16 compl_status, extd_status;
++
++ /* Just swap the status to host endian; mcc tag is opaquely copied
++ * from mcc_wrb */
++ be_dws_le_to_cpu(compl, 4);
++
++ compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
++ CQE_STATUS_COMPL_MASK;
++ if (compl_status == MCC_STATUS_SUCCESS) {
++ if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
++ struct be_cmd_resp_get_stats *resp =
++ adapter->stats.cmd.va;
++ be_dws_le_to_cpu(&resp->hw_stats,
++ sizeof(resp->hw_stats));
++ netdev_stats_update(adapter);
++ }
++ } else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
++ extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
++ CQE_STATUS_EXTD_MASK;
++ dev_warn(&adapter->pdev->dev,
++ "Error in cmd completion - opcode %d, compl %d, extd %d\n",
++ compl->tag0, compl_status, extd_status);
++ }
++ return compl_status;
++}
++
++/* Link state evt is a string of bytes; no need for endian swapping */
++static void be_async_link_state_process(struct be_adapter *adapter,
++ struct be_async_event_link_state *evt)
++{
++ be_link_status_update(adapter,
++ evt->port_link_status == ASYNC_EVENT_LINK_UP);
++}
++
++static inline bool is_link_state_evt(u32 trailer)
++{
++ return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
++ ASYNC_TRAILER_EVENT_CODE_MASK) ==
++ ASYNC_EVENT_CODE_LINK_STATE);
++}
++
++static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
++{
++ struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
++ struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
++
++ if (be_mcc_compl_is_new(compl)) {
++ queue_tail_inc(mcc_cq);
++ return compl;
++ }
++ return NULL;
++}
++
++int be_process_mcc(struct be_adapter *adapter)
++{
++ struct be_mcc_compl *compl;
++ int num = 0, status = 0;
++
++ spin_lock_bh(&adapter->mcc_cq_lock);
++ while ((compl = be_mcc_compl_get(adapter))) {
++ if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
++ /* Interpret flags as an async trailer */
++ BUG_ON(!is_link_state_evt(compl->flags));
++
++ /* Interpret compl as an async link evt */
++ be_async_link_state_process(adapter,
++ (struct be_async_event_link_state *) compl);
++ } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
++ status = be_mcc_compl_process(adapter, compl);
++ atomic_dec(&adapter->mcc_obj.q.used);
++ }
++ be_mcc_compl_use(compl);
++ num++;
++ }
++
++ if (num)
++ be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);
++
++ spin_unlock_bh(&adapter->mcc_cq_lock);
++ return status;
++}
++
++/* Wait till no more pending mcc requests are present */
++static int be_mcc_wait_compl(struct be_adapter *adapter)
++{
++#define mcc_timeout 120000 /* 12s timeout */
++ int i, status;
++ for (i = 0; i < mcc_timeout; i++) {
++ status = be_process_mcc(adapter);
++ if (status)
++ return status;
++
++ if (atomic_read(&adapter->mcc_obj.q.used) == 0)
++ break;
++ udelay(100);
++ }
++ if (i == mcc_timeout) {
++ dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
++ return -1;
++ }
++ return 0;
++}
++
++/* Notify MCC requests and wait for completion */
++static int be_mcc_notify_wait(struct be_adapter *adapter)
++{
++ be_mcc_notify(adapter);
++ return be_mcc_wait_compl(adapter);
++}
++
++static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
++{
++ int cnt = 0, wait = 5;
++ u32 ready;
++
++ do {
++ ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
++ if (ready)
++ break;
++
++ if (cnt > 200000) {
++ dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
++ return -1;
++ }
++
++ if (cnt > 50)
++ wait = 200;
++ cnt += wait;
++ udelay(wait);
++ } while (true);
++
++ return 0;
++}
++
++/*
++ * Insert the mailbox address into the doorbell in two steps
++ * Polls on the mbox doorbell till a command completion (or a timeout) occurs
++ */
++static int be_mbox_notify_wait(struct be_adapter *adapter)
++{
++ int status;
++ u32 val = 0;
++ void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
++ struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
++ struct be_mcc_mailbox *mbox = mbox_mem->va;
++ struct be_mcc_compl *compl = &mbox->compl;
++
++ val |= MPU_MAILBOX_DB_HI_MASK;
++ /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
++ val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
++ iowrite32(val, db);
++
++ /* wait for ready to be set */
++ status = be_mbox_db_ready_wait(adapter, db);
++ if (status != 0)
++ return status;
++
++ val = 0;
++ /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
++ val |= (u32)(mbox_mem->dma >> 4) << 2;
++ iowrite32(val, db);
++
++ status = be_mbox_db_ready_wait(adapter, db);
++ if (status != 0)
++ return status;
++
++ /* A cq entry has been made now */
++ if (be_mcc_compl_is_new(compl)) {
++ status = be_mcc_compl_process(adapter, &mbox->compl);
++ be_mcc_compl_use(compl);
++ if (status)
++ return status;
++ } else {
++ dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
++ return -1;
++ }
++ return 0;
++}
++
++static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
++{
++ u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
++
++ *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
++ if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
++ return -1;
++ else
++ return 0;
++}
++
++int be_cmd_POST(struct be_adapter *adapter)
++{
++ u16 stage;
++ int status, timeout = 0;
++
++ do {
++ status = be_POST_stage_get(adapter, &stage);
++ if (status) {
++ dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
++ stage);
++ return -1;
++ } else if (stage != POST_STAGE_ARMFW_RDY) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule_timeout(2 * HZ);
++ timeout += 2;
++ } else {
++ return 0;
++ }
++ } while (timeout < 20);
++
++ dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
++ return -1;
++}
++
++static inline void *embedded_payload(struct be_mcc_wrb *wrb)
++{
++ return wrb->payload.embedded_payload;
++}
++
++static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
++{
++ return &wrb->payload.sgl[0];
++}
++
++/* Don't touch the hdr after it's prepared */
++static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
++ bool embedded, u8 sge_cnt, u32 opcode)
++{
++ if (embedded)
++ wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
++ else
++ wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
++ MCC_WRB_SGE_CNT_SHIFT;
++ wrb->payload_length = payload_len;
++ wrb->tag0 = opcode;
++ be_dws_cpu_to_le(wrb, 8);
++}
++
++/* Don't touch the hdr after it's prepared */
++static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
++ u8 subsystem, u8 opcode, int cmd_len)
++{
++ req_hdr->opcode = opcode;
++ req_hdr->subsystem = subsystem;
++ req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
++ req_hdr->version = 0;
++}
++
++static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
++ struct be_dma_mem *mem)
++{
++ int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
++ u64 dma = (u64)mem->dma;
++
++ for (i = 0; i < buf_pages; i++) {
++ pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
++ pages[i].hi = cpu_to_le32(upper_32_bits(dma));
++ dma += PAGE_SIZE_4K;
++ }
++}
++
++/* Converts interrupt delay in microseconds to multiplier value */
++static u32 eq_delay_to_mult(u32 usec_delay)
++{
++#define MAX_INTR_RATE 651042
++ const u32 round = 10;
++ u32 multiplier;
++
++ if (usec_delay == 0)
++ multiplier = 0;
++ else {
++ u32 interrupt_rate = 1000000 / usec_delay;
++ /* Max delay, corresponding to the lowest interrupt rate */
++ if (interrupt_rate == 0)
++ multiplier = 1023;
++ else {
++ multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
++ multiplier /= interrupt_rate;
++ /* Round the multiplier to the closest value. */
++ multiplier = (multiplier + round/2) / round;
++ multiplier = min(multiplier, (u32)1023);
++ }
++ }
++ return multiplier;
++}
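++
++/* Worked example: usec_delay = 8 gives interrupt_rate = 125000/sec, so
++ * multiplier = (651042 - 125000) * 10 / 125000 = 42 (integer divide),
++ * then (42 + 5) / 10 = 4; i.e. an 8 usec delay maps to multiplier 4.
++ * Larger delays map to larger multipliers, capped at 1023. */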
++
++static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
++{
++ struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
++ struct be_mcc_wrb *wrb
++ = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
++ memset(wrb, 0, sizeof(*wrb));
++ return wrb;
++}
++
++static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
++{
++ struct be_queue_info *mccq = &adapter->mcc_obj.q;
++ struct be_mcc_wrb *wrb;
++
++ if (atomic_read(&mccq->used) >= mccq->len) {
++ dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
++ return NULL;
++ }
++
++ wrb = queue_head_node(mccq);
++ queue_head_inc(mccq);
++ atomic_inc(&mccq->used);
++ memset(wrb, 0, sizeof(*wrb));
++ return wrb;
++}
++
++/* Tell fw we're about to start firing cmds by writing a
++ * special pattern across the wrb hdr; uses mbox
++ */
++int be_cmd_fw_init(struct be_adapter *adapter)
++{
++ u8 *wrb;
++ int status;
++
++ spin_lock(&adapter->mbox_lock);
++
++ wrb = (u8 *)wrb_from_mbox(adapter);
++ *wrb++ = 0xFF;
++ *wrb++ = 0x12;
++ *wrb++ = 0x34;
++ *wrb++ = 0xFF;
++ *wrb++ = 0xFF;
++ *wrb++ = 0x56;
++ *wrb++ = 0x78;
++ *wrb = 0xFF;
++
++ status = be_mbox_notify_wait(adapter);
++
++ spin_unlock(&adapter->mbox_lock);
++ return status;
++}
++
++/* Tell fw we're done with firing cmds by writing a
++ * special pattern across the wrb hdr; uses mbox
++ */
++int be_cmd_fw_clean(struct be_adapter *adapter)
++{
++ u8 *wrb;
++ int status;
++
++ spin_lock(&adapter->mbox_lock);
++
++ wrb = (u8 *)wrb_from_mbox(adapter);
++ *wrb++ = 0xFF;
++ *wrb++ = 0xAA;
++ *wrb++ = 0xBB;
++ *wrb++ = 0xFF;
++ *wrb++ = 0xFF;
++ *wrb++ = 0xCC;
++ *wrb++ = 0xDD;
++ *wrb = 0xFF;
++
++ status = be_mbox_notify_wait(adapter);
++
++ spin_unlock(&adapter->mbox_lock);
++ return status;
++}
++
++int be_cmd_eq_create(struct be_adapter *adapter,
++ struct be_queue_info *eq, int eq_delay)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_eq_create *req;
++ struct be_dma_mem *q_mem = &eq->dma_mem;
++ int status;
++
++ spin_lock(&adapter->mbox_lock);
++
++ wrb = wrb_from_mbox(adapter);
++ req = embedded_payload(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_EQ_CREATE, sizeof(*req));
++
++ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
++
++ AMAP_SET_BITS(struct amap_eq_context, func, req->context,
++ be_pci_func(adapter));
++ AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
++ /* 4-byte eqe */
++ AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
++ AMAP_SET_BITS(struct amap_eq_context, count, req->context,
++ __ilog2_u32(eq->len/256));
++ AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
++ eq_delay_to_mult(eq_delay));
++ be_dws_cpu_to_le(req->context, sizeof(req->context));
++
++ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
++
++ status = be_mbox_notify_wait(adapter);
++ if (!status) {
++ struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
++ eq->id = le16_to_cpu(resp->eq_id);
++ eq->created = true;
++ }
++
++ spin_unlock(&adapter->mbox_lock);
++ return status;
++}
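++
++/* The "count" field above encodes the EQ size in units of 256 entries as
++ * a log2: for EVNT_Q_LEN = 1024, __ilog2_u32(1024/256) = 2. */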
++
++/* Uses mbox */
++int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
++ u8 type, bool permanent, u32 if_handle)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_mac_query *req;
++ int status;
++
++ spin_lock(&adapter->mbox_lock);
++
++ wrb = wrb_from_mbox(adapter);
++ req = embedded_payload(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++ OPCODE_COMMON_NTWK_MAC_QUERY);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));
++
++ req->type = type;
++ if (permanent) {
++ req->permanent = 1;
++ } else {
++ req->if_id = cpu_to_le16((u16) if_handle);
++ req->permanent = 0;
++ }
++
++ status = be_mbox_notify_wait(adapter);
++ if (!status) {
++ struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
++ memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
++ }
++
++ spin_unlock(&adapter->mbox_lock);
++ return status;
++}
++
++/* Uses synchronous MCCQ */
++int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
++ u32 if_id, u32 *pmac_id)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_pmac_add *req;
++ int status;
++
++ spin_lock_bh(&adapter->mcc_lock);
++
++ wrb = wrb_from_mccq(adapter);
++ if (!wrb) {
++ status = -EBUSY;
++ goto err;
++ }
++ req = embedded_payload(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++ OPCODE_COMMON_NTWK_PMAC_ADD);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
++
++ req->if_id = cpu_to_le32(if_id);
++ memcpy(req->mac_address, mac_addr, ETH_ALEN);
++
++ status = be_mcc_notify_wait(adapter);
++ if (!status) {
++ struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
++ *pmac_id = le32_to_cpu(resp->pmac_id);
++ }
++
++err:
++ spin_unlock_bh(&adapter->mcc_lock);
++ return status;
++}
++
++/* Uses synchronous MCCQ */
++int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_pmac_del *req;
++ int status;
++
++ spin_lock_bh(&adapter->mcc_lock);
++
++ wrb = wrb_from_mccq(adapter);
++ if (!wrb) {
++ status = -EBUSY;
++ goto err;
++ }
++ req = embedded_payload(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++ OPCODE_COMMON_NTWK_PMAC_DEL);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
++
++ req->if_id = cpu_to_le32(if_id);
++ req->pmac_id = cpu_to_le32(pmac_id);
++
++ status = be_mcc_notify_wait(adapter);
++
++err:
++ spin_unlock_bh(&adapter->mcc_lock);
++ return status;
++}
++
++/* Uses Mbox */
++int be_cmd_cq_create(struct be_adapter *adapter,
++ struct be_queue_info *cq, struct be_queue_info *eq,
++ bool sol_evts, bool no_delay, int coalesce_wm)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_cq_create *req;
++ struct be_dma_mem *q_mem = &cq->dma_mem;
++ void *ctxt;
++ int status;
++
++ spin_lock(&adapter->mbox_lock);
++
++ wrb = wrb_from_mbox(adapter);
++ req = embedded_payload(wrb);
++ ctxt = &req->context;
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++ OPCODE_COMMON_CQ_CREATE);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_CQ_CREATE, sizeof(*req));
++
++ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
++
++ AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
++ AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
++ AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
++ __ilog2_u32(cq->len/256));
++ AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
++ AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
++ AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
++ AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
++ AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
++ AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter));
++ be_dws_cpu_to_le(ctxt, sizeof(req->context));
++
++ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
++
++ status = be_mbox_notify_wait(adapter);
++ if (!status) {
++ struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
++ cq->id = le16_to_cpu(resp->cq_id);
++ cq->created = true;
++ }
++
++ spin_unlock(&adapter->mbox_lock);
++
++ return status;
++}
++
++static u32 be_encoded_q_len(int q_len)
++{
++ u32 len_encoded = fls(q_len); /* log2(len) + 1 */
++ if (len_encoded == 16)
++ len_encoded = 0;
++ return len_encoded;
++}
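++
++/* Example: for MCC_Q_LEN = 128, fls(128) = 8, so the ring size is encoded
++ * as 8; fls() == 16 (a 32K-entry ring) is encoded as the special value 0. */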
++
++int be_cmd_mccq_create(struct be_adapter *adapter,
++ struct be_queue_info *mccq,
++ struct be_queue_info *cq)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_mcc_create *req;
++ struct be_dma_mem *q_mem = &mccq->dma_mem;
++ void *ctxt;
++ int status;
++
++ spin_lock(&adapter->mbox_lock);
++
++ wrb = wrb_from_mbox(adapter);
++ req = embedded_payload(wrb);
++ ctxt = &req->context;
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++ OPCODE_COMMON_MCC_CREATE);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_MCC_CREATE, sizeof(*req));
++
++ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
++
++ AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter));
++ AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
++ AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
++ be_encoded_q_len(mccq->len));
++ AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
++
++ be_dws_cpu_to_le(ctxt, sizeof(req->context));
++
++ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
++
++ status = be_mbox_notify_wait(adapter);
++ if (!status) {
++ struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
++ mccq->id = le16_to_cpu(resp->id);
++ mccq->created = true;
++ }
++ spin_unlock(&adapter->mbox_lock);
++
++ return status;
++}
++
++int be_cmd_txq_create(struct be_adapter *adapter,
++ struct be_queue_info *txq,
++ struct be_queue_info *cq)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_eth_tx_create *req;
++ struct be_dma_mem *q_mem = &txq->dma_mem;
++ void *ctxt;
++ int status;
++
++ spin_lock(&adapter->mbox_lock);
++
++ wrb = wrb_from_mbox(adapter);
++ req = embedded_payload(wrb);
++ ctxt = &req->context;
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++ OPCODE_ETH_TX_CREATE);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
++ sizeof(*req));
++
++ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
++ req->ulp_num = BE_ULP1_NUM;
++ req->type = BE_ETH_TX_RING_TYPE_STANDARD;
++
++ AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
++ be_encoded_q_len(txq->len));
++ AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
++ be_pci_func(adapter));
++ AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
++ AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
++
++ be_dws_cpu_to_le(ctxt, sizeof(req->context));
++
++ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
++
++ status = be_mbox_notify_wait(adapter);
++ if (!status) {
++ struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
++ txq->id = le16_to_cpu(resp->cid);
++ txq->created = true;
++ }
++
++ spin_unlock(&adapter->mbox_lock);
++
++ return status;
++}
++
++/* Uses mbox */
++int be_cmd_rxq_create(struct be_adapter *adapter,
++ struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
++ u16 max_frame_size, u32 if_id, u32 rss)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_eth_rx_create *req;
++ struct be_dma_mem *q_mem = &rxq->dma_mem;
++ int status;
++
++ spin_lock(&adapter->mbox_lock);
++
++ wrb = wrb_from_mbox(adapter);
++ req = embedded_payload(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++ OPCODE_ETH_RX_CREATE);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
++ sizeof(*req));
++
++ req->cq_id = cpu_to_le16(cq_id);
++ req->frag_size = fls(frag_size) - 1;
++ req->num_pages = 2;
++ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
++ req->interface_id = cpu_to_le32(if_id);
++ req->max_frame_size = cpu_to_le16(max_frame_size);
++ req->rss_queue = cpu_to_le32(rss);
++
++ status = be_mbox_notify_wait(adapter);
++ if (!status) {
++ struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
++ rxq->id = le16_to_cpu(resp->id);
++ rxq->created = true;
++ }
++
++ spin_unlock(&adapter->mbox_lock);
++
++ return status;
++}
++
++/* Generic destroyer function for all types of queues
++ * Uses Mbox
++ */
++int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
++ int queue_type)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_q_destroy *req;
++ u8 subsys = 0, opcode = 0;
++ int status;
++
++ spin_lock(&adapter->mbox_lock);
++
++ wrb = wrb_from_mbox(adapter);
++ req = embedded_payload(wrb);
++
++ switch (queue_type) {
++ case QTYPE_EQ:
++ subsys = CMD_SUBSYSTEM_COMMON;
++ opcode = OPCODE_COMMON_EQ_DESTROY;
++ break;
++ case QTYPE_CQ:
++ subsys = CMD_SUBSYSTEM_COMMON;
++ opcode = OPCODE_COMMON_CQ_DESTROY;
++ break;
++ case QTYPE_TXQ:
++ subsys = CMD_SUBSYSTEM_ETH;
++ opcode = OPCODE_ETH_TX_DESTROY;
++ break;
++ case QTYPE_RXQ:
++ subsys = CMD_SUBSYSTEM_ETH;
++ opcode = OPCODE_ETH_RX_DESTROY;
++ break;
++ case QTYPE_MCCQ:
++ subsys = CMD_SUBSYSTEM_COMMON;
++ opcode = OPCODE_COMMON_MCC_DESTROY;
++ break;
++ default:
++ BUG();
++ }
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);
++
++ be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
++ req->id = cpu_to_le16(q->id);
++
++ status = be_mbox_notify_wait(adapter);
++
++ spin_unlock(&adapter->mbox_lock);
++
++ return status;
++}
++
++/* Create an rx filtering policy configuration on an i/f
++ * Uses mbox
++ */
++int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
++ u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_if_create *req;
++ int status;
++
++ spin_lock(&adapter->mbox_lock);
++
++ wrb = wrb_from_mbox(adapter);
++ req = embedded_payload(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++ OPCODE_COMMON_NTWK_INTERFACE_CREATE);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
++
++ req->capability_flags = cpu_to_le32(cap_flags);
++ req->enable_flags = cpu_to_le32(en_flags);
++ req->pmac_invalid = pmac_invalid;
++ if (!pmac_invalid)
++ memcpy(req->mac_addr, mac, ETH_ALEN);
++
++ status = be_mbox_notify_wait(adapter);
++ if (!status) {
++ struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
++ *if_handle = le32_to_cpu(resp->interface_id);
++ if (!pmac_invalid)
++ *pmac_id = le32_to_cpu(resp->pmac_id);
++ }
++
++ spin_unlock(&adapter->mbox_lock);
++ return status;
++}
++
++/* Uses mbox */
++int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_if_destroy *req;
++ int status;
++
++ spin_lock(&adapter->mbox_lock);
++
++ wrb = wrb_from_mbox(adapter);
++ req = embedded_payload(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++ OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
++
++ req->interface_id = cpu_to_le32(interface_id);
++
++ status = be_mbox_notify_wait(adapter);
++
++ spin_unlock(&adapter->mbox_lock);
++
++ return status;
++}
++
++/* Get stats is a non-embedded command: the request is not embedded inside
++ * WRB but is a separate dma memory block
++ * Uses asynchronous MCC
++ */
++int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_get_stats *req;
++ struct be_sge *sge;
++ int status = 0;
++
++ spin_lock_bh(&adapter->mcc_lock);
++
++ wrb = wrb_from_mccq(adapter);
++ if (!wrb) {
++ status = -EBUSY;
++ goto err;
++ }
++ req = nonemb_cmd->va;
++ sge = nonembedded_sgl(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
++ OPCODE_ETH_GET_STATISTICS);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
++ OPCODE_ETH_GET_STATISTICS, sizeof(*req));
++ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
++ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
++ sge->len = cpu_to_le32(nonemb_cmd->size);
++
++ be_mcc_notify(adapter);
++
++err:
++ spin_unlock_bh(&adapter->mcc_lock);
++ return status;
++}
++
++/* Uses synchronous mcc */
++int be_cmd_link_status_query(struct be_adapter *adapter,
++ bool *link_up, u8 *mac_speed, u16 *link_speed)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_link_status *req;
++ int status;
++
++ spin_lock_bh(&adapter->mcc_lock);
++
++ wrb = wrb_from_mccq(adapter);
++ if (!wrb) {
++ status = -EBUSY;
++ goto err;
++ }
++ req = embedded_payload(wrb);
++
++ *link_up = false;
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
++
++ status = be_mcc_notify_wait(adapter);
++ if (!status) {
++ struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
++ if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
++ *link_up = true;
++ *link_speed = le16_to_cpu(resp->link_speed);
++ *mac_speed = resp->mac_speed;
++ }
++ }
++
++err:
++ spin_unlock_bh(&adapter->mcc_lock);
++ return status;
++}
++
++/* Uses Mbox */
++int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_get_fw_version *req;
++ int status;
++
++ spin_lock(&adapter->mbox_lock);
++
++ wrb = wrb_from_mbox(adapter);
++ req = embedded_payload(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++ OPCODE_COMMON_GET_FW_VERSION);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
++
++ status = be_mbox_notify_wait(adapter);
++ if (!status) {
++ struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
++ strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
++ }
++
++ spin_unlock(&adapter->mbox_lock);
++ return status;
++}
++
++/* Set the EQ delay interval of an EQ to the specified value
++ * Uses async mcc
++ */
++int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_modify_eq_delay *req;
++ int status = 0;
++
++ spin_lock_bh(&adapter->mcc_lock);
++
++ wrb = wrb_from_mccq(adapter);
++ if (!wrb) {
++ status = -EBUSY;
++ goto err;
++ }
++ req = embedded_payload(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++ OPCODE_COMMON_MODIFY_EQ_DELAY);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
++
++ req->num_eq = cpu_to_le32(1);
++ req->delay[0].eq_id = cpu_to_le32(eq_id);
++ req->delay[0].phase = 0;
++ req->delay[0].delay_multiplier = cpu_to_le32(eqd);
++
++ be_mcc_notify(adapter);
++
++err:
++ spin_unlock_bh(&adapter->mcc_lock);
++ return status;
++}
++
++/* Uses synchronous mcc */
++int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
++ u32 num, bool untagged, bool promiscuous)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_vlan_config *req;
++ int status;
++
++ spin_lock_bh(&adapter->mcc_lock);
++
++ wrb = wrb_from_mccq(adapter);
++ if (!wrb) {
++ status = -EBUSY;
++ goto err;
++ }
++ req = embedded_payload(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++ OPCODE_COMMON_NTWK_VLAN_CONFIG);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));
++
++ req->interface_id = if_id;
++ req->promiscuous = promiscuous;
++ req->untagged = untagged;
++ req->num_vlan = num;
++ if (!promiscuous) {
++ memcpy(req->normal_vlan, vtag_array,
++ req->num_vlan * sizeof(vtag_array[0]));
++ }
++
++ status = be_mcc_notify_wait(adapter);
++
++err:
++ spin_unlock_bh(&adapter->mcc_lock);
++ return status;
++}
++
++/* Uses MCC for this command as it may be called in BH context
++ * Uses synchronous mcc
++ */
++int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_promiscuous_config *req;
++ int status;
++
++ spin_lock_bh(&adapter->mcc_lock);
++
++ wrb = wrb_from_mccq(adapter);
++ if (!wrb) {
++ status = -EBUSY;
++ goto err;
++ }
++ req = embedded_payload(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
++ OPCODE_ETH_PROMISCUOUS, sizeof(*req));
++
++ if (port_num)
++ req->port1_promiscuous = en;
++ else
++ req->port0_promiscuous = en;
++
++ status = be_mcc_notify_wait(adapter);
++
++err:
++ spin_unlock_bh(&adapter->mcc_lock);
++ return status;
++}
++
++/*
++ * Uses MCC for this command as it may be called in BH context
++ * (mc == NULL) => multicast promiscous
++ */
++int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
++ struct dev_mc_list *mc_list, u32 mc_count,
++ struct be_dma_mem *mem)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_mcast_mac_config *req = mem->va;
++ struct be_sge *sge;
++ int status;
++
++ spin_lock_bh(&adapter->mcc_lock);
++
++ wrb = wrb_from_mccq(adapter);
++ if (!wrb) {
++ status = -EBUSY;
++ goto err;
++ }
++ sge = nonembedded_sgl(wrb);
++ memset(req, 0, sizeof(*req));
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
++ OPCODE_COMMON_NTWK_MULTICAST_SET);
++ sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
++ sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
++ sge->len = cpu_to_le32(mem->size);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
++
++ req->interface_id = if_id;
++ if (mc_list) {
++ int i;
++ struct dev_mc_list *mc;
++
++ req->num_mac = cpu_to_le16(mc_count);
++
++ for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
++ memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
++ } else {
++ req->promiscuous = 1;
++ }
++
++ status = be_mcc_notify_wait(adapter);
++
++err:
++ spin_unlock_bh(&adapter->mcc_lock);
++ return status;
++}
++
++/* Uses synchronous mcc */
++int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_set_flow_control *req;
++ int status;
++
++ spin_lock_bh(&adapter->mcc_lock);
++
++ wrb = wrb_from_mccq(adapter);
++ if (!wrb) {
++ status = -EBUSY;
++ goto err;
++ }
++ req = embedded_payload(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++ OPCODE_COMMON_SET_FLOW_CONTROL);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
++
++ req->tx_flow_control = cpu_to_le16((u16)tx_fc);
++ req->rx_flow_control = cpu_to_le16((u16)rx_fc);
++
++ status = be_mcc_notify_wait(adapter);
++
++err:
++ spin_unlock_bh(&adapter->mcc_lock);
++ return status;
++}
++
++/* Uses sync mcc */
++int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_get_flow_control *req;
++ int status;
++
++ spin_lock_bh(&adapter->mcc_lock);
++
++ wrb = wrb_from_mccq(adapter);
++ if (!wrb) {
++ status = -EBUSY;
++ goto err;
++ }
++ req = embedded_payload(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++ OPCODE_COMMON_GET_FLOW_CONTROL);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
++
++ status = be_mcc_notify_wait(adapter);
++ if (!status) {
++ struct be_cmd_resp_get_flow_control *resp =
++ embedded_payload(wrb);
++ *tx_fc = le16_to_cpu(resp->tx_flow_control);
++ *rx_fc = le16_to_cpu(resp->rx_flow_control);
++ }
++
++err:
++ spin_unlock_bh(&adapter->mcc_lock);
++ return status;
++}
++
++/* Uses mbox */
++int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_query_fw_cfg *req;
++ int status;
++
++ spin_lock(&adapter->mbox_lock);
++
++ wrb = wrb_from_mbox(adapter);
++ req = embedded_payload(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
++
++ status = be_mbox_notify_wait(adapter);
++ if (!status) {
++ struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
++ *port_num = le32_to_cpu(resp->phys_port);
++ }
++
++ spin_unlock(&adapter->mbox_lock);
++ return status;
++}
++
++/* Uses mbox */
++int be_cmd_reset_function(struct be_adapter *adapter)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_hdr *req;
++ int status;
++
++ spin_lock(&adapter->mbox_lock);
++
++ wrb = wrb_from_mbox(adapter);
++ req = embedded_payload(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++ OPCODE_COMMON_FUNCTION_RESET);
++
++ be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
++
++ status = be_mbox_notify_wait(adapter);
++
++ spin_unlock(&adapter->mbox_lock);
++ return status;
++}
++
++/* Uses sync mcc */
++int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
++ u8 bcn, u8 sts, u8 state)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_enable_disable_beacon *req;
++ int status;
++
++ spin_lock_bh(&adapter->mcc_lock);
++
++ wrb = wrb_from_mccq(adapter);
++ if (!wrb) {
++ status = -EBUSY;
++ goto err;
++ }
++ req = embedded_payload(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++ OPCODE_COMMON_ENABLE_DISABLE_BEACON);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
++
++ req->port_num = port_num;
++ req->beacon_state = state;
++ req->beacon_duration = bcn;
++ req->status_duration = sts;
++
++ status = be_mcc_notify_wait(adapter);
++
++err:
++ spin_unlock_bh(&adapter->mcc_lock);
++ return status;
++}
++
++/* Uses sync mcc */
++int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_get_beacon_state *req;
++ int status;
++
++ spin_lock_bh(&adapter->mcc_lock);
++
++ wrb = wrb_from_mccq(adapter);
++ if (!wrb) {
++ status = -EBUSY;
++ goto err;
++ }
++ req = embedded_payload(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
++ OPCODE_COMMON_GET_BEACON_STATE);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
++
++ req->port_num = port_num;
++
++ status = be_mcc_notify_wait(adapter);
++ if (!status) {
++ struct be_cmd_resp_get_beacon_state *resp =
++ embedded_payload(wrb);
++ *state = resp->beacon_state;
++ }
++
++err:
++ spin_unlock_bh(&adapter->mcc_lock);
++ return status;
++}
++
++/* Uses sync mcc */
++int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
++ u8 *connector)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_port_type *req;
++ int status;
++
++ spin_lock_bh(&adapter->mcc_lock);
++
++ wrb = wrb_from_mccq(adapter);
++ if (!wrb) {
++ status = -EBUSY;
++ goto err;
++ }
++ req = embedded_payload(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
++ OPCODE_COMMON_READ_TRANSRECV_DATA);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
++ OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
++
++ req->port = cpu_to_le32(port);
++ req->page_num = cpu_to_le32(TR_PAGE_A0);
++ status = be_mcc_notify_wait(adapter);
++ if (!status) {
++ struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
++ *connector = resp->data.connector;
++ }
++
++err:
++ spin_unlock_bh(&adapter->mcc_lock);
++ return status;
++}
++
++int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
++ struct be_dma_mem *nonemb_cmd)
++{
++ struct be_mcc_wrb *wrb;
++ struct be_cmd_req_acpi_wol_magic_config *req;
++ struct be_sge *sge;
++ int status;
++
++ spin_lock_bh(&adapter->mcc_lock);
++
++ wrb = wrb_from_mccq(adapter);
++ if (!wrb) {
++ status = -EBUSY;
++ goto err;
++ }
++ req = nonemb_cmd->va;
++ sge = nonembedded_sgl(wrb);
++
++ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
++ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
++
++ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
++ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
++ memcpy(req->magic_mac, mac, ETH_ALEN);
++
++ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
++ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
++ sge->len = cpu_to_le32(nonemb_cmd->size);
++
++ status = be_mcc_notify_wait(adapter);
++
++err:
++ spin_unlock_bh(&adapter->mcc_lock);
++ return status;
++}
+diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
+new file mode 100644
+index 0000000..ecb477c
+--- /dev/null
++++ b/drivers/net/benet/be_cmds.h
+@@ -0,0 +1,846 @@
++/*
++ * Copyright (C) 2005 - 2009 ServerEngines
++ * All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation. The full GNU General
++ * Public License is included in this distribution in the file called COPYING.
++ *
++ * Contact Information:
++ * linux-drivers at serverengines.com
++ *
++ * ServerEngines
++ * 209 N. Fair Oaks Ave
++ * Sunnyvale, CA 94085
++ */
++
++/*
++ * The driver sends configuration and management command requests to the
++ * firmware in the BE. These requests are communicated to the processor
++ * using Work Request Blocks (WRBs) submitted to the MCC-WRB ring or via one
++ * WRB inside a MAILBOX.
++ * The commands are serviced by the ARM processor in the BladeEngine's MPU.
++ */
++
++struct be_sge {
++ u32 pa_lo;
++ u32 pa_hi;
++ u32 len;
++};
++
++#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0 */
++#define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
++#define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */
++struct be_mcc_wrb {
++ u32 embedded; /* dword 0 */
++ u32 payload_length; /* dword 1 */
++ u32 tag0; /* dword 2 */
++ u32 tag1; /* dword 3 */
++ u32 rsvd; /* dword 4 */
++ union {
++ u8 embedded_payload[236]; /* used by embedded cmds */
++ struct be_sge sgl[19]; /* used by non-embedded cmds */
++ } payload;
++};
++
++#define CQE_FLAGS_VALID_MASK (1 << 31)
++#define CQE_FLAGS_ASYNC_MASK (1 << 30)
++#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
++#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
++
++/* Completion Status */
++enum {
++ MCC_STATUS_SUCCESS = 0x0,
++/* The client does not have sufficient privileges to execute the command */
++ MCC_STATUS_INSUFFICIENT_PRIVILEGES = 0x1,
++/* A parameter in the command was invalid. */
++ MCC_STATUS_INVALID_PARAMETER = 0x2,
++/* There are insufficient chip resources to execute the command */
++ MCC_STATUS_INSUFFICIENT_RESOURCES = 0x3,
++/* The command is completing because the queue was getting flushed */
++ MCC_STATUS_QUEUE_FLUSHING = 0x4,
++/* The command is completing with a DMA error */
++ MCC_STATUS_DMA_FAILED = 0x5,
++ MCC_STATUS_NOT_SUPPORTED = 66
++};
++
++#define CQE_STATUS_COMPL_MASK 0xFFFF
++#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
++#define CQE_STATUS_EXTD_MASK 0xFFFF
++#define CQE_STATUS_EXTD_SHIFT 16 /* bits 16 - 31 */
++
++struct be_mcc_compl {
++ u32 status; /* dword 0 */
++ u32 tag0; /* dword 1 */
++ u32 tag1; /* dword 2 */
++ u32 flags; /* dword 3 */
++};
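++
++/* Illustrative sketch (editorial note): the status and extended status are
++ * unpacked from dword 0 roughly as the driver's completion processing does:
++ *
++ *	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
++ *				CQE_STATUS_COMPL_MASK;
++ *	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
++ *				CQE_STATUS_EXTD_MASK;
++ */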
++
++/* When the async bit of mcc_compl is set, the last 4 bytes of
++ * mcc_compl are interpreted as follows:
++ */
++#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
++#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
++#define ASYNC_EVENT_CODE_LINK_STATE 0x1
++struct be_async_event_trailer {
++ u32 code;
++};
++
++enum {
++ ASYNC_EVENT_LINK_DOWN = 0x0,
++ ASYNC_EVENT_LINK_UP = 0x1
++};
++
++/* When the event code of an async trailer is link-state, the mcc_compl
++ * must be interpreted as follows
++ */
++struct be_async_event_link_state {
++ u8 physical_port;
++ u8 port_link_status;
++ u8 port_duplex;
++ u8 port_speed;
++ u8 port_fault;
++ u8 rsvd0[7];
++ struct be_async_event_trailer trailer;
++} __packed;
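++
++/* Illustrative sketch (editorial note): a completion is treated as an async
++ * link-state event when its flags carry CQE_FLAGS_ASYNC_MASK and the
++ * trailer's code field matches, roughly:
++ *
++ *	code = (trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
++ *			ASYNC_TRAILER_EVENT_CODE_MASK;
++ *	if (code == ASYNC_EVENT_CODE_LINK_STATE)
++ *		be_async_link_state_process(adapter, evt);
++ */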
++
++struct be_mcc_mailbox {
++ struct be_mcc_wrb wrb;
++ struct be_mcc_compl compl;
++};
++
++#define CMD_SUBSYSTEM_COMMON 0x1
++#define CMD_SUBSYSTEM_ETH 0x3
++
++#define OPCODE_COMMON_NTWK_MAC_QUERY 1
++#define OPCODE_COMMON_NTWK_MAC_SET 2
++#define OPCODE_COMMON_NTWK_MULTICAST_SET 3
++#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4
++#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
++#define OPCODE_COMMON_CQ_CREATE 12
++#define OPCODE_COMMON_EQ_CREATE 13
++#define OPCODE_COMMON_MCC_CREATE 21
++#define OPCODE_COMMON_NTWK_RX_FILTER 34
++#define OPCODE_COMMON_GET_FW_VERSION 35
++#define OPCODE_COMMON_SET_FLOW_CONTROL 36
++#define OPCODE_COMMON_GET_FLOW_CONTROL 37
++#define OPCODE_COMMON_SET_FRAME_SIZE 39
++#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
++#define OPCODE_COMMON_FIRMWARE_CONFIG 42
++#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
++#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
++#define OPCODE_COMMON_MCC_DESTROY 53
++#define OPCODE_COMMON_CQ_DESTROY 54
++#define OPCODE_COMMON_EQ_DESTROY 55
++#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
++#define OPCODE_COMMON_NTWK_PMAC_ADD 59
++#define OPCODE_COMMON_NTWK_PMAC_DEL 60
++#define OPCODE_COMMON_FUNCTION_RESET 61
++#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
++#define OPCODE_COMMON_GET_BEACON_STATE 70
++#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
++
++#define OPCODE_ETH_ACPI_CONFIG 2
++#define OPCODE_ETH_PROMISCUOUS 3
++#define OPCODE_ETH_GET_STATISTICS 4
++#define OPCODE_ETH_TX_CREATE 7
++#define OPCODE_ETH_RX_CREATE 8
++#define OPCODE_ETH_TX_DESTROY 9
++#define OPCODE_ETH_RX_DESTROY 10
++#define OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG 12
++
++struct be_cmd_req_hdr {
++ u8 opcode; /* dword 0 */
++ u8 subsystem; /* dword 0 */
++ u8 port_number; /* dword 0 */
++ u8 domain; /* dword 0 */
++ u32 timeout; /* dword 1 */
++ u32 request_length; /* dword 2 */
++ u8 version; /* dword 3 */
++ u8 rsvd[3]; /* dword 3 */
++};
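++
++/* Illustrative sketch (editorial note): each request in be_cmds.c fills this
++ * header through a helper along the lines of be_cmd_hdr_prepare():
++ *
++ *	req_hdr->opcode = opcode;
++ *	req_hdr->subsystem = subsystem;
++ *	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
++ */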
++
++#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
++#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */
++struct be_cmd_resp_hdr {
++ u32 info; /* dword 0 */
++ u32 status; /* dword 1 */
++ u32 response_length; /* dword 2 */
++ u32 actual_resp_len; /* dword 3 */
++};
++
++struct phys_addr {
++ u32 lo;
++ u32 hi;
++};
++
++/**************************
++ * BE Command definitions *
++ **************************/
++
++/* Pseudo amap definition in which each bit of the actual structure is defined
++ * as a byte: used to calculate offset/shift/mask of each field */
++struct amap_eq_context {
++ u8 cidx[13]; /* dword 0*/
++ u8 rsvd0[3]; /* dword 0*/
++ u8 epidx[13]; /* dword 0*/
++ u8 valid; /* dword 0*/
++ u8 rsvd1; /* dword 0*/
++ u8 size; /* dword 0*/
++ u8 pidx[13]; /* dword 1*/
++ u8 rsvd2[3]; /* dword 1*/
++ u8 pd[10]; /* dword 1*/
++ u8 count[3]; /* dword 1*/
++ u8 solevent; /* dword 1*/
++ u8 stalled; /* dword 1*/
++ u8 armed; /* dword 1*/
++ u8 rsvd3[4]; /* dword 2*/
++ u8 func[8]; /* dword 2*/
++ u8 rsvd4; /* dword 2*/
++ u8 delaymult[10]; /* dword 2*/
++ u8 rsvd5[2]; /* dword 2*/
++ u8 phase[2]; /* dword 2*/
++ u8 nodelay; /* dword 2*/
++ u8 rsvd6[4]; /* dword 2*/
++ u8 rsvd7[32]; /* dword 3*/
++} __packed;
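++
++/* Illustrative sketch (editorial note): with the byte-per-bit convention the
++ * offset/width of each field is computed at compile time and the context is
++ * filled via AMAP_SET_BITS(), e.g. in be_cmd_eq_create():
++ *
++ *	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
++ *	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
++ *			__ilog2_u32(eq->len / 256));
++ */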
++
++struct be_cmd_req_eq_create {
++ struct be_cmd_req_hdr hdr;
++ u16 num_pages; /* sword */
++ u16 rsvd0; /* sword */
++ u8 context[sizeof(struct amap_eq_context) / 8];
++ struct phys_addr pages[8];
++} __packed;
++
++struct be_cmd_resp_eq_create {
++ struct be_cmd_resp_hdr resp_hdr;
++ u16 eq_id; /* sword */
++ u16 rsvd0; /* sword */
++} __packed;
++
++/******************** Mac query ***************************/
++enum {
++ MAC_ADDRESS_TYPE_STORAGE = 0x0,
++ MAC_ADDRESS_TYPE_NETWORK = 0x1,
++ MAC_ADDRESS_TYPE_PD = 0x2,
++ MAC_ADDRESS_TYPE_MANAGEMENT = 0x3
++};
++
++struct mac_addr {
++ u16 size_of_struct;
++ u8 addr[ETH_ALEN];
++} __packed;
++
++struct be_cmd_req_mac_query {
++ struct be_cmd_req_hdr hdr;
++ u8 type;
++ u8 permanent;
++ u16 if_id;
++} __packed;
++
++struct be_cmd_resp_mac_query {
++ struct be_cmd_resp_hdr hdr;
++ struct mac_addr mac;
++};
++
++/******************** PMac Add ***************************/
++struct be_cmd_req_pmac_add {
++ struct be_cmd_req_hdr hdr;
++ u32 if_id;
++ u8 mac_address[ETH_ALEN];
++ u8 rsvd0[2];
++} __packed;
++
++struct be_cmd_resp_pmac_add {
++ struct be_cmd_resp_hdr hdr;
++ u32 pmac_id;
++};
++
++/******************** PMac Del ***************************/
++struct be_cmd_req_pmac_del {
++ struct be_cmd_req_hdr hdr;
++ u32 if_id;
++ u32 pmac_id;
++};
++
++/******************** Create CQ ***************************/
++/* Pseudo amap definition in which each bit of the actual structure is defined
++ * as a byte: used to calculate offset/shift/mask of each field */
++struct amap_cq_context {
++ u8 cidx[11]; /* dword 0*/
++ u8 rsvd0; /* dword 0*/
++ u8 coalescwm[2]; /* dword 0*/
++ u8 nodelay; /* dword 0*/
++ u8 epidx[11]; /* dword 0*/
++ u8 rsvd1; /* dword 0*/
++ u8 count[2]; /* dword 0*/
++ u8 valid; /* dword 0*/
++ u8 solevent; /* dword 0*/
++ u8 eventable; /* dword 0*/
++ u8 pidx[11]; /* dword 1*/
++ u8 rsvd2; /* dword 1*/
++ u8 pd[10]; /* dword 1*/
++ u8 eqid[8]; /* dword 1*/
++ u8 stalled; /* dword 1*/
++ u8 armed; /* dword 1*/
++ u8 rsvd3[4]; /* dword 2*/
++ u8 func[8]; /* dword 2*/
++ u8 rsvd4[20]; /* dword 2*/
++ u8 rsvd5[32]; /* dword 3*/
++} __packed;
++
++struct be_cmd_req_cq_create {
++ struct be_cmd_req_hdr hdr;
++ u16 num_pages;
++ u16 rsvd0;
++ u8 context[sizeof(struct amap_cq_context) / 8];
++ struct phys_addr pages[8];
++} __packed;
++
++struct be_cmd_resp_cq_create {
++ struct be_cmd_resp_hdr hdr;
++ u16 cq_id;
++ u16 rsvd0;
++} __packed;
++
++/******************** Create MCCQ ***************************/
++/* Pseudo amap definition in which each bit of the actual structure is defined
++ * as a byte: used to calculate offset/shift/mask of each field */
++struct amap_mcc_context {
++ u8 con_index[14];
++ u8 rsvd0[2];
++ u8 ring_size[4];
++ u8 fetch_wrb;
++ u8 fetch_r2t;
++ u8 cq_id[10];
++ u8 prod_index[14];
++ u8 fid[8];
++ u8 pdid[9];
++ u8 valid;
++ u8 rsvd1[32];
++ u8 rsvd2[32];
++} __packed;
++
++struct be_cmd_req_mcc_create {
++ struct be_cmd_req_hdr hdr;
++ u16 num_pages;
++ u16 rsvd0;
++ u8 context[sizeof(struct amap_mcc_context) / 8];
++ struct phys_addr pages[8];
++} __packed;
++
++struct be_cmd_resp_mcc_create {
++ struct be_cmd_resp_hdr hdr;
++ u16 id;
++ u16 rsvd0;
++} __packed;
++
++/******************** Create TxQ ***************************/
++#define BE_ETH_TX_RING_TYPE_STANDARD 2
++#define BE_ULP1_NUM 1
++
++/* Pseudo amap definition in which each bit of the actual structure is defined
++ * as a byte: used to calculate offset/shift/mask of each field */
++struct amap_tx_context {
++ u8 rsvd0[16]; /* dword 0 */
++ u8 tx_ring_size[4]; /* dword 0 */
++ u8 rsvd1[26]; /* dword 0 */
++ u8 pci_func_id[8]; /* dword 1 */
++ u8 rsvd2[9]; /* dword 1 */
++ u8 ctx_valid; /* dword 1 */
++ u8 cq_id_send[16]; /* dword 2 */
++ u8 rsvd3[16]; /* dword 2 */
++ u8 rsvd4[32]; /* dword 3 */
++ u8 rsvd5[32]; /* dword 4 */
++ u8 rsvd6[32]; /* dword 5 */
++ u8 rsvd7[32]; /* dword 6 */
++ u8 rsvd8[32]; /* dword 7 */
++ u8 rsvd9[32]; /* dword 8 */
++ u8 rsvd10[32]; /* dword 9 */
++ u8 rsvd11[32]; /* dword 10 */
++ u8 rsvd12[32]; /* dword 11 */
++ u8 rsvd13[32]; /* dword 12 */
++ u8 rsvd14[32]; /* dword 13 */
++ u8 rsvd15[32]; /* dword 14 */
++ u8 rsvd16[32]; /* dword 15 */
++} __packed;
++
++struct be_cmd_req_eth_tx_create {
++ struct be_cmd_req_hdr hdr;
++ u8 num_pages;
++ u8 ulp_num;
++ u8 type;
++ u8 bound_port;
++ u8 context[sizeof(struct amap_tx_context) / 8];
++ struct phys_addr pages[8];
++} __packed;
++
++struct be_cmd_resp_eth_tx_create {
++ struct be_cmd_resp_hdr hdr;
++ u16 cid;
++ u16 rsvd0;
++} __packed;
++
++/******************** Create RxQ ***************************/
++struct be_cmd_req_eth_rx_create {
++ struct be_cmd_req_hdr hdr;
++ u16 cq_id;
++ u8 frag_size;
++ u8 num_pages;
++ struct phys_addr pages[2];
++ u32 interface_id;
++ u16 max_frame_size;
++ u16 rsvd0;
++ u32 rss_queue;
++} __packed;
++
++struct be_cmd_resp_eth_rx_create {
++ struct be_cmd_resp_hdr hdr;
++ u16 id;
++ u8 cpu_id;
++ u8 rsvd0;
++} __packed;
++
++/******************** Q Destroy ***************************/
++/* Type of Queue to be destroyed */
++enum {
++ QTYPE_EQ = 1,
++ QTYPE_CQ,
++ QTYPE_TXQ,
++ QTYPE_RXQ,
++ QTYPE_MCCQ
++};
++
++struct be_cmd_req_q_destroy {
++ struct be_cmd_req_hdr hdr;
++ u16 id;
++ u16 bypass_flush; /* valid only for rx q destroy */
++} __packed;
++
++/************ I/f Create (it's actually I/f Config Create)**********/
++
++/* Capability flags for the i/f */
++enum be_if_flags {
++ BE_IF_FLAGS_RSS = 0x4,
++ BE_IF_FLAGS_PROMISCUOUS = 0x8,
++ BE_IF_FLAGS_BROADCAST = 0x10,
++ BE_IF_FLAGS_UNTAGGED = 0x20,
++ BE_IF_FLAGS_ULP = 0x40,
++ BE_IF_FLAGS_VLAN_PROMISCUOUS = 0x80,
++ BE_IF_FLAGS_VLAN = 0x100,
++ BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
++ BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
++ BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800
++};
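++
++/* Illustrative sketch (editorial note): a plain NIC interface is typically
++ * created with a subset of these flags, along the lines of:
++ *
++ *	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
++ *			BE_IF_FLAGS_PASS_L3L4_ERRORS;
++ *	be_cmd_if_create(adapter, cap_flags, cap_flags, netdev->dev_addr,
++ *			false, &adapter->if_handle, &adapter->pmac_id);
++ */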
++
++/* An RX interface is an object with one or more MAC addresses and
++ * filtering capabilities. */
++struct be_cmd_req_if_create {
++ struct be_cmd_req_hdr hdr;
++ u32 version; /* ignored currently */
++ u32 capability_flags;
++ u32 enable_flags;
++ u8 mac_addr[ETH_ALEN];
++ u8 rsvd0;
++ u8 pmac_invalid; /* if set, don't attach the mac addr to the i/f */
++ u32 vlan_tag; /* not used currently */
++} __packed;
++
++struct be_cmd_resp_if_create {
++ struct be_cmd_resp_hdr hdr;
++ u32 interface_id;
++ u32 pmac_id;
++};
++
++/****** I/f Destroy (it's actually I/f Config Destroy) **********/
++struct be_cmd_req_if_destroy {
++ struct be_cmd_req_hdr hdr;
++ u32 interface_id;
++};
++
++/*************** HW Stats Get **********************************/
++struct be_port_rxf_stats {
++ u32 rx_bytes_lsd; /* dword 0*/
++ u32 rx_bytes_msd; /* dword 1*/
++ u32 rx_total_frames; /* dword 2*/
++ u32 rx_unicast_frames; /* dword 3*/
++ u32 rx_multicast_frames; /* dword 4*/
++ u32 rx_broadcast_frames; /* dword 5*/
++ u32 rx_crc_errors; /* dword 6*/
++ u32 rx_alignment_symbol_errors; /* dword 7*/
++ u32 rx_pause_frames; /* dword 8*/
++ u32 rx_control_frames; /* dword 9*/
++ u32 rx_in_range_errors; /* dword 10*/
++ u32 rx_out_range_errors; /* dword 11*/
++ u32 rx_frame_too_long; /* dword 12*/
++ u32 rx_address_match_errors; /* dword 13*/
++ u32 rx_vlan_mismatch; /* dword 14*/
++ u32 rx_dropped_too_small; /* dword 15*/
++ u32 rx_dropped_too_short; /* dword 16*/
++ u32 rx_dropped_header_too_small; /* dword 17*/
++ u32 rx_dropped_tcp_length; /* dword 18*/
++ u32 rx_dropped_runt; /* dword 19*/
++ u32 rx_64_byte_packets; /* dword 20*/
++ u32 rx_65_127_byte_packets; /* dword 21*/
++ u32 rx_128_256_byte_packets; /* dword 22*/
++ u32 rx_256_511_byte_packets; /* dword 23*/
++ u32 rx_512_1023_byte_packets; /* dword 24*/
++ u32 rx_1024_1518_byte_packets; /* dword 25*/
++ u32 rx_1519_2047_byte_packets; /* dword 26*/
++ u32 rx_2048_4095_byte_packets; /* dword 27*/
++ u32 rx_4096_8191_byte_packets; /* dword 28*/
++ u32 rx_8192_9216_byte_packets; /* dword 29*/
++ u32 rx_ip_checksum_errs; /* dword 30*/
++ u32 rx_tcp_checksum_errs; /* dword 31*/
++ u32 rx_udp_checksum_errs; /* dword 32*/
++ u32 rx_non_rss_packets; /* dword 33*/
++ u32 rx_ipv4_packets; /* dword 34*/
++ u32 rx_ipv6_packets; /* dword 35*/
++ u32 rx_ipv4_bytes_lsd; /* dword 36*/
++ u32 rx_ipv4_bytes_msd; /* dword 37*/
++ u32 rx_ipv6_bytes_lsd; /* dword 38*/
++ u32 rx_ipv6_bytes_msd; /* dword 39*/
++ u32 rx_chute1_packets; /* dword 40*/
++ u32 rx_chute2_packets; /* dword 41*/
++ u32 rx_chute3_packets; /* dword 42*/
++ u32 rx_management_packets; /* dword 43*/
++ u32 rx_switched_unicast_packets; /* dword 44*/
++ u32 rx_switched_multicast_packets; /* dword 45*/
++ u32 rx_switched_broadcast_packets; /* dword 46*/
++ u32 tx_bytes_lsd; /* dword 47*/
++ u32 tx_bytes_msd; /* dword 48*/
++ u32 tx_unicastframes; /* dword 49*/
++ u32 tx_multicastframes; /* dword 50*/
++ u32 tx_broadcastframes; /* dword 51*/
++ u32 tx_pauseframes; /* dword 52*/
++ u32 tx_controlframes; /* dword 53*/
++ u32 tx_64_byte_packets; /* dword 54*/
++ u32 tx_65_127_byte_packets; /* dword 55*/
++ u32 tx_128_256_byte_packets; /* dword 56*/
++ u32 tx_256_511_byte_packets; /* dword 57*/
++ u32 tx_512_1023_byte_packets; /* dword 58*/
++ u32 tx_1024_1518_byte_packets; /* dword 59*/
++ u32 tx_1519_2047_byte_packets; /* dword 60*/
++ u32 tx_2048_4095_byte_packets; /* dword 61*/
++ u32 tx_4096_8191_byte_packets; /* dword 62*/
++ u32 tx_8192_9216_byte_packets; /* dword 63*/
++ u32 rx_fifo_overflow; /* dword 64*/
++ u32 rx_input_fifo_overflow; /* dword 65*/
++};
++
++struct be_rxf_stats {
++ struct be_port_rxf_stats port[2];
++ u32 rx_drops_no_pbuf; /* dword 132*/
++ u32 rx_drops_no_txpb; /* dword 133*/
++ u32 rx_drops_no_erx_descr; /* dword 134*/
++ u32 rx_drops_no_tpre_descr; /* dword 135*/
++ u32 management_rx_port_packets; /* dword 136*/
++ u32 management_rx_port_bytes; /* dword 137*/
++ u32 management_rx_port_pause_frames; /* dword 138*/
++ u32 management_rx_port_errors; /* dword 139*/
++ u32 management_tx_port_packets; /* dword 140*/
++ u32 management_tx_port_bytes; /* dword 141*/
++ u32 management_tx_port_pause; /* dword 142*/
++ u32 management_rx_port_rxfifo_overflow; /* dword 143*/
++ u32 rx_drops_too_many_frags; /* dword 144*/
++ u32 rx_drops_invalid_ring; /* dword 145*/
++ u32 forwarded_packets; /* dword 146*/
++ u32 rx_drops_mtu; /* dword 147*/
++ u32 rsvd0[15];
++};
++
++struct be_erx_stats {
++ u32 rx_drops_no_fragments[44]; /* dwords 0 to 43 */
++ u32 debug_wdma_sent_hold; /* dword 44*/
++ u32 debug_wdma_pbfree_sent_hold; /* dword 45*/
++ u32 debug_wdma_zerobyte_pbfree_sent_hold; /* dword 46*/
++ u32 debug_pmem_pbuf_dealloc; /* dword 47*/
++};
++
++struct be_hw_stats {
++ struct be_rxf_stats rxf;
++ u32 rsvd[48];
++ struct be_erx_stats erx;
++};
++
++struct be_cmd_req_get_stats {
++ struct be_cmd_req_hdr hdr;
++ u8 rsvd[sizeof(struct be_hw_stats)];
++};
++
++struct be_cmd_resp_get_stats {
++ struct be_cmd_resp_hdr hdr;
++ struct be_hw_stats hw_stats;
++};
++
++struct be_cmd_req_vlan_config {
++ struct be_cmd_req_hdr hdr;
++ u8 interface_id;
++ u8 promiscuous;
++ u8 untagged;
++ u8 num_vlan;
++ u16 normal_vlan[64];
++} __packed;
++
++struct be_cmd_req_promiscuous_config {
++ struct be_cmd_req_hdr hdr;
++ u8 port0_promiscuous;
++ u8 port1_promiscuous;
++ u16 rsvd0;
++} __packed;
++
++/******************** Multicast MAC Config *******************/
++#define BE_MAX_MC 64 /* set mcast promisc if > 64 */
++struct macaddr {
++ u8 byte[ETH_ALEN];
++};
++
++struct be_cmd_req_mcast_mac_config {
++ struct be_cmd_req_hdr hdr;
++ u16 num_mac;
++ u8 promiscuous;
++ u8 interface_id;
++ struct macaddr mac[BE_MAX_MC];
++} __packed;
++
++static inline struct be_hw_stats *
++hw_stats_from_cmd(struct be_cmd_resp_get_stats *cmd)
++{
++ return &cmd->hw_stats;
++}
++
++/******************** Link Status Query *******************/
++struct be_cmd_req_link_status {
++ struct be_cmd_req_hdr hdr;
++ u32 rsvd;
++};
++
++enum {
++ PHY_LINK_DUPLEX_NONE = 0x0,
++ PHY_LINK_DUPLEX_HALF = 0x1,
++ PHY_LINK_DUPLEX_FULL = 0x2
++};
++
++enum {
++ PHY_LINK_SPEED_ZERO = 0x0, /* => No link */
++ PHY_LINK_SPEED_10MBPS = 0x1,
++ PHY_LINK_SPEED_100MBPS = 0x2,
++ PHY_LINK_SPEED_1GBPS = 0x3,
++ PHY_LINK_SPEED_10GBPS = 0x4
++};
++
++struct be_cmd_resp_link_status {
++ struct be_cmd_resp_hdr hdr;
++ u8 physical_port;
++ u8 mac_duplex;
++ u8 mac_speed;
++ u8 mac_fault;
++ u8 mgmt_mac_duplex;
++ u8 mgmt_mac_speed;
++ u16 link_speed;
++ u32 rsvd0;
++} __packed;
++
++/******************** Port Identification ***************************/
++/* Identifies the type of port attached to NIC */
++struct be_cmd_req_port_type {
++ struct be_cmd_req_hdr hdr;
++ u32 page_num;
++ u32 port;
++};
++
++enum {
++ TR_PAGE_A0 = 0xa0,
++ TR_PAGE_A2 = 0xa2
++};
++
++struct be_cmd_resp_port_type {
++ struct be_cmd_resp_hdr hdr;
++ u32 page_num;
++ u32 port;
++ struct data {
++ u8 identifier;
++ u8 identifier_ext;
++ u8 connector;
++ u8 transceiver[8];
++ u8 rsvd0[3];
++ u8 length_km;
++ u8 length_hm;
++ u8 length_om1;
++ u8 length_om2;
++ u8 length_cu;
++ u8 length_cu_m;
++ u8 vendor_name[16];
++ u8 rsvd;
++ u8 vendor_oui[3];
++ u8 vendor_pn[16];
++ u8 vendor_rev[4];
++ } data;
++};
++
++/******************** Get FW Version *******************/
++struct be_cmd_req_get_fw_version {
++ struct be_cmd_req_hdr hdr;
++ u8 rsvd0[FW_VER_LEN];
++ u8 rsvd1[FW_VER_LEN];
++} __packed;
++
++struct be_cmd_resp_get_fw_version {
++ struct be_cmd_resp_hdr hdr;
++ u8 firmware_version_string[FW_VER_LEN];
++ u8 fw_on_flash_version_string[FW_VER_LEN];
++} __packed;
++
++/******************** Set Flow Control *******************/
++struct be_cmd_req_set_flow_control {
++ struct be_cmd_req_hdr hdr;
++ u16 tx_flow_control;
++ u16 rx_flow_control;
++} __packed;
++
++/******************** Get Flow Control *******************/
++struct be_cmd_req_get_flow_control {
++ struct be_cmd_req_hdr hdr;
++ u32 rsvd;
++};
++
++struct be_cmd_resp_get_flow_control {
++ struct be_cmd_resp_hdr hdr;
++ u16 tx_flow_control;
++ u16 rx_flow_control;
++} __packed;
++
++/******************** Modify EQ Delay *******************/
++struct be_cmd_req_modify_eq_delay {
++ struct be_cmd_req_hdr hdr;
++ u32 num_eq;
++ struct {
++ u32 eq_id;
++ u32 phase;
++ u32 delay_multiplier;
++ } delay[8];
++} __packed;
++
++struct be_cmd_resp_modify_eq_delay {
++ struct be_cmd_resp_hdr hdr;
++ u32 rsvd0;
++} __packed;
++
++/******************** Get FW Config *******************/
++struct be_cmd_req_query_fw_cfg {
++ struct be_cmd_req_hdr hdr;
++ u32 rsvd[30];
++};
++
++struct be_cmd_resp_query_fw_cfg {
++ struct be_cmd_resp_hdr hdr;
++ u32 be_config_number;
++ u32 asic_revision;
++ u32 phys_port;
++ u32 function_mode;
++ u32 rsvd[26];
++};
++
++/******************** Port Beacon ***************************/
++
++#define BEACON_STATE_ENABLED 0x1
++#define BEACON_STATE_DISABLED 0x0
++
++struct be_cmd_req_enable_disable_beacon {
++ struct be_cmd_req_hdr hdr;
++ u8 port_num;
++ u8 beacon_state;
++ u8 beacon_duration;
++ u8 status_duration;
++} __packed;
++
++struct be_cmd_resp_enable_disable_beacon {
++ struct be_cmd_resp_hdr resp_hdr;
++ u32 rsvd0;
++} __packed;
++
++struct be_cmd_req_get_beacon_state {
++ struct be_cmd_req_hdr hdr;
++ u8 port_num;
++ u8 rsvd0;
++ u16 rsvd1;
++} __packed;
++
++struct be_cmd_resp_get_beacon_state {
++ struct be_cmd_resp_hdr resp_hdr;
++ u8 beacon_state;
++ u8 rsvd0[3];
++} __packed;
++
++/************************ WOL *******************************/
++struct be_cmd_req_acpi_wol_magic_config {
++ struct be_cmd_req_hdr hdr;
++ u32 rsvd0[145];
++ u8 magic_mac[6];
++ u8 rsvd2[2];
++} __packed;
++
++extern int be_pci_fnum_get(struct be_adapter *adapter);
++extern int be_cmd_POST(struct be_adapter *adapter);
++extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
++ u8 type, bool permanent, u32 if_handle);
++extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
++ u32 if_id, u32 *pmac_id);
++extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
++extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
++ u32 en_flags, u8 *mac, bool pmac_invalid,
++ u32 *if_handle, u32 *pmac_id);
++extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
++extern int be_cmd_eq_create(struct be_adapter *adapter,
++ struct be_queue_info *eq, int eq_delay);
++extern int be_cmd_cq_create(struct be_adapter *adapter,
++ struct be_queue_info *cq, struct be_queue_info *eq,
++ bool sol_evts, bool no_delay,
++ int num_cqe_dma_coalesce);
++extern int be_cmd_mccq_create(struct be_adapter *adapter,
++ struct be_queue_info *mccq,
++ struct be_queue_info *cq);
++extern int be_cmd_txq_create(struct be_adapter *adapter,
++ struct be_queue_info *txq,
++ struct be_queue_info *cq);
++extern int be_cmd_rxq_create(struct be_adapter *adapter,
++ struct be_queue_info *rxq, u16 cq_id,
++ u16 frag_size, u16 max_frame_size, u32 if_id,
++ u32 rss);
++extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
++ int type);
++extern int be_cmd_link_status_query(struct be_adapter *adapter,
++ bool *link_up, u8 *mac_speed, u16 *link_speed);
++extern int be_cmd_reset(struct be_adapter *adapter);
++extern int be_cmd_get_stats(struct be_adapter *adapter,
++ struct be_dma_mem *nonemb_cmd);
++extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver);
++
++extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
++extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
++ u16 *vtag_array, u32 num, bool untagged,
++ bool promiscuous);
++extern int be_cmd_promiscuous_config(struct be_adapter *adapter,
++ u8 port_num, bool en);
++extern int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
++ struct dev_mc_list *mc_list, u32 mc_count,
++ struct be_dma_mem *mem);
++extern int be_cmd_set_flow_control(struct be_adapter *adapter,
++ u32 tx_fc, u32 rx_fc);
++extern int be_cmd_get_flow_control(struct be_adapter *adapter,
++ u32 *tx_fc, u32 *rx_fc);
++extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num);
++extern int be_cmd_reset_function(struct be_adapter *adapter);
++extern int be_process_mcc(struct be_adapter *adapter);
++extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
++ u8 port_num, u8 beacon, u8 status, u8 state);
++extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
++ u8 port_num, u32 *state);
++extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
++ u8 *connector);
++extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
++ struct be_dma_mem *nonemb_cmd);
++extern int be_cmd_fw_init(struct be_adapter *adapter);
++extern int be_cmd_fw_clean(struct be_adapter *adapter);
+diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
+new file mode 100644
+index 0000000..0160ceb
+--- /dev/null
++++ b/drivers/net/benet/be_ethtool.c
+@@ -0,0 +1,491 @@
++/*
++ * Copyright (C) 2005 - 2009 ServerEngines
++ * All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation. The full GNU General
++ * Public License is included in this distribution in the file called COPYING.
++ *
++ * Contact Information:
++ * linux-drivers at serverengines.com
++ *
++ * ServerEngines
++ * 209 N. Fair Oaks Ave
++ * Sunnyvale, CA 94085
++ */
++
++#include "be.h"
++#include "be_cmds.h"
++#include <linux/ethtool.h>
++
++struct be_ethtool_stat {
++ char desc[ETH_GSTRING_LEN];
++ int type;
++ int size;
++ int offset;
++};
++
++enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT, ERXSTAT};
++#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
++ offsetof(_struct, field)
++#define NETSTAT_INFO(field) #field, NETSTAT,\
++ FIELDINFO(struct net_device_stats,\
++ field)
++#define DRVSTAT_INFO(field) #field, DRVSTAT,\
++ FIELDINFO(struct be_drvr_stats, field)
++#define MISCSTAT_INFO(field) #field, MISCSTAT,\
++ FIELDINFO(struct be_rxf_stats, field)
++#define PORTSTAT_INFO(field) #field, PORTSTAT,\
++ FIELDINFO(struct be_port_rxf_stats, \
++ field)
++#define ERXSTAT_INFO(field) #field, ERXSTAT,\
++ FIELDINFO(struct be_erx_stats, field)
++
++static const struct be_ethtool_stat et_stats[] = {
++ {NETSTAT_INFO(rx_packets)},
++ {NETSTAT_INFO(tx_packets)},
++ {NETSTAT_INFO(rx_bytes)},
++ {NETSTAT_INFO(tx_bytes)},
++ {NETSTAT_INFO(rx_errors)},
++ {NETSTAT_INFO(tx_errors)},
++ {NETSTAT_INFO(rx_dropped)},
++ {NETSTAT_INFO(tx_dropped)},
++ {DRVSTAT_INFO(be_tx_reqs)},
++ {DRVSTAT_INFO(be_tx_stops)},
++ {DRVSTAT_INFO(be_fwd_reqs)},
++ {DRVSTAT_INFO(be_tx_wrbs)},
++ {DRVSTAT_INFO(be_rx_polls)},
++ {DRVSTAT_INFO(be_tx_events)},
++ {DRVSTAT_INFO(be_rx_events)},
++ {DRVSTAT_INFO(be_tx_compl)},
++ {DRVSTAT_INFO(be_rx_compl)},
++ {DRVSTAT_INFO(be_ethrx_post_fail)},
++ {DRVSTAT_INFO(be_802_3_dropped_frames)},
++ {DRVSTAT_INFO(be_802_3_malformed_frames)},
++ {DRVSTAT_INFO(be_tx_rate)},
++ {DRVSTAT_INFO(be_rx_rate)},
++ {PORTSTAT_INFO(rx_unicast_frames)},
++ {PORTSTAT_INFO(rx_multicast_frames)},
++ {PORTSTAT_INFO(rx_broadcast_frames)},
++ {PORTSTAT_INFO(rx_crc_errors)},
++ {PORTSTAT_INFO(rx_alignment_symbol_errors)},
++ {PORTSTAT_INFO(rx_pause_frames)},
++ {PORTSTAT_INFO(rx_control_frames)},
++ {PORTSTAT_INFO(rx_in_range_errors)},
++ {PORTSTAT_INFO(rx_out_range_errors)},
++ {PORTSTAT_INFO(rx_frame_too_long)},
++ {PORTSTAT_INFO(rx_address_match_errors)},
++ {PORTSTAT_INFO(rx_vlan_mismatch)},
++ {PORTSTAT_INFO(rx_dropped_too_small)},
++ {PORTSTAT_INFO(rx_dropped_too_short)},
++ {PORTSTAT_INFO(rx_dropped_header_too_small)},
++ {PORTSTAT_INFO(rx_dropped_tcp_length)},
++ {PORTSTAT_INFO(rx_dropped_runt)},
++ {PORTSTAT_INFO(rx_fifo_overflow)},
++ {PORTSTAT_INFO(rx_input_fifo_overflow)},
++ {PORTSTAT_INFO(rx_ip_checksum_errs)},
++ {PORTSTAT_INFO(rx_tcp_checksum_errs)},
++ {PORTSTAT_INFO(rx_udp_checksum_errs)},
++ {PORTSTAT_INFO(rx_non_rss_packets)},
++ {PORTSTAT_INFO(rx_ipv4_packets)},
++ {PORTSTAT_INFO(rx_ipv6_packets)},
++ {PORTSTAT_INFO(tx_unicastframes)},
++ {PORTSTAT_INFO(tx_multicastframes)},
++ {PORTSTAT_INFO(tx_broadcastframes)},
++ {PORTSTAT_INFO(tx_pauseframes)},
++ {PORTSTAT_INFO(tx_controlframes)},
++ {MISCSTAT_INFO(rx_drops_no_pbuf)},
++ {MISCSTAT_INFO(rx_drops_no_txpb)},
++ {MISCSTAT_INFO(rx_drops_no_erx_descr)},
++ {MISCSTAT_INFO(rx_drops_no_tpre_descr)},
++ {MISCSTAT_INFO(rx_drops_too_many_frags)},
++ {MISCSTAT_INFO(rx_drops_invalid_ring)},
++ {MISCSTAT_INFO(forwarded_packets)},
++ {MISCSTAT_INFO(rx_drops_mtu)},
++ {ERXSTAT_INFO(rx_drops_no_fragments)},
++};
++#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
++
++static void
++be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++
++ strcpy(drvinfo->driver, DRV_NAME);
++ strcpy(drvinfo->version, DRV_VER);
++ strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
++ strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
++ drvinfo->testinfo_len = 0;
++ drvinfo->regdump_len = 0;
++ drvinfo->eedump_len = 0;
++}
++
++static int
++be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++ struct be_eq_obj *rx_eq = &adapter->rx_eq;
++ struct be_eq_obj *tx_eq = &adapter->tx_eq;
++
++ coalesce->rx_max_coalesced_frames = adapter->max_rx_coal;
++
++ coalesce->rx_coalesce_usecs = rx_eq->cur_eqd;
++ coalesce->rx_coalesce_usecs_high = rx_eq->max_eqd;
++ coalesce->rx_coalesce_usecs_low = rx_eq->min_eqd;
++
++ coalesce->tx_coalesce_usecs = tx_eq->cur_eqd;
++ coalesce->tx_coalesce_usecs_high = tx_eq->max_eqd;
++ coalesce->tx_coalesce_usecs_low = tx_eq->min_eqd;
++
++ coalesce->use_adaptive_rx_coalesce = rx_eq->enable_aic;
++ coalesce->use_adaptive_tx_coalesce = tx_eq->enable_aic;
++
++ return 0;
++}
++
++/*
++ * This routine is used to set the interrupt coalescing delay *as well as*
++ * the number of pkts to coalesce for LRO.
++ */
++static int
++be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++ struct be_eq_obj *rx_eq = &adapter->rx_eq;
++ struct be_eq_obj *tx_eq = &adapter->tx_eq;
++ u32 tx_max, tx_min, tx_cur;
++ u32 rx_max, rx_min, rx_cur;
++ int status = 0;
++
++ if (coalesce->use_adaptive_tx_coalesce == 1)
++ return -EINVAL;
++
++ adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
++ if (adapter->max_rx_coal > BE_MAX_FRAGS_PER_FRAME)
++ adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
++
++ /* if AIC is being turned on now, start with an EQD of 0 */
++ if (rx_eq->enable_aic == 0 &&
++ coalesce->use_adaptive_rx_coalesce == 1) {
++ rx_eq->cur_eqd = 0;
++ }
++ rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
++
++ rx_max = coalesce->rx_coalesce_usecs_high;
++ rx_min = coalesce->rx_coalesce_usecs_low;
++ rx_cur = coalesce->rx_coalesce_usecs;
++
++ tx_max = coalesce->tx_coalesce_usecs_high;
++ tx_min = coalesce->tx_coalesce_usecs_low;
++ tx_cur = coalesce->tx_coalesce_usecs;
++
++ if (tx_cur > BE_MAX_EQD)
++ tx_cur = BE_MAX_EQD;
++ if (tx_eq->cur_eqd != tx_cur) {
++ status = be_cmd_modify_eqd(adapter, tx_eq->q.id, tx_cur);
++ if (!status)
++ tx_eq->cur_eqd = tx_cur;
++ }
++
++ if (rx_eq->enable_aic) {
++ if (rx_max > BE_MAX_EQD)
++ rx_max = BE_MAX_EQD;
++ if (rx_min > rx_max)
++ rx_min = rx_max;
++ rx_eq->max_eqd = rx_max;
++ rx_eq->min_eqd = rx_min;
++ if (rx_eq->cur_eqd > rx_max)
++ rx_eq->cur_eqd = rx_max;
++ if (rx_eq->cur_eqd < rx_min)
++ rx_eq->cur_eqd = rx_min;
++ } else {
++ if (rx_cur > BE_MAX_EQD)
++ rx_cur = BE_MAX_EQD;
++ if (rx_eq->cur_eqd != rx_cur) {
++ status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
++ rx_cur);
++ if (!status)
++ rx_eq->cur_eqd = rx_cur;
++ }
++ }
++ return 0;
++}
++
++static u32 be_get_rx_csum(struct net_device *netdev)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++
++ return adapter->rx_csum;
++}
++
++static int be_set_rx_csum(struct net_device *netdev, uint32_t data)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++
++ if (data)
++ adapter->rx_csum = true;
++ else
++ adapter->rx_csum = false;
++
++ return 0;
++}
++
++static void
++be_get_ethtool_stats(struct net_device *netdev,
++ struct ethtool_stats *stats, uint64_t *data)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++ struct be_drvr_stats *drvr_stats = &adapter->stats.drvr_stats;
++ struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
++ struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
++ struct be_port_rxf_stats *port_stats =
++ &rxf_stats->port[adapter->port_num];
++ struct net_device_stats *net_stats = &netdev->stats;
++ struct be_erx_stats *erx_stats = &hw_stats->erx;
++ void *p = NULL;
++ int i;
++
++ for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
++ switch (et_stats[i].type) {
++ case NETSTAT:
++ p = net_stats;
++ break;
++ case DRVSTAT:
++ p = drvr_stats;
++ break;
++ case PORTSTAT:
++ p = port_stats;
++ break;
++ case MISCSTAT:
++ p = rxf_stats;
++ break;
++ case ERXSTAT: /* Currently only one ERX stat is provided */
++ p = (u32 *)erx_stats + adapter->rx_obj.q.id;
++ break;
++ }
++
++ p = (u8 *)p + et_stats[i].offset;
++ data[i] = (et_stats[i].size == sizeof(u64)) ?
++ *(u64 *)p: *(u32 *)p;
++ }
++
++ return;
++}
++
++static void
++be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
++ uint8_t *data)
++{
++ int i;
++ switch (stringset) {
++ case ETH_SS_STATS:
++ for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
++ memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
++ data += ETH_GSTRING_LEN;
++ }
++ break;
++ }
++}
++
++static int be_get_stats_count(struct net_device *netdev)
++{
++ return ETHTOOL_STATS_NUM;
++}
++
++static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++ u8 mac_speed = 0, connector = 0;
++ u16 link_speed = 0;
++ bool link_up = false;
++ int status;
++
++ if (adapter->link_speed < 0) {
++ status = be_cmd_link_status_query(adapter, &link_up,
++ &mac_speed, &link_speed);
++
++ /* link_speed is in units of 10 Mbps */
++ if (link_speed) {
++ ecmd->speed = link_speed*10;
++ } else {
++ switch (mac_speed) {
++ case PHY_LINK_SPEED_1GBPS:
++ ecmd->speed = SPEED_1000;
++ break;
++ case PHY_LINK_SPEED_10GBPS:
++ ecmd->speed = SPEED_10000;
++ break;
++ }
++ }
++
++ status = be_cmd_read_port_type(adapter, adapter->port_num,
++ &connector);
++ if (!status) {
++ switch (connector) {
++ case 7:
++ ecmd->port = PORT_FIBRE;
++ ecmd->transceiver = XCVR_EXTERNAL;
++ break;
++ case 0:
++ ecmd->port = PORT_TP;
++ ecmd->transceiver = XCVR_EXTERNAL;
++ break;
++ default:
++ ecmd->port = PORT_TP;
++ ecmd->transceiver = XCVR_INTERNAL;
++ break;
++ }
++ } else {
++ ecmd->port = PORT_AUI;
++ ecmd->transceiver = XCVR_INTERNAL;
++ }
++
++ /* Save for future use */
++ adapter->link_speed = ecmd->speed;
++ adapter->port_type = ecmd->port;
++ adapter->transceiver = ecmd->transceiver;
++ } else {
++ ecmd->speed = adapter->link_speed;
++ ecmd->port = adapter->port_type;
++ ecmd->transceiver = adapter->transceiver;
++ }
++
++ ecmd->duplex = DUPLEX_FULL;
++ ecmd->autoneg = AUTONEG_DISABLE;
++ ecmd->phy_address = adapter->port_num;
++ switch (ecmd->port) {
++ case PORT_FIBRE:
++ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
++ break;
++ case PORT_TP:
++ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
++ break;
++ case PORT_AUI:
++ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
++ break;
++ }
++
++ return 0;
++}
++
++static void
++be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++
++ ring->rx_max_pending = adapter->rx_obj.q.len;
++ ring->tx_max_pending = adapter->tx_obj.q.len;
++
++ ring->rx_pending = atomic_read(&adapter->rx_obj.q.used);
++ ring->tx_pending = atomic_read(&adapter->tx_obj.q.used);
++}
++
++static void
++be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++
++ be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
++ ecmd->autoneg = 0;
++}
++
++static int
++be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++ int status;
++
++ if (ecmd->autoneg != 0)
++ return -EINVAL;
++ adapter->tx_fc = ecmd->tx_pause;
++ adapter->rx_fc = ecmd->rx_pause;
++
++ status = be_cmd_set_flow_control(adapter,
++ adapter->tx_fc, adapter->rx_fc);
++ if (status)
++ dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
++
++ return status;
++}
++
++static int
++be_phys_id(struct net_device *netdev, u32 data)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++ int status;
++ u32 cur;
++
++ be_cmd_get_beacon_state(adapter, adapter->port_num, &cur);
++
++ if (cur == BEACON_STATE_ENABLED)
++ return 0;
++
++ if (data < 2)
++ data = 2;
++
++ status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
++ BEACON_STATE_ENABLED);
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule_timeout(data*HZ);
++
++ status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
++ BEACON_STATE_DISABLED);
++
++ return status;
++}
++
++static void
++be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++
++ wol->supported = WAKE_MAGIC;
++ if (adapter->wol)
++ wol->wolopts = WAKE_MAGIC;
++ else
++ wol->wolopts = 0;
++ memset(&wol->sopass, 0, sizeof(wol->sopass));
++ return;
++}
++
++static int
++be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++
++ if (wol->wolopts & ~WAKE_MAGIC)
++ return -EINVAL;
++
++ if (wol->wolopts & WAKE_MAGIC)
++ adapter->wol = true;
++ else
++ adapter->wol = false;
++
++ return 0;
++}
++
++struct ethtool_ops be_ethtool_ops = {
++ .get_settings = be_get_settings,
++ .get_drvinfo = be_get_drvinfo,
++ .get_wol = be_get_wol,
++ .set_wol = be_set_wol,
++ .get_link = ethtool_op_get_link,
++ .get_coalesce = be_get_coalesce,
++ .set_coalesce = be_set_coalesce,
++ .get_ringparam = be_get_ringparam,
++ .get_pauseparam = be_get_pauseparam,
++ .set_pauseparam = be_set_pauseparam,
++ .get_rx_csum = be_get_rx_csum,
++ .set_rx_csum = be_set_rx_csum,
++ .get_tx_csum = ethtool_op_get_tx_csum,
++ .set_tx_csum = ethtool_op_set_tx_hw_csum,
++ .get_sg = ethtool_op_get_sg,
++ .set_sg = ethtool_op_set_sg,
++ .get_tso = ethtool_op_get_tso,
++ .set_tso = ethtool_op_set_tso,
++ .get_strings = be_get_stat_strings,
++ .get_stats_count = be_get_stats_count,
++ .phys_id = be_phys_id,
++ .get_ethtool_stats = be_get_ethtool_stats,
++};
+diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
+new file mode 100644
+index 0000000..0dc120e
+--- /dev/null
++++ b/drivers/net/benet/be_hw.h
+@@ -0,0 +1,222 @@
++/*
++ * Copyright (C) 2005 - 2009 ServerEngines
++ * All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation. The full GNU General
++ * Public License is included in this distribution in the file called COPYING.
++ *
++ * Contact Information:
++ * linux-drivers at serverengines.com
++ *
++ * ServerEngines
++ * 209 N. Fair Oaks Ave
++ * Sunnyvale, CA 94085
++ */
++
++/********* Mailbox door bell *************/
++/* Used for driver communication with the FW.
++ * The software must write this register twice to post any command. First,
++ * it writes the register with hi=1 and the upper bits of the physical address
++ * for the MAILBOX structure. Software must poll the ready bit until this
++ * is acknowledged. Then, software writes the register with hi=0 with the lower
++ * bits in the address. It must poll the ready bit until the command is
++ * complete. Upon completion, the MAILBOX will contain a valid completion
++ * queue entry.
++ */
++#define MPU_MAILBOX_DB_OFFSET 0x160
++#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
++#define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */
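++
++/* Illustrative sketch (editorial note): the two-phase post described above
++ * looks roughly like this in the driver's mailbox notify path:
++ *
++ *	val = MPU_MAILBOX_DB_HI_MASK |
++ *		((upper_32_bits(mbox_mem->dma) >> 2) << 2);
++ *	iowrite32(val, db);
++ *	be_mbox_db_ready_wait(adapter, db);	poll MPU_MAILBOX_DB_RDY_MASK
++ *	val = (u32)(mbox_mem->dma >> 4) << 2;	hi=0, lower address bits
++ *	iowrite32(val, db);
++ *	be_mbox_db_ready_wait(adapter, db);
++ */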
++
++#define MPU_EP_CONTROL 0
++
++/********** MPU semaphore ******************/
++#define MPU_EP_SEMAPHORE_OFFSET 0xac
++#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
++#define EP_SEMAPHORE_POST_ERR_MASK 0x1
++#define EP_SEMAPHORE_POST_ERR_SHIFT 31
++/* MPU semaphore POST stage values */
++#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting go-ahead from host */
++#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahead to FW */
++#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
++#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
++
++/********* Memory BAR register ************/
++#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
++/* Host Interrupt Enable: if set, interrupts are enabled, although "PCI
++ * Interrupt Disable" may still globally block interrupts in addition to the
++ * individual interrupt masks. This gives the device driver a mechanism to
++ * block all interrupts atomically without having to arbitrate for the PCI
++ * Interrupt Disable bit with the OS.
++ */
++#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
++
++/********* Power management (WOL) **********/
++#define PCICFG_PM_CONTROL_OFFSET 0x44
++#define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */
++
++/********* ISR0 Register offset **********/
++#define CEV_ISR0_OFFSET 0xC18
++#define CEV_ISR_SIZE 4
++
++/********* Event Q door bell *************/
++#define DB_EQ_OFFSET DB_CQ_OFFSET
++#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
++/* Clear the interrupt for this eq */
++#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
++/* Must be 1 */
++#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
++/* Number of event entries processed */
++#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
++/* Rearm bit */
++#define DB_EQ_REARM_SHIFT (29) /* bit 29 */
++
++/********* Compl Q door bell *************/
++#define DB_CQ_OFFSET 0x120
++#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
++/* Number of event entries processed */
++#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
++/* Rearm bit */
++#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
++
++/********** TX ULP door bell *************/
++#define DB_TXULP1_OFFSET 0x60
++#define DB_TXULP_RING_ID_MASK 0x7FF /* bits 0 - 10 */
++/* Number of tx entries posted */
++#define DB_TXULP_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
++#define DB_TXULP_NUM_POSTED_MASK 0x3FFF /* bits 16 - 29 */
++
++/********** RQ(erx) door bell ************/
++#define DB_RQ_OFFSET 0x100
++#define DB_RQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
++/* Number of rx frags posted */
++#define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */
++
++/********** MCC door bell ************/
++#define DB_MCCQ_OFFSET 0x140
++#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
++/* Number of entries posted */
++#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
++
++/*
++ * BE descriptors: host memory data structures whose formats
++ * are hardwired in BE silicon.
++ */
++/* Event Queue Descriptor */
++#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
++#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
++#define EQ_ENTRY_RES_ID_SHIFT 16
++struct be_eq_entry {
++ u32 evt;
++};
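++
++/* Illustrative sketch (editorial note): an event entry is decoded along
++ * these lines before the queue tail is advanced:
++ *
++ *	u32 evt = le32_to_cpu(eqe->evt);
++ *	if (!(evt & EQ_ENTRY_VALID_MASK))
++ *		return NULL;			no new event
++ *	rid = (evt >> EQ_ENTRY_RES_ID_SHIFT) & EQ_ENTRY_RES_ID_MASK;
++ */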
++
++/* TX Queue Descriptor */
++#define ETH_WRB_FRAG_LEN_MASK 0xFFFF
++struct be_eth_wrb {
++ u32 frag_pa_hi; /* dword 0 */
++ u32 frag_pa_lo; /* dword 1 */
++ u32 rsvd0; /* dword 2 */
++ u32 frag_len; /* dword 3: bits 0 - 15 */
++} __packed;
++
++/* Pseudo amap definition for eth_hdr_wrb in which each bit of the
++ * actual structure is defined as a byte: used to calculate
++ * offset/shift/mask of each field */
++struct amap_eth_hdr_wrb {
++ u8 rsvd0[32]; /* dword 0 */
++ u8 rsvd1[32]; /* dword 1 */
++ u8 complete; /* dword 2 */
++ u8 event;
++ u8 crc;
++ u8 forward;
++ u8 ipsec;
++ u8 mgmt;
++ u8 ipcs;
++ u8 udpcs;
++ u8 tcpcs;
++ u8 lso;
++ u8 vlan;
++ u8 gso[2];
++ u8 num_wrb[5];
++ u8 lso_mss[14];
++ u8 len[16]; /* dword 3 */
++ u8 vlan_tag[16];
++} __packed;
++
++struct be_eth_hdr_wrb {
++ u32 dw[4];
++};
++
++/* TX Compl Queue Descriptor */
++
++/* Pseudo amap definition for eth_tx_compl in which each bit of the
++ * actual structure is defined as a byte: used to calculate
++ * offset/shift/mask of each field */
++struct amap_eth_tx_compl {
++ u8 wrb_index[16]; /* dword 0 */
++ u8 ct[2]; /* dword 0 */
++ u8 port[2]; /* dword 0 */
++ u8 rsvd0[8]; /* dword 0 */
++ u8 status[4]; /* dword 0 */
++ u8 user_bytes[16]; /* dword 1 */
++ u8 nwh_bytes[8]; /* dword 1 */
++ u8 lso; /* dword 1 */
++ u8 cast_enc[2]; /* dword 1 */
++ u8 rsvd1[5]; /* dword 1 */
++ u8 rsvd2[32]; /* dword 2 */
++ u8 pkts[16]; /* dword 3 */
++ u8 ringid[11]; /* dword 3 */
++ u8 hash_val[4]; /* dword 3 */
++ u8 valid; /* dword 3 */
++} __packed;
++
++struct be_eth_tx_compl {
++ u32 dw[4];
++};
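++
++/* Illustrative sketch (editorial note): fields are extracted from the raw
++ * dwords with AMAP_GET_BITS(), e.g. while reaping TX completions:
++ *
++ *	last_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, wrb_index, txcp);
++ *	status = AMAP_GET_BITS(struct amap_eth_tx_compl, status, txcp);
++ */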
++
++/* RX Queue Descriptor */
++struct be_eth_rx_d {
++ u32 fragpa_hi;
++ u32 fragpa_lo;
++};
++
++/* RX Compl Queue Descriptor */
++
++/* Pseudo amap definition for eth_rx_compl in which each bit of the
++ * actual structure is defined as a byte: used to calculate
++ * offset/shift/mask of each field */
++struct amap_eth_rx_compl {
++ u8 vlan_tag[16]; /* dword 0 */
++ u8 pktsize[14]; /* dword 0 */
++ u8 port; /* dword 0 */
++ u8 ip_opt; /* dword 0 */
++ u8 err; /* dword 1 */
++ u8 rsshp; /* dword 1 */
++ u8 ipf; /* dword 1 */
++ u8 tcpf; /* dword 1 */
++ u8 udpf; /* dword 1 */
++ u8 ipcksm; /* dword 1 */
++ u8 l4_cksm; /* dword 1 */
++ u8 ip_version; /* dword 1 */
++ u8 macdst[6]; /* dword 1 */
++ u8 vtp; /* dword 1 */
++ u8 rsvd0; /* dword 1 */
++ u8 fragndx[10]; /* dword 1 */
++ u8 ct[2]; /* dword 1 */
++ u8 sw; /* dword 1 */
++ u8 numfrags[3]; /* dword 1 */
++ u8 rss_flush; /* dword 2 */
++ u8 cast_enc[2]; /* dword 2 */
++ u8 qnq; /* dword 2 */
++ u8 rss_bank; /* dword 2 */
++ u8 rsvd1[23]; /* dword 2 */
++ u8 lro_pkt; /* dword 2 */
++ u8 rsvd2[2]; /* dword 2 */
++ u8 valid; /* dword 2 */
++ u8 rsshash[32]; /* dword 3 */
++} __packed;
++
++struct be_eth_rx_compl {
++ u32 dw[4];
++};
+diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
+new file mode 100644
+index 0000000..e497e51
+--- /dev/null
++++ b/drivers/net/benet/be_main.c
+@@ -0,0 +1,2240 @@
++/*
++ * Copyright (C) 2005 - 2009 ServerEngines
++ * All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation. The full GNU General
++ * Public License is included in this distribution in the file called COPYING.
++ *
++ * Contact Information:
++ * linux-drivers at serverengines.com
++ *
++ * ServerEngines
++ * 209 N. Fair Oaks Ave
++ * Sunnyvale, CA 94085
++ */
++
++#include "be.h"
++#include "be_cmds.h"
++#include <asm/div64.h>
++
++MODULE_VERSION(DRV_VER);
++MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
++MODULE_AUTHOR("ServerEngines Corporation");
++MODULE_LICENSE("GPL");
++
++static unsigned int rx_frag_size = 2048;
++module_param(rx_frag_size, uint, S_IRUGO);
++MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
++
++static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
++ { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
++ { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
++ { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
++ { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
++ { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
++ { 0 }
++};
++MODULE_DEVICE_TABLE(pci, be_dev_ids);
++
++static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
++{
++ struct be_dma_mem *mem = &q->dma_mem;
++ if (mem->va)
++ pci_free_consistent(adapter->pdev, mem->size,
++ mem->va, mem->dma);
++}
++
++static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
++ u16 len, u16 entry_size)
++{
++ struct be_dma_mem *mem = &q->dma_mem;
++
++ memset(q, 0, sizeof(*q));
++ q->len = len;
++ q->entry_size = entry_size;
++ mem->size = len * entry_size;
++ mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
++ if (!mem->va)
++ return -1;
++ memset(mem->va, 0, mem->size);
++ return 0;
++}
++
++static void be_intr_set(struct be_adapter *adapter, bool enable)
++{
++ u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
++ u32 reg = ioread32(addr);
++ u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
++
++ if (!enabled && enable)
++ reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
++ else if (enabled && !enable)
++ reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
++ else
++ return;
++
++ iowrite32(reg, addr);
++}
++
++static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
++{
++ u32 val = 0;
++ val |= qid & DB_RQ_RING_ID_MASK;
++ val |= posted << DB_RQ_NUM_POSTED_SHIFT;
++ iowrite32(val, adapter->db + DB_RQ_OFFSET);
++}
++
++static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
++{
++ u32 val = 0;
++ val |= qid & DB_TXULP_RING_ID_MASK;
++ val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
++ iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
++}
++
++static void be_eq_notify(struct be_adapter *adapter, u16 qid,
++ bool arm, bool clear_int, u16 num_popped)
++{
++ u32 val = 0;
++ val |= qid & DB_EQ_RING_ID_MASK;
++ if (arm)
++ val |= 1 << DB_EQ_REARM_SHIFT;
++ if (clear_int)
++ val |= 1 << DB_EQ_CLR_SHIFT;
++ val |= 1 << DB_EQ_EVNT_SHIFT;
++ val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
++ iowrite32(val, adapter->db + DB_EQ_OFFSET);
++}
++
++void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
++{
++ u32 val = 0;
++ val |= qid & DB_CQ_RING_ID_MASK;
++ if (arm)
++ val |= 1 << DB_CQ_REARM_SHIFT;
++ val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
++ iowrite32(val, adapter->db + DB_CQ_OFFSET);
++}
++
++static int be_mac_addr_set(struct net_device *netdev, void *p)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++ struct sockaddr *addr = p;
++ int status = 0;
++
++ if (!is_valid_ether_addr(addr->sa_data))
++ return -EADDRNOTAVAIL;
++
++ status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
++ if (status)
++ return status;
++
++ status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
++ adapter->if_handle, &adapter->pmac_id);
++ if (!status)
++ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
++
++ return status;
++}
++
++void netdev_stats_update(struct be_adapter *adapter)
++{
++ struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
++ struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
++ struct be_port_rxf_stats *port_stats =
++ &rxf_stats->port[adapter->port_num];
++ struct net_device_stats *dev_stats = &adapter->netdev->stats;
++ struct be_erx_stats *erx_stats = &hw_stats->erx;
++
++ dev_stats->rx_packets = port_stats->rx_total_frames;
++ dev_stats->tx_packets = port_stats->tx_unicastframes +
++ port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
++ dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
++ (u64) port_stats->rx_bytes_lsd;
++ dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
++ (u64) port_stats->tx_bytes_lsd;
++
++ /* bad pkts received */
++ dev_stats->rx_errors = port_stats->rx_crc_errors +
++ port_stats->rx_alignment_symbol_errors +
++ port_stats->rx_in_range_errors +
++ port_stats->rx_out_range_errors +
++ port_stats->rx_frame_too_long +
++ port_stats->rx_dropped_too_small +
++ port_stats->rx_dropped_too_short +
++ port_stats->rx_dropped_header_too_small +
++ port_stats->rx_dropped_tcp_length +
++ port_stats->rx_dropped_runt +
++ port_stats->rx_tcp_checksum_errs +
++ port_stats->rx_ip_checksum_errs +
++ port_stats->rx_udp_checksum_errs;
++
++ /* no space in linux buffers: best possible approximation */
++ dev_stats->rx_dropped =
++ erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];
++
++ /* detailed rx errors */
++ dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
++ port_stats->rx_out_range_errors +
++ port_stats->rx_frame_too_long;
++
++ /* receive ring buffer overflow */
++ dev_stats->rx_over_errors = 0;
++
++ dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
++
++ /* frame alignment errors */
++ dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
++
++ /* receiver fifo overrun */
++ /* drops_no_pbuf is not per i/f, it's per BE card */
++ dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
++ port_stats->rx_input_fifo_overflow +
++ rxf_stats->rx_drops_no_pbuf;
++ /* receiver missed packets */
++ dev_stats->rx_missed_errors = 0;
++
++ /* packet transmit problems */
++ dev_stats->tx_errors = 0;
++
++ /* no space available in linux */
++ dev_stats->tx_dropped = 0;
++
++ dev_stats->multicast = port_stats->rx_multicast_frames;
++ dev_stats->collisions = 0;
++
++ /* detailed tx_errors */
++ dev_stats->tx_aborted_errors = 0;
++ dev_stats->tx_carrier_errors = 0;
++ dev_stats->tx_fifo_errors = 0;
++ dev_stats->tx_heartbeat_errors = 0;
++ dev_stats->tx_window_errors = 0;
++}
++
++void be_link_status_update(struct be_adapter *adapter, bool link_up)
++{
++ struct net_device *netdev = adapter->netdev;
++
++ /* If link came up or went down */
++ if (adapter->link_up != link_up) {
++ adapter->link_speed = -1;
++ if (link_up) {
++ netif_start_queue(netdev);
++ netif_carrier_on(netdev);
++ printk(KERN_INFO "%s: Link up\n", netdev->name);
++ } else {
++ netif_stop_queue(netdev);
++ netif_carrier_off(netdev);
++ printk(KERN_INFO "%s: Link down\n", netdev->name);
++ }
++ adapter->link_up = link_up;
++ }
++}
++
++/* Update the EQ delay in BE based on the RX frags consumed / sec */
++static void be_rx_eqd_update(struct be_adapter *adapter)
++{
++ struct be_eq_obj *rx_eq = &adapter->rx_eq;
++ struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
++ ulong now = jiffies;
++ u32 eqd;
++
++ if (!rx_eq->enable_aic)
++ return;
++
++ /* Wrapped around */
++ if (time_before(now, stats->rx_fps_jiffies)) {
++ stats->rx_fps_jiffies = now;
++ return;
++ }
++
++ /* Update once a second */
++ if ((now - stats->rx_fps_jiffies) < HZ)
++ return;
++
++ stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
++ ((now - stats->rx_fps_jiffies) / HZ);
++
++ stats->rx_fps_jiffies = now;
++ stats->be_prev_rx_frags = stats->be_rx_frags;
++ eqd = stats->be_rx_fps / 110000;
++ eqd = eqd << 3;
++ if (eqd > rx_eq->max_eqd)
++ eqd = rx_eq->max_eqd;
++ if (eqd < rx_eq->min_eqd)
++ eqd = rx_eq->min_eqd;
++ if (eqd < 10)
++ eqd = 0;
++ if (eqd != rx_eq->cur_eqd)
++ be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
++
++ rx_eq->cur_eqd = eqd;
++}
++
++static struct net_device_stats *be_get_stats(struct net_device *dev)
++{
++ return &dev->stats;
++}
++
++static u32 be_calc_rate(u64 bytes, unsigned long ticks)
++{
++ u64 rate = bytes;
++
++ do_div(rate, ticks / HZ);
++ rate <<= 3; /* bytes/sec -> bits/sec */
++ do_div(rate, 1000000ul); /* Mbits/sec */
++
++ return rate;
++}
++
++static void be_tx_rate_update(struct be_adapter *adapter)
++{
++ struct be_drvr_stats *stats = drvr_stats(adapter);
++ ulong now = jiffies;
++
++ /* Wrapped around? */
++ if (time_before(now, stats->be_tx_jiffies)) {
++ stats->be_tx_jiffies = now;
++ return;
++ }
++
++ /* Update tx rate once in two seconds */
++ if ((now - stats->be_tx_jiffies) > 2 * HZ) {
++ stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
++ - stats->be_tx_bytes_prev,
++ now - stats->be_tx_jiffies);
++ stats->be_tx_jiffies = now;
++ stats->be_tx_bytes_prev = stats->be_tx_bytes;
++ }
++}
++
++static void be_tx_stats_update(struct be_adapter *adapter,
++ u32 wrb_cnt, u32 copied, bool stopped)
++{
++ struct be_drvr_stats *stats = drvr_stats(adapter);
++ stats->be_tx_reqs++;
++ stats->be_tx_wrbs += wrb_cnt;
++ stats->be_tx_bytes += copied;
++ if (stopped)
++ stats->be_tx_stops++;
++}
++
++/* Determine number of WRB entries needed to xmit data in an skb */
++static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
++{
++ int cnt = (skb->len > skb->data_len);
++
++ cnt += skb_shinfo(skb)->nr_frags;
++
++ /* to account for hdr wrb */
++ cnt++;
++ if (cnt & 1) {
++ /* add a dummy to make it an even num */
++ cnt++;
++ *dummy = true;
++ } else
++ *dummy = false;
++ BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
++ return cnt;
++}
++
++static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
++{
++ wrb->frag_pa_hi = upper_32_bits(addr);
++ wrb->frag_pa_lo = addr & 0xFFFFFFFF;
++ wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
++}
++
++static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
++ bool vlan, u32 wrb_cnt, u32 len)
++{
++ memset(hdr, 0, sizeof(*hdr));
++
++ AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
++
++ if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
++ AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
++ AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
++ hdr, skb_shinfo(skb)->gso_size);
++ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
++ if (is_tcp_pkt(skb))
++ AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
++ else if (is_udp_pkt(skb))
++ AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
++ }
++
++ if (vlan && vlan_tx_tag_present(skb)) {
++ AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
++ AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
++ hdr, vlan_tx_tag_get(skb));
++ }
++
++ AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
++ AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
++ AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
++ AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
++}
++
++
++static int make_tx_wrbs(struct be_adapter *adapter,
++ struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
++{
++ u64 busaddr;
++ u32 i, copied = 0;
++ struct pci_dev *pdev = adapter->pdev;
++ struct sk_buff *first_skb = skb;
++ struct be_queue_info *txq = &adapter->tx_obj.q;
++ struct be_eth_wrb *wrb;
++ struct be_eth_hdr_wrb *hdr;
++
++ atomic_add(wrb_cnt, &txq->used);
++ hdr = queue_head_node(txq);
++ queue_head_inc(txq);
++
++ if (skb->len > skb->data_len) {
++ int len = skb->len - skb->data_len;
++ busaddr = pci_map_single(pdev, skb->data, len,
++ PCI_DMA_TODEVICE);
++ wrb = queue_head_node(txq);
++ wrb_fill(wrb, busaddr, len);
++ be_dws_cpu_to_le(wrb, sizeof(*wrb));
++ queue_head_inc(txq);
++ copied += len;
++ }
++
++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++ struct skb_frag_struct *frag =
++ &skb_shinfo(skb)->frags[i];
++ busaddr = pci_map_page(pdev, frag->page,
++ frag->page_offset,
++ frag->size, PCI_DMA_TODEVICE);
++ wrb = queue_head_node(txq);
++ wrb_fill(wrb, busaddr, frag->size);
++ be_dws_cpu_to_le(wrb, sizeof(*wrb));
++ queue_head_inc(txq);
++ copied += frag->size;
++ }
++
++ if (dummy_wrb) {
++ wrb = queue_head_node(txq);
++ wrb_fill(wrb, 0, 0);
++ be_dws_cpu_to_le(wrb, sizeof(*wrb));
++ queue_head_inc(txq);
++ }
++
++ wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
++ wrb_cnt, copied);
++ be_dws_cpu_to_le(hdr, sizeof(*hdr));
++
++ return copied;
++}
++
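++/* hard_start_xmit handler: build the WRBs, stop the queue if it cannot
++ * hold another max-sized skb, then ring the tx doorbell.
++ */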
++static int be_xmit(struct sk_buff *skb, struct net_device *netdev)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++ struct be_tx_obj *tx_obj = &adapter->tx_obj;
++ struct be_queue_info *txq = &tx_obj->q;
++ u32 wrb_cnt = 0, copied = 0;
++ u32 start = txq->head;
++ bool dummy_wrb, stopped = false;
++
++ wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);
++
++ copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
++
++ /* record the sent skb in the sent_skb table */
++ BUG_ON(tx_obj->sent_skb_list[start]);
++ tx_obj->sent_skb_list[start] = skb;
++
++ /* Ensure that txq has space for the next skb; else stop the queue
++ * *BEFORE* ringing the tx doorbell, so that we serialize the
++ * tx compls of the current transmit, which will wake up the queue
++ */
++ if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= txq->len) {
++ netif_stop_queue(netdev);
++ stopped = true;
++ }
++
++ be_txq_notify(adapter, txq->id, wrb_cnt);
++
++ netdev->trans_start = jiffies;
++
++ be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
++ return NETDEV_TX_OK;
++}
++
++static int be_change_mtu(struct net_device *netdev, int new_mtu)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++ if (new_mtu < BE_MIN_MTU ||
++ new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
++ dev_info(&adapter->pdev->dev,
++ "MTU must be between %d and %d bytes\n",
++ BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
++ return -EINVAL;
++ }
++ dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
++ netdev->mtu, new_mtu);
++ netdev->mtu = new_mtu;
++ return 0;
++}
++
++/*
++ * If BE_NUM_VLANS_SUPPORTED or fewer VLANs are configured, program them
++ * in BE. If more than BE_NUM_VLANS_SUPPORTED are configured, set the BE
++ * in promiscuous VLAN mode.
++ */
++static int be_vid_config(struct be_adapter *adapter)
++{
++ u16 vtag[BE_NUM_VLANS_SUPPORTED];
++ u16 ntags = 0, i;
++ int status;
++
++ if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
++ /* Construct VLAN Table to give to HW */
++ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
++ if (adapter->vlan_tag[i]) {
++ vtag[ntags] = cpu_to_le16(i);
++ ntags++;
++ }
++ }
++ status = be_cmd_vlan_config(adapter, adapter->if_handle,
++ vtag, ntags, 1, 0);
++ } else {
++ status = be_cmd_vlan_config(adapter, adapter->if_handle,
++ NULL, 0, 1, 1);
++ }
++ return status;
++}
++
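++/* Disarm both event queues while the vlan group pointer is switched */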
++static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++ struct be_eq_obj *rx_eq = &adapter->rx_eq;
++ struct be_eq_obj *tx_eq = &adapter->tx_eq;
++
++ be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
++ be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
++ adapter->vlan_grp = grp;
++ be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
++ be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
++}
++
++static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++
++ adapter->num_vlans++;
++ adapter->vlan_tag[vid] = 1;
++
++ be_vid_config(adapter);
++}
++
++static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++
++ adapter->num_vlans--;
++ adapter->vlan_tag[vid] = 0;
++
++ vlan_group_set_device(adapter->vlan_grp, vid, NULL);
++ be_vid_config(adapter);
++}
++
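++/* Program the rx filter: promiscuous, multicast-promiscuous, or the
++ * exact multicast list.
++ */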
++static void be_set_multicast_list(struct net_device *netdev)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++
++ if (netdev->flags & IFF_PROMISC) {
++ be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
++ adapter->promiscuous = true;
++ goto done;
++ }
++
++ /* BE was previously in promiscuous mode; disable it */
++ if (adapter->promiscuous) {
++ adapter->promiscuous = false;
++ be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
++ }
++
++ /* Enable multicast promisc if num configured exceeds what we support */
++ if (netdev->flags & IFF_ALLMULTI || netdev->mc_count > BE_MAX_MC) {
++ be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0,
++ &adapter->mc_cmd_mem);
++ goto done;
++ }
++
++ be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
++ netdev->mc_count, &adapter->mc_cmd_mem);
++done:
++ return;
++}
++
++static void be_rx_rate_update(struct be_adapter *adapter)
++{
++ struct be_drvr_stats *stats = drvr_stats(adapter);
++ ulong now = jiffies;
++
++ /* Wrapped around */
++ if (time_before(now, stats->be_rx_jiffies)) {
++ stats->be_rx_jiffies = now;
++ return;
++ }
++
++ /* Update the rate once every two seconds */
++ if ((now - stats->be_rx_jiffies) < 2 * HZ)
++ return;
++
++ stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
++ - stats->be_rx_bytes_prev,
++ now - stats->be_rx_jiffies);
++ stats->be_rx_jiffies = now;
++ stats->be_rx_bytes_prev = stats->be_rx_bytes;
++}
++
++static void be_rx_stats_update(struct be_adapter *adapter,
++ u32 pktsize, u16 numfrags)
++{
++ struct be_drvr_stats *stats = drvr_stats(adapter);
++
++ stats->be_rx_compl++;
++ stats->be_rx_frags += numfrags;
++ stats->be_rx_bytes += pktsize;
++}
++
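++/* Returns true if the stack must verify the checksum itself: rx csum
++ * offload is disabled or the HW did not validate both the IP and L4
++ * checksums.
++ */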
++static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
++{
++ u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
++
++ l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
++ ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
++ ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
++ if (ip_version) {
++ tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
++ udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
++ }
++ ipv6_chk = (ip_version && (tcpf || udpf));
++
++ return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
++}
++
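++/* Fetch the page_info of a consumed rx frag; unmap the backing page
++ * when its last frag is used.
++ */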
++static struct be_rx_page_info *
++get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
++{
++ struct be_rx_page_info *rx_page_info;
++ struct be_queue_info *rxq = &adapter->rx_obj.q;
++
++ rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
++ BUG_ON(!rx_page_info->page);
++
++ if (rx_page_info->last_page_user)
++ pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
++ adapter->big_page_size, PCI_DMA_FROMDEVICE);
++
++ atomic_dec(&rxq->used);
++ return rx_page_info;
++}
++
++/* Throw away the data in the Rx completion */
++static void be_rx_compl_discard(struct be_adapter *adapter,
++ struct be_eth_rx_compl *rxcp)
++{
++ struct be_queue_info *rxq = &adapter->rx_obj.q;
++ struct be_rx_page_info *page_info;
++ u16 rxq_idx, i, num_rcvd;
++
++ rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
++ num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
++
++ for (i = 0; i < num_rcvd; i++) {
++ page_info = get_rx_page_info(adapter, rxq_idx);
++ put_page(page_info->page);
++ memset(page_info, 0, sizeof(*page_info));
++ index_inc(&rxq_idx, rxq->len);
++ }
++}
++
++/*
++ * skb_fill_rx_data forms a complete skb for an ether frame
++ * indicated by rxcp.
++ */
++static void skb_fill_rx_data(struct be_adapter *adapter,
++ struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
++{
++ struct be_queue_info *rxq = &adapter->rx_obj.q;
++ struct be_rx_page_info *page_info;
++ u16 rxq_idx, i, num_rcvd, j;
++ u32 pktsize, hdr_len, curr_frag_len, size;
++ u8 *start;
++
++ rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
++ pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
++ num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
++
++ page_info = get_rx_page_info(adapter, rxq_idx);
++
++ start = page_address(page_info->page) + page_info->page_offset;
++ prefetch(start);
++
++ /* Copy data in the first descriptor of this completion */
++ curr_frag_len = min(pktsize, rx_frag_size);
++
++ /* Copy the header portion into skb_data */
++ hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
++ memcpy(skb->data, start, hdr_len);
++ skb->len = curr_frag_len;
++ if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
++ /* Complete packet has now been moved to data */
++ put_page(page_info->page);
++ skb->data_len = 0;
++ skb->tail += curr_frag_len;
++ } else {
++ skb_shinfo(skb)->nr_frags = 1;
++ skb_shinfo(skb)->frags[0].page = page_info->page;
++ skb_shinfo(skb)->frags[0].page_offset =
++ page_info->page_offset + hdr_len;
++ skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
++ skb->data_len = curr_frag_len - hdr_len;
++ skb->tail += hdr_len;
++ }
++ memset(page_info, 0, sizeof(*page_info));
++
++ if (pktsize <= rx_frag_size) {
++ BUG_ON(num_rcvd != 1);
++ goto done;
++ }
++
++ /* More frags present for this completion */
++ size = pktsize;
++ for (i = 1, j = 0; i < num_rcvd; i++) {
++ size -= curr_frag_len;
++ index_inc(&rxq_idx, rxq->len);
++ page_info = get_rx_page_info(adapter, rxq_idx);
++
++ curr_frag_len = min(size, rx_frag_size);
++
++ /* Coalesce all frags from the same physical page in one slot */
++ if (page_info->page_offset == 0) {
++ /* Fresh page */
++ j++;
++ skb_shinfo(skb)->frags[j].page = page_info->page;
++ skb_shinfo(skb)->frags[j].page_offset =
++ page_info->page_offset;
++ skb_shinfo(skb)->frags[j].size = 0;
++ skb_shinfo(skb)->nr_frags++;
++ } else {
++ put_page(page_info->page);
++ }
++
++ skb_shinfo(skb)->frags[j].size += curr_frag_len;
++ skb->len += curr_frag_len;
++ skb->data_len += curr_frag_len;
++
++ memset(page_info, 0, sizeof(*page_info));
++ }
++ BUG_ON(j > MAX_SKB_FRAGS);
++
++done:
++ be_rx_stats_update(adapter, pktsize, num_rcvd);
++ return;
++}
++
++/* Process the RX completion indicated by rxcp when LRO is disabled */
++static void be_rx_compl_process(struct be_adapter *adapter,
++ struct be_eth_rx_compl *rxcp)
++{
++ struct sk_buff *skb;
++ u32 vtp, vid;
++
++ vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
++
++ skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
++ if (!skb) {
++ if (net_ratelimit())
++ dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
++ be_rx_compl_discard(adapter, rxcp);
++ return;
++ }
++
++ skb_reserve(skb, NET_IP_ALIGN);
++
++ skb_fill_rx_data(adapter, skb, rxcp);
++
++ if (do_pkt_csum(rxcp, adapter->rx_csum))
++ skb->ip_summed = CHECKSUM_NONE;
++ else
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++ skb->truesize = skb->len + sizeof(struct sk_buff);
++ skb->protocol = eth_type_trans(skb, adapter->netdev);
++ skb->dev = adapter->netdev;
++
++ if (vtp) {
++ if (!adapter->vlan_grp || adapter->num_vlans == 0) {
++ kfree_skb(skb);
++ return;
++ }
++ vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
++ vid = be16_to_cpu(vid);
++ vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
++ } else {
++ netif_receive_skb(skb);
++ }
++
++ adapter->netdev->last_rx = jiffies;
++
++ return;
++}
++
++/* Process the RX completion indicated by rxcp when LRO is enabled */
++static void be_rx_compl_process_lro(struct be_adapter *adapter,
++ struct be_eth_rx_compl *rxcp)
++{
++ struct be_rx_page_info *page_info;
++ struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
++ struct be_queue_info *rxq = &adapter->rx_obj.q;
++ u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
++ u16 i, rxq_idx = 0, vid, j;
++
++ num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
++ pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
++ vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
++ rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
++
++ remaining = pkt_size;
++ for (i = 0, j = -1; i < num_rcvd; i++) {
++ page_info = get_rx_page_info(adapter, rxq_idx);
++
++ curr_frag_len = min(remaining, rx_frag_size);
++
++ /* Coalesce all frags from the same physical page in one slot */
++ if (i == 0 || page_info->page_offset == 0) {
++ /* First frag or fresh page */
++ j++;
++ rx_frags[j].page = page_info->page;
++ rx_frags[j].page_offset = page_info->page_offset;
++ rx_frags[j].size = 0;
++ } else {
++ put_page(page_info->page);
++ }
++ rx_frags[j].size += curr_frag_len;
++
++ remaining -= curr_frag_len;
++ index_inc(&rxq_idx, rxq->len);
++ memset(page_info, 0, sizeof(*page_info));
++ }
++ BUG_ON(j > MAX_SKB_FRAGS);
++
++ if (likely(!vlanf)) {
++ lro_receive_frags(&adapter->rx_obj.lro_mgr, rx_frags, pkt_size,
++ pkt_size, NULL, 0);
++ } else {
++ vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
++ vid = be16_to_cpu(vid);
++
++ if (!adapter->vlan_grp || adapter->num_vlans == 0)
++ return;
++
++ lro_vlan_hwaccel_receive_frags(&adapter->rx_obj.lro_mgr,
++ rx_frags, pkt_size, pkt_size, adapter->vlan_grp,
++ vid, NULL, 0);
++ }
++
++ be_rx_stats_update(adapter, pkt_size, num_rcvd);
++ return;
++}
++
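++/* Return the next valid rx completion (converted to cpu endianness),
++ * or NULL if the CQ is empty.
++ */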
++static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
++{
++ struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);
++
++ if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
++ return NULL;
++
++ be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
++
++ queue_tail_inc(&adapter->rx_obj.cq);
++ return rxcp;
++}
++
++/* To reset the valid bit, we need to reset the whole word, as
++ * when walking the queue the valid entries are little-endian
++ * and the invalid entries are host-endian.
++ */
++static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
++{
++ rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
++}
++
++static inline struct page *be_alloc_pages(u32 size)
++{
++ gfp_t alloc_flags = GFP_ATOMIC;
++ u32 order = get_order(size);
++ if (order > 0)
++ alloc_flags |= __GFP_COMP;
++ return alloc_pages(alloc_flags, order);
++}
++
++/*
++ * Allocate a page, split it to fragments of size rx_frag_size and post as
++ * receive buffers to BE
++ */
++static void be_post_rx_frags(struct be_adapter *adapter)
++{
++ struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
++ struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
++ struct be_queue_info *rxq = &adapter->rx_obj.q;
++ struct page *pagep = NULL;
++ struct be_eth_rx_d *rxd;
++ u64 page_dmaaddr = 0, frag_dmaaddr;
++ u32 posted, page_offset = 0;
++
++ page_info = &page_info_tbl[rxq->head];
++ for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
++ if (!pagep) {
++ pagep = be_alloc_pages(adapter->big_page_size);
++ if (unlikely(!pagep)) {
++ drvr_stats(adapter)->be_ethrx_post_fail++;
++ break;
++ }
++ page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
++ adapter->big_page_size,
++ PCI_DMA_FROMDEVICE);
++ page_info->page_offset = 0;
++ } else {
++ get_page(pagep);
++ page_info->page_offset = page_offset + rx_frag_size;
++ }
++ page_offset = page_info->page_offset;
++ page_info->page = pagep;
++ pci_unmap_addr_set(page_info, bus, page_dmaaddr);
++ frag_dmaaddr = page_dmaaddr + page_info->page_offset;
++
++ rxd = queue_head_node(rxq);
++ rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
++ rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
++
++ /* Any space left in the current big page for another frag? */
++ if ((page_offset + rx_frag_size + rx_frag_size) >
++ adapter->big_page_size) {
++ pagep = NULL;
++ page_info->last_page_user = true;
++ }
++
++ prev_page_info = page_info;
++ queue_head_inc(rxq);
++ page_info = &page_info_tbl[rxq->head];
++ }
++ if (pagep)
++ prev_page_info->last_page_user = true;
++
++ if (posted) {
++ atomic_add(posted, &rxq->used);
++ be_rxq_notify(adapter, rxq->id, posted);
++ } else if (atomic_read(&rxq->used) == 0) {
++ /* Let be_worker replenish when memory is available */
++ adapter->rx_post_starved = true;
++ }
++
++ return;
++}
++
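++/* Return the next valid tx completion, clearing its valid bit */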
++static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
++{
++ struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
++
++ if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
++ return NULL;
++
++ be_dws_le_to_cpu(txcp, sizeof(*txcp));
++
++ txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
++
++ queue_tail_inc(tx_cq);
++ return txcp;
++}
++
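++/* Unmap the WRBs of the skb ending at last_index and free the skb */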
++static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
++{
++ struct be_queue_info *txq = &adapter->tx_obj.q;
++ struct be_eth_wrb *wrb;
++ struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
++ struct sk_buff *sent_skb;
++ u64 busaddr;
++ u16 cur_index, num_wrbs = 0;
++
++ cur_index = txq->tail;
++ sent_skb = sent_skbs[cur_index];
++ BUG_ON(!sent_skb);
++ sent_skbs[cur_index] = NULL;
++
++ do {
++ cur_index = txq->tail;
++ wrb = queue_tail_node(txq);
++ be_dws_le_to_cpu(wrb, sizeof(*wrb));
++ busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
++ if (busaddr != 0) {
++ pci_unmap_single(adapter->pdev, busaddr,
++ wrb->frag_len, PCI_DMA_TODEVICE);
++ }
++ num_wrbs++;
++ queue_tail_inc(txq);
++ } while (cur_index != last_index);
++
++ atomic_sub(num_wrbs, &txq->used);
++
++ kfree_skb(sent_skb);
++}
++
++static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
++{
++ struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
++
++ if (!eqe->evt)
++ return NULL;
++
++ eqe->evt = le32_to_cpu(eqe->evt);
++ queue_tail_inc(&eq_obj->q);
++ return eqe;
++}
++
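++/* Drain the event queue, rearm it and schedule NAPI if any events
++ * were pending.
++ */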
++static int event_handle(struct be_adapter *adapter,
++ struct be_eq_obj *eq_obj)
++{
++ struct be_eq_entry *eqe;
++ u16 num = 0;
++
++ while ((eqe = event_get(eq_obj)) != NULL) {
++ eqe->evt = 0;
++ num++;
++ }
++
++ /* Deal with any spurious interrupts that come
++ * without events
++ */
++ be_eq_notify(adapter, eq_obj->q.id, true, true, num);
++ if (num)
++ napi_schedule(&eq_obj->napi);
++
++ return num;
++}
++
++/* Just read and notify events without processing them.
++ * Used when destroying the event queues.
++ */
++static void be_eq_clean(struct be_adapter *adapter,
++ struct be_eq_obj *eq_obj)
++{
++ struct be_eq_entry *eqe;
++ u16 num = 0;
++
++ while ((eqe = event_get(eq_obj)) != NULL) {
++ eqe->evt = 0;
++ num++;
++ }
++
++ if (num)
++ be_eq_notify(adapter, eq_obj->q.id, false, true, num);
++}
++
++static void be_rx_q_clean(struct be_adapter *adapter)
++{
++ struct be_rx_page_info *page_info;
++ struct be_queue_info *rxq = &adapter->rx_obj.q;
++ struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
++ struct be_eth_rx_compl *rxcp;
++ u16 tail;
++
++ /* First cleanup pending rx completions */
++ while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
++ be_rx_compl_discard(adapter, rxcp);
++ be_rx_compl_reset(rxcp);
++ be_cq_notify(adapter, rx_cq->id, true, 1);
++ }
++
++ /* Then free the posted rx buffers that were not used */
++ tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
++ for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
++ page_info = get_rx_page_info(adapter, tail);
++ put_page(page_info->page);
++ memset(page_info, 0, sizeof(*page_info));
++ }
++ BUG_ON(atomic_read(&rxq->used));
++}
++
++static void be_tx_compl_clean(struct be_adapter *adapter)
++{
++ struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
++ struct be_queue_info *txq = &adapter->tx_obj.q;
++ struct be_eth_tx_compl *txcp;
++ u16 end_idx, cmpl = 0, timeo = 0;
++
++ /* Wait for a max of 200ms for all the tx-completions to arrive. */
++ do {
++ while ((txcp = be_tx_compl_get(tx_cq))) {
++ end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
++ wrb_index, txcp);
++ be_tx_compl_process(adapter, end_idx);
++ cmpl++;
++ }
++ if (cmpl) {
++ be_cq_notify(adapter, tx_cq->id, false, cmpl);
++ cmpl = 0;
++ }
++
++ if (atomic_read(&txq->used) == 0 || ++timeo > 200)
++ break;
++
++ mdelay(1);
++ } while (true);
++
++ if (atomic_read(&txq->used))
++ dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
++ atomic_read(&txq->used));
++}
++
++static void be_mcc_queues_destroy(struct be_adapter *adapter)
++{
++ struct be_queue_info *q;
++
++ q = &adapter->mcc_obj.q;
++ if (q->created)
++ be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
++ be_queue_free(adapter, q);
++
++ q = &adapter->mcc_obj.cq;
++ if (q->created)
++ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
++ be_queue_free(adapter, q);
++}
++
++/* Must be called only after TX qs are created as MCC shares TX EQ */
++static int be_mcc_queues_create(struct be_adapter *adapter)
++{
++ struct be_queue_info *q, *cq;
++
++ /* Alloc MCC compl queue */
++ cq = &adapter->mcc_obj.cq;
++ if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
++ sizeof(struct be_mcc_compl)))
++ goto err;
++
++ /* Ask BE to create MCC compl queue; share TX's eq */
++ if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
++ goto mcc_cq_free;
++
++ /* Alloc MCC queue */
++ q = &adapter->mcc_obj.q;
++ if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
++ goto mcc_cq_destroy;
++
++ /* Ask BE to create MCC queue */
++ if (be_cmd_mccq_create(adapter, q, cq))
++ goto mcc_q_free;
++
++ return 0;
++
++mcc_q_free:
++ be_queue_free(adapter, q);
++mcc_cq_destroy:
++ be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
++mcc_cq_free:
++ be_queue_free(adapter, cq);
++err:
++ return -1;
++}
++
++static void be_tx_queues_destroy(struct be_adapter *adapter)
++{
++ struct be_queue_info *q;
++
++ q = &adapter->tx_obj.q;
++ if (q->created)
++ be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
++ be_queue_free(adapter, q);
++
++ q = &adapter->tx_obj.cq;
++ if (q->created)
++ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
++ be_queue_free(adapter, q);
++
++ /* Clear any residual events */
++ be_eq_clean(adapter, &adapter->tx_eq);
++
++ q = &adapter->tx_eq.q;
++ if (q->created)
++ be_cmd_q_destroy(adapter, q, QTYPE_EQ);
++ be_queue_free(adapter, q);
++}
++
++static int be_tx_queues_create(struct be_adapter *adapter)
++{
++ struct be_queue_info *eq, *q, *cq;
++
++ adapter->tx_eq.max_eqd = 0;
++ adapter->tx_eq.min_eqd = 0;
++ adapter->tx_eq.cur_eqd = 96;
++ adapter->tx_eq.enable_aic = false;
++ /* Alloc Tx Event queue */
++ eq = &adapter->tx_eq.q;
++ if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
++ return -1;
++
++ /* Ask BE to create Tx Event queue */
++ if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
++ goto tx_eq_free;
++ /* Alloc TX eth compl queue */
++ cq = &adapter->tx_obj.cq;
++ if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
++ sizeof(struct be_eth_tx_compl)))
++ goto tx_eq_destroy;
++
++ /* Ask BE to create Tx eth compl queue */
++ if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
++ goto tx_cq_free;
++
++ /* Alloc TX eth queue */
++ q = &adapter->tx_obj.q;
++ if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
++ goto tx_cq_destroy;
++
++ /* Ask BE to create Tx eth queue */
++ if (be_cmd_txq_create(adapter, q, cq))
++ goto tx_q_free;
++ return 0;
++
++tx_q_free:
++ be_queue_free(adapter, q);
++tx_cq_destroy:
++ be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
++tx_cq_free:
++ be_queue_free(adapter, cq);
++tx_eq_destroy:
++ be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
++tx_eq_free:
++ be_queue_free(adapter, eq);
++ return -1;
++}
++
++static void be_rx_queues_destroy(struct be_adapter *adapter)
++{
++ struct be_queue_info *q;
++
++ q = &adapter->rx_obj.q;
++ if (q->created) {
++ be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
++ be_rx_q_clean(adapter);
++ }
++ be_queue_free(adapter, q);
++
++ q = &adapter->rx_obj.cq;
++ if (q->created)
++ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
++ be_queue_free(adapter, q);
++
++ /* Clear any residual events */
++ be_eq_clean(adapter, &adapter->rx_eq);
++
++ q = &adapter->rx_eq.q;
++ if (q->created)
++ be_cmd_q_destroy(adapter, q, QTYPE_EQ);
++ be_queue_free(adapter, q);
++}
++
++static int be_rx_queues_create(struct be_adapter *adapter)
++{
++ struct be_queue_info *eq, *q, *cq;
++ int rc;
++
++ adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
++ adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
++ adapter->rx_eq.max_eqd = BE_MAX_EQD;
++ adapter->rx_eq.min_eqd = 0;
++ adapter->rx_eq.cur_eqd = 0;
++ adapter->rx_eq.enable_aic = true;
++
++ /* Alloc Rx Event queue */
++ eq = &adapter->rx_eq.q;
++ rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
++ sizeof(struct be_eq_entry));
++ if (rc)
++ return rc;
++
++ /* Ask BE to create Rx Event queue */
++ rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
++ if (rc)
++ goto rx_eq_free;
++
++ /* Alloc RX eth compl queue */
++ cq = &adapter->rx_obj.cq;
++ rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
++ sizeof(struct be_eth_rx_compl));
++ if (rc)
++ goto rx_eq_destroy;
++
++ /* Ask BE to create Rx eth compl queue */
++ rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
++ if (rc)
++ goto rx_cq_free;
++
++ /* Alloc RX eth queue */
++ q = &adapter->rx_obj.q;
++ rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
++ if (rc)
++ goto rx_cq_destroy;
++
++ /* Ask BE to create Rx eth queue */
++ rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
++ BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
++ if (rc)
++ goto rx_q_free;
++
++ return 0;
++rx_q_free:
++ be_queue_free(adapter, q);
++rx_cq_destroy:
++ be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
++rx_cq_free:
++ be_queue_free(adapter, cq);
++rx_eq_destroy:
++ be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
++rx_eq_free:
++ be_queue_free(adapter, eq);
++ return rc;
++}
++
++/* There are 8 evt ids per func. Returns the evt id's bit number */
++static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
++{
++ return eq_id - 8 * be_pci_func(adapter);
++}
++
++static irqreturn_t be_intx(int irq, void *dev)
++{
++ struct be_adapter *adapter = dev;
++ int isr;
++
++ isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
++ (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
++ if (!isr)
++ return IRQ_NONE;
++
++ event_handle(adapter, &adapter->tx_eq);
++ event_handle(adapter, &adapter->rx_eq);
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t be_msix_rx(int irq, void *dev)
++{
++ struct be_adapter *adapter = dev;
++
++ event_handle(adapter, &adapter->rx_eq);
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
++{
++ struct be_adapter *adapter = dev;
++
++ event_handle(adapter, &adapter->tx_eq);
++
++ return IRQ_HANDLED;
++}
++
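++/* LRO is attempted only for error-free TCP frames and only when rx
++ * coalescing (max_rx_coal > 1) is enabled.
++ */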
++static inline bool do_lro(struct be_adapter *adapter,
++ struct be_eth_rx_compl *rxcp)
++{
++ int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
++ int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
++
++ if (err)
++ drvr_stats(adapter)->be_rxcp_err++;
++
++ return (!tcp_frame || err || (adapter->max_rx_coal <= 1)) ?
++ false : true;
++}
++
++int be_poll_rx(struct napi_struct *napi, int budget)
++{
++ struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
++ struct be_adapter *adapter =
++ container_of(rx_eq, struct be_adapter, rx_eq);
++ struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
++ struct be_eth_rx_compl *rxcp;
++ u32 work_done;
++
++ adapter->stats.drvr_stats.be_rx_polls++;
++ for (work_done = 0; work_done < budget; work_done++) {
++ rxcp = be_rx_compl_get(adapter);
++ if (!rxcp)
++ break;
++
++ if (do_lro(adapter, rxcp))
++ be_rx_compl_process_lro(adapter, rxcp);
++ else
++ be_rx_compl_process(adapter, rxcp);
++
++ be_rx_compl_reset(rxcp);
++ }
++
++ lro_flush_all(&adapter->rx_obj.lro_mgr);
++
++ /* Refill the queue */
++ if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
++ be_post_rx_frags(adapter);
++
++ /* All consumed */
++ if (work_done < budget) {
++ napi_complete(napi);
++ be_cq_notify(adapter, rx_cq->id, true, work_done);
++ } else {
++ /* More to be consumed; continue with interrupts disabled */
++ be_cq_notify(adapter, rx_cq->id, false, work_done);
++ }
++ return work_done;
++}
++
++void be_process_tx(struct be_adapter *adapter)
++{
++ struct be_queue_info *txq = &adapter->tx_obj.q;
++ struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
++ struct be_eth_tx_compl *txcp;
++ u32 num_cmpl = 0;
++ u16 end_idx;
++
++ while ((txcp = be_tx_compl_get(tx_cq))) {
++ end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
++ wrb_index, txcp);
++ be_tx_compl_process(adapter, end_idx);
++ num_cmpl++;
++ }
++
++ if (num_cmpl) {
++ be_cq_notify(adapter, tx_cq->id, true, num_cmpl);
++
++ /* As Tx wrbs have been freed up, wake up netdev queue if
++ * it was stopped due to lack of tx wrbs.
++ */
++ if (netif_queue_stopped(adapter->netdev) &&
++ atomic_read(&txq->used) < txq->len / 2) {
++ netif_wake_queue(adapter->netdev);
++ }
++
++ drvr_stats(adapter)->be_tx_events++;
++ drvr_stats(adapter)->be_tx_compl += num_cmpl;
++ }
++}
++
++/* As TX and MCC share the same EQ, check for both TX and MCC completions.
++ * For TX/MCC we don't honour budget; consume everything.
++ */
++static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
++{
++ struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
++ struct be_adapter *adapter =
++ container_of(tx_eq, struct be_adapter, tx_eq);
++
++ napi_complete(napi);
++
++ be_process_tx(adapter);
++
++ be_process_mcc(adapter);
++
++ return 1;
++}
++
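++/* Worker rescheduled every second: refresh HW stats, adapt the rx EQ
++ * delay, update tx/rx rates and replenish the rx queue if posting
++ * had starved.
++ */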
++static void be_worker(struct work_struct *work)
++{
++ struct be_adapter *adapter =
++ container_of(work, struct be_adapter, work.work);
++
++ be_cmd_get_stats(adapter, &adapter->stats.cmd);
++
++ /* Set EQ delay */
++ be_rx_eqd_update(adapter);
++
++ be_tx_rate_update(adapter);
++ be_rx_rate_update(adapter);
++
++ if (adapter->rx_post_starved) {
++ adapter->rx_post_starved = false;
++ be_post_rx_frags(adapter);
++ }
++
++ schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
++}
++
++static void be_msix_disable(struct be_adapter *adapter)
++{
++ if (adapter->msix_enabled) {
++ pci_disable_msix(adapter->pdev);
++ adapter->msix_enabled = false;
++ }
++}
++
++static void be_msix_enable(struct be_adapter *adapter)
++{
++ int i, status;
++
++ for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
++ adapter->msix_entries[i].entry = i;
++
++ status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
++ BE_NUM_MSIX_VECTORS);
++ if (status == 0)
++ adapter->msix_enabled = true;
++ return;
++}
++
++static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
++{
++ return adapter->msix_entries[
++ be_evt_bit_get(adapter, eq_id)].vector;
++}
++
++static int be_request_irq(struct be_adapter *adapter,
++ struct be_eq_obj *eq_obj,
++ void *handler, char *desc)
++{
++ struct net_device *netdev = adapter->netdev;
++ int vec;
++
++ sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
++ vec = be_msix_vec_get(adapter, eq_obj->q.id);
++ return request_irq(vec, handler, 0, eq_obj->desc, adapter);
++}
++
++static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
++{
++ int vec = be_msix_vec_get(adapter, eq_obj->q.id);
++ free_irq(vec, adapter);
++}
++
++static int be_msix_register(struct be_adapter *adapter)
++{
++ int status;
++
++ status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
++ if (status)
++ goto err;
++
++ status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
++ if (status)
++ goto free_tx_irq;
++
++ return 0;
++
++free_tx_irq:
++ be_free_irq(adapter, &adapter->tx_eq);
++err:
++ dev_warn(&adapter->pdev->dev,
++ "MSIX Request IRQ failed - err %d\n", status);
++ pci_disable_msix(adapter->pdev);
++ adapter->msix_enabled = false;
++ return status;
++}
++
++static int be_irq_register(struct be_adapter *adapter)
++{
++ struct net_device *netdev = adapter->netdev;
++ int status;
++
++ if (adapter->msix_enabled) {
++ status = be_msix_register(adapter);
++ if (status == 0)
++ goto done;
++ }
++
++ /* INTx */
++ netdev->irq = adapter->pdev->irq;
++ status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
++ adapter);
++ if (status) {
++ dev_err(&adapter->pdev->dev,
++ "INTx request IRQ failed - err %d\n", status);
++ return status;
++ }
++done:
++ adapter->isr_registered = true;
++ return 0;
++}
++
++static void be_irq_unregister(struct be_adapter *adapter)
++{
++ struct net_device *netdev = adapter->netdev;
++
++ if (!adapter->isr_registered)
++ return;
++
++ /* INTx */
++ if (!adapter->msix_enabled) {
++ free_irq(netdev->irq, adapter);
++ goto done;
++ }
++
++ /* MSIx */
++ be_free_irq(adapter, &adapter->tx_eq);
++ be_free_irq(adapter, &adapter->rx_eq);
++done:
++ adapter->isr_registered = false;
++ return;
++}
++
++static int be_open(struct net_device *netdev)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++ struct be_eq_obj *rx_eq = &adapter->rx_eq;
++ struct be_eq_obj *tx_eq = &adapter->tx_eq;
++ bool link_up;
++ int status;
++ u8 mac_speed;
++ u16 link_speed;
++
++ /* First time posting */
++ be_post_rx_frags(adapter);
++
++ napi_enable(&rx_eq->napi);
++ napi_enable(&tx_eq->napi);
++
++ be_irq_register(adapter);
++
++ be_intr_set(adapter, true);
++
++ /* The evt queues are created in unarmed state; arm them */
++ be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
++ be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
++
++ /* Rx compl queue may be in unarmed state; rearm it */
++ be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
++
++ status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
++ &link_speed);
++ if (status)
++ goto ret_sts;
++ be_link_status_update(adapter, link_up);
++
++ status = be_vid_config(adapter);
++ if (status)
++ goto ret_sts;
++
++ status = be_cmd_set_flow_control(adapter,
++ adapter->tx_fc, adapter->rx_fc);
++ if (status)
++ goto ret_sts;
++
++ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
++ret_sts:
++ return status;
++}
++
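++/* Enable or disable magic-packet wake-on-LAN via a FW command and the
++ * PCI PM configuration.
++ */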
++static int be_setup_wol(struct be_adapter *adapter, bool enable)
++{
++ struct be_dma_mem cmd;
++ int status = 0;
++ u8 mac[ETH_ALEN];
++
++ memset(mac, 0, ETH_ALEN);
++
++ cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
++ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
++ if (cmd.va == NULL)
++ return -1;
++ memset(cmd.va, 0, cmd.size);
++
++ if (enable) {
++ status = pci_write_config_dword(adapter->pdev,
++ PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
++ if (status) {
++ dev_err(&adapter->pdev->dev,
++ "Could not enable Wake-on-lan \n");
++ pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
++ cmd.dma);
++ return status;
++ }
++ status = be_cmd_enable_magic_wol(adapter,
++ adapter->netdev->dev_addr, &cmd);
++ pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
++ pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
++ } else {
++ status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
++ pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
++ pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
++ }
++
++ pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
++ return status;
++}
++
++static int be_setup(struct be_adapter *adapter)
++{
++ struct net_device *netdev = adapter->netdev;
++ u32 cap_flags, en_flags;
++ int status;
++
++ cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
++ BE_IF_FLAGS_MCAST_PROMISCUOUS |
++ BE_IF_FLAGS_PROMISCUOUS |
++ BE_IF_FLAGS_PASS_L3L4_ERRORS;
++ en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
++ BE_IF_FLAGS_PASS_L3L4_ERRORS;
++
++ status = be_cmd_if_create(adapter, cap_flags, en_flags,
++ netdev->dev_addr, false/* pmac_invalid */,
++ &adapter->if_handle, &adapter->pmac_id);
++ if (status != 0)
++ goto do_none;
++
++ status = be_tx_queues_create(adapter);
++ if (status != 0)
++ goto if_destroy;
++
++ status = be_rx_queues_create(adapter);
++ if (status != 0)
++ goto tx_qs_destroy;
++
++ status = be_mcc_queues_create(adapter);
++ if (status != 0)
++ goto rx_qs_destroy;
++
++ adapter->link_speed = -1;
++
++ return 0;
++
++rx_qs_destroy:
++ be_rx_queues_destroy(adapter);
++tx_qs_destroy:
++ be_tx_queues_destroy(adapter);
++if_destroy:
++ be_cmd_if_destroy(adapter, adapter->if_handle);
++do_none:
++ return status;
++}
++
++static int be_clear(struct be_adapter *adapter)
++{
++ be_mcc_queues_destroy(adapter);
++ be_rx_queues_destroy(adapter);
++ be_tx_queues_destroy(adapter);
++
++ be_cmd_if_destroy(adapter, adapter->if_handle);
++
++ /* tell fw we're done with firing cmds */
++ be_cmd_fw_clean(adapter);
++ return 0;
++}
++
++static int be_close(struct net_device *netdev)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++ struct be_eq_obj *rx_eq = &adapter->rx_eq;
++ struct be_eq_obj *tx_eq = &adapter->tx_eq;
++ int vec;
++
++ cancel_delayed_work_sync(&adapter->work);
++
++ netif_stop_queue(netdev);
++ netif_carrier_off(netdev);
++ adapter->link_up = false;
++
++ be_intr_set(adapter, false);
++
++ if (adapter->msix_enabled) {
++ vec = be_msix_vec_get(adapter, tx_eq->q.id);
++ synchronize_irq(vec);
++ vec = be_msix_vec_get(adapter, rx_eq->q.id);
++ synchronize_irq(vec);
++ } else {
++ synchronize_irq(netdev->irq);
++ }
++ be_irq_unregister(adapter);
++
++ napi_disable(&rx_eq->napi);
++ napi_disable(&tx_eq->napi);
++
++ /* Wait for all pending tx completions to arrive so that
++ * all tx skbs are freed.
++ */
++ be_tx_compl_clean(adapter);
++
++ return 0;
++}
++
++static int be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
++ void **ip_hdr, void **tcpudp_hdr,
++ u64 *hdr_flags, void *priv)
++{
++ struct ethhdr *eh;
++ struct vlan_ethhdr *veh;
++ struct iphdr *iph;
++ u8 *va = page_address(frag->page) + frag->page_offset;
++ unsigned long ll_hlen;
++
++ prefetch(va);
++ eh = (struct ethhdr *)va;
++ *mac_hdr = eh;
++ ll_hlen = ETH_HLEN;
++ if (eh->h_proto != htons(ETH_P_IP)) {
++ if (eh->h_proto == htons(ETH_P_8021Q)) {
++ veh = (struct vlan_ethhdr *)va;
++ if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
++ return -1;
++
++ ll_hlen += VLAN_HLEN;
++ } else {
++ return -1;
++ }
++ }
++ *hdr_flags = LRO_IPV4;
++ iph = (struct iphdr *)(va + ll_hlen);
++ *ip_hdr = iph;
++ if (iph->protocol != IPPROTO_TCP)
++ return -1;
++ *hdr_flags |= LRO_TCP;
++ *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
++
++ return 0;
++}
++
++static void be_lro_init(struct be_adapter *adapter, struct net_device *netdev)
++{
++ struct net_lro_mgr *lro_mgr;
++
++ lro_mgr = &adapter->rx_obj.lro_mgr;
++ lro_mgr->dev = netdev;
++ lro_mgr->features = LRO_F_NAPI;
++ lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
++ lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
++ lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
++ lro_mgr->lro_arr = adapter->rx_obj.lro_desc;
++ lro_mgr->get_frag_header = be_get_frag_header;
++ lro_mgr->max_aggr = BE_MAX_FRAGS_PER_FRAME;
++}
++
++static void be_netdev_init(struct net_device *netdev)
++{
++ struct be_adapter *adapter = netdev_priv(netdev);
++
++ netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
++ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM;
++
++ netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
++
++ netdev->flags |= IFF_MULTICAST;
++
++ adapter->rx_csum = true;
++
++ /* Default settings for Rx and Tx flow control */
++ adapter->rx_fc = true;
++ adapter->tx_fc = true;
++
++ netdev->open = &be_open;
++ netdev->stop = &be_close;
++ netdev->hard_start_xmit = &be_xmit;
++ netdev->get_stats = &be_get_stats;
++ netdev->set_rx_mode = &be_set_multicast_list;
++ netdev->set_mac_address = &be_mac_addr_set;
++ netdev->change_mtu = &be_change_mtu;
++ netdev->vlan_rx_register = &be_vlan_register;
++ netdev->vlan_rx_add_vid = &be_vlan_add_vid;
++ netdev->vlan_rx_kill_vid = &be_vlan_rem_vid;
++
++ SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
++
++ be_lro_init(adapter, netdev);
++
++ netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
++ BE_NAPI_WEIGHT);
++ netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
++ BE_NAPI_WEIGHT);
++
++ netif_carrier_off(netdev);
++ netif_stop_queue(netdev);
++}
++
++static void be_unmap_pci_bars(struct be_adapter *adapter)
++{
++ if (adapter->csr)
++ iounmap(adapter->csr);
++ if (adapter->db)
++ iounmap(adapter->db);
++ if (adapter->pcicfg)
++ iounmap(adapter->pcicfg);
++}
++
++static int be_map_pci_bars(struct be_adapter *adapter)
++{
++ u8 __iomem *addr;
++ int pcicfg_reg;
++
++ addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
++ pci_resource_len(adapter->pdev, 2));
++ if (addr == NULL)
++ return -ENOMEM;
++ adapter->csr = addr;
++
++ addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
++ 128 * 1024);
++ if (addr == NULL)
++ goto pci_map_err;
++ adapter->db = addr;
++
++ if (adapter->generation == BE_GEN2)
++ pcicfg_reg = 1;
++ else
++ pcicfg_reg = 0;
++
++ addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
++ pci_resource_len(adapter->pdev, pcicfg_reg));
++ if (addr == NULL)
++ goto pci_map_err;
++ adapter->pcicfg = addr;
++
++ return 0;
++pci_map_err:
++ be_unmap_pci_bars(adapter);
++ return -ENOMEM;
++}
++
++static void be_ctrl_cleanup(struct be_adapter *adapter)
++{
++ struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
++
++ be_unmap_pci_bars(adapter);
++
++ if (mem->va)
++ pci_free_consistent(adapter->pdev, mem->size,
++ mem->va, mem->dma);
++
++ mem = &adapter->mc_cmd_mem;
++ if (mem->va)
++ pci_free_consistent(adapter->pdev, mem->size,
++ mem->va, mem->dma);
++}
++
++static int be_ctrl_init(struct be_adapter *adapter)
++{
++ struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
++ struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
++ struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
++ int status;
++
++ status = be_map_pci_bars(adapter);
++ if (status)
++ goto done;
++
++ mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
++ mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
++ mbox_mem_alloc->size, &mbox_mem_alloc->dma);
++ if (!mbox_mem_alloc->va) {
++ status = -ENOMEM;
++ goto unmap_pci_bars;
++ }
++
++ mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
++ mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
++ mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
++ memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
++
++ mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
++ mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
++ &mc_cmd_mem->dma);
++ if (mc_cmd_mem->va == NULL) {
++ status = -ENOMEM;
++ goto free_mbox;
++ }
++ memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
++
++ spin_lock_init(&adapter->mbox_lock);
++ spin_lock_init(&adapter->mcc_lock);
++ spin_lock_init(&adapter->mcc_cq_lock);
++
++ return 0;
++
++free_mbox:
++ pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
++ mbox_mem_alloc->va, mbox_mem_alloc->dma);
++
++unmap_pci_bars:
++ be_unmap_pci_bars(adapter);
++
++done:
++ return status;
++}
++
++static void be_stats_cleanup(struct be_adapter *adapter)
++{
++ struct be_stats_obj *stats = &adapter->stats;
++ struct be_dma_mem *cmd = &stats->cmd;
++
++ if (cmd->va)
++ pci_free_consistent(adapter->pdev, cmd->size,
++ cmd->va, cmd->dma);
++}
++
++static int be_stats_init(struct be_adapter *adapter)
++{
++ struct be_stats_obj *stats = &adapter->stats;
++ struct be_dma_mem *cmd = &stats->cmd;
++
++ cmd->size = sizeof(struct be_cmd_req_get_stats);
++ cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
++ if (cmd->va == NULL)
++ return -1;
++ memset(cmd->va, 0, cmd->size);
++ return 0;
++}
++
++static void __devexit be_remove(struct pci_dev *pdev)
++{
++ struct be_adapter *adapter = pci_get_drvdata(pdev);
++
++ if (!adapter)
++ return;
++
++ unregister_netdev(adapter->netdev);
++
++ be_clear(adapter);
++
++ be_stats_cleanup(adapter);
++
++ be_ctrl_cleanup(adapter);
++
++ be_msix_disable(adapter);
++
++ pci_set_drvdata(pdev, NULL);
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++
++ free_netdev(adapter->netdev);
++}
++
++static int be_get_config(struct be_adapter *adapter)
++{
++ int status;
++ u8 mac[ETH_ALEN];
++
++ status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
++ if (status)
++ return status;
++
++ status = be_cmd_query_fw_cfg(adapter, &adapter->port_num);
++ if (status)
++ return status;
++
++ memset(mac, 0, ETH_ALEN);
++ status = be_cmd_mac_addr_query(adapter, mac,
++ MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
++ if (status)
++ return status;
++
++ if (!is_valid_ether_addr(mac))
++ return -EADDRNOTAVAIL;
++
++ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
++ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
++
++ return 0;
++}
++
++static int __devinit be_probe(struct pci_dev *pdev,
++ const struct pci_device_id *pdev_id)
++{
++ int status = 0;
++ struct be_adapter *adapter;
++ struct net_device *netdev;
++
++ status = pci_enable_device(pdev);
++ if (status)
++ goto do_none;
++
++ status = pci_request_regions(pdev, DRV_NAME);
++ if (status)
++ goto disable_dev;
++ pci_set_master(pdev);
++
++ netdev = alloc_etherdev(sizeof(struct be_adapter));
++ if (netdev == NULL) {
++ status = -ENOMEM;
++ goto rel_reg;
++ }
++ adapter = netdev_priv(netdev);
++
++ switch (pdev->device) {
++ case BE_DEVICE_ID1:
++ case OC_DEVICE_ID1:
++ adapter->generation = BE_GEN2;
++ break;
++ case BE_DEVICE_ID2:
++ case OC_DEVICE_ID2:
++ adapter->generation = BE_GEN3;
++ break;
++ default:
++ adapter->generation = 0;
++ }
++
++ adapter->pdev = pdev;
++ pci_set_drvdata(pdev, adapter);
++ adapter->netdev = netdev;
++ be_netdev_init(netdev);
++ SET_NETDEV_DEV(netdev, &pdev->dev);
++
++ be_msix_enable(adapter);
++
++ status = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
++ if (!status) {
++ netdev->features |= NETIF_F_HIGHDMA;
++ } else {
++ status = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
++ if (status) {
++ dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
++ goto free_netdev;
++ }
++ }
++
++ status = be_ctrl_init(adapter);
++ if (status)
++ goto free_netdev;
++
++ /* sync up with fw's ready state */
++ status = be_cmd_POST(adapter);
++ if (status)
++ goto ctrl_clean;
++
++ /* tell fw we're ready to fire cmds */
++ status = be_cmd_fw_init(adapter);
++ if (status)
++ goto ctrl_clean;
++
++ status = be_cmd_reset_function(adapter);
++ if (status)
++ goto ctrl_clean;
++
++ status = be_stats_init(adapter);
++ if (status)
++ goto ctrl_clean;
++
++ status = be_get_config(adapter);
++ if (status)
++ goto stats_clean;
++
++ INIT_DELAYED_WORK(&adapter->work, be_worker);
++
++ status = be_setup(adapter);
++ if (status)
++ goto stats_clean;
++
++ status = register_netdev(netdev);
++ if (status != 0)
++ goto unsetup;
++
++ dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
++ return 0;
++
++unsetup:
++ be_clear(adapter);
++stats_clean:
++ be_stats_cleanup(adapter);
++ctrl_clean:
++ be_ctrl_cleanup(adapter);
++free_netdev:
++ be_msix_disable(adapter);
++ free_netdev(adapter->netdev);
++ pci_set_drvdata(pdev, NULL);
++rel_reg:
++ pci_release_regions(pdev);
++disable_dev:
++ pci_disable_device(pdev);
++do_none:
++ dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
++ return status;
++}
++
++static int be_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ struct be_adapter *adapter = pci_get_drvdata(pdev);
++ struct net_device *netdev = adapter->netdev;
++
++ if (adapter->wol)
++ be_setup_wol(adapter, true);
++
++ netif_device_detach(netdev);
++ if (netif_running(netdev)) {
++ rtnl_lock();
++ be_close(netdev);
++ rtnl_unlock();
++ }
++ be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
++ be_clear(adapter);
++
++ pci_save_state(pdev);
++ pci_disable_device(pdev);
++ pci_set_power_state(pdev, pci_choose_state(pdev, state));
++ return 0;
++}
++
++static int be_resume(struct pci_dev *pdev)
++{
++ int status = 0;
++ struct be_adapter *adapter = pci_get_drvdata(pdev);
++ struct net_device *netdev = adapter->netdev;
++
++ netif_device_detach(netdev);
++
++ status = pci_enable_device(pdev);
++ if (status)
++ return status;
++
++ pci_set_power_state(pdev, 0);
++ pci_restore_state(pdev);
++
++ /* tell fw we're ready to fire cmds */
++ status = be_cmd_fw_init(adapter);
++ if (status)
++ return status;
++
++ be_setup(adapter);
++ if (netif_running(netdev)) {
++ rtnl_lock();
++ be_open(netdev);
++ rtnl_unlock();
++ }
++ netif_device_attach(netdev);
++
++ if (adapter->wol)
++ be_setup_wol(adapter, false);
++ return 0;
++}
++
++static struct pci_driver be_driver = {
++ .name = DRV_NAME,
++ .id_table = be_dev_ids,
++ .probe = be_probe,
++ .remove = be_remove,
++ .suspend = be_suspend,
++ .resume = be_resume
++};
++
++static int __init be_init_module(void)
++{
++ if (rx_frag_size != 8192 && rx_frag_size != 4096
++ && rx_frag_size != 2048) {
++ printk(KERN_WARNING DRV_NAME
++ " : Module param rx_frag_size must be 2048/4096/8192."
++ " Using 2048\n");
++ rx_frag_size = 2048;
++ }
++
++ return pci_register_driver(&be_driver);
++}
++module_init(be_init_module);
++
++static void __exit be_exit_module(void)
++{
++ pci_unregister_driver(&be_driver);
++}
++module_exit(be_exit_module);
Modified: dists/lenny/linux-2.6/debian/patches/series/22
==============================================================================
--- dists/lenny/linux-2.6/debian/patches/series/22 Thu Feb 18 22:24:49 2010 (r15202)
+++ dists/lenny/linux-2.6/debian/patches/series/22 Thu Feb 18 23:11:29 2010 (r15203)
@@ -17,4 +17,5 @@
+ bugfix/all/megaraid_sas-add-readl-to-force-PCI-posting-flush.patch
+ bugfix/all/megaraid_sas-add-the-shutdown-DCMD-cmd.patch
+ features/all/megaraid_sas-add-new-controllers-0x78-0x79.patch
-+ bugfix/all/saa7134-fix-deadlock.patch
\ No newline at end of file
++ bugfix/all/saa7134-fix-deadlock.patch
++ features/all/add-be2net.patch