[Glibc-bsd-commits] r1702 - in trunk/kfreebsd-6/debian: . patches

Aurelien Jarno aurel32 at alioth.debian.org
Fri Nov 3 18:30:57 CET 2006


Author: aurel32
Date: 2006-11-03 18:30:57 +0100 (Fri, 03 Nov 2006)
New Revision: 1702

Added:
   trunk/kfreebsd-6/debian/patches/000_nfe.diff
Modified:
   trunk/kfreebsd-6/debian/changelog
Log:
 * Backport the NVIDIA nForce MCP Ethernet driver from FreeBSD 7.0.


Modified: trunk/kfreebsd-6/debian/changelog
===================================================================
--- trunk/kfreebsd-6/debian/changelog	2006-11-03 16:39:24 UTC (rev 1701)
+++ trunk/kfreebsd-6/debian/changelog	2006-11-03 17:30:57 UTC (rev 1702)
@@ -1,3 +1,9 @@
+kfreebsd-6 (6.1-0.4) unreleased; urgency=low
+
+  * Backport the NVIDIA nForce MCP Ethernet driver from FreeBSD 7.0.
+
+ -- Aurelien Jarno <aurel32 at debian.org>  Fri,  3 Nov 2006 18:23:17 +0100
+
 kfreebsd-6 (6.1-0.3) unreleased; urgency=low
 
   * Relax the conflict with kfreebsd-loader to (<< 5.4-1.3). 

Added: trunk/kfreebsd-6/debian/patches/000_nfe.diff
===================================================================
--- trunk/kfreebsd-6/debian/patches/000_nfe.diff	2006-11-03 16:39:24 UTC (rev 1701)
+++ trunk/kfreebsd-6/debian/patches/000_nfe.diff	2006-11-03 17:30:57 UTC (rev 1702)
@@ -0,0 +1,2658 @@
+diff -Nurd sys/dev/nfe/if_nfe.c sys/dev/nfe/if_nfe.c
+--- sys/dev/nfe/if_nfe.c	1970-01-01 01:00:00.000000000 +0100
++++ sys/dev/nfe/if_nfe.c	2006-09-05 07:01:26.000000000 +0200
+@@ -0,0 +1,2231 @@
++/*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/
++
++/*-
++ * Copyright (c) 2006 Shigeaki Tagashira <shigeaki at se.hiroshima-u.ac.jp>
++ * Copyright (c) 2006 Damien Bergamini <damien.bergamini at free.fr>
++ * Copyright (c) 2005, 2006 Jonathan Gray <jsg at openbsd.org>
++ *
++ * Permission to use, copy, modify, and distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
++
++#include <sys/cdefs.h>
++__FBSDID("$FreeBSD: src/sys/dev/nfe/if_nfe.c,v 1.3 2006/08/14 15:35:43 ru Exp $");
++
++/* Uncomment the following line to enable polling. */
++/* #define DEVICE_POLLING */
++
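++/*
++ * Compile-time options: jumbo frames, checksum offload, VLAN tagging
++ * and taskqueue-driven transmit.
++ */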
++#undef NFE_NO_JUMBO
++#define NFE_CSUM
++#define NFE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
++#define NVLAN 1
++#undef NFE_TXTASK
++
++#ifdef HAVE_KERNEL_OPTION_HEADERS
++#include "opt_device_polling.h"
++#endif
++
++#include <sys/param.h>
++#include <sys/endian.h>
++#include <sys/systm.h>
++#include <sys/sockio.h>
++#include <sys/mbuf.h>
++#include <sys/malloc.h>
++#include <sys/module.h>
++#include <sys/kernel.h>
++#include <sys/sysctl.h>
++#include <sys/socket.h>
++#include <sys/taskqueue.h>
++
++#include <net/if.h>
++#include <net/if_arp.h>
++#include <net/ethernet.h>
++#include <net/if_dl.h>
++#include <net/if_media.h>
++#include <net/if_types.h>
++#include <net/if_vlan_var.h>
++
++#include <net/bpf.h>
++
++#include <machine/bus.h>
++#include <machine/resource.h>
++#include <sys/bus.h>
++#include <sys/rman.h>
++
++#include <dev/mii/mii.h>
++#include <dev/mii/miivar.h>
++
++#include <dev/pci/pcireg.h>
++#include <dev/pci/pcivar.h>
++
++#include <dev/nfe/if_nfereg.h>
++#include <dev/nfe/if_nfevar.h>
++
++MODULE_DEPEND(nfe, pci, 1, 1, 1);
++MODULE_DEPEND(nfe, ether, 1, 1, 1);
++MODULE_DEPEND(nfe, miibus, 1, 1, 1);
++#include "miibus_if.h"
++
++static  int nfe_probe  (device_t);
++static  int nfe_attach (device_t);
++static  int nfe_detach (device_t);
++static  void nfe_shutdown(device_t);
++static  int nfe_miibus_readreg	(device_t, int, int);
++static  int nfe_miibus_writereg	(device_t, int, int, int);
++static  void nfe_miibus_statchg	(device_t); 
++static  int nfe_ioctl(struct ifnet *, u_long, caddr_t);
++static  void nfe_intr(void *);
++static void nfe_rxeof(struct nfe_softc *);
++static void nfe_txeof(struct nfe_softc *);
++static int  nfe_encap(struct nfe_softc *, struct mbuf *);
++static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
++static int  nfe_newbuf_jumbo(struct nfe_softc *, int, struct mbuf *);
++static void nfe_jfree(void *, void *);
++static int  nfe_jpool_alloc(struct nfe_softc *);
++static void nfe_jpool_free(struct nfe_softc *);
++static void nfe_setmulti(struct nfe_softc *);
++static void nfe_start(struct ifnet *);
++static void nfe_start_locked(struct ifnet *);
++static void nfe_watchdog(struct ifnet *);
++static void nfe_init(void *);
++static void nfe_init_locked(void *);
++static void nfe_stop(struct ifnet *, int);
++static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
++static void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
++static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
++static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
++static void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
++static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
++static int  nfe_ifmedia_upd(struct ifnet *);
++static int  nfe_ifmedia_upd_locked(struct ifnet *);
++static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
++static void nfe_tick(void *);
++static void nfe_tick_locked(struct nfe_softc *);
++#ifdef NFE_TXTASK
++static void nfe_tx_task(void *, int);
++#endif
++static void nfe_get_macaddr(struct nfe_softc *, u_char *);
++static void nfe_set_macaddr(struct nfe_softc *, u_char *);
++static void nfe_dma_map_segs	(void *, bus_dma_segment_t *, int, int);
++#ifdef DEVICE_POLLING
++static void nfe_poll_locked(struct ifnet *, enum poll_cmd, int);
++#endif
++
++#ifdef NFE_DEBUG
++int nfedebug = 0;
++#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
++#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
++#else
++#define DPRINTF(x)
++#define DPRINTFN(n,x)
++#endif
++
++#define	NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
++#define	NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
++#define	NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
++#define	NFE_JLIST_LOCK(_sc)	mtx_lock(&(_sc)->nfe_jlist_mtx)
++#define	NFE_JLIST_UNLOCK(_sc)	mtx_unlock(&(_sc)->nfe_jlist_mtx)
++
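++/* OpenBSD spelling of le16toh(), kept from the original driver */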
++#define letoh16(x) le16toh(x)
++
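++/*
++ * Interrupt moderation: NFE_MODE_CPU programs the hardware mitigation
++ * timer (NFE_IM_DEFAULT in nfe_init_locked()), while NFE_MODE_THROUGHPUT
++ * takes roughly one interrupt per packet.  nfe_encap_delay backs the
++ * hw.nfe.encap_delay tunable declared below.
++ */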
++#define NFE_MODE_THROUGHPUT 0
++#define NFE_MODE_CPU        1
++static int opt_mode = NFE_MODE_CPU;
++static int nfe_encap_delay = 0;
++
++SYSCTL_NODE(_hw, OID_AUTO, nfe, CTLFLAG_RD, 0, "nfe device parameters");
++TUNABLE_INT("hw.nfe.encap_delay", &nfe_encap_delay);
++SYSCTL_INT(_hw_nfe, OID_AUTO, encap_delay, CTLFLAG_RW, &nfe_encap_delay, 0, "");
++
++static device_method_t nfe_methods[] = {
++	/* Device interface */
++	DEVMETHOD(device_probe,		nfe_probe),
++	DEVMETHOD(device_attach,	nfe_attach),
++	DEVMETHOD(device_detach,	nfe_detach),
++	DEVMETHOD(device_shutdown,	nfe_shutdown),
++
++	/* bus interface */
++	DEVMETHOD(bus_print_child,	bus_generic_print_child),
++	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
++
++	/* MII interface */
++	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
++	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
++	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg), 
++
++	{ 0, 0 }
++};
++
++static driver_t nfe_driver = {
++	"nfe",
++	nfe_methods,
++	sizeof(struct nfe_softc)
++};
++
++static devclass_t nfe_devclass;
++
++DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
++DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
++
++static struct nfe_type nfe_devs[] = {
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
++	"NVIDIA nForce Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
++	"NVIDIA nForce2 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
++	"NVIDIA nForce3 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
++	"NVIDIA nForce2 400 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
++	"NVIDIA nForce2 400 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
++	"NVIDIA nForce3 250 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
++	"NVIDIA nForce3 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
++	"NVIDIA nForce4 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
++	"NVIDIA nForce4 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
++	"NVIDIA nForce MCP04 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
++	"NVIDIA nForce MCP04 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
++	"NVIDIA nForce 430 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
++	"NVIDIA nForce 430 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
++	"NVIDIA nForce MCP55 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
++	"NVIDIA nForce MCP55 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
++	"NVIDIA nForce MCP61 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
++	"NVIDIA nForce MCP61 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
++	"NVIDIA nForce MCP61 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
++	"NVIDIA nForce MCP61 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
++	"NVIDIA nForce MCP65 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
++	"NVIDIA nForce MCP65 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
++	"NVIDIA nForce MCP65 Networking Adapter"},
++	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
++	"NVIDIA nForce MCP65 Networking Adapter"},
++	{0, 0, NULL}
++};
++
++
++/* Probe for supported hardware ID's */
++static int
++nfe_probe(device_t dev)
++{
++	struct nfe_type *t;
++
++	t = nfe_devs;
++	/* Check for matching PCI DEVICE ID's */
++	while (t->name != NULL) {
++		if ((pci_get_vendor(dev) == t->vid_id) &&
++		    (pci_get_device(dev) == t->dev_id)) {
++			device_set_desc(dev, t->name);
++			return (0);
++		}
++		t++;
++	}
++
++	return (ENXIO);
++}
++
++static int
++nfe_attach(device_t dev)
++{
++	struct nfe_softc *sc;
++	struct ifnet *ifp;
++	int unit, error = 0, rid;
++
++	sc = device_get_softc(dev);
++	unit = device_get_unit(dev);
++	sc->nfe_dev = dev;
++	sc->nfe_unit = unit;
++
++	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
++	    MTX_DEF);
++
++	pci_enable_busmaster(dev);
++
++	rid = NFE_PCI_BA;
++	sc->nfe_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
++	    0, ~0, 1, RF_ACTIVE);
++
++	if (sc->nfe_res == NULL) {
++		printf ("nfe%d: couldn't map ports/memory\n", unit);
++		error = ENXIO;
++		goto fail;
++	}
++
++	sc->nfe_memt = rman_get_bustag(sc->nfe_res);
++	sc->nfe_memh = rman_get_bushandle(sc->nfe_res);
++
++	/* Allocate interrupt */
++	rid = 0;
++	sc->nfe_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
++	    0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
++
++	if (sc->nfe_irq == NULL) {
++		printf("nfe%d: couldn't map interrupt\n", unit);
++		error = ENXIO;
++		goto fail;
++	}
++
++	nfe_get_macaddr(sc, sc->eaddr);
++
++	sc->nfe_flags = 0;
++
++	switch (pci_get_device(dev)) {
++	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
++	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
++	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
++	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
++		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
++		break;
++	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
++	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
++	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
++	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
++	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
++	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
++		sc->nfe_flags |= NFE_40BIT_ADDR;
++		break;
++	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
++	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
++	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
++	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
++	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
++	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
++	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
++	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
++		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
++		break;
++	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
++	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
++		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_HW_VLAN;
++		break;
++	}
++
++#ifndef NFE_NO_JUMBO
++	/* enable jumbo frames for adapters that support it */
++	if (sc->nfe_flags & NFE_JUMBO_SUP)
++		sc->nfe_flags |= NFE_USE_JUMBO;
++#endif
++
++	/*
++	 * Allocate the parent bus DMA tag appropriate for PCI.
++	 */
++#define NFE_NSEG_NEW 32
++	error = bus_dma_tag_create(NULL,
++				   1, 0,
++				   BUS_SPACE_MAXADDR_32BIT,
++				   BUS_SPACE_MAXADDR,
++				   NULL, NULL,
++				   BUS_SPACE_MAXSIZE_32BIT,
++				   NFE_NSEG_NEW,
++				   BUS_SPACE_MAXSIZE_32BIT,
++				   0, NULL, NULL,
++				   &sc->nfe_parent_tag);
++	if (error)
++		goto fail;
++
++	/*
++	 * Allocate Tx and Rx rings.
++	 */
++	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
++		printf("nfe%d: could not allocate Tx ring\n", unit);
++		error = ENXIO;
++		goto fail;
++	}
++
++	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
++		printf("nfe%d: could not allocate Rx ring\n", unit);
++		error = ENXIO;
++		goto fail;
++	}
++
++	ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
++	if (ifp == NULL) {
++		printf("nfe%d: can not if_alloc()\n", unit);
++		error = ENOSPC;
++		goto fail;
++	}
++
++	ifp->if_softc = sc;
++	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
++	ifp->if_mtu = ETHERMTU;
++	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
++	ifp->if_ioctl = nfe_ioctl;
++	ifp->if_start = nfe_start;
++	/* ifp->if_hwassist = NFE_CSUM_FEATURES; */
++	ifp->if_watchdog = nfe_watchdog;
++	ifp->if_init = nfe_init;
++	ifp->if_baudrate = IF_Gbps(1);
++	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT);
++	ifp->if_snd.ifq_maxlen = NFE_TX_RING_COUNT;
++	IFQ_SET_READY(&ifp->if_snd);
++
++	ifp->if_capabilities = IFCAP_VLAN_MTU;
++#if NVLAN > 0
++	if (sc->nfe_flags & NFE_HW_VLAN)
++		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
++#endif
++#ifdef NFE_CSUM
++	if (sc->nfe_flags & NFE_HW_CSUM) {
++		ifp->if_capabilities |= IFCAP_HWCSUM;
++	}
++#endif
++	ifp->if_capenable = ifp->if_capabilities;
++
++#ifdef DEVICE_POLLING
++	ifp->if_capabilities |= IFCAP_POLLING;
++#endif
++
++#ifdef NFE_TXTASK
++	TASK_INIT(&sc->nfe_txtask, 0, nfe_tx_task, ifp);
++#endif
++
++	/* Do MII setup */
++	if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd, nfe_ifmedia_sts)) {
++		printf("nfe%d: MII without any phy!\n", unit);
++		error = ENXIO;
++		goto fail;
++	}
++
++	ether_ifattach(ifp, sc->eaddr);
++	callout_init(&sc->nfe_stat_ch, CALLOUT_MPSAFE);
++
++	error = bus_setup_intr(dev, sc->nfe_irq, INTR_TYPE_NET|INTR_MPSAFE,
++	    nfe_intr, sc, &sc->nfe_intrhand);
++
++	if (error) {
++		printf("nfe%d: couldn't set up irq\n", unit);
++		ether_ifdetach(ifp);
++		goto fail;
++	}
++
++fail:
++	if (error)
++		nfe_detach(dev);
++
++	return (error);
++}
++
++
++static int
++nfe_detach(device_t dev)
++{
++	struct nfe_softc	*sc;
++	struct ifnet		*ifp;
++	u_char			eaddr[ETHER_ADDR_LEN];
++	int			i;
++
++	sc = device_get_softc(dev);
++	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
++	ifp = sc->nfe_ifp;
++
++#ifdef DEVICE_POLLING
++	if (ifp->if_capenable & IFCAP_POLLING)
++		ether_poll_deregister(ifp);
++#endif
++
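++	/* write the MAC address back to the chip in reversed byte order */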
++	for (i = 0; i < ETHER_ADDR_LEN; i++) {
++		eaddr[i] = sc->eaddr[5 - i];
++	}
++	nfe_set_macaddr(sc, eaddr);
++
++	if (device_is_attached(dev)) {
++		NFE_LOCK(sc);
++		nfe_stop(ifp, 1);
++		NFE_UNLOCK(sc);
++		ifp->if_flags &= ~IFF_UP;
++		callout_drain(&sc->nfe_stat_ch);
++		ether_ifdetach(ifp);
++	}
++
++	if (ifp)
++		if_free(ifp);
++	if (sc->nfe_miibus)
++		device_delete_child(dev, sc->nfe_miibus);
++	bus_generic_detach(dev);
++
++	if (sc->nfe_intrhand)
++		bus_teardown_intr(dev, sc->nfe_irq, sc->nfe_intrhand);
++	if (sc->nfe_irq)
++		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nfe_irq);
++	if (sc->nfe_res)
++		bus_release_resource(dev, SYS_RES_MEMORY, NFE_PCI_BA, sc->nfe_res);
++
++	nfe_free_tx_ring(sc, &sc->txq);
++	nfe_free_rx_ring(sc, &sc->rxq);
++
++	if (sc->nfe_parent_tag)
++		bus_dma_tag_destroy(sc->nfe_parent_tag);
++
++	mtx_destroy(&sc->nfe_mtx);
++
++	return (0);
++}
++
++
++static void
++nfe_miibus_statchg(device_t dev)
++{
++	struct nfe_softc *sc;
++	struct mii_data *mii;
++	u_int32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
++
++	sc = device_get_softc(dev);
++	mii = device_get_softc(sc->nfe_miibus);
++
++	phy = NFE_READ(sc, NFE_PHY_IFACE);
++	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
++
++	seed = NFE_READ(sc, NFE_RNDSEED);
++	seed &= ~NFE_SEED_MASK;
++
++	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
++		phy  |= NFE_PHY_HDX;	/* half-duplex */
++		misc |= NFE_MISC1_HDX;
++	}
++
++	switch (IFM_SUBTYPE(mii->mii_media_active)) {
++	case IFM_1000_T:	/* full-duplex only */
++		link |= NFE_MEDIA_1000T;
++		seed |= NFE_SEED_1000T;
++		phy  |= NFE_PHY_1000T;
++		break;
++	case IFM_100_TX:
++		link |= NFE_MEDIA_100TX;
++		seed |= NFE_SEED_100TX;
++		phy  |= NFE_PHY_100TX;
++		break;
++	case IFM_10_T:
++		link |= NFE_MEDIA_10T;
++		seed |= NFE_SEED_10T;
++		break;
++	}
++
++	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */
++
++	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
++	NFE_WRITE(sc, NFE_MISC1, misc);
++	NFE_WRITE(sc, NFE_LINKSPEED, link);
++}
++
++static int
++nfe_miibus_readreg(device_t dev, int phy, int reg)
++{
++	struct nfe_softc *sc = device_get_softc(dev);
++	u_int32_t val;
++	int ntries;
++
++	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
++
++	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
++		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
++		DELAY(100);
++	}
++
++	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
++
++	for (ntries = 0; ntries < 1000; ntries++) {
++		DELAY(100);
++		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
++			break;
++	}
++	if (ntries == 1000) {
++		DPRINTFN(2, ("nfe%d: timeout waiting for PHY\n", sc->nfe_unit));
++		return 0;
++	}
++
++	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
++		DPRINTFN(2, ("nfe%d: could not read PHY\n", sc->nfe_unit));
++		return 0;
++	}
++
++	val = NFE_READ(sc, NFE_PHY_DATA);
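++	/* remember the PHY address that responded with a sane value */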
++	if (val != 0xffffffff && val != 0)
++		sc->mii_phyaddr = phy;
++
++	DPRINTFN(2, ("nfe%d: mii read phy %d reg 0x%x ret 0x%x\n", sc->nfe_unit, phy, reg, val));
++
++	return val;
++}
++
++static int
++nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
++{
++	struct nfe_softc *sc = device_get_softc(dev);
++	u_int32_t ctl;
++	int ntries; 
++
++	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
++
++	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
++		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
++		DELAY(100);
++	}
++
++	NFE_WRITE(sc, NFE_PHY_DATA, val);
++	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
++	NFE_WRITE(sc, NFE_PHY_CTL, ctl);
++
++	for (ntries = 0; ntries < 1000; ntries++) {
++		DELAY(100);
++		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
++			break;
++	}
++#ifdef NFE_DEBUG
++	if (nfedebug >= 2 && ntries == 1000)
++		printf("could not write to PHY\n");
++#endif
++	return 0;
++}
++
++static int
++nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
++{
++	struct nfe_desc32 *desc32;
++	struct nfe_desc64 *desc64;
++	struct nfe_rx_data *data;
++	void **desc;
++	bus_addr_t physaddr;
++	int nsegs;
++	bus_dma_segment_t segs;
++	int i, error, descsize;
++
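++	/* 40-bit parts use 64-bit descriptors; older parts use the 32-bit layout */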
++	if (sc->nfe_flags & NFE_40BIT_ADDR) {
++		desc = (void **)&ring->desc64;
++		descsize = sizeof (struct nfe_desc64);
++	} else {
++		desc = (void **)&ring->desc32;
++		descsize = sizeof (struct nfe_desc32);
++	}
++
++	ring->cur = ring->next = 0;
++
++	error = bus_dma_tag_create(sc->nfe_parent_tag, 
++				   PAGE_SIZE, 0,	/* alignment, boundary */
++				   BUS_SPACE_MAXADDR,   /* lowaddr */
++				   BUS_SPACE_MAXADDR,	/* highaddr */
++				   NULL, NULL,		/* filter, filterarg */
++				   NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
++				   NFE_RX_RING_COUNT * descsize,   /* maxsegsize */
++				   0,           	/* flags */
++				   NULL, NULL,		/* lockfunc, lockarg */
++				   &ring->rx_desc_tag);
++	if (error != 0) {
++		printf("nfe%d: could not create rx desc DMA tag\n", sc->nfe_unit);
++		goto fail;
++	}
++
++	/* allocate memory to desc */
++	error = bus_dmamem_alloc(ring->rx_desc_tag, (void **)desc, BUS_DMA_NOWAIT, &ring->rx_desc_map);
++	if (error != 0) {
++		printf("nfe%d: could not alloc rx desc DMA map\n", sc->nfe_unit);
++		goto fail;
++	}
++
++	/* map desc to device visible address space */
++	error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, *desc,
++				NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs,
++				&ring->rx_desc_segs, BUS_DMA_NOWAIT);
++	if (error != 0) {
++		printf("nfe%d: could not load desc DMA map\n", sc->nfe_unit);
++		goto fail;
++	}
++
++	bzero(*desc, NFE_RX_RING_COUNT * descsize);
++	ring->rx_desc_addr = ring->rx_desc_segs.ds_addr;
++	ring->physaddr = ring->rx_desc_addr;
++
++	if (sc->nfe_flags & NFE_USE_JUMBO) {
++		ring->bufsz = NFE_JBYTES;
++		if ((error = nfe_jpool_alloc(sc)) != 0) {
++			printf("nfe%d: could not allocate jumbo frames\n", sc->nfe_unit);
++			goto fail;
++		}
++	} else {
++		ring->bufsz = MCLBYTES;
++		error = bus_dma_tag_create(sc->nfe_parent_tag, 
++					   ETHER_ALIGN, 0,	/* alignment, boundary */
++					   BUS_SPACE_MAXADDR,   /* lowaddr */
++					   BUS_SPACE_MAXADDR,	/* highaddr */
++					   NULL, NULL,	        /* filter, filterarg */
++					   MCLBYTES, 1,         /* maxsize, nsegments */
++					   MCLBYTES,            /* maxsegsize */
++					   0,	                /* flags */
++					   NULL, NULL,	        /* lockfunc, lockarg */
++					   &ring->rx_data_tag);
++		if (error != 0) {
++			printf("nfe%d: could not create rx buf DMA tag\n", sc->nfe_unit);
++			goto fail;
++		}
++	}
++
++	/*
++	 * Pre-allocate Rx buffers and populate Rx ring.
++	 */
++	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
++		data = &sc->rxq.data[i];
++
++		if (sc->nfe_flags & NFE_USE_JUMBO) {
++			if (nfe_newbuf_jumbo(sc, i, NULL) == ENOBUFS) {
++				printf("nfe%d: could not allocate jumbo buffer\n", sc->nfe_unit);
++				goto fail;
++			}
++			data->m = sc->rxq.jbuf[i].m;
++			physaddr = sc->rxq.jbuf[i].rx_jumbo_seg.ds_addr;
++		} else {
++			data->m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
++			if (data->m == NULL) {
++				printf("nfe%d: could not allocate rx mbuf\n", sc->nfe_unit);
++				error = ENOMEM;
++				goto fail;
++			}
++			data->m->m_len = data->m->m_pkthdr.len = MCLBYTES;
++
++			error = bus_dmamap_create(sc->rxq.rx_data_tag, 0, &data->rx_data_map);
++			if (error != 0) {
++				printf("nfe%d: could not create rx buf DMA map\n", sc->nfe_unit);
++				goto fail;
++			}
++
++			error = bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, data->rx_data_map,
++							data->m, &segs, &nsegs, BUS_DMA_NOWAIT);
++			if (error != 0 || nsegs != 1) {
++				printf("nfe%d: could not load rx buf DMA map\n", sc->nfe_unit);
++				goto fail;
++			}
++
++			physaddr = segs.ds_addr;
++		}
++
++		if (sc->nfe_flags & NFE_40BIT_ADDR) {
++			desc64 = &sc->rxq.desc64[i];
++#if defined(__LP64__)
++			desc64->physaddr[0] = htole32(physaddr >> 32);
++#endif
++			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
++			desc64->length = htole16(sc->rxq.bufsz);
++			desc64->flags = htole16(NFE_RX_READY);
++		} else {
++			desc32 = &sc->rxq.desc32[i];
++			desc32->physaddr = htole32(physaddr);
++			desc32->length = htole16(sc->rxq.bufsz);
++			desc32->flags = htole16(NFE_RX_READY);
++		}
++
++	}
++
++	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, BUS_DMASYNC_PREWRITE);
++
++	return 0;
++
++fail:	
++	return error;
++}
++
++static int
++nfe_jpool_alloc(struct nfe_softc *sc)
++{
++	struct nfe_rx_ring *ring = &sc->rxq;
++	struct nfe_jbuf *jbuf;
++	caddr_t buf;
++	int i, error;
++
++	mtx_init(&sc->nfe_jlist_mtx, "nfe_jlist_mtx", NULL, MTX_DEF);
++
++	/*
++	 * Allocate a big chunk of DMA'able memory.
++	 */
++	error = bus_dma_tag_create(sc->nfe_parent_tag, 
++				   PAGE_SIZE, 0,       	/* alignment, boundary */
++				   BUS_SPACE_MAXADDR,   /* lowaddr */
++				   BUS_SPACE_MAXADDR,	/* highaddr */
++				   NULL, NULL,		/* filter, filterarg */
++				   NFE_JPOOL_SIZE, 1,   /* maxsize, nsegments */
++				   NFE_JPOOL_SIZE,      /* maxsegsize */
++				   0,            	/* flags */
++				   NULL, NULL,		/* lockfunc, lockarg */
++				   &ring->rx_jpool_tag);
++	if (error != 0) {
++		printf("nfe%d: could not create jumbo pool DMA tag\n", sc->nfe_unit);
++		goto fail;
++	}
++
++	error = bus_dmamem_alloc(ring->rx_jpool_tag, (void**)&ring->jpool,
++				 BUS_DMA_NOWAIT, &ring->rx_jpool_map);
++	if (error != 0) {
++		printf("nfe%d: could not create jumbo DMA memory\n", sc->nfe_unit);
++		goto fail;
++	}
++
++	/* ..and split it into 9KB chunks */
++	SLIST_INIT(&ring->jfreelist);
++
++	buf = ring->jpool;
++	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
++		jbuf = &ring->jbuf[i];
++		jbuf->buf = buf;
++		buf += NFE_JBYTES;
++		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
++		error = bus_dmamap_create(ring->rx_jpool_tag, 0, &jbuf->rx_jumbo_map);
++		if (error != 0) {
++			printf("nfe%d: could not create rx jumbo buf DMA map\n", 
++			       sc->nfe_unit);
++			goto fail;
++		}
++	}
++	return 0;
++
++fail:	nfe_jpool_free(sc);
++	return error;
++}
++
++
++static void
++nfe_jpool_free(struct nfe_softc *sc)
++{
++	struct nfe_rx_ring *ring = &sc->rxq;
++	int i;
++
++	if (ring->jpool != NULL) {
++		bus_dmamem_free(ring->rx_jpool_tag, ring->jpool, ring->rx_jpool_map); 
++	}
++
++	for(i = 0; i < NFE_JPOOL_COUNT; i++) {
++		if(ring->jbuf[i].rx_jumbo_map != NULL) {
++			bus_dmamap_sync(ring->rx_jpool_tag, ring->jbuf[i].rx_jumbo_map, BUS_DMASYNC_POSTWRITE);
++			bus_dmamap_unload(ring->rx_jpool_tag, ring->jbuf[i].rx_jumbo_map);
++			bus_dmamap_destroy(ring->rx_jpool_tag, ring->jbuf[i].rx_jumbo_map);
++		}
++	}
++
++	if (ring->rx_jpool_map != NULL) {
++		bus_dmamap_destroy(ring->rx_jpool_tag, ring->rx_jpool_map); 
++		bus_dma_tag_destroy(ring->rx_jpool_tag);
++	}
++
++	mtx_destroy(&sc->nfe_jlist_mtx);
++}
++
++static struct nfe_jbuf *
++nfe_jalloc(struct nfe_softc *sc)
++{
++	struct nfe_jbuf *jbuf;
++
++	NFE_JLIST_LOCK(sc);
++	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
++	if (jbuf == NULL) {
++		NFE_JLIST_UNLOCK(sc);
++		return NULL;
++	}
++	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
++	NFE_JLIST_UNLOCK(sc);
++	return jbuf;
++}
++
++static int
++nfe_newbuf_jumbo(struct nfe_softc *sc, int i, struct mbuf *m)
++{
++	bus_dmamap_t		map;
++	struct mbuf		*m_new = NULL;
++	int			nsegs, ret = 0;
++	bus_dma_segment_t	segs;
++	struct nfe_jbuf		*jbuf = NULL;
++
++	if (m == NULL) {
++		/* Allocate the jumbo buffer */
++		jbuf = nfe_jalloc(sc);
++		if (jbuf == NULL) {
++			DPRINTFN(2, ("nfe%d: jumbo allocation failed\n",
++				    sc->nfe_unit));
++			return ENOBUFS;
++		}
++
++		/* Allocate the mbuf. */
++		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
++		if (m_new == NULL) {
++			return ENOBUFS;
++		}
++
++		/* Attach the buffer to the mbuf. */
++		m_new->m_data = (void *) jbuf->buf;
++		m_new->m_len = m_new->m_pkthdr.len = NFE_JBYTES;
++		MEXTADD(m_new, jbuf->buf, NFE_JBYTES, nfe_jfree,
++			(struct nfe_softc *)sc, 0, EXT_NET_DRV);
++	} else {
++		m_new = m;
++		m_new->m_data = m_new->m_ext.ext_buf;
++		m_new->m_ext.ext_size = NFE_JBYTES;
++	}
++
++	m_adj(m_new, ETHER_ALIGN);
++
++	map = sc->rxq.jbuf[i].rx_jumbo_map;
++	if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_jpool_tag, map, m_new, &segs,
++				    &nsegs, 0)) {
++		if (m == NULL)
++			m_freem(m_new);
++		return ENOBUFS;
++	}
++	if (nsegs != 1) {
++		if (m == NULL)
++			m_freem(m_new);
++		return ENOBUFS;
++	}
++
++	sc->rxq.jbuf[i].rx_jumbo_seg = segs;
++	sc->rxq.jbuf[i].m = m_new;
++	bus_dmamap_sync(sc->rxq.rx_jpool_tag, map, BUS_DMASYNC_PREREAD);
++	return ret;
++}
++
++/*
++ * This is called automatically by the network stack when the mbuf is freed.
++ * Caution must be taken that the NIC might be reset by the time the mbuf is
++ * freed.
++ */
++static void
++nfe_jfree(void *buf, void *arg)
++{
++	struct nfe_softc *sc = arg;
++	struct nfe_jbuf *jbuf;
++	int i;
++
++	/* find the jbuf from the base pointer */
++
++	i = ((vm_offset_t)buf - (vm_offset_t)sc->rxq.jpool) / NFE_JBYTES;
++	if (i < 0 || i >= NFE_JPOOL_COUNT) {
++		printf("nfe%d: request to free a buffer (%p) not managed by us\n", sc->nfe_unit, buf);
++		return;
++	}
++	jbuf = &sc->rxq.jbuf[i];
++
++	/* ..and put it back in the free list */
++	NFE_JLIST_LOCK(sc);
++	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
++	NFE_JLIST_UNLOCK(sc);
++}
++
++
++static void
++nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
++{
++	int i;
++
++	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
++		if (sc->nfe_flags & NFE_40BIT_ADDR) {
++			ring->desc64[i].length = htole16(ring->bufsz);
++			ring->desc64[i].flags = htole16(NFE_RX_READY);
++		} else {
++			ring->desc32[i].length = htole16(ring->bufsz);
++			ring->desc32[i].flags = htole16(NFE_RX_READY);
++		}
++	}
++
++	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, BUS_DMASYNC_PREWRITE);
++
++	ring->cur = ring->next = 0;
++}
++
++
++static void
++nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
++{
++	struct nfe_rx_data *data;
++	void *desc;
++	int i, descsize;
++
++	if (sc->nfe_flags & NFE_40BIT_ADDR) {
++		desc = ring->desc64;
++		descsize = sizeof (struct nfe_desc64);
++	} else {
++		desc = ring->desc32;
++		descsize = sizeof (struct nfe_desc32);
++	}
++
++	if (desc != NULL) {
++		bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, BUS_DMASYNC_POSTWRITE);
++		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
++		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
++		bus_dma_tag_destroy(ring->rx_desc_tag);
++	}
++
++	if (sc->nfe_flags & NFE_USE_JUMBO) {
++	        nfe_jpool_free(sc);
++	} else {
++                for (i = 0; i < NFE_RX_RING_COUNT; i++) {
++			data = &ring->data[i];
++			if (data->rx_data_map != NULL) {
++				bus_dmamap_sync(ring->rx_data_tag, data->rx_data_map,
++						BUS_DMASYNC_POSTREAD);
++				bus_dmamap_unload(ring->rx_data_tag, data->rx_data_map);
++				bus_dmamap_destroy(ring->rx_data_tag, data->rx_data_map);
++			}
++			if (data->m != NULL)
++				m_freem(data->m);
++		}
++                bus_dma_tag_destroy(ring->rx_data_tag);
++	}
++}
++
++static int
++nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
++{
++	int i, error;
++	void **desc;
++	int descsize;
++
++	if (sc->nfe_flags & NFE_40BIT_ADDR) {
++		desc = (void **)&ring->desc64;
++		descsize = sizeof (struct nfe_desc64);
++	} else {
++		desc = (void **)&ring->desc32;
++		descsize = sizeof (struct nfe_desc32);
++	}
++
++	ring->queued = 0;
++	ring->cur = ring->next = 0;
++
++	error = bus_dma_tag_create(sc->nfe_parent_tag, 
++				   PAGE_SIZE, 0,	/* alignment, boundary */
++				   BUS_SPACE_MAXADDR,   /* lowaddr */
++				   BUS_SPACE_MAXADDR,	/* highaddr */
++				   NULL, NULL,		/* filter, filterarg */
++				   NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
++				   NFE_TX_RING_COUNT * descsize,   /* maxsegsize */
++				   0,           	/* flags */
++				   NULL, NULL,		/* lockfunc, lockarg */
++				   &ring->tx_desc_tag);
++	if (error != 0) {
++		printf("nfe%d: could not create tx desc DMA tag\n", sc->nfe_unit);
++		goto fail;
++	}
++
++	error = bus_dmamem_alloc(ring->tx_desc_tag, (void **)desc, BUS_DMA_NOWAIT, &ring->tx_desc_map);
++	if (error != 0) {
++		printf("nfe%d: could not create tx desc DMA map\n", sc->nfe_unit);
++		goto fail;
++	}
++
++	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, *desc,
++	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ring->tx_desc_segs, BUS_DMA_NOWAIT);
++	if (error != 0) {
++		printf("nfe%d: could not load tx desc DMA map\n", sc->nfe_unit);
++		goto fail;
++	}
++
++	bzero(*desc, NFE_TX_RING_COUNT * descsize);
++
++	ring->tx_desc_addr = ring->tx_desc_segs.ds_addr;
++	ring->physaddr = ring->tx_desc_addr;
++
++	error = bus_dma_tag_create(sc->nfe_parent_tag, 
++				   2, 0,	
++				   BUS_SPACE_MAXADDR,
++				   BUS_SPACE_MAXADDR,	
++				   NULL, NULL,		
++				   NFE_JBYTES, 
++				   NFE_MAX_SCATTER,
++				   NFE_JBYTES, 
++				   0,
++				   NULL, NULL,	
++				   &ring->tx_data_tag);
++	if (error != 0) {
++		printf("nfe%d: could not create tx buf DMA tag\n", sc->nfe_unit);
++		goto fail;
++	}
++
++	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
++		error = bus_dmamap_create(ring->tx_data_tag, 0, &ring->data[i].tx_data_map);
++		if (error != 0) {
++			printf("nfe%d: could not create tx buf DMA map\n", sc->nfe_unit);
++			goto fail;
++		}
++	}
++
++	return 0;
++
++fail:	
++	return error;
++}
++
++
++static void
++nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
++{
++	struct nfe_tx_data *data;
++	int i;
++
++	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
++		if (sc->nfe_flags & NFE_40BIT_ADDR)
++			ring->desc64[i].flags = 0;
++		else
++			ring->desc32[i].flags = 0;
++
++		data = &ring->data[i];
++
++		if (data->m != NULL) {
++			bus_dmamap_sync(ring->tx_data_tag, data->active, BUS_DMASYNC_POSTWRITE);
++			bus_dmamap_unload(ring->tx_data_tag, data->active);
++			m_freem(data->m);
++			data->m = NULL;
++		}
++	}
++
++	bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, BUS_DMASYNC_PREWRITE);
++
++	ring->queued = 0;
++	ring->cur = ring->next = 0;
++}
++
++static void
++nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
++{
++	struct nfe_tx_data *data;
++	void *desc;
++	int i, descsize;
++
++	if (sc->nfe_flags & NFE_40BIT_ADDR) {
++		desc = ring->desc64;
++		descsize = sizeof (struct nfe_desc64);
++	} else {
++		desc = ring->desc32;
++		descsize = sizeof (struct nfe_desc32);
++	}
++
++	if (desc != NULL) {
++		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, BUS_DMASYNC_POSTWRITE);
++		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
++		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
++		bus_dma_tag_destroy(ring->tx_desc_tag);
++	}
++
++	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
++		data = &ring->data[i];
++
++		if (data->m != NULL) {
++			bus_dmamap_sync(ring->tx_data_tag, data->active, BUS_DMASYNC_POSTWRITE);
++			bus_dmamap_unload(ring->tx_data_tag, data->active);
++			m_freem(data->m);
++		}
++	}
++
++	/* ..and now actually destroy the DMA mappings */
++	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
++		data = &ring->data[i];
++		if (data->tx_data_map == NULL)
++			continue;
++		bus_dmamap_destroy(ring->tx_data_tag, data->tx_data_map);
++	}
++
++	bus_dma_tag_destroy(ring->tx_data_tag);
++}
++
++#ifdef DEVICE_POLLING
++static poll_handler_t nfe_poll;
++
++static void
++nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
++{
++	struct  nfe_softc *sc = ifp->if_softc;
++
++	NFE_LOCK(sc);
++	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
++		nfe_poll_locked(ifp, cmd, count);
++	NFE_UNLOCK(sc);
++}
++
++
++static void
++nfe_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
++{
++	struct  nfe_softc *sc = ifp->if_softc;
++	u_int32_t r;
++
++	NFE_LOCK_ASSERT(sc);
++
++	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
++		return;
++	}
++
++	sc->rxcycles = count;
++	nfe_rxeof(sc);
++	nfe_txeof(sc);
++	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
++#ifdef NFE_TXTASK
++		taskqueue_enqueue(taskqueue_swi, &sc->nfe_txtask);
++#else
++		nfe_start_locked(ifp);
++#endif
++
++	if (cmd == POLL_AND_CHECK_STATUS) {
++		if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0) {
++			return;
++		}
++		NFE_WRITE(sc, NFE_IRQ_STATUS, r);
++
++		if (r & NFE_IRQ_LINK) {
++			NFE_READ(sc, NFE_PHY_STATUS);
++			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
++			DPRINTF(("nfe%d: link state changed\n", sc->nfe_unit));
++		}
++	}
++}
++#endif /* DEVICE_POLLING */
++
++
++static int
++nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
++{
++	int			error = 0;
++	struct nfe_softc	*sc = ifp->if_softc;
++	struct ifreq		*ifr = (struct ifreq *) data;
++	struct mii_data		*mii;
++
++	switch (cmd) {
++	case SIOCSIFMTU:
++		if (ifr->ifr_mtu < ETHERMIN ||
++		    ((sc->nfe_flags & NFE_USE_JUMBO) &&
++		    ifr->ifr_mtu > ETHERMTU_JUMBO) ||
++		    (!(sc->nfe_flags & NFE_USE_JUMBO) &&
++		    ifr->ifr_mtu > ETHERMTU))
++			error = EINVAL;
++		else if (ifp->if_mtu != ifr->ifr_mtu) {
++			ifp->if_mtu = ifr->ifr_mtu;
++			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
++			nfe_init(sc);
++		}
++		break;
++	case SIOCSIFFLAGS:
++		NFE_LOCK(sc);
++		if (ifp->if_flags & IFF_UP) {
++			/*
++			 * If only the PROMISC or ALLMULTI flag changes, then
++			 * don't do a full re-init of the chip, just update
++			 * the Rx filter.
++			 */
++			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
++			    ((ifp->if_flags ^ sc->nfe_if_flags) &
++			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
++				nfe_setmulti(sc);
++			else
++				nfe_init_locked(sc);
++		} else {
++			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
++				nfe_stop(ifp, 1);
++		}
++		sc->nfe_if_flags = ifp->if_flags;
++		NFE_UNLOCK(sc);
++		error = 0;
++		break;
++	case SIOCADDMULTI:
++	case SIOCDELMULTI:
++		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
++			NFE_LOCK(sc);
++			nfe_setmulti(sc);
++			NFE_UNLOCK(sc);
++			error = 0;
++		}
++		break;
++	case SIOCSIFMEDIA:
++	case SIOCGIFMEDIA:
++		mii = device_get_softc(sc->nfe_miibus);
++		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
++		break;
++	case SIOCSIFCAP:
++	    {
++		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
++#ifdef DEVICE_POLLING
++		if (mask & IFCAP_POLLING) {
++			if (ifr->ifr_reqcap & IFCAP_POLLING) {
++				error = ether_poll_register(nfe_poll, ifp);
++				if (error)
++					return(error);
++				NFE_LOCK(sc);
++				NFE_WRITE(sc, NFE_IRQ_MASK, 0);
++				ifp->if_capenable |= IFCAP_POLLING;   
++				NFE_UNLOCK(sc);
++			} else {
++				error = ether_poll_deregister(ifp);
++				/* Enable interrupt even in error case */
++				NFE_LOCK(sc);
++				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
++				ifp->if_capenable &= ~IFCAP_POLLING;
++				NFE_UNLOCK(sc);
++			}
++		}
++#endif
++		if (mask & IFCAP_HWCSUM) {
++			ifp->if_capenable ^= IFCAP_HWCSUM;
++			if (IFCAP_HWCSUM & ifp->if_capenable &&
++			    IFCAP_HWCSUM & ifp->if_capabilities)
++				ifp->if_hwassist = NFE_CSUM_FEATURES;
++			else
++				ifp->if_hwassist = 0;
++		}
++	    }
++		break;
++
++	default:
++		error = ether_ioctl(ifp, cmd, data);
++		break;
++	}
++
++	return error;
++}
++
++
++static void nfe_intr(void *arg)
++{
++	struct nfe_softc *sc = arg;
++	struct ifnet *ifp = sc->nfe_ifp;
++	u_int32_t r;
++
++	NFE_LOCK(sc); 
++
++#ifdef DEVICE_POLLING
++	if (ifp->if_capenable & IFCAP_POLLING) {
++		NFE_UNLOCK(sc);
++		return;
++	}
++#endif
++
++	if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0) {
++	        NFE_UNLOCK(sc);
++		return;	/* not for us */
++	}
++	NFE_WRITE(sc, NFE_IRQ_STATUS, r);
++
++	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));
++
++	if (r & NFE_IRQ_LINK) {
++		NFE_READ(sc, NFE_PHY_STATUS);
++		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
++		DPRINTF(("nfe%d: link state changed\n", sc->nfe_unit));
++	}
++
++	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
++		/* check Rx ring */
++		nfe_rxeof(sc);
++		/* check Tx ring */
++		nfe_txeof(sc);
++	}
++
++	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
++	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
++#ifdef NFE_TXTASK
++		taskqueue_enqueue(taskqueue_swi, &sc->nfe_txtask);
++#else
++		nfe_start_locked(ifp);
++#endif
++
++	NFE_UNLOCK(sc);
++
++	return;
++}
++
++static void nfe_rxeof(struct nfe_softc *sc)
++{
++	struct ifnet *ifp = sc->nfe_ifp;
++	struct nfe_desc32 *desc32 = NULL;
++	struct nfe_desc64 *desc64 = NULL;
++	struct nfe_rx_data *data;
++	struct mbuf *m, *mnew;
++	bus_addr_t physaddr;
++	u_int16_t flags;
++	int error, len, i;
++	int nsegs;
++	bus_dma_segment_t segs;
++
++	NFE_LOCK_ASSERT(sc);
++
++	for (;;) {
++
++#ifdef DEVICE_POLLING
++		if (ifp->if_capenable & IFCAP_POLLING) {
++			if (sc->rxcycles <= 0)
++				break;
++			sc->rxcycles--;
++		}
++#endif
++		bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
++				BUS_DMASYNC_POSTREAD);
++
++		data = &sc->rxq.data[sc->rxq.cur];
++
++		if (sc->nfe_flags & NFE_40BIT_ADDR) {
++			desc64 = &sc->rxq.desc64[sc->rxq.cur];
++			flags = letoh16(desc64->flags);
++			len = letoh16(desc64->length) & 0x3fff;
++		} else {
++			desc32 = &sc->rxq.desc32[sc->rxq.cur];
++			flags = letoh16(desc32->flags);
++			len = letoh16(desc32->length) & 0x3fff;
++		}
++
++		if (flags & NFE_RX_READY)
++			break;
++
++		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
++			if (!(flags & NFE_RX_VALID_V1))
++				goto skip;
++			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
++				flags &= ~NFE_RX_ERROR;
++				len--;	/* fix buffer length */
++			}
++		} else {
++			if (!(flags & NFE_RX_VALID_V2)) 
++				goto skip;
++
++			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
++				flags &= ~NFE_RX_ERROR;
++				len--;	/* fix buffer length */
++			}
++		}
++
++		if (flags & NFE_RX_ERROR) {
++			ifp->if_ierrors++;
++			goto skip;
++		}
++
++		/*
++		 * Try to allocate a new mbuf for this ring element and load
++		 * it before processing the current mbuf. If the ring element
++		 * cannot be loaded, drop the received packet and reuse the
++		 * old mbuf. In the unlikely case that the old mbuf can't be
++		 * reloaded either, explicitly panic.
++		 */
++
++		if (sc->nfe_flags & NFE_USE_JUMBO) {
++			i = ((vm_offset_t)mtod(data->m, caddr_t) -
++			     (vm_offset_t)sc->rxq.jpool) / NFE_JBYTES;
++			if (i < 0 || i >= NFE_JPOOL_COUNT) {
++				ifp->if_ierrors++;
++				goto skip;
++			}
++			bus_dmamap_sync(sc->rxq.rx_jpool_tag,
++					sc->rxq.jbuf[i].rx_jumbo_map,
++					BUS_DMASYNC_POSTREAD);
++			bus_dmamap_unload(sc->rxq.rx_jpool_tag,
++					  sc->rxq.jbuf[i].rx_jumbo_map);
++
++			if (nfe_newbuf_jumbo(sc, i, NULL) == ENOBUFS) {
++				ifp->if_ierrors++;
++				nfe_newbuf_jumbo(sc, i, data->m);
++				goto skip;
++			}
++
++			mnew = sc->rxq.jbuf[i].m;
++			physaddr = sc->rxq.jbuf[i].rx_jumbo_seg.ds_addr;
++		} else {
++			mnew = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
++			if (mnew == NULL) {
++				ifp->if_ierrors++;
++				goto skip;
++			}
++			mnew->m_len = mnew->m_pkthdr.len = MCLBYTES;
++
++			bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
++			    BUS_DMASYNC_POSTREAD);
++			bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
++			error = bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag,
++			    data->rx_data_map, mnew, &segs, &nsegs, BUS_DMA_NOWAIT);
++			if (error != 0 || nsegs != 1) {
++				m_freem(mnew);
++
++				/* try to reload the old mbuf */
++				error = bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag,
++								data->rx_data_map,
++								data->m, &segs,
++								&nsegs, BUS_DMA_NOWAIT);
++				if (error != 0) {
++					/* very unlikely that it will fail.. */
++					panic("nfe%d: could not load old rx mbuf",
++					    sc->nfe_unit);
++				}
++				ifp->if_ierrors++;
++				goto skip;
++			}
++			physaddr = segs.ds_addr;
++		}
++
++		/*
++		 * New mbuf successfully loaded, update Rx ring and continue
++		 * processing.
++		 */
++		m = data->m;
++		data->m = mnew;
++
++		/* finalize mbuf */
++		m->m_pkthdr.len = m->m_len = len;
++		m->m_pkthdr.rcvif = ifp;
++
++
++#if defined(NFE_CSUM)
++		if ((sc->nfe_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK)) {
++			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
++			if (flags & NFE_RX_IP_CSUMOK_V2) {
++				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
++			}
++			if (flags & NFE_RX_UDP_CSUMOK_V2 ||
++			    flags & NFE_RX_TCP_CSUMOK_V2) {
++				m->m_pkthdr.csum_flags |=
++				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
++				m->m_pkthdr.csum_data = 0xffff;
++			}
++		}
++#endif
++		ifp->if_ipackets++;
++
++		NFE_UNLOCK(sc);
++		(*ifp->if_input)(ifp, m);
++		NFE_LOCK(sc);
++
++		/* update mapping address in h/w descriptor */
++		if (sc->nfe_flags & NFE_40BIT_ADDR) {
++#if defined(__LP64__)
++			desc64->physaddr[0] = htole32(physaddr >> 32);
++#endif
++			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
++		} else {
++			desc32->physaddr = htole32(physaddr);
++		}
++
++skip:		if (sc->nfe_flags & NFE_40BIT_ADDR) {
++			desc64->length = htole16(sc->rxq.bufsz);
++			desc64->flags = htole16(NFE_RX_READY);
++
++			bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
++					BUS_DMASYNC_PREWRITE);
++		} else {
++			desc32->length = htole16(sc->rxq.bufsz);
++			desc32->flags = htole16(NFE_RX_READY);
++
++			bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
++					BUS_DMASYNC_PREWRITE);
++		}
++
++		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
++	}
++}
++
++static void nfe_txeof(struct nfe_softc *sc)
++{
++	struct ifnet *ifp = sc->nfe_ifp;
++	struct nfe_desc32 *desc32;
++	struct nfe_desc64 *desc64;
++	struct nfe_tx_data *data = NULL;
++	u_int16_t flags;
++
++	NFE_LOCK_ASSERT(sc);
++
++	while (sc->txq.next != sc->txq.cur) {
++
++		bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
++				BUS_DMASYNC_POSTREAD);
++
++		if (sc->nfe_flags & NFE_40BIT_ADDR) {
++			desc64 = &sc->txq.desc64[sc->txq.next];
++			flags = letoh16(desc64->flags);
++		} else {
++			desc32 = &sc->txq.desc32[sc->txq.next];
++			flags = letoh16(desc32->flags);
++		}
++
++		if (flags & NFE_TX_VALID)
++			break;
++
++		data = &sc->txq.data[sc->txq.next];
++
++		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
++			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
++				goto skip;
++
++			if ((flags & NFE_TX_ERROR_V1) != 0) {
++				printf("nfe%d: tx v1 error 0x%4b\n",
++				    sc->nfe_unit, flags, NFE_V1_TXERR); 
++
++				ifp->if_oerrors++;
++			} else
++				ifp->if_opackets++;
++		} else {
++			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
++				goto skip;
++
++			if ((flags & NFE_TX_ERROR_V2) != 0) {
++				printf("nfe%d: tx v2 error 0x%4b\n",
++				    sc->nfe_unit, flags, NFE_V2_TXERR); 
++
++				ifp->if_oerrors++;
++			} else
++				ifp->if_opackets++;
++		}
++
++		if (data->m == NULL) {	/* should not get here */
++			printf("nfe%d: last fragment bit w/o associated mbuf!\n",
++			    sc->nfe_unit);
++			goto skip;
++		}
++
++		/* last fragment of the mbuf chain transmitted */
++		bus_dmamap_sync(sc->txq.tx_data_tag, data->active,
++				BUS_DMASYNC_POSTWRITE);
++		bus_dmamap_unload(sc->txq.tx_data_tag, data->active);
++		m_freem(data->m);
++		data->m = NULL;
++
++		ifp->if_timer = 0;
++
++skip:		sc->txq.queued--;
++		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
++	}
++
++	if (data != NULL) {	/* at least one slot freed */
++		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
++#ifdef NFE_TXTASK
++		taskqueue_enqueue(taskqueue_swi, &sc->nfe_txtask);
++#else
++		nfe_start_locked(ifp);
++#endif
++	}
++}
++
++static int nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
++{
++	struct nfe_desc32 *desc32 = NULL;
++	struct nfe_desc64 *desc64 = NULL;
++	struct nfe_tx_data *data = NULL;
++	bus_dmamap_t map;
++	u_int16_t flags = 0;
++#if NVLAN > 0
++	struct m_tag *vtag = NULL;
++#endif
++	bus_dma_segment_t segs[NFE_MAX_SCATTER];
++	int nsegs;
++	int error, i, j;
++
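++	/* reclaim completed Tx descriptors once the ring is more than 1/8 full */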
++	if (sc->txq.queued > NFE_TX_RING_COUNT/8) {
++		nfe_txeof(sc);
++	}
++
++	map = sc->txq.data[sc->txq.cur].tx_data_map;
++
++	error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, m0, segs,
++					&nsegs, BUS_DMA_NOWAIT);
++	if (error) {
++		if (error == EFBIG) {
++			struct mbuf *m;
++
++			m = m_defrag(m0, M_DONTWAIT);
++			if (m == NULL)
++				return (ENOBUFS);
++			m0 = m;
++			error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag,
++							map, m0, segs, &nsegs,
++							BUS_DMA_NOWAIT);
++		}
++		if (error)
++			return error;
++	}
++
++	if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 1) {
++		bus_dmamap_unload(sc->txq.tx_data_tag, map);
++		return ENOBUFS;
++	}
++
++	/*
++	 * Imported from DragonflyBSD.
++	 * XXX
++	 * Without the following delay, certain MCP51 chips (e.g. the MCP51
++	 * on a GA-M51GM-S2G rev 10B) choke during bulk data transfers.
++	 */
++	if (nfe_encap_delay != 0) {
++		DELAY(nfe_encap_delay);
++	}
++
++#if NVLAN > 0
++	/* setup h/w VLAN tagging */
++	if (sc->nfe_flags & NFE_HW_VLAN)
++		vtag = VLAN_OUTPUT_TAG(sc->nfe_ifp, m0);
++#endif
++
++#ifdef NFE_CSUM
++	if (m0->m_pkthdr.csum_flags & CSUM_IP)
++		flags |= NFE_TX_IP_CSUM;
++	if (m0->m_pkthdr.csum_flags & CSUM_TCP)
++		flags |= NFE_TX_TCP_CSUM;
++	if (m0->m_pkthdr.csum_flags & CSUM_UDP)
++		flags |= NFE_TX_TCP_CSUM;
++#endif
++
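++	/* fill one descriptor per DMA segment; the length field holds size - 1 */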
++	for (i = 0; i < nsegs; i++) {
++		j = (sc->txq.cur + i) % NFE_TX_RING_COUNT;
++		data = &sc->txq.data[j];
++
++		if (sc->nfe_flags & NFE_40BIT_ADDR) {
++			desc64 = &sc->txq.desc64[j];
++#if defined(__LP64__)
++			desc64->physaddr[0] = htole32(segs[i].ds_addr >> 32);
++#endif
++			desc64->physaddr[1] = htole32(segs[i].ds_addr &
++			    0xffffffff);
++			desc64->length = htole16(segs[i].ds_len - 1);
++			desc64->flags = htole16(flags);
++#if NVLAN > 0
++			if (vtag != NULL) {
++				desc64->vtag = htole32(NFE_TX_VTAG |
++				    VLAN_TAG_VALUE(vtag));
++			}
++#endif
++		} else {
++			desc32 = &sc->txq.desc32[j];
++
++			desc32->physaddr = htole32(segs[i].ds_addr);
++			desc32->length = htole16(segs[i].ds_len - 1);
++			desc32->flags = htole16(flags);
++		}
++
++		/* csum flags and vtag belong to the first fragment only */
++		if (nsegs > 1) {
++			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
++#if NVLAN > 0
++			vtag = NULL;
++#endif
++		}
++
++		sc->txq.queued++;
++	}
++
++	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
++	if (sc->nfe_flags & NFE_40BIT_ADDR) {
++		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
++	} else {
++		if (sc->nfe_flags & NFE_JUMBO_SUP)
++			flags |= NFE_TX_LASTFRAG_V2;
++		else
++			flags |= NFE_TX_LASTFRAG_V1;
++		desc32->flags = htole16(flags);
++	}
++
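++	/*
++	 * Hand the descriptors to the hardware in reverse order, so the
++	 * first descriptor of the chain is marked valid last and the NIC
++	 * never sees a partially built frame.
++	 */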
++	for (i = nsegs - 1; i >= 0; --i) {
++		j = (sc->txq.cur + i) % NFE_TX_RING_COUNT;
++		if (sc->nfe_flags & NFE_40BIT_ADDR) {
++			desc64 = &sc->txq.desc64[j];
++			desc64->flags |= htole16(NFE_TX_VALID);
++		} else {
++			desc32 = &sc->txq.desc32[j];
++			desc32->flags |= htole16(NFE_TX_VALID);
++		}
++	}
++	sc->txq.cur = (sc->txq.cur + nsegs) % NFE_TX_RING_COUNT;
++
++	data->m = m0;
++	data->active = map;
++	data->nsegs = nsegs;
++
++	bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
++
++	return 0;
++}
++
++
++static void nfe_setmulti(struct nfe_softc *sc)
++{
++	struct ifnet *ifp = sc->nfe_ifp;
++	struct ifmultiaddr	*ifma;
++	u_int8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
++	u_int32_t filter = NFE_RXFILTER_MAGIC;
++	u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] =
++	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
++	int i;
++
++	NFE_LOCK_ASSERT(sc);
++
++	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
++		bzero(addr, ETHER_ADDR_LEN);
++		bzero(mask, ETHER_ADDR_LEN);
++		goto done;
++	}
++
++	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
++	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
++
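++	/*
++	 * Fold all subscribed addresses into an address/mask pair: addr
++	 * keeps the bits common to every address, mask keeps the bits that
++	 * never vary, so the chip accepts the smallest covering superset.
++	 */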
++	IF_ADDR_LOCK(ifp);
++	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
++		u_char *addrp;
++
++		if (ifma->ifma_addr->sa_family != AF_LINK)
++			continue;
++
++		addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
++		for (i = 0; i < ETHER_ADDR_LEN; i++) {
++			u_int8_t mcaddr = addrp[i];
++			addr[i] &= mcaddr;
++			mask[i] &= ~mcaddr;
++		}
++	}
++	IF_ADDR_UNLOCK(ifp);
++
++	for (i = 0; i < ETHER_ADDR_LEN; i++) {
++		mask[i] |= addr[i];
++	}
++
++done:
++	addr[0] |= 0x01;	/* make sure multicast bit is set */
++
++	NFE_WRITE(sc, NFE_MULTIADDR_HI,
++	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
++	NFE_WRITE(sc, NFE_MULTIADDR_LO,
++	    addr[5] <<  8 | addr[4]);
++	NFE_WRITE(sc, NFE_MULTIMASK_HI,
++	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
++	NFE_WRITE(sc, NFE_MULTIMASK_LO,
++	    mask[5] <<  8 | mask[4]);
++
++	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
++	NFE_WRITE(sc, NFE_RXFILTER, filter);
++}
++
++static void nfe_start(struct ifnet *ifp)
++{
++	struct nfe_softc *sc;
++
++	sc = ifp->if_softc;
++	NFE_LOCK(sc);
++	nfe_start_locked(ifp);
++	NFE_UNLOCK(sc);
++}
++
++static void nfe_start_locked(struct ifnet *ifp)
++{
++	struct nfe_softc *sc = ifp->if_softc;
++	int old = sc->txq.cur;
++	struct mbuf *m0;
++
++	NFE_LOCK_ASSERT(sc);
++
++	if (!sc->nfe_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) {
++		return;
++	}
++
++	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
++
++		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
++		if (m0 == NULL)
++			break;
++
++		if (nfe_encap(sc, m0) != 0) {
++			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
++			IFQ_DRV_PREPEND(&ifp->if_snd, m0);
++			break;
++		}
++
++		BPF_MTAP(ifp, m0);
++	}
++	if (sc->txq.cur == old)	{ /* nothing sent */
++		return;
++	}
++
++	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
++			BUS_DMASYNC_PREWRITE);		
++
++	/* kick Tx */
++	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
++
++	/*
++	 * Set a timeout in case the chip goes out to lunch.
++	 */
++	ifp->if_timer = 5;
++
++	return;
++}
++
++static void nfe_watchdog(struct ifnet *ifp)
++{
++	struct nfe_softc *sc = ifp->if_softc;
++	int pendings;
++
++	NFE_LOCK(sc);
++
++	pendings = sc->txq.queued;
++	nfe_txeof(sc);
++	if (sc->txq.queued < pendings) {
++		NFE_UNLOCK(sc);
++		return;
++	}
++
++	printf("nfe%d: watchdog timeout (%d)\n", sc->nfe_unit, sc->txq.queued);
++
++	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
++	nfe_init_locked(sc);
++
++	ifp->if_oerrors++;
++
++	NFE_UNLOCK(sc);
++
++	return;
++}
++
++static void nfe_init(void *xsc)
++{
++	struct nfe_softc *sc = xsc;
++
++	NFE_LOCK(sc);
++	nfe_init_locked(sc);
++	NFE_UNLOCK(sc);
++
++	return;
++}
++
++static void nfe_init_locked(void *xsc)
++{
++	struct nfe_softc *sc = xsc;
++	struct ifnet *ifp = sc->nfe_ifp;
++	struct mii_data *mii;
++	u_int32_t tmp;
++
++	NFE_LOCK_ASSERT(sc);
++
++	mii = device_get_softc(sc->nfe_miibus);
++
++	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
++		return;
++	}
++
++	nfe_stop(ifp, 0);
++
++	NFE_WRITE(sc, NFE_TX_UNK, 0);
++	NFE_WRITE(sc, NFE_STATUS, 0);
++
++	sc->rxtxctl = NFE_RXTX_BIT2;
++	if (sc->nfe_flags & NFE_40BIT_ADDR)
++		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
++	else if (sc->nfe_flags & NFE_JUMBO_SUP)
++		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
++#ifdef NFE_CSUM
++	if (sc->nfe_flags & NFE_HW_CSUM)
++		sc->rxtxctl |= NFE_RXTX_RXCSUM;
++#endif
++
++#if NVLAN > 0
++	/*
++	 * Although the adapter is capable of stripping VLAN tags from received
++	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
++	 * purpose.  This will be done in software by our network stack.
++	 */
++	if (sc->nfe_flags & NFE_HW_VLAN)
++		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
++#endif
++
++	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
++	DELAY(10);
++	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
++
++#if NVLAN > 0
++	if (sc->nfe_flags & NFE_HW_VLAN)
++		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
++#endif
++
++	NFE_WRITE(sc, NFE_SETUP_R6, 0);
++
++	/* set MAC address */
++	nfe_set_macaddr(sc, sc->eaddr);
++
++	/* tell MAC where rings are in memory */
++#ifdef __LP64__
++	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
++#endif
++	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
++#ifdef __LP64__
++	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
++#endif
++	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);
++
++	NFE_WRITE(sc, NFE_RING_SIZE,
++	    (NFE_RX_RING_COUNT - 1) << 16 |
++	    (NFE_TX_RING_COUNT - 1));
++
++	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);
++
++	/* force MAC to wakeup */
++	tmp = NFE_READ(sc, NFE_PWR_STATE);
++	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
++	DELAY(10);
++	tmp = NFE_READ(sc, NFE_PWR_STATE);
++	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);
++
++	if (opt_mode == NFE_MODE_CPU) {
++		/* configure interrupts coalescing/mitigation */
++		NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
++	} else {
++		/* no interrupt mitigation: one interrupt per packet */
++		NFE_WRITE(sc, NFE_IMTIMER, 970);
++	}
++
++	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
++	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
++	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
++
++	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
++	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
++
++	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
++	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);
++
++	sc->rxtxctl &= ~NFE_RXTX_BIT2;
++	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
++	DELAY(10);
++	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
++
++	/* set Rx filter */
++	nfe_setmulti(sc);
++
++	/* enable Rx */
++	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
++
++	/* enable Tx */
++	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
++
++	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
++
++#ifdef DEVICE_POLLING
++	if (ifp->if_capenable & IFCAP_POLLING)
++		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
++	else
++#endif
++	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); /* enable interrupts */
++
++	sc->nfe_link = 0;
++
++	nfe_ifmedia_upd(ifp);
++
++	ifp->if_drv_flags |= IFF_DRV_RUNNING;
++	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
++
++	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
++
++	return;
++}
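
A note on the init path above: each ring's 64-bit bus address is
programmed as two 32-bit register halves (the HI write only exists on
LP64), and both ring sizes share the single NFE_RING_SIZE register.  A
minimal standalone sketch of that encoding, with helper names that are
illustrative only, not part of the driver:

    #include <stdint.h>

    /* High and low halves as written to NFE_*_RING_ADDR_HI/LO. */
    static inline uint32_t ring_addr_hi(uint64_t pa) { return (uint32_t)(pa >> 32); }
    static inline uint32_t ring_addr_lo(uint64_t pa) { return (uint32_t)pa; }

    /* NFE_RING_SIZE: (rx_count - 1) in the high 16 bits,
     * (tx_count - 1) in the low 16 bits. */
    static inline uint32_t ring_size_word(uint32_t rx_count, uint32_t tx_count)
    {
        return ((rx_count - 1) << 16) | (tx_count - 1);
    }

    /* With the ring counts from if_nfereg.h below:
     * ring_size_word(128, 256) == 0x007f00ff. */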
++
++static void nfe_stop(struct ifnet *ifp, int disable)
++{
++	struct nfe_softc *sc = ifp->if_softc;
++	struct mii_data  *mii;
++
++	NFE_LOCK_ASSERT(sc);
++
++	ifp->if_timer = 0;
++	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
++
++	mii = device_get_softc(sc->nfe_miibus);
++
++	callout_stop(&sc->nfe_stat_ch);
++
++	/* abort Tx */
++	NFE_WRITE(sc, NFE_TX_CTL, 0);
++
++	/* disable Rx */
++	NFE_WRITE(sc, NFE_RX_CTL, 0);
++
++	/* disable interrupts */
++	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
++
++	sc->nfe_link = 0;
++
++	/* reset Tx and Rx rings */
++	nfe_reset_tx_ring(sc, &sc->txq);
++	nfe_reset_rx_ring(sc, &sc->rxq);
++
++	return;
++}
++
++static int nfe_ifmedia_upd(struct ifnet *ifp)
++{
++	struct nfe_softc	*sc = ifp->if_softc;
++	int error;
++
++	NFE_LOCK(sc);
++	error = nfe_ifmedia_upd_locked(ifp);
++	NFE_UNLOCK(sc);
++
++	return (error);
++}
++
++static int nfe_ifmedia_upd_locked(struct ifnet *ifp)
++{
++	struct nfe_softc	*sc = ifp->if_softc;
++	struct mii_data		*mii;
++
++	NFE_LOCK_ASSERT(sc);
++
++	mii = device_get_softc(sc->nfe_miibus);
++
++	if (mii->mii_instance) {
++		struct mii_softc *miisc;
++
++		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
++			mii_phy_reset(miisc);
++	}
++	mii_mediachg(mii);
++
++	return (0);
++}
++
++static void nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
++{
++	struct nfe_softc	*sc;
++	struct mii_data		*mii;
++
++	sc = ifp->if_softc;
++
++	NFE_LOCK(sc);
++	mii = device_get_softc(sc->nfe_miibus);
++	mii_pollstat(mii);
++	ifmr->ifm_active = mii->mii_media_active;
++	ifmr->ifm_status = mii->mii_media_status;
++	NFE_UNLOCK(sc);
++
++	return;
++}
++
++static void nfe_tick(void *xsc)
++{
++	struct nfe_softc *sc;
++
++	sc = xsc;
++
++	NFE_LOCK(sc);
++	nfe_tick_locked(sc);
++	NFE_UNLOCK(sc);
++}
++
++void nfe_tick_locked(struct nfe_softc *arg)
++{
++	struct nfe_softc	*sc;
++	struct mii_data		*mii;
++	struct ifnet		*ifp;
++
++	sc = arg;
++
++	NFE_LOCK_ASSERT(sc);
++
++	ifp = sc->nfe_ifp;
++
++	mii = device_get_softc(sc->nfe_miibus);
++	mii_tick(mii);
++
++	if (!sc->nfe_link) {
++		if (mii->mii_media_status & IFM_ACTIVE &&
++		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
++			sc->nfe_link++;
++			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T
++			    && bootverbose)
++				if_printf(sc->nfe_ifp, "gigabit link up\n");
++			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
++#ifdef NFE_TXTASK
++				taskqueue_enqueue(taskqueue_swi,
++				    &sc->nfe_txtask);
++#else
++				nfe_start_locked(ifp);
++#endif
++		}
++	}
++	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
++
++	return;
++}
++
++#ifdef NFE_TXTASK
++static void nfe_tx_task(void *arg, int npending)
++{
++	struct ifnet *ifp;
++
++	ifp = arg;
++	nfe_start(ifp);
++
++	return;
++}
++#endif
++
++static void nfe_shutdown(device_t dev)
++{
++	struct nfe_softc *sc;
++	struct ifnet *ifp;
++
++	sc = device_get_softc(dev);
++
++	NFE_LOCK(sc);
++	ifp = sc->nfe_ifp;
++	nfe_stop(ifp, 0);
++	/* nfe_reset(sc); */
++	NFE_UNLOCK(sc);
++
++	return;
++}
++
++static void nfe_get_macaddr(struct nfe_softc *sc, u_char *addr)
++{
++	uint32_t tmp;
++
++	tmp = NFE_READ(sc, NFE_MACADDR_LO);
++	addr[0] = (tmp >> 8) & 0xff;
++	addr[1] = (tmp & 0xff);
++
++	tmp = NFE_READ(sc, NFE_MACADDR_HI);
++	addr[2] = (tmp >> 24) & 0xff;
++	addr[3] = (tmp >> 16) & 0xff;
++	addr[4] = (tmp >>  8) & 0xff;
++	addr[5] = (tmp & 0xff);
++}
++
++static void nfe_set_macaddr(struct nfe_softc *sc, u_char *addr)
++{
++
++	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] <<  8 | addr[4]);
++	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
++	    addr[1] << 8 | addr[0]);
++}
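
Worth flagging in the two helpers above: they intentionally use opposite
byte orders.  The factory-programmed station address sits byte-reversed
in NFE_MACADDR_LO/HI at power-up, so nfe_get_macaddr() decodes it in
reversed order, while nfe_set_macaddr() writes it back in the layout the
receive filter expects.  A worked round-trip with hypothetical register
values (userland sketch, not driver code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t addr[6];
        uint32_t lo = 0x0000c1a2, hi = 0xb3c4d5e6; /* hypothetical factory values */

        /* decode as nfe_get_macaddr() does -> c1:a2:b3:c4:d5:e6 */
        addr[0] = (lo >> 8) & 0xff;  addr[1] = lo & 0xff;
        addr[2] = (hi >> 24) & 0xff; addr[3] = (hi >> 16) & 0xff;
        addr[4] = (hi >> 8) & 0xff;  addr[5] = hi & 0xff;

        /* re-encode as nfe_set_macaddr() does: the register layout flips */
        assert(((uint32_t)addr[5] << 8 | addr[4]) == 0x0000e6d5);
        assert(((uint32_t)addr[3] << 24 | (uint32_t)addr[2] << 16 |
            (uint32_t)addr[1] << 8 | addr[0]) == 0xc4b3a2c1);
        return 0;
    }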
++
++/*
++ * Map a single buffer address.
++ */
++
++static void
++nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
++{
++
++	if (error)
++		return;
++
++	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
++
++	*(bus_dma_segment_t *)arg = *segs;
++
++	return;
++}
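
This callback is the standard busdma shape for loading a single-segment
DMA buffer: bus_dmamap_load() resolves the virtual buffer into physical
segments and invokes the callback, which copies the lone segment out
through the opaque argument.  A sketch of a typical call site, assuming
ring fields like the ones declared in if_nfevar.h below (illustrative,
not the patch's actual attach code):

    bus_dma_segment_t seg;
    int error;

    error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map,
        ring->desc32, NFE_RX_RING_COUNT * sizeof(struct nfe_desc32),
        nfe_dma_map_segs, &seg, BUS_DMA_NOWAIT);
    if (error == 0)
        ring->physaddr = seg.ds_addr; /* exactly one segment, per the KASSERT */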
+diff -Nurd sys/dev/nfe/if_nfereg.h sys/dev/nfe/if_nfereg.h
+--- sys/dev/nfe/if_nfereg.h	1970-01-01 01:00:00.000000000 +0100
++++ sys/dev/nfe/if_nfereg.h	2006-09-05 07:01:26.000000000 +0200
+@@ -0,0 +1,248 @@
++/*	$OpenBSD: if_nfereg.h,v 1.16 2006/02/22 19:23:44 damien Exp $	*/
++
++/*-
++ * Copyright (c) 2005 Jonathan Gray <jsg at openbsd.org>
++ *
++ * Permission to use, copy, modify, and distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ *
++ * $FreeBSD: src/sys/dev/nfe/if_nfereg.h,v 1.2 2006/06/26 23:41:07 obrien Exp $
++ */
++
++#define NFE_PCI_BA		0x10
++
++#define NFE_RX_RING_COUNT	128
++#define NFE_TX_RING_COUNT	256
++
++#define NFE_JBYTES		(ETHER_MAX_LEN_JUMBO + ETHER_ALIGN)
++#define NFE_JPOOL_COUNT		(NFE_RX_RING_COUNT + 64)
++#define NFE_JPOOL_SIZE		(NFE_JPOOL_COUNT * NFE_JBYTES)
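
For scale: FreeBSD defines ETHER_MAX_LEN_JUMBO as 9018 and ETHER_ALIGN
as 2, so NFE_JBYTES is 9020; the pool holds 128 + 64 = 192 jumbo
buffers, and NFE_JPOOL_SIZE works out to 192 * 9020 = 1,731,840 bytes,
roughly 1.65 MB of DMA-able memory when jumbo frames are enabled.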
++
++#define NFE_MAX_SCATTER		32
++
++#define NFE_IRQ_STATUS		0x000
++#define NFE_IRQ_MASK		0x004
++#define NFE_SETUP_R6		0x008
++#define NFE_IMTIMER		0x00c
++#define NFE_MISC1		0x080
++#define NFE_TX_CTL		0x084
++#define NFE_TX_STATUS		0x088
++#define NFE_RXFILTER		0x08c
++#define NFE_RXBUFSZ		0x090
++#define NFE_RX_CTL		0x094
++#define NFE_RX_STATUS		0x098
++#define NFE_RNDSEED		0x09c
++#define NFE_SETUP_R1		0x0a0
++#define NFE_SETUP_R2		0x0a4
++#define NFE_MACADDR_HI		0x0a8
++#define NFE_MACADDR_LO		0x0ac
++#define NFE_MULTIADDR_HI	0x0b0
++#define NFE_MULTIADDR_LO	0x0b4
++#define NFE_MULTIMASK_HI	0x0b8
++#define NFE_MULTIMASK_LO	0x0bc
++#define NFE_PHY_IFACE		0x0c0
++#define NFE_TX_RING_ADDR_LO	0x100
++#define NFE_RX_RING_ADDR_LO	0x104
++#define NFE_RING_SIZE		0x108
++#define NFE_TX_UNK		0x10c
++#define NFE_LINKSPEED		0x110
++#define NFE_SETUP_R5		0x130
++#define NFE_SETUP_R3		0x13C
++#define NFE_SETUP_R7		0x140
++#define NFE_RXTX_CTL		0x144
++#define NFE_TX_RING_ADDR_HI	0x148
++#define NFE_RX_RING_ADDR_HI	0x14c
++#define NFE_PHY_STATUS		0x180
++#define NFE_SETUP_R4		0x184
++#define NFE_STATUS		0x188
++#define NFE_PHY_SPEED		0x18c
++#define NFE_PHY_CTL		0x190
++#define NFE_PHY_DATA		0x194
++#define NFE_WOL_CTL		0x200
++#define NFE_PATTERN_CRC		0x204
++#define NFE_PATTERN_MASK	0x208
++#define NFE_PWR_CAP		0x268
++#define NFE_PWR_STATE		0x26c
++#define NFE_VTAG_CTL		0x300
++
++#define NFE_PHY_ERROR		0x00001
++#define NFE_PHY_WRITE		0x00400
++#define NFE_PHY_BUSY		0x08000
++#define NFE_PHYADD_SHIFT	5
++
++#define NFE_STATUS_MAGIC	0x140000
++
++#define NFE_R1_MAGIC		0x16070f
++#define NFE_R2_MAGIC		0x16
++#define NFE_R4_MAGIC		0x08
++#define NFE_R6_MAGIC		0x03
++#define NFE_WOL_MAGIC		0x7770
++#define NFE_RX_START		0x01
++#define NFE_TX_START		0x01
++
++#define NFE_IRQ_RXERR		0x0001
++#define NFE_IRQ_RX		0x0002
++#define NFE_IRQ_RX_NOBUF	0x0004
++#define NFE_IRQ_TXERR		0x0008
++#define NFE_IRQ_TX_DONE		0x0010
++#define NFE_IRQ_TIMER		0x0020
++#define NFE_IRQ_LINK		0x0040
++#define NFE_IRQ_TXERR2		0x0080
++#define NFE_IRQ_TX1		0x0100
++
++#define NFE_IRQ_WANTED							\
++	(NFE_IRQ_RXERR | NFE_IRQ_RX_NOBUF | NFE_IRQ_RX |		\
++	 NFE_IRQ_TXERR | NFE_IRQ_TXERR2 | NFE_IRQ_TX_DONE |		\
++	 NFE_IRQ_LINK)
++
++#define NFE_RXTX_KICKTX		0x0001
++#define NFE_RXTX_BIT1		0x0002
++#define NFE_RXTX_BIT2		0x0004
++#define NFE_RXTX_RESET		0x0010
++#define NFE_RXTX_VTAG_STRIP	0x0040
++#define NFE_RXTX_VTAG_INSERT	0x0080
++#define NFE_RXTX_RXCSUM		0x0400
++#define NFE_RXTX_V2MAGIC	0x2100
++#define NFE_RXTX_V3MAGIC	0x2200
++#define NFE_RXFILTER_MAGIC	0x007f0008
++#define NFE_U2M			(1 << 5)
++#define NFE_PROMISC		(1 << 7)
++
++/* default interrupt moderation timer of 128us */
++#define NFE_IM_DEFAULT	((128 * 100) / 1024)
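
The expression evaluates to 12 under integer division (12800 / 1024),
which suggests the timer register counts in units of 1024/100 = 10.24 us:
12 ticks is about 123 us, the closest setting not exceeding the nominal
128 us.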
++
++#define NFE_VTAG_ENABLE		(1 << 13)
++
++#define NFE_PWR_VALID		(1 << 8)
++#define NFE_PWR_WAKEUP		(1 << 15)
++
++#define NFE_MEDIA_SET		0x10000
++#define	NFE_MEDIA_1000T		0x00032
++#define NFE_MEDIA_100TX		0x00064
++#define NFE_MEDIA_10T		0x003e8
++
++#define NFE_PHY_100TX		(1 << 0)
++#define NFE_PHY_1000T		(1 << 1)
++#define NFE_PHY_HDX		(1 << 8)
++
++#define NFE_MISC1_MAGIC		0x003b0f3c
++#define NFE_MISC1_HDX		(1 << 1)
++
++#define NFE_SEED_MASK		0x0003ff00
++#define NFE_SEED_10T		0x00007f00
++#define NFE_SEED_100TX		0x00002d00
++#define NFE_SEED_1000T		0x00007400
++
++/* Rx/Tx descriptor */
++struct nfe_desc32 {
++	uint32_t	physaddr;
++	uint16_t	length;
++	uint16_t	flags;
++#define NFE_RX_FIXME_V1		0x6004
++#define NFE_RX_VALID_V1		(1 << 0)
++#define NFE_TX_ERROR_V1		0x7808
++#define NFE_TX_LASTFRAG_V1	(1 << 0)
++#define NFE_RX_ERROR1_V1	(1<<7)
++#define NFE_RX_ERROR2_V1	(1<<8)
++#define NFE_RX_ERROR3_V1	(1<<9)
++#define NFE_RX_ERROR4_V1	(1<<10)
++} __packed;
++
++#define NFE_V1_TXERR	"\020"	\
++	"\14TXERROR\13UNDERFLOW\12LATECOLLISION\11LOSTCARRIER\10DEFERRED" \
++	"\08FORCEDINT\03RETRY\00LASTPACKET"
++
++/* V2 Rx/Tx descriptor */
++struct nfe_desc64 {
++	uint32_t	physaddr[2];
++	uint32_t	vtag;
++#define NFE_RX_VTAG		(1 << 16)
++#define NFE_TX_VTAG		(1 << 18)
++	uint16_t	length;
++	uint16_t	flags;
++#define NFE_RX_FIXME_V2		0x4300
++#define NFE_RX_VALID_V2		(1 << 13)
++#define NFE_TX_ERROR_V2		0x5c04
++#define NFE_TX_LASTFRAG_V2	(1 << 13)
++#define NFE_RX_IP_CSUMOK_V2	0x1000
++#define NFE_RX_UDP_CSUMOK_V2	0x1400
++#define NFE_RX_TCP_CSUMOK_V2	0x1800
++#define NFE_RX_ERROR1_V2	(1<<2)
++#define NFE_RX_ERROR2_V2	(1<<3)
++#define NFE_RX_ERROR3_V2	(1<<4)
++#define NFE_RX_ERROR4_V2	(1<<5)
++} __packed;
++
++#define NFE_V2_TXERR	"\020"	\
++	"\14FORCEDINT\13LASTPACKET\12UNDERFLOW\10LOSTCARRIER\09DEFERRED\02RETRY"
++
++/* flags common to V1/V2 descriptors */
++#define NFE_RX_CSUMOK		0x1c00
++#define NFE_RX_ERROR		(1 << 14)
++#define NFE_RX_READY		(1 << 15)
++#define NFE_TX_TCP_CSUM		(1 << 10)
++#define NFE_TX_IP_CSUM		(1 << 11)
++#define NFE_TX_VALID		(1 << 15)
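
These READY/VALID/ERROR bits are what the RX completion path has to test
once the hardware hands a descriptor back.  A minimal sketch of that
classification for the V2 (64-bit) descriptor, assuming le16toh() from
<sys/endian.h>; this is illustrative, not the patch's actual nfe_rxeof():

    /* Returns -1 on RX error, 0 if the descriptor is not ready to
     * process, otherwise the received payload length. */
    static int
    nfe_rx_desc_classify(struct nfe_desc64 *d64)
    {
        uint16_t flags = le16toh(d64->flags);

        if (flags & NFE_RX_READY)
            return (0);    /* still owned by the hardware */
        if (!(flags & NFE_RX_VALID_V2))
            return (0);    /* never completed; recycle */
        if (flags & NFE_RX_ERROR)
            return (-1);   /* completed with an error */
        return (le16toh(d64->length));
    }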
++
++#define NFE_READ(sc, reg) \
++	bus_space_read_4((sc)->nfe_memt, (sc)->nfe_memh, (reg))
++
++#define NFE_WRITE(sc, reg, val) \
++	bus_space_write_4((sc)->nfe_memt, (sc)->nfe_memh, (reg), (val))
++
++#ifndef PCI_VENDOR_NVIDIA
++#define	PCI_VENDOR_NVIDIA	0x10DE
++#endif
++
++#define	PCI_PRODUCT_NVIDIA_NFORCE_LAN		0x01C3
++#define	PCI_PRODUCT_NVIDIA_NFORCE2_LAN		0x0066
++#define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN1		0x00D6
++#define	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1	0x0086
++#define	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2	0x008C
++#define	PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN	0x00E6
++#define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN4		0x00DF
++#define	PCI_PRODUCT_NVIDIA_NFORCE4_LAN1		0x0056
++#define	PCI_PRODUCT_NVIDIA_NFORCE4_LAN2		0x0057
++#define	PCI_PRODUCT_NVIDIA_MCP04_LAN1		0x0037
++#define	PCI_PRODUCT_NVIDIA_MCP04_LAN2		0x0038
++#define	PCI_PRODUCT_NVIDIA_NFORCE430_LAN1	0x0268
++#define	PCI_PRODUCT_NVIDIA_NFORCE430_LAN2	0x0269
++#define	PCI_PRODUCT_NVIDIA_MCP55_LAN1		0x0372
++#define	PCI_PRODUCT_NVIDIA_MCP55_LAN2		0x0373
++#define	PCI_PRODUCT_NVIDIA_MCP61_LAN1		0x03e5
++#define	PCI_PRODUCT_NVIDIA_MCP61_LAN2		0x03e6
++#define	PCI_PRODUCT_NVIDIA_MCP61_LAN3		0x03ee
++#define	PCI_PRODUCT_NVIDIA_MCP61_LAN4		0x03ef
++#define	PCI_PRODUCT_NVIDIA_MCP65_LAN1		0x0450
++#define	PCI_PRODUCT_NVIDIA_MCP65_LAN2		0x0451
++#define	PCI_PRODUCT_NVIDIA_MCP65_LAN3		0x0452
++#define	PCI_PRODUCT_NVIDIA_MCP65_LAN4		0x0453
++
++#define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN2	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1
++#define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN3	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2
++#define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN5	PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN
++#define	PCI_PRODUCT_NVIDIA_CK804_LAN1	PCI_PRODUCT_NVIDIA_NFORCE4_LAN1
++#define	PCI_PRODUCT_NVIDIA_CK804_LAN2	PCI_PRODUCT_NVIDIA_NFORCE4_LAN2
++#define	PCI_PRODUCT_NVIDIA_MCP51_LAN1	PCI_PRODUCT_NVIDIA_NFORCE430_LAN1
++#define	PCI_PRODUCT_NVIDIA_MCP51_LAN2	PCI_PRODUCT_NVIDIA_NFORCE430_LAN2
++
++#define NFE_DEBUG		0x0000
++#define NFE_DEBUG_INIT		0x0001
++#define NFE_DEBUG_RUNNING	0x0002
++#define NFE_DEBUG_DEINIT	0x0004
++#define NFE_DEBUG_IOCTL		0x0008
++#define NFE_DEBUG_INTERRUPT	0x0010
++#define NFE_DEBUG_API		0x0020
++#define NFE_DEBUG_LOCK		0x0040
++#define NFE_DEBUG_BROKEN	0x0080
++#define NFE_DEBUG_MII		0x0100
++#define NFE_DEBUG_ALL		0xFFFF
+diff -Nurd sys/dev/nfe/if_nfevar.h sys/dev/nfe/if_nfevar.h
+--- sys/dev/nfe/if_nfevar.h	1970-01-01 01:00:00.000000000 +0100
++++ sys/dev/nfe/if_nfevar.h	2006-09-05 07:01:26.000000000 +0200
+@@ -0,0 +1,124 @@
++/*	$OpenBSD: if_nfevar.h,v 1.11 2006/02/19 13:57:02 damien Exp $	*/
++
++/*-
++ * Copyright (c) 2005 Jonathan Gray <jsg at openbsd.org>
++ *
++ * Permission to use, copy, modify, and distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ *
++ * $FreeBSD: src/sys/dev/nfe/if_nfevar.h,v 1.2 2006/06/26 23:41:07 obrien Exp $
++ */
++
++#define	NFE_IFQ_MAXLEN	64
++
++struct nfe_tx_data {
++	bus_dmamap_t	tx_data_map;
++	bus_dmamap_t	active;
++	int		nsegs;
++	struct mbuf	*m;
++};
++
++struct nfe_tx_ring {
++	bus_dmamap_t		tx_desc_map;
++	bus_dma_segment_t	tx_desc_segs;
++	bus_addr_t		physaddr;
++	struct nfe_desc32	*desc32;
++	struct nfe_desc64	*desc64;
++	struct nfe_tx_data	data[NFE_TX_RING_COUNT];
++	int			queued;
++	int			cur;
++	int			next;
++	bus_addr_t		tx_desc_addr;
++	bus_addr_t		tx_data_addr;
++	bus_dma_tag_t		tx_desc_tag;
++	bus_dma_tag_t		tx_data_tag;
++};
++
++struct nfe_jbuf {
++	caddr_t			buf;
++	bus_dmamap_t		rx_jumbo_map;
++	bus_dma_segment_t	rx_jumbo_seg;
++	struct mbuf		*m;
++	SLIST_ENTRY(nfe_jbuf)	jnext;
++};
++
++struct nfe_rx_data {
++	bus_dmamap_t	rx_data_map;
++	struct mbuf	*m;
++};
++
++struct nfe_rx_ring {
++	bus_dmamap_t		rx_desc_map;
++	bus_dma_segment_t	rx_desc_segs;
++	bus_dma_tag_t		rx_desc_tag;
++	bus_addr_t		rx_desc_addr;
++	bus_dma_tag_t		rx_jpool_tag;
++	bus_dmamap_t		rx_jpool_map;
++	caddr_t			jpool;
++	struct nfe_jbuf		jbuf[NFE_JPOOL_COUNT];
++	SLIST_HEAD(__jfreelist, nfe_jbuf)	jfreelist;
++	bus_addr_t		physaddr;
++	struct nfe_desc32	*desc32;
++	struct nfe_desc64	*desc64;
++	bus_dma_tag_t		rx_data_tag;
++	struct nfe_rx_data	data[NFE_RX_RING_COUNT];
++	int			bufsz;
++	int			cur;
++	int			next;
++};
++
++struct nfe_softc {
++	struct ifnet		*nfe_ifp;
++	device_t		nfe_dev;
++	device_t		nfe_miibus;
++	struct mtx		nfe_mtx;
++	struct mtx		nfe_jlist_mtx;
++	bus_space_handle_t	nfe_memh;
++	bus_space_tag_t		nfe_memt;
++	struct resource		*nfe_res;
++	struct resource		*nfe_irq;
++	void			*nfe_intrhand;
++	struct mii_data		nfe_mii;
++	u_int8_t		nfe_unit;
++	struct callout		nfe_stat_ch;
++
++	struct arpcom		nfe_arpcom;
++	bus_dma_tag_t		nfe_parent_tag;
++  /*	struct timeout		nfe_tick_ch; */
++	void			*nfe_powerhook;
++
++	int			nfe_if_flags;
++	u_int			nfe_flags;
++#define	NFE_JUMBO_SUP	0x01
++#define	NFE_40BIT_ADDR	0x02
++#define	NFE_HW_CSUM	0x04
++#define	NFE_HW_VLAN	0x08
++#define	NFE_USE_JUMBO	0x10
++	u_int32_t		rxtxctl;
++	u_int8_t		mii_phyaddr;
++	u_char			eaddr[ETHER_ADDR_LEN];
++	struct task		nfe_txtask;
++	int			nfe_link;
++
++	struct nfe_tx_ring	txq;
++	struct nfe_rx_ring	rxq;
++
++#ifdef DEVICE_POLLING
++	int			rxcycles;
++#endif
++};
++
++struct nfe_type {
++	u_int16_t	vid_id;
++	u_int16_t	dev_id;
++	char		*name;
++};
+diff -Nurd sys/i386/conf/GENERIC sys/i386/conf/GENERIC
+--- sys/i386/conf/GENERIC	2006-11-03 09:00:17.000000000 +0100
++++ sys/i386/conf/GENERIC	2006-11-03 08:55:46.000000000 +0100
+@@ -193,6 +193,7 @@
+ device		dc		# DEC/Intel 21143 and various workalikes
+ device		fxp		# Intel EtherExpress PRO/100B (82557, 82558)
+ device		lge		# Level 1 LXT1001 gigabit Ethernet
++device		nfe		# NVIDIA nForce MCP Ethernet
+ device		nge		# NatSemi DP83820 gigabit Ethernet
+ device		pcn		# AMD Am79C97x PCI 10/100(precedence over 'lnc')
+ device		re		# RealTek 8139C+/8169/8169S/8110S
+diff -Nurd sys/modules/nfe/Makefile sys/modules/nfe/Makefile
+--- sys/modules/nfe/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ sys/modules/nfe/Makefile	2006-11-03 08:53:03.000000000 +0100
+@@ -0,0 +1,6 @@
++.PATH: ${.CURDIR}/../../dev/nfe
++
++KMOD=	if_nfe
++SRCS=	if_nfe.c miibus_if.h opt_bdg.h device_if.h bus_if.h pci_if.h
++
++.include <bsd.kmod.mk>
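
Once the patch is applied, the driver can also be built and loaded
standalone in the usual FreeBSD way (make in sys/modules/nfe, then
kldload if_nfe), in addition to being compiled into the GENERIC kernels
via the config entries in this patch.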
+diff -Nurd sys/pc98/conf/GENERIC sys/pc98/conf/GENERIC
+--- sys/pc98/conf/GENERIC	2006-11-03 09:00:04.000000000 +0100
++++ sys/pc98/conf/GENERIC	2006-11-03 08:55:51.000000000 +0100
+@@ -168,6 +168,7 @@
+ device		dc		# DEC/Intel 21143 and various workalikes
+ device		fxp		# Intel EtherExpress PRO/100B (82557, 82558)
+ device		lge		# Level 1 LXT1001 gigabit Ethernet
++device		nfe		# NVIDIA nForce MCP Ethernet
+ device		nge		# NatSemi DP83820 gigabit Ethernet
+ device		pcn		# AMD Am79C97x PCI 10/100 (precedence over 'lnc')
+ device		re		# RealTek 8139C+/8169/8169S/8110S
+diff -Nurd sys/modules/Makefile sys/modules/Makefile
+--- sys/modules/Makefile	2006-11-03 13:44:50.000000000 +0100
++++ sys/modules/Makefile	2006-11-03 15:29:22.000000000 +0100
+@@ -161,6 +161,7 @@
+ 	${_ncv} \
+ 	${_ndis} \
+ 	netgraph \
++	nfe \
+ 	nfsclient \
+ 	nfsserver \
+ 	nge \