Merge branch 'upstream-davem' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

commit b791dd3ed7
David S. Miller  2008-02-12 17:51:26 -08:00
23 changed files with 5441 additions and 1793 deletions


@ -168,7 +168,7 @@ static int debug = -1;
* Warning: 64K ring has hardware issues and may lock up.
*/
#if defined(CONFIG_SH_DREAMCAST)
#define RX_BUF_IDX 1 /* 16K ring */
#define RX_BUF_IDX 0 /* 8K ring */
#else
#define RX_BUF_IDX 2 /* 32K ring */
#endif
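Editor's note (not part of the diff): in 8139too this index selects the receive-ring size, so dropping the Dreamcast value from 1 to 0 halves the ring from 16K to 8K. A sketch of how such an index typically becomes a length, assuming the driver's usual derivation:

	/* sketch only: the index selects a power-of-two ring size */
	#define RX_BUF_LEN	(8192 << RX_BUF_IDX)	/* 0 -> 8K, 1 -> 16K, 2 -> 32K */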


@ -931,6 +931,14 @@ config ENC28J60_WRITEVERIFY
Enable the verify after the buffer write useful for debugging purpose.
If unsure, say N.
config DM9000_DEBUGLEVEL
int "DM9000 maximum debug level"
depends on DM9000
default 4
help
The maximum level of debugging code compiled into the DM9000
driver.
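Editor's note: a hedged sketch of how such a compile-time debug ceiling is typically consumed; the macro name and the db->dev field are illustrative, not necessarily the dm9000 driver's exact ones:

	/* sketch: debug calls above the configured ceiling compile away */
	#define dm9000_dbg(db, lev, fmt, ...)				\
		do {							\
			if ((lev) < CONFIG_DM9000_DEBUGLEVEL)		\
				dev_dbg((db)->dev, fmt, ##__VA_ARGS__);	\
		} while (0)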
config SMC911X
tristate "SMSC LAN911[5678] support"
select CRC32
@ -2352,6 +2360,16 @@ config GELIC_NET
To compile this driver as a module, choose M here: the
module will be called ps3_gelic.
config GELIC_WIRELESS
bool "PS3 Wireless support"
depends on GELIC_NET
help
This option adds the support for the wireless feature of PS3.
If you have the wireless-less model of PS3 or have no plan to
use wireless feature, disabling this option saves memory. As
the driver automatically distinguishes the models, you can
safely enable this option even if you have a wireless-less model.
config GIANFAR
tristate "Gianfar Ethernet"
depends on FSL_SOC


@ -70,7 +70,8 @@ obj-$(CONFIG_BNX2X) += bnx2x.o
spidernet-y += spider_net.o spider_net_ethtool.o
obj-$(CONFIG_SPIDER_NET) += spidernet.o sungem_phy.o
obj-$(CONFIG_GELIC_NET) += ps3_gelic.o
ps3_gelic-objs += ps3_gelic_net.o
gelic_wireless-$(CONFIG_GELIC_WIRELESS) += ps3_gelic_wireless.o
ps3_gelic-objs += ps3_gelic_net.o $(gelic_wireless-y)
obj-$(CONFIG_TC35815) += tc35815.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o


@ -404,7 +404,7 @@ found:
if (neigh->nud_state & NUD_FAILED) {
arpq = e->arpq_head;
e->arpq_head = e->arpq_tail = NULL;
} else if (neigh_is_connected(neigh))
} else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
setup_l2e_send_pending(dev, NULL, e);
} else {
e->state = neigh_is_connected(neigh) ?
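Editor's note (our reading of the change, not text from the patch): neigh_is_connected() only covers the NUD_CONNECTED states, so the open-coded test above additionally accepts neighbours in NUD_STALE.

	/* from include/net/neighbour.h (paraphrased):
	 *   NUD_CONNECTED = NUD_PERMANENT | NUD_NOARP | NUD_REACHABLE
	 * so the replaced call was roughly
	 *   neigh->nud_state & NUD_CONNECTED
	 * while the new test also lets a STALE (still usable) entry
	 * trigger setup_l2e_send_pending().
	 */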


@ -1059,6 +1059,14 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
htonl(V_WR_TID(q->token)));
}
static inline void t3_stop_queue(struct net_device *dev, struct sge_qset *qs,
struct sge_txq *q)
{
netif_stop_queue(dev);
set_bit(TXQ_ETH, &qs->txq_stopped);
q->stops++;
}
/**
* eth_xmit - add a packet to the Ethernet Tx queue
* @skb: the packet
@ -1090,31 +1098,18 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
ndesc = calc_tx_descs(skb);
if (unlikely(credits < ndesc)) {
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
set_bit(TXQ_ETH, &qs->txq_stopped);
q->stops++;
dev_err(&adap->pdev->dev,
"%s: Tx ring %u full while queue awake!\n",
dev->name, q->cntxt_id & 7);
}
t3_stop_queue(dev, qs, q);
dev_err(&adap->pdev->dev,
"%s: Tx ring %u full while queue awake!\n",
dev->name, q->cntxt_id & 7);
spin_unlock(&q->lock);
return NETDEV_TX_BUSY;
}
q->in_use += ndesc;
if (unlikely(credits - ndesc < q->stop_thres)) {
q->stops++;
netif_stop_queue(dev);
set_bit(TXQ_ETH, &qs->txq_stopped);
#if !USE_GTS
if (should_restart_tx(q) &&
test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
q->restarts++;
netif_wake_queue(dev);
}
#endif
}
if (unlikely(credits - ndesc < q->stop_thres))
if (USE_GTS || !should_restart_tx(q))
t3_stop_queue(dev, qs, q);
gen = q->gen;
q->unacked += ndesc;

File diff suppressed because it is too large.


@ -926,8 +926,6 @@ e1000_probe(struct pci_dev *pdev,
{
struct net_device *netdev;
struct e1000_adapter *adapter;
unsigned long mmio_start, mmio_len;
unsigned long flash_start, flash_len;
static int cards_found = 0;
static int global_quad_port_a = 0; /* global ksp3 port a indication */
@ -970,11 +968,9 @@ e1000_probe(struct pci_dev *pdev,
adapter->hw.back = adapter;
adapter->msg_enable = (1 << debug) - 1;
mmio_start = pci_resource_start(pdev, BAR_0);
mmio_len = pci_resource_len(pdev, BAR_0);
err = -EIO;
adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
pci_resource_len(pdev, BAR_0));
if (!adapter->hw.hw_addr)
goto err_ioremap;
@ -1009,10 +1005,6 @@ e1000_probe(struct pci_dev *pdev,
#endif
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len;
netdev->base_addr = adapter->hw.io_base;
adapter->bd_number = cards_found;
/* setup the private structure */
@ -1025,9 +1017,9 @@ e1000_probe(struct pci_dev *pdev,
* because it depends on mac_type */
if ((adapter->hw.mac_type == e1000_ich8lan) &&
(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
flash_start = pci_resource_start(pdev, 1);
flash_len = pci_resource_len(pdev, 1);
adapter->hw.flash_address = ioremap(flash_start, flash_len);
adapter->hw.flash_address =
ioremap(pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1));
if (!adapter->hw.flash_address)
goto err_flashmap;
}


@ -166,21 +166,24 @@
* Hardware access:
*/
#define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN 0x0020 /* device supports vlan tagging and striping */
#define DEV_HAS_MSI 0x0040 /* device supports MSI */
#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */
#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */
#define DEV_HAS_STATISTICS_V1 0x0400 /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2 0x0800 /* device supports hw statistics version 2 */
#define DEV_HAS_TEST_EXTENDED 0x1000 /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT 0x2000 /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR 0x4000 /* device supports correct mac address order */
#define DEV_NEED_TIMERIRQ 0x00001 /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER 0x00002 /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC 0x00004 /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA 0x00008 /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM 0x00010 /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN 0x00020 /* device supports vlan tagging and striping */
#define DEV_HAS_MSI 0x00040 /* device supports MSI */
#define DEV_HAS_MSI_X 0x00080 /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL 0x00100 /* device supports power savings */
#define DEV_HAS_STATISTICS_V1 0x00200 /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2 0x00400 /* device supports hw statistics version 2 */
#define DEV_HAS_TEST_EXTENDED 0x00800 /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT 0x01000 /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR 0x02000 /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX 0x04000 /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1 0x08000 /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2 0x10000 /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3 0x20000 /* device supports tx pause frames version 3 */
enum {
NvRegIrqStatus = 0x000,
@ -266,9 +269,12 @@ enum {
#define NVREG_RNDSEED_FORCE3 0x7400
NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000
NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT 0x16
NvRegMacAddrA = 0xA8,
@ -318,8 +324,10 @@ enum {
NvRegTxRingPhysAddrHigh = 0x148,
NvRegRxRingPhysAddrHigh = 0x14C,
NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE 0x01ff0080
#define NVREG_TX_PAUSEFRAME_ENABLE 0x01800010
#define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
@ -2751,7 +2759,12 @@ static void nv_update_pause(struct net_device *dev, u32 pause_flags)
if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)
pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
writel(pause_enable, base + NvRegTxPauseFrame);
writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
} else {
@ -2785,6 +2798,7 @@ static int nv_update_linkspeed(struct net_device *dev)
int retval = 0;
u32 control_1000, status_1000, phyreg, pause_flags, txreg;
u32 txrxFlags = 0;
u32 phy_exp;
/* BMSR_LSTATUS is latched, read it twice:
* we want the current value.
@ -2912,13 +2926,25 @@ set_speed:
phyreg |= PHY_1000;
writel(phyreg, base + NvRegPhyInterface);
phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
if (phyreg & PHY_RGMII) {
if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
txreg = NVREG_TX_DEFERRAL_RGMII_1000;
else
txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
} else {
if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
else
txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
} else {
txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
}
}
} else {
txreg = NVREG_TX_DEFERRAL_DEFAULT;
if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
else
txreg = NVREG_TX_DEFERRAL_DEFAULT;
}
writel(txreg, base + NvRegTxDeferral);
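Editor's note: a compact summary of the selection above (our paraphrase of the diff, not driver documentation); phy_exp is the PHY's EXPANSION_NWAY bit, i.e. whether it reports autoneg capability:

	/* RGMII, 1000 Mb/s                            -> NVREG_TX_DEFERRAL_RGMII_1000
	 * RGMII, 10 Mb/s, half duplex, no NWAY,
	 *        DEV_HAS_COLLISION_FIX               -> NVREG_TX_DEFERRAL_RGMII_STRETCH_10
	 * RGMII, 100 Mb/s, same conditions           -> NVREG_TX_DEFERRAL_RGMII_STRETCH_100
	 * RGMII, anything else                       -> NVREG_TX_DEFERRAL_RGMII_10_100
	 * MII,   half duplex, no NWAY, COLLISION_FIX -> NVREG_TX_DEFERRAL_MII_STRETCH
	 * MII,   anything else                       -> NVREG_TX_DEFERRAL_DEFAULT
	 */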
@ -5155,7 +5181,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
}
np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
(id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
(id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
}
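Editor's note: the check treats any of the three versions as "TX pause capable"; an equivalent, more compact form (offered only as a reading aid, not as the driver's code) would be:

	if (id->driver_data & (DEV_HAS_PAUSEFRAME_TX_V1 |
			       DEV_HAS_PAUSEFRAME_TX_V2 |
			       DEV_HAS_PAUSEFRAME_TX_V3))
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;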
@ -5559,107 +5587,107 @@ static struct pci_device_id pci_tbl[] = {
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{ /* MCP55 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
},
{ /* MCP61 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP61 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP61 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP61 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP65 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP67 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP67 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP67 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP67 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP73 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
},
{ /* MCP73 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
},
{ /* MCP73 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
},
{ /* MCP73 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
},
{0,},
};

File diff suppressed because it is too large.


@ -36,12 +36,12 @@
struct scp_struct
{
unsigned short zero_dum0; /* has to be zero */
unsigned char sysbus; /* 0=16Bit,1=8Bit */
unsigned char zero_dum1; /* has to be zero for 586 */
unsigned short zero_dum2;
unsigned short zero_dum3;
char *iscp; /* pointer to the iscp-block */
u16 zero_dum0; /* has to be zero */
u8 sysbus; /* 0=16Bit,1=8Bit */
u8 zero_dum1; /* has to be zero for 586 */
u8 zero_dum2;
u8 zero_dum3;
u32 iscp; /* pointer to the iscp-block */
};
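Editor's note: the pointer-to-u32 change matters because the 82586 consumes these fields as fixed-width bus addresses; a host 'char *' has neither a guaranteed size nor any bus meaning. A minimal sketch of the intended usage, using the generic ISA helper (not necessarily the exact helper ni52.c uses):

	/* sketch: store a 32-bit bus address, never a host pointer */
	scp->iscp = (u32) isa_virt_to_bus(iscp);	/* iscp: struct iscp_struct * in host memory */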
@ -50,10 +50,10 @@ struct scp_struct
*/
struct iscp_struct
{
unsigned char busy; /* 586 clears after successful init */
unsigned char zero_dummy; /* has to be zero */
unsigned short scb_offset; /* pointeroffset to the scb_base */
char *scb_base; /* base-address of all 16-bit offsets */
u8 busy; /* 586 clears after successful init */
u8 zero_dummy; /* has to be zero */
u16 scb_offset; /* pointeroffset to the scb_base */
u32 scb_base; /* base-address of all 16-bit offsets */
};
/*
@ -61,16 +61,16 @@ struct iscp_struct
*/
struct scb_struct
{
unsigned char rus;
unsigned char cus;
unsigned char cmd_ruc; /* command word: RU part */
unsigned char cmd_cuc; /* command word: CU part & ACK */
unsigned short cbl_offset; /* pointeroffset, command block list */
unsigned short rfa_offset; /* pointeroffset, receive frame area */
unsigned short crc_errs; /* CRC-Error counter */
unsigned short aln_errs; /* alignmenterror counter */
unsigned short rsc_errs; /* Resourceerror counter */
unsigned short ovrn_errs; /* OVerrunerror counter */
u8 rus;
u8 cus;
u8 cmd_ruc; /* command word: RU part */
u8 cmd_cuc; /* command word: CU part & ACK */
u16 cbl_offset; /* pointeroffset, command block list */
u16 rfa_offset; /* pointeroffset, receive frame area */
u16 crc_errs; /* CRC-Error counter */
u16 aln_errs; /* alignmenterror counter */
u16 rsc_errs; /* Resourceerror counter */
u16 ovrn_errs; /* OVerrunerror counter */
};
/*
@ -119,16 +119,16 @@ struct scb_struct
*/
struct rfd_struct
{
unsigned char stat_low; /* status word */
unsigned char stat_high; /* status word */
unsigned char rfd_sf; /* 82596 mode only */
unsigned char last; /* Bit15,Last Frame on List / Bit14,suspend */
unsigned short next; /* linkoffset to next RFD */
unsigned short rbd_offset; /* pointeroffset to RBD-buffer */
unsigned char dest[6]; /* ethernet-address, destination */
unsigned char source[6]; /* ethernet-address, source */
unsigned short length; /* 802.3 frame-length */
unsigned short zero_dummy; /* dummy */
u8 stat_low; /* status word */
u8 stat_high; /* status word */
u8 rfd_sf; /* 82596 mode only */
u8 last; /* Bit15,Last Frame on List / Bit14,suspend */
u16 next; /* linkoffset to next RFD */
u16 rbd_offset; /* pointeroffset to RBD-buffer */
u8 dest[6]; /* ethernet-address, destination */
u8 source[6]; /* ethernet-address, source */
u16 length; /* 802.3 frame-length */
u16 zero_dummy; /* dummy */
};
#define RFD_LAST 0x80 /* last: last rfd in the list */
@ -153,11 +153,11 @@ struct rfd_struct
*/
struct rbd_struct
{
unsigned short status; /* status word,number of used bytes in buff */
unsigned short next; /* pointeroffset to next RBD */
char *buffer; /* receive buffer address pointer */
unsigned short size; /* size of this buffer */
unsigned short zero_dummy; /* dummy */
u16 status; /* status word,number of used bytes in buff */
u16 next; /* pointeroffset to next RBD */
u32 buffer; /* receive buffer address pointer */
u16 size; /* size of this buffer */
u16 zero_dummy; /* dummy */
};
#define RBD_LAST 0x8000 /* last buffer */
@ -195,9 +195,9 @@ struct rbd_struct
*/
struct nop_cmd_struct
{
unsigned short cmd_status; /* status of this command */
unsigned short cmd_cmd; /* the command itself (+bits) */
unsigned short cmd_link; /* offsetpointer to next command */
u16 cmd_status; /* status of this command */
u16 cmd_cmd; /* the command itself (+bits) */
u16 cmd_link; /* offsetpointer to next command */
};
/*
@ -205,10 +205,10 @@ struct nop_cmd_struct
*/
struct iasetup_cmd_struct
{
unsigned short cmd_status;
unsigned short cmd_cmd;
unsigned short cmd_link;
unsigned char iaddr[6];
u16 cmd_status;
u16 cmd_cmd;
u16 cmd_link;
u8 iaddr[6];
};
/*
@ -216,21 +216,21 @@ struct iasetup_cmd_struct
*/
struct configure_cmd_struct
{
unsigned short cmd_status;
unsigned short cmd_cmd;
unsigned short cmd_link;
unsigned char byte_cnt; /* size of the config-cmd */
unsigned char fifo; /* fifo/recv monitor */
unsigned char sav_bf; /* save bad frames (bit7=1)*/
unsigned char adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/
unsigned char priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
unsigned char ifs; /* inter frame spacing */
unsigned char time_low; /* slot time low */
unsigned char time_high; /* slot time high(0-2) and max. retries(4-7) */
unsigned char promisc; /* promisc-mode(0) , et al (1-7) */
unsigned char carr_coll; /* carrier(0-3)/collision(4-7) stuff */
unsigned char fram_len; /* minimal frame len */
unsigned char dummy; /* dummy */
u16 cmd_status;
u16 cmd_cmd;
u16 cmd_link;
u8 byte_cnt; /* size of the config-cmd */
u8 fifo; /* fifo/recv monitor */
u8 sav_bf; /* save bad frames (bit7=1)*/
u8 adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/
u8 priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
u8 ifs; /* inter frame spacing */
u8 time_low; /* slot time low */
u8 time_high; /* slot time high(0-2) and max. retries(4-7) */
u8 promisc; /* promisc-mode(0) , et al (1-7) */
u8 carr_coll; /* carrier(0-3)/collision(4-7) stuff */
u8 fram_len; /* minimal frame len */
u8 dummy; /* dummy */
};
/*
@ -238,11 +238,11 @@ struct configure_cmd_struct
*/
struct mcsetup_cmd_struct
{
unsigned short cmd_status;
unsigned short cmd_cmd;
unsigned short cmd_link;
unsigned short mc_cnt; /* number of bytes in the MC-List */
unsigned char mc_list[0][6]; /* pointer to 6 bytes entries */
u16 cmd_status;
u16 cmd_cmd;
u16 cmd_link;
u16 mc_cnt; /* number of bytes in the MC-List */
u8 mc_list[0][6]; /* pointer to 6 bytes entries */
};
/*
@ -250,10 +250,10 @@ struct mcsetup_cmd_struct
*/
struct dump_cmd_struct
{
unsigned short cmd_status;
unsigned short cmd_cmd;
unsigned short cmd_link;
unsigned short dump_offset; /* pointeroffset to DUMP space */
u16 cmd_status;
u16 cmd_cmd;
u16 cmd_link;
u16 dump_offset; /* pointeroffset to DUMP space */
};
/*
@ -261,12 +261,12 @@ struct dump_cmd_struct
*/
struct transmit_cmd_struct
{
unsigned short cmd_status;
unsigned short cmd_cmd;
unsigned short cmd_link;
unsigned short tbd_offset; /* pointeroffset to TBD */
unsigned char dest[6]; /* destination address of the frame */
unsigned short length; /* user defined: 802.3 length / Ether type */
u16 cmd_status;
u16 cmd_cmd;
u16 cmd_link;
u16 tbd_offset; /* pointeroffset to TBD */
u8 dest[6]; /* destination address of the frame */
u16 length; /* user defined: 802.3 length / Ether type */
};
#define TCMD_ERRMASK 0x0fa0
@ -281,10 +281,10 @@ struct transmit_cmd_struct
struct tdr_cmd_struct
{
unsigned short cmd_status;
unsigned short cmd_cmd;
unsigned short cmd_link;
unsigned short status;
u16 cmd_status;
u16 cmd_cmd;
u16 cmd_link;
u16 status;
};
#define TDR_LNK_OK 0x8000 /* No link problem identified */
@ -298,9 +298,9 @@ struct tdr_cmd_struct
*/
struct tbd_struct
{
unsigned short size; /* size + EOF-Flag(15) */
unsigned short next; /* pointeroffset to next TBD */
char *buffer; /* pointer to buffer */
u16 size; /* size + EOF-Flag(15) */
u16 next; /* pointeroffset to next TBD */
u32 buffer; /* pointer to buffer */
};
#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */


@ -174,7 +174,11 @@ static int homepna[MAX_UNITS];
#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS))
#define PKT_BUF_SZ 1544
#define PKT_BUF_SKB 1544
/* actual buffer length after being aligned */
#define PKT_BUF_SIZE (PKT_BUF_SKB - NET_IP_ALIGN)
/* chip wants twos complement of the (aligned) buffer length */
#define NEG_BUF_SIZE (NET_IP_ALIGN - PKT_BUF_SKB)
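Editor's note: a worked example of the new constants, assuming the common NET_IP_ALIGN value of 2 (architectures may override it):

	/*   PKT_BUF_SKB  = 1544              bytes allocated per receive skb
	 *   PKT_BUF_SIZE = 1544 - 2 = 1542   usable DMA length after skb_reserve(skb, NET_IP_ALIGN)
	 *   NEG_BUF_SIZE = 2 - 1544 = -1542  stored via cpu_to_le16(), i.e. the two's
	 *                                    complement buffer length the chip expects
	 */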
/* Offsets from base I/O address. */
#define PCNET32_WIO_RDP 0x10
@ -604,7 +608,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
/* now allocate any new buffers needed */
for (; new < size; new++ ) {
struct sk_buff *rx_skbuff;
new_skb_list[new] = dev_alloc_skb(PKT_BUF_SZ);
new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB);
if (!(rx_skbuff = new_skb_list[new])) {
/* keep the original lists and buffers */
if (netif_msg_drv(lp))
@ -613,20 +617,20 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
dev->name);
goto free_all_new;
}
skb_reserve(rx_skbuff, 2);
skb_reserve(rx_skbuff, NET_IP_ALIGN);
new_dma_addr_list[new] =
pci_map_single(lp->pci_dev, rx_skbuff->data,
PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
new_rx_ring[new].buf_length = cpu_to_le16(2 - PKT_BUF_SZ);
new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
new_rx_ring[new].status = cpu_to_le16(0x8000);
}
/* and free any unneeded buffers */
for (; new < lp->rx_ring_size; new++) {
if (lp->rx_skbuff[new]) {
pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
dev_kfree_skb(lp->rx_skbuff[new]);
}
}
@ -651,7 +655,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
for (; --new >= lp->rx_ring_size; ) {
if (new_skb_list[new]) {
pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
dev_kfree_skb(new_skb_list[new]);
}
}
@ -678,7 +682,7 @@ static void pcnet32_purge_rx_ring(struct net_device *dev)
wmb(); /* Make sure adapter sees owner change */
if (lp->rx_skbuff[i]) {
pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(lp->rx_skbuff[i]);
}
lp->rx_skbuff[i] = NULL;
@ -1201,7 +1205,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4;
/* Discard oversize frames. */
if (unlikely(pkt_len > PKT_BUF_SZ - 2)) {
if (unlikely(pkt_len > PKT_BUF_SIZE)) {
if (netif_msg_drv(lp))
printk(KERN_ERR "%s: Impossible packet size %d!\n",
dev->name, pkt_len);
@ -1218,26 +1222,26 @@ static void pcnet32_rx_entry(struct net_device *dev,
if (pkt_len > rx_copybreak) {
struct sk_buff *newskb;
if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) {
skb_reserve(newskb, 2);
if ((newskb = dev_alloc_skb(PKT_BUF_SKB))) {
skb_reserve(newskb, NET_IP_ALIGN);
skb = lp->rx_skbuff[entry];
pci_unmap_single(lp->pci_dev,
lp->rx_dma_addr[entry],
PKT_BUF_SZ - 2,
PKT_BUF_SIZE,
PCI_DMA_FROMDEVICE);
skb_put(skb, pkt_len);
lp->rx_skbuff[entry] = newskb;
lp->rx_dma_addr[entry] =
pci_map_single(lp->pci_dev,
newskb->data,
PKT_BUF_SZ - 2,
PKT_BUF_SIZE,
PCI_DMA_FROMDEVICE);
rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
rx_in_place = 1;
} else
skb = NULL;
} else {
skb = dev_alloc_skb(pkt_len + 2);
skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
}
if (skb == NULL) {
@ -1250,7 +1254,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
}
skb->dev = dev;
if (!rx_in_place) {
skb_reserve(skb, 2); /* 16 byte align */
skb_reserve(skb, NET_IP_ALIGN);
skb_put(skb, pkt_len); /* Make room */
pci_dma_sync_single_for_cpu(lp->pci_dev,
lp->rx_dma_addr[entry],
@ -1291,7 +1295,7 @@ static int pcnet32_rx(struct net_device *dev, int budget)
* The docs say that the buffer length isn't touched, but Andrew
* Boyd of QNX reports that some revs of the 79C965 clear it.
*/
rxp->buf_length = cpu_to_le16(2 - PKT_BUF_SZ);
rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE);
wmb(); /* Make sure owner changes after others are visible */
rxp->status = cpu_to_le16(0x8000);
entry = (++lp->cur_rx) & lp->rx_mod_mask;
@ -1774,8 +1778,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
if (pcnet32_debug & NETIF_MSG_PROBE) {
for (i = 0; i < 6; i++)
printk(" %2.2x", dev->dev_addr[i]);
DECLARE_MAC_BUF(mac);
printk(" %s", print_mac(mac, dev->dev_addr));
/* Version 0x2623 and 0x2624 */
if (((chip_version + 1) & 0xfffe) == 0x2624) {
@ -2396,7 +2400,7 @@ static int pcnet32_init_ring(struct net_device *dev)
if (rx_skbuff == NULL) {
if (!
(rx_skbuff = lp->rx_skbuff[i] =
dev_alloc_skb(PKT_BUF_SZ))) {
dev_alloc_skb(PKT_BUF_SKB))) {
/* there is not much, we can do at this point */
if (netif_msg_drv(lp))
printk(KERN_ERR
@ -2404,16 +2408,16 @@ static int pcnet32_init_ring(struct net_device *dev)
dev->name);
return -1;
}
skb_reserve(rx_skbuff, 2);
skb_reserve(rx_skbuff, NET_IP_ALIGN);
}
rmb();
if (lp->rx_dma_addr[i] == 0)
lp->rx_dma_addr[i] =
pci_map_single(lp->pci_dev, rx_skbuff->data,
PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
lp->rx_ring[i].buf_length = cpu_to_le16(2 - PKT_BUF_SZ);
lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
wmb(); /* Make sure owner changes after all others are visible */
lp->rx_ring[i].status = cpu_to_le16(0x8000);
}


@ -236,12 +236,12 @@ module_init(fixed_mdio_bus_init);
static void __exit fixed_mdio_bus_exit(void)
{
struct fixed_mdio_bus *fmb = &platform_fmb;
struct fixed_phy *fp;
struct fixed_phy *fp, *tmp;
mdiobus_unregister(&fmb->mii_bus);
platform_device_unregister(pdev);
list_for_each_entry(fp, &fmb->phys, node) {
list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
list_del(&fp->node);
kfree(fp);
}
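Editor's note (our explanation, not from the patch): list_for_each_entry() advances by reading the current entry's ->next pointer after the loop body runs, so kfree()ing the entry inside the body makes the next step read freed memory. The _safe variant caches the next entry in 'tmp' before the body executes:

	/* sketch of the difference:
	 *   list_for_each_entry(fp, head, node)            advance reads fp->node.next AFTER the body
	 *   list_for_each_entry_safe(fp, tmp, head, node)  tmp is fetched BEFORE the body, so
	 *                                                   list_del(&fp->node); kfree(fp); is safe
	 */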

File diff suppressed because it is too large.


@ -35,198 +35,323 @@
#define GELIC_NET_MAX_MTU VLAN_ETH_FRAME_LEN
#define GELIC_NET_MIN_MTU VLAN_ETH_ZLEN
#define GELIC_NET_RXBUF_ALIGN 128
#define GELIC_NET_RX_CSUM_DEFAULT 1 /* hw chksum */
#define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */
#define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ
#define GELIC_NET_NAPI_WEIGHT (GELIC_NET_RX_DESCRIPTORS)
#define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL
#define GELIC_NET_VLAN_POS (VLAN_ETH_ALEN * 2)
#define GELIC_NET_VLAN_MAX 4
#define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */
enum gelic_net_int0_status {
GELIC_NET_GDTDCEINT = 24,
GELIC_NET_GRFANMINT = 28,
};
/* virtual interrupt status register bits */
/* INT1 */
#define GELIC_CARD_TX_RAM_FULL_ERR 0x0000000000000001L
#define GELIC_CARD_RX_RAM_FULL_ERR 0x0000000000000002L
#define GELIC_CARD_TX_SHORT_FRAME_ERR 0x0000000000000004L
#define GELIC_CARD_TX_INVALID_DESCR_ERR 0x0000000000000008L
#define GELIC_CARD_RX_FIFO_FULL_ERR 0x0000000000002000L
#define GELIC_CARD_RX_DESCR_CHAIN_END 0x0000000000004000L
#define GELIC_CARD_RX_INVALID_DESCR_ERR 0x0000000000008000L
#define GELIC_CARD_TX_RESPONCE_ERR 0x0000000000010000L
#define GELIC_CARD_RX_RESPONCE_ERR 0x0000000000100000L
#define GELIC_CARD_TX_PROTECTION_ERR 0x0000000000400000L
#define GELIC_CARD_RX_PROTECTION_ERR 0x0000000004000000L
#define GELIC_CARD_TX_TCP_UDP_CHECKSUM_ERR 0x0000000008000000L
#define GELIC_CARD_PORT_STATUS_CHANGED 0x0000000020000000L
#define GELIC_CARD_WLAN_EVENT_RECEIVED 0x0000000040000000L
#define GELIC_CARD_WLAN_COMMAND_COMPLETED 0x0000000080000000L
/* INT 0 */
#define GELIC_CARD_TX_FLAGGED_DESCR 0x0004000000000000L
#define GELIC_CARD_RX_FLAGGED_DESCR 0x0040000000000000L
#define GELIC_CARD_TX_TRANSFER_END 0x0080000000000000L
#define GELIC_CARD_TX_DESCR_CHAIN_END 0x0100000000000000L
#define GELIC_CARD_NUMBER_OF_RX_FRAME 0x1000000000000000L
#define GELIC_CARD_ONE_TIME_COUNT_TIMER 0x4000000000000000L
#define GELIC_CARD_FREE_RUN_COUNT_TIMER 0x8000000000000000L
/* GHIINT1STS bits */
enum gelic_net_int1_status {
GELIC_NET_GDADCEINT = 14,
};
/* initial interrupt mask */
#define GELIC_CARD_TXINT GELIC_CARD_TX_DESCR_CHAIN_END
/* interrupt mask */
#define GELIC_NET_TXINT (1L << (GELIC_NET_GDTDCEINT + 32))
#define GELIC_NET_RXINT0 (1L << (GELIC_NET_GRFANMINT + 32))
#define GELIC_NET_RXINT1 (1L << GELIC_NET_GDADCEINT)
#define GELIC_NET_RXINT (GELIC_NET_RXINT0 | GELIC_NET_RXINT1)
#define GELIC_CARD_RXINT (GELIC_CARD_RX_DESCR_CHAIN_END | \
GELIC_CARD_NUMBER_OF_RX_FRAME)
/* RX descriptor data_status bits */
#define GELIC_NET_RXDMADU 0x80000000 /* destination MAC addr unknown */
#define GELIC_NET_RXLSTFBF 0x40000000 /* last frame buffer */
#define GELIC_NET_RXIPCHK 0x20000000 /* IP checksum performed */
#define GELIC_NET_RXTCPCHK 0x10000000 /* TCP/UDP checksup performed */
#define GELIC_NET_RXIPSPKT 0x08000000 /* IPsec packet */
#define GELIC_NET_RXIPSAHPRT 0x04000000 /* IPsec AH protocol performed */
#define GELIC_NET_RXIPSESPPRT 0x02000000 /* IPsec ESP protocol performed */
#define GELIC_NET_RXSESPAH 0x01000000 /*
* IPsec ESP protocol auth
* performed
*/
enum gelic_descr_rx_status {
GELIC_DESCR_RXDMADU = 0x80000000, /* destination MAC addr unknown */
GELIC_DESCR_RXLSTFBF = 0x40000000, /* last frame buffer */
GELIC_DESCR_RXIPCHK = 0x20000000, /* IP checksum performed */
GELIC_DESCR_RXTCPCHK = 0x10000000, /* TCP/UDP checksup performed */
GELIC_DESCR_RXWTPKT = 0x00C00000, /*
* wakeup trigger packet
* 01: Magic Packet (TM)
* 10: ARP packet
* 11: Multicast MAC addr
*/
GELIC_DESCR_RXVLNPKT = 0x00200000, /* VLAN packet */
/* bit 20..16 reserved */
GELIC_DESCR_RXRRECNUM = 0x0000ff00, /* reception receipt number */
/* bit 7..0 reserved */
};
#define GELIC_NET_RXWTPKT 0x00C00000 /*
* wakeup trigger packet
* 01: Magic Packet (TM)
* 10: ARP packet
* 11: Multicast MAC addr
*/
#define GELIC_NET_RXVLNPKT 0x00200000 /* VLAN packet */
/* bit 20..16 reserved */
#define GELIC_NET_RXRRECNUM 0x0000ff00 /* reception receipt number */
#define GELIC_NET_RXRRECNUM_SHIFT 8
/* bit 7..0 reserved */
#define GELIC_DESCR_DATA_STATUS_CHK_MASK \
(GELIC_DESCR_RXIPCHK | GELIC_DESCR_RXTCPCHK)
#define GELIC_NET_TXDESC_TAIL 0
#define GELIC_NET_DATA_STATUS_CHK_MASK (GELIC_NET_RXIPCHK | GELIC_NET_RXTCPCHK)
/* TX descriptor data_status bits */
enum gelic_descr_tx_status {
GELIC_DESCR_TX_TAIL = 0x00000001, /* gelic treated this
* descriptor was end of
* a tx frame
*/
};
/* RX descriptor data_error bits */
/* bit 31 reserved */
#define GELIC_NET_RXALNERR 0x40000000 /* alignement error 10/100M */
#define GELIC_NET_RXOVERERR 0x20000000 /* oversize error */
#define GELIC_NET_RXRNTERR 0x10000000 /* Runt error */
#define GELIC_NET_RXIPCHKERR 0x08000000 /* IP checksum error */
#define GELIC_NET_RXTCPCHKERR 0x04000000 /* TCP/UDP checksum error */
#define GELIC_NET_RXUMCHSP 0x02000000 /* unmatched sp on sp */
#define GELIC_NET_RXUMCHSPI 0x01000000 /* unmatched SPI on SAD */
#define GELIC_NET_RXUMCHSAD 0x00800000 /* unmatched SAD */
#define GELIC_NET_RXIPSAHERR 0x00400000 /* auth error on AH protocol
* processing */
#define GELIC_NET_RXIPSESPAHERR 0x00200000 /* auth error on ESP protocol
* processing */
#define GELIC_NET_RXDRPPKT 0x00100000 /* drop packet */
#define GELIC_NET_RXIPFMTERR 0x00080000 /* IP packet format error */
/* bit 18 reserved */
#define GELIC_NET_RXDATAERR 0x00020000 /* IP packet format error */
#define GELIC_NET_RXCALERR 0x00010000 /* cariier extension length
* error */
#define GELIC_NET_RXCREXERR 0x00008000 /* carrier extention error */
#define GELIC_NET_RXMLTCST 0x00004000 /* multicast address frame */
/* bit 13..0 reserved */
#define GELIC_NET_DATA_ERROR_CHK_MASK \
(GELIC_NET_RXIPCHKERR | GELIC_NET_RXTCPCHKERR)
/* RX descriptor data error bits */
enum gelic_descr_rx_error {
/* bit 31 reserved */
GELIC_DESCR_RXALNERR = 0x40000000, /* alignement error 10/100M */
GELIC_DESCR_RXOVERERR = 0x20000000, /* oversize error */
GELIC_DESCR_RXRNTERR = 0x10000000, /* Runt error */
GELIC_DESCR_RXIPCHKERR = 0x08000000, /* IP checksum error */
GELIC_DESCR_RXTCPCHKERR = 0x04000000, /* TCP/UDP checksum error */
GELIC_DESCR_RXDRPPKT = 0x00100000, /* drop packet */
GELIC_DESCR_RXIPFMTERR = 0x00080000, /* IP packet format error */
/* bit 18 reserved */
GELIC_DESCR_RXDATAERR = 0x00020000, /* IP packet format error */
GELIC_DESCR_RXCALERR = 0x00010000, /* cariier extension length
* error */
GELIC_DESCR_RXCREXERR = 0x00008000, /* carrier extention error */
GELIC_DESCR_RXMLTCST = 0x00004000, /* multicast address frame */
/* bit 13..0 reserved */
};
#define GELIC_DESCR_DATA_ERROR_CHK_MASK \
(GELIC_DESCR_RXIPCHKERR | GELIC_DESCR_RXTCPCHKERR)
/* DMA command and status (RX and TX)*/
enum gelic_descr_dma_status {
GELIC_DESCR_DMA_COMPLETE = 0x00000000, /* used in tx */
GELIC_DESCR_DMA_BUFFER_FULL = 0x00000000, /* used in rx */
GELIC_DESCR_DMA_RESPONSE_ERROR = 0x10000000, /* used in rx, tx */
GELIC_DESCR_DMA_PROTECTION_ERROR = 0x20000000, /* used in rx, tx */
GELIC_DESCR_DMA_FRAME_END = 0x40000000, /* used in rx */
GELIC_DESCR_DMA_FORCE_END = 0x50000000, /* used in rx, tx */
GELIC_DESCR_DMA_CARDOWNED = 0xa0000000, /* used in rx, tx */
GELIC_DESCR_DMA_NOT_IN_USE = 0xb0000000, /* any other value */
};
#define GELIC_DESCR_DMA_STAT_MASK (0xf0000000)
/* tx descriptor command and status */
#define GELIC_NET_DMAC_CMDSTAT_NOCS 0xa0080000 /* middle of frame */
#define GELIC_NET_DMAC_CMDSTAT_TCPCS 0xa00a0000
#define GELIC_NET_DMAC_CMDSTAT_UDPCS 0xa00b0000
#define GELIC_NET_DMAC_CMDSTAT_END_FRAME 0x00040000 /* end of frame */
enum gelic_descr_tx_dma_status {
/* [19] */
GELIC_DESCR_TX_DMA_IKE = 0x00080000, /* IPSEC off */
/* [18] */
GELIC_DESCR_TX_DMA_FRAME_TAIL = 0x00040000, /* last descriptor of
* the packet
*/
/* [17..16] */
GELIC_DESCR_TX_DMA_TCP_CHKSUM = 0x00020000, /* TCP packet */
GELIC_DESCR_TX_DMA_UDP_CHKSUM = 0x00030000, /* UDP packet */
GELIC_DESCR_TX_DMA_NO_CHKSUM = 0x00000000, /* no checksum */
#define GELIC_NET_DMAC_CMDSTAT_RXDCEIS 0x00000002 /* descriptor chain end
* interrupt status */
#define GELIC_NET_DMAC_CMDSTAT_CHAIN_END 0x00000002 /* RXDCEIS:DMA stopped */
#define GELIC_NET_DESCR_IND_PROC_SHIFT 28
#define GELIC_NET_DESCR_IND_PROC_MASKO 0x0fffffff
enum gelic_net_descr_status {
GELIC_NET_DESCR_COMPLETE = 0x00, /* used in tx */
GELIC_NET_DESCR_BUFFER_FULL = 0x00, /* used in rx */
GELIC_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */
GELIC_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */
GELIC_NET_DESCR_FRAME_END = 0x04, /* used in rx */
GELIC_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */
GELIC_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */
GELIC_NET_DESCR_NOT_IN_USE = 0x0b /* any other value */
/* [1] */
GELIC_DESCR_TX_DMA_CHAIN_END = 0x00000002, /* DMA terminated
* due to chain end
*/
};
#define GELIC_DESCR_DMA_CMD_NO_CHKSUM \
(GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \
GELIC_DESCR_TX_DMA_NO_CHKSUM)
#define GELIC_DESCR_DMA_CMD_TCP_CHKSUM \
(GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \
GELIC_DESCR_TX_DMA_TCP_CHKSUM)
#define GELIC_DESCR_DMA_CMD_UDP_CHKSUM \
(GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \
GELIC_DESCR_TX_DMA_UDP_CHKSUM)
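Editor's note: a hedged sketch of how a transmit path would pick among these composites from skb->ip_summed and the IP protocol; the helper name is ours, not necessarily the driver's (assumes linux/ip.h and linux/if_ether.h). The chosen value would then be written into dmac_cmd_status with cpu_to_be32(), since that field is big endian:

	static u32 gelic_tx_cmdstat_for(const struct sk_buff *skb)
	{
		/* no offload requested, or not an IPv4 frame: plain send */
		if (skb->ip_summed != CHECKSUM_PARTIAL ||
		    skb->protocol != htons(ETH_P_IP))
			return GELIC_DESCR_DMA_CMD_NO_CHKSUM;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			return GELIC_DESCR_DMA_CMD_TCP_CHKSUM;
		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			return GELIC_DESCR_DMA_CMD_UDP_CHKSUM;

		return GELIC_DESCR_DMA_CMD_NO_CHKSUM;
	}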
enum gelic_descr_rx_dma_status {
/* [ 1 ] */
GELIC_DESCR_RX_DMA_CHAIN_END = 0x00000002, /* DMA terminated
* due to chain end
*/
};
/* for lv1_net_control */
#define GELIC_NET_GET_MAC_ADDRESS 0x0000000000000001
#define GELIC_NET_GET_ETH_PORT_STATUS 0x0000000000000002
#define GELIC_NET_SET_NEGOTIATION_MODE 0x0000000000000003
#define GELIC_NET_GET_VLAN_ID 0x0000000000000004
enum gelic_lv1_net_control_code {
GELIC_LV1_GET_MAC_ADDRESS = 1,
GELIC_LV1_GET_ETH_PORT_STATUS = 2,
GELIC_LV1_SET_NEGOTIATION_MODE = 3,
GELIC_LV1_GET_VLAN_ID = 4,
GELIC_LV1_GET_CHANNEL = 6,
GELIC_LV1_POST_WLAN_CMD = 9,
GELIC_LV1_GET_WLAN_CMD_RESULT = 10,
GELIC_LV1_GET_WLAN_EVENT = 11
};
#define GELIC_NET_LINK_UP 0x0000000000000001
#define GELIC_NET_FULL_DUPLEX 0x0000000000000002
#define GELIC_NET_AUTO_NEG 0x0000000000000004
#define GELIC_NET_SPEED_10 0x0000000000000010
#define GELIC_NET_SPEED_100 0x0000000000000020
#define GELIC_NET_SPEED_1000 0x0000000000000040
/* status returened from GET_ETH_PORT_STATUS */
enum gelic_lv1_ether_port_status {
GELIC_LV1_ETHER_LINK_UP = 0x0000000000000001L,
GELIC_LV1_ETHER_FULL_DUPLEX = 0x0000000000000002L,
GELIC_LV1_ETHER_AUTO_NEG = 0x0000000000000004L,
#define GELIC_NET_VLAN_ALL 0x0000000000000001
#define GELIC_NET_VLAN_WIRED 0x0000000000000002
#define GELIC_NET_VLAN_WIRELESS 0x0000000000000003
#define GELIC_NET_VLAN_PSP 0x0000000000000004
#define GELIC_NET_VLAN_PORT0 0x0000000000000010
#define GELIC_NET_VLAN_PORT1 0x0000000000000011
#define GELIC_NET_VLAN_PORT2 0x0000000000000012
#define GELIC_NET_VLAN_DAEMON_CLIENT_BSS 0x0000000000000013
#define GELIC_NET_VLAN_LIBERO_CLIENT_BSS 0x0000000000000014
#define GELIC_NET_VLAN_NO_ENTRY -6
GELIC_LV1_ETHER_SPEED_10 = 0x0000000000000010L,
GELIC_LV1_ETHER_SPEED_100 = 0x0000000000000020L,
GELIC_LV1_ETHER_SPEED_1000 = 0x0000000000000040L,
GELIC_LV1_ETHER_SPEED_MASK = 0x0000000000000070L
};
#define GELIC_NET_PORT 2 /* for port status */
enum gelic_lv1_vlan_index {
/* for outgoing packets */
GELIC_LV1_VLAN_TX_ETHERNET = 0x0000000000000002L,
GELIC_LV1_VLAN_TX_WIRELESS = 0x0000000000000003L,
/* for incoming packets */
GELIC_LV1_VLAN_RX_ETHERNET = 0x0000000000000012L,
GELIC_LV1_VLAN_RX_WIRELESS = 0x0000000000000013L
};
/* size of hardware part of gelic descriptor */
#define GELIC_NET_DESCR_SIZE (32)
struct gelic_net_descr {
#define GELIC_DESCR_SIZE (32)
enum gelic_port_type {
GELIC_PORT_ETHERNET = 0,
GELIC_PORT_WIRELESS = 1,
GELIC_PORT_MAX
};
struct gelic_descr {
/* as defined by the hardware */
u32 buf_addr;
u32 buf_size;
u32 next_descr_addr;
u32 dmac_cmd_status;
u32 result_size;
u32 valid_size; /* all zeroes for tx */
u32 data_status;
u32 data_error; /* all zeroes for tx */
__be32 buf_addr;
__be32 buf_size;
__be32 next_descr_addr;
__be32 dmac_cmd_status;
__be32 result_size;
__be32 valid_size; /* all zeroes for tx */
__be32 data_status;
__be32 data_error; /* all zeroes for tx */
/* used in the driver */
struct sk_buff *skb;
dma_addr_t bus_addr;
struct gelic_net_descr *next;
struct gelic_net_descr *prev;
struct vlan_ethhdr vlan;
struct gelic_descr *next;
struct gelic_descr *prev;
} __attribute__((aligned(32)));
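Editor's note: with the hardware fields now declared __be32, every access goes through the byte-order helpers. An illustrative check of descriptor ownership using the mask defined above (the helper name is ours):

	static inline int gelic_descr_owned_by_card(const struct gelic_descr *descr)
	{
		u32 status = be32_to_cpu(descr->dmac_cmd_status) &
			     GELIC_DESCR_DMA_STAT_MASK;

		return status == GELIC_DESCR_DMA_CARDOWNED;	/* card still owns it */
	}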
struct gelic_net_descr_chain {
struct gelic_descr_chain {
/* we walk from tail to head */
struct gelic_net_descr *head;
struct gelic_net_descr *tail;
struct gelic_descr *head;
struct gelic_descr *tail;
};
struct gelic_net_card {
struct net_device *netdev;
struct gelic_vlan_id {
u16 tx;
u16 rx;
};
struct gelic_card {
struct napi_struct napi;
struct net_device *netdev[GELIC_PORT_MAX];
/*
* hypervisor requires irq_status should be
* 8 bytes aligned, but u64 member is
* always disposed in that manner
*/
u64 irq_status;
u64 ghiintmask;
u64 irq_mask;
struct ps3_system_bus_device *dev;
u32 vlan_id[GELIC_NET_VLAN_MAX];
int vlan_index;
struct gelic_vlan_id vlan[GELIC_PORT_MAX];
int vlan_required;
struct gelic_net_descr_chain tx_chain;
struct gelic_net_descr_chain rx_chain;
struct gelic_descr_chain tx_chain;
struct gelic_descr_chain rx_chain;
int rx_dma_restart_required;
/* gurad dmac descriptor chain*/
spinlock_t chain_lock;
int rx_csum;
/* guard tx_dma_progress */
spinlock_t tx_dma_lock;
/*
* tx_lock guards tx descriptor list and
* tx_dma_progress.
*/
spinlock_t tx_lock;
int tx_dma_progress;
struct work_struct tx_timeout_task;
atomic_t tx_timeout_task_counter;
wait_queue_head_t waitq;
struct gelic_net_descr *tx_top, *rx_top;
struct gelic_net_descr descr[0];
/* only first user should up the card */
struct semaphore updown_lock;
atomic_t users;
u64 ether_port_status;
/* original address returned by kzalloc */
void *unalign;
/*
* each netdevice has copy of irq
*/
unsigned int irq;
struct gelic_descr *tx_top, *rx_top;
struct gelic_descr descr[0]; /* must be the last */
};
struct gelic_port {
struct gelic_card *card;
struct net_device *netdev;
enum gelic_port_type type;
long priv[0]; /* long for alignment */
};
extern unsigned long p_to_lp(long pa);
static inline struct gelic_card *port_to_card(struct gelic_port *p)
{
return p->card;
}
static inline struct net_device *port_to_netdev(struct gelic_port *p)
{
return p->netdev;
}
static inline struct gelic_card *netdev_card(struct net_device *d)
{
return ((struct gelic_port *)netdev_priv(d))->card;
}
static inline struct gelic_port *netdev_port(struct net_device *d)
{
return (struct gelic_port *)netdev_priv(d);
}
static inline struct device *ctodev(struct gelic_card *card)
{
return &card->dev->core;
}
static inline u64 bus_id(struct gelic_card *card)
{
return card->dev->bus_id;
}
static inline u64 dev_id(struct gelic_card *card)
{
return card->dev->dev_id;
}
static inline void *port_priv(struct gelic_port *port)
{
return port->priv;
}
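Editor's note: illustrative use of the accessors above to hop from a net_device to its port, card, and per-port private area (the function and local names are ours):

	static void example(struct net_device *netdev)
	{
		struct gelic_port *port = netdev_port(netdev);
		struct gelic_card *card = port_to_card(port);

		/* the wireless code keeps its state in the trailing priv[] area */
		dev_dbg(ctodev(card), "%s: bus %llu dev %llu priv %p\n", netdev->name,
			(unsigned long long)bus_id(card),
			(unsigned long long)dev_id(card),
			port_priv(port));
	}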
extern int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
/* shared netdev ops */
extern void gelic_card_up(struct gelic_card *card);
extern void gelic_card_down(struct gelic_card *card);
extern int gelic_net_open(struct net_device *netdev);
extern int gelic_net_stop(struct net_device *netdev);
extern int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
extern void gelic_net_set_multi(struct net_device *netdev);
extern void gelic_net_tx_timeout(struct net_device *netdev);
extern int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
extern int gelic_net_setup_netdev(struct net_device *netdev,
struct gelic_card *card);
/* shared ethtool ops */
extern void gelic_net_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info);
extern u32 gelic_net_get_rx_csum(struct net_device *netdev);
extern int gelic_net_set_rx_csum(struct net_device *netdev, u32 data);
extern void gelic_net_poll_controller(struct net_device *netdev);
#endif /* _GELIC_NET_H */

File diff suppressed because it is too large.


@ -0,0 +1,329 @@
/*
* PS3 gelic network driver.
*
* Copyright (C) 2007 Sony Computer Entertainment Inc.
* Copyright 2007 Sony Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _GELIC_WIRELESS_H
#define _GELIC_WIRELESS_H
#include <linux/wireless.h>
#include <net/iw_handler.h>
/* return value from GELIC_LV1_GET_WLAN_EVENT netcontrol */
enum gelic_lv1_wl_event {
GELIC_LV1_WL_EVENT_DEVICE_READY = 0x01, /* Eurus ready */
GELIC_LV1_WL_EVENT_SCAN_COMPLETED = 0x02, /* Scan has completed */
GELIC_LV1_WL_EVENT_DEAUTH = 0x04, /* Deauthed by the AP */
GELIC_LV1_WL_EVENT_BEACON_LOST = 0x08, /* Beacon lost detected */
GELIC_LV1_WL_EVENT_CONNECTED = 0x10, /* Connected to AP */
GELIC_LV1_WL_EVENT_WPA_CONNECTED = 0x20, /* WPA connection */
GELIC_LV1_WL_EVENT_WPA_ERROR = 0x40, /* MIC error */
};
/* arguments for GELIC_LV1_POST_WLAN_COMMAND netcontrol */
enum gelic_eurus_command {
GELIC_EURUS_CMD_ASSOC = 1, /* association start */
GELIC_EURUS_CMD_DISASSOC = 2, /* disassociate */
GELIC_EURUS_CMD_START_SCAN = 3, /* scan start */
GELIC_EURUS_CMD_GET_SCAN = 4, /* get scan result */
GELIC_EURUS_CMD_SET_COMMON_CFG = 5, /* set common config */
GELIC_EURUS_CMD_GET_COMMON_CFG = 6, /* set common config */
GELIC_EURUS_CMD_SET_WEP_CFG = 7, /* set WEP config */
GELIC_EURUS_CMD_GET_WEP_CFG = 8, /* get WEP config */
GELIC_EURUS_CMD_SET_WPA_CFG = 9, /* set WPA config */
GELIC_EURUS_CMD_GET_WPA_CFG = 10, /* get WPA config */
GELIC_EURUS_CMD_GET_RSSI_CFG = 11, /* get RSSI info. */
GELIC_EURUS_CMD_MAX_INDEX
};
/* for GELIC_EURUS_CMD_COMMON_CFG */
enum gelic_eurus_bss_type {
GELIC_EURUS_BSS_INFRA = 0,
GELIC_EURUS_BSS_ADHOC = 1, /* not supported */
};
enum gelic_eurus_auth_method {
GELIC_EURUS_AUTH_OPEN = 0, /* FIXME: WLAN_AUTH_OPEN */
GELIC_EURUS_AUTH_SHARED = 1, /* not supported */
};
enum gelic_eurus_opmode {
GELIC_EURUS_OPMODE_11BG = 0, /* 802.11b/g */
GELIC_EURUS_OPMODE_11B = 1, /* 802.11b only */
GELIC_EURUS_OPMODE_11G = 2, /* 802.11g only */
};
struct gelic_eurus_common_cfg {
/* all fields are big endian */
u16 scan_index;
u16 bss_type; /* infra or adhoc */
u16 auth_method; /* shared key or open */
u16 op_mode; /* B/G */
} __attribute__((packed));
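/*
 * Illustrative sketch, not part of the header: because every field is big
 * endian, callers convert on assignment (a no-op on the PS3 itself, but it
 * keeps the definition honest).  The values are only an example of an open,
 * infrastructure, 802.11b/g setup.
 */
static inline void gelic_example_fill_common(struct gelic_eurus_common_cfg *cfg)
{
	cfg->scan_index  = cpu_to_be16(0);
	cfg->bss_type    = cpu_to_be16(GELIC_EURUS_BSS_INFRA);
	cfg->auth_method = cpu_to_be16(GELIC_EURUS_AUTH_OPEN);
	cfg->op_mode     = cpu_to_be16(GELIC_EURUS_OPMODE_11BG);
}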
/* for GELIC_EURUS_CMD_WEP_CFG */
enum gelic_eurus_wep_security {
GELIC_EURUS_WEP_SEC_NONE = 0,
GELIC_EURUS_WEP_SEC_40BIT = 1,
GELIC_EURUS_WEP_SEC_104BIT = 2,
};
struct gelic_eurus_wep_cfg {
/* all fields are big endian */
u16 security;
u8 key[4][16];
} __attribute__((packed));
/* for GELIC_EURUS_CMD_WPA_CFG */
enum gelic_eurus_wpa_security {
GELIC_EURUS_WPA_SEC_NONE = 0x0000,
/* group=TKIP, pairwise=TKIP */
GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP = 0x0001,
/* group=AES, pairwise=AES */
GELIC_EURUS_WPA_SEC_WPA_AES_AES = 0x0002,
/* group=TKIP, pairwise=TKIP */
GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP = 0x0004,
/* group=AES, pairwise=AES */
GELIC_EURUS_WPA_SEC_WPA2_AES_AES = 0x0008,
/* group=TKIP, pairwise=AES */
GELIC_EURUS_WPA_SEC_WPA_TKIP_AES = 0x0010,
/* group=TKIP, pairwise=AES */
GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES = 0x0020,
};
enum gelic_eurus_wpa_psk_type {
GELIC_EURUS_WPA_PSK_PASSPHRASE = 0, /* passphrase string */
GELIC_EURUS_WPA_PSK_BIN = 1, /* 32 bytes binary key */
};
#define GELIC_WL_EURUS_PSK_MAX_LEN 64
#define WPA_PSK_LEN 32 /* WPA spec says 256bit */
struct gelic_eurus_wpa_cfg {
/* all fields are big endian */
u16 security;
u16 psk_type; /* psk key encoding type */
u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN]; /* psk key; hex or passphrase */
} __attribute__((packed));
/* for GELIC_EURUS_CMD_{START,GET}_SCAN */
enum gelic_eurus_scan_capability {
GELIC_EURUS_SCAN_CAP_ADHOC = 0x0000,
GELIC_EURUS_SCAN_CAP_INFRA = 0x0001,
GELIC_EURUS_SCAN_CAP_MASK = 0x0001,
};
enum gelic_eurus_scan_sec_type {
GELIC_EURUS_SCAN_SEC_NONE = 0x0000,
GELIC_EURUS_SCAN_SEC_WEP = 0x0100,
GELIC_EURUS_SCAN_SEC_WPA = 0x0200,
GELIC_EURUS_SCAN_SEC_WPA2 = 0x0400,
GELIC_EURUS_SCAN_SEC_MASK = 0x0f00,
};
enum gelic_eurus_scan_sec_wep_type {
GELIC_EURUS_SCAN_SEC_WEP_UNKNOWN = 0x0000,
GELIC_EURUS_SCAN_SEC_WEP_40 = 0x0001,
GELIC_EURUS_SCAN_SEC_WEP_104 = 0x0002,
GELIC_EURUS_SCAN_SEC_WEP_MASK = 0x0003,
};
enum gelic_eurus_scan_sec_wpa_type {
GELIC_EURUS_SCAN_SEC_WPA_UNKNOWN = 0x0000,
GELIC_EURUS_SCAN_SEC_WPA_TKIP = 0x0001,
GELIC_EURUS_SCAN_SEC_WPA_AES = 0x0002,
GELIC_EURUS_SCAN_SEC_WPA_MASK = 0x0003,
};
/*
* hw BSS information structure returned from GELIC_EURUS_CMD_GET_SCAN
*/
struct gelic_eurus_scan_info {
/* all fields are big endian */
__be16 size;
__be16 rssi; /* percentage */
__be16 channel; /* channel number */
__be16 beacon_period; /* FIXME: in msec unit */
__be16 capability;
__be16 security;
u8 bssid[8]; /* last ETH_ALEN are valid. bssid[0],[1] are unused */
u8 essid[32]; /* IW_ESSID_MAX_SIZE */
u8 rate[16]; /* first MAX_RATES_LENGTH(12) are valid */
u8 ext_rate[16]; /* first MAX_RATES_EX_LENGTH(16) are valid */
__be32 reserved1;
__be32 reserved2;
__be32 reserved3;
__be32 reserved4;
u8 elements[0]; /* ie */
} __attribute__ ((packed));
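/*
 * Illustrative sketch, not part of the header: the security word of a scan
 * entry is decoded by masking; GELIC_EURUS_SCAN_SEC_MASK selects the
 * WEP/WPA/WPA2 class, while the low bits carry the class-specific sub-type.
 */
static inline int gelic_example_scan_is_wpa2(
			const struct gelic_eurus_scan_info *scan)
{
	u16 sec = be16_to_cpu(scan->security);

	return (sec & GELIC_EURUS_SCAN_SEC_MASK) == GELIC_EURUS_SCAN_SEC_WPA2;
}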
/* the hypervisor returns up to 16 BSS entries */
#define GELIC_EURUS_MAX_SCAN (16)
struct gelic_wl_scan_info {
struct list_head list;
struct gelic_eurus_scan_info *hwinfo;
int valid; /* set to 1 if this entry was in the latest scan
 * list from Eurus */
unsigned int eurus_index; /* index in the Eurus list */
unsigned long last_scanned; /* acquired time */
unsigned int rate_len;
unsigned int rate_ext_len;
unsigned int essid_len;
};
/* for GELIC_EURUS_CMD_GET_RSSI_CFG */
struct gelic_eurus_rssi_info {
/* big endian */
__be16 rssi;
} __attribute__ ((packed));
/* for 'stat' member of gelic_wl_info */
enum gelic_wl_info_status_bit {
GELIC_WL_STAT_CONFIGURED,
GELIC_WL_STAT_CH_INFO, /* ch info acquired */
GELIC_WL_STAT_ESSID_SET, /* ESSID specified by userspace */
GELIC_WL_STAT_BSSID_SET, /* BSSID specified by userspace */
GELIC_WL_STAT_WPA_PSK_SET, /* PMK specified by userspace */
GELIC_WL_STAT_WPA_LEVEL_SET, /* WEP or WPA[2] selected */
};
/* for 'scan_stat' member of gelic_wl_info */
enum gelic_wl_scan_state {
/* just initialized, or failed to get the last scan result */
GELIC_WL_SCAN_STAT_INIT,
/* scan request issued, accepted or chip is scanning */
GELIC_WL_SCAN_STAT_SCANNING,
/* scan results retrieved */
GELIC_WL_SCAN_STAT_GOT_LIST,
};
/* for 'cipher_method' */
enum gelic_wl_cipher_method {
GELIC_WL_CIPHER_NONE,
GELIC_WL_CIPHER_WEP,
GELIC_WL_CIPHER_TKIP,
GELIC_WL_CIPHER_AES,
};
/* for 'wpa_level' */
enum gelic_wl_wpa_level {
GELIC_WL_WPA_LEVEL_NONE,
GELIC_WL_WPA_LEVEL_WPA,
GELIC_WL_WPA_LEVEL_WPA2,
};
/* for 'assoc_stat' */
enum gelic_wl_assoc_state {
GELIC_WL_ASSOC_STAT_DISCONN,
GELIC_WL_ASSOC_STAT_ASSOCIATING,
GELIC_WL_ASSOC_STAT_ASSOCIATED,
};
/* part of private data alloc_etherdev() allocated */
#define GELIC_WEP_KEYS 4
struct gelic_wl_info {
/* bss list */
struct semaphore scan_lock;
struct list_head network_list;
struct list_head network_free_list;
struct gelic_wl_scan_info *networks;
unsigned long scan_age; /* last scanned time */
enum gelic_wl_scan_state scan_stat;
struct completion scan_done;
/* eurus command queue */
struct workqueue_struct *eurus_cmd_queue;
struct completion cmd_done_intr;
/* eurus event handling */
struct workqueue_struct *event_queue;
struct delayed_work event_work;
/* wl status bits */
unsigned long stat;
enum gelic_eurus_auth_method auth_method; /* open/shared */
enum gelic_wl_cipher_method group_cipher_method;
enum gelic_wl_cipher_method pairwise_cipher_method;
enum gelic_wl_wpa_level wpa_level; /* wpa/wpa2 */
/* association handling */
struct semaphore assoc_stat_lock;
struct delayed_work assoc_work;
enum gelic_wl_assoc_state assoc_stat;
struct completion assoc_done;
spinlock_t lock;
u16 ch_info; /* available channels. bit0 = ch1 */
/* WEP keys */
u8 key[GELIC_WEP_KEYS][IW_ENCODING_TOKEN_MAX];
unsigned long key_enabled;
unsigned int key_len[GELIC_WEP_KEYS];
unsigned int current_key;
/* WPA PSK */
u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN];
enum gelic_eurus_wpa_psk_type psk_type;
unsigned int psk_len;
u8 essid[IW_ESSID_MAX_SIZE];
u8 bssid[ETH_ALEN]; /* userland requested */
u8 active_bssid[ETH_ALEN]; /* associated bssid */
unsigned int essid_len;
/* buffer for hypervisor IO */
void *buf;
struct iw_public_data wireless_data;
struct iw_statistics iwstat;
};
#define GELIC_WL_BSS_MAX_ENT 32
#define GELIC_WL_ASSOC_RETRY 50
static inline struct gelic_port *wl_port(struct gelic_wl_info *wl)
{
return container_of((void *)wl, struct gelic_port, priv);
}
static inline struct gelic_wl_info *port_wl(struct gelic_port *port)
{
return port_priv(port);
}
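/*
 * Illustrative sketch, not part of the header: wl_port()/port_wl() only work
 * because the wireless state is allocated at port->priv, so container_of()
 * can walk back from it.  'stat' holds bit numbers from
 * enum gelic_wl_info_status_bit and is meant for the atomic bitops.
 */
static inline int gelic_example_wl_configured(struct net_device *netdev)
{
	struct gelic_port *port = netdev_port(netdev);
	struct gelic_wl_info *wl = port_wl(port);

	/* round trip back to the port and test a status bit */
	return wl_port(wl) == port &&
	       test_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
}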
struct gelic_eurus_cmd {
struct work_struct work;
struct gelic_wl_info *wl;
unsigned int cmd; /* command code */
u64 tag;
u64 size;
void *buffer;
unsigned int buf_size;
struct completion done;
int status;
u64 cmd_status;
};
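/*
 * Illustrative sketch, not part of the header: the struct is shaped for the
 * driver's deferred command pattern -- package a command with a work item,
 * queue it on wl->eurus_cmd_queue and sleep on 'done' until the worker has
 * filled in status/cmd_status.  The 'worker' argument stands in for the real
 * work handler, which issues the command to the hypervisor and then calls
 * complete(&cmd->done).
 */
static inline int gelic_example_issue_cmd(struct gelic_wl_info *wl,
					  unsigned int cmd_code,
					  void *buffer, unsigned int buf_size,
					  work_func_t worker)
{
	struct gelic_eurus_cmd *cmd;
	int ret;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->cmd = cmd_code;
	cmd->buffer = buffer;
	cmd->buf_size = buf_size;
	cmd->wl = wl;
	init_completion(&cmd->done);
	INIT_WORK(&cmd->work, worker);

	queue_work(wl->eurus_cmd_queue, &cmd->work);
	wait_for_completion(&cmd->done);

	ret = cmd->status;
	kfree(cmd);
	return ret;
}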
/* private ioctls to pass PSK */
#define GELIC_WL_PRIV_SET_PSK (SIOCIWFIRSTPRIV + 0)
#define GELIC_WL_PRIV_GET_PSK (SIOCIWFIRSTPRIV + 1)
extern int gelic_wl_driver_probe(struct gelic_card *card);
extern int gelic_wl_driver_remove(struct gelic_card *card);
extern void gelic_wl_interrupt(struct net_device *netdev, u64 status);
#endif /* _GELIC_WIRELESS_H */

View File

@ -61,7 +61,6 @@
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (6000 * HZ / 1000)
#define TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */
/* RDC MAC I/O Size */
#define R6040_IO_SIZE 256
@ -174,8 +173,6 @@ struct r6040_private {
struct net_device *dev;
struct mii_if_info mii_if;
struct napi_struct napi;
struct net_device_stats stats;
u16 napi_rx_running;
void __iomem *base;
};
@ -235,17 +232,53 @@ static void mdio_write(struct net_device *dev, int mii_id, int reg, int val)
phy_write(ioaddr, lp->phy_addr, reg, val);
}
static void r6040_tx_timeout(struct net_device *dev)
static void r6040_free_txbufs(struct net_device *dev)
{
struct r6040_private *priv = netdev_priv(dev);
struct r6040_private *lp = netdev_priv(dev);
int i;
disable_irq(dev->irq);
napi_disable(&priv->napi);
spin_lock(&priv->lock);
dev->stats.tx_errors++;
spin_unlock(&priv->lock);
for (i = 0; i < TX_DCNT; i++) {
if (lp->tx_insert_ptr->skb_ptr) {
pci_unmap_single(lp->pdev, lp->tx_insert_ptr->buf,
MAX_BUF_SIZE, PCI_DMA_TODEVICE);
dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
lp->tx_insert_ptr->skb_ptr = NULL;
}
lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp;
}
}
netif_stop_queue(dev);
static void r6040_free_rxbufs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
int i;
for (i = 0; i < RX_DCNT; i++) {
if (lp->rx_insert_ptr->skb_ptr) {
pci_unmap_single(lp->pdev, lp->rx_insert_ptr->buf,
MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
dev_kfree_skb(lp->rx_insert_ptr->skb_ptr);
lp->rx_insert_ptr->skb_ptr = NULL;
}
lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp;
}
}
static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
dma_addr_t desc_dma, int size)
{
struct r6040_descriptor *desc = desc_ring;
dma_addr_t mapping = desc_dma;
while (size-- > 0) {
mapping += sizeof(*desc);
desc->ndesc = cpu_to_le32(mapping);
desc->vndescp = desc + 1;
desc++;
}
desc--;
desc->ndesc = cpu_to_le32(desc_dma);
desc->vndescp = desc_ring;
}
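/*
 * Illustrative sketch, not part of the driver: r6040_init_ring_desc() assumes
 * the descriptors sit in one coherent DMA block, so desc_dma + n *
 * sizeof(*desc) is the bus address of entry n, and the last two statements
 * close the chain into a ring.  The real allocation happens in the probe path
 * (not shown); this only mirrors the pci_free_consistent() calls in
 * r6040_down().
 */
static int r6040_example_alloc_rx_ring(struct r6040_private *lp,
					struct pci_dev *pdev)
{
	lp->rx_ring = pci_alloc_consistent(pdev, RX_DESC_SIZE,
					   &lp->rx_ring_dma);
	if (!lp->rx_ring)
		return -ENOMEM;

	r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);
	return 0;
}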
/* Allocate skb buffer for rx descriptor */
@ -256,7 +289,7 @@ static void rx_buf_alloc(struct r6040_private *lp, struct net_device *dev)
descptr = lp->rx_insert_ptr;
while (lp->rx_free_desc < RX_DCNT) {
descptr->skb_ptr = dev_alloc_skb(MAX_BUF_SIZE);
descptr->skb_ptr = netdev_alloc_skb(dev, MAX_BUF_SIZE);
if (!descptr->skb_ptr)
break;
@ -272,6 +305,63 @@ static void rx_buf_alloc(struct r6040_private *lp, struct net_device *dev)
lp->rx_insert_ptr = descptr;
}
static void r6040_alloc_txbufs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
lp->tx_free_desc = TX_DCNT;
lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);
}
static void r6040_alloc_rxbufs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
lp->rx_free_desc = 0;
lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);
rx_buf_alloc(lp, dev);
iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0);
iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1);
}
static void r6040_tx_timeout(struct net_device *dev)
{
struct r6040_private *priv = netdev_priv(dev);
void __iomem *ioaddr = priv->base;
printk(KERN_WARNING "%s: transmit timed out, status %4.4x, PHY status "
"%4.4x\n",
dev->name, ioread16(ioaddr + MIER),
mdio_read(dev, priv->mii_if.phy_id, MII_BMSR));
disable_irq(dev->irq);
napi_disable(&priv->napi);
spin_lock(&priv->lock);
/* Clear all descriptors */
r6040_free_txbufs(dev);
r6040_free_rxbufs(dev);
r6040_alloc_txbufs(dev);
r6040_alloc_rxbufs(dev);
/* Reset MAC */
iowrite16(MAC_RST, ioaddr + MCR1);
spin_unlock(&priv->lock);
enable_irq(dev->irq);
dev->stats.tx_errors++;
netif_wake_queue(dev);
}
static struct net_device_stats *r6040_get_stats(struct net_device *dev)
{
@ -280,11 +370,11 @@ static struct net_device_stats *r6040_get_stats(struct net_device *dev)
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
priv->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1);
priv->stats.multicast += ioread8(ioaddr + ME_CNT0);
dev->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1);
dev->stats.multicast += ioread8(ioaddr + ME_CNT0);
spin_unlock_irqrestore(&priv->lock, flags);
return &priv->stats;
return &dev->stats;
}
/* Stop RDC MAC and Free the allocated resource */
@ -293,7 +383,6 @@ static void r6040_down(struct net_device *dev)
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
struct pci_dev *pdev = lp->pdev;
int i;
int limit = 2048;
u16 *adrp;
u16 cmd;
@ -313,27 +402,12 @@ static void r6040_down(struct net_device *dev)
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
free_irq(dev->irq, dev);
/* Free RX buffer */
for (i = 0; i < RX_DCNT; i++) {
if (lp->rx_insert_ptr->skb_ptr) {
pci_unmap_single(lp->pdev, lp->rx_insert_ptr->buf,
MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
dev_kfree_skb(lp->rx_insert_ptr->skb_ptr);
lp->rx_insert_ptr->skb_ptr = NULL;
}
lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp;
}
r6040_free_rxbufs(dev);
/* Free TX buffer */
for (i = 0; i < TX_DCNT; i++) {
if (lp->tx_insert_ptr->skb_ptr) {
pci_unmap_single(lp->pdev, lp->tx_insert_ptr->buf,
MAX_BUF_SIZE, PCI_DMA_TODEVICE);
dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
lp->rx_insert_ptr->skb_ptr = NULL;
}
lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp;
}
r6040_free_txbufs(dev);
/* Free Descriptor memory */
pci_free_consistent(pdev, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
@ -432,19 +506,24 @@ static int r6040_rx(struct net_device *dev, int limit)
/* Check for errors */
err = ioread16(ioaddr + MLSR);
if (err & 0x0400) priv->stats.rx_errors++;
if (err & 0x0400)
dev->stats.rx_errors++;
/* RX FIFO over-run */
if (err & 0x8000) priv->stats.rx_fifo_errors++;
if (err & 0x8000)
dev->stats.rx_fifo_errors++;
/* RX descriptor unavailable */
if (err & 0x0080) priv->stats.rx_frame_errors++;
if (err & 0x0080)
dev->stats.rx_frame_errors++;
/* Received packet with length over buffer length */
if (err & 0x0020) priv->stats.rx_over_errors++;
if (err & 0x0020)
dev->stats.rx_over_errors++;
/* Received packet too long or too short */
if (err & (0x0010|0x0008)) priv->stats.rx_length_errors++;
if (err & (0x0010 | 0x0008))
dev->stats.rx_length_errors++;
/* Received packet with CRC errors */
if (err & 0x0004) {
spin_lock(&priv->lock);
priv->stats.rx_crc_errors++;
dev->stats.rx_crc_errors++;
spin_unlock(&priv->lock);
}
@ -469,8 +548,8 @@ static int r6040_rx(struct net_device *dev, int limit)
/* Send to upper layer */
netif_receive_skb(skb_ptr);
dev->last_rx = jiffies;
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += descptr->len;
dev->stats.rx_packets++;
dev->stats.rx_bytes += descptr->len;
/* To next descriptor */
descptr = descptr->vndescp;
priv->rx_free_desc--;
@ -498,11 +577,13 @@ static void r6040_tx(struct net_device *dev)
/* Check for errors */
err = ioread16(ioaddr + MLSR);
if (err & 0x0200) priv->stats.rx_fifo_errors++;
if (err & (0x2000 | 0x4000)) priv->stats.tx_carrier_errors++;
if (err & 0x0200)
dev->stats.rx_fifo_errors++;
if (err & (0x2000 | 0x4000))
dev->stats.tx_carrier_errors++;
if (descptr->status & 0x8000)
break; /* Not complte */
break; /* Not complete */
skb_ptr = descptr->skb_ptr;
pci_unmap_single(priv->pdev, descptr->buf,
skb_ptr->len, PCI_DMA_TODEVICE);
@ -545,7 +626,6 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
u16 status;
int handled = 1;
/* Mask off RDC MAC interrupt */
iowrite16(MSK_INT, ioaddr + MIER);
@ -565,7 +645,7 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
if (status & 0x10)
r6040_tx(dev);
return IRQ_RETVAL(handled);
return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@ -577,53 +657,15 @@ static void r6040_poll_controller(struct net_device *dev)
}
#endif
static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
dma_addr_t desc_dma, int size)
{
struct r6040_descriptor *desc = desc_ring;
dma_addr_t mapping = desc_dma;
while (size-- > 0) {
mapping += sizeof(sizeof(*desc));
desc->ndesc = cpu_to_le32(mapping);
desc->vndescp = desc + 1;
desc++;
}
desc--;
desc->ndesc = cpu_to_le32(desc_dma);
desc->vndescp = desc_ring;
}
/* Init RDC MAC */
static void r6040_up(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
/* Initialize */
lp->tx_free_desc = TX_DCNT;
lp->rx_free_desc = 0;
/* Init descriptor */
lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
/* Init TX descriptor */
r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
/* Init RX descriptor */
r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);
/* Allocate buffer for RX descriptor */
rx_buf_alloc(lp, dev);
/*
* TX and RX descriptor start registers.
* Lower 16-bits to MxD_SA0. Higher 16-bits to MxD_SA1.
*/
iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);
iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0);
iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1);
/* Initialise and alloc RX/TX buffers */
r6040_alloc_txbufs(dev);
r6040_alloc_rxbufs(dev);
/* Buffer Size Register */
iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR);
@ -689,8 +731,7 @@ static void r6040_timer(unsigned long data)
}
/* Timer active again */
lp->timer.expires = TIMER_WUT;
add_timer(&lp->timer);
mod_timer(&lp->timer, round_jiffies(jiffies + HZ));
}
/* Read/set MAC address routines */
@ -746,14 +787,10 @@ static int r6040_open(struct net_device *dev)
napi_enable(&lp->napi);
netif_start_queue(dev);
if (lp->switch_sig != ICPLUS_PHY_ID) {
/* set and active a timer process */
init_timer(&lp->timer);
lp->timer.expires = TIMER_WUT;
lp->timer.data = (unsigned long)dev;
lp->timer.function = &r6040_timer;
add_timer(&lp->timer);
}
/* set and active a timer process */
setup_timer(&lp->timer, r6040_timer, (unsigned long) dev);
if (lp->switch_sig != ICPLUS_PHY_ID)
mod_timer(&lp->timer, jiffies + HZ);
return 0;
}

View File

@ -1630,7 +1630,8 @@ static inline void sis190_init_rxfilter(struct net_device *dev)
SIS_PCI_COMMIT();
}
static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
struct net_device *dev)
{
u8 from;

View File

@ -114,11 +114,20 @@ do { \
debug_event(claw_dbf_##name,level,(void*)(addr),len); \
} while (0)
/* Allow sorting out low debug levels early to avoid wasted sprintfs */
static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level)
{
return (level <= dbf_grp->level);
}
#define CLAW_DBF_TEXT_(level,name,text...) \
do { \
sprintf(debug_buffer, text); \
debug_text_event(claw_dbf_##name,level, debug_buffer);\
} while (0)
do { \
if (claw_dbf_passes(claw_dbf_##name, level)) { \
sprintf(debug_buffer, text); \
debug_text_event(claw_dbf_##name, level, \
debug_buffer); \
} \
} while (0)
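/*
 * Illustrative sketch, not part of the driver: with the level check in place,
 * the sprintf into the shared debug_buffer only runs when the facility's
 * current level is at least the message level.  The 'trace' facility name and
 * the format arguments are assumptions made for the example.
 */
static void claw_example_trace(int rc)
{
	CLAW_DBF_TEXT_(4, trace, "rc=%d", rc);
}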
/*******************************************************
* Define Control Blocks *
@ -278,8 +287,6 @@ struct claw_env {
__u16 write_size; /* write buffer size */
__u16 dev_id; /* device ident */
__u8 packing; /* are we packing? */
volatile __u8 queme_switch; /* gate for imed packing */
volatile unsigned long pk_delay; /* Delay for adaptive packing */
__u8 in_use; /* device active flag */
struct net_device *ndev; /* backward ptr to the net dev*/
};

View File

@ -94,7 +94,7 @@ static int
lcs_register_debug_facility(void)
{
lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8);
lcs_dbf_trace = debug_register("lcs_trace", 2, 2, 8);
lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8);
if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) {
PRINT_ERR("Not enough memory for debug facility.\n");
lcs_unregister_debug_facility();

View File

@ -16,11 +16,19 @@ do { \
debug_event(lcs_dbf_##name,level,(void*)(addr),len); \
} while (0)
/* Allow sorting out low debug levels early to avoid wasted sprintfs */
static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
{
return (level <= dbf_grp->level);
}
#define LCS_DBF_TEXT_(level,name,text...) \
do { \
sprintf(debug_buffer, text); \
debug_text_event(lcs_dbf_##name,level, debug_buffer);\
} while (0)
do { \
if (lcs_dbf_passes(lcs_dbf_##name, level)) { \
sprintf(debug_buffer, text); \
debug_text_event(lcs_dbf_##name, level, debug_buffer); \
} \
} while (0)
/**
* sysfs related stuff

View File

@ -97,12 +97,22 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
#define IUCV_DBF_TEXT_(name,level,text...) \
do { \
char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
sprintf(iucv_dbf_txt_buf, text); \
debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
put_cpu_var(iucv_dbf_txt_buf); \
/* Allow sorting out low debug levels early to avoid wasted sprintfs */
static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
{
return (level <= dbf_grp->level);
}
#define IUCV_DBF_TEXT_(name, level, text...) \
do { \
if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
char* iucv_dbf_txt_buf = \
get_cpu_var(iucv_dbf_txt_buf); \
sprintf(iucv_dbf_txt_buf, text); \
debug_text_event(iucv_dbf_##name, level, \
iucv_dbf_txt_buf); \
put_cpu_var(iucv_dbf_txt_buf); \
} \
} while (0)
#define IUCV_DBF_SPRINTF(name,level,text...) \
@ -137,6 +147,7 @@ PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
#define PRINTK_HEADER " iucv: " /* for debugging */
static struct device_driver netiucv_driver = {
.owner = THIS_MODULE,
.name = "netiucv",
.bus = &iucv_bus,
};
@ -572,9 +583,9 @@ static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
}
/**
* Dummy NOP action for all statemachines
* NOP action for statemachines
*/
static void fsm_action_nop(fsm_instance *fi, int event, void *arg)
static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
{
}
@ -1110,7 +1121,7 @@ static const fsm_node dev_fsm[] = {
{ DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
{ DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
{ DEV_STATE_RUNNING, DEV_EVENT_CONUP, fsm_action_nop },
{ DEV_STATE_RUNNING, DEV_EVENT_CONUP, netiucv_action_nop },
};
static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);

View File

@ -19,6 +19,8 @@
#define DM9000_PLATF_8BITONLY (0x0001)
#define DM9000_PLATF_16BITONLY (0x0002)
#define DM9000_PLATF_32BITONLY (0x0004)
#define DM9000_PLATF_EXT_PHY (0x0008)
#define DM9000_PLATF_NO_EEPROM (0x0010)
/* platform data for the platform device structure's platform_data field */