Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:

	drivers/net/wireless/rt2x00/Kconfig
	drivers/net/wireless/rt2x00/rt2x00usb.c
	net/sctp/protocol.c
David S. Miller 2008-06-16 18:25:48 -07:00
commit caea902f72
23 changed files with 251 additions and 180 deletions

View file

@@ -1542,7 +1542,8 @@ he_start(struct atm_dev *dev)
/* initialize framer */
#ifdef CONFIG_ATM_HE_USE_SUNI
suni_init(he_dev->atm_dev);
if (he_isMM(he_dev))
suni_init(he_dev->atm_dev);
if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */
@@ -1554,6 +1555,7 @@ he_start(struct atm_dev *dev)
val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
he_phy_put(he_dev->atm_dev, SUNI_TACP_IUCHP_CLP, SUNI_TACP_IUCHP);
}
/* 5.1.12 enable transmit and receive */
@@ -2844,10 +2846,15 @@ he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
if (copy_from_user(&reg, arg,
sizeof(struct he_ioctl_reg)))
return -EFAULT;
spin_lock_irqsave(&he_dev->global_lock, flags);
switch (reg.type) {
case HE_REGTYPE_PCI:
if (reg.addr < 0 || reg.addr >= HE_REGMAP_SIZE) {
err = -EINVAL;
break;
}
reg.val = he_readl(he_dev, reg.addr);
break;
case HE_REGTYPE_RCM:

View file

@@ -267,13 +267,7 @@ struct he_dev {
char prod_id[30];
char mac_addr[6];
int media; /*
* 0x26 = HE155 MM
* 0x27 = HE622 MM
* 0x46 = HE155 SM
* 0x47 = HE622 SM
*/
int media;
unsigned int vcibits, vpibits;
unsigned int cells_per_row;
@@ -392,6 +386,7 @@ struct he_vcc
#define HE_DEV(dev) ((struct he_dev *) (dev)->dev_data)
#define he_is622(dev) ((dev)->media & 0x1)
#define he_isMM(dev) ((dev)->media & 0x20)
#define HE_REGMAP_SIZE 0x100000
@@ -876,8 +871,8 @@ struct he_vcc
#define M_SN 0x3a /* integer */
#define MEDIA 0x3e /* integer */
#define HE155MM 0x26
#define HE155SM 0x27
#define HE622MM 0x46
#define HE622MM 0x27
#define HE155SM 0x46
#define HE622SM 0x47
#define MAC_ADDR 0x42 /* char[] */
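The corrected values line up with the he_is622()/he_isMM() tests above: bit 0 distinguishes the 622 Mbit/s boards from the 155 Mbit/s ones, and bit 5 distinguishes multi-mode from single-mode fibre. A standalone sketch (local copies of the defines, not the driver headers) of how the four IDs decode:

	#include <stdio.h>

	#define HE155MM 0x26
	#define HE622MM 0x27
	#define HE155SM 0x46
	#define HE622SM 0x47

	#define is622(m) ((m) & 0x1)	/* bit 0: 622 Mbit/s variant */
	#define isMM(m)  ((m) & 0x20)	/* bit 5: multi-mode fibre */

	int main(void)
	{
		int ids[] = { HE155MM, HE622MM, HE155SM, HE622SM };

		for (int i = 0; i < 4; i++)
			printf("0x%02x -> HE%s %s\n", ids[i],
			       is622(ids[i]) ? "622" : "155",
			       isMM(ids[i]) ? "MM" : "SM");
		return 0;
	}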

View file

@@ -2562,17 +2562,11 @@ static int __devinit ia_start(struct atm_dev *dev)
error = suni_init(dev);
if (error)
goto err_free_rx;
/*
* Enable interrupt on loss of signal
* SUNI_RSOP_CIE - 0x10
* SUNI_RSOP_CIE_LOSE - 0x04
*/
ia_phy_put(dev, ia_phy_get(dev, 0x10) | 0x04, 0x10);
#ifndef MODULE
error = dev->phy->start(dev);
if (error)
goto err_free_rx;
#endif
if (dev->phy->start) {
error = dev->phy->start(dev);
if (error)
goto err_free_rx;
}
/* Get iadev->carrier_detect status */
IaFrontEndIntr(iadev);
}
@@ -3198,6 +3192,8 @@ static int __devinit ia_init_one(struct pci_dev *pdev,
IF_INIT(printk("dev_id = 0x%x iadev->LineRate = %d \n", (u32)dev,
iadev->LineRate);)
pci_set_drvdata(pdev, dev);
ia_dev[iadev_count] = iadev;
_ia_dev[iadev_count] = dev;
iadev_count++;
@@ -3219,8 +3215,6 @@ static int __devinit ia_init_one(struct pci_dev *pdev,
iadev->next_board = ia_boards;
ia_boards = dev;
pci_set_drvdata(pdev, dev);
return 0;
err_out_deregister_dev:
@@ -3238,9 +3232,14 @@ static void __devexit ia_remove_one(struct pci_dev *pdev)
struct atm_dev *dev = pci_get_drvdata(pdev);
IADEV *iadev = INPH_IA_DEV(dev);
ia_phy_put(dev, ia_phy_get(dev,0x10) & ~(0x4), 0x10);
/* Disable phy interrupts */
ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
SUNI_RSOP_CIE);
udelay(1);
if (dev->phy && dev->phy->stop)
dev->phy->stop(dev);
/* De-register device */
free_irq(iadev->irq, dev);
iadev_count--;

View file

@@ -649,7 +649,6 @@ struct b43_pio {
/* Context information for a noise calculation (Link Quality). */
struct b43_noise_calculation {
u8 channel_at_start;
bool calculation_running;
u8 nr_samples;
s8 samples[8][4];

View file

@@ -795,66 +795,23 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
{
struct b43_dmaring *ring;
int err;
int nr_slots;
dma_addr_t dma_test;
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
goto out;
ring->type = type;
nr_slots = B43_RXRING_SLOTS;
ring->nr_slots = B43_RXRING_SLOTS;
if (for_tx)
nr_slots = B43_TXRING_SLOTS;
ring->nr_slots = B43_TXRING_SLOTS;
ring->meta = kcalloc(nr_slots, sizeof(struct b43_dmadesc_meta),
ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
GFP_KERNEL);
if (!ring->meta)
goto err_kfree_ring;
if (for_tx) {
ring->txhdr_cache = kcalloc(nr_slots,
b43_txhdr_size(dev),
GFP_KERNEL);
if (!ring->txhdr_cache)
goto err_kfree_meta;
/* test for ability to dma to txhdr_cache */
dma_test = dma_map_single(dev->dev->dma_dev,
ring->txhdr_cache,
b43_txhdr_size(dev),
DMA_TO_DEVICE);
if (b43_dma_mapping_error(ring, dma_test,
b43_txhdr_size(dev), 1)) {
/* ugh realloc */
kfree(ring->txhdr_cache);
ring->txhdr_cache = kcalloc(nr_slots,
b43_txhdr_size(dev),
GFP_KERNEL | GFP_DMA);
if (!ring->txhdr_cache)
goto err_kfree_meta;
dma_test = dma_map_single(dev->dev->dma_dev,
ring->txhdr_cache,
b43_txhdr_size(dev),
DMA_TO_DEVICE);
if (b43_dma_mapping_error(ring, dma_test,
b43_txhdr_size(dev), 1)) {
b43err(dev->wl,
"TXHDR DMA allocation failed\n");
goto err_kfree_txhdr_cache;
}
}
dma_unmap_single(dev->dev->dma_dev,
dma_test, b43_txhdr_size(dev),
DMA_TO_DEVICE);
}
ring->type = type;
ring->dev = dev;
ring->nr_slots = nr_slots;
ring->mmio_base = b43_dmacontroller_base(type, controller_index);
ring->index = controller_index;
if (type == B43_DMA_64BIT)
@@ -879,6 +836,48 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
ring->last_injected_overflow = jiffies;
#endif
if (for_tx) {
ring->txhdr_cache = kcalloc(ring->nr_slots,
b43_txhdr_size(dev),
GFP_KERNEL);
if (!ring->txhdr_cache)
goto err_kfree_meta;
/* test for ability to dma to txhdr_cache */
dma_test = dma_map_single(dev->dev->dma_dev,
ring->txhdr_cache,
b43_txhdr_size(dev),
DMA_TO_DEVICE);
if (b43_dma_mapping_error(ring, dma_test,
b43_txhdr_size(dev), 1)) {
/* ugh realloc */
kfree(ring->txhdr_cache);
ring->txhdr_cache = kcalloc(ring->nr_slots,
b43_txhdr_size(dev),
GFP_KERNEL | GFP_DMA);
if (!ring->txhdr_cache)
goto err_kfree_meta;
dma_test = dma_map_single(dev->dev->dma_dev,
ring->txhdr_cache,
b43_txhdr_size(dev),
DMA_TO_DEVICE);
if (b43_dma_mapping_error(ring, dma_test,
b43_txhdr_size(dev), 1)) {
b43err(dev->wl,
"TXHDR DMA allocation failed\n");
goto err_kfree_txhdr_cache;
}
}
dma_unmap_single(dev->dev->dma_dev,
dma_test, b43_txhdr_size(dev),
DMA_TO_DEVICE);
}
err = alloc_ringmemory(ring);
if (err)
goto err_kfree_txhdr_cache;
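The txhdr_cache allocation that moves down here keeps its two-stage strategy: allocate from the normal pool first, check that the buffer can actually be DMA-mapped, and only then retry from the more restrictive GFP_DMA pool. A generic, hedged sketch of that fallback idiom; alloc_normal/alloc_restricted/mappable are stand-ins, not b43 code:

	#include <stdlib.h>

	static void *alloc_normal(size_t n)     { return calloc(1, n); }
	static void *alloc_restricted(size_t n) { return calloc(1, n); }	/* stands in for GFP_DMA */
	static int mappable(const void *p)      { return p != NULL; }		/* stands in for the dma_map_single() test */

	static void *alloc_dma_safe(size_t n)
	{
		void *buf = alloc_normal(n);

		if (!buf)
			return NULL;
		if (!mappable(buf)) {			/* the "ugh realloc" path */
			free(buf);
			buf = alloc_restricted(n);
			if (buf && !mappable(buf)) {	/* still unusable: give up */
				free(buf);
				return NULL;
			}
		}
		return buf;
	}

	int main(void)
	{
		void *p = alloc_dma_safe(4096);

		free(p);
		return p ? 0 : 1;
	}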

View file

@@ -1145,7 +1145,6 @@ static void b43_generate_noise_sample(struct b43_wldev *dev)
b43_jssi_write(dev, 0x7F7F7F7F);
b43_write32(dev, B43_MMIO_MACCMD,
b43_read32(dev, B43_MMIO_MACCMD) | B43_MACCMD_BGNOISE);
B43_WARN_ON(dev->noisecalc.channel_at_start != dev->phy.channel);
}
static void b43_calculate_link_quality(struct b43_wldev *dev)
@@ -1154,7 +1153,6 @@ static void b43_calculate_link_quality(struct b43_wldev *dev)
if (dev->noisecalc.calculation_running)
return;
dev->noisecalc.channel_at_start = dev->phy.channel;
dev->noisecalc.calculation_running = 1;
dev->noisecalc.nr_samples = 0;
@@ -1171,9 +1169,16 @@ static void handle_irq_noise(struct b43_wldev *dev)
/* Bottom half of Link Quality calculation. */
/* Possible race condition: It might be possible that the user
* changed to a different channel in the meantime since we
* started the calculation. We ignore that fact, since it's
* not really that much of a problem. The background noise is
* an estimation only anyway. Slightly wrong results will get damped
* by the averaging of the 8 sample rounds. Additionally the
* value is shortlived. So it will be replaced by the next noise
* calculation round soon. */
B43_WARN_ON(!dev->noisecalc.calculation_running);
if (dev->noisecalc.channel_at_start != phy->channel)
goto drop_calculation;
*((__le32 *)noise) = cpu_to_le32(b43_jssi_read(dev));
if (noise[0] == 0x7F || noise[1] == 0x7F ||
noise[2] == 0x7F || noise[3] == 0x7F)
@@ -1214,11 +1219,10 @@ static void handle_irq_noise(struct b43_wldev *dev)
average -= 48;
dev->stats.link_noise = average;
drop_calculation:
dev->noisecalc.calculation_running = 0;
return;
}
generate_new:
generate_new:
b43_generate_noise_sample(dev);
}

View file

@@ -36,12 +36,13 @@ config RT2X00_LIB_FIRMWARE
config RT2X00_LIB_RFKILL
boolean
depends on RT2X00_LIB
depends on INPUT
select RFKILL
select INPUT_POLLDEV
config RT2X00_LIB_LEDS
boolean
depends on RT2X00_LIB
depends on RT2X00_LIB && NEW_LEDS
config RT2400PCI
tristate "Ralink rt2400 (PCI/PCMCIA) support"
@@ -56,7 +57,7 @@ config RT2400PCI
config RT2400PCI_RFKILL
bool "Ralink rt2400 rfkill support"
depends on RT2400PCI
depends on RT2400PCI && INPUT
select RT2X00_LIB_RFKILL
---help---
This adds support for integrated rt2400 hardware that features a
@@ -65,7 +66,7 @@ config RT2400PCI_RFKILL
config RT2400PCI_LEDS
bool "Ralink rt2400 leds support"
depends on RT2400PCI
depends on RT2400PCI && NEW_LEDS
select LEDS_CLASS
select RT2X00_LIB_LEDS
---help---
@@ -84,7 +85,7 @@ config RT2500PCI
config RT2500PCI_RFKILL
bool "Ralink rt2500 rfkill support"
depends on RT2500PCI
depends on RT2500PCI && INPUT
select RT2X00_LIB_RFKILL
---help---
This adds support for integrated rt2500 hardware that features a
@@ -93,7 +94,7 @@ config RT2500PCI_RFKILL
config RT2500PCI_LEDS
bool "Ralink rt2500 leds support"
depends on RT2500PCI
depends on RT2500PCI && NEW_LEDS
select LEDS_CLASS
select RT2X00_LIB_LEDS
---help---
@@ -114,7 +115,7 @@ config RT61PCI
config RT61PCI_RFKILL
bool "Ralink rt2501/rt61 rfkill support"
depends on RT61PCI
depends on RT61PCI && INPUT
select RT2X00_LIB_RFKILL
---help---
This adds support for integrated rt61 hardware that features a
@@ -123,7 +124,7 @@ config RT61PCI_RFKILL
config RT61PCI_LEDS
bool "Ralink rt2501/rt61 leds support"
depends on RT61PCI
depends on RT61PCI && NEW_LEDS
select LEDS_CLASS
select RT2X00_LIB_LEDS
---help---
@@ -141,7 +142,7 @@ config RT2500USB
config RT2500USB_LEDS
bool "Ralink rt2500 leds support"
depends on RT2500USB
depends on RT2500USB && NEW_LEDS
select LEDS_CLASS
select RT2X00_LIB_LEDS
---help---
@@ -161,7 +162,7 @@ config RT73USB
config RT73USB_LEDS
bool "Ralink rt2501/rt73 leds support"
depends on RT73USB
depends on RT73USB && NEW_LEDS
select LEDS_CLASS
select RT2X00_LIB_LEDS
---help---

View file

@@ -357,8 +357,7 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
if (pci_set_mwi(pci_dev))
ERROR_PROBE("MWI not available.\n");
if (pci_set_dma_mask(pci_dev, DMA_64BIT_MASK) &&
pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) {
if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) {
ERROR_PROBE("PCI DMA not supported.\n");
retval = -EIO;
goto exit_disable_device;

View file

@@ -345,8 +345,11 @@ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
}
/*
* Kill guardian urb.
* Kill guardian urb (if required by driver).
*/
if (!test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
return;
for (i = 0; i < rt2x00dev->bcn->limit; i++) {
bcn_priv = rt2x00dev->bcn->entries[i].priv_data;
if (bcn_priv->guardian_urb)

View file

@@ -2114,6 +2114,7 @@ static struct usb_device_id rt73usb_device_table[] = {
/* D-Link */
{ USB_DEVICE(0x07d1, 0x3c03), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c04), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c06), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c07), USB_DEVICE_DATA(&rt73usb_ops) },
/* Gemtek */
{ USB_DEVICE(0x15a9, 0x0004), USB_DEVICE_DATA(&rt73usb_ops) },

View file

@@ -1168,15 +1168,21 @@ EXPORT_SYMBOL(ssb_dma_translation);
int ssb_dma_set_mask(struct ssb_device *ssb_dev, u64 mask)
{
struct device *dma_dev = ssb_dev->dma_dev;
int err = 0;
#ifdef CONFIG_SSB_PCIHOST
if (ssb_dev->bus->bustype == SSB_BUSTYPE_PCI)
return dma_set_mask(dma_dev, mask);
if (ssb_dev->bus->bustype == SSB_BUSTYPE_PCI) {
err = pci_set_dma_mask(ssb_dev->bus->host_pci, mask);
if (err)
return err;
err = pci_set_consistent_dma_mask(ssb_dev->bus->host_pci, mask);
return err;
}
#endif
dma_dev->coherent_dma_mask = mask;
dma_dev->dma_mask = &dma_dev->coherent_dma_mask;
return 0;
return err;
}
EXPORT_SYMBOL(ssb_dma_set_mask);

View file

@@ -41,7 +41,7 @@ struct ip_tunnel_prl {
__u16 __reserved;
__u32 datalen;
__u32 __reserved2;
void __user *data;
/* data follows */
};
/* PRL flags */
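With the user pointer dropped from the struct, a SIOCGETPRL caller now passes a single ip_tunnel_prl as a request header and the result entries are expected directly behind it; the kernel writes the number of bytes it returned back into datalen (the ipip6_tunnel_get_prl() change further down shows the kernel side). A hedged userspace sketch of that layout; the device name "sit1" and the capacity are assumptions, not part of the patch:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <arpa/inet.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/if.h>
	#include <linux/if_tunnel.h>

	int main(void)
	{
		unsigned int i, n, cmax = 16;	/* arbitrary capacity for this sketch */
		struct ip_tunnel_prl *buf = calloc(cmax + 2, sizeof(*buf));
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		buf[0].addr = htonl(INADDR_ANY);	/* no address filter: list everything */
		buf[0].datalen = cmax * sizeof(*buf);	/* room available after buf[0] */

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "sit1", IFNAMSIZ - 1);
		ifr.ifr_ifru.ifru_data = (void *)buf;

		if (ioctl(fd, SIOCGETPRL, &ifr) == 0) {
			n = buf[0].datalen / sizeof(*buf);	/* kernel rewrote datalen */
			for (i = 1; i <= n; i++)
				printf("prl: %08x flags %x\n",
				       ntohl(buf[i].addr), buf[i].flags);
		}
		free(buf);
		return 0;
	}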

View file

@@ -188,10 +188,13 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct br2684_dev *brdev,
return 0;
}
}
} else {
skb_push(skb, 2);
if (brdev->payload == p_bridged)
} else { /* e_vc */
if (brdev->payload == p_bridged) {
skb_push(skb, 2);
memset(skb->data, 0, 2);
} else { /* p_routed */
skb_pull(skb, ETH_HLEN);
}
}
skb_debug(skb);
@@ -377,11 +380,8 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
(skb->data + 6, ethertype_ipv4,
sizeof(ethertype_ipv4)) == 0)
skb->protocol = __constant_htons(ETH_P_IP);
else {
brdev->stats.rx_errors++;
dev_kfree_skb(skb);
return;
}
else
goto error;
skb_pull(skb, sizeof(llc_oui_ipv4));
skb_reset_network_header(skb);
skb->pkt_type = PACKET_HOST;
@@ -394,44 +394,56 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
(memcmp(skb->data, llc_oui_pid_pad, 7) == 0)) {
skb_pull(skb, sizeof(llc_oui_pid_pad));
skb->protocol = eth_type_trans(skb, net_dev);
} else {
brdev->stats.rx_errors++;
dev_kfree_skb(skb);
return;
}
} else
goto error;
} else {
/* first 2 chars should be 0 */
if (*((u16 *) (skb->data)) != 0) {
brdev->stats.rx_errors++;
dev_kfree_skb(skb);
return;
} else { /* e_vc */
if (brdev->payload == p_routed) {
struct iphdr *iph;
skb_reset_network_header(skb);
iph = ip_hdr(skb);
if (iph->version == 4)
skb->protocol = __constant_htons(ETH_P_IP);
else if (iph->version == 6)
skb->protocol = __constant_htons(ETH_P_IPV6);
else
goto error;
skb->pkt_type = PACKET_HOST;
} else { /* p_bridged */
/* first 2 chars should be 0 */
if (*((u16 *) (skb->data)) != 0)
goto error;
skb_pull(skb, BR2684_PAD_LEN);
skb->protocol = eth_type_trans(skb, net_dev);
}
skb_pull(skb, BR2684_PAD_LEN + ETH_HLEN); /* pad, dstmac, srcmac, ethtype */
skb->protocol = eth_type_trans(skb, net_dev);
}
#ifdef CONFIG_ATM_BR2684_IPFILTER
if (unlikely(packet_fails_filter(skb->protocol, brvcc, skb))) {
brdev->stats.rx_dropped++;
dev_kfree_skb(skb);
return;
}
if (unlikely(packet_fails_filter(skb->protocol, brvcc, skb)))
goto dropped;
#endif /* CONFIG_ATM_BR2684_IPFILTER */
skb->dev = net_dev;
ATM_SKB(skb)->vcc = atmvcc; /* needed ? */
pr_debug("received packet's protocol: %x\n", ntohs(skb->protocol));
skb_debug(skb);
if (unlikely(!(net_dev->flags & IFF_UP))) {
/* sigh, interface is down */
brdev->stats.rx_dropped++;
dev_kfree_skb(skb);
return;
}
/* sigh, interface is down? */
if (unlikely(!(net_dev->flags & IFF_UP)))
goto dropped;
brdev->stats.rx_packets++;
brdev->stats.rx_bytes += skb->len;
memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
netif_rx(skb);
return;
dropped:
brdev->stats.rx_dropped++;
goto free_skb;
error:
brdev->stats.rx_errors++;
free_skb:
dev_kfree_skb(skb);
return;
}
/*
@@ -518,9 +530,9 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
struct sk_buff *next = skb->next;
skb->next = skb->prev = NULL;
br2684_push(atmvcc, skb);
BRPRIV(skb->dev)->stats.rx_bytes -= skb->len;
BRPRIV(skb->dev)->stats.rx_packets--;
br2684_push(atmvcc, skb);
skb = next;
}

View file

@@ -119,6 +119,7 @@
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include "net-sysfs.h"
@@ -1362,6 +1363,29 @@ void netif_device_attach(struct net_device *dev)
}
EXPORT_SYMBOL(netif_device_attach);
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
return ((features & NETIF_F_GEN_CSUM) ||
((features & NETIF_F_IP_CSUM) &&
protocol == htons(ETH_P_IP)) ||
((features & NETIF_F_IPV6_CSUM) &&
protocol == htons(ETH_P_IPV6)));
}
static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
if (can_checksum_protocol(dev->features, skb->protocol))
return true;
if (skb->protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
if (can_checksum_protocol(dev->features & dev->vlan_features,
veh->h_vlan_encapsulated_proto))
return true;
}
return false;
}
/*
* Invalidate hardware checksum when packet is to be mangled, and
@@ -1640,14 +1664,8 @@ int dev_queue_xmit(struct sk_buff *skb)
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_set_transport_header(skb, skb->csum_start -
skb_headroom(skb));
if (!(dev->features & NETIF_F_GEN_CSUM) &&
!((dev->features & NETIF_F_IP_CSUM) &&
skb->protocol == htons(ETH_P_IP)) &&
!((dev->features & NETIF_F_IPV6_CSUM) &&
skb->protocol == htons(ETH_P_IPV6)))
if (skb_checksum_help(skb))
goto out_kfree_skb;
if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
goto out_kfree_skb;
}
gso:
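The new dev_can_checksum() folds the old open-coded feature test into a helper and extends it to look inside 802.1Q tags, where the decision is based on dev->vlan_features and the encapsulated protocol. A standalone sketch of that decision with the feature bits and protocol numbers reduced to local constants (nothing here is kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	#define F_GEN_CSUM  (1u << 0)
	#define F_IP_CSUM   (1u << 1)
	#define F_IPV6_CSUM (1u << 2)

	enum { P_IP, P_IPV6, P_8021Q };

	static bool can_csum(unsigned feat, int proto)
	{
		return (feat & F_GEN_CSUM) ||
		       ((feat & F_IP_CSUM) && proto == P_IP) ||
		       ((feat & F_IPV6_CSUM) && proto == P_IPV6);
	}

	int main(void)
	{
		unsigned dev_features = F_IP_CSUM;	/* device: IPv4 csum only */
		unsigned vlan_features = F_IP_CSUM;	/* ...also inside VLAN tags */
		int proto = P_8021Q, inner = P_IP;	/* tagged IPv4 frame */

		bool ok = can_csum(dev_features, proto) ||
			  (proto == P_8021Q &&
			   can_csum(dev_features & vlan_features, inner));
		printf("offload checksum: %s\n", ok ? "yes" : "no, fall back to software");
		return 0;
	}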

View file

@@ -468,9 +468,9 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
reqp=&lopt->syn_table[i];
while ((req = *reqp) != NULL) {
if (time_after_eq(now, req->expires)) {
if ((req->retrans < (inet_rsk(req)->acked ? max_retries : thresh)) &&
(inet_rsk(req)->acked ||
!req->rsk_ops->rtx_syn_ack(parent, req))) {
if ((req->retrans < thresh ||
(inet_rsk(req)->acked && req->retrans < max_retries))
&& !req->rsk_ops->rtx_syn_ack(parent, req)) {
unsigned long timeo;
if (req->retrans++ == 0)
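The rewritten condition (the second form in the hunk above) keeps a pending connection request alive while retrans is below thresh, gives requests that have already been ACKed the larger max_retries budget, and in either case still requires the SYN-ACK retransmit itself to succeed. A standalone restatement of that rule, with plain ints standing in for the request_sock fields:

	#include <stdio.h>

	static int keep_request(int retrans, int acked, int thresh, int max_retries,
				int rtx_ok)
	{
		return (retrans < thresh || (acked && retrans < max_retries)) && rtx_ok;
	}

	int main(void)
	{
		/* thresh = 3, max_retries = 5, retransmit assumed to succeed */
		printf("%d\n", keep_request(2, 0, 3, 5, 1));	/* 1: under thresh */
		printf("%d\n", keep_request(4, 0, 3, 5, 1));	/* 0: un-ACKed, over thresh */
		printf("%d\n", keep_request(4, 1, 3, 5, 1));	/* 1: ACKed, under max_retries */
		return 0;
	}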

View file

@@ -931,7 +931,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
srcp = inet->num;
seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d",
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
i, src, srcp, dest, destp, sp->sk_state,
atomic_read(&sp->sk_wmem_alloc),
atomic_read(&sp->sk_rmem_alloc),

View file

@@ -83,10 +83,6 @@
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
/* Check TCP sequence numbers in ICMP packets. */
#define ICMP_MIN_LENGTH 8
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,

View file

@@ -220,15 +220,18 @@ __ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr)
}
static int ipip6_tunnel_get_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
struct ip_tunnel_prl __user *a)
{
struct ip_tunnel_prl *kp;
struct ip_tunnel_prl kprl, *kp;
struct ip_tunnel_prl_entry *prl;
unsigned int cmax, c = 0, ca, len;
int ret = 0;
cmax = a->datalen / sizeof(*a);
if (cmax > 1 && a->addr != htonl(INADDR_ANY))
if (copy_from_user(&kprl, a, sizeof(kprl)))
return -EFAULT;
cmax = kprl.datalen / sizeof(kprl);
if (cmax > 1 && kprl.addr != htonl(INADDR_ANY))
cmax = 1;
/* For simple GET or for root users,
@@ -259,26 +262,25 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
for (prl = t->prl; prl; prl = prl->next) {
if (c > cmax)
break;
if (a->addr != htonl(INADDR_ANY) && prl->addr != a->addr)
if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr)
continue;
kp[c].addr = prl->addr;
kp[c].flags = prl->flags;
c++;
if (a->addr != htonl(INADDR_ANY))
if (kprl.addr != htonl(INADDR_ANY))
break;
}
out:
read_unlock(&ipip6_lock);
len = sizeof(*kp) * c;
ret = len ? copy_to_user(a->data, kp, len) : 0;
ret = 0;
if ((len && copy_to_user(a + 1, kp, len)) || put_user(len, &a->datalen))
ret = -EFAULT;
kfree(kp);
if (ret)
return -EFAULT;
a->datalen = len;
return 0;
return ret;
}
static int
@@ -871,11 +873,20 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
break;
case SIOCGETPRL:
err = -EINVAL;
if (dev == sitn->fb_tunnel_dev)
goto done;
err = -ENOENT;
if (!(t = netdev_priv(dev)))
goto done;
err = ipip6_tunnel_get_prl(t, ifr->ifr_ifru.ifru_data);
break;
case SIOCADDPRL:
case SIOCDELPRL:
case SIOCCHGPRL:
err = -EPERM;
if (cmd != SIOCGETPRL && !capable(CAP_NET_ADMIN))
if (!capable(CAP_NET_ADMIN))
goto done;
err = -EINVAL;
if (dev == sitn->fb_tunnel_dev)
@@ -888,12 +899,6 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
goto done;
switch (cmd) {
case SIOCGETPRL:
err = ipip6_tunnel_get_prl(t, &prl);
if (!err && copy_to_user(ifr->ifr_ifru.ifru_data,
&prl, sizeof(prl)))
err = -EFAULT;
break;
case SIOCDELPRL:
err = ipip6_tunnel_del_prl(t, &prl);
break;
@@ -902,8 +907,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL);
break;
}
if (cmd != SIOCGETPRL)
netdev_state_change(dev);
netdev_state_change(dev);
break;
default:

View file

@@ -508,7 +508,8 @@ static int ieee80211_ioctl_giwap(struct net_device *dev,
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
if (sdata->u.sta.state == IEEE80211_ASSOCIATED) {
if (sdata->u.sta.state == IEEE80211_ASSOCIATED ||
sdata->u.sta.state == IEEE80211_IBSS_JOINED) {
ap_addr->sa_family = ARPHRD_ETHER;
memcpy(&ap_addr->sa_data, sdata->u.sta.bssid, ETH_ALEN);
return 0;

View file

@@ -637,7 +637,7 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
#ifdef CONFIG_MAC80211_HT_DEBUG
if (net_ratelimit())
printk(KERN_DEBUG "allocated aggregation queue"
" %d tid %d addr %s pool=0x%lX",
" %d tid %d addr %s pool=0x%lX\n",
i, tid, print_mac(mac, sta->addr),
q->qdisc_pool[0]);
#endif /* CONFIG_MAC80211_HT_DEBUG */

View file

@@ -26,6 +26,7 @@
* and many others. thanks.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -51,13 +52,17 @@
*/
#define HTB_HSIZE 16 /* classid hash size */
#define HTB_HYSTERESIS 1 /* whether to use mode hysteresis for speedup */
static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011 /* major must be matched with number suplied by TC as version */
#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif
/* Module parameter and sysfs export */
module_param (htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
/* used internaly to keep status of single class */
enum htb_cmode {
HTB_CANT_SEND, /* class can't send and can't borrow */
@@ -460,19 +465,21 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
htb_remove_class_from_row(q, cl, mask);
}
#if HTB_HYSTERESIS
static inline long htb_lowater(const struct htb_class *cl)
{
return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
if (htb_hysteresis)
return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
else
return 0;
}
static inline long htb_hiwater(const struct htb_class *cl)
{
return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
if (htb_hysteresis)
return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
else
return 0;
}
#else
#define htb_lowater(cl) (0)
#define htb_hiwater(cl) (0)
#endif
/**
* htb_class_mode - computes and returns current class mode
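The compile-time HTB_HYSTERESIS switch becomes the htb_hysteresis module parameter, so the speedup can be chosen at load time (for example, passing htb_hysteresis=1 when loading sch_htb) or flipped afterwards: the 0640 permission above exposes it as a root-writable sysfs file. A minimal sketch, assuming sch_htb is loaded, that re-enables it through that file:

	#include <stdio.h>

	int main(void)
	{
		const char *path = "/sys/module/sch_htb/parameters/htb_hysteresis";
		FILE *f = fopen(path, "w");

		if (!f) {
			perror(path);
			return 1;
		}
		fputs("1\n", f);	/* non-zero restores the old hysteresis speedup */
		return fclose(f) ? 1 : 0;
	}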

View file

@@ -476,6 +476,15 @@ static void sctp_association_destroy(struct sctp_association *asoc)
void sctp_assoc_set_primary(struct sctp_association *asoc,
struct sctp_transport *transport)
{
int changeover = 0;
/* it's a changeover only if we already have a primary path
* that we are changing
*/
if (asoc->peer.primary_path != NULL &&
asoc->peer.primary_path != transport)
changeover = 1 ;
asoc->peer.primary_path = transport;
/* Set a default msg_name for events. */
@@ -501,12 +510,12 @@ void sctp_assoc_set_primary(struct sctp_association *asoc,
* double switch to the same destination address.
*/
if (transport->cacc.changeover_active)
transport->cacc.cycling_changeover = 1;
transport->cacc.cycling_changeover = changeover;
/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
* a changeover has occurred.
*/
transport->cacc.changeover_active = 1;
transport->cacc.changeover_active = changeover;
/* 3) The sender MUST store the next TSN to be sent in
* next_tsn_at_change.
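The changeover flag is now computed instead of hard-coded to 1: switching the primary path counts as a changeover only when a different primary already existed, so the very first assignment no longer flags cacc changeover state. A standalone sketch of just that test; the pointers stand in for sctp_transport and nothing here is SCTP code:

	#include <stdio.h>

	static int is_changeover(const void *old_primary, const void *new_primary)
	{
		return old_primary != NULL && old_primary != new_primary;
	}

	int main(void)
	{
		int a, b;	/* two dummy "transports" */

		printf("%d\n", is_changeover(NULL, &a));	/* 0: first assignment */
		printf("%d\n", is_changeover(&a, &a));		/* 0: same path re-selected */
		printf("%d\n", is_changeover(&a, &b));		/* 1: real changeover */
		return 0;
	}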

View file

@@ -108,16 +108,27 @@ static __init int sctp_proc_init(void)
}
if (sctp_snmp_proc_init())
goto out_nomem;
goto out_snmp_proc_init;
if (sctp_eps_proc_init())
goto out_nomem;
goto out_eps_proc_init;
if (sctp_assocs_proc_init())
goto out_nomem;
goto out_assocs_proc_init;
if (sctp_remaddr_proc_init())
goto out_nomem;
goto out_remaddr_proc_init;
return 0;
out_remaddr_proc_init:
sctp_remaddr_proc_exit();
out_assocs_proc_init:
sctp_eps_proc_exit();
out_eps_proc_init:
sctp_snmp_proc_exit();
out_snmp_proc_init:
if (proc_net_sctp) {
proc_net_sctp = NULL;
remove_proc_entry("sctp", init_net.proc_net);
}
out_nomem:
return -ENOMEM;
}
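The single out_nomem path is replaced by a ladder of labels so that each failing step unwinds what was already set up, in reverse order. A generic sketch of that goto-unwind idiom with placeholder steps (step1/step2/undo_step1 are illustrative names, not SCTP functions):

	#include <stdio.h>

	static int step1(void) { return 0; }		/* 0 = success */
	static int step2(void) { return -1; }		/* simulate a failure */
	static void undo_step1(void) { puts("undo step1"); }

	static int init_all(void)
	{
		if (step1())
			goto out;
		if (step2())
			goto out_undo_step1;
		return 0;

	out_undo_step1:
		undo_step1();
	out:
		return -1;
	}

	int main(void)
	{
		printf("init_all() = %d\n", init_all());
		return 0;
	}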