Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (51 commits)
  netfilter: ipset: Fix the order of listing of sets
  ip6_pol_route panic: Do not allow VLAN on loopback
  bnx2x: Fix port identification problem
  r8169: add Realtek as maintainer.
  ip: ip_options_compile() resilient to NULL skb route
  bna: fix memory leak during RX path cleanup
  bna: fix for clean fw re-initialization
  usbnet: Fix up 'FLAG_POINTTOPOINT' and 'FLAG_MULTI_PACKET' overlaps.
  iwlegacy: fix tx_power initialization
  Revert "tcp: disallow bind() to reuse addr/port"
  qlcnic: limit skb frags for non tso packet
  net: can: mscan: fix build breakage in mpc5xxx_can
  netfilter: ipset: set match and SET target fixes
  netfilter: ipset: bitmap:ip,mac type requires "src" for MAC
  sctp: fix oops while removed transport still using as retran path
  sctp: fix oops when updating retransmit path with DEBUG on
  net: Disable NETIF_F_TSO_ECN when TSO is disabled
  net: Disable all TSO features when SG is disabled
  sfc: Use rmb() to ensure reads occur in order
  ieee802154: Remove hacked CFLAGS in net/ieee802154/Makefile
  ...
Linus Torvalds 2011-04-19 15:16:41 -07:00
commit 6cf544377f
64 changed files with 330 additions and 203 deletions

@@ -151,6 +151,7 @@ S: Maintained
F: drivers/net/hamradio/6pack.c
8169 10/100/1000 GIGABIT ETHERNET DRIVER
M: Realtek linux nic maintainers <nic_swsd@realtek.com>
M: Francois Romieu <romieu@fr.zoreil.com>
L: netdev@vger.kernel.org
S: Maintained

@@ -142,6 +142,7 @@ static int cn_call_callback(struct sk_buff *skb)
cbq->callback(msg, nsp);
kfree_skb(skb);
cn_queue_release_callback(cbq);
err = 0;
}
return err;

@@ -38,6 +38,8 @@
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc) \
((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc) \
@@ -602,7 +604,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_SEMLOCKED:
if (bfa_ioc_firmware_lock(ioc)) {
if (bfa_ioc_sync_complete(ioc)) {
if (bfa_ioc_sync_start(ioc)) {
iocpf->retry_count = 0;
bfa_ioc_sync_join(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
@@ -1314,7 +1316,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
* execution context (driver/bios) must match.
*/
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
@@ -1325,7 +1327,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
if (fwhdr.signature != drv_fwhdr->signature)
return false;
if (fwhdr.exec != drv_fwhdr->exec)
if (swab32(fwhdr.param) != boot_env)
return false;
return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
@@ -1352,9 +1354,12 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
enum bfi_ioc_state ioc_fwstate;
bool fwvalid;
u32 boot_env;
ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
boot_env = BFI_BOOT_LOADER_OS;
if (force)
ioc_fwstate = BFI_IOC_UNINIT;
@@ -1362,10 +1367,10 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
* check if firmware is valid
*/
fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
false : bfa_ioc_fwver_valid(ioc);
false : bfa_ioc_fwver_valid(ioc, boot_env);
if (!fwvalid) {
bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
return;
}
@@ -1396,7 +1401,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
/**
* Initialize the h/w for any other states.
*/
bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
}
void
@@ -1506,7 +1511,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
*/
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
u32 boot_param)
u32 boot_env)
{
u32 *fwimg;
u32 pgnum, pgoff;
@@ -1558,10 +1563,10 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
/*
* Set boot type and boot param at the end.
*/
writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
writel(boot_type, ((ioc->ioc_regs.smem_page_start)
+ (BFI_BOOT_TYPE_OFF)));
writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
+ (BFI_BOOT_PARAM_OFF)));
writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+ (BFI_BOOT_LOADER_OFF)));
}
static void
@@ -1721,7 +1726,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
* as the entry vector.
*/
static void
bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
{
void __iomem *rb;
@@ -1734,7 +1739,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
* Initialize IOC state of all functions on a chip reset.
*/
rb = ioc->pcidev.pci_bar_kva;
if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
} else {
@@ -1743,7 +1748,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
}
bfa_ioc_msgflush(ioc);
bfa_ioc_download_fw(ioc, boot_type, boot_param);
bfa_ioc_download_fw(ioc, boot_type, boot_env);
/**
* Enable interrupts just before starting LPU

@@ -194,6 +194,7 @@ struct bfa_ioc_hwif {
bool msix);
void (*ioc_notify_fail) (struct bfa_ioc *ioc);
void (*ioc_ownership_reset) (struct bfa_ioc *ioc);
bool (*ioc_sync_start) (struct bfa_ioc *ioc);
void (*ioc_sync_join) (struct bfa_ioc *ioc);
void (*ioc_sync_leave) (struct bfa_ioc *ioc);
void (*ioc_sync_ack) (struct bfa_ioc *ioc);

@@ -41,6 +41,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
@@ -63,6 +64,7 @@ bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
@@ -342,6 +344,32 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
bfa_nw_ioc_hw_sem_release(ioc);
}
/**
* Synchronized IOC failure processing routines
*/
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
/*
* Driver load time. If the sync required bit for this PCI fn
* is set, it is due to an unclean exit by the driver for this
* PCI fn in the previous incarnation. Whoever comes here first
* should clean it up, no matter which PCI fn.
*/
if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
writel(0, ioc->ioc_regs.ioc_fail_sync);
writel(1, ioc->ioc_regs.ioc_usage_reg);
writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
return true;
}
return bfa_ioc_ct_sync_complete(ioc);
}
/**
* Synchronized IOC failure processing routines
*/

@@ -184,12 +184,14 @@ enum bfi_mclass {
#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
#define BFI_BOOT_TYPE_OFF 8
#define BFI_BOOT_PARAM_OFF 12
#define BFI_BOOT_LOADER_OFF 12
#define BFI_BOOT_TYPE_NORMAL 0 /* param is device id */
#define BFI_BOOT_TYPE_NORMAL 0
#define BFI_BOOT_TYPE_FLASH 1
#define BFI_BOOT_TYPE_MEMTEST 2
#define BFI_BOOT_LOADER_OS 0
#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3

@@ -1837,7 +1837,6 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
/* Initialize the Rx event handlers */
rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
rx_cbfn.rcb_destroy_cbfn = NULL;
rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;

@@ -2114,19 +2114,18 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
for (i = 0; i < (data * 2); i++) {
if ((i % 2) == 0)
bnx2x_set_led(&bp->link_params, &bp->link_vars,
LED_MODE_OPER, SPEED_1000);
LED_MODE_ON, SPEED_1000);
else
bnx2x_set_led(&bp->link_params, &bp->link_vars,
LED_MODE_OFF, 0);
LED_MODE_FRONT_PANEL_OFF, 0);
msleep_interruptible(500);
if (signal_pending(current))
break;
}
if (bp->link_vars.link_up)
bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_OPER,
bp->link_vars.line_speed);
bnx2x_set_led(&bp->link_params, &bp->link_vars,
LED_MODE_OPER, bp->link_vars.line_speed);
return 0;
}

@@ -176,7 +176,7 @@ static int tlb_initialize(struct bonding *bond)
bond_info->tx_hashtbl = new_hashtbl;
for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1);
tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
}
_unlock_tx_hashtbl(bond);
@@ -701,7 +701,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
*/
rlb_choose_channel(skb, bond);
/* The ARP relpy packets must be delayed so that
/* The ARP reply packets must be delayed so that
* they can cancel out the influence of the ARP request.
*/
bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;
@@ -1042,7 +1042,7 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
*
* If the permanent hw address of @slave is @bond's hw address, we need to
* find a different hw address to give @slave, that isn't in use by any other
* slave in the bond. This address must be, of course, one of the premanent
* slave in the bond. This address must be, of course, one of the permanent
* addresses of the other slaves.
*
* We go over the slave list, and for each slave there we compare its

@@ -75,7 +75,7 @@ struct tlb_client_info {
* gave this entry index.
*/
u32 tx_bytes; /* Each Client accumulates the BytesTx that
* were tranmitted to it, and after each
* were transmitted to it, and after each
* CallBack the LoadHistory is divided
* by the balance interval
*/
@@ -122,7 +122,6 @@ struct tlb_slave_info {
};
struct alb_bond_info {
struct timer_list alb_timer;
struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
spinlock_t tx_hashtbl_lock;
u32 unbalanced_load;
@@ -140,7 +139,6 @@ struct alb_bond_info {
struct slave *next_rx_slave;/* next slave to be assigned
* to a new rx client for
*/
u32 rlb_interval_counter;
u8 primary_is_promisc; /* boolean */
u32 rlb_promisc_timeout_counter;/* counts primary
* promiscuity time

@@ -260,7 +260,7 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
if (!ofdev->dev.of_match)
return -EINVAL;
data = (struct mpc5xxx_can_data *)of_dev->dev.of_match->data;
data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data;
base = of_iomap(np, 0);
if (!base) {

@@ -173,7 +173,8 @@ static void loopback_setup(struct net_device *dev)
| NETIF_F_RXCSUM
| NETIF_F_HIGHDMA
| NETIF_F_LLTX
| NETIF_F_NETNS_LOCAL;
| NETIF_F_NETNS_LOCAL
| NETIF_F_VLAN_CHALLENGED;
dev->ethtool_ops = &loopback_ethtool_ops;
dev->header_ops = &eth_header_ops;
dev->netdev_ops = &loopback_ops;

@@ -860,6 +860,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
prev_eedata = eedata;
}
/* Store MAC Address in perm_addr */
memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
dev->base_addr = (unsigned long __force) ioaddr;
dev->irq = irq;

@@ -174,7 +174,7 @@
#define MAX_NUM_CARDS 4
#define MAX_BUFFERS_PER_CMD 32
#define NETXEN_MAX_FRAGS_PER_TX 14
#define MAX_TSO_HEADER_DESC 2
#define MGMT_CMD_DESC_RESV 4
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
@@ -558,7 +558,7 @@ struct netxen_recv_crb {
*/
struct netxen_cmd_buffer {
struct sk_buff *skb;
struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1];
u32 frag_count;
};

@@ -1844,6 +1844,8 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct cmd_desc_type0 *hwdesc, *first_desc;
struct pci_dev *pdev;
int i, k;
int delta = 0;
struct skb_frag_struct *frag;
u32 producer;
int frag_count, no_of_desc;
@@ -1851,6 +1853,21 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
frag_count = skb_shinfo(skb)->nr_frags + 1;
/* 14 frags supported for normal packet and
* 32 frags supported for TSO packet
*/
if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {
for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
frag = &skb_shinfo(skb)->frags[i];
delta += frag->size;
}
if (!__pskb_pull_tail(skb, delta))
goto drop_packet;
frag_count = 1 + skb_shinfo(skb)->nr_frags;
}
/* 4 fragments per cmd des */
no_of_desc = (frag_count + 3) >> 2;

@@ -99,6 +99,7 @@
#define TX_UDPV6_PKT 0x0c
/* Tx defines */
#define QLCNIC_MAX_FRAGS_PER_TX 14
#define MAX_TSO_HEADER_DESC 2
#define MGMT_CMD_DESC_RESV 4
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \

@@ -2099,6 +2099,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct cmd_desc_type0 *hwdesc, *first_desc;
struct pci_dev *pdev;
struct ethhdr *phdr;
int delta = 0;
int i, k;
u32 producer;
@@ -2118,6 +2119,19 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
frag_count = skb_shinfo(skb)->nr_frags + 1;
/* 14 frags supported for normal packet and
* 32 frags supported for TSO packet
*/
if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
delta += skb_shinfo(skb)->frags[i].size;
if (!__pskb_pull_tail(skb, delta))
goto drop_packet;
frag_count = 1 + skb_shinfo(skb)->nr_frags;
}
/* 4 fragments per cmd des */
no_of_desc = (frag_count + 3) >> 2;
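
The qlcnic hunk above, like the netxen one before it, caps non-TSO packets at 14 hardware fragments by pulling the leading fragment payloads into the skb's linear area with __pskb_pull_tail() before building descriptors. A rough standalone sketch of that coalescing step, with invented types in place of struct sk_buff:

#include <string.h>

#define HW_MAX_FRAGS 14                 /* hardware descriptor limit */

struct frag { const void *data; size_t size; };

struct pkt {                            /* stand-in for struct sk_buff */
    char   linear[4096];
    size_t linear_len;
    struct frag frags[32];
    int    nr_frags;
};

/* Returns the new total fragment count (linear part included),
 * or -1 if the excess data cannot fit in the linear buffer. */
static int clamp_frags(struct pkt *p)
{
    int excess = (p->nr_frags + 1) - HW_MAX_FRAGS;
    int i;

    if (excess <= 0)
        return p->nr_frags + 1;

    for (i = 0; i < excess; i++) {      /* pull leading frags into linear area */
        struct frag *f = &p->frags[i];
        if (p->linear_len + f->size > sizeof(p->linear))
            return -1;                  /* caller drops the packet */
        memcpy(p->linear + p->linear_len, f->data, f->size);
        p->linear_len += f->size;
    }
    memmove(p->frags, p->frags + excess,
            (p->nr_frags - excess) * sizeof(p->frags[0]));
    p->nr_frags -= excess;
    return p->nr_frags + 1;
}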

@@ -328,7 +328,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
* processing to finish, then directly poll (and ack ) the eventq.
* Finally reenable NAPI and interrupts.
*
* Since we are touching interrupts the caller should hold the suspend lock
* This is for use only during a loopback self-test. It must not
* deliver any packets up the stack as this can result in deadlock.
*/
void efx_process_channel_now(struct efx_channel *channel)
{
@@ -336,6 +337,7 @@ void efx_process_channel_now(struct efx_channel *channel)
BUG_ON(channel->channel >= efx->n_channels);
BUG_ON(!channel->enabled);
BUG_ON(!efx->loopback_selftest);
/* Disable interrupts and wait for ISRs to complete */
efx_nic_disable_interrupts(efx);
@@ -1436,7 +1438,7 @@ static void efx_start_all(struct efx_nic *efx)
* restart the transmit interface early so the watchdog timer stops */
efx_start_port(efx);
if (efx_dev_registered(efx))
if (efx_dev_registered(efx) && !efx->port_inhibited)
netif_tx_wake_all_queues(efx->net_dev);
efx_for_each_channel(channel, efx)

@@ -152,6 +152,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
spin_lock_irqsave(&efx->biu_lock, flags);
value->u32[0] = _efx_readd(efx, reg + 0);
rmb();
value->u32[1] = _efx_readd(efx, reg + 4);
value->u32[2] = _efx_readd(efx, reg + 8);
value->u32[3] = _efx_readd(efx, reg + 12);
@@ -174,6 +175,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
value->u64[0] = (__force __le64)__raw_readq(membase + addr);
#else
value->u32[0] = (__force __le32)__raw_readl(membase + addr);
rmb();
value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);

@@ -330,7 +330,6 @@ enum efx_rx_alloc_method {
* @eventq_mask: Event queue pointer mask
* @eventq_read_ptr: Event queue read pointer
* @last_eventq_read_ptr: Last event queue read pointer value.
* @magic_count: Event queue test event count
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
* @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -360,7 +359,6 @@ struct efx_channel {
unsigned int eventq_mask;
unsigned int eventq_read_ptr;
unsigned int last_eventq_read_ptr;
unsigned int magic_count;
unsigned int irq_count;
unsigned int irq_mod_score;

@@ -84,7 +84,8 @@ static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
static inline efx_qword_t *efx_event(struct efx_channel *channel,
unsigned int index)
{
return ((efx_qword_t *) (channel->eventq.addr)) + index;
return ((efx_qword_t *) (channel->eventq.addr)) +
(index & channel->eventq_mask);
}
/* See if an event is present
@@ -673,7 +674,8 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
efx_dword_t reg;
struct efx_nic *efx = channel->efx;
EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
channel->eventq_read_ptr & channel->eventq_mask);
efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
channel->channel);
}
@@ -908,7 +910,7 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
if (code == EFX_CHANNEL_MAGIC_TEST(channel))
++channel->magic_count;
; /* ignore */
else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
/* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the
@@ -1015,8 +1017,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
/* Clear this event by marking it all ones */
EFX_SET_QWORD(*p_event);
/* Increment read pointer */
read_ptr = (read_ptr + 1) & channel->eventq_mask;
++read_ptr;
ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
@@ -1060,6 +1061,13 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
return spent;
}
/* Check whether an event is present in the eventq at the current
* read pointer. Only useful for self-test.
*/
bool efx_nic_event_present(struct efx_channel *channel)
{
return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}
/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
@@ -1165,7 +1173,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
unsigned int read_ptr = channel->eventq_read_ptr;
unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
do {
efx_qword_t *event = efx_event(channel, read_ptr);
@@ -1205,7 +1213,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
* it's ok to throw away every non-flush event */
EFX_SET_QWORD(*event);
read_ptr = (read_ptr + 1) & channel->eventq_mask;
++read_ptr;
} while (read_ptr != end_ptr);
channel->eventq_read_ptr = read_ptr;
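
The sfc hunks above replace a masked event-queue read pointer with a free-running counter: eventq_read_ptr now increments without bound, and eventq_mask is applied only where an entry is actually fetched, which keeps pointer comparisons such as read_ptr != end_ptr valid across wrap. A minimal standalone sketch of that pattern (names and sizes here are illustrative, not the sfc API):

#include <stdint.h>

#define RING_SIZE 1024                  /* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

struct ring {
    uint64_t entries[RING_SIZE];
    unsigned int read_ptr;              /* free-running; unsigned wrap is defined */
};

/* Mask only at access time, as the reworked efx_event() does. */
static uint64_t *ring_entry(struct ring *r, unsigned int index)
{
    return &r->entries[index & RING_MASK];
}

static uint64_t ring_pop(struct ring *r)
{
    uint64_t v = *ring_entry(r, r->read_ptr);
    ++r->read_ptr;                      /* no masking needed on increment */
    return v;
}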

@@ -184,6 +184,7 @@ extern void efx_nic_fini_eventq(struct efx_channel *channel);
extern void efx_nic_remove_eventq(struct efx_channel *channel);
extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
extern bool efx_nic_event_present(struct efx_channel *channel);
/* MAC/PHY */
extern void falcon_drain_tx_fifo(struct efx_nic *efx);

@@ -131,8 +131,6 @@ static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
static int efx_test_interrupts(struct efx_nic *efx,
struct efx_self_tests *tests)
{
struct efx_channel *channel;
netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
tests->interrupt = -1;
@@ -140,15 +138,6 @@ static int efx_test_interrupts(struct efx_nic *efx,
efx->last_irq_cpu = -1;
smp_wmb();
/* ACK each interrupting event queue. Receiving an interrupt due to
* traffic before a test event is raised is considered a pass */
efx_for_each_channel(channel, efx) {
if (channel->work_pending)
efx_process_channel_now(channel);
if (efx->last_irq_cpu >= 0)
goto success;
}
efx_nic_generate_interrupt(efx);
/* Wait for arrival of test interrupt. */
@@ -173,13 +162,13 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
struct efx_self_tests *tests)
{
struct efx_nic *efx = channel->efx;
unsigned int magic_count, count;
unsigned int read_ptr, count;
tests->eventq_dma[channel->channel] = -1;
tests->eventq_int[channel->channel] = -1;
tests->eventq_poll[channel->channel] = -1;
magic_count = channel->magic_count;
read_ptr = channel->eventq_read_ptr;
channel->efx->last_irq_cpu = -1;
smp_wmb();
@@ -190,10 +179,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
do {
schedule_timeout_uninterruptible(HZ / 100);
if (channel->work_pending)
efx_process_channel_now(channel);
if (channel->magic_count != magic_count)
if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
goto eventq_ok;
} while (++count < 2);
@@ -211,8 +197,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
}
/* Check to see if event was received even if interrupt wasn't */
efx_process_channel_now(channel);
if (channel->magic_count != magic_count) {
if (efx_nic_event_present(channel)) {
netif_err(efx, drv, efx->net_dev,
"channel %d event was generated, but "
"failed to trigger an interrupt\n", channel->channel);
@@ -770,6 +755,8 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
__efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
netif_tx_wake_all_queues(efx->net_dev);
return rc_test;
}

@@ -435,7 +435,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
* queue state. */
smp_mb();
if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
likely(efx->port_enabled)) {
likely(efx->port_enabled) &&
likely(!efx->port_inhibited)) {
fill_level = tx_queue->insert_count - tx_queue->read_count;
if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));

@@ -240,7 +240,8 @@ static const struct ethtool_ops sis900_ethtool_ops;
* @net_dev: the net device to get address for
*
* Older SiS900 and friends, use EEPROM to store MAC address.
* MAC address is read from read_eeprom() into @net_dev->dev_addr.
* MAC address is read from read_eeprom() into @net_dev->dev_addr and
* @net_dev->perm_addr.
*/
static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev)
@@ -261,6 +262,9 @@ static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_de
for (i = 0; i < 3; i++)
((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
/* Store MAC Address in perm_addr */
memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
return 1;
}
@@ -271,7 +275,8 @@ static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_de
*
* SiS630E model, use APC CMOS RAM to store MAC address.
* APC CMOS RAM is accessed through ISA bridge.
* MAC address is read into @net_dev->dev_addr.
* MAC address is read into @net_dev->dev_addr and
* @net_dev->perm_addr.
*/
static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
@@ -296,6 +301,10 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
outb(0x09 + i, 0x70);
((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
}
/* Store MAC Address in perm_addr */
memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
pci_dev_put(isa_bridge);
@@ -310,7 +319,7 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
*
* SiS635 model, set MAC Reload Bit to load Mac address from APC
* to rfdr. rfdr is accessed through rfcr. MAC address is read into
* @net_dev->dev_addr.
* @net_dev->dev_addr and @net_dev->perm_addr.
*/
static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
@@ -334,6 +343,9 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
*( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr);
}
/* Store MAC Address in perm_addr */
memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
/* enable packet filtering */
outl(rfcrSave | RFEN, rfcr + ioaddr);
@@ -353,7 +365,7 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
* EEDONE signal to refuse EEPROM access by LAN.
* The EEPROM map of SiS962 or SiS963 is different to SiS900.
* The signature field in SiS962 or SiS963 spec is meaningless.
* MAC address is read into @net_dev->dev_addr.
* MAC address is read into @net_dev->dev_addr and @net_dev->perm_addr.
*/
static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
@@ -372,6 +384,9 @@ static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
for (i = 0; i < 3; i++)
((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
/* Store MAC Address in perm_addr */
memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
outl(EEDONE, ee_addr);
return 1;
} else {

@@ -26,9 +26,9 @@
#undef DWMAC_DMA_DEBUG
#ifdef DWMAC_DMA_DEBUG
#define DBG(fmt, args...) printk(fmt, ## args)
#define DWMAC_LIB_DBG(fmt, args...) printk(fmt, ## args)
#else
#define DBG(fmt, args...) do { } while (0)
#define DWMAC_LIB_DBG(fmt, args...) do { } while (0)
#endif
/* CSR1 enables the transmit DMA to check for new descriptor */
@@ -152,7 +152,7 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
/* read the status register (CSR5) */
u32 intr_status = readl(ioaddr + DMA_STATUS);
DBG(INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
DWMAC_LIB_DBG(KERN_INFO "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
#ifdef DWMAC_DMA_DEBUG
/* It displays the DMA process states (CSR5 register) */
show_tx_process_state(intr_status);
@@ -160,43 +160,43 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
#endif
/* ABNORMAL interrupts */
if (unlikely(intr_status & DMA_STATUS_AIS)) {
DBG(INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
DWMAC_LIB_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: ");
if (unlikely(intr_status & DMA_STATUS_UNF)) {
DBG(INFO, "transmit underflow\n");
DWMAC_LIB_DBG(KERN_INFO "transmit underflow\n");
ret = tx_hard_error_bump_tc;
x->tx_undeflow_irq++;
}
if (unlikely(intr_status & DMA_STATUS_TJT)) {
DBG(INFO, "transmit jabber\n");
DWMAC_LIB_DBG(KERN_INFO "transmit jabber\n");
x->tx_jabber_irq++;
}
if (unlikely(intr_status & DMA_STATUS_OVF)) {
DBG(INFO, "recv overflow\n");
DWMAC_LIB_DBG(KERN_INFO "recv overflow\n");
x->rx_overflow_irq++;
}
if (unlikely(intr_status & DMA_STATUS_RU)) {
DBG(INFO, "receive buffer unavailable\n");
DWMAC_LIB_DBG(KERN_INFO "receive buffer unavailable\n");
x->rx_buf_unav_irq++;
}
if (unlikely(intr_status & DMA_STATUS_RPS)) {
DBG(INFO, "receive process stopped\n");
DWMAC_LIB_DBG(KERN_INFO "receive process stopped\n");
x->rx_process_stopped_irq++;
}
if (unlikely(intr_status & DMA_STATUS_RWT)) {
DBG(INFO, "receive watchdog\n");
DWMAC_LIB_DBG(KERN_INFO "receive watchdog\n");
x->rx_watchdog_irq++;
}
if (unlikely(intr_status & DMA_STATUS_ETI)) {
DBG(INFO, "transmit early interrupt\n");
DWMAC_LIB_DBG(KERN_INFO "transmit early interrupt\n");
x->tx_early_irq++;
}
if (unlikely(intr_status & DMA_STATUS_TPS)) {
DBG(INFO, "transmit process stopped\n");
DWMAC_LIB_DBG(KERN_INFO "transmit process stopped\n");
x->tx_process_stopped_irq++;
ret = tx_hard_error;
}
if (unlikely(intr_status & DMA_STATUS_FBI)) {
DBG(INFO, "fatal bus error\n");
DWMAC_LIB_DBG(KERN_INFO "fatal bus error\n");
x->fatal_bus_error_irq++;
ret = tx_hard_error;
}
@@ -215,7 +215,7 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
/* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
DBG(INFO, "\n\n");
DWMAC_LIB_DBG(KERN_INFO "\n\n");
return ret;
}

@@ -750,7 +750,6 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
priv->xstats.threshold = tc;
}
stmmac_tx_err(priv);
} else if (unlikely(status == tx_hard_error))
stmmac_tx_err(priv);
}
@@ -781,21 +780,6 @@ static int stmmac_open(struct net_device *dev)
stmmac_verify_args();
ret = stmmac_init_phy(dev);
if (unlikely(ret)) {
pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
return ret;
}
/* Request the IRQ lines */
ret = request_irq(dev->irq, stmmac_interrupt,
IRQF_SHARED, dev->name, dev);
if (unlikely(ret < 0)) {
pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
__func__, dev->irq, ret);
return ret;
}
#ifdef CONFIG_STMMAC_TIMER
priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
if (unlikely(priv->tm == NULL)) {
@@ -814,6 +798,11 @@ static int stmmac_open(struct net_device *dev)
} else
priv->tm->enable = 1;
#endif
ret = stmmac_init_phy(dev);
if (unlikely(ret)) {
pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
goto open_error;
}
/* Create and initialize the TX/RX descriptors chains. */
priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
@@ -822,12 +811,11 @@ static int stmmac_open(struct net_device *dev)
init_dma_desc_rings(dev);
/* DMA initialization and SW reset */
if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
priv->dma_tx_phy,
priv->dma_rx_phy) < 0)) {
ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
priv->dma_tx_phy, priv->dma_rx_phy);
if (ret < 0) {
pr_err("%s: DMA initialization failed\n", __func__);
return -1;
goto open_error;
}
/* Copy the MAC addr into the HW */
@@ -848,6 +836,15 @@ static int stmmac_open(struct net_device *dev)
writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
/* Request the IRQ lines */
ret = request_irq(dev->irq, stmmac_interrupt,
IRQF_SHARED, dev->name, dev);
if (unlikely(ret < 0)) {
pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
__func__, dev->irq, ret);
goto open_error;
}
/* Enable the MAC Rx/Tx */
stmmac_enable_mac(priv->ioaddr);
@@ -878,7 +875,17 @@ static int stmmac_open(struct net_device *dev)
napi_enable(&priv->napi);
skb_queue_head_init(&priv->rx_recycle);
netif_start_queue(dev);
return 0;
open_error:
#ifdef CONFIG_STMMAC_TIMER
kfree(priv->tm);
#endif
if (priv->phydev)
phy_disconnect(priv->phydev);
return ret;
}
/**

@@ -1251,7 +1251,7 @@ static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev)
/*
* The NIC has told us that a packet has been downloaded onto the card, we must
* find out which packet it has done, clear the skb and information for the packet
* then advance around the ring for all tranmitted packets
* then advance around the ring for all transmitted packets
*/
static void xl_dn_comp(struct net_device *dev)
@@ -1568,7 +1568,7 @@ static void xl_arb_cmd(struct net_device *dev)
if (lan_status_diff & LSC_SOFT_ERR)
printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
if (lan_status_diff & LSC_SS)
printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)

@@ -1675,7 +1675,7 @@ static void streamer_arb_cmd(struct net_device *dev)
if (lan_status_diff & LSC_SOFT_ERR)
printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n", dev->name);
printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name);
if (lan_status_diff & LSC_SS)
printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)

@@ -1500,7 +1500,7 @@ static void olympic_arb_cmd(struct net_device *dev)
if (lan_status_diff & LSC_SOFT_ERR)
printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
if (lan_status_diff & LSC_SS)
printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)

@@ -1040,7 +1040,7 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
}
ret = ath9k_htc_hw_init(hif_dev->htc_handle,
&hif_dev->udev->dev, hif_dev->device_id,
&interface->dev, hif_dev->device_id,
hif_dev->udev->product, id->driver_info);
if (ret) {
ret = -EINVAL;
@@ -1158,7 +1158,7 @@ static int ath9k_hif_usb_resume(struct usb_interface *interface)
#endif
static struct usb_driver ath9k_hif_usb_driver = {
.name = "ath9k_hif_usb",
.name = KBUILD_MODNAME,
.probe = ath9k_hif_usb_probe,
.disconnect = ath9k_hif_usb_disconnect,
#ifdef CONFIG_PM

@@ -1254,15 +1254,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ah->txchainmask = common->tx_chainmask;
ah->rxchainmask = common->rx_chainmask;
if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
ath9k_hw_abortpcurecv(ah);
if (!ath9k_hw_stopdmarecv(ah)) {
ath_dbg(common, ATH_DBG_XMIT,
"Failed to stop receive dma\n");
bChannelChange = false;
}
}
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;

@@ -751,28 +751,47 @@ void ath9k_hw_abortpcurecv(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
#define AH_RX_TIME_QUANTUM 100 /* usec */
struct ath_common *common = ath9k_hw_common(ah);
u32 mac_status, last_mac_status = 0;
int i;
/* Enable access to the DMA observation bus */
REG_WRITE(ah, AR_MACMISC,
((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
(AR_MACMISC_MISC_OBS_BUS_1 <<
AR_MACMISC_MISC_OBS_BUS_MSB_S)));
REG_WRITE(ah, AR_CR, AR_CR_RXD);
/* Wait for rx enable bit to go low */
for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
break;
if (!AR_SREV_9300_20_OR_LATER(ah)) {
mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
if (mac_status == 0x1c0 && mac_status == last_mac_status) {
*reset = true;
break;
}
last_mac_status = mac_status;
}
udelay(AH_TIME_QUANTUM);
}
if (i == 0) {
ath_err(common,
"DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
"DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
AH_RX_STOP_DMA_TIMEOUT / 1000,
REG_READ(ah, AR_CR),
REG_READ(ah, AR_DIAG_SW));
REG_READ(ah, AR_DIAG_SW),
REG_READ(ah, AR_DMADBG_7));
return false;
} else {
return true;

@@ -695,7 +695,7 @@ bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
void ath9k_hw_abortpcurecv(struct ath_hw *ah);
bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset);
int ath9k_hw_beaconq_setup(struct ath_hw *ah);
/* Interrupt Handling */

@@ -1376,7 +1376,6 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
ath9k_calculate_iter_data(hw, vif, &iter_data);
ath9k_ps_wakeup(sc);
/* Set BSSID mask. */
memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
ath_hw_setbssidmask(common);
@@ -1411,7 +1410,6 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
}
ath9k_hw_set_interrupts(ah, ah->imask);
ath9k_ps_restore(sc);
/* Set up ANI */
if ((iter_data.naps + iter_data.nadhocs) > 0) {
@@ -1457,6 +1455,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
struct ath_vif *avp = (void *)vif->drv_priv;
int ret = 0;
ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
switch (vif->type) {
@@ -1503,6 +1502,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
ath9k_do_vif_add_setup(hw, vif);
out:
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
return ret;
}
@@ -1517,6 +1517,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
/* See if new interface type is valid. */
if ((new_type == NL80211_IFTYPE_ADHOC) &&
@@ -1546,6 +1547,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
ath9k_do_vif_add_setup(hw, vif);
out:
ath9k_ps_restore(sc);
mutex_unlock(&sc->mutex);
return ret;
}
@@ -1558,6 +1560,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
sc->nvifs--;
@@ -1569,6 +1572,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
ath9k_calculate_summary_state(hw, NULL);
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
}
static void ath9k_enable_ps(struct ath_softc *sc)
@@ -1809,6 +1813,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
txq = sc->tx.txq_map[queue];
ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
@@ -1832,6 +1837,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
ath_beaconq_config(sc);
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
return ret;
}
@@ -1894,6 +1900,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
int slottime;
int error;
ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
if (changed & BSS_CHANGED_BSSID) {
@@ -1994,6 +2001,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
}
static u64 ath9k_get_tsf(struct ieee80211_hw *hw)

@@ -486,12 +486,12 @@ int ath_startrecv(struct ath_softc *sc)
bool ath_stoprecv(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
bool stopped;
bool stopped, reset = false;
spin_lock_bh(&sc->rx.rxbuflock);
ath9k_hw_abortpcurecv(ah);
ath9k_hw_setrxfilter(ah, 0);
stopped = ath9k_hw_stopdmarecv(ah);
stopped = ath9k_hw_stopdmarecv(ah, &reset);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
ath_edma_stop_recv(sc);
@@ -506,7 +506,7 @@ bool ath_stoprecv(struct ath_softc *sc)
"confusing the DMA engine when we start RX up\n");
ATH_DBG_WARN_ON_ONCE(!stopped);
}
return stopped;
return stopped || reset;
}
void ath_flushrecv(struct ath_softc *sc)

@@ -195,6 +195,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = {
{APL9_WORLD, CTL_ETSI, CTL_ETSI},
{APL3_FCCA, CTL_FCC, CTL_FCC},
{APL7_FCCA, CTL_FCC, CTL_FCC},
{APL1_ETSIC, CTL_FCC, CTL_ETSI},
{APL2_ETSIC, CTL_FCC, CTL_ETSI},
{APL2_APLD, CTL_FCC, NO_CTL},

@@ -1,6 +1,5 @@
config IWLWIFI_LEGACY
tristate "Intel Wireless Wifi legacy devices"
depends on PCI && MAC80211
tristate
select FW_LOADER
select NEW_LEDS
select LEDS_CLASS
@@ -65,7 +64,8 @@ endmenu
config IWL4965
tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
depends on IWLWIFI_LEGACY
depends on PCI && MAC80211
select IWLWIFI_LEGACY
---help---
This option enables support for
@@ -92,7 +92,8 @@ config IWL4965
config IWL3945
tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
depends on IWLWIFI_LEGACY
depends on PCI && MAC80211
select IWLWIFI_LEGACY
---help---
Select to build the driver supporting the:

@@ -74,8 +74,6 @@
/* RSSI to dBm */
#define IWL39_RSSI_OFFSET 95
#define IWL_DEFAULT_TX_POWER 0x0F
/*
* EEPROM related constants, enums, and structures.
*/

@@ -804,9 +804,6 @@ struct iwl4965_scd_bc_tbl {
#define IWL4965_DEFAULT_TX_RETRY 15
/* Limit range of txpower output target to be between these values */
#define IWL4965_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
/* EEPROM */
#define IWL4965_FIRST_AMPDU_QUEUE 10

@@ -160,6 +160,7 @@ int iwl_legacy_init_geos(struct iwl_priv *priv)
struct ieee80211_channel *geo_ch;
struct ieee80211_rate *rates;
int i = 0;
s8 max_tx_power = 0;
if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
@@ -235,8 +236,8 @@ int iwl_legacy_init_geos(struct iwl_priv *priv)
geo_ch->flags |= ch->ht40_extension_channel;
if (ch->max_power_avg > priv->tx_power_device_lmt)
priv->tx_power_device_lmt = ch->max_power_avg;
if (ch->max_power_avg > max_tx_power)
max_tx_power = ch->max_power_avg;
} else {
geo_ch->flags |= IEEE80211_CHAN_DISABLED;
}
@@ -249,6 +250,10 @@ int iwl_legacy_init_geos(struct iwl_priv *priv)
geo_ch->flags);
}
priv->tx_power_device_lmt = max_tx_power;
priv->tx_power_user_lmt = max_tx_power;
priv->tx_power_next = max_tx_power;
if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
priv->cfg->sku & IWL_SKU_A) {
IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
@@ -1124,11 +1129,11 @@ int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
if (!priv->cfg->ops->lib->send_tx_power)
return -EOPNOTSUPP;
if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) {
/* 0 dBm mean 1 milliwatt */
if (tx_power < 0) {
IWL_WARN(priv,
"Requested user TXPOWER %d below lower limit %d.\n",
tx_power,
IWL4965_TX_POWER_TARGET_POWER_MIN);
"Requested user TXPOWER %d below 1 mW.\n",
tx_power);
return -EINVAL;
}

@@ -471,13 +471,6 @@ int iwl_legacy_init_channel_map(struct iwl_priv *priv)
flags & EEPROM_CHANNEL_RADAR))
? "" : "not ");
/* Set the tx_power_user_lmt to the highest power
* supported by any channel */
if (eeprom_ch_info[ch].max_power_avg >
priv->tx_power_user_lmt)
priv->tx_power_user_lmt =
eeprom_ch_info[ch].max_power_avg;
ch_info++;
}
}

@@ -3825,10 +3825,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
priv->force_reset[IWL_FW_RESET].reset_duration =
IWL_DELAY_NEXT_FORCE_FW_RELOAD;
priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
priv->tx_power_next = IWL_DEFAULT_TX_POWER;
if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
eeprom->version);

@@ -3140,12 +3140,6 @@ static int iwl4965_init_drv(struct iwl_priv *priv)
iwl_legacy_init_scan_params(priv);
/* Set the tx_power_user_lmt to the lowest power level
* this value will get overwritten by channel max power avg
* from eeprom */
priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN;
priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN;
ret = iwl_legacy_init_channel_map(priv);
if (ret) {
IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);

@@ -530,6 +530,9 @@ static struct iwl_ht_params iwl5000_ht_params = {
struct iwl_cfg iwl5300_agn_cfg = {
.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
IWL_DEVICE_5000,
/* at least EEPROM 0x11A has wrong info */
.valid_tx_ant = ANT_ABC, /* .cfg overwrite */
.valid_rx_ant = ANT_ABC, /* .cfg overwrite */
.ht_params = &iwl5000_ht_params,
};

@@ -137,6 +137,7 @@ struct mwl8k_tx_queue {
struct mwl8k_priv {
struct ieee80211_hw *hw;
struct pci_dev *pdev;
int irq;
struct mwl8k_device_info *device_info;
@@ -3761,9 +3762,11 @@ static int mwl8k_start(struct ieee80211_hw *hw)
rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
IRQF_SHARED, MWL8K_NAME, hw);
if (rc) {
priv->irq = -1;
wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
return -EIO;
}
priv->irq = priv->pdev->irq;
/* Enable TX reclaim and RX tasklets. */
tasklet_enable(&priv->poll_tx_task);
@@ -3800,6 +3803,7 @@ static int mwl8k_start(struct ieee80211_hw *hw)
if (rc) {
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
priv->irq = -1;
tasklet_disable(&priv->poll_tx_task);
tasklet_disable(&priv->poll_rx_task);
}
@@ -3818,7 +3822,10 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
/* Disable interrupts */
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
if (priv->irq != -1) {
free_irq(priv->pdev->irq, hw);
priv->irq = -1;
}
/* Stop finalize join worker */
cancel_work_sync(&priv->finalize_join_worker);

@@ -703,7 +703,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
struct p54_tx_info *p54info;
struct p54_hdr *hdr;
struct p54_tx_data *txhdr;
unsigned int padding, len, extra_len;
unsigned int padding, len, extra_len = 0;
int i, j, ridx;
u16 hdr_flags = 0, aid = 0;
u8 rate, queue = 0, crypt_offset = 0;

@@ -103,8 +103,8 @@ struct driver_info {
* Indicates to usbnet, that USB driver accumulates multiple IP packets.
* Affects statistic (counters) and short packet handling.
*/
#define FLAG_MULTI_PACKET 0x1000
#define FLAG_RX_ASSEMBLE 0x2000 /* rx packets may span >1 frames */
#define FLAG_MULTI_PACKET 0x2000
#define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */
/* init device ... can sleep, or cause probe() failure */
int (*bind)(struct usbnet *, struct usb_interface *);

@@ -249,11 +249,9 @@ static int br_parse_ip_options(struct sk_buff *skb)
goto drop;
}
/* Zero out the CB buffer if no options present */
if (iph->ihl == 5) {
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
if (iph->ihl == 5)
return 0;
}
opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
if (ip_options_compile(dev_net(dev), opt, skb))

@@ -13,6 +13,7 @@
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>
#define container_obj(layr) ((struct cfsrvl *) layr)
#define DGM_CMD_BIT 0x80
@@ -83,6 +84,7 @@ static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
u8 packet_type;
u32 zero = 0;
struct caif_payload_info *info;
struct cfsrvl *service = container_obj(layr);
@@ -94,7 +96,9 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
if (cfpkt_getlen(pkt) > DGM_MTU)
return -EMSGSIZE;
cfpkt_add_head(pkt, &zero, 4);
cfpkt_add_head(pkt, &zero, 3);
packet_type = 0x08; /* B9 set - UNCLASSIFIED */
cfpkt_add_head(pkt, &packet_type, 1);
/* Add info for MUX-layer to route the packet out. */
info = cfpkt_info(pkt);

@@ -244,9 +244,9 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
int phyid)
{
struct cfmuxl *muxl = container_obj(layr);
struct list_head *node;
struct list_head *node, *next;
struct cflayer *layer;
list_for_each(node, &muxl->srvl_list) {
list_for_each_safe(node, next, &muxl->srvl_list) {
layer = list_entry(node, struct cflayer, node);
if (cfsrvl_phyid_match(layer, phyid))
layer->ctrlcmd(layer, ctrl, phyid);

@@ -5203,11 +5203,15 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
}
/* TSO requires that SG is present as well. */
if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n");
features &= ~NETIF_F_TSO;
if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
netdev_info(dev, "Dropping TSO features since no SG feature.\n");
features &= ~NETIF_F_ALL_TSO;
}
/* TSO ECN requires that TSO is present as well. */
if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
features &= ~NETIF_F_TSO_ECN;
/* Software GSO depends on SG. */
if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n");

@@ -1,5 +1,3 @@
obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
af_802154-y := af_ieee802154.o raw.o dgram.o
ccflags-y += -Wall -DDEBUG

@@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
if (!reuse || !sk2->sk_reuse ||
((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
sk2->sk_state == TCP_LISTEN) {
const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,8 +122,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
(tb->num_owners < smallest_size || smallest_size == -1)) {
smallest_size = tb->num_owners;
smallest_rover = rover;
if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
spin_unlock(&head->lock);
snum = smallest_rover;
goto have_snum;

@@ -354,7 +354,8 @@ static void inetpeer_free_rcu(struct rcu_head *head)
}
/* May be called with local BH enabled. */
static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
int do_free;
@@ -368,7 +369,6 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
* We use refcnt=-1 to alert lockless readers this entry is deleted.
*/
if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
struct inet_peer __rcu **stack[PEER_MAXDEPTH];
struct inet_peer __rcu ***stackptr, ***delp;
if (lookup(&p->daddr, stack, base) != p)
BUG();
@@ -422,7 +422,7 @@ static struct inet_peer_base *peer_to_base(struct inet_peer *p)
}
/* May be called with local BH enabled. */
static int cleanup_once(unsigned long ttl)
static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
struct inet_peer *p = NULL;
@@ -454,7 +454,7 @@ static int cleanup_once(unsigned long ttl)
* happen because of entry limits in route cache. */
return -1;
unlink_from_pool(p, peer_to_base(p));
unlink_from_pool(p, peer_to_base(p), stack);
return 0;
}
@@ -524,7 +524,7 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
if (base->total >= inet_peer_threshold)
/* Remove one less-recently-used entry. */
cleanup_once(0);
cleanup_once(0, stack);
return p;
}
@@ -540,6 +540,7 @@ static void peer_check_expire(unsigned long dummy)
{
unsigned long now = jiffies;
int ttl, total;
struct inet_peer __rcu **stack[PEER_MAXDEPTH];
total = compute_total();
if (total >= inet_peer_threshold)
@@ -548,7 +549,7 @@ static void peer_check_expire(unsigned long dummy)
ttl = inet_peer_maxttl
- (inet_peer_maxttl - inet_peer_minttl) / HZ *
total / inet_peer_threshold * HZ;
while (!cleanup_once(ttl)) {
while (!cleanup_once(ttl, stack)) {
if (jiffies != now)
break;
}

@@ -329,7 +329,7 @@ int ip_options_compile(struct net *net,
pp_ptr = optptr + 2;
goto error;
}
if (skb) {
if (rt) {
memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
opt->is_changed = 1;
}
@@ -371,7 +371,7 @@ int ip_options_compile(struct net *net,
goto error;
}
opt->ts = optptr - iph;
if (skb) {
if (rt) {
memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
timeptr = (__be32*)&optptr[optptr[2]+3];
}
@@ -603,7 +603,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
unsigned long orefdst;
int err;
if (!opt->srr)
if (!opt->srr || !rt)
return 0;
if (skb->pkt_type != PACKET_HOST)

@@ -311,7 +311,6 @@ static struct ctl_table ipv4_table[] = {
.mode = 0644,
.proc_handler = proc_do_large_bitmap,
},
#ifdef CONFIG_IP_MULTICAST
{
.procname = "igmp_max_memberships",
.data = &sysctl_igmp_max_memberships,
@@ -319,8 +318,6 @@ static struct ctl_table ipv4_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
#endif
{
.procname = "igmp_max_msf",
.data = &sysctl_igmp_max_msf,

@@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
(!sk->sk_reuse || !sk2->sk_reuse ||
((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
sk2->sk_state == TCP_LISTEN) &&
ipv6_rcv_saddr_equal(sk, sk2))
break;
}

@@ -1297,8 +1297,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
/* Note : socket.c set MSG_EOR on SEQPACKET sockets */
if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT |
MSG_NOSIGNAL)) {
err = -EINVAL;
goto out;
return -EINVAL;
}
lock_sock(sk);

@@ -121,8 +121,7 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
s32 data_size = ntohs(pdulen) - llc_len;
if (data_size < 0 ||
((skb_tail_pointer(skb) -
(u8 *)pdu) - llc_len) < data_size)
!pskb_may_pull(skb, data_size))
return 0;
if (unlikely(pskb_trim_rcsum(skb, data_size)))
return 0;

@@ -343,6 +343,10 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
ipset_adtfn adtfn = set->variant->adt[adt];
struct ipmac data;
/* MAC can be src only */
if (!(flags & IPSET_DIM_TWO_SRC))
return 0;
data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
if (data.id < map->first_ip || data.id > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE;

@@ -1022,8 +1022,9 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
if (cb->args[1] >= ip_set_max)
goto out;
pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
dump_last:
pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
for (; cb->args[1] < max; cb->args[1]++) {
index = (ip_set_id_t) cb->args[1];
set = ip_set_list[index];
@@ -1038,8 +1039,8 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
* so that lists (unions of sets) are dumped last.
*/
if (cb->args[0] != DUMP_ONE &&
!((cb->args[0] == DUMP_ALL) ^
(set->type->features & IPSET_DUMP_LAST)))
((cb->args[0] == DUMP_ALL) ==
!!(set->type->features & IPSET_DUMP_LAST)))
continue;
pr_debug("List set: %s\n", set->name);
if (!cb->args[2]) {
@@ -1083,6 +1084,12 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
goto release_refcount;
}
}
/* If we dump all sets, continue with dumping last ones */
if (cb->args[0] == DUMP_ALL) {
cb->args[0] = DUMP_LAST;
cb->args[1] = 0;
goto dump_last;
}
goto out;
nla_put_failure:
@@ -1093,11 +1100,6 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
pr_debug("release set %s\n", ip_set_list[index]->name);
ip_set_put_byindex(index);
}
/* If we dump all sets, continue with dumping last ones */
if (cb->args[0] == DUMP_ALL && cb->args[1] >= max && !cb->args[2])
cb->args[0] = DUMP_LAST;
out:
if (nlh) {
nlmsg_end(skb, nlh);
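
The ip_set_dump_start() fix above restarts the dump loop in DUMP_LAST mode once the first pass completes, so list-type sets (unions that reference other sets) are emitted after everything they point to. A toy version of that resumable two-pass walk (names invented, no netlink):

#include <stdbool.h>
#include <stdio.h>

enum pass { PASS_ALL, PASS_LAST, PASS_DONE };

struct set { const char *name; bool dump_last; };

static const struct set sets[] = {
    { "hash:ip a",  false },
    { "list:set b", true  },
    { "hash:ip c",  false },
};

static void dump(enum pass *pass, size_t *index)
{
restart:
    for (; *index < sizeof(sets) / sizeof(sets[0]); (*index)++) {
        /* Pass one skips "dump last" sets; pass two emits only them. */
        if ((*pass == PASS_ALL) == sets[*index].dump_last)
            continue;
        printf("dump %s\n", sets[*index].name);
    }
    if (*pass == PASS_ALL) {            /* finished pass one: rewind for pass two */
        *pass = PASS_LAST;
        *index = 0;
        goto restart;
    }
    *pass = PASS_DONE;
}

int main(void)
{
    enum pass p = PASS_ALL;
    size_t i = 0;
    dump(&p, &i);                       /* prints: a, c, then b */
    return 0;
}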

@@ -81,6 +81,7 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par)
if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
pr_warning("Protocol error: set match dimension "
"is over the limit!\n");
ip_set_nfnl_put(info->match_set.index);
return -ERANGE;
}
@@ -135,6 +136,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
if (index == IPSET_INVALID_ID) {
pr_warning("Cannot find del_set index %u as target\n",
info->del_set.index);
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(info->add_set.index);
return -ENOENT;
}
}
@@ -142,6 +145,10 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
pr_warning("Protocol error: SET target dimension "
"is over the limit!\n");
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(info->add_set.index);
if (info->del_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(info->del_set.index);
return -ERANGE;
}
@@ -192,6 +199,7 @@ set_match_checkentry(const struct xt_mtchk_param *par)
if (info->match_set.dim > IPSET_DIM_MAX) {
pr_warning("Protocol error: set match dimension "
"is over the limit!\n");
ip_set_nfnl_put(info->match_set.index);
return -ERANGE;
}
@@ -219,7 +227,7 @@ set_target(struct sk_buff *skb, const struct xt_action_param *par)
if (info->del_set.index != IPSET_INVALID_ID)
ip_set_del(info->del_set.index,
skb, par->family,
info->add_set.dim,
info->del_set.dim,
info->del_set.flags);
return XT_CONTINUE;
@@ -245,13 +253,19 @@ set_target_checkentry(const struct xt_tgchk_param *par)
if (index == IPSET_INVALID_ID) {
pr_warning("Cannot find del_set index %u as target\n",
info->del_set.index);
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(info->add_set.index);
return -ENOENT;
}
}
if (info->add_set.dim > IPSET_DIM_MAX ||
info->del_set.flags > IPSET_DIM_MAX) {
info->del_set.dim > IPSET_DIM_MAX) {
pr_warning("Protocol error: SET target dimension "
"is over the limit!\n");
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(info->add_set.index);
if (info->del_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(info->del_set.index);
return -ERANGE;
}

@@ -569,6 +569,8 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
sctp_assoc_set_primary(asoc, transport);
if (asoc->peer.active_path == peer)
asoc->peer.active_path = transport;
if (asoc->peer.retran_path == peer)
asoc->peer.retran_path = transport;
if (asoc->peer.last_data_from == peer)
asoc->peer.last_data_from = transport;
@@ -1323,6 +1325,8 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
if (t)
asoc->peer.retran_path = t;
else
t = asoc->peer.retran_path;
SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
" %p addr: ",