Merge branch 'net-subsystem-misc-refcounter-conversions'

Elena Reshetova says:

====================
v2 net subsystem misc refcounter conversions

Changes in v2:
 * rebase on top of net-next
 * currently, by default, refcount_t = atomic_t (*) and uses all the
   standard atomic operations unless CONFIG_REFCOUNT_FULL is enabled.
   This is a compromise for systems that are performance-critical
   (such as net) and cannot accept even a slight delay in the
   refcounter operations.

This series, for various misc network components, replaces atomic_t reference
counters with the new refcount_t type and API (see include/linux/refcount.h).
By doing this we prevent intentional or accidental
underflows or overflows that can lead to use-after-free vulnerabilities.
These are the last networking-related conversions, with the exception of
network drivers (to be sent separately).

Please excuse the long patch set, but it seems that breaking it up
won't save that much on the CC list, and most of the changes are
trivial.

The patches are fully independent and can be cherry-picked separately.
In order to try the full refcount functionality at run time,
CONFIG_REFCOUNT_FULL must be enabled.

NOTE: the automatic kernel builder for some reason doesn't like all my
network branches and regularly times out the builds on these branches.
The suggestion to "wait a day for good coverage" doesn't work, as
we have seen with the generic network conversions, so please wait for
the full report from the kernel test robot before merging further up.
This has been compile-tested in 116 configs, but 71 timed out (including
all s390-related configs again). I am trying to see if they can fix
the build coverage for me in the meantime.

* The respective change is currently merged into -next as
  "locking/refcount: Create unchecked atomic_t implementation".
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2017-07-04 22:35:20 +01:00
commit dcc13ee85f
61 changed files with 223 additions and 209 deletions
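
The hunks below all apply the same mechanical mapping from open-coded
atomic_t reference counters to the refcount_t API. As a rough guide, here
is a minimal sketch of that mapping on a hypothetical struct foo (not a
structure from this series; the real API is declared in
include/linux/refcount.h):

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct foo {
            refcount_t refcnt;              /* was: atomic_t refcnt; */
    };

    static void foo_init(struct foo *f)
    {
            refcount_set(&f->refcnt, 1);    /* was: atomic_set(&f->refcnt, 1); */
    }

    static void foo_hold(struct foo *f)
    {
            refcount_inc(&f->refcnt);       /* was: atomic_inc(&f->refcnt); */
    }

    /* was: atomic_add_unless(&f->refcnt, 1, 0) -- take a reference only if
     * the object has not already dropped its last one.
     */
    static bool foo_hold_if_live(struct foo *f)
    {
            return refcount_inc_not_zero(&f->refcnt);
    }

    static void foo_put(struct foo *f)
    {
            if (refcount_dec_and_test(&f->refcnt))  /* was: atomic_dec_and_test() */
                    kfree(f);
    }

With CONFIG_REFCOUNT_FULL, the refcount_* operations saturate on overflow
and warn on increment-from-zero or underflow instead of silently wrapping,
which is what closes the use-after-free window described in the cover
letter; without it they currently fall back to the plain atomic operations.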

View File

@ -1034,11 +1034,11 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
/* The vxlan_sock is only used by dev, leaving group has
* no effect on other vxlan devices.
*/
if (family == AF_INET && sock4 && atomic_read(&sock4->refcnt) == 1)
if (family == AF_INET && sock4 && refcount_read(&sock4->refcnt) == 1)
return false;
#if IS_ENABLED(CONFIG_IPV6)
sock6 = rtnl_dereference(dev->vn6_sock);
if (family == AF_INET6 && sock6 && atomic_read(&sock6->refcnt) == 1)
if (family == AF_INET6 && sock6 && refcount_read(&sock6->refcnt) == 1)
return false;
#endif
@ -1075,7 +1075,7 @@ static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
if (!vs)
return false;
if (!atomic_dec_and_test(&vs->refcnt))
if (!refcount_dec_and_test(&vs->refcnt))
return false;
vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
@ -2825,7 +2825,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
}
vs->sock = sock;
atomic_set(&vs->refcnt, 1);
refcount_set(&vs->refcnt, 1);
vs->flags = (flags & VXLAN_F_RCV_FLAGS);
spin_lock(&vn->sock_lock);
@ -2860,7 +2860,7 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
spin_lock(&vn->sock_lock);
vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
vxlan->cfg.dst_port, vxlan->cfg.flags);
if (vs && !atomic_add_unless(&vs->refcnt, 1, 0)) {
if (vs && !refcount_inc_not_zero(&vs->refcnt)) {
spin_unlock(&vn->sock_lock);
return -EBUSY;
}
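
One detail worth noting in the hunk above: under CONFIG_REFCOUNT_FULL,
refcount_inc() warns instead of taking a reference when it sees a zero
count, so the "take a reference unless the count is already zero" idiom
keeps a dedicated helper. atomic_add_unless(&vs->refcnt, 1, 0) and
refcount_inc_not_zero(&vs->refcnt) are equivalent: both refuse to
resurrect a socket whose last reference is already gone and return false
in that case. A generic comparison (obj is just a placeholder):

    got_ref = atomic_add_unless(&obj->refcnt, 1, 0);    /* before */
    got_ref = refcount_inc_not_zero(&obj->refcnt);      /* after  */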

View File

@ -11,6 +11,7 @@
#include <linux/uio.h>
#include <net/sock.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
#include <uapi/linux/atmdev.h>
#ifdef CONFIG_PROC_FS
@ -158,7 +159,7 @@ struct atm_dev {
struct k_atm_dev_stats stats; /* statistics */
char signal; /* signal status (ATM_PHY_SIG_*) */
int link_rate; /* link rate (default: OC3) */
atomic_t refcnt; /* reference count */
refcount_t refcnt; /* reference count */
spinlock_t lock; /* protect internal members */
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_entry; /* proc entry */
@ -261,13 +262,13 @@ static inline int atm_may_send(struct atm_vcc *vcc,unsigned int size)
static inline void atm_dev_hold(struct atm_dev *dev)
{
atomic_inc(&dev->refcnt);
refcount_inc(&dev->refcnt);
}
static inline void atm_dev_put(struct atm_dev *dev)
{
if (atomic_dec_and_test(&dev->refcnt)) {
if (refcount_dec_and_test(&dev->refcnt)) {
BUG_ON(!test_bit(ATM_DF_REMOVED, &dev->flags));
if (dev->ops->dev_close)
dev->ops->dev_close(dev);

View File

@ -13,6 +13,7 @@
#define _LINUX_SUNRPC_AUTH_GSS_H
#ifdef __KERNEL__
#include <linux/refcount.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/gss_api.h>
@ -65,7 +66,7 @@ struct rpc_gss_init_res {
* the wire when communicating with a server. */
struct gss_cl_ctx {
atomic_t count;
refcount_t count;
enum rpc_gss_proc gc_proc;
u32 gc_seq;
spinlock_t gc_seq_lock;

View File

@ -11,7 +11,7 @@
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
#include <net/neighbour.h>
#include <net/sock.h>
@ -158,7 +158,7 @@ enum {
typedef struct ax25_uid_assoc {
struct hlist_node uid_node;
atomic_t refcount;
refcount_t refcount;
kuid_t uid;
ax25_address call;
} ax25_uid_assoc;
@ -167,11 +167,11 @@ typedef struct ax25_uid_assoc {
hlist_for_each_entry(__ax25, list, uid_node)
#define ax25_uid_hold(ax25) \
atomic_inc(&((ax25)->refcount))
refcount_inc(&((ax25)->refcount))
static inline void ax25_uid_put(ax25_uid_assoc *assoc)
{
if (atomic_dec_and_test(&assoc->refcount)) {
if (refcount_dec_and_test(&assoc->refcount)) {
kfree(assoc);
}
}
@ -185,7 +185,7 @@ typedef struct {
typedef struct ax25_route {
struct ax25_route *next;
atomic_t refcount;
refcount_t refcount;
ax25_address callsign;
struct net_device *dev;
ax25_digi *digipeat;
@ -194,14 +194,14 @@ typedef struct ax25_route {
static inline void ax25_hold_route(ax25_route *ax25_rt)
{
atomic_inc(&ax25_rt->refcount);
refcount_inc(&ax25_rt->refcount);
}
void __ax25_put_route(ax25_route *ax25_rt);
static inline void ax25_put_route(ax25_route *ax25_rt)
{
if (atomic_dec_and_test(&ax25_rt->refcount))
if (refcount_dec_and_test(&ax25_rt->refcount))
__ax25_put_route(ax25_rt);
}
@ -244,7 +244,7 @@ typedef struct ax25_cb {
unsigned char window;
struct timer_list timer, dtimer;
struct sock *sk; /* Backlink to socket */
atomic_t refcount;
refcount_t refcount;
} ax25_cb;
struct ax25_sock {
@ -266,11 +266,11 @@ static inline struct ax25_cb *sk_to_ax25(const struct sock *sk)
hlist_for_each_entry(__ax25, list, ax25_node)
#define ax25_cb_hold(__ax25) \
atomic_inc(&((__ax25)->refcount))
refcount_inc(&((__ax25)->refcount))
static __inline__ void ax25_cb_put(ax25_cb *ax25)
{
if (atomic_dec_and_test(&ax25->refcount)) {
if (refcount_dec_and_test(&ax25->refcount)) {
kfree(ax25->digipeat);
kfree(ax25);
}

View File

@ -38,7 +38,7 @@
#include <linux/skbuff.h>
#include <net/netlabel.h>
#include <net/request_sock.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
#include <asm/unaligned.h>
/* known doi values */
@ -57,7 +57,7 @@ struct calipso_doi {
u32 doi;
u32 type;
atomic_t refcount;
refcount_t refcount;
struct list_head list;
struct rcu_head rcu;
};

View File

@ -2,6 +2,7 @@
#define _NET_DN_FIB_H
#include <linux/netlink.h>
#include <linux/refcount.h>
extern const struct nla_policy rtm_dn_policy[];
@ -28,7 +29,7 @@ struct dn_fib_info {
struct dn_fib_info *fib_next;
struct dn_fib_info *fib_prev;
int fib_treeref;
atomic_t fib_clntref;
refcount_t fib_clntref;
int fib_dead;
unsigned int fib_flags;
int fib_protocol;
@ -130,7 +131,7 @@ void dn_fib_free_info(struct dn_fib_info *fi);
static inline void dn_fib_info_put(struct dn_fib_info *fi)
{
if (atomic_dec_and_test(&fi->fib_clntref))
if (refcount_dec_and_test(&fi->fib_clntref))
dn_fib_free_info(fi);
}

View File

@ -14,6 +14,7 @@
#include <linux/ipx.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/refcount.h>
struct ipx_address {
__be32 net;
@ -54,7 +55,7 @@ struct ipx_interface {
/* IPX address */
__be32 if_netnum;
unsigned char if_node[IPX_NODE_LEN];
atomic_t refcnt;
refcount_t refcnt;
/* physical device info */
struct net_device *if_dev;
@ -80,7 +81,7 @@ struct ipx_route {
unsigned char ir_routed;
unsigned char ir_router_node[IPX_NODE_LEN];
struct list_head node; /* node in ipx_routes list */
atomic_t refcnt;
refcount_t refcnt;
};
struct ipx_cb {
@ -139,7 +140,7 @@ const char *ipx_device_name(struct ipx_interface *intrfc);
static __inline__ void ipxitf_hold(struct ipx_interface *intrfc)
{
atomic_inc(&intrfc->refcnt);
refcount_inc(&intrfc->refcnt);
}
void ipxitf_down(struct ipx_interface *intrfc);
@ -157,18 +158,18 @@ int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
static __inline__ void ipxitf_put(struct ipx_interface *intrfc)
{
if (atomic_dec_and_test(&intrfc->refcnt))
if (refcount_dec_and_test(&intrfc->refcnt))
ipxitf_down(intrfc);
}
static __inline__ void ipxrtr_hold(struct ipx_route *rt)
{
atomic_inc(&rt->refcnt);
refcount_inc(&rt->refcnt);
}
static __inline__ void ipxrtr_put(struct ipx_route *rt)
{
if (atomic_dec_and_test(&rt->refcnt))
if (refcount_dec_and_test(&rt->refcnt))
kfree(rt);
}
#endif /* _NET_INET_IPX_H_ */

View File

@ -1,6 +1,7 @@
#ifndef _LAPB_H
#define _LAPB_H
#include <linux/lapb.h>
#include <linux/refcount.h>
#define LAPB_HEADER_LEN 20 /* LAPB over Ethernet + a bit more */
@ -101,7 +102,7 @@ struct lapb_cb {
struct lapb_frame frmr_data;
unsigned char frmr_type;
atomic_t refcnt;
refcount_t refcnt;
};
/* lapb_iface.c */

View File

@ -55,7 +55,7 @@ struct llc_sap {
unsigned char state;
unsigned char p_bit;
unsigned char f_bit;
atomic_t refcnt;
refcount_t refcnt;
int (*rcv_func)(struct sk_buff *skb,
struct net_device *dev,
struct packet_type *pt,
@ -113,14 +113,14 @@ struct llc_sap *llc_sap_open(unsigned char lsap,
struct net_device *orig_dev));
static inline void llc_sap_hold(struct llc_sap *sap)
{
atomic_inc(&sap->refcnt);
refcount_inc(&sap->refcnt);
}
void llc_sap_close(struct llc_sap *sap);
static inline void llc_sap_put(struct llc_sap *sap)
{
if (atomic_dec_and_test(&sap->refcnt))
if (refcount_dec_and_test(&sap->refcnt))
llc_sap_close(sap);
}

View File

@ -11,6 +11,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/refcount.h>
#define NR_NETWORK_LEN 15
#define NR_TRANSPORT_LEN 5
@ -93,7 +94,7 @@ struct nr_neigh {
unsigned short count;
unsigned int number;
unsigned char failed;
atomic_t refcount;
refcount_t refcount;
};
struct nr_route {
@ -109,7 +110,7 @@ struct nr_node {
unsigned char which;
unsigned char count;
struct nr_route routes[3];
atomic_t refcount;
refcount_t refcount;
spinlock_t node_lock;
};
@ -118,21 +119,21 @@ struct nr_node {
*********************************************************************/
#define nr_node_hold(__nr_node) \
atomic_inc(&((__nr_node)->refcount))
refcount_inc(&((__nr_node)->refcount))
static __inline__ void nr_node_put(struct nr_node *nr_node)
{
if (atomic_dec_and_test(&nr_node->refcount)) {
if (refcount_dec_and_test(&nr_node->refcount)) {
kfree(nr_node);
}
}
#define nr_neigh_hold(__nr_neigh) \
atomic_inc(&((__nr_neigh)->refcount))
refcount_inc(&((__nr_neigh)->refcount))
static __inline__ void nr_neigh_put(struct nr_neigh *nr_neigh)
{
if (atomic_dec_and_test(&nr_neigh->refcount)) {
if (refcount_dec_and_test(&nr_neigh->refcount)) {
if (nr_neigh->ax25)
ax25_cb_put(nr_neigh->ax25);
kfree(nr_neigh->digipeat);

View File

@ -9,6 +9,7 @@
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
@ -95,7 +96,7 @@ struct Qdisc {
struct sk_buff *skb_bad_txq;
struct rcu_head rcu_head;
int padded;
atomic_t refcnt;
refcount_t refcnt;
spinlock_t busylock ____cacheline_aligned_in_smp;
};

View File

@ -31,6 +31,7 @@
#define __sctp_auth_h__
#include <linux/list.h>
#include <linux/refcount.h>
struct sctp_endpoint;
struct sctp_association;
@ -53,7 +54,7 @@ struct sctp_hmac {
* over SCTP-AUTH
*/
struct sctp_auth_bytes {
atomic_t refcnt;
refcount_t refcnt;
__u32 len;
__u8 data[];
};
@ -76,7 +77,7 @@ static inline void sctp_auth_key_hold(struct sctp_auth_bytes *key)
if (!key)
return;
atomic_inc(&key->refcnt);
refcount_inc(&key->refcnt);
}
void sctp_auth_key_put(struct sctp_auth_bytes *key);

View File

@ -496,7 +496,7 @@ struct sctp_datamsg {
/* Chunks waiting to be submitted to lower layer. */
struct list_head chunks;
/* Reference counting. */
atomic_t refcnt;
refcount_t refcnt;
/* When is this message no longer interesting to the peer? */
unsigned long expires_at;
/* Did the messenge fail to send? */
@ -524,7 +524,7 @@ int sctp_chunk_abandoned(struct sctp_chunk *);
struct sctp_chunk {
struct list_head list;
atomic_t refcnt;
refcount_t refcnt;
/* How many times this chunk have been sent, for prsctp RTX policy */
int sent_count;
@ -735,7 +735,7 @@ struct sctp_transport {
struct rhlist_head node;
/* Reference counting. */
atomic_t refcnt;
refcount_t refcnt;
/* RTO-Pending : A flag used to track if one of the DATA
* chunks sent to this address is currently being
* used to compute a RTT. If this flag is 0,
@ -1174,7 +1174,7 @@ struct sctp_ep_common {
* refcnt - Reference count access to this object.
* dead - Do not attempt to use this object.
*/
atomic_t refcnt;
refcount_t refcnt;
bool dead;
/* What socket does this endpoint belong to? */

View File

@ -183,7 +183,7 @@ struct vxlan_sock {
struct hlist_node hlist;
struct socket *sock;
struct hlist_head vni_list[VNI_HASH_SIZE];
atomic_t refcnt;
refcount_t refcnt;
u32 flags;
};

View File

@ -11,6 +11,7 @@
#define _X25_H
#include <linux/x25.h>
#include <linux/slab.h>
#include <linux/refcount.h>
#include <net/sock.h>
#define X25_ADDR_LEN 16
@ -129,7 +130,7 @@ struct x25_route {
struct x25_address address;
unsigned int sigdigits;
struct net_device *dev;
atomic_t refcnt;
refcount_t refcnt;
};
struct x25_neigh {
@ -141,7 +142,7 @@ struct x25_neigh {
unsigned long t20;
struct timer_list t20timer;
unsigned long global_facil_mask;
atomic_t refcnt;
refcount_t refcnt;
};
struct x25_sock {
@ -242,12 +243,12 @@ void x25_link_free(void);
/* x25_neigh.c */
static __inline__ void x25_neigh_hold(struct x25_neigh *nb)
{
atomic_inc(&nb->refcnt);
refcount_inc(&nb->refcnt);
}
static __inline__ void x25_neigh_put(struct x25_neigh *nb)
{
if (atomic_dec_and_test(&nb->refcnt))
if (refcount_dec_and_test(&nb->refcnt))
kfree(nb);
}
@ -265,12 +266,12 @@ void x25_route_free(void);
static __inline__ void x25_route_hold(struct x25_route *rt)
{
atomic_inc(&rt->refcnt);
refcount_inc(&rt->refcnt);
}
static __inline__ void x25_route_put(struct x25_route *rt)
{
if (atomic_dec_and_test(&rt->refcnt))
if (refcount_dec_and_test(&rt->refcnt))
kfree(rt);
}

View File

@ -13,6 +13,7 @@
#include <linux/mutex.h>
#include <linux/audit.h>
#include <linux/slab.h>
#include <linux/refcount.h>
#include <net/sock.h>
#include <net/dst.h>
@ -137,7 +138,7 @@ struct xfrm_state {
struct hlist_node bysrc;
struct hlist_node byspi;
atomic_t refcnt;
refcount_t refcnt;
spinlock_t lock;
struct xfrm_id id;
@ -559,7 +560,7 @@ struct xfrm_policy {
/* This lock only affects elements except for entry. */
rwlock_t lock;
atomic_t refcnt;
refcount_t refcnt;
struct timer_list timer;
struct flow_cache_object flo;
@ -815,14 +816,14 @@ static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
static inline void xfrm_pol_hold(struct xfrm_policy *policy)
{
if (likely(policy != NULL))
atomic_inc(&policy->refcnt);
refcount_inc(&policy->refcnt);
}
void xfrm_policy_destroy(struct xfrm_policy *policy);
static inline void xfrm_pol_put(struct xfrm_policy *policy)
{
if (atomic_dec_and_test(&policy->refcnt))
if (refcount_dec_and_test(&policy->refcnt))
xfrm_policy_destroy(policy);
}
@ -837,18 +838,18 @@ void __xfrm_state_destroy(struct xfrm_state *);
static inline void __xfrm_state_put(struct xfrm_state *x)
{
atomic_dec(&x->refcnt);
refcount_dec(&x->refcnt);
}
static inline void xfrm_state_put(struct xfrm_state *x)
{
if (atomic_dec_and_test(&x->refcnt))
if (refcount_dec_and_test(&x->refcnt))
__xfrm_state_destroy(x);
}
static inline void xfrm_state_hold(struct xfrm_state *x)
{
atomic_inc(&x->refcnt);
refcount_inc(&x->refcnt);
}
static inline bool addr_match(const void *token1, const void *token2,
@ -1029,7 +1030,7 @@ struct xfrm_offload {
};
struct sec_path {
atomic_t refcnt;
refcount_t refcnt;
int len;
int olen;
@ -1050,7 +1051,7 @@ static inline struct sec_path *
secpath_get(struct sec_path *sp)
{
if (sp)
atomic_inc(&sp->refcnt);
refcount_inc(&sp->refcnt);
return sp;
}
@ -1059,7 +1060,7 @@ void __secpath_destroy(struct sec_path *sp);
static inline void
secpath_put(struct sec_path *sp)
{
if (sp && atomic_dec_and_test(&sp->refcnt))
if (sp && refcount_dec_and_test(&sp->refcnt))
__secpath_destroy(sp);
}

View File

@ -101,12 +101,12 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc);
/* must be done under lec_arp_lock */
static inline void lec_arp_hold(struct lec_arp_table *entry)
{
atomic_inc(&entry->usage);
refcount_inc(&entry->usage);
}
static inline void lec_arp_put(struct lec_arp_table *entry)
{
if (atomic_dec_and_test(&entry->usage))
if (refcount_dec_and_test(&entry->usage))
kfree(entry);
}
@ -1564,7 +1564,7 @@ static struct lec_arp_table *make_entry(struct lec_priv *priv,
to_return->last_used = jiffies;
to_return->priv = priv;
skb_queue_head_init(&to_return->tx_wait);
atomic_set(&to_return->usage, 1);
refcount_set(&to_return->usage, 1);
return to_return;
}

View File

@ -47,7 +47,7 @@ struct lec_arp_table {
* the length of the tlvs array
*/
struct sk_buff_head tx_wait; /* wait queue for outgoing packets */
atomic_t usage; /* usage count */
refcount_t usage; /* usage count */
};
/*

View File

@ -40,7 +40,7 @@ static in_cache_entry *in_cache_get(__be32 dst_ip,
entry = client->in_cache;
while (entry != NULL) {
if (entry->ctrl_info.in_dst_ip == dst_ip) {
atomic_inc(&entry->use);
refcount_inc(&entry->use);
read_unlock_bh(&client->ingress_lock);
return entry;
}
@ -61,7 +61,7 @@ static in_cache_entry *in_cache_get_with_mask(__be32 dst_ip,
entry = client->in_cache;
while (entry != NULL) {
if ((entry->ctrl_info.in_dst_ip & mask) == (dst_ip & mask)) {
atomic_inc(&entry->use);
refcount_inc(&entry->use);
read_unlock_bh(&client->ingress_lock);
return entry;
}
@ -82,7 +82,7 @@ static in_cache_entry *in_cache_get_by_vcc(struct atm_vcc *vcc,
entry = client->in_cache;
while (entry != NULL) {
if (entry->shortcut == vcc) {
atomic_inc(&entry->use);
refcount_inc(&entry->use);
read_unlock_bh(&client->ingress_lock);
return entry;
}
@ -105,7 +105,7 @@ static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
dprintk("adding an ingress entry, ip = %pI4\n", &dst_ip);
atomic_set(&entry->use, 1);
refcount_set(&entry->use, 1);
dprintk("new_in_cache_entry: about to lock\n");
write_lock_bh(&client->ingress_lock);
entry->next = client->in_cache;
@ -121,7 +121,7 @@ static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
entry->count = 1;
entry->entry_state = INGRESS_INVALID;
entry->ctrl_info.holding_time = HOLDING_TIME_DEFAULT;
atomic_inc(&entry->use);
refcount_inc(&entry->use);
write_unlock_bh(&client->ingress_lock);
dprintk("new_in_cache_entry: unlocked\n");
@ -178,7 +178,7 @@ static int cache_hit(in_cache_entry *entry, struct mpoa_client *mpc)
static void in_cache_put(in_cache_entry *entry)
{
if (atomic_dec_and_test(&entry->use)) {
if (refcount_dec_and_test(&entry->use)) {
memset(entry, 0, sizeof(in_cache_entry));
kfree(entry);
}
@ -339,7 +339,7 @@ static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id,
entry = mpc->eg_cache;
while (entry != NULL) {
if (entry->ctrl_info.cache_id == cache_id) {
atomic_inc(&entry->use);
refcount_inc(&entry->use);
read_unlock_irq(&mpc->egress_lock);
return entry;
}
@ -360,7 +360,7 @@ static eg_cache_entry *eg_cache_get_by_tag(__be32 tag, struct mpoa_client *mpc)
entry = mpc->eg_cache;
while (entry != NULL) {
if (entry->ctrl_info.tag == tag) {
atomic_inc(&entry->use);
refcount_inc(&entry->use);
read_unlock_irqrestore(&mpc->egress_lock, flags);
return entry;
}
@ -382,7 +382,7 @@ static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc,
entry = mpc->eg_cache;
while (entry != NULL) {
if (entry->shortcut == vcc) {
atomic_inc(&entry->use);
refcount_inc(&entry->use);
read_unlock_irqrestore(&mpc->egress_lock, flags);
return entry;
}
@ -402,7 +402,7 @@ static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr,
entry = mpc->eg_cache;
while (entry != NULL) {
if (entry->latest_ip_addr == ipaddr) {
atomic_inc(&entry->use);
refcount_inc(&entry->use);
read_unlock_irq(&mpc->egress_lock);
return entry;
}
@ -415,7 +415,7 @@ static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr,
static void eg_cache_put(eg_cache_entry *entry)
{
if (atomic_dec_and_test(&entry->use)) {
if (refcount_dec_and_test(&entry->use)) {
memset(entry, 0, sizeof(eg_cache_entry));
kfree(entry);
}
@ -468,7 +468,7 @@ static eg_cache_entry *eg_cache_add_entry(struct k_message *msg,
dprintk("adding an egress entry, ip = %pI4, this should be our IP\n",
&msg->content.eg_info.eg_dst_ip);
atomic_set(&entry->use, 1);
refcount_set(&entry->use, 1);
dprintk("new_eg_cache_entry: about to lock\n");
write_lock_irq(&client->egress_lock);
entry->next = client->eg_cache;
@ -484,7 +484,7 @@ static eg_cache_entry *eg_cache_add_entry(struct k_message *msg,
dprintk("new_eg_cache_entry cache_id %u\n",
ntohl(entry->ctrl_info.cache_id));
dprintk("mps_ip = %pI4\n", &entry->ctrl_info.mps_ip);
atomic_inc(&entry->use);
refcount_inc(&entry->use);
write_unlock_irq(&client->egress_lock);
dprintk("new_eg_cache_entry: unlocked\n");

View File

@ -6,6 +6,7 @@
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/atmmpc.h>
#include <linux/refcount.h>
struct mpoa_client;
@ -25,7 +26,7 @@ typedef struct in_cache_entry {
struct atm_vcc *shortcut;
uint8_t MPS_ctrl_ATM_addr[ATM_ESA_LEN];
struct in_ctrl_info ctrl_info;
atomic_t use;
refcount_t use;
} in_cache_entry;
struct in_cache_ops{
@ -58,7 +59,7 @@ typedef struct eg_cache_entry{
uint16_t entry_state;
__be32 latest_ip_addr; /* The src IP address of the last packet */
struct eg_ctrl_info ctrl_info;
atomic_t use;
refcount_t use;
} eg_cache_entry;
struct eg_cache_ops{

View File

@ -61,7 +61,7 @@ static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
add_stats(seq, "0", &dev->stats.aal0);
seq_puts(seq, " ");
add_stats(seq, "5", &dev->stats.aal5);
seq_printf(seq, "\t[%d]", atomic_read(&dev->refcnt));
seq_printf(seq, "\t[%d]", refcount_read(&dev->refcnt));
seq_putc(seq, '\n');
}

View File

@ -109,7 +109,7 @@ struct atm_dev *atm_dev_register(const char *type, struct device *parent,
else
memset(&dev->flags, 0, sizeof(dev->flags));
memset(&dev->stats, 0, sizeof(dev->stats));
atomic_set(&dev->refcnt, 1);
refcount_set(&dev->refcnt, 1);
if (atm_proc_dev_register(dev) < 0) {
pr_err("atm_proc_dev_register failed for dev %s\n", type);

View File

@ -510,7 +510,7 @@ ax25_cb *ax25_create_cb(void)
if ((ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
return NULL;
atomic_set(&ax25->refcount, 1);
refcount_set(&ax25->refcount, 1);
skb_queue_head_init(&ax25->write_queue);
skb_queue_head_init(&ax25->frag_queue);

View File

@ -114,7 +114,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
return -ENOMEM;
}
atomic_set(&ax25_rt->refcount, 1);
refcount_set(&ax25_rt->refcount, 1);
ax25_rt->callsign = route->dest_addr;
ax25_rt->dev = ax25_dev->dev;
ax25_rt->digipeat = NULL;

View File

@ -107,7 +107,7 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
if ((ax25_uid = kmalloc(sizeof(*ax25_uid), GFP_KERNEL)) == NULL)
return -ENOMEM;
atomic_set(&ax25_uid->refcount, 1);
refcount_set(&ax25_uid->refcount, 1);
ax25_uid->uid = sax25_kuid;
ax25_uid->call = sax->sax25_call;

View File

@ -21,6 +21,7 @@
#include <net/ip6_fib.h>
#include <linux/if_vlan.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#define BR_HASH_BITS 8
#define BR_HASH_SIZE (1 << BR_HASH_BITS)
@ -127,7 +128,7 @@ struct net_bridge_vlan {
struct net_bridge_port *port;
};
union {
atomic_t refcnt;
refcount_t refcnt;
struct net_bridge_vlan *brvlan;
};

View File

@ -158,7 +158,7 @@ static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid
if (WARN_ON(!masterv))
return NULL;
}
atomic_inc(&masterv->refcnt);
refcount_inc(&masterv->refcnt);
return masterv;
}
@ -182,7 +182,7 @@ static void br_vlan_put_master(struct net_bridge_vlan *masterv)
return;
vg = br_vlan_group(masterv->br);
if (atomic_dec_and_test(&masterv->refcnt)) {
if (refcount_dec_and_test(&masterv->refcnt)) {
rhashtable_remove_fast(&vg->vlan_hash,
&masterv->vnode, br_vlan_rht_params);
__vlan_del_list(masterv);
@ -573,7 +573,7 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
br_err(br, "failed insert local address into bridge forwarding table\n");
return ret;
}
atomic_inc(&vlan->refcnt);
refcount_inc(&vlan->refcnt);
vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
vg->num_vlans++;
}
@ -595,7 +595,7 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
vlan->br = br;
if (flags & BRIDGE_VLAN_INFO_BRENTRY)
atomic_set(&vlan->refcnt, 1);
refcount_set(&vlan->refcnt, 1);
ret = __vlan_add(vlan, flags);
if (ret) {
free_percpu(vlan->stats);

View File

@ -389,7 +389,7 @@ link_it:
}
fi->fib_treeref++;
atomic_inc(&fi->fib_clntref);
refcount_set(&fi->fib_clntref, 1);
spin_lock(&dn_fib_info_lock);
fi->fib_next = dn_fib_info_list;
fi->fib_prev = NULL;
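
Note that this hunk is not a one-for-one substitution: the old code took
the first reference on the freshly linked dn_fib_info with atomic_inc(),
but incrementing a refcount_t from zero would trip the checks under
CONFIG_REFCOUNT_FULL, so the conversion establishes the initial reference
with refcount_set(..., 1) instead. The general pattern, sketched with a
placeholder obj:

    /* construction: the first reference is set, not incremented from 0 */
    refcount_set(&obj->refcnt, 1);

    /* later code handing out additional references */
    refcount_inc(&obj->refcnt);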
@ -425,7 +425,7 @@ int dn_fib_semantic_match(int type, struct dn_fib_info *fi, const struct flowidn
switch (type) {
case RTN_NAT:
DN_FIB_RES_RESET(*res);
atomic_inc(&fi->fib_clntref);
refcount_inc(&fi->fib_clntref);
return 0;
case RTN_UNICAST:
case RTN_LOCAL:
@ -438,7 +438,7 @@ int dn_fib_semantic_match(int type, struct dn_fib_info *fi, const struct flowidn
}
if (nhsel < fi->fib_nhs) {
res->nh_sel = nhsel;
atomic_inc(&fi->fib_clntref);
refcount_inc(&fi->fib_clntref);
return 0;
}
endfor_nexthops(fi);

View File

@ -338,7 +338,7 @@ static struct calipso_doi *calipso_doi_search(u32 doi)
struct calipso_doi *iter;
list_for_each_entry_rcu(iter, &calipso_doi_list, list)
if (iter->doi == doi && atomic_read(&iter->refcount))
if (iter->doi == doi && refcount_read(&iter->refcount))
return iter;
return NULL;
}
@ -370,7 +370,7 @@ static int calipso_doi_add(struct calipso_doi *doi_def,
if (doi_def->doi == CALIPSO_DOI_UNKNOWN)
goto doi_add_return;
atomic_set(&doi_def->refcount, 1);
refcount_set(&doi_def->refcount, 1);
spin_lock(&calipso_doi_list_lock);
if (calipso_doi_search(doi_def->doi)) {
@ -458,7 +458,7 @@ static int calipso_doi_remove(u32 doi, struct netlbl_audit *audit_info)
ret_val = -ENOENT;
goto doi_remove_return;
}
if (!atomic_dec_and_test(&doi_def->refcount)) {
if (!refcount_dec_and_test(&doi_def->refcount)) {
spin_unlock(&calipso_doi_list_lock);
ret_val = -EBUSY;
goto doi_remove_return;
@ -499,7 +499,7 @@ static struct calipso_doi *calipso_doi_getdef(u32 doi)
doi_def = calipso_doi_search(doi);
if (!doi_def)
goto doi_getdef_return;
if (!atomic_inc_not_zero(&doi_def->refcount))
if (!refcount_inc_not_zero(&doi_def->refcount))
doi_def = NULL;
doi_getdef_return:
@ -520,7 +520,7 @@ static void calipso_doi_putdef(struct calipso_doi *doi_def)
if (!doi_def)
return;
if (!atomic_dec_and_test(&doi_def->refcount))
if (!refcount_dec_and_test(&doi_def->refcount))
return;
spin_lock(&calipso_doi_list_lock);
list_del_rcu(&doi_def->list);
@ -553,7 +553,7 @@ static int calipso_doi_walk(u32 *skip_cnt,
rcu_read_lock();
list_for_each_entry_rcu(iter_doi, &calipso_doi_list, list)
if (atomic_read(&iter_doi->refcount) > 0) {
if (refcount_read(&iter_doi->refcount) > 0) {
if (doi_cnt++ < *skip_cnt)
continue;
ret_val = callback(iter_doi, cb_arg);

View File

@ -308,7 +308,7 @@ void ipxitf_down(struct ipx_interface *intrfc)
static void __ipxitf_put(struct ipx_interface *intrfc)
{
if (atomic_dec_and_test(&intrfc->refcnt))
if (refcount_dec_and_test(&intrfc->refcnt))
__ipxitf_down(intrfc);
}
@ -876,7 +876,7 @@ static struct ipx_interface *ipxitf_alloc(struct net_device *dev, __be32 netnum,
intrfc->if_ipx_offset = ipx_offset;
intrfc->if_sknum = IPX_MIN_EPHEMERAL_SOCKET;
INIT_HLIST_HEAD(&intrfc->if_sklist);
atomic_set(&intrfc->refcnt, 1);
refcount_set(&intrfc->refcnt, 1);
spin_lock_init(&intrfc->if_sklist_lock);
}
@ -1105,7 +1105,7 @@ static struct ipx_interface *ipxitf_auto_create(struct net_device *dev,
memcpy((char *)&(intrfc->if_node[IPX_NODE_LEN-dev->addr_len]),
dev->dev_addr, dev->addr_len);
spin_lock_init(&intrfc->if_sklist_lock);
atomic_set(&intrfc->refcnt, 1);
refcount_set(&intrfc->refcnt, 1);
ipxitf_insert(intrfc);
dev_hold(dev);
}

View File

@ -53,7 +53,7 @@ static int ipx_seq_interface_show(struct seq_file *seq, void *v)
seq_printf(seq, "%-11s", ipx_device_name(i));
seq_printf(seq, "%-9s", ipx_frame_name(i->if_dlink_type));
#ifdef IPX_REFCNT_DEBUG
seq_printf(seq, "%6d", atomic_read(&i->refcnt));
seq_printf(seq, "%6d", refcount_read(&i->refcnt));
#endif
seq_puts(seq, "\n");
out:

View File

@ -59,7 +59,7 @@ int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
if (!rt)
goto out;
atomic_set(&rt->refcnt, 1);
refcount_set(&rt->refcnt, 1);
ipxrtr_hold(rt);
write_lock_bh(&ipx_routes_lock);
list_add(&rt->node, &ipx_routes);

View File

@ -2177,7 +2177,7 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, const struct xfrm_policy *
}
hdr->sadb_msg_len = size / sizeof(uint64_t);
hdr->sadb_msg_reserved = atomic_read(&xp->refcnt);
hdr->sadb_msg_reserved = refcount_read(&xp->refcnt);
return 0;
}

View File

@ -132,12 +132,12 @@ static inline struct l2tp_net *l2tp_pernet(const struct net *net)
*/
static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
{
atomic_inc(&tunnel->ref_count);
refcount_inc(&tunnel->ref_count);
}
static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
{
if (atomic_dec_and_test(&tunnel->ref_count))
if (refcount_dec_and_test(&tunnel->ref_count))
l2tp_tunnel_free(tunnel);
}
#ifdef L2TP_REFCNT_DEBUG
@ -145,14 +145,14 @@ static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
do { \
pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", \
__func__, __LINE__, (_t)->name, \
atomic_read(&_t->ref_count)); \
refcount_read(&_t->ref_count)); \
l2tp_tunnel_inc_refcount_1(_t); \
} while (0)
#define l2tp_tunnel_dec_refcount(_t) \
do { \
pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \
__func__, __LINE__, (_t)->name, \
atomic_read(&_t->ref_count)); \
refcount_read(&_t->ref_count)); \
l2tp_tunnel_dec_refcount_1(_t); \
} while (0)
#else
@ -1353,7 +1353,7 @@ static void l2tp_udp_encap_destroy(struct sock *sk)
*/
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
BUG_ON(atomic_read(&tunnel->ref_count) != 0);
BUG_ON(refcount_read(&tunnel->ref_count) != 0);
BUG_ON(tunnel->sock != NULL);
l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
kfree_rcu(tunnel, rcu);
@ -1667,7 +1667,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
/* Bump the reference count. The tunnel context is deleted
* only when this drops to zero. Must be done before list insertion
*/
l2tp_tunnel_inc_refcount(tunnel);
refcount_set(&tunnel->ref_count, 1);
spin_lock_bh(&pn->l2tp_tunnel_list_lock);
list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
@ -1706,7 +1706,7 @@ void l2tp_session_free(struct l2tp_session *session)
{
struct l2tp_tunnel *tunnel = session->tunnel;
BUG_ON(atomic_read(&session->ref_count) != 0);
BUG_ON(refcount_read(&session->ref_count) != 0);
if (tunnel) {
BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
@ -1854,7 +1854,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
/* Bump the reference count. The session context is deleted
* only when this drops to zero.
*/
l2tp_session_inc_refcount(session);
refcount_set(&session->ref_count, 1);
l2tp_tunnel_inc_refcount(tunnel);
/* Ensure tunnel socket isn't deleted */

View File

@ -7,6 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/refcount.h>
#ifndef _L2TP_CORE_H_
#define _L2TP_CORE_H_
@ -98,7 +99,7 @@ struct l2tp_session {
int nr_oos_count; /* For OOS recovery */
int nr_oos_count_max;
struct hlist_node hlist; /* Hash list node */
atomic_t ref_count;
refcount_t ref_count;
char name[32]; /* for logging */
char ifname[IFNAMSIZ];
@ -177,7 +178,7 @@ struct l2tp_tunnel {
struct list_head list; /* Keep a list of all tunnels */
struct net *l2tp_net; /* the net we belong to */
atomic_t ref_count;
refcount_t ref_count;
#ifdef CONFIG_DEBUG_FS
void (*show)(struct seq_file *m, void *arg);
#endif
@ -273,12 +274,12 @@ int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
*/
static inline void l2tp_session_inc_refcount_1(struct l2tp_session *session)
{
atomic_inc(&session->ref_count);
refcount_inc(&session->ref_count);
}
static inline void l2tp_session_dec_refcount_1(struct l2tp_session *session)
{
if (atomic_dec_and_test(&session->ref_count))
if (refcount_dec_and_test(&session->ref_count))
l2tp_session_free(session);
}
@ -287,14 +288,14 @@ static inline void l2tp_session_dec_refcount_1(struct l2tp_session *session)
do { \
pr_debug("l2tp_session_inc_refcount: %s:%d %s: cnt=%d\n", \
__func__, __LINE__, (_s)->name, \
atomic_read(&_s->ref_count)); \
refcount_read(&_s->ref_count)); \
l2tp_session_inc_refcount_1(_s); \
} while (0)
#define l2tp_session_dec_refcount(_s) \
do { \
pr_debug("l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n", \
__func__, __LINE__, (_s)->name, \
atomic_read(&_s->ref_count)); \
refcount_read(&_s->ref_count)); \
l2tp_session_dec_refcount_1(_s); \
} while (0)
#else

View File

@ -145,7 +145,7 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
"");
seq_printf(m, " %d sessions, refcnt %d/%d\n", session_count,
tunnel->sock ? refcount_read(&tunnel->sock->sk_refcnt) : 0,
atomic_read(&tunnel->ref_count));
refcount_read(&tunnel->ref_count));
seq_printf(m, " %08x rx %ld/%ld/%ld rx %ld/%ld/%ld\n",
tunnel->debug,
atomic_long_read(&tunnel->stats.tx_packets),
@ -170,7 +170,7 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
"");
if (session->send_seq || session->recv_seq)
seq_printf(m, " nr %hu, ns %hu\n", session->nr, session->ns);
seq_printf(m, " refcnt %d\n", atomic_read(&session->ref_count));
seq_printf(m, " refcnt %d\n", refcount_read(&session->ref_count));
seq_printf(m, " config %d/%d/%c/%c/%s/%s %08x %u\n",
session->mtu, session->mru,
session->recv_seq ? 'R' : '-',

View File

@ -1616,7 +1616,7 @@ static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
seq_printf(m, "\nTUNNEL '%s', %c %d\n",
tunnel->name,
(tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N',
atomic_read(&tunnel->ref_count) - 1);
refcount_read(&tunnel->ref_count) - 1);
seq_printf(m, " %08x %ld/%ld/%ld %ld/%ld/%ld\n",
tunnel->debug,
atomic_long_read(&tunnel->stats.tx_packets),

View File

@ -54,12 +54,12 @@ static void lapb_free_cb(struct lapb_cb *lapb)
static __inline__ void lapb_hold(struct lapb_cb *lapb)
{
atomic_inc(&lapb->refcnt);
refcount_inc(&lapb->refcnt);
}
static __inline__ void lapb_put(struct lapb_cb *lapb)
{
if (atomic_dec_and_test(&lapb->refcnt))
if (refcount_dec_and_test(&lapb->refcnt))
lapb_free_cb(lapb);
}
@ -136,7 +136,7 @@ static struct lapb_cb *lapb_create_cb(void)
lapb->mode = LAPB_DEFAULT_MODE;
lapb->window = LAPB_DEFAULT_WINDOW;
lapb->state = LAPB_STATE_0;
atomic_set(&lapb->refcnt, 1);
refcount_set(&lapb->refcnt, 1);
out:
return lapb;
}

View File

@ -41,7 +41,7 @@ static struct llc_sap *llc_sap_alloc(void)
spin_lock_init(&sap->sk_lock);
for (i = 0; i < LLC_SK_LADDR_HASH_ENTRIES; i++)
INIT_HLIST_NULLS_HEAD(&sap->sk_laddr_hash[i], i);
atomic_set(&sap->refcnt, 1);
refcount_set(&sap->refcnt, 1);
}
return sap;
}

View File

@ -149,7 +149,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
nr_neigh->count = 0;
nr_neigh->number = nr_neigh_no++;
nr_neigh->failed = 0;
atomic_set(&nr_neigh->refcount, 1);
refcount_set(&nr_neigh->refcount, 1);
if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
nr_neigh->digipeat = kmemdup(ax25_digi,
@ -184,7 +184,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
nr_node->which = 0;
nr_node->count = 1;
atomic_set(&nr_node->refcount, 1);
refcount_set(&nr_node->refcount, 1);
spin_lock_init(&nr_node->node_lock);
nr_node->routes[0].quality = quality;
@ -431,7 +431,7 @@ static int __must_check nr_add_neigh(ax25_address *callsign,
nr_neigh->count = 0;
nr_neigh->number = nr_neigh_no++;
nr_neigh->failed = 0;
atomic_set(&nr_neigh->refcount, 1);
refcount_set(&nr_neigh->refcount, 1);
if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi),

View File

@ -118,8 +118,8 @@ static void rds_ib_dev_free(struct work_struct *work)
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev)
{
BUG_ON(atomic_read(&rds_ibdev->refcount) <= 0);
if (atomic_dec_and_test(&rds_ibdev->refcount))
BUG_ON(refcount_read(&rds_ibdev->refcount) == 0);
if (refcount_dec_and_test(&rds_ibdev->refcount))
queue_work(rds_wq, &rds_ibdev->free_work);
}
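
A small semantic point in the hunk above: refcount_read() returns an
unsigned int, so the old "<= 0" sanity check could only ever fire on zero,
and the conversion spells that out:

    /* refcount_read() is unsigned, so "<= 0" collapses to "== 0" */
    BUG_ON(refcount_read(&rds_ibdev->refcount) == 0);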
@ -137,7 +137,7 @@ static void rds_ib_add_one(struct ib_device *device)
return;
spin_lock_init(&rds_ibdev->spinlock);
atomic_set(&rds_ibdev->refcount, 1);
refcount_set(&rds_ibdev->refcount, 1);
INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
rds_ibdev->max_wrs = device->attrs.max_qp_wr;
@ -205,10 +205,10 @@ static void rds_ib_add_one(struct ib_device *device)
down_write(&rds_ib_devices_lock);
list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
up_write(&rds_ib_devices_lock);
atomic_inc(&rds_ibdev->refcount);
refcount_inc(&rds_ibdev->refcount);
ib_set_client_data(device, &rds_ib_client, rds_ibdev);
atomic_inc(&rds_ibdev->refcount);
refcount_inc(&rds_ibdev->refcount);
rds_ib_nodev_connect();
@ -239,7 +239,7 @@ struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device)
rcu_read_lock();
rds_ibdev = ib_get_client_data(device, &rds_ib_client);
if (rds_ibdev)
atomic_inc(&rds_ibdev->refcount);
refcount_inc(&rds_ibdev->refcount);
rcu_read_unlock();
return rds_ibdev;
}

View File

@ -230,7 +230,7 @@ struct rds_ib_device {
unsigned int max_initiator_depth;
unsigned int max_responder_resources;
spinlock_t spinlock; /* protect the above */
atomic_t refcount;
refcount_t refcount;
struct work_struct free_work;
int *vector_load;
};

View File

@ -52,7 +52,7 @@ static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
if (i_ipaddr->ipaddr == ipaddr) {
atomic_inc(&rds_ibdev->refcount);
refcount_inc(&rds_ibdev->refcount);
rcu_read_unlock();
return rds_ibdev;
}
@ -134,7 +134,7 @@ void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *con
spin_unlock_irq(&ib_nodev_conns_lock);
ic->rds_ibdev = rds_ibdev;
atomic_inc(&rds_ibdev->refcount);
refcount_inc(&rds_ibdev->refcount);
}
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)

View File

@ -48,8 +48,8 @@ static unsigned int rds_exthdr_size[__RDS_EXTHDR_MAX] = {
void rds_message_addref(struct rds_message *rm)
{
rdsdebug("addref rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
atomic_inc(&rm->m_refcount);
rdsdebug("addref rm %p ref %d\n", rm, refcount_read(&rm->m_refcount));
refcount_inc(&rm->m_refcount);
}
EXPORT_SYMBOL_GPL(rds_message_addref);
@ -83,9 +83,9 @@ static void rds_message_purge(struct rds_message *rm)
void rds_message_put(struct rds_message *rm)
{
rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
WARN(!atomic_read(&rm->m_refcount), "danger refcount zero on %p\n", rm);
if (atomic_dec_and_test(&rm->m_refcount)) {
rdsdebug("put rm %p ref %d\n", rm, refcount_read(&rm->m_refcount));
WARN(!refcount_read(&rm->m_refcount), "danger refcount zero on %p\n", rm);
if (refcount_dec_and_test(&rm->m_refcount)) {
BUG_ON(!list_empty(&rm->m_sock_item));
BUG_ON(!list_empty(&rm->m_conn_item));
rds_message_purge(rm);
@ -206,7 +206,7 @@ struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
rm->m_used_sgs = 0;
rm->m_total_sgs = extra_len / sizeof(struct scatterlist);
atomic_set(&rm->m_refcount, 1);
refcount_set(&rm->m_refcount, 1);
INIT_LIST_HEAD(&rm->m_sock_item);
INIT_LIST_HEAD(&rm->m_conn_item);
spin_lock_init(&rm->m_rs_lock);

View File

@ -84,7 +84,7 @@ static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
if (insert) {
rb_link_node(&insert->r_rb_node, parent, p);
rb_insert_color(&insert->r_rb_node, root);
atomic_inc(&insert->r_refcount);
refcount_inc(&insert->r_refcount);
}
return NULL;
}
@ -99,7 +99,7 @@ static void rds_destroy_mr(struct rds_mr *mr)
unsigned long flags;
rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
mr->r_key, atomic_read(&mr->r_refcount));
mr->r_key, refcount_read(&mr->r_refcount));
if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
return;
@ -223,7 +223,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
goto out;
}
atomic_set(&mr->r_refcount, 1);
refcount_set(&mr->r_refcount, 1);
RB_CLEAR_NODE(&mr->r_rb_node);
mr->r_trans = rs->rs_transport;
mr->r_sock = rs;
@ -307,7 +307,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
if (mr_ret) {
atomic_inc(&mr->r_refcount);
refcount_inc(&mr->r_refcount);
*mr_ret = mr;
}
@ -756,7 +756,7 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
if (!mr)
err = -EINVAL; /* invalid r_key */
else
atomic_inc(&mr->r_refcount);
refcount_inc(&mr->r_refcount);
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
if (mr) {

View File

@ -8,6 +8,7 @@
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include "info.h"
@ -261,7 +262,7 @@ struct rds_ext_header_rdma_dest {
#define RDS_MSG_RX_CMSG 3
struct rds_incoming {
atomic_t i_refcount;
refcount_t i_refcount;
struct list_head i_item;
struct rds_connection *i_conn;
struct rds_conn_path *i_conn_path;
@ -276,7 +277,7 @@ struct rds_incoming {
struct rds_mr {
struct rb_node r_rb_node;
atomic_t r_refcount;
refcount_t r_refcount;
u32 r_key;
/* A copy of the creation flags */
@ -355,7 +356,7 @@ static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
#define RDS_MSG_FLUSH 8
struct rds_message {
atomic_t m_refcount;
refcount_t m_refcount;
struct list_head m_sock_item;
struct list_head m_conn_item;
struct rds_incoming m_inc;
@ -856,7 +857,7 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
if (atomic_dec_and_test(&mr->r_refcount))
if (refcount_dec_and_test(&mr->r_refcount))
__rds_put_mr_final(mr);
}

View File

@ -45,7 +45,7 @@ void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
{
int i;
atomic_set(&inc->i_refcount, 1);
refcount_set(&inc->i_refcount, 1);
INIT_LIST_HEAD(&inc->i_item);
inc->i_conn = conn;
inc->i_saddr = saddr;
@ -61,7 +61,7 @@ EXPORT_SYMBOL_GPL(rds_inc_init);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
__be32 saddr)
{
atomic_set(&inc->i_refcount, 1);
refcount_set(&inc->i_refcount, 1);
INIT_LIST_HEAD(&inc->i_item);
inc->i_conn = cp->cp_conn;
inc->i_conn_path = cp;
@ -74,14 +74,14 @@ EXPORT_SYMBOL_GPL(rds_inc_path_init);
static void rds_inc_addref(struct rds_incoming *inc)
{
rdsdebug("addref inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
atomic_inc(&inc->i_refcount);
rdsdebug("addref inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
refcount_inc(&inc->i_refcount);
}
void rds_inc_put(struct rds_incoming *inc)
{
rdsdebug("put inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
if (atomic_dec_and_test(&inc->i_refcount)) {
rdsdebug("put inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
if (refcount_dec_and_test(&inc->i_refcount)) {
BUG_ON(!list_empty(&inc->i_item));
inc->i_conn->c_trans->inc_free(inc);

View File

@ -839,7 +839,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
old = dev_graft_qdisc(dev_queue, new);
if (new && i > 0)
atomic_inc(&new->refcnt);
refcount_inc(&new->refcnt);
if (!ingress)
qdisc_destroy(old);
@ -850,7 +850,7 @@ skip:
notify_and_destroy(net, skb, n, classid,
dev->qdisc, new);
if (new && !new->ops->attach)
atomic_inc(&new->refcnt);
refcount_inc(&new->refcnt);
dev->qdisc = new ? : &noop_qdisc;
if (new && new->ops->attach)
@ -1259,7 +1259,7 @@ replay:
if (q == p ||
(p && check_loop(q, p, 0)))
return -ELOOP;
atomic_inc(&q->refcnt);
refcount_inc(&q->refcnt);
goto graft;
} else {
if (!q)
@ -1374,7 +1374,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
tcm->tcm_parent = clid;
tcm->tcm_handle = q->handle;
tcm->tcm_info = atomic_read(&q->refcnt);
tcm->tcm_info = refcount_read(&q->refcnt);
if (nla_put_string(skb, TCA_KIND, q->ops->id))
goto nla_put_failure;
if (q->ops->dump && q->ops->dump(q, skb) < 0)

View File

@ -633,7 +633,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
sch->dequeue = ops->dequeue;
sch->dev_queue = dev_queue;
dev_hold(dev);
atomic_set(&sch->refcnt, 1);
refcount_set(&sch->refcnt, 1);
return sch;
errout:
@ -701,7 +701,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
const struct Qdisc_ops *ops = qdisc->ops;
if (qdisc->flags & TCQ_F_BUILTIN ||
!atomic_dec_and_test(&qdisc->refcnt))
!refcount_dec_and_test(&qdisc->refcnt))
return;
#ifdef CONFIG_NET_SCHED
@ -739,7 +739,7 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
spin_lock_bh(root_lock);
/* Prune old scheduler */
if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
if (oqdisc && refcount_read(&oqdisc->refcnt) <= 1)
qdisc_reset(oqdisc);
/* ... and graft new one */
@ -785,7 +785,7 @@ static void attach_default_qdiscs(struct net_device *dev)
dev->priv_flags & IFF_NO_QUEUE) {
netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
dev->qdisc = txq->qdisc_sleeping;
atomic_inc(&dev->qdisc->refcnt);
refcount_inc(&dev->qdisc->refcnt);
} else {
qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
if (qdisc) {

View File

@ -88,7 +88,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;
/* Initialize the object handling fields. */
atomic_set(&asoc->base.refcnt, 1);
refcount_set(&asoc->base.refcnt, 1);
/* Initialize the bind addr area. */
sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
@ -873,7 +873,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
/* Hold a reference to an association. */
void sctp_association_hold(struct sctp_association *asoc)
{
atomic_inc(&asoc->base.refcnt);
refcount_inc(&asoc->base.refcnt);
}
/* Release a reference to an association and cleanup
@ -881,7 +881,7 @@ void sctp_association_hold(struct sctp_association *asoc)
*/
void sctp_association_put(struct sctp_association *asoc)
{
if (atomic_dec_and_test(&asoc->base.refcnt))
if (refcount_dec_and_test(&asoc->base.refcnt))
sctp_association_destroy(asoc);
}

View File

@ -63,7 +63,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key)
if (!key)
return;
if (atomic_dec_and_test(&key->refcnt)) {
if (refcount_dec_and_test(&key->refcnt)) {
kzfree(key);
SCTP_DBG_OBJCNT_DEC(keys);
}
@ -84,7 +84,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
return NULL;
key->len = key_len;
atomic_set(&key->refcnt, 1);
refcount_set(&key->refcnt, 1);
SCTP_DBG_OBJCNT_INC(keys);
return key;

View File

@ -49,7 +49,7 @@
/* Initialize datamsg from memory. */
static void sctp_datamsg_init(struct sctp_datamsg *msg)
{
atomic_set(&msg->refcnt, 1);
refcount_set(&msg->refcnt, 1);
msg->send_failed = 0;
msg->send_error = 0;
msg->can_delay = 1;
@ -136,13 +136,13 @@ static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
/* Hold a reference. */
static void sctp_datamsg_hold(struct sctp_datamsg *msg)
{
atomic_inc(&msg->refcnt);
refcount_inc(&msg->refcnt);
}
/* Release a reference. */
void sctp_datamsg_put(struct sctp_datamsg *msg)
{
if (atomic_dec_and_test(&msg->refcnt))
if (refcount_dec_and_test(&msg->refcnt))
sctp_datamsg_destroy(msg);
}

View File

@ -114,7 +114,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
ep->base.type = SCTP_EP_TYPE_SOCKET;
/* Initialize the basic object fields. */
atomic_set(&ep->base.refcnt, 1);
refcount_set(&ep->base.refcnt, 1);
ep->base.dead = false;
/* Create an input queue. */
@ -285,7 +285,7 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
/* Hold a reference to an endpoint. */
void sctp_endpoint_hold(struct sctp_endpoint *ep)
{
atomic_inc(&ep->base.refcnt);
refcount_inc(&ep->base.refcnt);
}
/* Release a reference to an endpoint and clean up if there are
@ -293,7 +293,7 @@ void sctp_endpoint_hold(struct sctp_endpoint *ep)
*/
void sctp_endpoint_put(struct sctp_endpoint *ep)
{
if (atomic_dec_and_test(&ep->base.refcnt))
if (refcount_dec_and_test(&ep->base.refcnt))
sctp_endpoint_destroy(ep);
}

View File

@ -1345,7 +1345,7 @@ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
INIT_LIST_HEAD(&retval->transmitted_list);
INIT_LIST_HEAD(&retval->frag_list);
SCTP_DBG_OBJCNT_INC(chunk);
atomic_set(&retval->refcnt, 1);
refcount_set(&retval->refcnt, 1);
nodata:
return retval;
@ -1458,13 +1458,13 @@ void sctp_chunk_free(struct sctp_chunk *chunk)
/* Grab a reference to the chunk. */
void sctp_chunk_hold(struct sctp_chunk *ch)
{
atomic_inc(&ch->refcnt);
refcount_inc(&ch->refcnt);
}
/* Release a reference to the chunk. */
void sctp_chunk_put(struct sctp_chunk *ch)
{
if (atomic_dec_and_test(&ch->refcnt))
if (refcount_dec_and_test(&ch->refcnt))
sctp_chunk_destroy(ch);
}

View File

@ -99,7 +99,7 @@ static struct sctp_transport *sctp_transport_init(struct net *net,
/* Initialize the 64-bit random nonce sent with heartbeat. */
get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));
atomic_set(&peer->refcnt, 1);
refcount_set(&peer->refcnt, 1);
return peer;
}
@ -172,7 +172,7 @@ static void sctp_transport_destroy_rcu(struct rcu_head *head)
*/
static void sctp_transport_destroy(struct sctp_transport *transport)
{
if (unlikely(atomic_read(&transport->refcnt))) {
if (unlikely(refcount_read(&transport->refcnt))) {
WARN(1, "Attempt to destroy undead transport %p!\n", transport);
return;
}
@ -311,7 +311,7 @@ void sctp_transport_route(struct sctp_transport *transport,
/* Hold a reference to a transport. */
int sctp_transport_hold(struct sctp_transport *transport)
{
return atomic_add_unless(&transport->refcnt, 1, 0);
return refcount_inc_not_zero(&transport->refcnt);
}
/* Release a reference to a transport and clean up
@ -319,7 +319,7 @@ int sctp_transport_hold(struct sctp_transport *transport)
*/
void sctp_transport_put(struct sctp_transport *transport)
{
if (atomic_dec_and_test(&transport->refcnt))
if (refcount_dec_and_test(&transport->refcnt))
sctp_transport_destroy(transport);
}

View File

@ -117,14 +117,14 @@ static const struct rpc_pipe_ops gss_upcall_ops_v1;
static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
{
atomic_inc(&ctx->count);
refcount_inc(&ctx->count);
return ctx;
}
static inline void
gss_put_ctx(struct gss_cl_ctx *ctx)
{
if (atomic_dec_and_test(&ctx->count))
if (refcount_dec_and_test(&ctx->count))
gss_free_ctx(ctx);
}
@ -200,7 +200,7 @@ gss_alloc_context(void)
ctx->gc_proc = RPC_GSS_PROC_DATA;
ctx->gc_seq = 1; /* NetApp 6.4R1 doesn't accept seq. no. 0 */
spin_lock_init(&ctx->gc_seq_lock);
atomic_set(&ctx->count,1);
refcount_set(&ctx->count,1);
}
return ctx;
}
@ -287,7 +287,7 @@ err:
#define UPCALL_BUF_LEN 128
struct gss_upcall_msg {
atomic_t count;
refcount_t count;
kuid_t uid;
struct rpc_pipe_msg msg;
struct list_head list;
@ -328,7 +328,7 @@ static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
struct net *net = gss_msg->auth->net;
if (!atomic_dec_and_test(&gss_msg->count))
if (!refcount_dec_and_test(&gss_msg->count))
return;
put_pipe_version(net);
BUG_ON(!list_empty(&gss_msg->list));
@ -348,7 +348,7 @@ __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth
continue;
if (auth && pos->auth->service != auth->service)
continue;
atomic_inc(&pos->count);
refcount_inc(&pos->count);
dprintk("RPC: %s found msg %p\n", __func__, pos);
return pos;
}
@ -369,7 +369,7 @@ gss_add_msg(struct gss_upcall_msg *gss_msg)
spin_lock(&pipe->lock);
old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth);
if (old == NULL) {
atomic_inc(&gss_msg->count);
refcount_inc(&gss_msg->count);
list_add(&gss_msg->list, &pipe->in_downcall);
} else
gss_msg = old;
@ -383,7 +383,7 @@ __gss_unhash_msg(struct gss_upcall_msg *gss_msg)
list_del_init(&gss_msg->list);
rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
wake_up_all(&gss_msg->waitqueue);
atomic_dec(&gss_msg->count);
refcount_dec(&gss_msg->count);
}
static void
@ -506,7 +506,7 @@ gss_alloc_msg(struct gss_auth *gss_auth,
INIT_LIST_HEAD(&gss_msg->list);
rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
init_waitqueue_head(&gss_msg->waitqueue);
atomic_set(&gss_msg->count, 1);
refcount_set(&gss_msg->count, 1);
gss_msg->uid = uid;
gss_msg->auth = gss_auth;
switch (vers) {
@ -542,11 +542,11 @@ gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
gss_msg = gss_add_msg(gss_new);
if (gss_msg == gss_new) {
int res;
atomic_inc(&gss_msg->count);
refcount_inc(&gss_msg->count);
res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
if (res) {
gss_unhash_msg(gss_new);
atomic_dec(&gss_msg->count);
refcount_dec(&gss_msg->count);
gss_release_msg(gss_new);
gss_msg = ERR_PTR(res);
}
@ -595,7 +595,7 @@ gss_refresh_upcall(struct rpc_task *task)
task->tk_timeout = 0;
gss_cred->gc_upcall = gss_msg;
/* gss_upcall_callback will release the reference to gss_upcall_msg */
atomic_inc(&gss_msg->count);
refcount_inc(&gss_msg->count);
rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
} else {
gss_handle_downcall_result(gss_cred, gss_msg);
@ -815,7 +815,7 @@ restart:
if (!list_empty(&gss_msg->msg.list))
continue;
gss_msg->msg.errno = -EPIPE;
atomic_inc(&gss_msg->count);
refcount_inc(&gss_msg->count);
__gss_unhash_msg(gss_msg);
spin_unlock(&pipe->lock);
gss_release_msg(gss_msg);
@ -834,7 +834,7 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
if (msg->errno < 0) {
dprintk("RPC: %s releasing msg %p\n",
__func__, gss_msg);
atomic_inc(&gss_msg->count);
refcount_inc(&gss_msg->count);
gss_unhash_msg(gss_msg);
if (msg->errno == -ETIMEDOUT)
warn_gssd();

View File

@ -266,7 +266,7 @@ void x25_link_device_up(struct net_device *dev)
X25_MASK_PACKET_SIZE |
X25_MASK_WINDOW_SIZE;
nb->t20 = sysctl_x25_restart_request_timeout;
atomic_set(&nb->refcnt, 1);
refcount_set(&nb->refcnt, 1);
write_lock_bh(&x25_neigh_list_lock);
list_add(&nb->node, &x25_neigh_list);

View File

@ -55,7 +55,7 @@ static int x25_add_route(struct x25_address *address, unsigned int sigdigits,
rt->sigdigits = sigdigits;
rt->dev = dev;
atomic_set(&rt->refcnt, 1);
refcount_set(&rt->refcnt, 1);
list_add(&rt->node, &x25_route_list);
rc = 0;

View File

@ -116,7 +116,7 @@ struct sec_path *secpath_dup(struct sec_path *src)
for (i = 0; i < sp->len; i++)
xfrm_state_hold(sp->xvec[i]);
}
atomic_set(&sp->refcnt, 1);
refcount_set(&sp->refcnt, 1);
return sp;
}
EXPORT_SYMBOL(secpath_dup);
@ -126,7 +126,7 @@ int secpath_set(struct sk_buff *skb)
struct sec_path *sp;
/* Allocate new secpath or COW existing one. */
if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
if (!skb->sp || refcount_read(&skb->sp->refcnt) != 1) {
sp = secpath_dup(skb->sp);
if (!sp)
return -ENOMEM;

View File

@ -62,7 +62,7 @@ static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
return atomic_inc_not_zero(&policy->refcnt);
return refcount_inc_not_zero(&policy->refcnt);
}
static inline bool
@ -292,7 +292,7 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
INIT_HLIST_NODE(&policy->bydst);
INIT_HLIST_NODE(&policy->byidx);
rwlock_init(&policy->lock);
atomic_set(&policy->refcnt, 1);
refcount_set(&policy->refcnt, 1);
skb_queue_head_init(&policy->polq.hold_queue);
setup_timer(&policy->timer, xfrm_policy_timer,
(unsigned long)policy);

View File

@ -48,7 +48,7 @@ static HLIST_HEAD(xfrm_state_gc_list);
static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
{
return atomic_inc_not_zero(&x->refcnt);
return refcount_inc_not_zero(&x->refcnt);
}
static inline unsigned int xfrm_dst_hash(struct net *net,
@ -558,7 +558,7 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
if (x) {
write_pnet(&x->xs_net, net);
atomic_set(&x->refcnt, 1);
refcount_set(&x->refcnt, 1);
atomic_set(&x->tunnel_users, 0);
INIT_LIST_HEAD(&x->km.all);
INIT_HLIST_NODE(&x->bydst);