mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-11-01 17:08:10 +00:00
7123aaa3a1
/proc/net/unix has quadratic behavior, and can hold unix_table_lock for a while if a high number of unix sockets are alive. (90 ms for 200k sockets...) We already have a hash table, so it's quite easy to use it. The problem is that unbound sockets are still hashed in a single hash slot (unix_socket_table[UNIX_HASH_TABLE]). This patch also spreads unbound sockets over 256 hash slots, to speed up both /proc/net/unix and unix_diag. Time to read /proc/net/unix with 200k unix sockets (time dd if=/proc/net/unix of=/dev/null bs=4k): before: 520 secs; after: 2 secs. Signed-off-by: Eric Dumazet <edumazet@google.com> Cc: Steven Whitehouse <swhiteho@redhat.com> Cc: Pavel Emelyanov <xemul@parallels.com> Signed-off-by: David S. Miller <davem@davemloft.net>
79 lines
2.1 KiB
C
#ifndef __LINUX_NET_AFUNIX_H
#define __LINUX_NET_AFUNIX_H

#include <linux/socket.h>
#include <linux/un.h>
#include <linux/mutex.h>
#include <net/sock.h>
/*
 * AF_UNIX garbage-collection and SCM_RIGHTS bookkeeping helpers,
 * defined in net/unix/garbage.c and net/unix/af_unix.c.
 * NOTE(review): exact semantics live in the definitions; names suggest
 * in-flight fd tracking for the cycle-collecting GC — confirm there.
 */
extern void unix_inflight(struct file *fp);
extern void unix_notinflight(struct file *fp);
extern void unix_gc(void);
extern void wait_for_unix_gc(void);
extern struct sock *unix_get_socket(struct file *filp);
extern struct sock *unix_peer_get(struct sock *);
/*
 * Hash-table geometry. Bound sockets occupy slots [0, UNIX_HASH_SIZE);
 * unbound sockets are spread over the upper half, so unix_socket_table
 * below is 2 * UNIX_HASH_SIZE entries.
 */
#define UNIX_HASH_SIZE	256
#define UNIX_HASH_BITS	8	/* log2(UNIX_HASH_SIZE) */
extern unsigned int unix_tot_inflight;
|
|
extern spinlock_t unix_table_lock;
|
|
extern struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
|
|
|
|
struct unix_address {
|
|
atomic_t refcnt;
|
|
int len;
|
|
unsigned int hash;
|
|
struct sockaddr_un name[0];
|
|
};
|
|
|
|
/*
 * Per-skb AF_UNIX control data, stored in skb->cb (accessed via the
 * UNIXCB() macro below).
 */
struct unix_skb_parms {
	struct pid		*pid;	/* Skb credentials */
	const struct cred	*cred;
	struct scm_fp_list	*fp;	/* Passed files */
#ifdef CONFIG_SECURITY_NETWORK
	u32			secid;	/* Security ID */
#endif
};
/* Reinterpret an skb's control block as struct unix_skb_parms. */
#define UNIXCB(skb)	(*(struct unix_skb_parms *)&((skb)->cb))
#define UNIXSID(skb)	(&UNIXCB((skb)).secid)
/* Per-socket state lock helpers (unix_sk(s)->lock). */
#define unix_state_lock(s)	spin_lock(&unix_sk(s)->lock)
#define unix_state_unlock(s)	spin_unlock(&unix_sk(s)->lock)
/* Nested variant for locking two peers without lockdep false positives. */
#define unix_state_lock_nested(s) \
				spin_lock_nested(&unix_sk(s)->lock, \
				SINGLE_DEPTH_NESTING)
/* The AF_UNIX socket */
|
|
struct unix_sock {
|
|
/* WARNING: sk has to be the first member */
|
|
struct sock sk;
|
|
struct unix_address *addr;
|
|
struct path path;
|
|
struct mutex readlock;
|
|
struct sock *peer;
|
|
struct sock *other;
|
|
struct list_head link;
|
|
atomic_long_t inflight;
|
|
spinlock_t lock;
|
|
unsigned int gc_candidate : 1;
|
|
unsigned int gc_maybe_cycle : 1;
|
|
unsigned char recursion_level;
|
|
struct socket_wq peer_wq;
|
|
};
|
|
#define unix_sk(__sk) ((struct unix_sock *)__sk)
|
|
|
|
/* Shorthand for the embedded peer wait queue. */
#define peer_wait peer_wq.wait

/* Queue-length queries used by ioctl(SIOCINQ/SIOCOUTQ) and diag. */
long unix_inq_len(struct sock *sk);
long unix_outq_len(struct sock *sk);

#ifdef CONFIG_SYSCTL
extern int unix_sysctl_register(struct net *net);
extern void unix_sysctl_unregister(struct net *net);
#else
/* No-op stubs when sysctl support is compiled out. */
static inline int unix_sysctl_register(struct net *net) { return 0; }
static inline void unix_sysctl_unregister(struct net *net) {}
#endif
#endif /* __LINUX_NET_AFUNIX_H */