mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-28 05:12:49 +00:00
filter: constify sk_run_filter()
sk_run_filter() doesn't write on skb, change its prototype to reflect this. Fix two af_packet comments. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
38f49e8801
commit
62ab081213
4 changed files with 22 additions and 20 deletions
|
@ -148,7 +148,7 @@ struct sk_buff;
|
||||||
struct sock;
|
struct sock;
|
||||||
|
|
||||||
extern int sk_filter(struct sock *sk, struct sk_buff *skb);
|
extern int sk_filter(struct sock *sk, struct sk_buff *skb);
|
||||||
extern unsigned int sk_run_filter(struct sk_buff *skb,
|
extern unsigned int sk_run_filter(const struct sk_buff *skb,
|
||||||
const struct sock_filter *filter);
|
const struct sock_filter *filter);
|
||||||
extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
|
extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
|
||||||
extern int sk_detach_filter(struct sock *sk);
|
extern int sk_detach_filter(struct sock *sk);
|
||||||
|
|
|
@ -88,7 +88,7 @@ enum {
|
||||||
};
|
};
|
||||||
|
|
||||||
/* No hurry in this branch */
|
/* No hurry in this branch */
|
||||||
static void *__load_pointer(struct sk_buff *skb, int k)
|
static void *__load_pointer(const struct sk_buff *skb, int k)
|
||||||
{
|
{
|
||||||
u8 *ptr = NULL;
|
u8 *ptr = NULL;
|
||||||
|
|
||||||
|
@ -102,7 +102,7 @@ static void *__load_pointer(struct sk_buff *skb, int k)
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void *load_pointer(struct sk_buff *skb, int k,
|
static inline void *load_pointer(const struct sk_buff *skb, int k,
|
||||||
unsigned int size, void *buffer)
|
unsigned int size, void *buffer)
|
||||||
{
|
{
|
||||||
if (k >= 0)
|
if (k >= 0)
|
||||||
|
@ -160,7 +160,8 @@ EXPORT_SYMBOL(sk_filter);
|
||||||
* and last instruction guaranteed to be a RET, we dont need to check
|
* and last instruction guaranteed to be a RET, we dont need to check
|
||||||
* flen. (We used to pass to this function the length of filter)
|
* flen. (We used to pass to this function the length of filter)
|
||||||
*/
|
*/
|
||||||
unsigned int sk_run_filter(struct sk_buff *skb, const struct sock_filter *fentry)
|
unsigned int sk_run_filter(const struct sk_buff *skb,
|
||||||
|
const struct sock_filter *fentry)
|
||||||
{
|
{
|
||||||
void *ptr;
|
void *ptr;
|
||||||
u32 A = 0; /* Accumulator */
|
u32 A = 0; /* Accumulator */
|
||||||
|
|
|
@ -26,7 +26,7 @@ static struct sock_filter ptp_filter[] = {
|
||||||
PTP_FILTER
|
PTP_FILTER
|
||||||
};
|
};
|
||||||
|
|
||||||
static unsigned int classify(struct sk_buff *skb)
|
static unsigned int classify(const struct sk_buff *skb)
|
||||||
{
|
{
|
||||||
if (likely(skb->dev &&
|
if (likely(skb->dev &&
|
||||||
skb->dev->phydev &&
|
skb->dev->phydev &&
|
||||||
|
|
|
@ -517,7 +517,8 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
|
static inline unsigned int run_filter(const struct sk_buff *skb,
|
||||||
|
const struct sock *sk,
|
||||||
unsigned int res)
|
unsigned int res)
|
||||||
{
|
{
|
||||||
struct sk_filter *filter;
|
struct sk_filter *filter;
|
||||||
|
@ -532,15 +533,15 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
This function makes lazy skb cloning in hope that most of packets
|
* This function makes lazy skb cloning in hope that most of packets
|
||||||
are discarded by BPF.
|
* are discarded by BPF.
|
||||||
|
*
|
||||||
Note tricky part: we DO mangle shared skb! skb->data, skb->len
|
* Note tricky part: we DO mangle shared skb! skb->data, skb->len
|
||||||
and skb->cb are mangled. It works because (and until) packets
|
* and skb->cb are mangled. It works because (and until) packets
|
||||||
falling here are owned by current CPU. Output packets are cloned
|
* falling here are owned by current CPU. Output packets are cloned
|
||||||
by dev_queue_xmit_nit(), input packets are processed by net_bh
|
* by dev_queue_xmit_nit(), input packets are processed by net_bh
|
||||||
sequencially, so that if we return skb to original state on exit,
|
* sequencially, so that if we return skb to original state on exit,
|
||||||
we will not harm anyone.
|
* we will not harm anyone.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
|
static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
|
||||||
|
@ -566,11 +567,11 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
|
||||||
|
|
||||||
if (dev->header_ops) {
|
if (dev->header_ops) {
|
||||||
/* The device has an explicit notion of ll header,
|
/* The device has an explicit notion of ll header,
|
||||||
exported to higher levels.
|
* exported to higher levels.
|
||||||
|
*
|
||||||
Otherwise, the device hides datails of it frame
|
* Otherwise, the device hides details of its frame
|
||||||
structure, so that corresponding packet head
|
* structure, so that corresponding packet head is
|
||||||
never delivered to user.
|
* never delivered to user.
|
||||||
*/
|
*/
|
||||||
if (sk->sk_type != SOCK_DGRAM)
|
if (sk->sk_type != SOCK_DGRAM)
|
||||||
skb_push(skb, skb->data - skb_mac_header(skb));
|
skb_push(skb, skb->data - skb_mac_header(skb));
|
||||||
|
|
Loading…
Reference in a new issue