Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 955bd1d216
Author: David S. Miller <davem@davemloft.net>
Date:   2018-01-24 23:44:15 -05:00
31 changed files with 277 additions and 126 deletions

@@ -62,7 +62,15 @@ trivial patch so apply some common sense.
7. When sending security related changes or reports to a maintainer
please Cc: security@kernel.org, especially if the maintainer
does not respond.
does not respond. Please keep in mind that the security team is
a small set of people who can be efficient only when working on
verified bugs. Please only Cc: this list when you have identified
that the bug would present a short-term risk to other users if it
were publicly disclosed. For example, reports of address leaks do
not represent an immediate threat and are better handled publicly,
and ideally, should come with a patch proposal. Please do not send
automated reports to this list either. Such bugs will be handled
better and faster in the usual public places.
8. Happy hacking.
@@ -12253,7 +12261,7 @@ M: Security Officers <security@kernel.org>
S: Supported
SECURITY SUBSYSTEM
M: James Morris <james.l.morris@oracle.com>
M: James Morris <jmorris@namei.org>
M: "Serge E. Hallyn" <serge@hallyn.com>
L: linux-security-module@vger.kernel.org (suggested Cc:)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git

@@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_MD5_SPARC64) += md5-sparc64.o
obj-$(CONFIG_CRYPTO_AES_SPARC64) += aes-sparc64.o
obj-$(CONFIG_CRYPTO_DES_SPARC64) += des-sparc64.o
obj-$(CONFIG_CRYPTO_DES_SPARC64) += camellia-sparc64.o
obj-$(CONFIG_CRYPTO_CAMELLIA_SPARC64) += camellia-sparc64.o
obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o

@@ -29,10 +29,13 @@ KASAN_SANITIZE_stacktrace.o := n
KASAN_SANITIZE_paravirt.o := n
OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_test_nx.o := y
OBJECT_FILES_NON_STANDARD_paravirt_patch_$(BITS).o := y
ifdef CONFIG_FRAME_POINTER
OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y
endif
# If instrumentation of this dir is enabled, boot hangs during first second.
# Probably could be more selective here, but note that files related to irqs,
# boot, dumpstack/stacktrace, etc are either non-interesting or can lead to

@@ -8,6 +8,7 @@
#include <asm/ftrace.h>
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>
.code64
.section .entry.text, "ax"
@@ -20,7 +21,6 @@ EXPORT_SYMBOL(__fentry__)
EXPORT_SYMBOL(mcount)
#endif
/* All cases save the original rbp (8 bytes) */
#ifdef CONFIG_FRAME_POINTER
# ifdef CC_USING_FENTRY
/* Save parent and function stack frames (rip and rbp) */
@@ -31,7 +31,7 @@ EXPORT_SYMBOL(mcount)
# endif
#else
/* No need to save a stack frame */
# define MCOUNT_FRAME_SIZE 8
# define MCOUNT_FRAME_SIZE 0
#endif /* CONFIG_FRAME_POINTER */
/* Size of stack used to save mcount regs in save_mcount_regs */
@@ -64,10 +64,10 @@ EXPORT_SYMBOL(mcount)
*/
.macro save_mcount_regs added=0
/* Always save the original rbp */
#ifdef CONFIG_FRAME_POINTER
/* Save the original rbp */
pushq %rbp
#ifdef CONFIG_FRAME_POINTER
/*
* Stack traces will stop at the ftrace trampoline if the frame pointer
* is not set up properly. If fentry is used, we need to save a frame
@@ -105,7 +105,11 @@ EXPORT_SYMBOL(mcount)
* Save the original RBP. Even though the mcount ABI does not
* require this, it helps out callers.
*/
#ifdef CONFIG_FRAME_POINTER
movq MCOUNT_REG_SIZE-8(%rsp), %rdx
#else
movq %rbp, %rdx
#endif
movq %rdx, RBP(%rsp)
/* Copy the parent address into %rsi (second parameter) */
@@ -148,7 +152,7 @@ EXPORT_SYMBOL(mcount)
ENTRY(function_hook)
retq
END(function_hook)
ENDPROC(function_hook)
ENTRY(ftrace_caller)
/* save_mcount_regs fills in first two parameters */
@@ -184,7 +188,7 @@ GLOBAL(ftrace_graph_call)
/* This is weak to keep gas from relaxing the jumps */
WEAK(ftrace_stub)
retq
END(ftrace_caller)
ENDPROC(ftrace_caller)
ENTRY(ftrace_regs_caller)
/* Save the current flags before any operations that can change them */
@@ -255,7 +259,7 @@ GLOBAL(ftrace_regs_caller_end)
jmp ftrace_epilogue
END(ftrace_regs_caller)
ENDPROC(ftrace_regs_caller)
#else /* ! CONFIG_DYNAMIC_FTRACE */
@@ -313,9 +317,10 @@ ENTRY(ftrace_graph_caller)
restore_mcount_regs
retq
END(ftrace_graph_caller)
ENDPROC(ftrace_graph_caller)
GLOBAL(return_to_handler)
ENTRY(return_to_handler)
UNWIND_HINT_EMPTY
subq $24, %rsp
/* Save the return values */
@@ -330,4 +335,5 @@ GLOBAL(return_to_handler)
movq (%rsp), %rax
addq $24, %rsp
JMP_NOSPEC %rdi
END(return_to_handler)
#endif

@@ -74,8 +74,50 @@ static struct orc_entry *orc_module_find(unsigned long ip)
}
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
static struct orc_entry *orc_find(unsigned long ip);
/*
* Ftrace dynamic trampolines do not have orc entries of their own.
* But they are copies of the ftrace entries that are static and
* defined in ftrace_*.S, which do have orc entries.
*
* If the unwinder comes across an ftrace trampoline, then find the
* ftrace function that was used to create it, and use that ftrace
* function's orc entry, as the placement of the return code in
* the stack will be identical.
*/
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
struct ftrace_ops *ops;
unsigned long caller;
ops = ftrace_ops_trampoline(ip);
if (!ops)
return NULL;
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
caller = (unsigned long)ftrace_regs_call;
else
caller = (unsigned long)ftrace_call;
/* Prevent unlikely recursion */
if (ip == caller)
return NULL;
return orc_find(caller);
}
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
return NULL;
}
#endif
static struct orc_entry *orc_find(unsigned long ip)
{
static struct orc_entry *orc;
if (!orc_init)
return NULL;
@@ -111,7 +153,11 @@ static struct orc_entry *orc_find(unsigned long ip)
__stop_orc_unwind_ip - __start_orc_unwind_ip, ip);
/* Module lookup: */
return orc_module_find(ip);
orc = orc_module_find(ip);
if (orc)
return orc;
return orc_ftrace_find(ip);
}
static void orc_sort_swap(void *_a, void *_b, int size)

@@ -662,11 +662,11 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
*/
static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
{
static const char *name = "PCI Bus 0000:00";
struct resource *res, *conflict;
u32 base, limit, high;
struct pci_dev *other;
struct resource *res;
unsigned i;
int r;
if (!(pci_probe & PCI_BIG_ROOT_WINDOW))
return;
@@ -707,21 +707,26 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
* Allocate a 256GB window directly below the 0xfd00000000 hardware
* limit (see AMD Family 15h Models 30h-3Fh BKDG, sec 2.4.6).
*/
res->name = "PCI Bus 0000:00";
res->name = name;
res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
res->start = 0xbd00000000ull;
res->end = 0xfd00000000ull - 1;
r = request_resource(&iomem_resource, res);
if (r) {
conflict = request_resource_conflict(&iomem_resource, res);
if (conflict) {
kfree(res);
return;
}
if (conflict->name != name)
return;
dev_info(&dev->dev, "adding root bus resource %pR (tainting kernel)\n",
res);
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
/* We are resuming from suspend; just reenable the window */
res = conflict;
} else {
dev_info(&dev->dev, "adding root bus resource %pR (tainting kernel)\n",
res);
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
pci_bus_add_resource(dev->bus, res, 0);
}
base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
@@ -733,13 +738,16 @@
pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high);
pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit);
pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base);
pci_bus_add_resource(dev->bus, res, 0);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
#endif
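
For context, request_resource_conflict() returns NULL on success or a
pointer to the conflicting resource on failure; since the fixup above now
also runs at resume, comparing the conflict's name pointer against the
static name string is what distinguishes "our own pre-suspend window is
still registered" from a genuine collision. A minimal sketch of the
pattern, with hypothetical names:

#include <linux/ioport.h>
#include <linux/slab.h>

static const char *win_name = "example root window";

/* Hypothetical helper: claim the window at boot, reuse it on resume. */
static struct resource *claim_example_window(void)
{
	struct resource *res, *conflict;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	res->name  = win_name;
	res->flags = IORESOURCE_MEM | IORESOURCE_MEM_64;
	res->start = 0xbd00000000ull;
	res->end   = 0xfd00000000ull - 1;

	conflict = request_resource_conflict(&iomem_resource, res);
	if (!conflict)
		return res;		/* freshly claimed */

	kfree(res);
	if (conflict->name == win_name)
		return conflict;	/* our earlier window; just reuse it */
	return NULL;			/* range owned by someone else */
}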

@@ -494,6 +494,9 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
case 16384:
ret |= EMAC_MR1_RFS_16K;
break;
case 8192:
ret |= EMAC4_MR1_RFS_8K;
break;
case 4096:
ret |= EMAC_MR1_RFS_4K;
break;
@@ -516,6 +519,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
case 16384:
ret |= EMAC4_MR1_TFS_16K;
break;
case 8192:
ret |= EMAC4_MR1_TFS_8K;
break;
case 4096:
ret |= EMAC4_MR1_TFS_4K;
break;

@@ -138,9 +138,11 @@ struct emac_regs {
#define EMAC4_MR1_RFS_2K 0x00100000
#define EMAC4_MR1_RFS_4K 0x00180000
#define EMAC4_MR1_RFS_8K 0x00200000
#define EMAC4_MR1_RFS_16K 0x00280000
#define EMAC4_MR1_TFS_2K 0x00020000
#define EMAC4_MR1_TFS_4K 0x00030000
#define EMAC4_MR1_TFS_8K 0x00040000
#define EMAC4_MR1_TFS_16K 0x00050000
#define EMAC4_MR1_TR 0x00008000
#define EMAC4_MR1_MWSW_001 0x00001000
@@ -229,7 +231,7 @@ struct emac_regs {
#define EMAC_STACR_PHYE 0x00004000
#define EMAC_STACR_STAC_MASK 0x00003000
#define EMAC_STACR_STAC_READ 0x00001000
#define EMAC_STACR_STAC_WRITE 0x00002000
#define EMAC_STACR_STAC_WRITE 0x00000800
#define EMAC_STACR_OPBC_MASK 0x00000C00
#define EMAC_STACR_OPBC_50 0x00000000
#define EMAC_STACR_OPBC_66 0x00000400

@@ -7437,6 +7437,8 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
{
struct i40e_vsi *vsi = np->vsi;
if (!tc_can_offload(vsi->netdev))
return -EOPNOTSUPP;
if (cls_flower->common.chain_index)
return -EOPNOTSUPP;

@@ -1968,11 +1968,8 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
dipn = htonl(dip);
dev = mlxsw_sp->router->rifs[rif]->dev;
n = neigh_lookup(&arp_tbl, &dipn, dev);
if (!n) {
netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
&dip);
if (!n)
return;
}
netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
neigh_event_send(n, NULL);
@@ -1999,11 +1996,8 @@ static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
dev = mlxsw_sp->router->rifs[rif]->dev;
n = neigh_lookup(&nd_tbl, &dip, dev);
if (!n) {
netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
&dip);
if (!n)
return;
}
netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
neigh_event_send(n, NULL);

@@ -358,10 +358,27 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
kfree(p_rdma_info);
}
static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
{
qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
}
static void qed_rdma_free(struct qed_hwfn *p_hwfn)
{
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
qed_rdma_free_reserved_lkey(p_hwfn);
qed_rdma_resc_free(p_hwfn);
}
@@ -615,9 +632,6 @@ static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
/* The first DPI is reserved for the Kernel */
__set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
/* Tid 0 will be used as the key for "reserved MR".
* The driver should allocate memory for it so it can be loaded but no
* ramrod should be passed on it.
@@ -797,17 +811,6 @@ static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
return p_hwfn->p_rdma_info->dev;
}
static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
struct qed_hwfn *p_hwfn;

@@ -842,6 +842,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
struct pppoe_hdr *ph;
struct net_device *dev;
char *start;
int hlen;
lock_sock(sk);
if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
@@ -860,16 +861,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
if (total_len > (dev->mtu + dev->hard_header_len))
goto end;
skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
0, GFP_KERNEL);
hlen = LL_RESERVED_SPACE(dev);
skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
dev->needed_tailroom, 0, GFP_KERNEL);
if (!skb) {
error = -ENOMEM;
goto end;
}
/* Reserve space for headers. */
skb_reserve(skb, dev->hard_header_len);
skb_reserve(skb, hlen);
skb_reset_network_header(skb);
skb->dev = dev;
@@ -930,7 +931,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
/* Copy the data if there is no space for the header or if it's
* read-only.
*/
if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len))
if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
goto abort;
__skb_push(skb, sizeof(*ph));
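
For context, LL_RESERVED_SPACE(dev) is the headroom a transmit buffer must
reserve for the device's link-layer header (rounded up for alignment), so
sizing allocations with it plus dev->needed_tailroom replaces ad-hoc fudge
factors like the old "+ 32" above. A minimal sketch of the allocation
pattern, with a hypothetical helper name:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper: allocate a TX skb with proper head/tail room. */
static struct sk_buff *alloc_tx_skb(struct net_device *dev,
				    unsigned int payload)
{
	int hlen = LL_RESERVED_SPACE(dev);
	struct sk_buff *skb;

	skb = alloc_skb(hlen + payload + dev->needed_tailroom, GFP_KERNEL);
	if (!skb)
		return NULL;

	/* Leave room for the hard header to be pushed later. */
	skb_reserve(skb, hlen);
	skb->dev = dev;
	return skb;
}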

@@ -1616,7 +1616,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
rq->rx_ring[i].basePA);
rq->rx_ring[i].base = NULL;
}
rq->buf_info[i] = NULL;
}
if (rq->data_ring.base) {
@@ -1638,6 +1637,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
(rq->rx_ring[0].size + rq->rx_ring[1].size);
dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
rq->buf_info_pa);
rq->buf_info[0] = rq->buf_info[1] = NULL;
}
}

@@ -904,7 +904,7 @@ static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
int i = 0;
for (i = 0; i < d->nvqs; ++i)
mutex_lock(&d->vqs[i]->mutex);
mutex_lock_nested(&d->vqs[i]->mutex, i);
}
static void vhost_dev_unlock_vqs(struct vhost_dev *d)
@@ -1015,6 +1015,10 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
vhost_iotlb_notify_vq(dev, msg);
break;
case VHOST_IOTLB_INVALIDATE:
if (!dev->iotlb) {
ret = -EFAULT;
break;
}
vhost_vq_meta_reset(dev);
vhost_del_umem_range(dev->iotlb, msg->iova,
msg->iova + msg->size - 1);
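
For context, lockdep classifies locks by class rather than instance: every
per-virtqueue mutex shares one class, so taking them all in a loop with
plain mutex_lock() looks like recursive locking. mutex_lock_nested() passes
the array index as a lockdep subclass to declare the ordering intentional.
A minimal sketch, assuming a small fixed lock count (lockdep allows at
most 8 subclasses):

#include <linux/mutex.h>

#define NLOCKS 8	/* MAX_LOCKDEP_SUBCLASSES is 8 */

static struct mutex locks[NLOCKS];	/* one lockdep class for all */

static void lock_all(void)
{
	int i;

	/* Subclass i marks these as distinct locks taken in a fixed order. */
	for (i = 0; i < NLOCKS; i++)
		mutex_lock_nested(&locks[i], i);
}

static void unlock_all(void)
{
	int i;

	for (i = NLOCKS - 1; i >= 0; i--)
		mutex_unlock(&locks[i]);
}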

@@ -332,6 +332,8 @@ extern int ftrace_text_reserved(const void *start, const void *end);
extern int ftrace_nr_registered_ops(void);
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
bool is_ftrace_trampoline(unsigned long addr);
/*

@@ -31,17 +31,11 @@
#else
#define MODULE_RANDSTRUCT_PLUGIN
#endif
#ifdef RETPOLINE
#define MODULE_VERMAGIC_RETPOLINE "retpoline "
#else
#define MODULE_VERMAGIC_RETPOLINE ""
#endif
#define VERMAGIC_STRING \
UTS_RELEASE " " \
MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
MODULE_ARCH_VERMAGIC \
MODULE_RANDSTRUCT_PLUGIN \
MODULE_VERMAGIC_RETPOLINE
MODULE_RANDSTRUCT_PLUGIN

@@ -332,6 +332,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
int flags);
int ip6_flowlabel_init(void);
void ip6_flowlabel_cleanup(void);
bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);
static inline void fl6_sock_release(struct ip6_flowlabel *fl)
{

@@ -535,7 +535,7 @@ static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
switch (layer) {
case TCF_LAYER_LINK:
return skb->data;
return skb_mac_header(skb);
case TCF_LAYER_NETWORK:
return skb_network_header(skb);
case TCF_LAYER_TRANSPORT:

@@ -1119,15 +1119,11 @@ static struct ftrace_ops global_ops = {
};
/*
* This is used by __kernel_text_address() to return true if the
* address is on a dynamically allocated trampoline that would
* not return true for either core_kernel_text() or
* is_module_text_address().
* Used by the stack unwinder to know about dynamic ftrace trampolines.
*/
bool is_ftrace_trampoline(unsigned long addr)
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
struct ftrace_ops *op;
bool ret = false;
struct ftrace_ops *op = NULL;
/*
* Some of the ops may be dynamically allocated,
@@ -1144,15 +1140,24 @@ bool is_ftrace_trampoline(unsigned long addr)
if (op->trampoline && op->trampoline_size)
if (addr >= op->trampoline &&
addr < op->trampoline + op->trampoline_size) {
ret = true;
goto out;
preempt_enable_notrace();
return op;
}
} while_for_each_ftrace_op(op);
out:
preempt_enable_notrace();
return ret;
return NULL;
}
/*
* This is used by __kernel_text_address() to return true if the
* address is on a dynamically allocated trampoline that would
* not return true for either core_kernel_text() or
* is_module_text_address().
*/
bool is_ftrace_trampoline(unsigned long addr)
{
return ftrace_ops_trampoline(addr) != NULL;
}
struct ftrace_page {

@@ -2374,6 +2374,15 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
/*
* Skip 3:
*
* trace_buffer_unlock_commit_regs()
* trace_event_buffer_commit()
* trace_event_raw_event_xxx()
*/
# define STACK_SKIP 3
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
struct ring_buffer *buffer,
struct ring_buffer_event *event,
@@ -2383,16 +2392,12 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
__buffer_unlock_commit(buffer, event);
/*
* If regs is not set, then skip the following callers:
* trace_buffer_unlock_commit_regs
* event_trigger_unlock_commit
* trace_event_buffer_commit
* trace_event_raw_event_sched_switch
* If regs is not set, then skip the necessary functions.
* Note, we can still get here via blktrace, wakeup tracer
* and mmiotrace, but that's ok if they lose a function or
* two. They are that meaningful.
* two. They are not that meaningful.
*/
ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
ftrace_trace_userstack(buffer, flags, pc);
}
@@ -2579,11 +2584,13 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
trace.skip = skip;
/*
* Add two, for this function and the call to save_stack_trace()
* Add one, for this function and the call to save_stack_trace()
* If regs is set, then these functions will not be in the way.
*/
#ifndef CONFIG_UNWINDER_ORC
if (!regs)
trace.skip += 2;
trace.skip++;
#endif
/*
* Since events can happen in NMIs there's no safe way to
@@ -2711,11 +2718,10 @@ void trace_dump_stack(int skip)
local_save_flags(flags);
/*
* Skip 3 more, seems to get us at the caller of
* this function.
*/
skip += 3;
#ifndef CONFIG_UNWINDER_ORC
/* Skip 1 to skip this function. */
skip++;
#endif
__ftrace_trace_stack(global_trace.trace_buffer.buffer,
flags, skip, preempt_count(), NULL);
}

@@ -1123,13 +1123,22 @@ static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */
#ifdef CONFIG_STACKTRACE
/*
* Skip 3:
* stacktrace_trigger()
#ifdef CONFIG_UNWINDER_ORC
/* Skip 2:
* event_triggers_post_call()
* trace_event_raw_event_xxx()
*/
#define STACK_SKIP 3
# define STACK_SKIP 2
#else
/*
* Skip 4:
* stacktrace_trigger()
* event_triggers_post_call()
* trace_event_buffer_commit()
* trace_event_raw_event_xxx()
*/
#define STACK_SKIP 4
#endif
static void
stacktrace_trigger(struct event_trigger_data *data, void *rec)

@@ -154,6 +154,24 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
preempt_enable_notrace();
}
#ifdef CONFIG_UNWINDER_ORC
/*
* Skip 2:
*
* function_stack_trace_call()
* ftrace_call()
*/
#define STACK_SKIP 2
#else
/*
* Skip 3:
* __trace_stack()
* function_stack_trace_call()
* ftrace_call()
*/
#define STACK_SKIP 3
#endif
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
@@ -180,15 +198,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
if (likely(disabled == 1)) {
pc = preempt_count();
trace_function(tr, ip, parent_ip, flags, pc);
/*
* skip over 5 funcs:
* __ftrace_trace_stack,
* __trace_stack,
* function_stack_trace_call
* ftrace_list_func
* ftrace_call
*/
__trace_stack(tr, flags, 5, pc);
__trace_stack(tr, flags, STACK_SKIP, pc);
}
atomic_dec(&data->disabled);
@@ -367,14 +377,27 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
tracer_tracing_off(tr);
}
#ifdef CONFIG_UNWINDER_ORC
/*
* Skip 4:
* ftrace_stacktrace()
* Skip 3:
*
* function_trace_probe_call()
* ftrace_ops_list_func()
* ftrace_ops_assist_func()
* ftrace_call()
*/
#define STACK_SKIP 4
#define FTRACE_STACK_SKIP 3
#else
/*
* Skip 5:
*
* __trace_stack()
* ftrace_stacktrace()
* function_trace_probe_call()
* ftrace_ops_assist_func()
* ftrace_call()
*/
#define FTRACE_STACK_SKIP 5
#endif
static __always_inline void trace_stack(struct trace_array *tr)
{
@@ -384,7 +407,7 @@ static __always_inline void trace_stack(struct trace_array *tr)
local_save_flags(flags);
pc = preempt_count();
__trace_stack(tr, flags, STACK_SKIP, pc);
__trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
}
static void

@@ -92,6 +92,7 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
skb_reset_network_header(skb);
skb_mac_header_rebuild(skb);
eth_hdr(skb)->h_proto = skb->protocol;
err = 0;

@@ -174,7 +174,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
{
if (!np->autoflowlabel_set)
return ip6_default_np_autolabel(net);

@@ -1336,7 +1336,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
break;
case IPV6_AUTOFLOWLABEL:
val = np->autoflowlabel;
val = ip6_autoflowlabel(sock_net(sk), np);
break;
case IPV6_RECVFRAGSIZE:

@@ -92,6 +92,7 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
skb_reset_network_header(skb);
skb_mac_header_rebuild(skb);
eth_hdr(skb)->h_proto = skb->protocol;
err = 0;

@@ -1387,8 +1387,13 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
if (!csk)
return -EINVAL;
/* We must prevent loops or risk deadlock ! */
if (csk->sk_family == PF_KCM)
/* Only allow TCP sockets to be attached for now */
if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
csk->sk_protocol != IPPROTO_TCP)
return -EOPNOTSUPP;
/* Don't allow listeners or closed sockets */
if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE)
return -EOPNOTSUPP;
psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
@@ -1405,9 +1410,18 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
return err;
}
sock_hold(csk);
write_lock_bh(&csk->sk_callback_lock);
/* Check if sk_user_data is already in use by KCM or someone else.
* Must be done under lock to prevent race conditions.
*/
if (csk->sk_user_data) {
write_unlock_bh(&csk->sk_callback_lock);
strp_done(&psock->strp);
kmem_cache_free(kcm_psockp, psock);
return -EALREADY;
}
psock->save_data_ready = csk->sk_data_ready;
psock->save_write_space = csk->sk_write_space;
psock->save_state_change = csk->sk_state_change;
@@ -1415,8 +1429,11 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
csk->sk_data_ready = psock_data_ready;
csk->sk_write_space = psock_write_space;
csk->sk_state_change = psock_state_change;
write_unlock_bh(&csk->sk_callback_lock);
sock_hold(csk);
/* Finished initialization, now add the psock to the MUX. */
spin_lock_bh(&mux->lock);
head = &mux->psocks;
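
For context, the sk_user_data check above closes a race where two threads
attach the same underlying socket: ownership has to be tested and taken in
one critical section under sk_callback_lock, before the sk_* callbacks are
rewritten. A minimal sketch of just the claim step, with hypothetical
names (KCM itself interleaves this with saving the old callbacks):

#include <net/sock.h>

/* Hypothetical helper: take exclusive ownership of a socket. */
static int claim_socket(struct sock *sk, void *state)
{
	write_lock_bh(&sk->sk_callback_lock);
	if (sk->sk_user_data) {
		/* KCM or another layer got here first. */
		write_unlock_bh(&sk->sk_callback_lock);
		return -EALREADY;
	}
	sk->sk_user_data = state;
	/* Still under the lock: safe to redirect sk_data_ready etc. here. */
	write_unlock_bh(&sk->sk_callback_lock);
	return 0;
}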

@@ -51,7 +51,7 @@ static int em_nbyte_match(struct sk_buff *skb, struct tcf_ematch *em,
if (!tcf_valid_offset(skb, ptr, nbyte->hdr.len))
return 0;
return !memcmp(ptr + nbyte->hdr.off, nbyte->pattern, nbyte->hdr.len);
return !memcmp(ptr, nbyte->pattern, nbyte->hdr.len);
}
static struct tcf_ematch_ops em_nbyte_ops = {

@@ -184,6 +184,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
err = dev->xfrmdev_ops->xdo_dev_state_add(x);
if (err) {
xso->dev = NULL;
dev_put(dev);
return err;
}

@@ -317,7 +317,7 @@ retry:
if (!type && try_load) {
request_module("xfrm-offload-%d-%d", family, proto);
try_load = 0;
try_load = false;
goto retry;
}
@@ -2279,8 +2279,6 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
goto error;
}
x->km.state = XFRM_STATE_VALID;
error:
return err;
}
@@ -2289,7 +2287,13 @@ EXPORT_SYMBOL(__xfrm_init_state);
int xfrm_init_state(struct xfrm_state *x)
{
return __xfrm_init_state(x, true, false);
int err;
err = __xfrm_init_state(x, true, false);
if (!err)
x->km.state = XFRM_STATE_VALID;
return err;
}
EXPORT_SYMBOL(xfrm_init_state);

@@ -598,13 +598,6 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
goto error;
}
if (attrs[XFRMA_OFFLOAD_DEV]) {
err = xfrm_dev_state_add(net, x,
nla_data(attrs[XFRMA_OFFLOAD_DEV]));
if (err)
goto error;
}
if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
attrs[XFRMA_REPLAY_ESN_VAL])))
goto error;
@@ -620,6 +613,14 @@
/* override default values from above */
xfrm_update_ae_params(x, attrs, 0);
/* configure the hardware if offload is requested */
if (attrs[XFRMA_OFFLOAD_DEV]) {
err = xfrm_dev_state_add(net, x,
nla_data(attrs[XFRMA_OFFLOAD_DEV]));
if (err)
goto error;
}
return x;
error:
@@ -662,6 +663,9 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
goto out;
}
if (x->km.state == XFRM_STATE_VOID)
x->km.state = XFRM_STATE_VALID;
c.seq = nlh->nlmsg_seq;
c.portid = nlh->nlmsg_pid;
c.event = nlh->nlmsg_type;