Merge remote-tracking branch 'torvalds/master' into perf/core

To pick up fixes.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Arnaldo Carvalho de Melo 2022-11-24 10:08:03 -03:00
commit 0c3852adae
32 changed files with 259 additions and 120 deletions


@@ -15952,6 +15952,7 @@ Q: https://patchwork.kernel.org/project/linux-pci/list/
B: https://bugzilla.kernel.org
C: irc://irc.oftc.net/linux-pci
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git
F: Documentation/devicetree/bindings/pci/
F: drivers/pci/controller/
F: drivers/pci/pci-bridge-emul.c
F: drivers/pci/pci-bridge-emul.h
@@ -16058,7 +16059,7 @@ F: Documentation/devicetree/bindings/pci/microchip*
F: drivers/pci/controller/*microchip*
PCIE DRIVER FOR QUALCOMM MSM
M: Stanimir Varbanov <svarbanov@mm-sol.com>
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
L: linux-pci@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained


@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*


@@ -142,7 +142,7 @@ SECTIONS
#endif
.data.rel.ro : AT(ADDR(.data.rel.ro) - LOAD_OFFSET) {
*(.data.rel.ro*)
*(.data.rel.ro .data.rel.ro.*)
}
.branch_lt : AT(ADDR(.branch_lt) - LOAD_OFFSET) {


@@ -861,8 +861,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
pmu_enabled = cpuc->enabled;
cpuc->enabled = 0;
/* stop everything (includes BRS) */
amd_pmu_disable_all();
amd_brs_disable_all();
/* Drain BRS is in use (could be inactive) */
if (cpuc->lbr_users)
@@ -873,7 +872,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
cpuc->enabled = pmu_enabled;
if (pmu_enabled)
amd_pmu_enable_all(0);
amd_brs_enable_all();
return amd_pmu_adjust_nmi_window(handled);
}


@@ -553,6 +553,7 @@ static void uncore_clean_online(void)
hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
hlist_del(&uncore->node);
kfree(uncore->events);
kfree(uncore);
}
}


@@ -1263,6 +1263,15 @@ static int pt_buffer_try_single(struct pt_buffer *buf, int nr_pages)
if (1 << order != nr_pages)
goto out;
/*
* Some processors cannot always support single range for more than
* 4KB - refer errata TGL052, ADL037 and RPL017. Future processors might
* also be affected, so for now rather than trying to keep track of
* which ones, just disable it for all.
*/
if (nr_pages > 1)
goto out;
buf->single = true;
buf->nr_pages = nr_pages;
ret = 0;


@@ -37,7 +37,7 @@ __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
* rsi = lockval (second argument)
* rdx = internal variable (set to 0)
*/
asm (".pushsection .spinlock.text;"
asm (".pushsection .spinlock.text, \"ax\";"
".globl " PV_UNLOCK ";"
".type " PV_UNLOCK ", @function;"
".align 4,0x90;"


@@ -356,6 +356,9 @@ static int sgx_validate_offset_length(struct sgx_encl *encl,
if (!length || !IS_ALIGNED(length, PAGE_SIZE))
return -EINVAL;
if (offset + length < offset)
return -EINVAL;
if (offset + length - PAGE_SIZE >= encl->size)
return -EINVAL;


@@ -605,9 +605,9 @@ int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal)
if (test_thread_flag(TIF_NEED_FPU_LOAD))
fpregs_restore_userregs();
save_fpregs_to_fpstate(dst_fpu);
fpregs_unlock();
if (!(clone_flags & CLONE_THREAD))
fpu_inherit_perms(dst_fpu);
fpregs_unlock();
/*
* Children never inherit PASID state.


@@ -128,12 +128,15 @@ static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
dw_spi_dma_sg_burst_init(dws);
pci_dev_put(dma_dev);
return 0;
free_rxchan:
dma_release_channel(dws->rxchan);
dws->rxchan = NULL;
err_exit:
pci_dev_put(dma_dev);
return -EBUSY;
}


@@ -444,8 +444,7 @@ static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
unsigned int pre, post;
unsigned int fin = spi_imx->spi_clk;
if (unlikely(fspi > fin))
return 0;
fspi = min(fspi, fin);
post = fls(fin) - fls(fspi);
if (fin > fspi << post)
@@ -1607,6 +1606,13 @@ static int spi_imx_transfer_one(struct spi_controller *controller,
if (spi_imx->slave_mode)
return spi_imx_pio_transfer_slave(spi, transfer);
/*
* If we decided in spi_imx_can_dma() that we want to do a DMA
* transfer, the SPI transfer has already been mapped, so we
* have to do the DMA transfer here.
*/
if (spi_imx->usedma)
return spi_imx_dma_transfer(spi_imx, transfer);
/*
* Calculate the estimated time in us the transfer runs. Find
* the number of Hz per byte per polling limit.
@@ -1618,9 +1624,6 @@ static int spi_imx_transfer_one(struct spi_controller *controller,
if (transfer->len < byte_limit)
return spi_imx_poll_transfer(spi, transfer);
if (spi_imx->usedma)
return spi_imx_dma_transfer(spi_imx, transfer);
return spi_imx_pio_transfer(spi, transfer);
}


@@ -1273,8 +1273,11 @@ static int mtk_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct mtk_spi *mdata = spi_master_get_devdata(master);
int ret;
pm_runtime_disable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
return ret;
mtk_spi_reset(mdata);
@@ -1283,6 +1286,9 @@ static int mtk_spi_remove(struct platform_device *pdev)
clk_unprepare(mdata->spi_hclk);
}
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}


@@ -924,8 +924,9 @@ static int tegra_qspi_start_transfer_one(struct spi_device *spi,
static struct tegra_qspi_client_data *tegra_qspi_parse_cdata_dt(struct spi_device *spi)
{
struct tegra_qspi_client_data *cdata;
struct tegra_qspi *tqspi = spi_master_get_devdata(spi->master);
cdata = devm_kzalloc(&spi->dev, sizeof(*cdata), GFP_KERNEL);
cdata = devm_kzalloc(tqspi->dev, sizeof(*cdata), GFP_KERNEL);
if (!cdata)
return NULL;


@@ -203,7 +203,11 @@ static struct fscache_volume *fscache_alloc_volume(const char *volume_key,
struct fscache_volume *volume;
struct fscache_cache *cache;
size_t klen, hlen;
char *key;
u8 *key;
klen = strlen(volume_key);
if (klen > NAME_MAX)
return NULL;
if (!coherency_data)
coherency_len = 0;
@@ -229,7 +233,6 @@ static struct fscache_volume *fscache_alloc_volume(const char *volume_key,
/* Stick the length on the front of the key and pad it out to make
* hashing easier.
*/
klen = strlen(volume_key);
hlen = round_up(1 + klen + 1, sizeof(__le32));
key = kzalloc(hlen, GFP_KERNEL);
if (!key)


@@ -75,7 +75,7 @@ struct fscache_volume {
atomic_t n_accesses; /* Number of cache accesses in progress */
unsigned int debug_id;
unsigned int key_hash; /* Hash of key string */
char *key; /* Volume ID, eg. "afs@example.com@1234" */
u8 *key; /* Volume ID, eg. "afs@example.com@1234" */
struct list_head proc_link; /* Link in /proc/fs/fscache/volumes */
struct hlist_bl_node hash_link; /* Link in hash table */
struct work_struct work;


@@ -100,7 +100,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table);
struct file *filp, poll_table *poll_table, int full);
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
#define RING_BUFFER_ALL_CPUS -1


@@ -26,13 +26,13 @@ struct trace_export {
int flags;
};
struct trace_array;
#ifdef CONFIG_TRACING
int register_ftrace_export(struct trace_export *export);
int unregister_ftrace_export(struct trace_export *export);
struct trace_array;
void trace_printk_init_buffers(void);
__printf(3, 4)
int trace_array_printk(struct trace_array *tr, unsigned long ip,


@@ -9306,14 +9306,27 @@ static int __perf_event_overflow(struct perf_event *event,
}
if (event->attr.sigtrap) {
/*
* Should not be able to return to user space without processing
* pending_sigtrap (kernel events can overflow multiple times).
*/
WARN_ON_ONCE(event->pending_sigtrap && event->attr.exclude_kernel);
unsigned int pending_id = 1;
if (regs)
pending_id = hash32_ptr((void *)instruction_pointer(regs)) ?: 1;
if (!event->pending_sigtrap) {
event->pending_sigtrap = 1;
event->pending_sigtrap = pending_id;
local_inc(&event->ctx->nr_pending);
} else if (event->attr.exclude_kernel) {
/*
* Should not be able to return to user space without
* consuming pending_sigtrap; with exceptions:
*
* 1. Where !exclude_kernel, events can overflow again
* in the kernel without returning to user space.
*
* 2. Events that can overflow again before the IRQ-
* work without user space progress (e.g. hrtimer).
* To approximate progress (with false negatives),
* check 32-bit hash of the current IP.
*/
WARN_ON_ONCE(event->pending_sigtrap != pending_id);
}
event->pending_addr = data->addr;
irq_work_queue(&event->pending_irq);


@@ -1766,7 +1766,13 @@ static int __unregister_kprobe_top(struct kprobe *p)
if ((list_p != p) && (list_p->post_handler))
goto noclean;
}
ap->post_handler = NULL;
/*
* For the kprobe-on-ftrace case, we keep the
* post_handler setting to identify this aggrprobe
* armed with kprobe_ipmodify_ops.
*/
if (!kprobe_ftrace(ap))
ap->post_handler = NULL;
}
noclean:
/*


@@ -171,12 +171,27 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
return 0;
}
static bool rseq_warn_flags(const char *str, u32 flags)
{
u32 test_flags;
if (!flags)
return false;
test_flags = flags & RSEQ_CS_NO_RESTART_FLAGS;
if (test_flags)
pr_warn_once("Deprecated flags (%u) in %s ABI structure", test_flags, str);
test_flags = flags & ~RSEQ_CS_NO_RESTART_FLAGS;
if (test_flags)
pr_warn_once("Unknown flags (%u) in %s ABI structure", test_flags, str);
return true;
}
static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
{
u32 flags, event_mask;
int ret;
if (WARN_ON_ONCE(cs_flags & RSEQ_CS_NO_RESTART_FLAGS) || cs_flags)
if (rseq_warn_flags("rseq_cs", cs_flags))
return -EINVAL;
/* Get thread flags. */
@@ -184,7 +199,7 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
if (ret)
return ret;
if (WARN_ON_ONCE(flags & RSEQ_CS_NO_RESTART_FLAGS) || flags)
if (rseq_warn_flags("rseq", flags))
return -EINVAL;
/*


@@ -4200,6 +4200,40 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
return success;
}
static bool __task_needs_rq_lock(struct task_struct *p)
{
unsigned int state = READ_ONCE(p->__state);
/*
* Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
* the task is blocked. Make sure to check @state since ttwu() can drop
* locks at the end, see ttwu_queue_wakelist().
*/
if (state == TASK_RUNNING || state == TASK_WAKING)
return true;
/*
* Ensure we load p->on_rq after p->__state, otherwise it would be
* possible to, falsely, observe p->on_rq == 0.
*
* See try_to_wake_up() for a longer comment.
*/
smp_rmb();
if (p->on_rq)
return true;
#ifdef CONFIG_SMP
/*
* Ensure the task has finished __schedule() and will not be referenced
* anymore. Again, see try_to_wake_up() for a longer comment.
*/
smp_rmb();
smp_cond_load_acquire(&p->on_cpu, !VAL);
#endif
return false;
}
/**
* task_call_func - Invoke a function on task in fixed state
* @p: Process for which the function is to be invoked, can be @current.
@@ -4217,28 +4251,12 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
int task_call_func(struct task_struct *p, task_call_f func, void *arg)
{
struct rq *rq = NULL;
unsigned int state;
struct rq_flags rf;
int ret;
raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
state = READ_ONCE(p->__state);
/*
* Ensure we load p->on_rq after p->__state, otherwise it would be
* possible to, falsely, observe p->on_rq == 0.
*
* See try_to_wake_up() for a longer comment.
*/
smp_rmb();
/*
* Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when
* the task is blocked. Make sure to check @state since ttwu() can drop
* locks at the end, see ttwu_queue_wakelist().
*/
if (state == TASK_RUNNING || state == TASK_WAKING || p->on_rq)
if (__task_needs_rq_lock(p))
rq = __task_rq_lock(p, &rf);
/*


@@ -1289,6 +1289,7 @@ static int ftrace_add_mod(struct trace_array *tr,
if (!ftrace_mod)
return -ENOMEM;
INIT_LIST_HEAD(&ftrace_mod->list);
ftrace_mod->func = kstrdup(func, GFP_KERNEL);
ftrace_mod->module = kstrdup(module, GFP_KERNEL);
ftrace_mod->enable = enable;
@@ -3190,7 +3191,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
/* if we can't allocate this size, try something smaller */
if (!order)
return -ENOMEM;
order >>= 1;
order--;
goto again;
}
@@ -7391,7 +7392,7 @@ void __init ftrace_init(void)
}
pr_info("ftrace: allocating %ld entries in %ld pages\n",
count, count / ENTRIES_PER_PAGE + 1);
count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
ret = ftrace_process_locs(NULL,
__start_mcount_loc,


@@ -73,6 +73,10 @@ static struct trace_event_file *gen_kretprobe_test;
#define KPROBE_GEN_TEST_ARG3 NULL
#endif
static bool trace_event_file_is_valid(struct trace_event_file *input)
{
return input && !IS_ERR(input);
}
/*
* Test to make sure we can create a kprobe event, then add more
@@ -139,6 +143,8 @@ static int __init test_gen_kprobe_cmd(void)
kfree(buf);
return ret;
delete:
if (trace_event_file_is_valid(gen_kprobe_test))
gen_kprobe_test = NULL;
/* We got an error after creating the event, delete it */
ret = kprobe_event_delete("gen_kprobe_test");
goto out;
@@ -202,6 +208,8 @@ static int __init test_gen_kretprobe_cmd(void)
kfree(buf);
return ret;
delete:
if (trace_event_file_is_valid(gen_kretprobe_test))
gen_kretprobe_test = NULL;
/* We got an error after creating the event, delete it */
ret = kprobe_event_delete("gen_kretprobe_test");
goto out;
@@ -217,10 +225,12 @@ static int __init kprobe_event_gen_test_init(void)
ret = test_gen_kretprobe_cmd();
if (ret) {
WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
"kprobes",
"gen_kretprobe_test", false));
trace_put_event_file(gen_kretprobe_test);
if (trace_event_file_is_valid(gen_kretprobe_test)) {
WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
"kprobes",
"gen_kretprobe_test", false));
trace_put_event_file(gen_kretprobe_test);
}
WARN_ON(kprobe_event_delete("gen_kretprobe_test"));
}
@@ -229,24 +239,30 @@ static int __init kprobe_event_gen_test_init(void)
static void __exit kprobe_event_gen_test_exit(void)
{
/* Disable the event or you can't remove it */
WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
"kprobes",
"gen_kprobe_test", false));
if (trace_event_file_is_valid(gen_kprobe_test)) {
/* Disable the event or you can't remove it */
WARN_ON(trace_array_set_clr_event(gen_kprobe_test->tr,
"kprobes",
"gen_kprobe_test", false));
/* Now give the file and instance back */
trace_put_event_file(gen_kprobe_test);
}
/* Now give the file and instance back */
trace_put_event_file(gen_kprobe_test);
/* Now unregister and free the event */
WARN_ON(kprobe_event_delete("gen_kprobe_test"));
/* Disable the event or you can't remove it */
WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
"kprobes",
"gen_kretprobe_test", false));
if (trace_event_file_is_valid(gen_kretprobe_test)) {
/* Disable the event or you can't remove it */
WARN_ON(trace_array_set_clr_event(gen_kretprobe_test->tr,
"kprobes",
"gen_kretprobe_test", false));
/* Now give the file and instance back */
trace_put_event_file(gen_kretprobe_test);
}
/* Now give the file and instance back */
trace_put_event_file(gen_kretprobe_test);
/* Now unregister and free the event */
WARN_ON(kprobe_event_delete("gen_kretprobe_test"));


@@ -83,8 +83,10 @@ struct rethook *rethook_alloc(void *data, rethook_handler_t handler)
{
struct rethook *rh = kzalloc(sizeof(struct rethook), GFP_KERNEL);
if (!rh || !handler)
if (!rh || !handler) {
kfree(rh);
return NULL;
}
rh->data = data;
rh->handler = handler;


@@ -519,6 +519,7 @@ struct ring_buffer_per_cpu {
local_t committing;
local_t commits;
local_t pages_touched;
local_t pages_lost;
local_t pages_read;
long last_pages_touch;
size_t shortest_full;
@@ -894,10 +895,18 @@ size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
{
size_t read;
size_t lost;
size_t cnt;
read = local_read(&buffer->buffers[cpu]->pages_read);
lost = local_read(&buffer->buffers[cpu]->pages_lost);
cnt = local_read(&buffer->buffers[cpu]->pages_touched);
if (WARN_ON_ONCE(cnt < lost))
return 0;
cnt -= lost;
/* The reader can read an empty page, but not more than that */
if (cnt < read) {
WARN_ON_ONCE(read > cnt + 1);
@@ -907,6 +916,21 @@ size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
return cnt - read;
}
static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
size_t nr_pages;
size_t dirty;
nr_pages = cpu_buffer->nr_pages;
if (!nr_pages || !full)
return true;
dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
return (dirty * 100) > (full * nr_pages);
}
/*
* rb_wake_up_waiters - wake up tasks waiting for ring buffer input
*
@@ -1046,22 +1070,20 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
!ring_buffer_empty_cpu(buffer, cpu)) {
unsigned long flags;
bool pagebusy;
size_t nr_pages;
size_t dirty;
bool done;
if (!full)
break;
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
nr_pages = cpu_buffer->nr_pages;
dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
done = !pagebusy && full_hit(buffer, cpu, full);
if (!cpu_buffer->shortest_full ||
cpu_buffer->shortest_full > full)
cpu_buffer->shortest_full = full;
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
if (!pagebusy &&
(!nr_pages || (dirty * 100) > full * nr_pages))
if (done)
break;
}
@@ -1087,6 +1109,7 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
* @cpu: the cpu buffer to wait on
* @filp: the file descriptor
* @poll_table: The poll descriptor
* @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
*
* If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
* as data is added to any of the @buffer's cpu buffers. Otherwise
@@ -1096,14 +1119,15 @@ int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
* zero otherwise.
*/
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table)
struct file *filp, poll_table *poll_table, int full)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct rb_irq_work *work;
if (cpu == RING_BUFFER_ALL_CPUS)
if (cpu == RING_BUFFER_ALL_CPUS) {
work = &buffer->irq_work;
else {
full = 0;
} else {
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return -EINVAL;
@@ -1111,8 +1135,14 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
work = &cpu_buffer->irq_work;
}
poll_wait(filp, &work->waiters, poll_table);
work->waiters_pending = true;
if (full) {
poll_wait(filp, &work->full_waiters, poll_table);
work->full_waiters_pending = true;
} else {
poll_wait(filp, &work->waiters, poll_table);
work->waiters_pending = true;
}
/*
* There's a tight race between setting the waiters_pending and
* checking if the ring buffer is empty. Once the waiters_pending bit
@@ -1128,6 +1158,9 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
*/
smp_mb();
if (full)
return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;
if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
(cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
return EPOLLIN | EPOLLRDNORM;
@@ -1769,9 +1802,9 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
free_buffer_page(cpu_buffer->reader_page);
rb_head_page_deactivate(cpu_buffer);
if (head) {
rb_head_page_deactivate(cpu_buffer);
list_for_each_entry_safe(bpage, tmp, head, list) {
list_del_init(&bpage->list);
free_buffer_page(bpage);
@@ -2007,6 +2040,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
*/
local_add(page_entries, &cpu_buffer->overrun);
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
local_inc(&cpu_buffer->pages_lost);
}
/*
@@ -2491,6 +2525,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
*/
local_add(entries, &cpu_buffer->overrun);
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
local_inc(&cpu_buffer->pages_lost);
/*
* The entries will be zeroed out when we move the
@@ -3155,10 +3190,6 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
static __always_inline void
rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
{
size_t nr_pages;
size_t dirty;
size_t full;
if (buffer->irq_work.waiters_pending) {
buffer->irq_work.waiters_pending = false;
/* irq_work_queue() supplies it's own memory barriers */
@@ -3182,10 +3213,7 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
full = cpu_buffer->shortest_full;
nr_pages = cpu_buffer->nr_pages;
dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
if (full && nr_pages && (dirty * 100) <= full * nr_pages)
if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
return;
cpu_buffer->irq_work.wakeup_full = true;
@@ -5248,6 +5276,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
local_set(&cpu_buffer->committing, 0);
local_set(&cpu_buffer->commits, 0);
local_set(&cpu_buffer->pages_touched, 0);
local_set(&cpu_buffer->pages_lost, 0);
local_set(&cpu_buffer->pages_read, 0);
cpu_buffer->last_pages_touch = 0;
cpu_buffer->shortest_full = 0;


@@ -120,15 +120,13 @@ static int __init test_gen_synth_cmd(void)
/* Now generate a gen_synth_test event */
ret = synth_event_trace_array(gen_synth_test, vals, ARRAY_SIZE(vals));
out:
free:
kfree(buf);
return ret;
delete:
/* We got an error after creating the event, delete it */
synth_event_delete("gen_synth_test");
free:
kfree(buf);
goto out;
goto free;
}
/*
@@ -227,15 +225,13 @@ static int __init test_empty_synth_event(void)
/* Now trace an empty_synth_test event */
ret = synth_event_trace_array(empty_synth_test, vals, ARRAY_SIZE(vals));
out:
free:
kfree(buf);
return ret;
delete:
/* We got an error after creating the event, delete it */
synth_event_delete("empty_synth_test");
free:
kfree(buf);
goto out;
goto free;
}
static struct synth_field_desc create_synth_test_fields[] = {


@@ -6657,6 +6657,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
mutex_unlock(&trace_types_lock);
free_cpumask_var(iter->started);
kfree(iter->fmt);
mutex_destroy(&iter->mutex);
kfree(iter);
@@ -6681,7 +6682,7 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_tabl
return EPOLLIN | EPOLLRDNORM;
else
return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
filp, poll_table);
filp, poll_table, iter->tr->buffer_percent);
}
static __poll_t
@@ -7802,6 +7803,7 @@ static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
int len)
{
struct tracing_log_err *err;
char *cmd;
if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
err = alloc_tracing_log_err(len);
@@ -7810,12 +7812,12 @@ static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
return err;
}
cmd = kzalloc(len, GFP_KERNEL);
if (!cmd)
return ERR_PTR(-ENOMEM);
err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
kfree(err->cmd);
err->cmd = kzalloc(len, GFP_KERNEL);
if (!err->cmd)
return ERR_PTR(-ENOMEM);
err->cmd = cmd;
list_del(&err->list);
return err;


@@ -52,6 +52,7 @@ static void trace_event_probe_cleanup(struct trace_eprobe *ep)
kfree(ep->event_system);
if (ep->event)
trace_event_put_ref(ep->event);
kfree(ep->filter_str);
kfree(ep);
}
@@ -563,6 +564,9 @@ static void eprobe_trigger_func(struct event_trigger_data *data,
{
struct eprobe_data *edata = data->private_data;
if (unlikely(!rec))
return;
__eprobe_trace_func(edata, rec);
}
@@ -642,7 +646,7 @@ new_eprobe_trigger(struct trace_eprobe *ep, struct trace_event_file *file)
INIT_LIST_HEAD(&trigger->list);
if (ep->filter_str) {
ret = create_event_filter(file->tr, file->event_call,
ret = create_event_filter(file->tr, ep->event,
ep->filter_str, false, &filter);
if (ret)
goto error;
@@ -900,7 +904,7 @@ static int trace_eprobe_tp_update_arg(struct trace_eprobe *ep, const char *argv[
static int trace_eprobe_parse_filter(struct trace_eprobe *ep, int argc, const char *argv[])
{
struct event_filter *dummy;
struct event_filter *dummy = NULL;
int i, ret, len = 0;
char *p;


@@ -828,10 +828,9 @@ static int register_synth_event(struct synth_event *event)
}
ret = set_synth_event_print_fmt(call);
if (ret < 0) {
/* unregister_trace_event() will be called inside */
if (ret < 0)
trace_remove_event_call(call);
goto err;
}
out:
return ret;
err:


@@ -201,8 +201,6 @@ print_syscall_exit(struct trace_iterator *iter, int flags,
return trace_handle_return(s);
}
extern char *__bad_type_size(void);
#define SYSCALL_FIELD(_type, _name) { \
.type = #_type, .name = #_name, \
.size = sizeof(_type), .align = __alignof__(_type), \


@@ -120,7 +120,7 @@ struct p9_conn {
struct list_head unsent_req_list;
struct p9_req_t *rreq;
struct p9_req_t *wreq;
char tmp_buf[7];
char tmp_buf[P9_HDRSZ];
struct p9_fcall rc;
int wpos;
int wsize;
@@ -202,9 +202,11 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
list_move(&req->req_list, &cancel_list);
req->status = REQ_STATUS_ERROR;
}
list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
list_move(&req->req_list, &cancel_list);
req->status = REQ_STATUS_ERROR;
}
spin_unlock(&m->req_lock);
@@ -291,7 +293,7 @@ static void p9_read_work(struct work_struct *work)
if (!m->rc.sdata) {
m->rc.sdata = m->tmp_buf;
m->rc.offset = 0;
m->rc.capacity = 7; /* start by reading header */
m->rc.capacity = P9_HDRSZ; /* start by reading header */
}
clear_bit(Rpending, &m->wsched);
@@ -314,7 +316,7 @@ static void p9_read_work(struct work_struct *work)
p9_debug(P9_DEBUG_TRANS, "got new header\n");
/* Header size */
m->rc.size = 7;
m->rc.size = P9_HDRSZ;
err = p9_parse_header(&m->rc, &m->rc.size, NULL, NULL, 0);
if (err) {
p9_debug(P9_DEBUG_ERROR,
@@ -322,14 +324,6 @@ static void p9_read_work(struct work_struct *work)
goto error;
}
if (m->rc.size >= m->client->msize) {
p9_debug(P9_DEBUG_ERROR,
"requested packet size too big: %d\n",
m->rc.size);
err = -EIO;
goto error;
}
p9_debug(P9_DEBUG_TRANS,
"mux %p pkt: size: %d bytes tag: %d\n",
m, m->rc.size, m->rc.tag);
@@ -342,6 +336,14 @@ static void p9_read_work(struct work_struct *work)
goto error;
}
if (m->rc.size > m->rreq->rc.capacity) {
p9_debug(P9_DEBUG_ERROR,
"requested packet size too big: %d for tag %d with capacity %zd\n",
m->rc.size, m->rc.tag, m->rreq->rc.capacity);
err = -EIO;
goto error;
}
if (!m->rreq->rc.sdata) {
p9_debug(P9_DEBUG_ERROR,
"No recv fcall for tag %d (req %p), disconnecting!\n",


@@ -208,6 +208,14 @@ static void p9_xen_response(struct work_struct *work)
continue;
}
if (h.size > req->rc.capacity) {
dev_warn(&priv->dev->dev,
"requested packet size too big: %d for tag %d with capacity %zd\n",
h.size, h.tag, req->rc.capacity);
req->status = REQ_STATUS_ERROR;
goto recv_error;
}
memcpy(&req->rc, &h, sizeof(h));
req->rc.offset = 0;
@@ -217,6 +225,7 @@ static void p9_xen_response(struct work_struct *work)
masked_prod, &masked_cons,
XEN_9PFS_RING_SIZE(ring));
recv_error:
virt_mb();
cons += h.size;
ring->intf->in_cons = cons;