mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-30 06:10:56 +00:00
bpf: Fix racing between bpf_timer_cancel_and_free and bpf_timer_cancel
[ Upstream commit 0281b919e1
] The following race is possible between bpf_timer_cancel_and_free and bpf_timer_cancel. It will lead to a UAF on the timer->timer. bpf_timer_cancel(); spin_lock(); t = timer->timer; spin_unlock(); bpf_timer_cancel_and_free(); spin_lock(); t = timer->timer; timer->timer = NULL; spin_unlock(); hrtimer_cancel(&t->timer); kfree(t); /* UAF on t */ hrtimer_cancel(&t->timer); In bpf_timer_cancel_and_free, this patch frees the timer->timer after a rcu grace period. This requires a rcu_head addition to the "struct bpf_hrtimer". Another kfree(t) happens in bpf_timer_init, this does not need a kfree_rcu because it is still under the spin_lock and timer->timer has not been visible by others yet. In bpf_timer_cancel, rcu_read_lock() is added because this helper can be used in a non rcu critical section context (e.g. from a sleepable bpf prog). Other timer->timer usages in helpers.c have been audited, bpf_timer_cancel() is the only place where timer->timer is used outside of the spin_lock. Another solution considered is to mark a t->flag in bpf_timer_cancel and clear it after hrtimer_cancel() is done. In bpf_timer_cancel_and_free, it busy waits for the flag to be cleared before kfree(t). This patch goes with a straightforward solution and frees timer->timer after a rcu grace period. Fixes: b00628b1c7
("bpf: Introduce bpf timers.") Suggested-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Acked-by: Hou Tao <houtao1@huawei.com> Link: https://lore.kernel.org/bpf/20240215211218.990808-1-martin.lau@linux.dev Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
parent
d4c58764da
commit
8327ed12e8
1 changed file with 4 additions and 1 deletion
|
@@ -1100,6 +1100,7 @@ struct bpf_hrtimer {
 	struct bpf_prog *prog;
 	void __rcu *callback_fn;
 	void *value;
+	struct rcu_head rcu;
 };

 /* the actual struct hidden inside uapi struct bpf_timer */
@@ -1328,6 +1329,7 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)

 	if (in_nmi())
 		return -EOPNOTSUPP;
+	rcu_read_lock();
 	__bpf_spin_lock_irqsave(&timer->lock);
 	t = timer->timer;
 	if (!t) {
@@ -1349,6 +1351,7 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
 	 * if it was running.
 	 */
 	ret = ret ?: hrtimer_cancel(&t->timer);
+	rcu_read_unlock();
 	return ret;
 }

@@ -1403,7 +1406,7 @@ void bpf_timer_cancel_and_free(void *val)
 	 */
 	if (this_cpu_read(hrtimer_running) != t)
 		hrtimer_cancel(&t->timer);
-	kfree(t);
+	kfree_rcu(t, rcu);
 }

 BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
Loading…
Reference in a new issue