/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
*/
#ifndef _LINUX_IOPOLL_H
#define _LINUX_IOPOLL_H
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
/**
 * read_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
 * @op: accessor function (takes @args as its arguments)
 * @val: Variable to read the value into
 * @cond: Break condition (usually involving @val)
 * @sleep_us: Maximum time to sleep between reads in us (0 means tight-loop).
 *            Should be less than ~20ms since usleep_range() is used
 *            (see Documentation/timers/timers-howto.rst).
 * @timeout_us: Timeout in us, 0 means never timeout
 * @sleep_before_read: if true, sleep @sleep_us before the first read
 * @args: arguments for @op
 *
 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
 * case, the last read value at @args is stored in @val. Must not
 * be called from atomic context if sleep_us or timeout_us are used.
 *
 * When available, you'll probably want to use one of the specialized
 * macros defined below rather than this macro directly.
 */
#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \
				sleep_before_read, args...) \
({ \
	u64 __timeout_us = (timeout_us); \
	unsigned long __sleep_us = (sleep_us); \
	ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
	might_sleep_if((__sleep_us) != 0); \
	if (sleep_before_read && __sleep_us) \
		usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
	for (;;) { \
		(val) = op(args); \
		if (cond) \
			break; \
		if (__timeout_us && \
		    ktime_compare(ktime_get(), __timeout) > 0) { \
			(val) = op(args); \
			break; \
		} \
		if (__sleep_us) \
			usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
		cpu_relax(); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
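
/*
 * Example usage (an illustrative sketch, not part of this API: the
 * foo_* and FOO_* names are hypothetical): poll a driver-private
 * accessor until a ready bit is set, sleeping about 1 ms between
 * reads and giving up after 100 ms. On timeout ret is -ETIMEDOUT and
 * status holds the last value read:
 *
 *	static u32 foo_read_status(struct foo_priv *priv)
 *	{
 *		return readl(priv->base + FOO_STATUS);
 *	}
 *
 *	u32 status;
 *	int ret;
 *
 *	ret = read_poll_timeout(foo_read_status, status,
 *				status & FOO_STATUS_READY,
 *				1000, 100000, false, priv);
 */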
/**
 * read_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs
 * @op: accessor function (takes @args as its arguments)
 * @val: Variable to read the value into
 * @cond: Break condition (usually involving @val)
 * @delay_us: Time to udelay between reads in us (0 means tight-loop).
 *            Should be less than ~10us since udelay() is used
 *            (see Documentation/timers/timers-howto.rst).
 * @timeout_us: Timeout in us, 0 means never timeout
 * @delay_before_read: if true, delay @delay_us before the first read
 * @args: arguments for @op
 *
 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
 * case, the last read value at @args is stored in @val.
 *
 * This macro does not rely on timekeeping. Hence it is safe to call even when
 * timekeeping is suspended, at the expense of an underestimation of wall clock
 * time, which is rather minimal with a non-zero delay_us.
 *
 * When available, you'll probably want to use one of the specialized
 * macros defined below rather than this macro directly.
 */
#define read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, \
					delay_before_read, args...) \
({ \
	u64 __timeout_us = (timeout_us); \
	s64 __left_ns = __timeout_us * NSEC_PER_USEC; \
	unsigned long __delay_us = (delay_us); \
	u64 __delay_ns = __delay_us * NSEC_PER_USEC; \
	if (delay_before_read && __delay_us) { \
		udelay(__delay_us); \
		if (__timeout_us) \
			__left_ns -= __delay_ns; \
	} \
	for (;;) { \
		(val) = op(args); \
		if (cond) \
			break; \
		if (__timeout_us && __left_ns < 0) { \
			(val) = op(args); \
			break; \
		} \
		if (__delay_us) { \
			udelay(__delay_us); \
			if (__timeout_us) \
				__left_ns -= __delay_ns; \
		} \
		cpu_relax(); \
		if (__timeout_us) \
			__left_ns--; \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
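
/*
 * Example usage (an illustrative sketch; priv, FOO_CMD_STATUS and
 * FOO_CMD_DONE are hypothetical): from atomic context, wait up to
 * 100 us for a command-done bit, delaying 2 us between reads. On
 * timeout ret is -ETIMEDOUT and reg holds the last value read:
 *
 *	u32 reg;
 *	int ret;
 *
 *	ret = read_poll_timeout_atomic(readl, reg, reg & FOO_CMD_DONE,
 *				       2, 100, false,
 *				       priv->base + FOO_CMD_STATUS);
 */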
/**
 * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
 * @op: accessor function (takes @addr as its only argument)
 * @addr: Address to poll
 * @val: Variable to read the value into
 * @cond: Break condition (usually involving @val)
 * @sleep_us: Maximum time to sleep between reads in us (0 means tight-loop).
 *            Should be less than ~20ms since usleep_range() is used
 *            (see Documentation/timers/timers-howto.rst).
 * @timeout_us: Timeout in us, 0 means never timeout
 *
 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
 * case, the last read value at @addr is stored in @val. Must not
 * be called from atomic context if sleep_us or timeout_us are used.
 *
 * When available, you'll probably want to use one of the specialized
 * macros defined below rather than this macro directly.
 */
#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us)	\
	read_poll_timeout(op, val, cond, sleep_us, timeout_us, false, addr)
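
/*
 * Example usage (an illustrative sketch; base and the FOO_* names are
 * hypothetical): readx_poll_timeout() passes @addr as the only
 * argument to @op, so any single-argument accessor works, e.g.
 * ioread32():
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = readx_poll_timeout(ioread32, base + FOO_STATUS, val,
 *				 val & FOO_READY, 1000, 100000);
 */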
/**
 * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs
 * @op: accessor function (takes @addr as its only argument)
 * @addr: Address to poll
 * @val: Variable to read the value into
 * @cond: Break condition (usually involving @val)
 * @delay_us: Time to udelay between reads in us (0 means tight-loop).
 *            Should be less than ~10us since udelay() is used
 *            (see Documentation/timers/timers-howto.rst).
 * @timeout_us: Timeout in us, 0 means never timeout
 *
 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
 * case, the last read value at @addr is stored in @val.
 *
 * When available, you'll probably want to use one of the specialized
 * macros defined below rather than this macro directly.
 */
#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
	read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, false, addr)
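
/*
 * Example usage (an illustrative sketch; foo_get_state() and FOO_ON
 * are hypothetical): @addr need not be an MMIO address, it is simply
 * forwarded to @op, so a driver-private handle works too. Tight-loop
 * (delay_us == 0) for at most 10 us:
 *
 *	u32 state;
 *	int ret;
 *
 *	ret = readx_poll_timeout_atomic(foo_get_state, priv, state,
 *					state == FOO_ON, 0, 10);
 */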
#define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us)

#define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us)

#define readw_poll_timeout(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout(readw, addr, val, cond, delay_us, timeout_us)

#define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us)

#define readl_poll_timeout(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout(readl, addr, val, cond, delay_us, timeout_us)

#define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us)

#define readq_poll_timeout(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout(readq, addr, val, cond, delay_us, timeout_us)

#define readq_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout_atomic(readq, addr, val, cond, delay_us, timeout_us)

#define readb_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout(readb_relaxed, addr, val, cond, delay_us, timeout_us)

#define readb_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout_atomic(readb_relaxed, addr, val, cond, delay_us, timeout_us)

#define readw_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout(readw_relaxed, addr, val, cond, delay_us, timeout_us)

#define readw_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout_atomic(readw_relaxed, addr, val, cond, delay_us, timeout_us)

#define readl_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout(readl_relaxed, addr, val, cond, delay_us, timeout_us)

#define readl_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout_atomic(readl_relaxed, addr, val, cond, delay_us, timeout_us)

#define readq_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout(readq_relaxed, addr, val, cond, delay_us, timeout_us)

#define readq_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
	readx_poll_timeout_atomic(readq_relaxed, addr, val, cond, delay_us, timeout_us)
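
/*
 * Example usage (an illustrative sketch; base and the FOO_* names are
 * hypothetical): most callers use one of the sized wrappers above,
 * e.g. waiting up to 10 ms for a reset bit to self-clear, polling
 * every 10 us:
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = readl_poll_timeout(base + FOO_RESET, val,
 *				 !(val & FOO_RESET_ACTIVE), 10, 10000);
 */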
#endif /* _LINUX_IOPOLL_H */