Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-10-31 16:38:12 +00:00)
commit 4e5cc99e1e
First, the code becomes cleaner by switching from a plain array to an xarray. Second, the use-after-free on q->queue_hw_ctx can be fixed, because queue_for_each_hw_ctx() may run while an update of nr_hw_queues is in progress. With this patch, q->hctx_table is defined as an xarray that shares its lifetime with the request queue, so queue_for_each_hw_ctx() can use q->hctx_table to look up hctx reliably.

Reported-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20220308073219.91173-7-ming.lei@redhat.com
[axboe: fix blk_mq_hw_ctx forward declaration]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
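For illustration of the pattern the commit describes, and not the actual kernel patch, here is a minimal sketch of keeping hardware-context pointers in an xarray and walking them with xa_for_each(). Because the xarray is embedded in the queue structure, it shares the queue's lifetime, which is what makes the lookup safe while nr_hw_queues is being updated. The demo_* names are hypothetical; only the hctx_table name comes from the commit text, and the xa_*() calls are the kernel xarray API.

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/* Hypothetical, simplified stand-ins for the real blk-mq structures. */
struct demo_hw_ctx {
	unsigned int queue_num;
};

struct demo_queue {
	struct xarray hctx_table;	/* hw queue index -> struct demo_hw_ctx * */
};

static void demo_queue_init(struct demo_queue *q)
{
	xa_init(&q->hctx_table);
}

static int demo_add_hctx(struct demo_queue *q, unsigned int i)
{
	struct demo_hw_ctx *hctx = kzalloc(sizeof(*hctx), GFP_KERNEL);

	if (!hctx)
		return -ENOMEM;
	hctx->queue_num = i;
	/* xa_insert() fails with -EBUSY if the index is already in use. */
	return xa_insert(&q->hctx_table, i, hctx, GFP_KERNEL);
}

/* Walk every stored hctx, in the spirit of queue_for_each_hw_ctx(). */
static void demo_walk_hctxs(struct demo_queue *q)
{
	struct demo_hw_ctx *hctx;
	unsigned long i;

	xa_for_each(&q->hctx_table, i, hctx)
		pr_info("hctx %lu: queue_num %u\n", i, hctx->queue_num);
}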
105 lines
2.7 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_DEBUGFS_H
#define INT_BLK_MQ_DEBUGFS_H

#ifdef CONFIG_BLK_DEBUG_FS

#include <linux/seq_file.h>

struct blk_mq_hw_ctx;

struct blk_mq_debugfs_attr {
	const char *name;
	umode_t mode;
	int (*show)(void *, struct seq_file *);
	ssize_t (*write)(void *, const char __user *, size_t, loff_t *);
	/* Set either .show or .seq_ops. */
	const struct seq_operations *seq_ops;
};
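
/*
 * Illustrative only (not part of the original header): .show and .seq_ops are
 * alternatives, so a hypothetical attribute table built on this struct could
 * look like the following (the demo_* names are made up):
 *
 *	static int demo_state_show(void *data, struct seq_file *m)
 *	{
 *		seq_puts(m, "ok\n");
 *		return 0;
 *	}
 *
 *	static const struct blk_mq_debugfs_attr demo_attrs[] = {
 *		{ "state", 0400, demo_state_show },
 *		{ },
 *	};
 */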

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq);
int blk_mq_debugfs_rq_show(struct seq_file *m, void *v);

void blk_mq_debugfs_register(struct request_queue *q);
void blk_mq_debugfs_unregister(struct request_queue *q);
void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_register_hctxs(struct request_queue *q);
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);

void blk_mq_debugfs_register_sched(struct request_queue *q);
void blk_mq_debugfs_unregister_sched(struct request_queue *q);
void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos);
void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos);
void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q);
#else
static inline void blk_mq_debugfs_register(struct request_queue *q)
{
}

static inline void blk_mq_debugfs_unregister(struct request_queue *q)
{
}

static inline void blk_mq_debugfs_register_hctx(struct request_queue *q,
						 struct blk_mq_hw_ctx *hctx)
{
}

static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
}

static inline void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
}

static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
}

static inline void blk_mq_debugfs_register_sched(struct request_queue *q)
{
}

static inline void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
}

static inline void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
						       struct blk_mq_hw_ctx *hctx)
{
}

static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
}

static inline void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
}

static inline void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
}

static inline void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
{
}
#endif
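
/*
 * Note (illustrative, not part of the original header): because the
 * !CONFIG_BLK_DEBUG_FS branch above provides empty static inline stubs,
 * callers may invoke these helpers unconditionally; for example, a
 * hypothetical call such as
 *
 *	blk_mq_debugfs_register(q);
 *
 * simply compiles to nothing when debugfs support is disabled.
 */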

#ifdef CONFIG_BLK_DEBUG_FS_ZONED
int queue_zone_wlock_show(void *data, struct seq_file *m);
#else
static inline int queue_zone_wlock_show(void *data, struct seq_file *m)
{
	return 0;
}
#endif

#endif