block-6.9-20240412

Merge tag 'block-6.9-20240412' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - MD pull request via Song:
       - UAF fix (Yu)

 - Avoid out-of-bounds shift in blk-iocost (Rik)

 - Fix for q->blkg_list corruption (Ming)

 - Relax virt boundary mask/size segment checking (Ming)

* tag 'block-6.9-20240412' of git://git.kernel.dk/linux:
  block: fix that blk_time_get_ns() doesn't update time after schedule
  block: allow device to have both virt_boundary_mask and max segment size
  block: fix q->blkg_list corruption during disk rebind
  blk-iocost: avoid out of bounds shift
  raid1: fix use-after-free for original bio in raid1_write_request()
commit d7ad058156 (Linus Torvalds, 2024-04-12 10:22:33 -07:00)
6 files changed, 22 insertions(+), 17 deletions(-)

block/blk-cgroup.c

@@ -1409,6 +1409,12 @@ static int blkcg_css_online(struct cgroup_subsys_state *css)
 	return 0;
 }
 
+void blkg_init_queue(struct request_queue *q)
+{
+	INIT_LIST_HEAD(&q->blkg_list);
+	mutex_init(&q->blkcg_mutex);
+}
+
 int blkcg_init_disk(struct gendisk *disk)
 {
 	struct request_queue *q = disk->queue;
@@ -1416,9 +1422,6 @@ int blkcg_init_disk(struct gendisk *disk)
 	bool preloaded;
 	int ret;
 
-	INIT_LIST_HEAD(&q->blkg_list);
-	mutex_init(&q->blkcg_mutex);
-
 	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
 	if (!new_blkg)
 		return -ENOMEM;
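
What the move buys: blkcg_init_disk() runs again on every disk rebind, so initializing q->blkg_list there re-ran INIT_LIST_HEAD() on a list that could still hold blkgs, while blkg_init_queue() runs once per queue allocation. A minimal userspace sketch of why re-initializing a live list head corrupts it (hand-rolled list and made-up names, not the kernel's code):

#include <stdio.h>

struct node { struct node *prev, *next; };

static void list_init(struct node *head) { head->prev = head->next = head; }

static void list_add(struct node *head, struct node *n)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

int main(void)
{
	struct node head, blkg;

	list_init(&head);	/* queue allocated: init exactly once */
	list_add(&head, &blkg);	/* an entry joins the list */

	list_init(&head);	/* rebind re-runs the init... */

	/* ...the head now reads empty, but the entry still points at it */
	printf("head empty: %d, stale links dangle: %d\n",
	       head.next == &head, blkg.next == &head);
	return 0;
}

Walks from the head now miss the entry entirely, and any later unlink through the entry's stale pointers writes into the re-initialized head.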

block/blk-cgroup.h

@@ -189,6 +189,7 @@ struct blkcg_policy {
 extern struct blkcg blkcg_root;
 extern bool blkcg_debug_stats;
 
+void blkg_init_queue(struct request_queue *q);
 int blkcg_init_disk(struct gendisk *disk);
 void blkcg_exit_disk(struct gendisk *disk);
@@ -482,6 +483,7 @@ struct blkcg {
 };
 
 static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
+static inline void blkg_init_queue(struct request_queue *q) { }
 static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
 static inline void blkcg_exit_disk(struct gendisk *disk) { }
 static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }

block/blk-core.c

@@ -442,6 +442,8 @@ struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
 	init_waitqueue_head(&q->mq_freeze_wq);
 	mutex_init(&q->mq_freeze_lock);
 
+	blkg_init_queue(q);
+
 	/*
 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
 	 * See blk_register_queue() for details.
@@ -1195,6 +1197,7 @@ void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 	if (unlikely(!rq_list_empty(plug->cached_rq)))
 		blk_mq_free_plug_rqs(plug);
 
+	plug->cur_ktime = 0;
 	current->flags &= ~PF_BLOCK_TS;
 }
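
The second hunk is the blk_time_get_ns() fix: blk_time_get_ns() keeps returning plug->cur_ktime while it is nonzero, and PF_BLOCK_TS is what tells the scheduler to invalidate that stamp after the task runs again. Since __blk_flush_plug() already cleared the flag, a stamp left in cur_ktime survived schedule() and stayed stale; the added line drops the stamp at flush time. A loose userspace sketch of the pattern (stand-in names, not the kernel's APIs):

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct plug { uint64_t cur_ktime; };
static int block_ts;			/* stand-in for PF_BLOCK_TS */

static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* blk_time_get_ns()-like: cache the stamp, flag it for invalidation */
static uint64_t time_get_ns(struct plug *plug)
{
	if (!plug->cur_ktime) {
		plug->cur_ktime = now_ns();
		block_ts = 1;
	}
	return plug->cur_ktime;
}

/* __blk_flush_plug()-like: the fix adds the cur_ktime reset */
static void flush_plug(struct plug *plug, int fixed)
{
	if (fixed)
		plug->cur_ktime = 0;
	block_ts = 0;
}

/* post-schedule hook: only invalidates when the flag survived */
static void post_schedule(struct plug *plug)
{
	if (block_ts)
		plug->cur_ktime = 0;
	block_ts = 0;
}

int main(void)
{
	struct plug p = { 0 };
	uint64_t before = time_get_ns(&p);

	flush_plug(&p, 0);		/* unfixed flush on schedule entry */
	usleep(10000);			/* the task sleeps */
	post_schedule(&p);		/* flag already clear: no-op */
	printf("stale without fix: %d\n", time_get_ns(&p) == before);

	flush_plug(&p, 1);		/* fixed flush drops the stamp too */
	printf("fresh with fix:   %d\n", time_get_ns(&p) > before);
	return 0;
}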

block/blk-iocost.c

@@ -1347,7 +1347,7 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
 {
 	struct ioc *ioc = iocg->ioc;
 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
-	u64 tdelta, delay, new_delay;
+	u64 tdelta, delay, new_delay, shift;
 	s64 vover, vover_pct;
 	u32 hwa;
@@ -1362,8 +1362,9 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
 	/* calculate the current delay in effect - 1/2 every second */
 	tdelta = now->now - iocg->delay_at;
-	if (iocg->delay)
-		delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
+	shift = div64_u64(tdelta, USEC_PER_SEC);
+	if (iocg->delay && shift < BITS_PER_LONG)
+		delay = iocg->delay >> shift;
 	else
 		delay = 0;
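
Context for the guard: shifting a 64-bit value by 64 or more is undefined behavior in C, and on x86 the hardware masks the shift count, so an unguarded iocg->delay >> shift with a large shift can return a stale nonzero delay instead of the intended zero. A standalone sketch of the decay with the bound check (illustrative helper, not the kernel function):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG 64
#define USEC_PER_SEC  1000000ull

/* halve the delay once per elapsed second, saturating to zero */
static uint64_t decayed_delay(uint64_t delay, uint64_t tdelta_us)
{
	uint64_t shift = tdelta_us / USEC_PER_SEC;

	/* without the bound check, shift >= 64 is undefined behavior */
	if (delay && shift < BITS_PER_LONG)
		return delay >> shift;
	return 0;
}

int main(void)
{
	/*
	 * Idle for 100s means 100 halvings, i.e. no delay left; on x86 an
	 * unguarded 'delay >> 100' would shift by 100 % 64 = 36 instead.
	 */
	printf("%llu\n", (unsigned long long)
	       decayed_delay(1ull << 40, 100 * USEC_PER_SEC));
	return 0;
}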

block/blk-settings.c

@@ -182,17 +182,13 @@ static int blk_validate_limits(struct queue_limits *lim)
 		return -EINVAL;
 
 	/*
-	 * Devices that require a virtual boundary do not support scatter/gather
-	 * I/O natively, but instead require a descriptor list entry for each
-	 * page (which might not be identical to the Linux PAGE_SIZE). Because
-	 * of that they are not limited by our notion of "segment size".
+	 * Stacking device may have both virtual boundary and max segment
+	 * size limit, so allow this setting now, and long-term the two
+	 * might need to move out of stacking limits since we have immutable
+	 * bvec and lower layer bio splitting is supposed to handle the two
+	 * correctly.
 	 */
-	if (lim->virt_boundary_mask) {
-		if (WARN_ON_ONCE(lim->max_segment_size &&
-				 lim->max_segment_size != UINT_MAX))
-			return -EINVAL;
-		lim->max_segment_size = UINT_MAX;
-	} else {
+	if (!lim->virt_boundary_mask) {
 		/*
 		 * The maximum segment size has an odd historic 64k default that
 		 * drivers probably should override. Just like the I/O size we
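
The semantic change, compressed: a queue with a virt_boundary_mask previously had any explicit max_segment_size rejected with a WARN and -EINVAL and was otherwise forced to UINT_MAX; now the two limits may coexist, which stacking devices can legitimately produce. A simplified stand-in for the two behaviors (not blk_validate_limits() itself):

#include <limits.h>
#include <stdio.h>

struct queue_limits {
	unsigned long virt_boundary_mask;
	unsigned int  max_segment_size;
};

static int validate_old(struct queue_limits *lim)
{
	if (lim->virt_boundary_mask) {
		if (lim->max_segment_size &&
		    lim->max_segment_size != UINT_MAX)
			return -1;		/* rejected */
		lim->max_segment_size = UINT_MAX;
	}
	return 0;
}

static int validate_new(struct queue_limits *lim)
{
	if (!lim->virt_boundary_mask && !lim->max_segment_size)
		lim->max_segment_size = 65536;	/* historic 64k default */
	return 0;				/* both limits may coexist */
}

int main(void)
{
	struct queue_limits lim = { .virt_boundary_mask = 4095,
				    .max_segment_size = 65536 };

	/* a stacked device carrying both limits: old -1, new 0 */
	printf("old: %d, new: %d\n", validate_old(&lim), validate_new(&lim));
	return 0;
}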

drivers/md/raid1.c

@@ -1558,7 +1558,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 			for (j = 0; j < i; j++)
 				if (r1_bio->bios[j])
 					rdev_dec_pending(conf->mirrors[j].rdev, mddev);
-			free_r1bio(r1_bio);
+			mempool_free(r1_bio, &conf->r1bio_pool);
 			allow_barrier(conf, bio->bi_iter.bi_sector);
 
 			if (bio->bi_opf & REQ_NOWAIT) {
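
The shape of the use-after-free: at this point in raid1_write_request() the r1_bio still temporarily records the original bio in r1_bio->bios[], so free_r1bio(), which also puts every recorded bio, freed the very bio whose bi_iter.bi_sector the next line hands to allow_barrier(); mempool_free() releases only the container. A generic userspace sketch of that bug shape (made-up types, not the md driver):

#include <stdio.h>
#include <stdlib.h>

struct bio { unsigned long long bi_sector; };
struct r1bio { struct bio *bios[1]; };	/* records the original bio */

/* old-helper shape: frees the container AND puts every recorded bio */
static void free_r1bio(struct r1bio *r1)
{
	free(r1->bios[0]);
	free(r1);
}

int main(void)
{
	struct bio *bio = calloc(1, sizeof(*bio));
	struct r1bio *r1 = calloc(1, sizeof(*r1));

	bio->bi_sector = 2048;
	r1->bios[0] = bio;	/* temporary setup before submission */

	/*
	 * Buggy order (don't run): free_r1bio(r1) would free 'bio' here,
	 * making the read of bio->bi_sector below a use-after-free.
	 */

	/* fixed order: free only the container, the original bio lives on */
	free(r1);		/* stands in for mempool_free() */
	printf("sector %llu still valid\n", bio->bi_sector);
	free(bio);
	return 0;
}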