treewide: use prandom_u32_max() when possible, part 2

Rather than incurring a division or requesting too many random bytes for
the given range, use the prandom_u32_max() function, which only takes
the minimum required bytes from the RNG and avoids divisions. This was
done by hand, covering things that coccinelle could not do on its own.

Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Yury Norov <yury.norov@gmail.com>
Reviewed-by: Jan Kara <jack@suse.cz> # for ext2, ext4, and sbitmap
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
This commit is contained in:
Jason A. Donenfeld 2022-10-05 16:43:38 +02:00
parent 81895a65ec
commit 8b3ccbc1f1
4 changed files with 8 additions and 19 deletions

View File

@@ -277,8 +277,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent)
 		int best_ndir = inodes_per_group;
 		int best_group = -1;
 
-		group = prandom_u32();
-		parent_group = (unsigned)group % ngroups;
+		parent_group = prandom_u32_max(ngroups);
 		for (i = 0; i < ngroups; i++) {
 			group = (parent_group + i) % ngroups;
 			desc = ext2_get_group_desc (sb, group, NULL);

View File

@@ -463,10 +463,9 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
 			hinfo.hash_version = DX_HASH_HALF_MD4;
 			hinfo.seed = sbi->s_hash_seed;
 			ext4fs_dirhash(parent, qstr->name, qstr->len, &hinfo);
-			grp = hinfo.hash;
+			parent_group = hinfo.hash % ngroups;
 		} else
-			grp = prandom_u32();
-		parent_group = (unsigned)grp % ngroups;
+			parent_group = prandom_u32_max(ngroups);
 		for (i = 0; i < ngroups; i++) {
 			g = (parent_group + i) % ngroups;
 			get_orlov_stats(sb, g, flex_size, &stats);

View File

@@ -21,7 +21,7 @@ static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
 		int i;
 
 		for_each_possible_cpu(i)
-			*per_cpu_ptr(sb->alloc_hint, i) = prandom_u32() % depth;
+			*per_cpu_ptr(sb->alloc_hint, i) = prandom_u32_max(depth);
 	}
 	return 0;
 }

View File

@@ -151,9 +151,7 @@ static int random_size_alloc_test(void)
 	int i;
 
 	for (i = 0; i < test_loop_count; i++) {
-		n = prandom_u32();
-		n = (n % 100) + 1;
-
+		n = prandom_u32_max(100) + 1;
 		p = vmalloc(n * PAGE_SIZE);
 
 		if (!p)
@@ -293,16 +291,12 @@ pcpu_alloc_test(void)
 		return -1;
 
 	for (i = 0; i < 35000; i++) {
-		unsigned int r;
-
-		r = prandom_u32();
-		size = (r % (PAGE_SIZE / 4)) + 1;
+		size = prandom_u32_max(PAGE_SIZE / 4) + 1;
 
 		/*
 		 * Maximum PAGE_SIZE
 		 */
-		r = prandom_u32();
-		align = 1 << ((r % 11) + 1);
+		align = 1 << (prandom_u32_max(11) + 1);
 
 		pcpu[i] = __alloc_percpu(size, align);
 		if (!pcpu[i])
@@ -393,14 +387,11 @@ static struct test_driver {
 static void shuffle_array(int *arr, int n)
 {
-	unsigned int rnd;
 	int i, j;
 
 	for (i = n - 1; i > 0; i--) {
-		rnd = prandom_u32();
-
 		/* Cut the range. */
-		j = rnd % i;
+		j = prandom_u32_max(i);
 
 		/* Swap indexes. */
 		swap(arr[i], arr[j]);