powerpc/compat_sys: swap hi/lo parts of 64-bit syscall args in LE mode

Swap upper/lower 32 bits for 64-bit compat syscalls, conditioned on
endianness. This is modeled after the same functionality in
arch/mips/kernel/linux32.c.

This fixes the compat syscalls that take 64-bit arguments on ppc64le,
when they are called by 32-bit little-endian processes.

Tested with `file /bin/bash` (pread64) and `truncate -s 5G test`
(ftruncate64).

Signed-off-by: Will Springer <skirmisher@protonmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/2765111.e9J7NaK4W3@sheen
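
For illustration only, here is a stand-alone sketch of the merging logic the
patch introduces. The merge_64() macro mirrors the one added to sys_ppc32.c
in the hunk below (with uint64_t standing in for the kernel's u64 and extra
parentheses added); the surrounding test program, its variable names, and the
5 GiB value from the `truncate -s 5G test` case are hypothetical scaffolding.
Whether a userspace compiler predefines __LITTLE_ENDIAN__ varies by toolchain,
but the arithmetic comes out the same on either branch here:

/*
 * Stand-alone illustration (not part of the patch): the macro takes its
 * arguments in register order.  On a little-endian 32-bit ABI the
 * lower-numbered register of the pair holds the low word; on big-endian
 * it holds the high word.
 */
#include <stdint.h>
#include <stdio.h>

#ifdef __LITTLE_ENDIAN__
#define merge_64(low, high) (((uint64_t)(high) << 32) | (low))
#else
#define merge_64(high, low) (((uint64_t)(high) << 32) | (low))
#endif

int main(void)
{
        /* 5 GiB split into 32-bit halves, as a 32-bit process would pass it */
        uint32_t hi = 0x1, lo = 0x40000000;

#ifdef __LITTLE_ENDIAN__
        uint64_t len = merge_64(lo, hi);        /* low word in the first register */
#else
        uint64_t len = merge_64(hi, lo);        /* high word in the first register */
#endif

        printf("len = %llu\n", (unsigned long long)len);        /* 5368709120 */
        return 0;
}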
@@ -59,57 +59,64 @@ unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
 /*
  * long long munging:
  * The 32 bit ABI passes long longs in an odd even register pair.
+ * High and low parts are swapped depending on endian mode,
+ * so define a macro (similar to mips linux32) to handle that.
  */
+#ifdef __LITTLE_ENDIAN__
+#define merge_64(low, high) ((u64)high << 32) | low
+#else
+#define merge_64(high, low) ((u64)high << 32) | low
+#endif
 
 compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count,
-                             u32 reg6, u32 poshi, u32 poslo)
+                             u32 reg6, u32 pos1, u32 pos2)
 {
-        return ksys_pread64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
+        return ksys_pread64(fd, ubuf, count, merge_64(pos1, pos2));
 }
 
 compat_ssize_t compat_sys_pwrite64(unsigned int fd, const char __user *ubuf, compat_size_t count,
-                              u32 reg6, u32 poshi, u32 poslo)
+                              u32 reg6, u32 pos1, u32 pos2)
 {
-        return ksys_pwrite64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
+        return ksys_pwrite64(fd, ubuf, count, merge_64(pos1, pos2));
 }
 
-compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offhi, u32 offlo, u32 count)
+compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offset1, u32 offset2, u32 count)
 {
-        return ksys_readahead(fd, ((loff_t)offhi << 32) | offlo, count);
+        return ksys_readahead(fd, merge_64(offset1, offset2), count);
 }
 
 asmlinkage int compat_sys_truncate64(const char __user * path, u32 reg4,
-                                unsigned long high, unsigned long low)
+                                unsigned long len1, unsigned long len2)
 {
-        return ksys_truncate(path, (high << 32) | low);
+        return ksys_truncate(path, merge_64(len1, len2));
 }
 
-asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
-                                     u32 lenhi, u32 lenlo)
+asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offset1, u32 offset2,
+                                     u32 len1, u32 len2)
 {
-        return ksys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
-                             ((loff_t)lenhi << 32) | lenlo);
+        return ksys_fallocate(fd, mode, ((loff_t)offset1 << 32) | offset2,
+                             merge_64(len1, len2));
 }
 
-asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long high,
-                                 unsigned long low)
+asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long len1,
+                                 unsigned long len2)
 {
-        return ksys_ftruncate(fd, (high << 32) | low);
+        return ksys_ftruncate(fd, merge_64(len1, len2));
 }
 
-long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low,
+long ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2,
                      size_t len, int advice)
 {
-        return ksys_fadvise64_64(fd, (u64)offset_high << 32 | offset_low, len,
+        return ksys_fadvise64_64(fd, merge_64(offset1, offset2), len,
                                  advice);
 }
 
 asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags,
-                                   unsigned offset_hi, unsigned offset_lo,
-                                   unsigned nbytes_hi, unsigned nbytes_lo)
+                                   unsigned offset1, unsigned offset2,
+                                   unsigned nbytes1, unsigned nbytes2)
 {
-        loff_t offset = ((loff_t)offset_hi << 32) | offset_lo;
-        loff_t nbytes = ((loff_t)nbytes_hi << 32) | nbytes_lo;
+        loff_t offset = merge_64(offset1, offset2);
+        loff_t nbytes = merge_64(nbytes1, nbytes2);
 
         return ksys_sync_file_range(fd, offset, nbytes, flags);
 }
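
For context on the other side of the ABI, here is a hypothetical caller-side
sketch (not from the patch) of how a 32-bit little-endian process might hand a
64-bit length to ftruncate64 with a raw syscall. The ftruncate64_raw() name is
made up here, and it assumes a 32-bit build where SYS_ftruncate64 exists; the
extra 0 keeps the 64-bit value starting in an odd/even register pair, which is
why the compat wrapper above carries the unused reg4 argument, and on LE the
low word goes in the lower-numbered register, which is exactly what the new
merge_64() undoes:

#define _GNU_SOURCE
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

static long ftruncate64_raw(int fd, uint64_t len)
{
        uint32_t lo = (uint32_t)len;
        uint32_t hi = (uint32_t)(len >> 32);

        /* fd, alignment pad, then the (low, high) register pair on LE */
        return syscall(SYS_ftruncate64, fd, 0, lo, hi);
}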