mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-08-28 03:40:04 +00:00
tcmu: prevent corruption when invalid data page requested
We will always have a page mapped for cmd data if it is a valid command. If the mapping does not exist, then something bad happened in userspace and it should not proceed. This has us return VM_FAULT_SIGBUS when this happens instead of returning a freshly allocated page. The latter can cause corruption, because userspace might write the page's data, overwriting valid data, or return it to the initiator. Signed-off-by: Mike Christie <mchristi@redhat.com> Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
This commit is contained in:
parent
9960f85181
commit
c1c390ba53
1 changed file with 6 additions and 37 deletions
|
@ -1342,7 +1342,6 @@ static int tcmu_find_mem_index(struct vm_area_struct *vma)
|
||||||
static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
|
static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
|
||||||
{
|
{
|
||||||
struct page *page;
|
struct page *page;
|
||||||
int ret;
|
|
||||||
|
|
||||||
mutex_lock(&udev->cmdr_lock);
|
mutex_lock(&udev->cmdr_lock);
|
||||||
page = tcmu_get_block_page(udev, dbi);
|
page = tcmu_get_block_page(udev, dbi);
|
||||||
|
@ -1352,42 +1351,12 @@ static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Normally it shouldn't be here:
|
* Userspace messed up and passed in a address not in the
|
||||||
* Only when the userspace has touched the blocks which
|
* data iov passed to it.
|
||||||
* are out of the tcmu_cmd's data iov[], and will return
|
|
||||||
* one zeroed page.
|
|
||||||
*/
|
*/
|
||||||
pr_warn("Block(%u) out of cmd's iov[] has been touched!\n", dbi);
|
pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n",
|
||||||
pr_warn("Mostly it will be a bug of userspace, please have a check!\n");
|
dbi, udev->name);
|
||||||
|
page = NULL;
|
||||||
if (dbi >= udev->dbi_thresh) {
|
|
||||||
/* Extern the udev->dbi_thresh to dbi + 1 */
|
|
||||||
udev->dbi_thresh = dbi + 1;
|
|
||||||
udev->dbi_max = dbi;
|
|
||||||
}
|
|
||||||
|
|
||||||
page = radix_tree_lookup(&udev->data_blocks, dbi);
|
|
||||||
if (!page) {
|
|
||||||
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
|
|
||||||
if (!page) {
|
|
||||||
mutex_unlock(&udev->cmdr_lock);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = radix_tree_insert(&udev->data_blocks, dbi, page);
|
|
||||||
if (ret) {
|
|
||||||
mutex_unlock(&udev->cmdr_lock);
|
|
||||||
__free_page(page);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Since this case is rare in page fault routine, here we
|
|
||||||
* will allow the global_db_count >= tcmu_global_max_blocks
|
|
||||||
* to reduce possible page fault call trace.
|
|
||||||
*/
|
|
||||||
atomic_inc(&global_db_count);
|
|
||||||
}
|
|
||||||
mutex_unlock(&udev->cmdr_lock);
|
mutex_unlock(&udev->cmdr_lock);
|
||||||
|
|
||||||
return page;
|
return page;
|
||||||
|
@ -1422,7 +1391,7 @@ static int tcmu_vma_fault(struct vm_fault *vmf)
|
||||||
dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
|
dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
|
||||||
page = tcmu_try_get_block_page(udev, dbi);
|
page = tcmu_try_get_block_page(udev, dbi);
|
||||||
if (!page)
|
if (!page)
|
||||||
return VM_FAULT_NOPAGE;
|
return VM_FAULT_SIGBUS;
|
||||||
}
|
}
|
||||||
|
|
||||||
get_page(page);
|
get_page(page);
|
||||||
|
|
Loading…
Reference in a new issue