bcachefs: Use kvpmalloc mempools for compression bounce

This fixes an issue where mounting would fail because of memory
fragmentation - previously the compression bounce buffers were using
get_free_pages().

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Kent Overstreet 2020-03-29 12:33:41 -04:00 committed by Kent Overstreet
parent 5a655f06c9
commit 22f776985f
1 changed file with 5 additions and 20 deletions

View File

@@ -17,7 +17,6 @@ struct bbuf {
BB_NONE, BB_NONE,
BB_VMAP, BB_VMAP,
BB_KMALLOC, BB_KMALLOC,
BB_VMALLOC,
BB_MEMPOOL, BB_MEMPOOL,
} type; } type;
int rw; int rw;
@@ -33,17 +32,7 @@ static struct bbuf __bounce_alloc(struct bch_fs *c, unsigned size, int rw)
if (b) if (b)
return (struct bbuf) { .b = b, .type = BB_KMALLOC, .rw = rw }; return (struct bbuf) { .b = b, .type = BB_KMALLOC, .rw = rw };
b = mempool_alloc(&c->compression_bounce[rw], GFP_NOWAIT);
b = b ? page_address(b) : NULL;
if (b)
return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };
b = vmalloc(size);
if (b)
return (struct bbuf) { .b = b, .type = BB_VMALLOC, .rw = rw };
b = mempool_alloc(&c->compression_bounce[rw], GFP_NOIO); b = mempool_alloc(&c->compression_bounce[rw], GFP_NOIO);
b = b ? page_address(b) : NULL;
if (b) if (b)
return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw }; return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };
@@ -129,12 +118,8 @@ static void bio_unmap_or_unbounce(struct bch_fs *c, struct bbuf buf)
case BB_KMALLOC: case BB_KMALLOC:
kfree(buf.b); kfree(buf.b);
break; break;
case BB_VMALLOC:
vfree(buf.b);
break;
case BB_MEMPOOL: case BB_MEMPOOL:
mempool_free(virt_to_page(buf.b), mempool_free(buf.b, &c->compression_bounce[buf.rw]);
&c->compression_bounce[buf.rw]);
break; break;
} }
} }
@@ -561,15 +546,15 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
have_compressed: have_compressed:
if (!mempool_initialized(&c->compression_bounce[READ])) { if (!mempool_initialized(&c->compression_bounce[READ])) {
ret = mempool_init_page_pool(&c->compression_bounce[READ], ret = mempool_init_kvpmalloc_pool(&c->compression_bounce[READ],
1, order); 1, order);
if (ret) if (ret)
goto out; goto out;
} }
if (!mempool_initialized(&c->compression_bounce[WRITE])) { if (!mempool_initialized(&c->compression_bounce[WRITE])) {
ret = mempool_init_page_pool(&c->compression_bounce[WRITE], ret = mempool_init_kvpmalloc_pool(&c->compression_bounce[WRITE],
1, order); 1, order);
if (ret) if (ret)
goto out; goto out;
} }