pipe: kill ->map() and ->unmap()

all pipe_buffer_operations have the same instances of those...

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
commit fbb32750a6 (parent 58bda1da4b)
Author: Al Viro <viro@zeniv.linux.org.uk>
Date:   2014-02-02 21:09:54 -05:00

7 changed files with 29 additions and 100 deletions
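The conversion is purely mechanical: every pipe_buf_operations instance wired ->map()/->unmap() to generic_pipe_buf_map()/generic_pipe_buf_unmap(), so each call site can map buf->page directly with kmap()/kmap_atomic() and the hooks can be deleted. A minimal before/after sketch of the calling convention (the helpers copy_from_pipe_buf_old/copy_from_pipe_buf_new are illustrative names, not part of the patch):

#include <linux/highmem.h>
#include <linux/pipe_fs_i.h>
#include <linux/string.h>

/* Before: reach the page through the indirect ->map()/->unmap() hooks. */
static void copy_from_pipe_buf_old(struct pipe_inode_info *pipe,
                                   struct pipe_buffer *buf, char *dst)
{
        char *src = buf->ops->map(pipe, buf, 1);        /* 1 == atomic map */

        memcpy(dst, src + buf->offset, buf->len);
        buf->ops->unmap(pipe, buf, src);
}

/* After: every ->map() was generic_pipe_buf_map() anyway, so map the
 * buffer page directly; the atomic case becomes a plain kmap_atomic(). */
static void copy_from_pipe_buf_new(struct pipe_buffer *buf, char *dst)
{
        char *src = kmap_atomic(buf->page);

        memcpy(dst, src + buf->offset, buf->len);
        kunmap_atomic(src);
}

The non-atomic sites follow the same substitution with kmap()/kunmap(), as in write_pipe_buf() and the slow path of pipe_to_user() below.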

drivers/char/virtio_console.c

@@ -901,9 +901,9 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
                 if (len + offset > PAGE_SIZE)
                         len = PAGE_SIZE - offset;
 
-                src = buf->ops->map(pipe, buf, 1);
+                src = kmap_atomic(buf->page);
                 memcpy(page_address(page) + offset, src + buf->offset, len);
-                buf->ops->unmap(pipe, buf, src);
+                kunmap_atomic(src);
 
                 sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
         }

fs/fuse/dev.c

@@ -667,7 +667,7 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
                 struct pipe_buffer *buf = cs->currbuf;
 
                 if (!cs->write) {
-                        buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
+                        kunmap_atomic(cs->mapaddr);
                 } else {
                         kunmap_atomic(cs->mapaddr);
                         buf->len = PAGE_SIZE - cs->len;
@@ -706,7 +706,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 
                         BUG_ON(!cs->nr_segs);
                         cs->currbuf = buf;
-                        cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
+                        cs->mapaddr = kmap_atomic(buf->page);
                         cs->len = buf->len;
                         cs->buf = cs->mapaddr + buf->offset;
                         cs->pipebufs++;
@@ -874,7 +874,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
 out_fallback_unlock:
         unlock_page(newpage);
 out_fallback:
-        cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
+        cs->mapaddr = kmap_atomic(buf->page);
         cs->buf = cs->mapaddr + buf->offset;
 
         err = lock_request(cs->fc, cs->req);

fs/pipe.c

@@ -225,52 +225,6 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
                 page_cache_release(page);
 }
 
-/**
- * generic_pipe_buf_map - virtually map a pipe buffer
- * @pipe: the pipe that the buffer belongs to
- * @buf: the buffer that should be mapped
- * @atomic: whether to use an atomic map
- *
- * Description:
- *	This function returns a kernel virtual address mapping for the
- *	pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided
- *	and the caller has to be careful not to fault before calling
- *	the unmap function.
- *
- *	Note that this function calls kmap_atomic() if @atomic != 0.
- */
-void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
-                           struct pipe_buffer *buf, int atomic)
-{
-        if (atomic) {
-                buf->flags |= PIPE_BUF_FLAG_ATOMIC;
-                return kmap_atomic(buf->page);
-        }
-
-        return kmap(buf->page);
-}
-EXPORT_SYMBOL(generic_pipe_buf_map);
-
-/**
- * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
- * @pipe: the pipe that the buffer belongs to
- * @buf: the buffer that should be unmapped
- * @map_data: the data that the mapping function returned
- *
- * Description:
- *	This function undoes the mapping that ->map() provided.
- */
-void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
-                            struct pipe_buffer *buf, void *map_data)
-{
-        if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
-                buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
-                kunmap_atomic(map_data);
-        } else
-                kunmap(buf->page);
-}
-EXPORT_SYMBOL(generic_pipe_buf_unmap);
-
 /**
  * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
  * @pipe: the pipe that the buffer belongs to
@@ -351,8 +305,6 @@ EXPORT_SYMBOL(generic_pipe_buf_release);
 
 static const struct pipe_buf_operations anon_pipe_buf_ops = {
         .can_merge = 1,
-        .map = generic_pipe_buf_map,
-        .unmap = generic_pipe_buf_unmap,
         .confirm = generic_pipe_buf_confirm,
         .release = anon_pipe_buf_release,
         .steal = generic_pipe_buf_steal,
@@ -361,8 +313,6 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = {
 
 static const struct pipe_buf_operations packet_pipe_buf_ops = {
         .can_merge = 0,
-        .map = generic_pipe_buf_map,
-        .unmap = generic_pipe_buf_unmap,
         .confirm = generic_pipe_buf_confirm,
         .release = anon_pipe_buf_release,
         .steal = generic_pipe_buf_steal,
@@ -410,9 +360,15 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
 
                         atomic = !iov_fault_in_pages_write(iov, chars);
 redo:
-                        addr = ops->map(pipe, buf, atomic);
+                        if (atomic)
+                                addr = kmap_atomic(buf->page);
+                        else
+                                addr = kmap(buf->page);
                         error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
-                        ops->unmap(pipe, buf, addr);
+                        if (atomic)
+                                kunmap_atomic(addr);
+                        else
+                                kunmap(buf->page);
                         if (unlikely(error)) {
                                 /*
                                  * Just retry with the slow path if we failed.
@@ -538,10 +494,16 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
 
                         iov_fault_in_pages_read(iov, chars);
 redo1:
-                        addr = ops->map(pipe, buf, atomic);
+                        if (atomic)
+                                addr = kmap_atomic(buf->page);
+                        else
+                                addr = kmap(buf->page);
                         error = pipe_iov_copy_from_user(offset + addr, iov,
                                                         chars, atomic);
-                        ops->unmap(pipe, buf, addr);
+                        if (atomic)
+                                kunmap_atomic(addr);
+                        else
+                                kunmap(buf->page);
                         ret = error;
                         do_wakeup = 1;
                         if (error) {

fs/splice.c

@@ -136,8 +136,6 @@ error:
 
 const struct pipe_buf_operations page_cache_pipe_buf_ops = {
         .can_merge = 0,
-        .map = generic_pipe_buf_map,
-        .unmap = generic_pipe_buf_unmap,
         .confirm = page_cache_pipe_buf_confirm,
         .release = page_cache_pipe_buf_release,
         .steal = page_cache_pipe_buf_steal,
@@ -156,8 +154,6 @@ static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
 
 static const struct pipe_buf_operations user_page_pipe_buf_ops = {
         .can_merge = 0,
-        .map = generic_pipe_buf_map,
-        .unmap = generic_pipe_buf_unmap,
         .confirm = generic_pipe_buf_confirm,
         .release = page_cache_pipe_buf_release,
         .steal = user_page_pipe_buf_steal,
@@ -547,8 +543,6 @@ EXPORT_SYMBOL(generic_file_splice_read);
 
 static const struct pipe_buf_operations default_pipe_buf_ops = {
         .can_merge = 0,
-        .map = generic_pipe_buf_map,
-        .unmap = generic_pipe_buf_unmap,
         .confirm = generic_pipe_buf_confirm,
         .release = generic_pipe_buf_release,
         .steal = generic_pipe_buf_steal,
@@ -564,8 +558,6 @@ static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
 /* Pipe buffer operations for a socket and similar. */
 const struct pipe_buf_operations nosteal_pipe_buf_ops = {
         .can_merge = 0,
-        .map = generic_pipe_buf_map,
-        .unmap = generic_pipe_buf_unmap,
         .confirm = generic_pipe_buf_confirm,
         .release = generic_pipe_buf_release,
         .steal = generic_pipe_buf_nosteal,
@@ -767,13 +759,13 @@ int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
                 goto out;
 
         if (buf->page != page) {
-                char *src = buf->ops->map(pipe, buf, 1);
+                char *src = kmap_atomic(buf->page);
                 char *dst = kmap_atomic(page);
 
                 memcpy(dst + offset, src + buf->offset, this_len);
                 flush_dcache_page(page);
                 kunmap_atomic(dst);
-                buf->ops->unmap(pipe, buf, src);
+                kunmap_atomic(src);
         }
         ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,
                                 page, fsdata);
@@ -1067,9 +1059,9 @@ static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
         void *data;
         loff_t tmp = sd->pos;
 
-        data = buf->ops->map(pipe, buf, 0);
+        data = kmap(buf->page);
         ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp);
-        buf->ops->unmap(pipe, buf, data);
+        kunmap(buf->page);
 
         return ret;
 }
@@ -1536,10 +1528,10 @@ static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
          * pages and doing an atomic copy
          */
         if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) {
-                src = buf->ops->map(pipe, buf, 1);
+                src = kmap_atomic(buf->page);
                 ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset,
                                                 sd->len);
-                buf->ops->unmap(pipe, buf, src);
+                kunmap_atomic(src);
                 if (!ret) {
                         ret = sd->len;
                         goto out;
@@ -1549,13 +1541,13 @@ static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
         /*
          * No dice, use slow non-atomic map and copy
          */
-        src = buf->ops->map(pipe, buf, 0);
+        src = kmap(buf->page);
 
         ret = sd->len;
         if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
                 ret = -EFAULT;
 
-        buf->ops->unmap(pipe, buf, src);
+        kunmap(buf->page);
 out:
         if (ret > 0)
                 sd->u.userptr += ret;

include/linux/pipe_fs_i.h

@@ -82,23 +82,6 @@ struct pipe_buf_operations {
          */
         int can_merge;
 
-        /*
-         * ->map() returns a virtual address mapping of the pipe buffer.
-         * The last integer flag reflects whether this should be an atomic
-         * mapping or not. The atomic map is faster, however you can't take
-         * page faults before calling ->unmap() again. So if you need to eg
-         * access user data through copy_to/from_user(), then you must get
-         * a non-atomic map. ->map() uses the kmap_atomic slot for
-         * atomic maps, you have to be careful if mapping another page as
-         * source or destination for a copy.
-         */
-        void * (*map)(struct pipe_inode_info *, struct pipe_buffer *, int);
-
-        /*
-         * Undoes ->map(), finishes the virtual mapping of the pipe buffer.
-         */
-        void (*unmap)(struct pipe_inode_info *, struct pipe_buffer *, void *);
-
         /*
          * ->confirm() verifies that the data in the pipe buffer is there
          * and that the contents are good. If the pages in the pipe belong
@@ -150,8 +133,6 @@ struct pipe_inode_info *alloc_pipe_info(void);
 void free_pipe_info(struct pipe_inode_info *);
 
 /* Generic pipe buffer ops functions */
-void *generic_pipe_buf_map(struct pipe_inode_info *, struct pipe_buffer *, int);
-void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void *);
 void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);

kernel/relay.c

@@ -1195,8 +1195,6 @@ static void relay_pipe_buf_release(struct pipe_inode_info *pipe,
 
 static const struct pipe_buf_operations relay_pipe_buf_ops = {
         .can_merge = 0,
-        .map = generic_pipe_buf_map,
-        .unmap = generic_pipe_buf_unmap,
         .confirm = generic_pipe_buf_confirm,
         .release = relay_pipe_buf_release,
         .steal = generic_pipe_buf_steal,

kernel/trace/trace.c

@@ -4316,8 +4316,6 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
 
 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
         .can_merge = 0,
-        .map = generic_pipe_buf_map,
-        .unmap = generic_pipe_buf_unmap,
         .confirm = generic_pipe_buf_confirm,
         .release = generic_pipe_buf_release,
         .steal = generic_pipe_buf_steal,
@@ -5194,8 +5192,6 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 /* Pipe buffer operations for a buffer. */
 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
         .can_merge = 0,
-        .map = generic_pipe_buf_map,
-        .unmap = generic_pipe_buf_unmap,
         .confirm = generic_pipe_buf_confirm,
         .release = buffer_pipe_buf_release,
         .steal = generic_pipe_buf_steal,