Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ericvh/v9fs

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ericvh/v9fs: (46 commits)
  fs/9p: Make the writeback_fid owned by root
  fs/9p: Writeback dirty data before setattr
  fs/9p: call vmtruncate before setattr 9p operation
  fs/9p: Properly update inode attributes on link
  fs/9p: Prevent multiple inclusion of same header
  fs/9p: Workaround vfs rename rehash bug
  fs/9p: Mark directory inode invalid for many directory inode operations
  fs/9p: Add . and .. dentry revalidation flag
  fs/9p: mark inode attribute invalid on rename, unlink and setattr
  fs/9p: Add support for marking inode attribute invalid
  fs/9p: Initialize root inode number for dotl
  fs/9p: Update link count correctly on different file system operations
  fs/9p: Add drop_inode 9p callback
  fs/9p: Add direct IO support in cached mode
  fs/9p: Fix inode i_size update in file_write
  fs/9p: set default readahead pages in cached mode
  fs/9p: Move writeback fid to v9fs_inode
  fs/9p: Add v9fs_inode
  fs/9p: Don't set stat.st_blocks based on nrpages
  fs/9p: Add inode hashing
  ...
Linus Torvalds 2011-03-16 08:58:09 -07:00
commit 26a992dbc2
24 changed files with 1666 additions and 567 deletions
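
Several of the commits listed above revolve around the new per-inode writeback fid ("Move writeback fid to v9fs_inode", "Make the writeback_fid owned by root"); the comments in v9fs_file_open() and v9fs_vfs_create() below give "write after unlink" as the use case it has to keep working. As a hedged illustration only — a hypothetical userspace program, not part of this diff, assuming /mnt/9p is a 9p mount with caching enabled — that pattern looks like this:

/* Hypothetical illustration of the "write after unlink" pattern the
 * per-inode writeback fid is meant to keep working; /mnt/9p and the
 * file name are made up. Not part of the kernel diff. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16] = { 0 };
	int fd = open("/mnt/9p/scratch", O_RDWR | O_CREAT | O_TRUNC, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Drop the last name: later flushes of dirty pages can no longer
	 * walk to the file by path, only through an already-open fid. */
	unlink("/mnt/9p/scratch");

	if (write(fd, "hello", 5) != 5)
		perror("write");
	/* Dirty page cache pages are pushed to the server through the
	 * writeback fid that was cloned in v9fs_file_open(). */
	fsync(fd);

	if (pread(fd, buf, 5, 0) == 5)
		printf("read back after unlink: %s\n", buf);
	close(fd);
	return 0;
}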


@ -21,8 +21,8 @@
#include <linux/posix_acl_xattr.h>
#include "xattr.h"
#include "acl.h"
#include "v9fs_vfs.h"
#include "v9fs.h"
#include "v9fs_vfs.h"
static struct posix_acl *__v9fs_get_acl(struct p9_fid *fid, char *name)
{
@ -59,7 +59,8 @@ int v9fs_get_acl(struct inode *inode, struct p9_fid *fid)
struct v9fs_session_info *v9ses;
v9ses = v9fs_inode2v9ses(inode);
if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) {
if (((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) ||
((v9ses->flags & V9FS_ACL_MASK) != V9FS_POSIX_ACL)) {
set_cached_acl(inode, ACL_TYPE_DEFAULT, NULL);
set_cached_acl(inode, ACL_TYPE_ACCESS, NULL);
return 0;
@ -71,11 +72,15 @@ int v9fs_get_acl(struct inode *inode, struct p9_fid *fid)
if (!IS_ERR(dacl) && !IS_ERR(pacl)) {
set_cached_acl(inode, ACL_TYPE_DEFAULT, dacl);
set_cached_acl(inode, ACL_TYPE_ACCESS, pacl);
posix_acl_release(dacl);
posix_acl_release(pacl);
} else
retval = -EIO;
if (!IS_ERR(dacl))
posix_acl_release(dacl);
if (!IS_ERR(pacl))
posix_acl_release(pacl);
return retval;
}
@ -100,9 +105,10 @@ int v9fs_check_acl(struct inode *inode, int mask, unsigned int flags)
return -ECHILD;
v9ses = v9fs_inode2v9ses(inode);
if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) {
if (((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) ||
((v9ses->flags & V9FS_ACL_MASK) != V9FS_POSIX_ACL)) {
/*
* On access = client mode get the acl
* On access = client and acl = on mode get the acl
* values from the server
*/
return 0;
@ -128,6 +134,10 @@ static int v9fs_set_acl(struct dentry *dentry, int type, struct posix_acl *acl)
struct inode *inode = dentry->d_inode;
set_cached_acl(inode, type, acl);
if (!acl)
return 0;
/* Set a setxattr request to server */
size = posix_acl_xattr_size(acl->a_count);
buffer = kmalloc(size, GFP_KERNEL);
@ -177,10 +187,8 @@ int v9fs_acl_chmod(struct dentry *dentry)
int v9fs_set_create_acl(struct dentry *dentry,
struct posix_acl *dpacl, struct posix_acl *pacl)
{
if (dpacl)
v9fs_set_acl(dentry, ACL_TYPE_DEFAULT, dpacl);
if (pacl)
v9fs_set_acl(dentry, ACL_TYPE_ACCESS, pacl);
v9fs_set_acl(dentry, ACL_TYPE_DEFAULT, dpacl);
v9fs_set_acl(dentry, ACL_TYPE_ACCESS, pacl);
posix_acl_release(dpacl);
posix_acl_release(pacl);
return 0;


@ -33,67 +33,11 @@
#define CACHETAG_LEN 11
struct kmem_cache *vcookie_cache;
struct fscache_netfs v9fs_cache_netfs = {
.name = "9p",
.version = 0,
};
static void init_once(void *foo)
{
struct v9fs_cookie *vcookie = (struct v9fs_cookie *) foo;
vcookie->fscache = NULL;
vcookie->qid = NULL;
inode_init_once(&vcookie->inode);
}
/**
* v9fs_init_vcookiecache - initialize a cache for vcookies to maintain
* vcookie to inode mapping
*
* Returns 0 on success.
*/
static int v9fs_init_vcookiecache(void)
{
vcookie_cache = kmem_cache_create("vcookie_cache",
sizeof(struct v9fs_cookie),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
init_once);
if (!vcookie_cache)
return -ENOMEM;
return 0;
}
/**
* v9fs_destroy_vcookiecache - destroy the cache of vcookies
*
*/
static void v9fs_destroy_vcookiecache(void)
{
kmem_cache_destroy(vcookie_cache);
}
int __v9fs_cache_register(void)
{
int ret;
ret = v9fs_init_vcookiecache();
if (ret < 0)
return ret;
return fscache_register_netfs(&v9fs_cache_netfs);
}
void __v9fs_cache_unregister(void)
{
v9fs_destroy_vcookiecache();
fscache_unregister_netfs(&v9fs_cache_netfs);
}
/**
* v9fs_random_cachetag - Generate a random tag to be associated
* with a new cache session.
@ -133,9 +77,9 @@ static uint16_t v9fs_cache_session_get_key(const void *cookie_netfs_data,
}
const struct fscache_cookie_def v9fs_cache_session_index_def = {
.name = "9P.session",
.type = FSCACHE_COOKIE_TYPE_INDEX,
.get_key = v9fs_cache_session_get_key,
.name = "9P.session",
.type = FSCACHE_COOKIE_TYPE_INDEX,
.get_key = v9fs_cache_session_get_key,
};
void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
@ -163,33 +107,33 @@ void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
static uint16_t v9fs_cache_inode_get_key(const void *cookie_netfs_data,
void *buffer, uint16_t bufmax)
{
const struct v9fs_cookie *vcookie = cookie_netfs_data;
memcpy(buffer, &vcookie->qid->path, sizeof(vcookie->qid->path));
P9_DPRINTK(P9_DEBUG_FSC, "inode %p get key %llu", &vcookie->inode,
vcookie->qid->path);
return sizeof(vcookie->qid->path);
const struct v9fs_inode *v9inode = cookie_netfs_data;
memcpy(buffer, &v9inode->fscache_key->path,
sizeof(v9inode->fscache_key->path));
P9_DPRINTK(P9_DEBUG_FSC, "inode %p get key %llu", &v9inode->vfs_inode,
v9inode->fscache_key->path);
return sizeof(v9inode->fscache_key->path);
}
static void v9fs_cache_inode_get_attr(const void *cookie_netfs_data,
uint64_t *size)
{
const struct v9fs_cookie *vcookie = cookie_netfs_data;
*size = i_size_read(&vcookie->inode);
const struct v9fs_inode *v9inode = cookie_netfs_data;
*size = i_size_read(&v9inode->vfs_inode);
P9_DPRINTK(P9_DEBUG_FSC, "inode %p get attr %llu", &vcookie->inode,
P9_DPRINTK(P9_DEBUG_FSC, "inode %p get attr %llu", &v9inode->vfs_inode,
*size);
}
static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data,
void *buffer, uint16_t buflen)
{
const struct v9fs_cookie *vcookie = cookie_netfs_data;
memcpy(buffer, &vcookie->qid->version, sizeof(vcookie->qid->version));
P9_DPRINTK(P9_DEBUG_FSC, "inode %p get aux %u", &vcookie->inode,
vcookie->qid->version);
return sizeof(vcookie->qid->version);
const struct v9fs_inode *v9inode = cookie_netfs_data;
memcpy(buffer, &v9inode->fscache_key->version,
sizeof(v9inode->fscache_key->version));
P9_DPRINTK(P9_DEBUG_FSC, "inode %p get aux %u", &v9inode->vfs_inode,
v9inode->fscache_key->version);
return sizeof(v9inode->fscache_key->version);
}
static enum
@ -197,13 +141,13 @@ fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
const void *buffer,
uint16_t buflen)
{
const struct v9fs_cookie *vcookie = cookie_netfs_data;
const struct v9fs_inode *v9inode = cookie_netfs_data;
if (buflen != sizeof(vcookie->qid->version))
if (buflen != sizeof(v9inode->fscache_key->version))
return FSCACHE_CHECKAUX_OBSOLETE;
if (memcmp(buffer, &vcookie->qid->version,
sizeof(vcookie->qid->version)))
if (memcmp(buffer, &v9inode->fscache_key->version,
sizeof(v9inode->fscache_key->version)))
return FSCACHE_CHECKAUX_OBSOLETE;
return FSCACHE_CHECKAUX_OKAY;
@ -211,7 +155,7 @@ fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
static void v9fs_cache_inode_now_uncached(void *cookie_netfs_data)
{
struct v9fs_cookie *vcookie = cookie_netfs_data;
struct v9fs_inode *v9inode = cookie_netfs_data;
struct pagevec pvec;
pgoff_t first;
int loop, nr_pages;
@ -220,7 +164,7 @@ static void v9fs_cache_inode_now_uncached(void *cookie_netfs_data)
first = 0;
for (;;) {
nr_pages = pagevec_lookup(&pvec, vcookie->inode.i_mapping,
nr_pages = pagevec_lookup(&pvec, v9inode->vfs_inode.i_mapping,
first,
PAGEVEC_SIZE - pagevec_count(&pvec));
if (!nr_pages)
@ -249,115 +193,114 @@ const struct fscache_cookie_def v9fs_cache_inode_index_def = {
void v9fs_cache_inode_get_cookie(struct inode *inode)
{
struct v9fs_cookie *vcookie;
struct v9fs_inode *v9inode;
struct v9fs_session_info *v9ses;
if (!S_ISREG(inode->i_mode))
return;
vcookie = v9fs_inode2cookie(inode);
if (vcookie->fscache)
v9inode = V9FS_I(inode);
if (v9inode->fscache)
return;
v9ses = v9fs_inode2v9ses(inode);
vcookie->fscache = fscache_acquire_cookie(v9ses->fscache,
v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
&v9fs_cache_inode_index_def,
vcookie);
v9inode);
P9_DPRINTK(P9_DEBUG_FSC, "inode %p get cookie %p", inode,
vcookie->fscache);
v9inode->fscache);
}
void v9fs_cache_inode_put_cookie(struct inode *inode)
{
struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
struct v9fs_inode *v9inode = V9FS_I(inode);
if (!vcookie->fscache)
if (!v9inode->fscache)
return;
P9_DPRINTK(P9_DEBUG_FSC, "inode %p put cookie %p", inode,
vcookie->fscache);
v9inode->fscache);
fscache_relinquish_cookie(vcookie->fscache, 0);
vcookie->fscache = NULL;
fscache_relinquish_cookie(v9inode->fscache, 0);
v9inode->fscache = NULL;
}
void v9fs_cache_inode_flush_cookie(struct inode *inode)
{
struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
struct v9fs_inode *v9inode = V9FS_I(inode);
if (!vcookie->fscache)
if (!v9inode->fscache)
return;
P9_DPRINTK(P9_DEBUG_FSC, "inode %p flush cookie %p", inode,
vcookie->fscache);
v9inode->fscache);
fscache_relinquish_cookie(vcookie->fscache, 1);
vcookie->fscache = NULL;
fscache_relinquish_cookie(v9inode->fscache, 1);
v9inode->fscache = NULL;
}
void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
{
struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
struct v9fs_inode *v9inode = V9FS_I(inode);
struct p9_fid *fid;
if (!vcookie->fscache)
if (!v9inode->fscache)
return;
spin_lock(&vcookie->lock);
spin_lock(&v9inode->fscache_lock);
fid = filp->private_data;
if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
v9fs_cache_inode_flush_cookie(inode);
else
v9fs_cache_inode_get_cookie(inode);
spin_unlock(&vcookie->lock);
spin_unlock(&v9inode->fscache_lock);
}
void v9fs_cache_inode_reset_cookie(struct inode *inode)
{
struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
struct v9fs_inode *v9inode = V9FS_I(inode);
struct v9fs_session_info *v9ses;
struct fscache_cookie *old;
if (!vcookie->fscache)
if (!v9inode->fscache)
return;
old = vcookie->fscache;
old = v9inode->fscache;
spin_lock(&vcookie->lock);
fscache_relinquish_cookie(vcookie->fscache, 1);
spin_lock(&v9inode->fscache_lock);
fscache_relinquish_cookie(v9inode->fscache, 1);
v9ses = v9fs_inode2v9ses(inode);
vcookie->fscache = fscache_acquire_cookie(v9ses->fscache,
v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
&v9fs_cache_inode_index_def,
vcookie);
v9inode);
P9_DPRINTK(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p",
inode, old, vcookie->fscache);
inode, old, v9inode->fscache);
spin_unlock(&vcookie->lock);
spin_unlock(&v9inode->fscache_lock);
}
int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
{
struct inode *inode = page->mapping->host;
struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
struct v9fs_inode *v9inode = V9FS_I(inode);
BUG_ON(!vcookie->fscache);
BUG_ON(!v9inode->fscache);
return fscache_maybe_release_page(vcookie->fscache, page, gfp);
return fscache_maybe_release_page(v9inode->fscache, page, gfp);
}
void __v9fs_fscache_invalidate_page(struct page *page)
{
struct inode *inode = page->mapping->host;
struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
struct v9fs_inode *v9inode = V9FS_I(inode);
BUG_ON(!vcookie->fscache);
BUG_ON(!v9inode->fscache);
if (PageFsCache(page)) {
fscache_wait_on_page_write(vcookie->fscache, page);
fscache_wait_on_page_write(v9inode->fscache, page);
BUG_ON(!PageLocked(page));
fscache_uncache_page(vcookie->fscache, page);
fscache_uncache_page(v9inode->fscache, page);
}
}
@ -380,13 +323,13 @@ static void v9fs_vfs_readpage_complete(struct page *page, void *data,
int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
{
int ret;
const struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
const struct v9fs_inode *v9inode = V9FS_I(inode);
P9_DPRINTK(P9_DEBUG_FSC, "inode %p page %p", inode, page);
if (!vcookie->fscache)
if (!v9inode->fscache)
return -ENOBUFS;
ret = fscache_read_or_alloc_page(vcookie->fscache,
ret = fscache_read_or_alloc_page(v9inode->fscache,
page,
v9fs_vfs_readpage_complete,
NULL,
@ -418,13 +361,13 @@ int __v9fs_readpages_from_fscache(struct inode *inode,
unsigned *nr_pages)
{
int ret;
const struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
const struct v9fs_inode *v9inode = V9FS_I(inode);
P9_DPRINTK(P9_DEBUG_FSC, "inode %p pages %u", inode, *nr_pages);
if (!vcookie->fscache)
if (!v9inode->fscache)
return -ENOBUFS;
ret = fscache_read_or_alloc_pages(vcookie->fscache,
ret = fscache_read_or_alloc_pages(v9inode->fscache,
mapping, pages, nr_pages,
v9fs_vfs_readpage_complete,
NULL,
@ -453,11 +396,22 @@ int __v9fs_readpages_from_fscache(struct inode *inode,
void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
{
int ret;
const struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
const struct v9fs_inode *v9inode = V9FS_I(inode);
P9_DPRINTK(P9_DEBUG_FSC, "inode %p page %p", inode, page);
ret = fscache_write_page(vcookie->fscache, page, GFP_KERNEL);
ret = fscache_write_page(v9inode->fscache, page, GFP_KERNEL);
P9_DPRINTK(P9_DEBUG_FSC, "ret = %d", ret);
if (ret != 0)
v9fs_uncache_page(inode, page);
}
/*
* wait for a page to complete writing to the cache
*/
void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
{
const struct v9fs_inode *v9inode = V9FS_I(inode);
P9_DPRINTK(P9_DEBUG_FSC, "inode %p page %p", inode, page);
if (PageFsCache(page))
fscache_wait_on_page_write(v9inode->fscache, page);
}


@ -25,20 +25,6 @@
#include <linux/fscache.h>
#include <linux/spinlock.h>
extern struct kmem_cache *vcookie_cache;
struct v9fs_cookie {
spinlock_t lock;
struct inode inode;
struct fscache_cookie *fscache;
struct p9_qid *qid;
};
static inline struct v9fs_cookie *v9fs_inode2cookie(const struct inode *inode)
{
return container_of(inode, struct v9fs_cookie, inode);
}
extern struct fscache_netfs v9fs_cache_netfs;
extern const struct fscache_cookie_def v9fs_cache_session_index_def;
extern const struct fscache_cookie_def v9fs_cache_inode_index_def;
@ -64,23 +50,8 @@ extern int __v9fs_readpages_from_fscache(struct inode *inode,
struct list_head *pages,
unsigned *nr_pages);
extern void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page);
/**
* v9fs_cache_register - Register v9fs file system with the cache
*/
static inline int v9fs_cache_register(void)
{
return __v9fs_cache_register();
}
/**
* v9fs_cache_unregister - Unregister v9fs from the cache
*/
static inline void v9fs_cache_unregister(void)
{
__v9fs_cache_unregister();
}
extern void __v9fs_fscache_wait_on_page_write(struct inode *inode,
struct page *page);
static inline int v9fs_fscache_release_page(struct page *page,
gfp_t gfp)
@ -117,29 +88,28 @@ static inline void v9fs_readpage_to_fscache(struct inode *inode,
static inline void v9fs_uncache_page(struct inode *inode, struct page *page)
{
struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
fscache_uncache_page(vcookie->fscache, page);
struct v9fs_inode *v9inode = V9FS_I(inode);
fscache_uncache_page(v9inode->fscache, page);
BUG_ON(PageFsCache(page));
}
static inline void v9fs_vcookie_set_qid(struct inode *inode,
static inline void v9fs_fscache_set_key(struct inode *inode,
struct p9_qid *qid)
{
struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode);
spin_lock(&vcookie->lock);
vcookie->qid = qid;
spin_unlock(&vcookie->lock);
struct v9fs_inode *v9inode = V9FS_I(inode);
spin_lock(&v9inode->fscache_lock);
v9inode->fscache_key = qid;
spin_unlock(&v9inode->fscache_lock);
}
static inline void v9fs_fscache_wait_on_page_write(struct inode *inode,
struct page *page)
{
return __v9fs_fscache_wait_on_page_write(inode, page);
}
#else /* CONFIG_9P_FSCACHE */
static inline int v9fs_cache_register(void)
{
return 1;
}
static inline void v9fs_cache_unregister(void) {}
static inline int v9fs_fscache_release_page(struct page *page,
gfp_t gfp) {
return 1;
@ -168,9 +138,11 @@ static inline void v9fs_readpage_to_fscache(struct inode *inode,
static inline void v9fs_uncache_page(struct inode *inode, struct page *page)
{}
static inline void v9fs_vcookie_set_qid(struct inode *inode,
struct p9_qid *qid)
{}
static inline void v9fs_fscache_wait_on_page_write(struct inode *inode,
struct page *page)
{
return;
}
#endif /* CONFIG_9P_FSCACHE */
#endif /* _9P_CACHE_H */


@ -125,46 +125,17 @@ err_out:
return -ENOMEM;
}
/**
* v9fs_fid_lookup - lookup for a fid, try to walk if not found
* @dentry: dentry to look for fid in
*
* Look for a fid in the specified dentry for the current user.
* If no fid is found, try to create one walking from a fid from the parent
 * dentry (if it has one), or the root dentry. If the user hasn't accessed
* the fs yet, attach now and walk from the root.
*/
struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
uid_t uid, int any)
{
int i, n, l, clone, any, access;
u32 uid;
struct p9_fid *fid, *old_fid = NULL;
struct dentry *ds;
struct v9fs_session_info *v9ses;
char **wnames, *uname;
int i, n, l, clone, access;
struct v9fs_session_info *v9ses;
struct p9_fid *fid, *old_fid = NULL;
v9ses = v9fs_inode2v9ses(dentry->d_inode);
access = v9ses->flags & V9FS_ACCESS_MASK;
switch (access) {
case V9FS_ACCESS_SINGLE:
case V9FS_ACCESS_USER:
case V9FS_ACCESS_CLIENT:
uid = current_fsuid();
any = 0;
break;
case V9FS_ACCESS_ANY:
uid = v9ses->uid;
any = 1;
break;
default:
uid = ~0;
any = 0;
break;
}
fid = v9fs_fid_find(dentry, uid, any);
if (fid)
return fid;
@ -250,6 +221,45 @@ err_out:
return fid;
}
/**
* v9fs_fid_lookup - lookup for a fid, try to walk if not found
* @dentry: dentry to look for fid in
*
* Look for a fid in the specified dentry for the current user.
* If no fid is found, try to create one walking from a fid from the parent
 * dentry (if it has one), or the root dentry. If the user hasn't accessed
* the fs yet, attach now and walk from the root.
*/
struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
{
uid_t uid;
int any, access;
struct v9fs_session_info *v9ses;
v9ses = v9fs_inode2v9ses(dentry->d_inode);
access = v9ses->flags & V9FS_ACCESS_MASK;
switch (access) {
case V9FS_ACCESS_SINGLE:
case V9FS_ACCESS_USER:
case V9FS_ACCESS_CLIENT:
uid = current_fsuid();
any = 0;
break;
case V9FS_ACCESS_ANY:
uid = v9ses->uid;
any = 1;
break;
default:
uid = ~0;
any = 0;
break;
}
return v9fs_fid_lookup_with_uid(dentry, uid, any);
}
struct p9_fid *v9fs_fid_clone(struct dentry *dentry)
{
struct p9_fid *fid, *ret;
@ -261,3 +271,39 @@ struct p9_fid *v9fs_fid_clone(struct dentry *dentry)
ret = p9_client_walk(fid, 0, NULL, 1);
return ret;
}
static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, uid_t uid)
{
struct p9_fid *fid, *ret;
fid = v9fs_fid_lookup_with_uid(dentry, uid, 0);
if (IS_ERR(fid))
return fid;
ret = p9_client_walk(fid, 0, NULL, 1);
return ret;
}
struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
{
int err;
struct p9_fid *fid;
fid = v9fs_fid_clone_with_uid(dentry, 0);
if (IS_ERR(fid))
goto error_out;
/*
* writeback fid will only be used to write back the
* dirty pages. We always request for the open fid in read-write
* mode so that a partial page write which result in page
* read can work.
*/
err = p9_client_open(fid, O_RDWR);
if (err < 0) {
p9_client_clunk(fid);
fid = ERR_PTR(err);
goto error_out;
}
error_out:
return fid;
}


@ -19,7 +19,8 @@
* Boston, MA 02111-1301 USA
*
*/
#ifndef FS_9P_FID_H
#define FS_9P_FID_H
#include <linux/list.h>
/**
@ -45,3 +46,5 @@ struct v9fs_dentry {
struct p9_fid *v9fs_fid_lookup(struct dentry *dentry);
struct p9_fid *v9fs_fid_clone(struct dentry *dentry);
int v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid);
struct p9_fid *v9fs_writeback_fid(struct dentry *dentry);
#endif


@ -39,6 +39,7 @@
static DEFINE_SPINLOCK(v9fs_sessionlist_lock);
static LIST_HEAD(v9fs_sessionlist);
struct kmem_cache *v9fs_inode_cache;
/*
* Option Parsing (code inspired by NFS code)
@ -55,7 +56,7 @@ enum {
/* Cache options */
Opt_cache_loose, Opt_fscache,
/* Access options */
Opt_access,
Opt_access, Opt_posixacl,
/* Error token */
Opt_err
};
@ -73,6 +74,7 @@ static const match_table_t tokens = {
{Opt_fscache, "fscache"},
{Opt_cachetag, "cachetag=%s"},
{Opt_access, "access=%s"},
{Opt_posixacl, "posixacl"},
{Opt_err, NULL}
};
@ -194,15 +196,7 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
else if (strcmp(s, "any") == 0)
v9ses->flags |= V9FS_ACCESS_ANY;
else if (strcmp(s, "client") == 0) {
#ifdef CONFIG_9P_FS_POSIX_ACL
v9ses->flags |= V9FS_ACCESS_CLIENT;
#else
P9_DPRINTK(P9_DEBUG_ERROR,
"access=client option not supported\n");
kfree(s);
ret = -EINVAL;
goto free_and_return;
#endif
} else {
v9ses->flags |= V9FS_ACCESS_SINGLE;
v9ses->uid = simple_strtoul(s, &e, 10);
@ -212,6 +206,16 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
kfree(s);
break;
case Opt_posixacl:
#ifdef CONFIG_9P_FS_POSIX_ACL
v9ses->flags |= V9FS_POSIX_ACL;
#else
P9_DPRINTK(P9_DEBUG_ERROR,
"Not defined CONFIG_9P_FS_POSIX_ACL. "
"Ignoring posixacl option\n");
#endif
break;
default:
continue;
}
@ -260,19 +264,12 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
list_add(&v9ses->slist, &v9fs_sessionlist);
spin_unlock(&v9fs_sessionlist_lock);
v9ses->flags = V9FS_ACCESS_USER;
strcpy(v9ses->uname, V9FS_DEFUSER);
strcpy(v9ses->aname, V9FS_DEFANAME);
v9ses->uid = ~0;
v9ses->dfltuid = V9FS_DEFUID;
v9ses->dfltgid = V9FS_DEFGID;
rc = v9fs_parse_options(v9ses, data);
if (rc < 0) {
retval = rc;
goto error;
}
v9ses->clnt = p9_client_create(dev_name, data);
if (IS_ERR(v9ses->clnt)) {
retval = PTR_ERR(v9ses->clnt);
@ -281,10 +278,20 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
goto error;
}
if (p9_is_proto_dotl(v9ses->clnt))
v9ses->flags = V9FS_ACCESS_USER;
if (p9_is_proto_dotl(v9ses->clnt)) {
v9ses->flags = V9FS_ACCESS_CLIENT;
v9ses->flags |= V9FS_PROTO_2000L;
else if (p9_is_proto_dotu(v9ses->clnt))
} else if (p9_is_proto_dotu(v9ses->clnt)) {
v9ses->flags |= V9FS_PROTO_2000U;
}
rc = v9fs_parse_options(v9ses, data);
if (rc < 0) {
retval = rc;
goto error;
}
v9ses->maxdata = v9ses->clnt->msize - P9_IOHDRSZ;
@ -306,6 +313,14 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
v9ses->flags |= V9FS_ACCESS_ANY;
v9ses->uid = ~0;
}
if (!v9fs_proto_dotl(v9ses) ||
!((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_CLIENT)) {
/*
 * We support ACL checks on client only if the protocol is
* 9P2000.L and access is V9FS_ACCESS_CLIENT.
*/
v9ses->flags &= ~V9FS_ACL_MASK;
}
fid = p9_client_attach(v9ses->clnt, NULL, v9ses->uname, ~0,
v9ses->aname);
@ -467,6 +482,63 @@ static void v9fs_sysfs_cleanup(void)
kobject_put(v9fs_kobj);
}
static void v9fs_inode_init_once(void *foo)
{
struct v9fs_inode *v9inode = (struct v9fs_inode *)foo;
#ifdef CONFIG_9P_FSCACHE
v9inode->fscache = NULL;
v9inode->fscache_key = NULL;
#endif
inode_init_once(&v9inode->vfs_inode);
}
/**
* v9fs_init_inode_cache - initialize a cache for 9P
* Returns 0 on success.
*/
static int v9fs_init_inode_cache(void)
{
v9fs_inode_cache = kmem_cache_create("v9fs_inode_cache",
sizeof(struct v9fs_inode),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
v9fs_inode_init_once);
if (!v9fs_inode_cache)
return -ENOMEM;
return 0;
}
/**
* v9fs_destroy_inode_cache - destroy the cache of 9P inode
*
*/
static void v9fs_destroy_inode_cache(void)
{
kmem_cache_destroy(v9fs_inode_cache);
}
static int v9fs_cache_register(void)
{
int ret;
ret = v9fs_init_inode_cache();
if (ret < 0)
return ret;
#ifdef CONFIG_9P_FSCACHE
return fscache_register_netfs(&v9fs_cache_netfs);
#else
return ret;
#endif
}
static void v9fs_cache_unregister(void)
{
v9fs_destroy_inode_cache();
#ifdef CONFIG_9P_FSCACHE
fscache_unregister_netfs(&v9fs_cache_netfs);
#endif
}
/**
* init_v9fs - Initialize module
*


@ -20,6 +20,9 @@
* Boston, MA 02111-1301 USA
*
*/
#ifndef FS_9P_V9FS_H
#define FS_9P_V9FS_H
#include <linux/backing-dev.h>
/**
@ -28,8 +31,10 @@
* @V9FS_PROTO_2000L: whether or not to use 9P2000.l extensions
* @V9FS_ACCESS_SINGLE: only the mounting user can access the hierarchy
* @V9FS_ACCESS_USER: a new attach will be issued for every user (default)
* @V9FS_ACCESS_CLIENT: Just like user, but access check is performed on client.
* @V9FS_ACCESS_ANY: use a single attach for all users
* @V9FS_ACCESS_MASK: bit mask of different ACCESS options
* @V9FS_POSIX_ACL: POSIX ACLs are enforced
*
* Session flags reflect options selected by users at mount time
*/
@ -37,13 +42,15 @@
V9FS_ACCESS_USER | \
V9FS_ACCESS_CLIENT)
#define V9FS_ACCESS_MASK V9FS_ACCESS_ANY
#define V9FS_ACL_MASK V9FS_POSIX_ACL
enum p9_session_flags {
V9FS_PROTO_2000U = 0x01,
V9FS_PROTO_2000L = 0x02,
V9FS_ACCESS_SINGLE = 0x04,
V9FS_ACCESS_USER = 0x08,
V9FS_ACCESS_CLIENT = 0x10
V9FS_ACCESS_CLIENT = 0x10,
V9FS_POSIX_ACL = 0x20
};
/* possible values of ->cache */
@ -109,8 +116,28 @@ struct v9fs_session_info {
struct list_head slist; /* list of sessions registered with v9fs */
struct backing_dev_info bdi;
struct rw_semaphore rename_sem;
struct p9_fid *root_fid; /* Used for file system sync */
};
/* cache_validity flags */
#define V9FS_INO_INVALID_ATTR 0x01
struct v9fs_inode {
#ifdef CONFIG_9P_FSCACHE
spinlock_t fscache_lock;
struct fscache_cookie *fscache;
struct p9_qid *fscache_key;
#endif
unsigned int cache_validity;
struct p9_fid *writeback_fid;
struct inode vfs_inode;
};
static inline struct v9fs_inode *V9FS_I(const struct inode *inode)
{
return container_of(inode, struct v9fs_inode, vfs_inode);
}
struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *,
char *);
extern void v9fs_session_close(struct v9fs_session_info *v9ses);
@ -124,16 +151,15 @@ extern int v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry);
extern void v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd,
void *p);
extern struct inode *v9fs_inode(struct v9fs_session_info *v9ses,
struct p9_fid *fid,
struct super_block *sb);
extern struct inode *v9fs_inode_from_fid(struct v9fs_session_info *v9ses,
struct p9_fid *fid,
struct super_block *sb);
extern const struct inode_operations v9fs_dir_inode_operations_dotl;
extern const struct inode_operations v9fs_file_inode_operations_dotl;
extern const struct inode_operations v9fs_symlink_inode_operations_dotl;
extern struct inode *v9fs_inode_dotl(struct v9fs_session_info *v9ses,
struct p9_fid *fid,
struct super_block *sb);
extern struct inode *v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses,
struct p9_fid *fid,
struct super_block *sb);
/* other default globals */
#define V9FS_PORT 564
@ -158,7 +184,7 @@ static inline int v9fs_proto_dotl(struct v9fs_session_info *v9ses)
}
/**
* v9fs_inode_from_fid - Helper routine to populate an inode by
* v9fs_get_inode_from_fid - Helper routine to populate an inode by
* issuing a attribute request
* @v9ses: session information
* @fid: fid to issue attribute request for
@ -166,11 +192,12 @@ static inline int v9fs_proto_dotl(struct v9fs_session_info *v9ses)
*
*/
static inline struct inode *
v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
struct super_block *sb)
v9fs_get_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
struct super_block *sb)
{
if (v9fs_proto_dotl(v9ses))
return v9fs_inode_dotl(v9ses, fid, sb);
return v9fs_inode_from_fid_dotl(v9ses, fid, sb);
else
return v9fs_inode(v9ses, fid, sb);
return v9fs_inode_from_fid(v9ses, fid, sb);
}
#endif


@ -20,6 +20,8 @@
* Boston, MA 02111-1301 USA
*
*/
#ifndef FS_9P_V9FS_VFS_H
#define FS_9P_V9FS_VFS_H
/* plan9 semantics are that created files are implicitly opened.
* But linux semantics are that you call create, then open.
@ -36,6 +38,7 @@
* unlink calls remove, which is an implicit clunk. So we have to track
* that kind of thing so that we don't try to clunk a dead fid.
*/
#define P9_LOCK_TIMEOUT (30*HZ)
extern struct file_system_type v9fs_fs_type;
extern const struct address_space_operations v9fs_addr_operations;
@ -45,13 +48,15 @@ extern const struct file_operations v9fs_dir_operations;
extern const struct file_operations v9fs_dir_operations_dotl;
extern const struct dentry_operations v9fs_dentry_operations;
extern const struct dentry_operations v9fs_cached_dentry_operations;
extern const struct file_operations v9fs_cached_file_operations;
extern const struct file_operations v9fs_cached_file_operations_dotl;
extern struct kmem_cache *v9fs_inode_cache;
#ifdef CONFIG_9P_FSCACHE
struct inode *v9fs_alloc_inode(struct super_block *sb);
void v9fs_destroy_inode(struct inode *inode);
#endif
struct inode *v9fs_get_inode(struct super_block *sb, int mode);
int v9fs_init_inode(struct v9fs_session_info *v9ses,
struct inode *inode, int mode);
void v9fs_evict_inode(struct inode *inode);
ino_t v9fs_qid2ino(struct p9_qid *qid);
void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
@ -62,8 +67,19 @@ void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
int v9fs_uflags2omode(int uflags, int extended);
ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
ssize_t v9fs_fid_readn(struct p9_fid *, char *, char __user *, u32, u64);
void v9fs_blank_wstat(struct p9_wstat *wstat);
int v9fs_vfs_setattr_dotl(struct dentry *, struct iattr *);
int v9fs_file_fsync_dotl(struct file *filp, int datasync);
#define P9_LOCK_TIMEOUT (30*HZ)
ssize_t v9fs_file_write_internal(struct inode *, struct p9_fid *,
const char __user *, size_t, loff_t *, int);
int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode);
int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode);
static inline void v9fs_invalidate_inode_attr(struct inode *inode)
{
struct v9fs_inode *v9inode;
v9inode = V9FS_I(inode);
v9inode->cache_validity |= V9FS_INO_INVALID_ATTR;
return;
}
#endif


@ -39,16 +39,16 @@
#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"
/**
* v9fs_vfs_readpage - read an entire page in from 9P
* v9fs_fid_readpage - read an entire page in from 9P
*
* @filp: file being read
* @fid: fid being read
* @page: structure to page
*
*/
static int v9fs_vfs_readpage(struct file *filp, struct page *page)
static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
{
int retval;
loff_t offset;
@ -67,7 +67,7 @@ static int v9fs_vfs_readpage(struct file *filp, struct page *page)
buffer = kmap(page);
offset = page_offset(page);
retval = v9fs_file_readn(filp, buffer, NULL, PAGE_CACHE_SIZE, offset);
retval = v9fs_fid_readn(fid, buffer, NULL, PAGE_CACHE_SIZE, offset);
if (retval < 0) {
v9fs_uncache_page(inode, page);
goto done;
@ -86,6 +86,19 @@ done:
return retval;
}
/**
* v9fs_vfs_readpage - read an entire page in from 9P
*
* @filp: file being read
* @page: structure to page
*
*/
static int v9fs_vfs_readpage(struct file *filp, struct page *page)
{
return v9fs_fid_readpage(filp->private_data, page);
}
/**
* v9fs_vfs_readpages - read a set of pages from 9P
*
@ -124,7 +137,6 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
{
if (PagePrivate(page))
return 0;
return v9fs_fscache_release_page(page, gfp);
}
@ -137,20 +149,89 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
static void v9fs_invalidate_page(struct page *page, unsigned long offset)
{
/*
* If called with zero offset, we should release
 * the private state associated with the page
*/
if (offset == 0)
v9fs_fscache_invalidate_page(page);
}
static int v9fs_vfs_writepage_locked(struct page *page)
{
char *buffer;
int retval, len;
loff_t offset, size;
mm_segment_t old_fs;
struct v9fs_inode *v9inode;
struct inode *inode = page->mapping->host;
v9inode = V9FS_I(inode);
size = i_size_read(inode);
if (page->index == size >> PAGE_CACHE_SHIFT)
len = size & ~PAGE_CACHE_MASK;
else
len = PAGE_CACHE_SIZE;
set_page_writeback(page);
buffer = kmap(page);
offset = page_offset(page);
old_fs = get_fs();
set_fs(get_ds());
/* We should have writeback_fid always set */
BUG_ON(!v9inode->writeback_fid);
retval = v9fs_file_write_internal(inode,
v9inode->writeback_fid,
(__force const char __user *)buffer,
len, &offset, 0);
if (retval > 0)
retval = 0;
set_fs(old_fs);
kunmap(page);
end_page_writeback(page);
return retval;
}
static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
int retval;
retval = v9fs_vfs_writepage_locked(page);
if (retval < 0) {
if (retval == -EAGAIN) {
redirty_page_for_writepage(wbc, page);
retval = 0;
} else {
SetPageError(page);
mapping_set_error(page->mapping, retval);
}
} else
retval = 0;
unlock_page(page);
return retval;
}
/**
* v9fs_launder_page - Writeback a dirty page
* Since the writes go directly to the server, we simply return a 0
* here to indicate success.
*
* Returns 0 on success.
*/
static int v9fs_launder_page(struct page *page)
{
int retval;
struct inode *inode = page->mapping->host;
v9fs_fscache_wait_on_page_write(inode, page);
if (clear_page_dirty_for_io(page)) {
retval = v9fs_vfs_writepage_locked(page);
if (retval)
return retval;
}
return 0;
}
@ -173,9 +254,15 @@ static int v9fs_launder_page(struct page *page)
* with an error.
*
*/
ssize_t v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
loff_t pos, unsigned long nr_segs)
static ssize_t
v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
loff_t pos, unsigned long nr_segs)
{
/*
* FIXME
* Now that we do caching with cache mode enabled, We need
* to support direct IO
*/
P9_DPRINTK(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) "
"off/no(%lld/%lu) EINVAL\n",
iocb->ki_filp->f_path.dentry->d_name.name,
@ -183,11 +270,84 @@ ssize_t v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
return -EINVAL;
}
static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
int retval = 0;
struct page *page;
struct v9fs_inode *v9inode;
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
struct inode *inode = mapping->host;
v9inode = V9FS_I(inode);
start:
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page) {
retval = -ENOMEM;
goto out;
}
BUG_ON(!v9inode->writeback_fid);
if (PageUptodate(page))
goto out;
if (len == PAGE_CACHE_SIZE)
goto out;
retval = v9fs_fid_readpage(v9inode->writeback_fid, page);
page_cache_release(page);
if (!retval)
goto start;
out:
*pagep = page;
return retval;
}
static int v9fs_write_end(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
loff_t last_pos = pos + copied;
struct inode *inode = page->mapping->host;
if (unlikely(copied < len)) {
/*
* zero out the rest of the area
*/
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
zero_user(page, from + copied, len - copied);
flush_dcache_page(page);
}
if (!PageUptodate(page))
SetPageUptodate(page);
/*
* No need to use i_size_read() here, the i_size
* cannot change under us because we hold the i_mutex.
*/
if (last_pos > inode->i_size) {
inode_add_bytes(inode, last_pos - inode->i_size);
i_size_write(inode, last_pos);
}
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
return copied;
}
const struct address_space_operations v9fs_addr_operations = {
.readpage = v9fs_vfs_readpage,
.readpages = v9fs_vfs_readpages,
.releasepage = v9fs_release_page,
.invalidatepage = v9fs_invalidate_page,
.launder_page = v9fs_launder_page,
.direct_IO = v9fs_direct_IO,
.readpage = v9fs_vfs_readpage,
.readpages = v9fs_vfs_readpages,
.set_page_dirty = __set_page_dirty_nobuffers,
.writepage = v9fs_vfs_writepage,
.write_begin = v9fs_write_begin,
.write_end = v9fs_write_end,
.releasepage = v9fs_release_page,
.invalidatepage = v9fs_invalidate_page,
.launder_page = v9fs_launder_page,
.direct_IO = v9fs_direct_IO,
};


@ -63,20 +63,15 @@ static int v9fs_dentry_delete(const struct dentry *dentry)
* v9fs_cached_dentry_delete - called when dentry refcount equals 0
* @dentry: dentry in question
*
* Only return 1 if our inode is invalid. Only non-synthetic files
* (ones without mtime == 0) should be calling this function.
*
*/
static int v9fs_cached_dentry_delete(const struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name,
dentry);
P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n",
dentry->d_name.name, dentry);
if(!inode)
/* Don't cache negative dentries */
if (!dentry->d_inode)
return 1;
return 0;
}
@ -105,7 +100,41 @@ static void v9fs_dentry_release(struct dentry *dentry)
}
}
static int v9fs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
{
struct p9_fid *fid;
struct inode *inode;
struct v9fs_inode *v9inode;
if (nd->flags & LOOKUP_RCU)
return -ECHILD;
inode = dentry->d_inode;
if (!inode)
goto out_valid;
v9inode = V9FS_I(inode);
if (v9inode->cache_validity & V9FS_INO_INVALID_ATTR) {
int retval;
struct v9fs_session_info *v9ses;
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
v9ses = v9fs_inode2v9ses(inode);
if (v9fs_proto_dotl(v9ses))
retval = v9fs_refresh_inode_dotl(fid, inode);
else
retval = v9fs_refresh_inode(fid, inode);
if (retval <= 0)
return retval;
}
out_valid:
return 1;
}
const struct dentry_operations v9fs_cached_dentry_operations = {
.d_revalidate = v9fs_lookup_revalidate,
.d_delete = v9fs_cached_dentry_delete,
.d_release = v9fs_dentry_release,
};
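
The revalidation hook above, together with "fs/9p: mark inode attribute invalid on rename, unlink and setattr" and "fs/9p: Update link count correctly on different file system operations" from this series, is what keeps cached metadata in step with the server. A hedged userspace sketch of the visible effect — assuming /mnt/9p/a already exists on a cached 9p mount; the names are hypothetical and this is not part of the diff:

/* Hypothetical check of the link-count behaviour the attribute
 * invalidation and nlink updates aim for on a cached 9p mount. */
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;

	if (link("/mnt/9p/a", "/mnt/9p/b") != 0) {
		perror("link");
		return 1;
	}
	/* With the inode attributes marked invalid on link(), this stat()
	 * refetches them via d_revalidate and should report st_nlink == 2. */
	if (stat("/mnt/9p/a", &st) == 0)
		printf("after link:   nlink = %ld\n", (long)st.st_nlink);

	unlink("/mnt/9p/b");
	/* And back to 1 once the unlink invalidates them again. */
	if (stat("/mnt/9p/a", &st) == 0)
		printf("after unlink: nlink = %ld\n", (long)st.st_nlink);
	return 0;
}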


@ -295,7 +295,6 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
P9_DPRINTK(P9_DEBUG_VFS,
"v9fs_dir_release: inode: %p filp: %p fid: %d\n",
inode, filp, fid ? fid->fid : -1);
filemap_write_and_wait(inode->i_mapping);
if (fid)
p9_client_clunk(fid);
return 0;


@ -44,8 +44,7 @@
#include "fid.h"
#include "cache.h"
static const struct file_operations v9fs_cached_file_operations;
static const struct file_operations v9fs_cached_file_operations_dotl;
static const struct vm_operations_struct v9fs_file_vm_ops;
/**
* v9fs_file_open - open a file (or directory)
@ -57,11 +56,13 @@ static const struct file_operations v9fs_cached_file_operations_dotl;
int v9fs_file_open(struct inode *inode, struct file *file)
{
int err;
struct v9fs_inode *v9inode;
struct v9fs_session_info *v9ses;
struct p9_fid *fid;
int omode;
P9_DPRINTK(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
v9inode = V9FS_I(inode);
v9ses = v9fs_inode2v9ses(inode);
if (v9fs_proto_dotl(v9ses))
omode = file->f_flags;
@ -89,20 +90,30 @@ int v9fs_file_open(struct inode *inode, struct file *file)
}
file->private_data = fid;
if ((fid->qid.version) && (v9ses->cache)) {
P9_DPRINTK(P9_DEBUG_VFS, "cached");
/* enable cached file options */
if(file->f_op == &v9fs_file_operations)
file->f_op = &v9fs_cached_file_operations;
else if (file->f_op == &v9fs_file_operations_dotl)
file->f_op = &v9fs_cached_file_operations_dotl;
if (v9ses->cache && !v9inode->writeback_fid) {
/*
* clone a fid and add it to writeback_fid
* we do it during open time instead of
* page dirty time via write_begin/page_mkwrite
* because we want write after unlink usecase
* to work.
*/
fid = v9fs_writeback_fid(file->f_path.dentry);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
goto out_error;
}
v9inode->writeback_fid = (void *) fid;
}
#ifdef CONFIG_9P_FSCACHE
if (v9ses->cache)
v9fs_cache_inode_set_cookie(inode, file);
#endif
}
return 0;
out_error:
p9_client_clunk(file->private_data);
file->private_data = NULL;
return err;
}
/**
@ -335,25 +346,22 @@ out_err:
}
/**
* v9fs_file_readn - read from a file
* @filp: file pointer to read
* v9fs_fid_readn - read from a fid
* @fid: fid to read
* @data: data buffer to read data into
* @udata: user data buffer to read data into
* @count: size of buffer
* @offset: offset at which to read data
*
*/
ssize_t
v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
v9fs_fid_readn(struct p9_fid *fid, char *data, char __user *udata, u32 count,
u64 offset)
{
int n, total, size;
struct p9_fid *fid = filp->private_data;
P9_DPRINTK(P9_DEBUG_VFS, "fid %d offset %llu count %d\n", fid->fid,
(long long unsigned) offset, count);
(long long unsigned) offset, count);
n = 0;
total = 0;
size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
@ -378,6 +386,22 @@ v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
return total;
}
/**
* v9fs_file_readn - read from a file
* @filp: file pointer to read
* @data: data buffer to read data into
* @udata: user data buffer to read data into
* @count: size of buffer
* @offset: offset at which to read data
*
*/
ssize_t
v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
u64 offset)
{
return v9fs_fid_readn(filp->private_data, data, udata, count, offset);
}
/**
* v9fs_file_read - read from a file
* @filp: file pointer to read
@ -410,6 +434,49 @@ v9fs_file_read(struct file *filp, char __user *udata, size_t count,
return ret;
}
ssize_t
v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
const char __user *data, size_t count,
loff_t *offset, int invalidate)
{
int n;
loff_t i_size;
size_t total = 0;
struct p9_client *clnt;
loff_t origin = *offset;
unsigned long pg_start, pg_end;
P9_DPRINTK(P9_DEBUG_VFS, "data %p count %d offset %x\n", data,
(int)count, (int)*offset);
clnt = fid->clnt;
do {
n = p9_client_write(fid, NULL, data+total, origin+total, count);
if (n <= 0)
break;
count -= n;
total += n;
} while (count > 0);
if (invalidate && (total > 0)) {
pg_start = origin >> PAGE_CACHE_SHIFT;
pg_end = (origin + total - 1) >> PAGE_CACHE_SHIFT;
if (inode->i_mapping && inode->i_mapping->nrpages)
invalidate_inode_pages2_range(inode->i_mapping,
pg_start, pg_end);
*offset += total;
i_size = i_size_read(inode);
if (*offset > i_size) {
inode_add_bytes(inode, *offset - i_size);
i_size_write(inode, *offset);
}
}
if (n < 0)
return n;
return total;
}
/**
* v9fs_file_write - write to a file
* @filp: file pointer to write
@ -418,25 +485,13 @@ v9fs_file_read(struct file *filp, char __user *udata, size_t count,
* @offset: offset at which to write data
*
*/
static ssize_t
v9fs_file_write(struct file *filp, const char __user * data,
size_t count, loff_t * offset)
size_t count, loff_t *offset)
{
ssize_t retval;
size_t total = 0;
int n;
struct p9_fid *fid;
struct p9_client *clnt;
struct inode *inode = filp->f_path.dentry->d_inode;
ssize_t retval = 0;
loff_t origin = *offset;
unsigned long pg_start, pg_end;
P9_DPRINTK(P9_DEBUG_VFS, "data %p count %d offset %x\n", data,
(int)count, (int)*offset);
fid = filp->private_data;
clnt = fid->clnt;
retval = generic_write_checks(filp, &origin, &count, 0);
if (retval)
@ -449,33 +504,14 @@ v9fs_file_write(struct file *filp, const char __user * data,
if (!count)
goto out;
do {
n = p9_client_write(fid, NULL, data+total, origin+total, count);
if (n <= 0)
break;
count -= n;
total += n;
} while (count > 0);
if (total > 0) {
pg_start = origin >> PAGE_CACHE_SHIFT;
pg_end = (origin + total - 1) >> PAGE_CACHE_SHIFT;
if (inode->i_mapping && inode->i_mapping->nrpages)
invalidate_inode_pages2_range(inode->i_mapping,
pg_start, pg_end);
*offset += total;
i_size_write(inode, i_size_read(inode) + total);
inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9;
}
if (n < 0)
retval = n;
else
retval = total;
return v9fs_file_write_internal(filp->f_path.dentry->d_inode,
filp->private_data,
data, count, offset, 1);
out:
return retval;
}
static int v9fs_file_fsync(struct file *filp, int datasync)
{
struct p9_fid *fid;
@ -505,28 +541,182 @@ int v9fs_file_fsync_dotl(struct file *filp, int datasync)
return retval;
}
static const struct file_operations v9fs_cached_file_operations = {
static int
v9fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
int retval;
retval = generic_file_mmap(file, vma);
if (!retval)
vma->vm_ops = &v9fs_file_vm_ops;
return retval;
}
static int
v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct v9fs_inode *v9inode;
struct page *page = vmf->page;
struct file *filp = vma->vm_file;
struct inode *inode = filp->f_path.dentry->d_inode;
P9_DPRINTK(P9_DEBUG_VFS, "page %p fid %lx\n",
page, (unsigned long)filp->private_data);
v9inode = V9FS_I(inode);
/* make sure the cache has finished storing the page */
v9fs_fscache_wait_on_page_write(inode, page);
BUG_ON(!v9inode->writeback_fid);
lock_page(page);
if (page->mapping != inode->i_mapping)
goto out_unlock;
return VM_FAULT_LOCKED;
out_unlock:
unlock_page(page);
return VM_FAULT_NOPAGE;
}
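
The switch from generic_file_readonly_mmap to v9fs_file_mmap further down, combined with the page_mkwrite handler above, is what makes shared writable mappings usable in cached mode. A hypothetical userspace sketch of what that enables (the path is made up, a cache-enabled mount is assumed, and this is not part of the diff):

/* Hypothetical exercise of the new shared-writable mmap path
 * (v9fs_file_mmap + v9fs_vm_page_mkwrite + v9fs_vfs_writepage). */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	char *p;
	int fd = open("/mnt/9p/mapped", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	if (ftruncate(fd, 4096) != 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	/* Writing to a clean page of a shared writable mapping takes a
	 * write-protect fault into v9fs_vm_page_mkwrite(), which waits for
	 * any pending fscache write before the page is dirtied; msync()
	 * then pushes it out via v9fs_vfs_writepage() on the writeback fid. */
	memcpy(p, "dirty page", 10);
	msync(p, 4096, MS_SYNC);
	munmap(p, 4096);
	close(fd);
	return 0;
}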
static ssize_t
v9fs_direct_read(struct file *filp, char __user *udata, size_t count,
loff_t *offsetp)
{
loff_t size, offset;
struct inode *inode;
struct address_space *mapping;
offset = *offsetp;
mapping = filp->f_mapping;
inode = mapping->host;
if (!count)
return 0;
size = i_size_read(inode);
if (offset < size)
filemap_write_and_wait_range(mapping, offset,
offset + count - 1);
return v9fs_file_read(filp, udata, count, offsetp);
}
/**
* v9fs_cached_file_read - read from a file
* @filp: file pointer to read
* @udata: user data buffer to read data into
* @count: size of buffer
* @offset: offset at which to read data
*
*/
static ssize_t
v9fs_cached_file_read(struct file *filp, char __user *data, size_t count,
loff_t *offset)
{
if (filp->f_flags & O_DIRECT)
return v9fs_direct_read(filp, data, count, offset);
return do_sync_read(filp, data, count, offset);
}
static ssize_t
v9fs_direct_write(struct file *filp, const char __user * data,
size_t count, loff_t *offsetp)
{
loff_t offset;
ssize_t retval;
struct inode *inode;
struct address_space *mapping;
offset = *offsetp;
mapping = filp->f_mapping;
inode = mapping->host;
if (!count)
return 0;
mutex_lock(&inode->i_mutex);
retval = filemap_write_and_wait_range(mapping, offset,
offset + count - 1);
if (retval)
goto err_out;
/*
* After a write we want buffered reads to be sure to go to disk to get
* the new data. We invalidate clean cached page from the region we're
* about to write. We do this *before* the write so that if we fail
* here we fall back to buffered write
*/
if (mapping->nrpages) {
pgoff_t pg_start = offset >> PAGE_CACHE_SHIFT;
pgoff_t pg_end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
retval = invalidate_inode_pages2_range(mapping,
pg_start, pg_end);
/*
* If a page can not be invalidated, fall back
* to buffered write.
*/
if (retval) {
if (retval == -EBUSY)
goto buff_write;
goto err_out;
}
}
retval = v9fs_file_write(filp, data, count, offsetp);
err_out:
mutex_unlock(&inode->i_mutex);
return retval;
buff_write:
mutex_unlock(&inode->i_mutex);
return do_sync_write(filp, data, count, offsetp);
}
/**
* v9fs_cached_file_write - write to a file
* @filp: file pointer to write
* @data: data buffer to write data from
* @count: size of buffer
* @offset: offset at which to write data
*
*/
static ssize_t
v9fs_cached_file_write(struct file *filp, const char __user * data,
size_t count, loff_t *offset)
{
if (filp->f_flags & O_DIRECT)
return v9fs_direct_write(filp, data, count, offset);
return do_sync_write(filp, data, count, offset);
}
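
The cached read/write wrappers above split O_DIRECT traffic off from the buffered path, per "fs/9p: Add direct IO support in cached mode". A hedged userspace sketch of the caller's view (hypothetical path, not part of the diff):

/* Hypothetical O_DIRECT write on a cached 9p mount; v9fs_direct_write()
 * above flushes and invalidates the affected page cache range and then
 * issues the write through the synchronous p9_client_write() path. */
#define _GNU_SOURCE	/* for O_DIRECT */
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd = open("/mnt/9p/log", O_WRONLY | O_CREAT | O_DIRECT, 0644);

	if (fd < 0)
		return 1;
	/* Alignment is not strictly required by this 9p path, but aligned
	 * buffers keep the example portable across filesystems. */
	if (posix_memalign(&buf, 4096, 4096) != 0)
		return 1;
	memset(buf, 'x', 4096);

	if (write(fd, buf, 4096) != 4096)
		return 1;
	free(buf);
	close(fd);
	return 0;
}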
static const struct vm_operations_struct v9fs_file_vm_ops = {
.fault = filemap_fault,
.page_mkwrite = v9fs_vm_page_mkwrite,
};
const struct file_operations v9fs_cached_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.read = v9fs_cached_file_read,
.write = v9fs_cached_file_write,
.aio_read = generic_file_aio_read,
.write = v9fs_file_write,
.aio_write = generic_file_aio_write,
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock,
.mmap = generic_file_readonly_mmap,
.mmap = v9fs_file_mmap,
.fsync = v9fs_file_fsync,
};
static const struct file_operations v9fs_cached_file_operations_dotl = {
const struct file_operations v9fs_cached_file_operations_dotl = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.read = v9fs_cached_file_read,
.write = v9fs_cached_file_write,
.aio_read = generic_file_aio_read,
.write = v9fs_file_write,
.aio_write = generic_file_aio_write,
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock_dotl,
.flock = v9fs_file_flock_dotl,
.mmap = generic_file_readonly_mmap,
.mmap = v9fs_file_mmap,
.fsync = v9fs_file_fsync_dotl,
};


@ -203,26 +203,25 @@ v9fs_blank_wstat(struct p9_wstat *wstat)
wstat->extension = NULL;
}
#ifdef CONFIG_9P_FSCACHE
/**
* v9fs_alloc_inode - helper function to allocate an inode
* This callback is executed before setting up the inode so that we
* can associate a vcookie with each inode.
*
*/
struct inode *v9fs_alloc_inode(struct super_block *sb)
{
struct v9fs_cookie *vcookie;
vcookie = (struct v9fs_cookie *)kmem_cache_alloc(vcookie_cache,
GFP_KERNEL);
if (!vcookie)
struct v9fs_inode *v9inode;
v9inode = (struct v9fs_inode *)kmem_cache_alloc(v9fs_inode_cache,
GFP_KERNEL);
if (!v9inode)
return NULL;
vcookie->fscache = NULL;
vcookie->qid = NULL;
spin_lock_init(&vcookie->lock);
return &vcookie->inode;
#ifdef CONFIG_9P_FSCACHE
v9inode->fscache = NULL;
v9inode->fscache_key = NULL;
spin_lock_init(&v9inode->fscache_lock);
#endif
v9inode->writeback_fid = NULL;
v9inode->cache_validity = 0;
return &v9inode->vfs_inode;
}
/**
@ -234,35 +233,18 @@ static void v9fs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(vcookie_cache, v9fs_inode2cookie(inode));
kmem_cache_free(v9fs_inode_cache, V9FS_I(inode));
}
void v9fs_destroy_inode(struct inode *inode)
{
call_rcu(&inode->i_rcu, v9fs_i_callback);
}
#endif
/**
* v9fs_get_inode - helper function to setup an inode
* @sb: superblock
* @mode: mode to setup inode with
*
*/
struct inode *v9fs_get_inode(struct super_block *sb, int mode)
int v9fs_init_inode(struct v9fs_session_info *v9ses,
struct inode *inode, int mode)
{
int err;
struct inode *inode;
struct v9fs_session_info *v9ses = sb->s_fs_info;
P9_DPRINTK(P9_DEBUG_VFS, "super block: %p mode: %o\n", sb, mode);
inode = new_inode(sb);
if (!inode) {
P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n");
return ERR_PTR(-ENOMEM);
}
int err = 0;
inode_init_owner(inode, NULL, mode);
inode->i_blocks = 0;
@ -292,14 +274,20 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
case S_IFREG:
if (v9fs_proto_dotl(v9ses)) {
inode->i_op = &v9fs_file_inode_operations_dotl;
inode->i_fop = &v9fs_file_operations_dotl;
if (v9ses->cache)
inode->i_fop =
&v9fs_cached_file_operations_dotl;
else
inode->i_fop = &v9fs_file_operations_dotl;
} else {
inode->i_op = &v9fs_file_inode_operations;
inode->i_fop = &v9fs_file_operations;
if (v9ses->cache)
inode->i_fop = &v9fs_cached_file_operations;
else
inode->i_fop = &v9fs_file_operations;
}
break;
case S_IFLNK:
if (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)) {
P9_DPRINTK(P9_DEBUG_ERROR, "extended modes used with "
@ -335,12 +323,37 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
err = -EINVAL;
goto error;
}
return inode;
error:
iput(inode);
return ERR_PTR(err);
return err;
}
/**
* v9fs_get_inode - helper function to setup an inode
* @sb: superblock
* @mode: mode to setup inode with
*
*/
struct inode *v9fs_get_inode(struct super_block *sb, int mode)
{
int err;
struct inode *inode;
struct v9fs_session_info *v9ses = sb->s_fs_info;
P9_DPRINTK(P9_DEBUG_VFS, "super block: %p mode: %o\n", sb, mode);
inode = new_inode(sb);
if (!inode) {
P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n");
return ERR_PTR(-ENOMEM);
}
err = v9fs_init_inode(v9ses, inode, mode);
if (err) {
iput(inode);
return ERR_PTR(err);
}
return inode;
}
/*
@ -403,6 +416,8 @@ error:
*/
void v9fs_evict_inode(struct inode *inode)
{
struct v9fs_inode *v9inode = V9FS_I(inode);
truncate_inode_pages(inode->i_mapping, 0);
end_writeback(inode);
filemap_fdatawrite(inode->i_mapping);
@ -410,41 +425,67 @@ void v9fs_evict_inode(struct inode *inode)
#ifdef CONFIG_9P_FSCACHE
v9fs_cache_inode_put_cookie(inode);
#endif
/* clunk the fid stashed in writeback_fid */
if (v9inode->writeback_fid) {
p9_client_clunk(v9inode->writeback_fid);
v9inode->writeback_fid = NULL;
}
}
static struct inode *v9fs_qid_iget(struct super_block *sb,
struct p9_qid *qid,
struct p9_wstat *st)
{
int retval, umode;
unsigned long i_ino;
struct inode *inode;
struct v9fs_session_info *v9ses = sb->s_fs_info;
i_ino = v9fs_qid2ino(qid);
inode = iget_locked(sb, i_ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
/*
* initialize the inode with the stat info
* FIXME!! we may need support for stale inodes
* later.
*/
umode = p9mode2unixmode(v9ses, st->mode);
retval = v9fs_init_inode(v9ses, inode, umode);
if (retval)
goto error;
v9fs_stat2inode(st, inode, sb);
#ifdef CONFIG_9P_FSCACHE
v9fs_fscache_set_key(inode, &st->qid);
v9fs_cache_inode_get_cookie(inode);
#endif
unlock_new_inode(inode);
return inode;
error:
unlock_new_inode(inode);
iput(inode);
return ERR_PTR(retval);
}
struct inode *
v9fs_inode(struct v9fs_session_info *v9ses, struct p9_fid *fid,
struct super_block *sb)
v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
struct super_block *sb)
{
int err, umode;
struct inode *ret = NULL;
struct p9_wstat *st;
struct inode *inode = NULL;
st = p9_client_stat(fid);
if (IS_ERR(st))
return ERR_CAST(st);
umode = p9mode2unixmode(v9ses, st->mode);
ret = v9fs_get_inode(sb, umode);
if (IS_ERR(ret)) {
err = PTR_ERR(ret);
goto error;
}
v9fs_stat2inode(st, ret, sb);
ret->i_ino = v9fs_qid2ino(&st->qid);
#ifdef CONFIG_9P_FSCACHE
v9fs_vcookie_set_qid(ret, &st->qid);
v9fs_cache_inode_get_cookie(ret);
#endif
inode = v9fs_qid_iget(sb, &st->qid, st);
p9stat_free(st);
kfree(st);
return ret;
error:
p9stat_free(st);
kfree(st);
return ERR_PTR(err);
return inode;
}
/**
@ -458,8 +499,8 @@ error:
static int v9fs_remove(struct inode *dir, struct dentry *file, int rmdir)
{
int retval;
struct inode *file_inode;
struct p9_fid *v9fid;
struct inode *file_inode;
P9_DPRINTK(P9_DEBUG_VFS, "inode: %p dentry: %p rmdir: %d\n", dir, file,
rmdir);
@ -470,8 +511,20 @@ static int v9fs_remove(struct inode *dir, struct dentry *file, int rmdir)
return PTR_ERR(v9fid);
retval = p9_client_remove(v9fid);
if (!retval)
drop_nlink(file_inode);
if (!retval) {
/*
* directories on unlink should have zero
* link count
*/
if (rmdir) {
clear_nlink(file_inode);
drop_nlink(dir);
} else
drop_nlink(file_inode);
v9fs_invalidate_inode_attr(file_inode);
v9fs_invalidate_inode_attr(dir);
}
return retval;
}
@ -531,7 +584,7 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
}
/* instantiate inode and assign the unopened fid to the dentry */
inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
@ -570,9 +623,10 @@ v9fs_vfs_create(struct inode *dir, struct dentry *dentry, int mode,
int err;
u32 perm;
int flags;
struct v9fs_session_info *v9ses;
struct p9_fid *fid;
struct file *filp;
struct v9fs_inode *v9inode;
struct v9fs_session_info *v9ses;
struct p9_fid *fid, *inode_fid;
err = 0;
fid = NULL;
@ -592,8 +646,25 @@ v9fs_vfs_create(struct inode *dir, struct dentry *dentry, int mode,
goto error;
}
v9fs_invalidate_inode_attr(dir);
/* if we are opening a file, assign the open fid to the file */
if (nd && nd->flags & LOOKUP_OPEN) {
v9inode = V9FS_I(dentry->d_inode);
if (v9ses->cache && !v9inode->writeback_fid) {
/*
* clone a fid and add it to writeback_fid
* we do it during open time instead of
* page dirty time via write_begin/page_mkwrite
* because we want write after unlink usecase
* to work.
*/
inode_fid = v9fs_writeback_fid(dentry);
if (IS_ERR(inode_fid)) {
err = PTR_ERR(inode_fid);
goto error;
}
v9inode->writeback_fid = (void *) inode_fid;
}
filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
if (IS_ERR(filp)) {
err = PTR_ERR(filp);
@ -601,6 +672,10 @@ v9fs_vfs_create(struct inode *dir, struct dentry *dentry, int mode,
}
filp->private_data = fid;
#ifdef CONFIG_9P_FSCACHE
if (v9ses->cache)
v9fs_cache_inode_set_cookie(dentry->d_inode, filp);
#endif
} else
p9_client_clunk(fid);
@ -625,8 +700,8 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
int err;
u32 perm;
struct v9fs_session_info *v9ses;
struct p9_fid *fid;
struct v9fs_session_info *v9ses;
P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
err = 0;
@ -636,6 +711,9 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
fid = NULL;
} else {
inc_nlink(dir);
v9fs_invalidate_inode_attr(dir);
}
if (fid)
@ -687,7 +765,7 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
return ERR_PTR(result);
}
inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
result = PTR_ERR(inode);
inode = NULL;
@ -747,17 +825,19 @@ int
v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
int retval;
struct inode *old_inode;
struct inode *new_inode;
struct v9fs_session_info *v9ses;
struct p9_fid *oldfid;
struct p9_fid *olddirfid;
struct p9_fid *newdirfid;
struct p9_wstat wstat;
int retval;
P9_DPRINTK(P9_DEBUG_VFS, "\n");
retval = 0;
old_inode = old_dentry->d_inode;
new_inode = new_dentry->d_inode;
v9ses = v9fs_inode2v9ses(old_inode);
oldfid = v9fs_fid_lookup(old_dentry);
if (IS_ERR(oldfid))
@ -798,9 +878,30 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
retval = p9_client_wstat(oldfid, &wstat);
clunk_newdir:
if (!retval)
if (!retval) {
if (new_inode) {
if (S_ISDIR(new_inode->i_mode))
clear_nlink(new_inode);
else
drop_nlink(new_inode);
/*
* Work around vfs rename rehash bug with
* FS_RENAME_DOES_D_MOVE
*/
v9fs_invalidate_inode_attr(new_inode);
}
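/*
 * Moving a directory also moves its ".." entry: the old parent
 * loses that back-reference and the new parent gains one, unless
 * the rename replaced an existing directory (whose link count was
 * just cleared above, leaving new_dir's count unchanged).
 */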
if (S_ISDIR(old_inode->i_mode)) {
if (!new_inode)
inc_nlink(new_dir);
drop_nlink(old_dir);
}
v9fs_invalidate_inode_attr(old_inode);
v9fs_invalidate_inode_attr(old_dir);
v9fs_invalidate_inode_attr(new_dir);
/* successful rename */
d_move(old_dentry, new_dentry);
}
up_write(&v9ses->rename_sem);
p9_client_clunk(newdirfid);
@ -831,9 +932,10 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
err = -EPERM;
v9ses = v9fs_inode2v9ses(dentry->d_inode);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
return simple_getattr(mnt, dentry, stat);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
generic_fillattr(dentry->d_inode, stat);
return 0;
}
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
@ -891,17 +993,20 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
if (iattr->ia_valid & ATTR_GID)
wstat.n_gid = iattr->ia_gid;
}
retval = p9_client_wstat(fid, &wstat);
if (retval < 0)
return retval;
if ((iattr->ia_valid & ATTR_SIZE) &&
iattr->ia_size != i_size_read(dentry->d_inode)) {
retval = vmtruncate(dentry->d_inode, iattr->ia_size);
if (retval)
return retval;
}
/* Write all dirty data */
if (S_ISREG(dentry->d_inode->i_mode))
filemap_write_and_wait(dentry->d_inode->i_mapping);
retval = p9_client_wstat(fid, &wstat);
if (retval < 0)
return retval;
v9fs_invalidate_inode_attr(dentry->d_inode);
setattr_copy(dentry->d_inode, iattr);
mark_inode_dirty(dentry->d_inode);
@ -924,6 +1029,7 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
char tag_name[14];
unsigned int i_nlink;
struct v9fs_session_info *v9ses = sb->s_fs_info;
struct v9fs_inode *v9inode = V9FS_I(inode);
inode->i_nlink = 1;
@ -983,6 +1089,7 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
/* not real number of blocks, but 512 byte ones ... */
inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9;
v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR;
}
/**
@ -1115,8 +1222,8 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
int mode, const char *extension)
{
u32 perm;
struct v9fs_session_info *v9ses;
struct p9_fid *fid;
struct v9fs_session_info *v9ses;
v9ses = v9fs_inode2v9ses(dir);
if (!v9fs_proto_dotu(v9ses)) {
@ -1130,6 +1237,7 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
if (IS_ERR(fid))
return PTR_ERR(fid);
v9fs_invalidate_inode_attr(dir);
p9_client_clunk(fid);
return 0;
}
@ -1166,8 +1274,8 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
int retval;
struct p9_fid *oldfid;
char *name;
struct p9_fid *oldfid;
P9_DPRINTK(P9_DEBUG_VFS,
" %lu,%s,%s\n", dir->i_ino, dentry->d_name.name,
@ -1186,7 +1294,10 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
sprintf(name, "%d\n", oldfid->fid);
retval = v9fs_vfs_mkspecial(dir, dentry, P9_DMLINK, name);
__putname(name);
if (!retval) {
v9fs_refresh_inode(oldfid, old_dentry->d_inode);
v9fs_invalidate_inode_attr(dir);
}
clunk_fid:
p9_client_clunk(oldfid);
return retval;
@ -1237,6 +1348,32 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
return retval;
}
int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
{
loff_t i_size;
struct p9_wstat *st;
struct v9fs_session_info *v9ses;
v9ses = v9fs_inode2v9ses(inode);
st = p9_client_stat(fid);
if (IS_ERR(st))
return PTR_ERR(st);
spin_lock(&inode->i_lock);
/*
* We don't want to refresh inode->i_size,
* because we may have cached data
*/
i_size = inode->i_size;
v9fs_stat2inode(st, inode, inode->i_sb);
if (v9ses->cache)
inode->i_size = i_size;
spin_unlock(&inode->i_lock);
p9stat_free(st);
kfree(st);
return 0;
}
static const struct inode_operations v9fs_dir_inode_operations_dotu = {
.create = v9fs_vfs_create,
.lookup = v9fs_vfs_lookup,


@ -86,40 +86,63 @@ static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
return dentry;
}
struct inode *
v9fs_inode_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
struct super_block *sb)
static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
struct p9_qid *qid,
struct p9_fid *fid,
struct p9_stat_dotl *st)
{
int retval;
unsigned long i_ino;
struct inode *inode;
struct v9fs_session_info *v9ses = sb->s_fs_info;
i_ino = v9fs_qid2ino(qid);
inode = iget_locked(sb, i_ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
/*
* initialize the inode with the stat info
* FIXME!! we may need support for stale inodes
* later.
*/
retval = v9fs_init_inode(v9ses, inode, st->st_mode);
if (retval)
goto error;
v9fs_stat2inode_dotl(st, inode);
#ifdef CONFIG_9P_FSCACHE
v9fs_fscache_set_key(inode, &st->qid);
v9fs_cache_inode_get_cookie(inode);
#endif
retval = v9fs_get_acl(inode, fid);
if (retval)
goto error;
unlock_new_inode(inode);
return inode;
error:
unlock_new_inode(inode);
iput(inode);
return ERR_PTR(retval);
}
struct inode *
v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
struct super_block *sb)
{
struct inode *ret = NULL;
int err;
struct p9_stat_dotl *st;
struct inode *inode = NULL;
st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
if (IS_ERR(st))
return ERR_CAST(st);
ret = v9fs_get_inode(sb, st->st_mode);
if (IS_ERR(ret)) {
err = PTR_ERR(ret);
goto error;
}
v9fs_stat2inode_dotl(st, ret);
ret->i_ino = v9fs_qid2ino(&st->qid);
#ifdef CONFIG_9P_FSCACHE
v9fs_vcookie_set_qid(ret, &st->qid);
v9fs_cache_inode_get_cookie(ret);
#endif
err = v9fs_get_acl(ret, fid);
if (err) {
iput(ret);
goto error;
}
inode = v9fs_qid_iget_dotl(sb, &st->qid, fid, st);
kfree(st);
return ret;
error:
kfree(st);
return ERR_PTR(err);
return inode;
}
/**
@ -136,16 +159,17 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
struct nameidata *nd)
{
int err = 0;
char *name = NULL;
gid_t gid;
int flags;
mode_t mode;
struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL;
struct p9_fid *dfid, *ofid;
char *name = NULL;
struct file *filp;
struct p9_qid qid;
struct inode *inode;
struct p9_fid *fid = NULL;
struct v9fs_inode *v9inode;
struct p9_fid *dfid, *ofid, *inode_fid;
struct v9fs_session_info *v9ses;
struct posix_acl *pacl = NULL, *dacl = NULL;
v9ses = v9fs_inode2v9ses(dir);
@ -196,6 +220,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
err);
goto error;
}
v9fs_invalidate_inode_attr(dir);
/* instantiate inode and assign the unopened fid to the dentry */
fid = p9_client_walk(dfid, 1, &name, 1);
@ -205,7 +230,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
fid = NULL;
goto error;
}
inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
@ -219,6 +244,22 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
/* Now set the ACL based on the default value */
v9fs_set_create_acl(dentry, dacl, pacl);
v9inode = V9FS_I(inode);
if (v9ses->cache && !v9inode->writeback_fid) {
/*
* Clone a fid and add it to writeback_fid.
* We do it at open time instead of at page-dirty time
* (write_begin/page_mkwrite) because we want the
* write-after-unlink use case to work.
*/
inode_fid = v9fs_writeback_fid(dentry);
if (IS_ERR(inode_fid)) {
err = PTR_ERR(inode_fid);
goto error;
}
v9inode->writeback_fid = (void *) inode_fid;
}
/* Since we are opening a file, assign the open fid to the file */
filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
if (IS_ERR(filp)) {
@ -226,6 +267,10 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
return PTR_ERR(filp);
}
filp->private_data = ofid;
#ifdef CONFIG_9P_FSCACHE
if (v9ses->cache)
v9fs_cache_inode_set_cookie(inode, filp);
#endif
return 0;
error:
@ -300,7 +345,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
goto error;
}
inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
@ -327,7 +372,8 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
}
/* Now set the ACL based on the default value */
v9fs_set_create_acl(dentry, dacl, pacl);
inc_nlink(dir);
v9fs_invalidate_inode_attr(dir);
error:
if (fid)
p9_client_clunk(fid);
@ -346,9 +392,10 @@ v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
err = -EPERM;
v9ses = v9fs_inode2v9ses(dentry->d_inode);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
return simple_getattr(mnt, dentry, stat);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
generic_fillattr(dentry->d_inode, stat);
return 0;
}
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
@ -406,16 +453,20 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
if (IS_ERR(fid))
return PTR_ERR(fid);
retval = p9_client_setattr(fid, &p9attr);
if (retval < 0)
return retval;
if ((iattr->ia_valid & ATTR_SIZE) &&
iattr->ia_size != i_size_read(dentry->d_inode)) {
retval = vmtruncate(dentry->d_inode, iattr->ia_size);
if (retval)
return retval;
}
/* Write all dirty data */
if (S_ISREG(dentry->d_inode->i_mode))
filemap_write_and_wait(dentry->d_inode->i_mapping);
retval = p9_client_setattr(fid, &p9attr);
if (retval < 0)
return retval;
v9fs_invalidate_inode_attr(dentry->d_inode);
setattr_copy(dentry->d_inode, iattr);
mark_inode_dirty(dentry->d_inode);
@ -439,6 +490,7 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
void
v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
{
struct v9fs_inode *v9inode = V9FS_I(inode);
if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
inode->i_atime.tv_sec = stat->st_atime_sec;
@ -497,20 +549,21 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
/* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION
* because the inode structure does not have fields for them.
*/
v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR;
}
static int
v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
const char *symname)
{
struct v9fs_session_info *v9ses;
struct p9_fid *dfid;
struct p9_fid *fid = NULL;
struct inode *inode;
struct p9_qid qid;
char *name;
int err;
gid_t gid;
char *name;
struct p9_qid qid;
struct inode *inode;
struct p9_fid *dfid;
struct p9_fid *fid = NULL;
struct v9fs_session_info *v9ses;
name = (char *) dentry->d_name.name;
P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_symlink_dotl : %lu,%s,%s\n",
@ -534,6 +587,7 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
goto error;
}
v9fs_invalidate_inode_attr(dir);
if (v9ses->cache) {
/* Now walk from the parent so we can get an unopened fid. */
fid = p9_client_walk(dfid, 1, &name, 1);
@ -546,7 +600,7 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
}
/* instantiate inode and assign the unopened fid to dentry */
inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
@ -588,10 +642,10 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
int err;
struct p9_fid *dfid, *oldfid;
char *name;
struct v9fs_session_info *v9ses;
struct dentry *dir_dentry;
struct p9_fid *dfid, *oldfid;
struct v9fs_session_info *v9ses;
P9_DPRINTK(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
dir->i_ino, old_dentry->d_name.name,
@ -616,29 +670,17 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
return err;
}
v9fs_invalidate_inode_attr(dir);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
/* Get the latest stat info from server. */
struct p9_fid *fid;
struct p9_stat_dotl *st;
fid = v9fs_fid_lookup(old_dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
if (IS_ERR(st))
return PTR_ERR(st);
v9fs_stat2inode_dotl(st, old_dentry->d_inode);
kfree(st);
} else {
/* Caching disabled. No need to get up-to-date stat info.
* This dentry will be released immediately. So, just hold the
* inode
*/
ihold(old_dentry->d_inode);
v9fs_refresh_inode_dotl(fid, old_dentry->d_inode);
}
ihold(old_dentry->d_inode);
d_instantiate(dentry, old_dentry->d_inode);
return err;
@ -657,12 +699,12 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
dev_t rdev)
{
int err;
gid_t gid;
char *name;
mode_t mode;
struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL, *dfid = NULL;
struct inode *inode;
gid_t gid;
struct p9_qid qid;
struct dentry *dir_dentry;
struct posix_acl *dacl = NULL, *pacl = NULL;
@ -699,6 +741,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
if (err < 0)
goto error;
v9fs_invalidate_inode_attr(dir);
/* instantiate inode and assign the unopened fid to the dentry */
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
fid = p9_client_walk(dfid, 1, &name, 1);
@ -710,7 +753,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
goto error;
}
inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
@ -782,6 +825,31 @@ ndset:
return NULL;
}
int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
{
loff_t i_size;
struct p9_stat_dotl *st;
struct v9fs_session_info *v9ses;
v9ses = v9fs_inode2v9ses(inode);
st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
if (IS_ERR(st))
return PTR_ERR(st);
spin_lock(&inode->i_lock);
/*
* We don't want to refresh inode->i_size,
* because we may have cached data
*/
i_size = inode->i_size;
v9fs_stat2inode_dotl(st, inode);
if (v9ses->cache)
inode->i_size = i_size;
spin_unlock(&inode->i_lock);
kfree(st);
return 0;
}
const struct inode_operations v9fs_dir_inode_operations_dotl = {
.create = v9fs_vfs_create_dotl,
.lookup = v9fs_vfs_lookup,


@ -86,12 +86,15 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
} else
sb->s_op = &v9fs_super_ops;
sb->s_bdi = &v9ses->bdi;
if (v9ses->cache)
sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_CACHE_SIZE;
sb->s_flags = flags | MS_ACTIVE | MS_SYNCHRONOUS | MS_DIRSYNC |
MS_NOATIME;
sb->s_flags = flags | MS_ACTIVE | MS_DIRSYNC | MS_NOATIME;
if (!v9ses->cache)
sb->s_flags |= MS_SYNCHRONOUS;
#ifdef CONFIG_9P_FS_POSIX_ACL
if ((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_CLIENT)
if ((v9ses->flags & V9FS_ACL_MASK) == V9FS_POSIX_ACL)
sb->s_flags |= MS_POSIXACL;
#endif
@ -151,7 +154,6 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
retval = PTR_ERR(inode);
goto release_sb;
}
root = d_alloc_root(inode);
if (!root) {
iput(inode);
@ -166,7 +168,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
retval = PTR_ERR(st);
goto release_sb;
}
root->d_inode->i_ino = v9fs_qid2ino(&st->qid);
v9fs_stat2inode_dotl(st, root->d_inode);
kfree(st);
} else {
@ -183,10 +185,21 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
p9stat_free(st);
kfree(st);
}
v9fs_fid_add(root, fid);
retval = v9fs_get_acl(inode, fid);
if (retval)
goto release_sb;
v9fs_fid_add(root, fid);
/*
* Add the root fid to session info. This is used
* for file system sync. We want a cloned fid here
* so that we can do a sync_filesystem after a
* shrink_dcache_for_umount
*/
v9ses->root_fid = v9fs_fid_clone(root);
if (IS_ERR(v9ses->root_fid)) {
retval = PTR_ERR(v9ses->root_fid);
goto release_sb;
}
P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
return dget(sb->s_root);
@ -197,15 +210,11 @@ close_session:
v9fs_session_close(v9ses);
kfree(v9ses);
return ERR_PTR(retval);
release_sb:
/*
* we will do the session_close and root dentry release
* in the below call. But we need to clunk fid, because we haven't
* attached the fid to dentry so it won't get clunked
* automatically.
* we will do the session_close and root dentry
* release in the below call.
*/
p9_client_clunk(fid);
deactivate_locked_super(sb);
return ERR_PTR(retval);
}
@ -223,7 +232,7 @@ static void v9fs_kill_super(struct super_block *s)
P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s);
kill_anon_super(s);
p9_client_clunk(v9ses->root_fid);
v9fs_session_cancel(v9ses);
v9fs_session_close(v9ses);
kfree(v9ses);
@ -276,11 +285,31 @@ done:
return res;
}
static int v9fs_sync_fs(struct super_block *sb, int wait)
{
struct v9fs_session_info *v9ses = sb->s_fs_info;
P9_DPRINTK(P9_DEBUG_VFS, "v9fs_sync_fs: super_block %p\n", sb);
return p9_client_sync_fs(v9ses->root_fid);
}
static int v9fs_drop_inode(struct inode *inode)
{
struct v9fs_session_info *v9ses;
v9ses = v9fs_inode2v9ses(inode);
if (v9ses->cache)
return generic_drop_inode(inode);
/*
* In non-cached mode, always drop the inode because
* we want the inode attributes to always match those
* on the server.
*/
return 1;
}
static const struct super_operations v9fs_super_ops = {
#ifdef CONFIG_9P_FSCACHE
.alloc_inode = v9fs_alloc_inode,
.destroy_inode = v9fs_destroy_inode,
#endif
.statfs = simple_statfs,
.evict_inode = v9fs_evict_inode,
.show_options = generic_show_options,
@ -288,11 +317,11 @@ static const struct super_operations v9fs_super_ops = {
};
static const struct super_operations v9fs_super_ops_dotl = {
#ifdef CONFIG_9P_FSCACHE
.alloc_inode = v9fs_alloc_inode,
.destroy_inode = v9fs_destroy_inode,
#endif
.sync_fs = v9fs_sync_fs,
.statfs = v9fs_statfs,
.drop_inode = v9fs_drop_inode,
.evict_inode = v9fs_evict_inode,
.show_options = generic_show_options,
.umount_begin = v9fs_umount_begin,
@ -303,5 +332,5 @@ struct file_system_type v9fs_fs_type = {
.mount = v9fs_mount,
.kill_sb = v9fs_kill_super,
.owner = THIS_MODULE,
.fs_flags = FS_RENAME_DOES_D_MOVE,
.fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT,
};


@ -139,6 +139,8 @@ do { \
*/
enum p9_msg_t {
P9_TSYNCFS = 0,
P9_RSYNCFS,
P9_TLERROR = 6,
P9_RLERROR,
P9_TSTATFS = 8,
@ -688,7 +690,11 @@ struct p9_rwstat {
* @id: protocol operating identifier of type &p9_msg_t
* @tag: transaction id of the request
* @offset: used by marshalling routines to track current position in buffer
* @capacity: used by marshalling routines to track total capacity
* @capacity: used by marshalling routines to track total malloc'd capacity
* @pubuf: Payload user buffer given by the caller
* @pkbuf: Payload kernel buffer given by the caller
* @pbuf_size: size of pubuf/pkbuf (only one will be non-NULL) to be read/written
* @private: For transport layer's use.
* @sdata: payload
*
* &p9_fcall represents the structure for all 9P RPC
@ -705,6 +711,10 @@ struct p9_fcall {
size_t offset;
size_t capacity;
char __user *pubuf;
char *pkbuf;
size_t pbuf_size;
void *private;
uint8_t *sdata;
};


@ -230,6 +230,7 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
gid_t gid, struct p9_qid *qid);
int p9_client_clunk(struct p9_fid *fid);
int p9_client_fsync(struct p9_fid *fid, int datasync);
int p9_client_sync_fs(struct p9_fid *fid);
int p9_client_remove(struct p9_fid *fid);
int p9_client_read(struct p9_fid *fid, char *data, char __user *udata,
u64 offset, u32 count);


@ -26,11 +26,19 @@
#ifndef NET_9P_TRANSPORT_H
#define NET_9P_TRANSPORT_H
#define P9_TRANS_PREF_PAYLOAD_MASK 0x1
/* Default. Add Payload to PDU before sending it down to transport layer */
#define P9_TRANS_PREF_PAYLOAD_DEF 0x0
/* Send payload separately to transport layer along with the PDU. */
#define P9_TRANS_PREF_PAYLOAD_SEP 0x1
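A transport advertises this preference through the pref field of its struct p9_trans_module; a minimal sketch, mirroring the virtio transport registration that appears later in this diff (other fields elided):

static struct p9_trans_module p9_virtio_trans = {
	/* ... ops elided ... */
	.maxsize = PAGE_SIZE * 16,
	.pref = P9_TRANS_PREF_PAYLOAD_SEP,
};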
/**
* struct p9_trans_module - transport module interface
* @list: used to maintain a list of currently available transports
* @name: the human-readable name of the transport
* @maxsize: transport provided maximum packet size
* @pref: Preferences of this transport
* @def: set if this transport should be considered the default
* @create: member function to create a new connection on this transport
* @request: member function to issue a request to the transport
@ -47,6 +55,7 @@ struct p9_trans_module {
struct list_head list;
char *name; /* name of transport */
int maxsize; /* max message size of transport */
int pref; /* Preferences of this transport */
int def; /* this transport should be default */
struct module *owner;
int (*create)(struct p9_client *, const char *, char *);


@ -9,6 +9,7 @@ obj-$(CONFIG_NET_9P_RDMA) += 9pnet_rdma.o
util.o \
protocol.o \
trans_fd.o \
trans_common.o \
9pnet_virtio-objs := \
trans_virtio.o \


@ -229,10 +229,23 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
return ERR_PTR(-ENOMEM);
}
init_waitqueue_head(req->wq);
req->tc = kmalloc(sizeof(struct p9_fcall)+c->msize,
GFP_KERNEL);
req->rc = kmalloc(sizeof(struct p9_fcall)+c->msize,
GFP_KERNEL);
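/*
 * When the transport carries the payload separately (zero-copy),
 * the PDU buffer only needs to hold protocol headers, so cap the
 * allocation instead of using the full msize for every request.
 */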
if ((c->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
P9_TRANS_PREF_PAYLOAD_SEP) {
int alloc_msize = min(c->msize, 4096);
req->tc = kmalloc(sizeof(struct p9_fcall)+alloc_msize,
GFP_KERNEL);
req->tc->capacity = alloc_msize;
req->rc = kmalloc(sizeof(struct p9_fcall)+alloc_msize,
GFP_KERNEL);
req->rc->capacity = alloc_msize;
} else {
req->tc = kmalloc(sizeof(struct p9_fcall)+c->msize,
GFP_KERNEL);
req->tc->capacity = c->msize;
req->rc = kmalloc(sizeof(struct p9_fcall)+c->msize,
GFP_KERNEL);
req->rc->capacity = c->msize;
}
if ((!req->tc) || (!req->rc)) {
printk(KERN_ERR "Couldn't grow tag array\n");
kfree(req->tc);
@ -243,9 +256,7 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
return ERR_PTR(-ENOMEM);
}
req->tc->sdata = (char *) req->tc + sizeof(struct p9_fcall);
req->tc->capacity = c->msize;
req->rc->sdata = (char *) req->rc + sizeof(struct p9_fcall);
req->rc->capacity = c->msize;
}
p9pdu_reset(req->tc);
@ -443,6 +454,7 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
{
int8_t type;
int err;
int ecode;
err = p9_parse_header(req->rc, NULL, &type, NULL, 0);
if (err) {
@ -450,36 +462,53 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
return err;
}
if (type == P9_RERROR || type == P9_RLERROR) {
int ecode;
if (type != P9_RERROR && type != P9_RLERROR)
return 0;
if (!p9_is_proto_dotl(c)) {
char *ename;
if (!p9_is_proto_dotl(c)) {
char *ename;
err = p9pdu_readf(req->rc, c->proto_version, "s?d",
&ename, &ecode);
if (err)
goto out_err;
if (p9_is_proto_dotu(c))
err = -ecode;
if (!err || !IS_ERR_VALUE(err)) {
err = p9_errstr2errno(ename, strlen(ename));
P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", -ecode, ename);
kfree(ename);
if (req->tc->pbuf_size) {
/* Handle user buffers */
size_t len = req->rc->size - req->rc->offset;
if (req->tc->pubuf) {
/* User Buffer */
err = copy_from_user(
&req->rc->sdata[req->rc->offset],
req->tc->pubuf, len);
if (err) {
err = -EFAULT;
goto out_err;
}
} else {
/* Kernel Buffer */
memmove(&req->rc->sdata[req->rc->offset],
req->tc->pkbuf, len);
}
} else {
err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
}
err = p9pdu_readf(req->rc, c->proto_version, "s?d",
&ename, &ecode);
if (err)
goto out_err;
if (p9_is_proto_dotu(c))
err = -ecode;
P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
}
if (!err || !IS_ERR_VALUE(err)) {
err = p9_errstr2errno(ename, strlen(ename));
P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", -ecode,
ename);
kfree(ename);
}
} else {
err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
err = -ecode;
P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
}
} else
err = 0;
return err;
@ -1191,6 +1220,27 @@ error:
}
EXPORT_SYMBOL(p9_client_fsync);
int p9_client_sync_fs(struct p9_fid *fid)
{
int err = 0;
struct p9_req_t *req;
struct p9_client *clnt;
P9_DPRINTK(P9_DEBUG_9P, ">>> TSYNC_FS fid %d\n", fid->fid);
clnt = fid->clnt;
req = p9_client_rpc(clnt, P9_TSYNCFS, "d", fid->fid);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RSYNCFS fid %d\n", fid->fid);
p9_free_req(clnt, req);
error:
return err;
}
EXPORT_SYMBOL(p9_client_sync_fs);
int p9_client_clunk(struct p9_fid *fid)
{
int err;
@ -1270,7 +1320,15 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
if (count < rsize)
rsize = count;
req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset, rsize);
/* Don't bother with zerocopy for small IO (< 1024) */
if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) {
req = p9_client_rpc(clnt, P9_TREAD, "dqE", fid->fid, offset,
rsize, data, udata);
} else {
req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
rsize);
}
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
@ -1284,13 +1342,15 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
P9_DPRINTK(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
if (data) {
memmove(data, dataptr, count);
} else {
err = copy_to_user(udata, dataptr, count);
if (err) {
err = -EFAULT;
goto free_and_error;
if (!req->tc->pbuf_size) {
if (data) {
memmove(data, dataptr, count);
} else {
err = copy_to_user(udata, dataptr, count);
if (err) {
err = -EFAULT;
goto free_and_error;
}
}
}
p9_free_req(clnt, req);
@ -1323,12 +1383,21 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
if (count < rsize)
rsize = count;
if (data)
req = p9_client_rpc(clnt, P9_TWRITE, "dqD", fid->fid, offset,
rsize, data);
else
req = p9_client_rpc(clnt, P9_TWRITE, "dqU", fid->fid, offset,
rsize, udata);
/* Don't bother with zerocopy for small IO (< 1024) */
if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) {
req = p9_client_rpc(clnt, P9_TWRITE, "dqE", fid->fid, offset,
rsize, data, udata);
} else {
if (data)
req = p9_client_rpc(clnt, P9_TWRITE, "dqD", fid->fid,
offset, rsize, data);
else
req = p9_client_rpc(clnt, P9_TWRITE, "dqU", fid->fid,
offset, rsize, udata);
}
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
@ -1716,7 +1785,14 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
if (count < rsize)
rsize = count;
req = p9_client_rpc(clnt, P9_TREADDIR, "dqd", fid->fid, offset, rsize);
if ((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
P9_TRANS_PREF_PAYLOAD_SEP) {
req = p9_client_rpc(clnt, P9_TREADDIR, "dqF", fid->fid,
offset, rsize, data);
} else {
req = p9_client_rpc(clnt, P9_TREADDIR, "dqd", fid->fid,
offset, rsize);
}
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
@ -1730,7 +1806,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
P9_DPRINTK(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
if (data)
if (!req->tc->pbuf_size && data)
memmove(data, dataptr, count);
p9_free_req(clnt, req);


@ -114,6 +114,26 @@ pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
return size - len;
}
static size_t
pdu_write_urw(struct p9_fcall *pdu, const char *kdata, const char __user *udata,
size_t size)
{
BUG_ON(pdu->size > P9_IOHDRSZ);
pdu->pubuf = (char __user *)udata;
pdu->pkbuf = (char *)kdata;
pdu->pbuf_size = size;
return 0;
}
static size_t
pdu_write_readdir(struct p9_fcall *pdu, const char *kdata, size_t size)
{
BUG_ON(pdu->size > P9_READDIRHDRSZ);
pdu->pkbuf = (char *)kdata;
pdu->pbuf_size = size;
return 0;
}
/*
b - int8_t
w - int16_t
@ -445,6 +465,25 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
errcode = -EFAULT;
}
break;
case 'E':{
int32_t cnt = va_arg(ap, int32_t);
const char *k = va_arg(ap, const void *);
const char *u = va_arg(ap, const void *);
errcode = p9pdu_writef(pdu, proto_version, "d",
cnt);
if (!errcode && pdu_write_urw(pdu, k, u, cnt))
errcode = -EFAULT;
}
break;
case 'F':{
int32_t cnt = va_arg(ap, int32_t);
const char *k = va_arg(ap, const void *);
errcode = p9pdu_writef(pdu, proto_version, "d",
cnt);
if (!errcode && pdu_write_readdir(pdu, k, cnt))
errcode = -EFAULT;
}
break;
case 'U':{
int32_t count = va_arg(ap, int32_t);
const char __user *udata =
@ -579,6 +618,7 @@ EXPORT_SYMBOL(p9stat_read);
int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type)
{
pdu->id = type;
return p9pdu_writef(pdu, 0, "dbw", 0, type, tag);
}
@ -606,6 +646,10 @@ void p9pdu_reset(struct p9_fcall *pdu)
{
pdu->offset = 0;
pdu->size = 0;
pdu->private = NULL;
pdu->pubuf = NULL;
pdu->pkbuf = NULL;
pdu->pbuf_size = 0;
}
int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,

net/9p/trans_common.c (new file, 97 lines)

@ -0,0 +1,97 @@
/*
* Copyright IBM Corporation, 2010
* Author Venkateswararao Jujjuri <jvrao@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2.1 of the GNU Lesser General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <linux/scatterlist.h>
#include "trans_common.h"
/**
* p9_release_req_pages - Release pages after the transaction.
* @rpinfo: the PDU's private struct trans_rpage_info holding the pinned pages
*/
void
p9_release_req_pages(struct trans_rpage_info *rpinfo)
{
int i = 0;
while (rpinfo->rp_data[i] && rpinfo->rp_nr_pages--) {
put_page(rpinfo->rp_data[i]);
i++;
}
}
EXPORT_SYMBOL(p9_release_req_pages);
/**
* p9_nr_pages - Return number of pages needed to accommodate the payload.
*/
int
p9_nr_pages(struct p9_req_t *req)
{
int start_page, end_page;
start_page = (unsigned long long)req->tc->pubuf >> PAGE_SHIFT;
end_page = ((unsigned long long)req->tc->pubuf + req->tc->pbuf_size +
PAGE_SIZE - 1) >> PAGE_SHIFT;
return end_page - start_page;
}
EXPORT_SYMBOL(p9_nr_pages);
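/*
 * Worked example (hypothetical values, 4 KiB pages):
 *   pubuf = 0x11f00, pbuf_size = 6000
 *   start_page = 0x11f00 >> 12                 = 0x11
 *   end_page   = (0x11f00 + 6000 + 4095) >> 12 = 0x14
 *   nr_pages   = 0x14 - 0x11                   = 3
 * A buffer holding less than two pages of data can still span
 * three pages when it starts and ends mid-page.
 */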
/**
* p9_payload_gup - Translate the user buffer into kernel pages and
* pin them for read or write through get_user_pages_fast().
* @req: Request to be sent to server.
* @pdata_off: data offset into the first page after translation (gup).
* @pdata_len: Total length of the IO. gup may not return requested # of pages.
* @nr_pages: number of pages to accommodate the payload
* @rw: Indicates if the pages are for read or write.
*/
int
p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
int nr_pages, u8 rw)
{
uint32_t first_page_bytes = 0;
uint32_t pdata_mapped_pages;
struct trans_rpage_info *rpinfo;
*pdata_off = (size_t)req->tc->pubuf & (PAGE_SIZE-1);
if (*pdata_off)
first_page_bytes = min((PAGE_SIZE - *pdata_off),
req->tc->pbuf_size);
rpinfo = req->tc->private;
pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf,
nr_pages, rw, &rpinfo->rp_data[0]);
if (pdata_mapped_pages < 0) {
printk(KERN_ERR "get_user_pages_fast failed:%d udata:%p"
"nr_pages:%d\n", pdata_mapped_pages,
req->tc->pubuf, nr_pages);
pdata_mapped_pages = 0;
return -EIO;
}
rpinfo->rp_nr_pages = pdata_mapped_pages;
if (*pdata_off) {
*pdata_len = first_page_bytes;
*pdata_len += min((req->tc->pbuf_size - *pdata_len),
((size_t)pdata_mapped_pages - 1) << PAGE_SHIFT);
} else {
*pdata_len = min(req->tc->pbuf_size,
(size_t)pdata_mapped_pages << PAGE_SHIFT);
}
return 0;
}
EXPORT_SYMBOL(p9_payload_gup);
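/*
 * Continuing the hypothetical numbers above (pubuf = 0x11f00,
 * pbuf_size = 6000): pdata_off = 0xf00 (3840), so
 * first_page_bytes = min(4096 - 3840, 6000) = 256.
 * If get_user_pages_fast() pins all 3 pages:
 *   pdata_len = 256 + min(6000 - 256, 2 * 4096) = 6000 (whole IO)
 * If it pins only 2 pages:
 *   pdata_len = 256 + min(5744, 1 * 4096) = 4352 (IO is shortened)
 */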

net/9p/trans_common.h (new file, 32 lines)

@ -0,0 +1,32 @@
/*
* Copyright IBM Corporation, 2010
* Author Venkateswararao Jujjuri <jvrao@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2.1 of the GNU Lesser General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
*/
/* TRUE if it is user context */
#define P9_IS_USER_CONTEXT (!segment_eq(get_fs(), KERNEL_DS))
/**
* struct trans_rpage_info - To store mapped page information in PDU.
* @rp_alloc: set if this structure was allocated separately rather than reusing unused space in the PDU
* @rp_nr_pages: Number of mapped pages
* @rp_data: Array of page pointers
*/
struct trans_rpage_info {
u8 rp_alloc;
int rp_nr_pages;
struct page *rp_data[0];
};
void p9_release_req_pages(struct trans_rpage_info *);
int p9_payload_gup(struct p9_req_t *, size_t *, int *, int, u8);
int p9_nr_pages(struct p9_req_t *);


@ -45,6 +45,7 @@
#include <linux/scatterlist.h>
#include <linux/virtio.h>
#include <linux/virtio_9p.h>
#include "trans_common.h"
#define VIRTQUEUE_NUM 128
@ -155,6 +156,14 @@ static void req_done(struct virtqueue *vq)
rc->tag);
req = p9_tag_lookup(chan->client, rc->tag);
req->status = REQ_STATUS_RCVD;
if (req->tc->private) {
struct trans_rpage_info *rp = req->tc->private;
/*Release pages */
p9_release_req_pages(rp);
if (rp->rp_alloc)
kfree(rp);
req->tc->private = NULL;
}
p9_client_cb(chan->client, req);
} else {
spin_unlock_irqrestore(&chan->lock, flags);
@ -202,6 +211,38 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
return 1;
}
/**
* pack_sg_list_p - Just like pack_sg_list, but takes a list of pages
* instead of a contiguous buffer.
* @sg: scatter/gather list to pack into
* @start: which segment of the sg_list to start at
* @limit: maximum number of sg segments that may be used
* @pdata_off: offset into the first page
* @pdata: list of pages to add into the sg list
* @count: amount of data to pack into the scatter/gather list
*/
static int
pack_sg_list_p(struct scatterlist *sg, int start, int limit, size_t pdata_off,
struct page **pdata, int count)
{
int s;
int i = 0;
int index = start;
if (pdata_off) {
s = min((int)(PAGE_SIZE - pdata_off), count);
sg_set_page(&sg[index++], pdata[i++], s, pdata_off);
count -= s;
}
while (count) {
BUG_ON(index > limit);
s = min((int)PAGE_SIZE, count);
sg_set_page(&sg[index++], pdata[i++], s, 0);
count -= s;
}
return index-start;
}
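/*
 * Worked example (hypothetical values): pdata_off = 3840,
 * count = 6000, PAGE_SIZE = 4096 produces three sg segments:
 *   segment 0: page 0, offset 3840, length  256
 *   segment 1: page 1, offset    0, length 4096
 *   segment 2: page 2, offset    0, length 1648
 * (256 + 4096 + 1648 = 6000; the function returns 3)
 */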
/**
* p9_virtio_request - issue a request
* @client: client instance issuing the request
@ -212,22 +253,97 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
static int
p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
{
int in, out;
int in, out, inp, outp;
struct virtio_chan *chan = client->trans;
char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
unsigned long flags;
int err;
size_t pdata_off = 0;
struct trans_rpage_info *rpinfo = NULL;
int err, pdata_len = 0;
P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
req_retry:
req->status = REQ_STATUS_SENT;
if (req->tc->pbuf_size && (req->tc->pubuf && P9_IS_USER_CONTEXT)) {
int nr_pages = p9_nr_pages(req);
int rpinfo_size = sizeof(struct trans_rpage_info) +
sizeof(struct page *) * nr_pages;
if (rpinfo_size <= (req->tc->capacity - req->tc->size)) {
/* We can use sdata */
req->tc->private = req->tc->sdata + req->tc->size;
rpinfo = (struct trans_rpage_info *)req->tc->private;
rpinfo->rp_alloc = 0;
} else {
req->tc->private = kmalloc(rpinfo_size, GFP_NOFS);
if (!req->tc->private) {
P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: "
"private kmalloc returned NULL");
return -ENOMEM;
}
rpinfo = (struct trans_rpage_info *)req->tc->private;
rpinfo->rp_alloc = 1;
}
err = p9_payload_gup(req, &pdata_off, &pdata_len, nr_pages,
req->tc->id == P9_TREAD ? 1 : 0);
if (err < 0) {
if (rpinfo->rp_alloc)
kfree(rpinfo);
return err;
}
}
spin_lock_irqsave(&chan->lock, flags);
/* Handle out VirtIO ring buffers */
out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc->sdata,
req->tc->size);
in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM-out, rdata,
client->msize);
req->tc->size);
if (req->tc->pbuf_size && (req->tc->id == P9_TWRITE)) {
/* We have an additional write payload buffer to take care of */
if (req->tc->pubuf && P9_IS_USER_CONTEXT) {
outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
pdata_off, rpinfo->rp_data, pdata_len);
} else {
char *pbuf = req->tc->pubuf ? req->tc->pubuf :
req->tc->pkbuf;
outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
req->tc->pbuf_size);
}
out += outp;
}
/* Handle in VirtIO ring buffers */
if (req->tc->pbuf_size &&
((req->tc->id == P9_TREAD) || (req->tc->id == P9_TREADDIR))) {
/*
* Take care of additional Read payload.
* 11 is the read/write header = PDU Header(7) + IO Size (4).
* Arrange the buffers so that the server places the header in the
* allocated memory and the payload in the user buffer.
*/
inp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11);
/*
* Running executables in the filesystem may result in
* a read request with kernel buffer as opposed to user buffer.
*/
if (req->tc->pubuf && P9_IS_USER_CONTEXT) {
in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM,
pdata_off, rpinfo->rp_data, pdata_len);
} else {
char *pbuf = req->tc->pubuf ? req->tc->pubuf :
req->tc->pkbuf;
in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM,
pbuf, req->tc->pbuf_size);
}
in += inp;
} else {
in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata,
client->msize);
}
err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
if (err < 0) {
@ -246,6 +362,8 @@ req_retry:
P9_DPRINTK(P9_DEBUG_TRANS,
"9p debug: "
"virtio rpc add_buf returned failure");
if (rpinfo && rpinfo->rp_alloc)
kfree(rpinfo);
return -EIO;
}
}
@ -448,6 +566,7 @@ static struct p9_trans_module p9_virtio_trans = {
.request = p9_virtio_request,
.cancel = p9_virtio_cancel,
.maxsize = PAGE_SIZE*16,
.pref = P9_TRANS_PREF_PAYLOAD_SEP,
.def = 0,
.owner = THIS_MODULE,
};