Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull VFS fixes from Al Viro:
 "A bunch of assorted fixes, most of them followups to overlayfs merge"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  ovl: initialize ->is_cursor
  Return short read or 0 at end of a raw device, not EIO
  isofs: don't bother with ->d_op for normal case
  isofs_cmp(): we'll never see a dentry for . or ..
  overlayfs: fix lockdep misannotation
  ovl: fix check for cursor
  overlayfs: barriers for opening upper-layer directory
  rcu: Provide counterpart to rcu_dereference() for non-RCU situations
  staging: android: logger: Fix log corruption regression
Linus Torvalds 2014-11-02 10:28:43 -08:00
commit 7e05b807b9
9 changed files with 50 additions and 58 deletions

drivers/char/raw.c

@@ -285,7 +285,7 @@ static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
 static const struct file_operations raw_fops = {
 	.read = new_sync_read,
-	.read_iter = generic_file_read_iter,
+	.read_iter = blkdev_read_iter,
 	.write = new_sync_write,
 	.write_iter = blkdev_write_iter,
 	.fsync = blkdev_fsync,

drivers/staging/android/logger.c

@@ -420,7 +420,7 @@ static ssize_t logger_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	struct logger_log *log = file_get_log(iocb->ki_filp);
 	struct logger_entry header;
 	struct timespec now;
-	size_t len, count;
+	size_t len, count, w_off;
 
 	count = min_t(size_t, iocb->ki_nbytes, LOGGER_ENTRY_MAX_PAYLOAD);
@@ -452,11 +452,14 @@ static ssize_t logger_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	memcpy(log->buffer + log->w_off, &header, len);
 	memcpy(log->buffer, (char *)&header + len, sizeof(header) - len);
 
-	len = min(count, log->size - log->w_off);
+	/* Work with a copy until we are ready to commit the whole entry */
+	w_off = logger_offset(log, log->w_off + sizeof(struct logger_entry));
 
-	if (copy_from_iter(log->buffer + log->w_off, len, from) != len) {
+	len = min(count, log->size - w_off);
+
+	if (copy_from_iter(log->buffer + w_off, len, from) != len) {
 		/*
-		 * Note that by not updating w_off, this abandons the
+		 * Note that by not updating log->w_off, this abandons the
 		 * portion of the new entry that *was* successfully
 		 * copied, just above. This is intentional to avoid
 		 * message corruption from missing fragments.
@@ -470,7 +473,7 @@ static ssize_t logger_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		return -EFAULT;
 	}
 
-	log->w_off = logger_offset(log, log->w_off + count);
+	log->w_off = logger_offset(log, w_off + count);
 	mutex_unlock(&log->mutex);
 
 	/* wake up any blocked readers */
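The essence of the fix above: the header is written at log->w_off, the payload is staged at a private offset just past the header, and log->w_off is only committed once every copy_from_iter() has succeeded, so a faulting writer can no longer clobber the header or leave a truncated entry visible to readers. A minimal user-space sketch of the same stage-then-commit pattern (the ring size, names, and the failure stand-in are illustrative, not the driver's API):

#include <stddef.h>
#include <string.h>

#define RING_SIZE 4096	/* power of two, so masking can replace modulo */

struct ring {
	char buf[RING_SIZE];
	size_t w_off;	/* published offset: only moves once an entry is complete */
};

/* Copy len bytes into the ring at off, wrapping around the end if needed. */
static void ring_copy(struct ring *r, size_t off, const void *src, size_t len)
{
	size_t first = len < RING_SIZE - off ? len : RING_SIZE - off;

	memcpy(r->buf + off, src, first);
	memcpy(r->buf, (const char *)src + first, len - first);
}

/* Returns 0 on success, -1 if the payload copy "faults" (payload == NULL). */
static int ring_write(struct ring *r, const void *hdr, size_t hdr_len,
		      const void *payload, size_t len)
{
	/* Work with a copy of the offset until the whole entry is staged. */
	size_t w_off = r->w_off;

	ring_copy(r, w_off & (RING_SIZE - 1), hdr, hdr_len);
	w_off += hdr_len;

	if (!payload)
		return -1;	/* abandon: r->w_off still points at the old tail */
	ring_copy(r, w_off & (RING_SIZE - 1), payload, len);
	w_off += len;

	r->w_off = w_off & (RING_SIZE - 1);	/* commit the finished entry */
	return 0;
}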

fs/block_dev.c

@@ -1585,7 +1585,7 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
 }
 EXPORT_SYMBOL_GPL(blkdev_write_iter);
 
-static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *bd_inode = file->f_mapping->host;
@@ -1599,6 +1599,7 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
 	iov_iter_truncate(to, size);
 	return generic_file_read_iter(iocb, to);
 }
+EXPORT_SYMBOL_GPL(blkdev_read_iter);
 
 /*
  * Try to release a page associated with block device when the system

fs/isofs/inode.c

@@ -29,13 +29,9 @@
 #define BEQUIET
 
 static int isofs_hashi(const struct dentry *parent, struct qstr *qstr);
-static int isofs_hash(const struct dentry *parent, struct qstr *qstr);
 static int isofs_dentry_cmpi(const struct dentry *parent,
 		const struct dentry *dentry,
 		unsigned int len, const char *str, const struct qstr *name);
-static int isofs_dentry_cmp(const struct dentry *parent,
-		const struct dentry *dentry,
-		unsigned int len, const char *str, const struct qstr *name);
 
 #ifdef CONFIG_JOLIET
 static int isofs_hashi_ms(const struct dentry *parent, struct qstr *qstr);
@@ -134,10 +130,6 @@ static const struct super_operations isofs_sops = {
 static const struct dentry_operations isofs_dentry_ops[] = {
-	{
-		.d_hash = isofs_hash,
-		.d_compare = isofs_dentry_cmp,
-	},
 	{
 		.d_hash = isofs_hashi,
 		.d_compare = isofs_dentry_cmpi,
@@ -257,25 +249,12 @@ static int isofs_dentry_cmp_common(
 	return 1;
 }
 
-static int
-isofs_hash(const struct dentry *dentry, struct qstr *qstr)
-{
-	return isofs_hash_common(qstr, 0);
-}
-
 static int
 isofs_hashi(const struct dentry *dentry, struct qstr *qstr)
 {
 	return isofs_hashi_common(qstr, 0);
 }
 
-static int
-isofs_dentry_cmp(const struct dentry *parent, const struct dentry *dentry,
-		unsigned int len, const char *str, const struct qstr *name)
-{
-	return isofs_dentry_cmp_common(len, str, name, 0, 0);
-}
-
 static int
 isofs_dentry_cmpi(const struct dentry *parent, const struct dentry *dentry,
 		unsigned int len, const char *str, const struct qstr *name)
@@ -930,7 +909,8 @@ root_found:
 	if (opt.check == 'r')
 		table++;
 
-	s->s_d_op = &isofs_dentry_ops[table];
+	if (table)
+		s->s_d_op = &isofs_dentry_ops[table - 1];
 
 	/* get the root dentry */
 	s->s_root = d_make_root(inode);

fs/isofs/namei.c

@@ -18,25 +18,10 @@ static int
 isofs_cmp(struct dentry *dentry, const char *compare, int dlen)
 {
 	struct qstr qstr;
-
-	if (!compare)
-		return 1;
-
-	/* check special "." and ".." files */
-	if (dlen == 1) {
-		/* "." */
-		if (compare[0] == 0) {
-			if (!dentry->d_name.len)
-				return 0;
-			compare = ".";
-		} else if (compare[0] == 1) {
-			compare = "..";
-			dlen = 2;
-		}
-	}
-
 	qstr.name = compare;
 	qstr.len = dlen;
+	if (likely(!dentry->d_op))
+		return dentry->d_name.len != dlen || memcmp(dentry->d_name.name, compare, dlen);
 	return dentry->d_op->d_compare(NULL, NULL, dentry->d_name.len, dentry->d_name.name, &qstr);
 }
@@ -146,7 +131,8 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry,
 		    (!(de->flags[-sbi->s_high_sierra] & 1))) &&
 		    (sbi->s_showassoc ||
 		    (!(de->flags[-sbi->s_high_sierra] & 4)))) {
-			match = (isofs_cmp(dentry, dpnt, dlen) == 0);
+			if (dpnt && (dlen > 1 || dpnt[0] > 1))
+				match = (isofs_cmp(dentry, dpnt, dlen) == 0);
 		}
 		if (match) {
 			isofs_normalize_block_and_offset(de,

fs/namei.c

@@ -2497,7 +2497,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
 	}
 
 	mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
-	mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
+	mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT2);
 	return NULL;
 }
 EXPORT_SYMBOL(lock_rename);

fs/overlayfs/readdir.c

@@ -21,9 +21,10 @@ struct ovl_cache_entry {
 	unsigned int len;
 	unsigned int type;
 	u64 ino;
-	bool is_whiteout;
 	struct list_head l_node;
 	struct rb_node node;
+	bool is_whiteout;
+	bool is_cursor;
 	char name[];
 };
@@ -92,6 +93,7 @@ static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len,
 		p->type = d_type;
 		p->ino = ino;
 		p->is_whiteout = false;
+		p->is_cursor = false;
 	}
 
 	return p;
@@ -251,7 +253,7 @@ static int ovl_dir_mark_whiteouts(struct dentry *dir,
 	mutex_lock(&dir->d_inode->i_mutex);
 	list_for_each_entry(p, rdd->list, l_node) {
-		if (!p->name)
+		if (p->is_cursor)
 			continue;
 
 		if (p->type != DT_CHR)
@@ -307,7 +309,6 @@ static inline int ovl_dir_read_merged(struct path *upperpath,
 	}
 out:
 	return err;
-
 }
 
 static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
@@ -316,7 +317,7 @@ static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
 	loff_t off = 0;
 
 	list_for_each_entry(p, &od->cache->entries, l_node) {
-		if (!p->name)
+		if (p->is_cursor)
 			continue;
 		if (off >= pos)
 			break;
@@ -389,7 +390,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
 		p = list_entry(od->cursor.l_node.next, struct ovl_cache_entry, l_node);
 		/* Skip cursors */
-		if (p->name) {
+		if (!p->is_cursor) {
 			if (!p->is_whiteout) {
 				if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
 					break;
@@ -454,12 +455,13 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
 	if (!od->is_upper && ovl_path_type(dentry) == OVL_PATH_MERGE) {
 		struct inode *inode = file_inode(file);
 
-		realfile = od->upperfile;
+		realfile = lockless_dereference(od->upperfile);
 		if (!realfile) {
 			struct path upperpath;
 
 			ovl_path_upper(dentry, &upperpath);
 			realfile = ovl_path_open(&upperpath, O_RDONLY);
+			smp_mb__before_spinlock();
 			mutex_lock(&inode->i_mutex);
 			if (!od->upperfile) {
 				if (IS_ERR(realfile)) {
@@ -518,6 +520,7 @@ static int ovl_dir_open(struct inode *inode, struct file *file)
 	od->realfile = realfile;
 	od->is_real = (type != OVL_PATH_MERGE);
 	od->is_upper = (type != OVL_PATH_LOWER);
+	od->cursor.is_cursor = true;
 	file->private_data = od;
 
 	return 0;
@@ -569,7 +572,7 @@ void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
 {
 	struct ovl_cache_entry *p;
 
-	mutex_lock_nested(&upper->d_inode->i_mutex, I_MUTEX_PARENT);
+	mutex_lock_nested(&upper->d_inode->i_mutex, I_MUTEX_CHILD);
 	list_for_each_entry(p, list, l_node) {
 		struct dentry *dentry;

include/linux/fs.h

@@ -639,11 +639,13 @@ static inline int inode_unhashed(struct inode *inode)
  * 2: child/target
  * 3: xattr
  * 4: second non-directory
- * The last is for certain operations (such as rename) which lock two
+ * 5: second parent (when locking independent directories in rename)
+ *
+ * I_MUTEX_NONDIR2 is for certain operations (such as rename) which lock two
  * non-directories at once.
  *
  * The locking order between these classes is
- * parent -> child -> normal -> xattr -> second non-directory
+ * parent[2] -> child -> grandchild -> normal -> xattr -> second non-directory
  */
 enum inode_i_mutex_lock_class
 {
@@ -651,7 +653,8 @@ enum inode_i_mutex_lock_class
 	I_MUTEX_PARENT,
 	I_MUTEX_CHILD,
 	I_MUTEX_XATTR,
-	I_MUTEX_NONDIR2
+	I_MUTEX_NONDIR2,
+	I_MUTEX_PARENT2,
 };
 
 void lock_two_nondirectories(struct inode *, struct inode*);
@@ -2466,6 +2469,7 @@ extern ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
 extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
 
 /* fs/block_dev.c */
+extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
 extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from);
 extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
 		int datasync);
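The comment and enum above are what make the lock_rename() hunk in fs/namei.c lockdep-clean: when two directories that are not in a parent/child relationship must both be held (a cross-directory rename), the second one now carries its own subclass instead of I_MUTEX_CHILD. A sketch of that pairing, assuming 3.18-era VFS types and kernel headers (illustrative only; the real code is lock_rename(), shown earlier):

/* Lock two unrelated parent directories for a cross-directory rename.
 * The first takes I_MUTEX_PARENT, the second I_MUTEX_PARENT2, so lockdep
 * no longer confuses this with the parent -> child nesting case.
 */
static void lock_two_parents(struct inode *dir1, struct inode *dir2)
{
	mutex_lock_nested(&dir1->i_mutex, I_MUTEX_PARENT);
	mutex_lock_nested(&dir2->i_mutex, I_MUTEX_PARENT2);
}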

include/linux/rcupdate.h

@@ -616,6 +616,21 @@ static inline void rcu_preempt_sleep_check(void)
  */
 #define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
 
+/**
+ * lockless_dereference() - safely load a pointer for later dereference
+ * @p: The pointer to load
+ *
+ * Similar to rcu_dereference(), but for situations where the pointed-to
+ * object's lifetime is managed by something other than RCU. That
+ * "something other" might be reference counting or simple immortality.
+ */
+#define lockless_dereference(p) \
+({ \
+	typeof(p) _________p1 = ACCESS_ONCE(p); \
+	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
+	(_________p1); \
+})
+
 /**
  * rcu_assign_pointer() - assign to RCU-protected pointer
  * @p: pointer to assign to
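The overlayfs ovl_dir_fsync() hunk above is the first user of this macro: the writer fully constructs the object and publishes the pointer under a lock preceded by a barrier, while lock-free readers pick it up with lockless_dereference() so that dependent loads stay ordered even on Alpha. A small usage sketch under the same assumptions, with 3.18-era kernel primitives; struct foo, foo_lock, publish_foo and read_foo_val are illustrative names, not from the patch:

struct foo {
	int val;
};

static struct foo *global_foo;	/* written once under foo_lock, read locklessly */
static DEFINE_MUTEX(foo_lock);

static void publish_foo(struct foo *f)
{
	/* Order the initialization of *f before the store that publishes it;
	 * barrier + lock acquisition, the same trick as the overlayfs hunk. */
	smp_mb__before_spinlock();
	mutex_lock(&foo_lock);
	if (!global_foo)
		global_foo = f;
	mutex_unlock(&foo_lock);
}

static int read_foo_val(void)
{
	/* Dependency-ordered load: safe to dereference without holding foo_lock. */
	struct foo *f = lockless_dereference(global_foo);

	return f ? f->val : -1;
}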