7 SMB3 fixes: one for stable, three others that fix problems found while testing handle leases, and a compounded request fix

-----BEGIN PGP SIGNATURE-----
 
 iQGzBAABCgAdFiEE6fsu8pdIjtWE/DpLiiy9cAdyT1EFAmCn2x8ACgkQiiy9cAdy
 T1HjnQv+M87Xx++VVaJzeLQQlKGA/vfkhM7YLEkIwxmbUpt8JURORoK91xVa/RZA
 eS/K2tYOilAuuV7VXXw6ng6WNCWE/l+BNT5FHZ4WJt71pE1/tN/NIACtOhBB01GO
 r+JhAE08zYLu8vA1Ax1EBtSSBjTLUjDX0fWMfwD4C/BBABw5VZISnkSEj2lC6wT9
 vovEalU9amMRrvlhK9Z+MRJRJFzxY4LingiEVlFIdLczCGia5PgSl3NXRY1//rNO
 wc//34cCGxBNc5Su5Bvn1kTZT5mdBFR98mLOuD+Dw55LlIlShKDnhZHGQDGPyQGT
 ey2w2b+pNAr3rwVNtU6JNmI7AiUllNHiDu5UsyB0ctDWJljzrILd4uPaWofcNXAh
 5qPRvuGsqjo3D/10DPshla1pJtmFr8eKXy8o6UVfMYQSHDo1LbqMll7ArGgV3Fxn
 B2g5N+ax1+DXZlykKJGhYBBkvGANuUBU/tq810i5BvLhfrc1dx+pJlZAeO5OxCSA
 SBUiirq4
 =neWC
 -----END PGP SIGNATURE-----

Merge tag '5.13-rc3-smb3' of git://git.samba.org/sfrench/cifs-2.6

Pull cifs fixes from Steve French:
 "Seven smb3 fixes: one for stable, three others fix problems found in
  testing handle leases, and a compounded request fix"

* tag '5.13-rc3-smb3' of git://git.samba.org/sfrench/cifs-2.6:
  Fix KASAN identified use-after-free issue.
  Defer close only when lease is enabled.
  Fix kernel oops when CONFIG_DEBUG_ATOMIC_SLEEP is enabled.
  cifs: Fix inconsistent indenting
  cifs: fix memory leak in smb2_copychunk_range
  SMB3: incorrect file id in requests compounded with open
  cifs: remove deadstore in cifs_close_all_deferred_files()
Linus Torvalds 2021-05-21 13:12:51 -10:00
commit 8bb14ca171
7 changed files with 55 additions and 32 deletions
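
Most of these changes touch the deferred-close machinery, so a rough sketch of the idea helps when reading the hunks below. The sketch uses made-up names (struct cached_handle, example_close and friends are illustrative, not the fs/cifs types): when a handle backed by a read-write caching lease is closed, the SMB2 close on the wire is parked on a delayed workqueue for up to acregmax, so that a quick re-open can reuse the cached handle instead of opening the file again.

#include <linux/workqueue.h>

/* Illustrative stand-in for the cached-handle state; not the real cifsFileInfo. */
struct cached_handle {
        struct delayed_work deferred;  /* worker sends the real SMB2 close */
        bool lease_granted;            /* read/write caching lease held? */
        bool deferred_close_scheduled; /* deferred close already queued? */
};

static struct workqueue_struct *deferredclose_wq;

/* Called from ->release(): postpone the wire close only when a lease is held. */
static void example_close(struct cached_handle *h, unsigned long acregmax_jiffies)
{
        if (!h->lease_granted) {
                /* No lease: the handle cannot be reused by a later open. */
                /* ... send the SMB2 CLOSE immediately here ... */
                return;
        }
        queue_delayed_work(deferredclose_wq, &h->deferred, acregmax_jiffies);
        h->deferred_close_scheduled = true;
}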

fs/cifs/cifsfs.c

@@ -133,7 +133,7 @@ struct workqueue_struct *cifsiod_wq;
struct workqueue_struct *decrypt_wq;
struct workqueue_struct *fileinfo_put_wq;
struct workqueue_struct *cifsoplockd_wq;
-struct workqueue_struct *deferredclose_wq;
+struct workqueue_struct *deferredclose_wq;
__u32 cifs_lock_secret;
/*
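
For orientation, deferredclose_wq above is the dedicated workqueue the deferred closes run on. A minimal sketch of how such a workqueue is created and torn down follows; the flags and error handling are assumptions modelled on the other cifs workqueues, not a quote of cifsfs.c.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *deferredclose_wq;

static int __init example_init(void)
{
        /* WQ_FREEZABLE | WQ_MEM_RECLAIM mirrors the other cifs workqueues (assumption). */
        deferredclose_wq = alloc_workqueue("deferredclose",
                                           WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
        if (!deferredclose_wq)
                return -ENOMEM;
        return 0;
}

static void __exit example_exit(void)
{
        destroy_workqueue(deferredclose_wq);
}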

fs/cifs/cifsglob.h

@@ -1257,8 +1257,7 @@ struct cifsFileInfo {
struct work_struct oplock_break; /* work for oplock breaks */
struct work_struct put; /* work for the final part of _put */
struct delayed_work deferred;
-bool oplock_break_received; /* Flag to indicate oplock break */
-bool deferred_scheduled;
+bool deferred_close_scheduled; /* Flag to indicate close is scheduled */
};
struct cifs_io_parms {
@@ -1418,6 +1417,7 @@ struct cifsInodeInfo {
struct inode vfs_inode;
struct list_head deferred_closes; /* list of deferred closes */
spinlock_t deferred_lock; /* protection on deferred list */
+bool lease_granted; /* Flag to indicate whether lease or oplock is granted. */
};
static inline struct cifsInodeInfo *

fs/cifs/file.c

@@ -323,8 +323,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
cfile->dentry = dget(dentry);
cfile->f_flags = file->f_flags;
cfile->invalidHandle = false;
-cfile->oplock_break_received = false;
-cfile->deferred_scheduled = false;
+cfile->deferred_close_scheduled = false;
cfile->tlink = cifs_get_tlink(tlink);
INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
INIT_WORK(&cfile->put, cifsFileInfo_put_work);
@@ -574,21 +573,18 @@ int cifs_open(struct inode *inode, struct file *file)
file->f_op = &cifs_file_direct_ops;
}
-spin_lock(&CIFS_I(inode)->deferred_lock);
/* Get the cached handle as SMB2 close is deferred */
rc = cifs_get_readable_path(tcon, full_path, &cfile);
if (rc == 0) {
if (file->f_flags == cfile->f_flags) {
file->private_data = cfile;
+spin_lock(&CIFS_I(inode)->deferred_lock);
cifs_del_deferred_close(cfile);
spin_unlock(&CIFS_I(inode)->deferred_lock);
goto out;
} else {
-spin_unlock(&CIFS_I(inode)->deferred_lock);
_cifsFileInfo_put(cfile, true, false);
}
-} else {
-spin_unlock(&CIFS_I(inode)->deferred_lock);
}
if (server->oplocks)
@@ -878,12 +874,8 @@ void smb2_deferred_work_close(struct work_struct *work)
struct cifsFileInfo, deferred.work);
spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
-if (!cfile->deferred_scheduled) {
-spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
-return;
-}
cifs_del_deferred_close(cfile);
-cfile->deferred_scheduled = false;
+cfile->deferred_close_scheduled = false;
spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
_cifsFileInfo_put(cfile, true, false);
}
@@ -900,19 +892,26 @@ int cifs_close(struct inode *inode, struct file *file)
file->private_data = NULL;
dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
+cinode->lease_granted &&
dclose) {
if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags))
inode->i_ctime = inode->i_mtime = current_time(inode);
spin_lock(&cinode->deferred_lock);
cifs_add_deferred_close(cfile, dclose);
-if (cfile->deferred_scheduled) {
-mod_delayed_work(deferredclose_wq,
-&cfile->deferred, cifs_sb->ctx->acregmax);
+if (cfile->deferred_close_scheduled &&
+delayed_work_pending(&cfile->deferred)) {
+/*
+* If there is no pending work, mod_delayed_work queues new work.
+* So, Increase the ref count to avoid use-after-free.
+*/
+if (!mod_delayed_work(deferredclose_wq,
+&cfile->deferred, cifs_sb->ctx->acregmax))
+cifsFileInfo_get(cfile);
} else {
/* Deferred close for files */
queue_delayed_work(deferredclose_wq,
&cfile->deferred, cifs_sb->ctx->acregmax);
-cfile->deferred_scheduled = true;
+cfile->deferred_close_scheduled = true;
spin_unlock(&cinode->deferred_lock);
return 0;
}
@@ -2020,8 +2019,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
continue;
if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
-if ((!open_file->invalidHandle) &&
-(!open_file->oplock_break_received)) {
+if ((!open_file->invalidHandle)) {
/* found a good file */
/* lock it so it will not be closed on us */
cifsFileInfo_get(open_file);
@@ -4874,14 +4872,20 @@ void cifs_oplock_break(struct work_struct *work)
}
/*
* When oplock break is received and there are no active
-* file handles but cached, then set the flag oplock_break_received.
+* file handles but cached, then schedule deferred close immediately.
* So, new open will not use cached handle.
*/
spin_lock(&CIFS_I(inode)->deferred_lock);
is_deferred = cifs_is_deferred_close(cfile, &dclose);
-if (is_deferred && cfile->deferred_scheduled) {
-cfile->oplock_break_received = true;
-mod_delayed_work(deferredclose_wq, &cfile->deferred, 0);
+if (is_deferred &&
+cfile->deferred_close_scheduled &&
+delayed_work_pending(&cfile->deferred)) {
+/*
+* If there is no pending work, mod_delayed_work queues new work.
+* So, Increase the ref count to avoid use-after-free.
+*/
+if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
+cifsFileInfo_get(cfile);
}
spin_unlock(&CIFS_I(inode)->deferred_lock);
_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
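
The cifs_close() and cifs_oplock_break() hunks above are the heart of the use-after-free fix. The detail that matters is the return value of mod_delayed_work(): it returns true when the work item was already pending and only its timer was adjusted, and false when the item was idle and has just been queued. The deferred-close worker drops a reference when it runs, so a freshly queued item must be balanced by taking one, which is what the added cifsFileInfo_get() calls do. A condensed sketch of the rule, reusing the illustrative struct cached_handle from the earlier sketch (get_handle() is likewise a stand-in, not a cifs helper):

/* Reschedule an already-deferred close to fire after @delay jiffies. */
static void reschedule_deferred_close(struct cached_handle *h, unsigned long delay)
{
        if (!h->deferred_close_scheduled || !delayed_work_pending(&h->deferred))
                return;
        /*
         * A false return means the work was idle and has just been queued,
         * so the worker's eventual "put" needs a matching "get" here.
         */
        if (!mod_delayed_work(deferredclose_wq, &h->deferred, delay))
                get_handle(h);
}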

fs/cifs/fs_context.c

@@ -1145,7 +1145,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
/* if iocharset not set then load_nls_default
* is used by caller
*/
-cifs_dbg(FYI, "iocharset set to %s\n", ctx->iocharset);
+cifs_dbg(FYI, "iocharset set to %s\n", ctx->iocharset);
break;
case Opt_netbiosname:
memset(ctx->source_rfc1001_name, 0x20,

fs/cifs/misc.c

@@ -672,6 +672,11 @@ cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}
+/*
+* Critical section which runs after acquiring deferred_lock.
+* As there is no reference count on cifs_deferred_close, pdclose
+* should not be used outside deferred_lock.
+*/
bool
cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose)
{
@@ -688,6 +693,9 @@ cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **
return false;
}
+/*
+* Critical section which runs after acquiring deferred_lock.
+*/
void
cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose)
{
@@ -707,6 +715,9 @@ cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *
list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes);
}
+/*
+* Critical section which runs after acquiring deferred_lock.
+*/
void
cifs_del_deferred_close(struct cifsFileInfo *cfile)
{
@@ -738,15 +749,19 @@ void
cifs_close_all_deferred_files(struct cifs_tcon *tcon)
{
struct cifsFileInfo *cfile;
-struct cifsInodeInfo *cinode;
struct list_head *tmp;
spin_lock(&tcon->open_file_lock);
list_for_each(tmp, &tcon->openFileList) {
cfile = list_entry(tmp, struct cifsFileInfo, tlist);
-cinode = CIFS_I(d_inode(cfile->dentry));
-if (delayed_work_pending(&cfile->deferred))
-mod_delayed_work(deferredclose_wq, &cfile->deferred, 0);
+if (delayed_work_pending(&cfile->deferred)) {
+/*
+* If there is no pending work, mod_delayed_work queues new work.
+* So, Increase the ref count to avoid use-after-free.
+*/
+if (!mod_delayed_work(deferredclose_wq, &cfile->deferred, 0))
+cifsFileInfo_get(cfile);
+}
}
spin_unlock(&tcon->open_file_lock);
}
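
The comments added in this file document a locking rule rather than a behaviour change: the deferred-close helpers assume deferred_lock is already held, and because struct cifs_deferred_close carries no reference count, the pointer returned through pdclose must not be used once the lock is dropped. Inside fs/cifs, where these types and helpers are declared, an illustrative caller (not code from the patch) looks like this:

static void example_cancel_deferred_close(struct cifsInodeInfo *cinode,
                                          struct cifsFileInfo *cfile)
{
        struct cifs_deferred_close *dclose;

        spin_lock(&cinode->deferred_lock);
        if (cifs_is_deferred_close(cfile, &dclose)) {
                /* dclose is only valid while deferred_lock is held */
                cifs_del_deferred_close(cfile);
        }
        spin_unlock(&cinode->deferred_lock);
}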

fs/cifs/smb2ops.c

@@ -1861,6 +1861,8 @@ smb2_copychunk_range(const unsigned int xid,
cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
/* Request server copy to target from src identified by key */
+kfree(retbuf);
+retbuf = NULL;
rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
true /* is_fsctl */, (char *)pcchunk,
@@ -3981,6 +3983,7 @@ smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
unsigned int epoch, bool *purge_cache)
{
oplock &= 0xFF;
+cinode->lease_granted = false;
if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
return;
if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
@@ -4007,6 +4010,7 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
unsigned int new_oplock = 0;
oplock &= 0xFF;
+cinode->lease_granted = true;
if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
return;
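
Two separate fixes land in this file. The lease_granted assignments record, at the point where the server's oplock/lease response is parsed, whether the client holds a real lease (the SMB2.1+ path) or only a legacy oplock, and cifs_close() now checks that flag before deferring a close. The first hunk plugs the smb2_copychunk_range() leak: the ioctl response buffer is allocated anew on every pass through the copy loop, so the previous buffer has to be freed, and the pointer cleared, before the next call. A reduced sketch of that pattern, with illustrative names (issue_copychunk() stands in for the SMB2_ioctl() call that allocates the response):

#include <linux/slab.h>

int issue_copychunk(char **retbuf); /* stand-in; allocates a fresh response */

static int example_copy_loop(unsigned int chunks)
{
        char *retbuf = NULL;
        unsigned int i;
        int rc = 0;

        for (i = 0; i < chunks; i++) {
                /*
                 * Free the previous iteration's response before it is
                 * replaced; kfree(NULL) is a no-op on the first pass.
                 */
                kfree(retbuf);
                retbuf = NULL;

                rc = issue_copychunk(&retbuf);
                if (rc)
                        break;
                /* ... consume the response in retbuf ... */
        }
        kfree(retbuf);
        return rc;
}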

fs/cifs/smb2pdu.c

@@ -3900,10 +3900,10 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
* Related requests use info from previous read request
* in chain.
*/
-shdr->SessionId = 0xFFFFFFFF;
+shdr->SessionId = 0xFFFFFFFFFFFFFFFF;
shdr->TreeId = 0xFFFFFFFF;
-req->PersistentFileId = 0xFFFFFFFF;
-req->VolatileFileId = 0xFFFFFFFF;
+req->PersistentFileId = 0xFFFFFFFFFFFFFFFF;
+req->VolatileFileId = 0xFFFFFFFFFFFFFFFF;
}
}
if (remaining_bytes > io_parms->length)
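
The smb2pdu.c change matters because SessionId and the two file ids are 64-bit fields on the wire, while TreeId is 32-bit. For a request compounded after an open, the "use the value from the previous request in the chain" convention is all ones across the full width of the field, and the old 0xFFFFFFFF constant only set the low 32 bits of the 64-bit fields. A minimal illustration with simplified stand-in structures (endianness conversion elided, as in the hunk above):

#include <linux/types.h>

/* Simplified stand-ins for the header and read-request fields touched by the fix. */
struct example_sync_hdr {
        __u64 SessionId;
        __u32 TreeId;
};

struct example_read_req {
        __u64 PersistentFileId;
        __u64 VolatileFileId;
};

static void example_mark_related(struct example_sync_hdr *shdr,
                                 struct example_read_req *req)
{
        shdr->SessionId       = 0xFFFFFFFFFFFFFFFF; /* all 64 bits set */
        shdr->TreeId          = 0xFFFFFFFF;         /* 32-bit field, unchanged */
        req->PersistentFileId = 0xFFFFFFFFFFFFFFFF;
        req->VolatileFileId   = 0xFFFFFFFFFFFFFFFF;
}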