Allow Windows blocking locks to be cancelled via a
CANCEL_LOCK call. TODO - restrict this to servers
that support NT_STATUS codes (Win9x will probably
not support this call).

Signed-off-by: Jeremy Allison <jra@samba.org>
Signed-off-by: Steve French <sfrench@us.ibm.com>
(cherry picked from commit 570d4d2d895569825d0d017d4e76b51138f68864)
Author:    Jeremy Allison <jra@samba.org>
Date:      2006-08-02 21:56:33 +00:00
Committer: Steve French <sfrench@us.ibm.com>
Parent:    6c3d8909d8
Commit:    7ee1af765d
7 changed files with 528 additions and 312 deletions
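
For illustration only (not part of the patch), a minimal userspace sketch of the case this change addresses: a blocking byte-range lock taken over a CIFS mount with fcntl(F_SETLKW). With this change, a signal delivered while the lock is pending makes the client cancel the request on the server (NT_CANCEL for POSIX locks, LOCKING_ANDX_CANCEL_LOCK for Windows-style locks) instead of leaving it queued there. The mount point and file name are hypothetical.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical file on a CIFS mount. */
	int fd = open("/mnt/cifs/shared.dat", O_RDWR);
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 4096,	/* lock the first 4 KiB */
	};

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* F_SETLKW blocks until the range is free.  If another client
	   holds the range, this call sleeps inside cifs_lock(); a signal
	   (for example Ctrl-C) now cancels the pending lock request on
	   the server rather than abandoning it there. */
	if (fcntl(fd, F_SETLKW, &fl) < 0)
		perror("fcntl(F_SETLKW)");

	fl.l_type = F_UNLCK;
	fcntl(fd, F_SETLK, &fl);
	close(fd);
	return 0;
}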

@ -3,6 +3,7 @@
*
* Copyright (C) International Business Machines Corp., 2002,2006
* Author(s): Steve French (sfrench@us.ibm.com)
* Jeremy Allison (jra@samba.org)
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
@ -267,14 +268,14 @@ struct cifsTconInfo {
};
/*
* This info hangs off the cifsFileInfo structure. This is used to track
* byte stream locks on the file
* This info hangs off the cifsFileInfo structure, pointed to by llist.
* This is used to track byte stream locks on the file
*/
struct cifsLockInfo {
struct cifsLockInfo *next;
int start;
int length;
int type;
struct list_head llist; /* pointer to next cifsLockInfo */
__u64 offset;
__u64 length;
__u8 type;
};
/*
@ -305,6 +306,8 @@ struct cifsFileInfo {
/* lock scope id (0 if none) */
struct file * pfile; /* needed for writepage */
struct inode * pInode; /* needed for oplock break */
struct semaphore lock_sem;
struct list_head llist; /* list of byte range locks we have. */
unsigned closePend:1; /* file is marked to close */
unsigned invalidHandle:1; /* file closed via session abend */
atomic_t wrtPending; /* handle in use - defer close */
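
As a reading aid, a minimal sketch of the bookkeeping that the new lock_sem and llist fields enable (it mirrors the store_file_lock() and cifs_close() hunks later in this diff; the cifsFile variable name is illustrative): every Windows-style byte-range lock the server grants is recorded on the open file's llist under lock_sem, so it can be found again at unlock time and discarded at close.

	struct cifsLockInfo *li;

	/* Record a granted lock (sketch of store_file_lock() below). */
	li = kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (li == NULL)
		return -ENOMEM;
	li->offset = offset;
	li->length = len;
	li->type = lockType;
	down(&cifsFile->lock_sem);	/* lock_sem serialises list updates */
	list_add(&li->llist, &cifsFile->llist);
	up(&cifsFile->lock_sem);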

@ -50,6 +50,10 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *,
struct kvec *, int /* nvec to send */,
int * /* type of buf returned */ , const int long_op);
extern int SendReceiveBlockingLock(const unsigned int /* xid */ , struct cifsTconInfo *,
struct smb_hdr * /* input */ ,
struct smb_hdr * /* out */ ,
int * /* bytes returned */);
extern int checkSMBhdr(struct smb_hdr *smb, __u16 mid);
extern int checkSMB(struct smb_hdr *smb, __u16 mid, int length);
extern int is_valid_oplock_break(struct smb_hdr *smb, struct TCP_Server_Info *);

@ -1460,8 +1460,13 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
pSMB->hdr.smb_buf_length += count;
pSMB->ByteCount = cpu_to_le16(count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
if (waitFlag) {
rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned);
} else {
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, timeout);
}
cifs_stats_inc(&tcon->num_locks);
if (rc) {
cFYI(1, ("Send error in Lock = %d", rc));
@ -1546,8 +1551,14 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
pSMB->Reserved4 = 0;
pSMB->hdr.smb_buf_length += byte_count;
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
if (waitFlag) {
rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned);
} else {
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, timeout);
}
if (rc) {
cFYI(1, ("Send error in Posix Lock = %d", rc));
} else if (get_flag) {

@ -5,6 +5,7 @@
*
* Copyright (C) International Business Machines Corp., 2002,2003
* Author(s): Steve French (sfrench@us.ibm.com)
* Jeremy Allison (jra@samba.org)
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
@ -47,6 +48,8 @@ static inline struct cifsFileInfo *cifs_init_private(
private_data->netfid = netfid;
private_data->pid = current->tgid;
init_MUTEX(&private_data->fh_sem);
init_MUTEX(&private_data->lock_sem);
INIT_LIST_HEAD(&private_data->llist);
private_data->pfile = file; /* needed for writepage */
private_data->pInode = inode;
private_data->invalidHandle = FALSE;
@ -473,6 +476,8 @@ int cifs_close(struct inode *inode, struct file *file)
cifs_sb = CIFS_SB(inode->i_sb);
pTcon = cifs_sb->tcon;
if (pSMBFile) {
struct cifsLockInfo *li, *tmp;
pSMBFile->closePend = TRUE;
if (pTcon) {
/* no sense reconnecting to close a file that is
@ -496,6 +501,16 @@ int cifs_close(struct inode *inode, struct file *file)
pSMBFile->netfid);
}
}
/* Delete any outstanding lock records.
We'll lose them when the file is closed anyway. */
down(&pSMBFile->lock_sem);
list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
list_del(&li->llist);
kfree(li);
}
up(&pSMBFile->lock_sem);
write_lock(&GlobalSMBSeslock);
list_del(&pSMBFile->flist);
list_del(&pSMBFile->tlist);
@ -570,6 +585,21 @@ int cifs_closedir(struct inode *inode, struct file *file)
return rc;
}
static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
__u64 offset, __u8 lockType)
{
struct cifsLockInfo *li = kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
if (li == NULL)
return -ENOMEM;
li->offset = offset;
li->length = len;
li->type = lockType;
down(&fid->lock_sem);
list_add(&li->llist, &fid->llist);
up(&fid->lock_sem);
return 0;
}
int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
int rc, xid;
@ -581,6 +611,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
struct cifsTconInfo *pTcon;
__u16 netfid;
__u8 lockType = LOCKING_ANDX_LARGE_FILES;
int posix_locking;
length = 1 + pfLock->fl_end - pfLock->fl_start;
rc = -EACCES;
@ -639,14 +670,14 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
}
netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
posix_locking = (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability));
/* BB add code here to normalize offset and length to
account for negative length which we can not accept over the
wire */
if (IS_GETLK(cmd)) {
if((cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
(CIFS_UNIX_FCNTL_CAP &
le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
if(posix_locking) {
int posix_lock_type;
if(lockType & LOCKING_ANDX_SHARED_LOCK)
posix_lock_type = CIFS_RDLCK;
@ -682,9 +713,15 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
FreeXid(xid);
return rc;
}
if ((cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
(CIFS_UNIX_FCNTL_CAP &
le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
if (!numLock && !numUnlock) {
/* if no lock or unlock then nothing
to do since we do not know what it is */
FreeXid(xid);
return -EOPNOTSUPP;
}
if (posix_locking) {
int posix_lock_type;
if(lockType & LOCKING_ANDX_SHARED_LOCK)
posix_lock_type = CIFS_RDLCK;
@ -693,18 +730,46 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
if(numUnlock == 1)
posix_lock_type = CIFS_UNLCK;
else if(numLock == 0) {
/* if no lock or unlock then nothing
to do since we do not know what it is */
FreeXid(xid);
return -EOPNOTSUPP;
}
rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
length, pfLock,
posix_lock_type, wait_flag);
} else
rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
numUnlock, numLock, lockType, wait_flag);
} else {
struct cifsFileInfo *fid = (struct cifsFileInfo *)file->private_data;
if (numLock) {
rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
0, numLock, lockType, wait_flag);
if (rc == 0) {
/* For Windows locks we must store them. */
rc = store_file_lock(fid, length,
pfLock->fl_start, lockType);
}
} else if (numUnlock) {
/* For each stored lock that this unlock overlaps
completely, unlock it. */
int stored_rc = 0;
struct cifsLockInfo *li, *tmp;
down(&fid->lock_sem);
list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
if (pfLock->fl_start <= li->offset &&
length >= li->length) {
stored_rc = CIFSSMBLock(xid, pTcon, netfid,
li->length, li->offset,
1, 0, li->type, FALSE);
if (stored_rc)
rc = stored_rc;
list_del(&li->llist);
kfree(li);
}
}
up(&fid->lock_sem);
}
}
if (pfLock->fl_flags & FL_POSIX)
posix_lock_file_wait(file, pfLock);
FreeXid(xid);
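
Condensing the unlock path added above: a stored Windows lock record is released when the unlock request starts at or before it and is at least as long as it, and each released record is unlocked on the wire individually. A short worked sketch of that test (the identifiers are the ones used in the hunk; the example values are made up):

	/* Stored lock: li->offset = 100, li->length = 50.
	 *   unlock fl_start = 100, length = 50   -> released
	 *   unlock fl_start = 0,   length = 200  -> released
	 *   unlock fl_start = 120, length = 10   -> kept (start not covered)
	 */
	if (pfLock->fl_start <= li->offset && length >= li->length) {
		stored_rc = CIFSSMBLock(xid, pTcon, netfid,
					li->length, li->offset,
					1 /* unlock */, 0, li->type, FALSE);
		if (stored_rc)
			rc = stored_rc;
		list_del(&li->llist);
		kfree(li);
	}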

@ -72,6 +72,7 @@ static const struct smb_to_posix_error mapping_table_ERRDOS[] = {
{ERRinvlevel,-EOPNOTSUPP},
{ERRdirnotempty, -ENOTEMPTY},
{ERRnotlocked, -ENOLCK},
{ERRcancelviolation, -ENOLCK},
{ERRalreadyexists, -EEXIST},
{ERRmoredata, -EOVERFLOW},
{ERReasnotsupported,-EOPNOTSUPP},

@ -95,6 +95,7 @@
#define ERRinvlevel 124
#define ERRdirnotempty 145
#define ERRnotlocked 158
#define ERRcancelviolation 173
#define ERRalreadyexists 183
#define ERRbadpipe 230
#define ERRpipebusy 231
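
The new ERRcancelviolation code, together with its -ENOLCK mapping added to the error table above, supports the cancel path added later in this diff: if the server has already granted or released the lock by the time the LOCKING_ANDX_CANCEL_LOCK arrives, it can answer the cancel with ERRcancelviolation, which is then treated as harmless. A sketch of the caller-side handling (it mirrors the SendReceiveBlockingLock() hunk below):

	rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
	/* -ENOLCK (for example from ERRcancelviolation) means the lock
	   was already gone, so there is nothing left to cancel. */
	if (rc && rc != -ENOLCK) {
		DeleteMidQEntry(midQ);
		return rc;
	}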

@ -37,7 +37,7 @@ extern mempool_t *cifs_mid_poolp;
extern kmem_cache_t *cifs_oplock_cachep;
static struct mid_q_entry *
AllocMidQEntry(struct smb_hdr *smb_buffer, struct cifsSesInfo *ses)
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses)
{
struct mid_q_entry *temp;
@ -204,6 +204,10 @@ smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
rc = 0;
}
/* Don't want to modify the buffer as a
side effect of this call. */
smb_buffer->smb_buf_length = smb_buf_length;
return rc;
}
@ -218,6 +222,7 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
unsigned int len = iov[0].iov_len;
unsigned int total_len;
int first_vec = 0;
unsigned int smb_buf_length = smb_buffer->smb_buf_length;
if(ssocket == NULL)
return -ENOTSOCK; /* BB eventually add reconnect code here */
@ -294,9 +299,123 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
} else
rc = 0;
/* Don't want to modify the buffer as a
side effect of this call. */
smb_buffer->smb_buf_length = smb_buf_length;
return rc;
}
static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
{
if(long_op == -1) {
/* oplock breaks must not be held up */
atomic_inc(&ses->server->inFlight);
} else {
spin_lock(&GlobalMid_Lock);
while(1) {
if(atomic_read(&ses->server->inFlight) >=
cifs_max_pending){
spin_unlock(&GlobalMid_Lock);
#ifdef CONFIG_CIFS_STATS2
atomic_inc(&ses->server->num_waiters);
#endif
wait_event(ses->server->request_q,
atomic_read(&ses->server->inFlight)
< cifs_max_pending);
#ifdef CONFIG_CIFS_STATS2
atomic_dec(&ses->server->num_waiters);
#endif
spin_lock(&GlobalMid_Lock);
} else {
if(ses->server->tcpStatus == CifsExiting) {
spin_unlock(&GlobalMid_Lock);
return -ENOENT;
}
/* can not count locking commands against total since
they are allowed to block on server */
/* update # of requests on the wire to server */
if (long_op < 3)
atomic_inc(&ses->server->inFlight);
spin_unlock(&GlobalMid_Lock);
break;
}
}
}
return 0;
}
static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf,
struct mid_q_entry **ppmidQ)
{
if (ses->server->tcpStatus == CifsExiting) {
return -ENOENT;
} else if (ses->server->tcpStatus == CifsNeedReconnect) {
cFYI(1,("tcp session dead - return to caller to retry"));
return -EAGAIN;
} else if (ses->status != CifsGood) {
/* check if SMB session is bad because we are setting it up */
if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
(in_buf->Command != SMB_COM_NEGOTIATE)) {
return -EAGAIN;
} /* else ok - we are setting up session */
}
*ppmidQ = AllocMidQEntry(in_buf, ses);
if (*ppmidQ == NULL) {
return -ENOMEM;
}
return 0;
}
static int wait_for_response(struct cifsSesInfo *ses,
struct mid_q_entry *midQ,
unsigned long timeout,
unsigned long time_to_wait)
{
unsigned long curr_timeout;
for (;;) {
curr_timeout = timeout + jiffies;
wait_event(ses->server->response_q,
(!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
time_after(jiffies, curr_timeout) ||
((ses->server->tcpStatus != CifsGood) &&
(ses->server->tcpStatus != CifsNew)));
if (time_after(jiffies, curr_timeout) &&
(midQ->midState == MID_REQUEST_SUBMITTED) &&
((ses->server->tcpStatus == CifsGood) ||
(ses->server->tcpStatus == CifsNew))) {
unsigned long lrt;
/* We timed out. Is the server still
sending replies ? */
spin_lock(&GlobalMid_Lock);
lrt = ses->server->lstrp;
spin_unlock(&GlobalMid_Lock);
/* Calculate time_to_wait past last receive time.
Although we prefer not to time out if the
server is still responding - we will time
out if the server takes more than 15 (or 45
or 180) seconds to respond to this request
and has not responded to any request from
other threads on the client within 10 seconds */
lrt += time_to_wait;
if (time_after(jiffies, lrt)) {
/* No replies for time_to_wait. */
cERROR(1,("server not responding"));
return -1;
}
} else {
return 0;
}
}
}
int
SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
struct kvec *iov, int n_vec, int * pRespBufType /* ret */,
@ -324,75 +443,27 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
/* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or
use ses->maxReq */
if(long_op == -1) {
/* oplock breaks must not be held up */
atomic_inc(&ses->server->inFlight);
} else {
spin_lock(&GlobalMid_Lock);
while(1) {
if(atomic_read(&ses->server->inFlight) >=
cifs_max_pending){
spin_unlock(&GlobalMid_Lock);
#ifdef CONFIG_CIFS_STATS2
atomic_inc(&ses->server->num_waiters);
#endif
wait_event(ses->server->request_q,
atomic_read(&ses->server->inFlight)
< cifs_max_pending);
#ifdef CONFIG_CIFS_STATS2
atomic_dec(&ses->server->num_waiters);
#endif
spin_lock(&GlobalMid_Lock);
} else {
if(ses->server->tcpStatus == CifsExiting) {
spin_unlock(&GlobalMid_Lock);
cifs_small_buf_release(in_buf);
return -ENOENT;
}
/* can not count locking commands against total since
they are allowed to block on server */
if(long_op < 3) {
/* update # of requests on the wire to server */
atomic_inc(&ses->server->inFlight);
}
spin_unlock(&GlobalMid_Lock);
break;
}
}
rc = wait_for_free_request(ses, long_op);
if (rc) {
cifs_small_buf_release(in_buf);
return rc;
}
/* make sure that we sign in the same order that we send on this socket
and avoid races inside tcp sendmsg code that could cause corruption
of smb data */
down(&ses->server->tcpSem);
if (ses->server->tcpStatus == CifsExiting) {
rc = -ENOENT;
goto out_unlock2;
} else if (ses->server->tcpStatus == CifsNeedReconnect) {
cFYI(1,("tcp session dead - return to caller to retry"));
rc = -EAGAIN;
goto out_unlock2;
} else if (ses->status != CifsGood) {
/* check if SMB session is bad because we are setting it up */
if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
(in_buf->Command != SMB_COM_NEGOTIATE)) {
rc = -EAGAIN;
goto out_unlock2;
} /* else ok - we are setting up session */
}
midQ = AllocMidQEntry(in_buf, ses);
if (midQ == NULL) {
rc = allocate_mid(ses, in_buf, &midQ);
if (rc) {
up(&ses->server->tcpSem);
cifs_small_buf_release(in_buf);
/* If not lock req, update # of requests on wire to server */
if(long_op < 3) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
}
return -ENOMEM;
/* Update # of requests on wire to server */
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
}
rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
@ -407,32 +478,23 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
atomic_dec(&ses->server->inSend);
midQ->when_sent = jiffies;
#endif
if(rc < 0) {
DeleteMidQEntry(midQ);
up(&ses->server->tcpSem);
cifs_small_buf_release(in_buf);
/* If not lock req, update # of requests on wire to server */
if(long_op < 3) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
}
return rc;
} else {
up(&ses->server->tcpSem);
cifs_small_buf_release(in_buf);
}
up(&ses->server->tcpSem);
cifs_small_buf_release(in_buf);
if(rc < 0)
goto out;
if (long_op == -1)
goto cifs_no_response_exit2;
goto out;
else if (long_op == 2) /* writes past end of file can take loong time */
timeout = 180 * HZ;
else if (long_op == 1)
timeout = 45 * HZ; /* should be greater than
servers oplock break timeout (about 43 seconds) */
else if (long_op > 2) {
timeout = MAX_SCHEDULE_TIMEOUT;
} else
else
timeout = 15 * HZ;
/* wait for 15 seconds or until woken up due to response arriving or
due to last connection to this server being unmounted */
if (signal_pending(current)) {
@ -442,53 +504,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
}
/* No user interrupts in wait - wreaks havoc with performance */
if(timeout != MAX_SCHEDULE_TIMEOUT) {
unsigned long curr_timeout;
for (;;) {
curr_timeout = timeout + jiffies;
wait_event(ses->server->response_q,
(!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
time_after(jiffies, curr_timeout) ||
((ses->server->tcpStatus != CifsGood) &&
(ses->server->tcpStatus != CifsNew)));
if (time_after(jiffies, curr_timeout) &&
(midQ->midState == MID_REQUEST_SUBMITTED) &&
((ses->server->tcpStatus == CifsGood) ||
(ses->server->tcpStatus == CifsNew))) {
unsigned long lrt;
/* We timed out. Is the server still
sending replies ? */
spin_lock(&GlobalMid_Lock);
lrt = ses->server->lstrp;
spin_unlock(&GlobalMid_Lock);
/* Calculate 10 seconds past last receive time.
Although we prefer not to time out if the
server is still responding - we will time
out if the server takes more than 15 (or 45
or 180) seconds to respond to this request
and has not responded to any request from
other threads on the client within 10 seconds */
lrt += (10 * HZ);
if (time_after(jiffies, lrt)) {
/* No replies for 10 seconds. */
cERROR(1,("server not responding"));
break;
}
} else {
break;
}
}
} else {
wait_event(ses->server->response_q,
(!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
((ses->server->tcpStatus != CifsGood) &&
(ses->server->tcpStatus != CifsNew)));
}
wait_for_response(ses, midQ, timeout, 10 * HZ);
spin_lock(&GlobalMid_Lock);
if (midQ->resp_buf) {
@ -516,11 +532,9 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
}
spin_unlock(&GlobalMid_Lock);
DeleteMidQEntry(midQ);
/* If not lock req, update # of requests on wire to server */
if(long_op < 3) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
}
/* Update # of requests on wire to server */
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
}
@ -571,24 +585,12 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
cFYI(1,("Bad MID state?"));
}
}
cifs_no_response_exit2:
out:
DeleteMidQEntry(midQ);
if(long_op < 3) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
}
return rc;
out_unlock2:
up(&ses->server->tcpSem);
cifs_small_buf_release(in_buf);
/* If not lock req, update # of requests on wire to server */
if(long_op < 3) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
}
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
}
@ -618,85 +620,34 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
/* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or
use ses->maxReq */
if(long_op == -1) {
/* oplock breaks must not be held up */
atomic_inc(&ses->server->inFlight);
} else {
spin_lock(&GlobalMid_Lock);
while(1) {
if(atomic_read(&ses->server->inFlight) >=
cifs_max_pending){
spin_unlock(&GlobalMid_Lock);
#ifdef CONFIG_CIFS_STATS2
atomic_inc(&ses->server->num_waiters);
#endif
wait_event(ses->server->request_q,
atomic_read(&ses->server->inFlight)
< cifs_max_pending);
#ifdef CONFIG_CIFS_STATS2
atomic_dec(&ses->server->num_waiters);
#endif
spin_lock(&GlobalMid_Lock);
} else {
if(ses->server->tcpStatus == CifsExiting) {
spin_unlock(&GlobalMid_Lock);
return -ENOENT;
}
/* can not count locking commands against total since
they are allowed to block on server */
if(long_op < 3) {
/* update # of requests on the wire to server */
atomic_inc(&ses->server->inFlight);
}
spin_unlock(&GlobalMid_Lock);
break;
}
}
}
rc = wait_for_free_request(ses, long_op);
if (rc)
return rc;
/* make sure that we sign in the same order that we send on this socket
and avoid races inside tcp sendmsg code that could cause corruption
of smb data */
down(&ses->server->tcpSem);
if (ses->server->tcpStatus == CifsExiting) {
rc = -ENOENT;
goto out_unlock;
} else if (ses->server->tcpStatus == CifsNeedReconnect) {
cFYI(1,("tcp session dead - return to caller to retry"));
rc = -EAGAIN;
goto out_unlock;
} else if (ses->status != CifsGood) {
/* check if SMB session is bad because we are setting it up */
if((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
(in_buf->Command != SMB_COM_NEGOTIATE)) {
rc = -EAGAIN;
goto out_unlock;
} /* else ok - we are setting up session */
}
midQ = AllocMidQEntry(in_buf, ses);
if (midQ == NULL) {
rc = allocate_mid(ses, in_buf, &midQ);
if (rc) {
up(&ses->server->tcpSem);
/* If not lock req, update # of requests on wire to server */
if(long_op < 3) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
}
return -ENOMEM;
/* Update # of requests on wire to server */
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
}
if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
up(&ses->server->tcpSem);
cERROR(1, ("Illegal length, greater than maximum frame, %d",
in_buf->smb_buf_length));
DeleteMidQEntry(midQ);
/* If not lock req, update # of requests on wire to server */
if(long_op < 3) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
}
up(&ses->server->tcpSem);
/* Update # of requests on wire to server */
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return -EIO;
}
@ -712,27 +663,19 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
atomic_dec(&ses->server->inSend);
midQ->when_sent = jiffies;
#endif
if(rc < 0) {
DeleteMidQEntry(midQ);
up(&ses->server->tcpSem);
/* If not lock req, update # of requests on wire to server */
if(long_op < 3) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
}
return rc;
} else
up(&ses->server->tcpSem);
up(&ses->server->tcpSem);
if(rc < 0)
goto out;
if (long_op == -1)
goto cifs_no_response_exit;
goto out;
else if (long_op == 2) /* writes past end of file can take loong time */
timeout = 180 * HZ;
else if (long_op == 1)
timeout = 45 * HZ; /* should be greater than
servers oplock break timeout (about 43 seconds) */
else if (long_op > 2) {
timeout = MAX_SCHEDULE_TIMEOUT;
} else
else
timeout = 15 * HZ;
/* wait for 15 seconds or until woken up due to response arriving or
due to last connection to this server being unmounted */
@ -743,47 +686,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
}
/* No user interrupts in wait - wreaks havoc with performance */
if(timeout != MAX_SCHEDULE_TIMEOUT) {
unsigned long curr_timeout;
for (;;) {
curr_timeout = timeout + jiffies;
wait_event(ses->server->response_q,
(!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
time_after(jiffies, curr_timeout) ||
((ses->server->tcpStatus != CifsGood) &&
(ses->server->tcpStatus != CifsNew)));
if (time_after(jiffies, curr_timeout) &&
(midQ->midState == MID_REQUEST_SUBMITTED) &&
((ses->server->tcpStatus == CifsGood) ||
(ses->server->tcpStatus == CifsNew))) {
unsigned long lrt;
/* We timed out. Is the server still
sending replies ? */
spin_lock(&GlobalMid_Lock);
lrt = ses->server->lstrp;
spin_unlock(&GlobalMid_Lock);
/* Calculate 10 seconds past last receive time*/
lrt += (10 * HZ);
if (time_after(jiffies, lrt)) {
/* Server sent no reply in 10 seconds */
cERROR(1,("Server not responding"));
break;
}
} else {
break;
}
}
} else {
wait_event(ses->server->response_q,
(!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
((ses->server->tcpStatus != CifsGood) &&
(ses->server->tcpStatus != CifsNew)));
}
wait_for_response(ses, midQ, timeout, 10 * HZ);
spin_lock(&GlobalMid_Lock);
if (midQ->resp_buf) {
@ -811,11 +714,9 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
}
spin_unlock(&GlobalMid_Lock);
DeleteMidQEntry(midQ);
/* If not lock req, update # of requests on wire to server */
if(long_op < 3) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
}
/* Update # of requests on wire to server */
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
}
@ -862,23 +763,253 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
cERROR(1,("Bad MID state?"));
}
}
cifs_no_response_exit:
out:
DeleteMidQEntry(midQ);
if(long_op < 3) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
}
return rc;
out_unlock:
up(&ses->server->tcpSem);
/* If not lock req, update # of requests on wire to server */
if(long_op < 3) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
}
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
return rc;
}
/* Send an NT_CANCEL SMB to cause the POSIX blocking lock to return. */
static int
send_nt_cancel(struct cifsTconInfo *tcon, struct smb_hdr *in_buf,
struct mid_q_entry *midQ)
{
int rc = 0;
struct cifsSesInfo *ses = tcon->ses;
__u16 mid = in_buf->Mid;
header_assemble(in_buf, SMB_COM_NT_CANCEL, tcon, 0);
in_buf->Mid = mid;
down(&ses->server->tcpSem);
rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
if (rc) {
up(&ses->server->tcpSem);
return rc;
}
rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
(struct sockaddr *) &(ses->server->addr.sockAddr));
up(&ses->server->tcpSem);
return rc;
}
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
blocking lock to return. */
static int
send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon,
struct smb_hdr *in_buf,
struct smb_hdr *out_buf)
{
int bytes_returned;
struct cifsSesInfo *ses = tcon->ses;
LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
/* We just modify the current in_buf to change
the type of lock from LOCKING_ANDX_SHARED_LOCK
or LOCKING_ANDX_EXCLUSIVE_LOCK to
LOCKING_ANDX_CANCEL_LOCK. */
pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
pSMB->Timeout = 0;
pSMB->hdr.Mid = GetNextMid(ses->server);
return SendReceive(xid, ses, in_buf, out_buf,
&bytes_returned, 0);
}
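
In outline, the blocking-lock send/cancel flow that the new function below implements (this block is purely a descriptive summary comment, not code added by the patch; all names are from the patch):

/*
 * SendReceiveBlockingLock() overview:
 *  1. Wait for a free request slot and send the lock SMB (long_op 3:
 *     locking requests may block on the server, so they are not counted
 *     against the in-flight total).
 *  2. Wait with wait_event_interruptible() - signals are allowed here,
 *     unlike the normal paths, because the server may block indefinitely.
 *  3. If a signal arrives while the request is still outstanding:
 *       - Trans2 request (POSIX byte-range lock) -> send_nt_cancel()
 *       - LOCKING_ANDX request (Windows-style)   -> send_lock_cancel(),
 *         where -ENOLCK means the lock was already gone, not an error.
 *     Then wait up to 5 seconds for the response to the original request.
 *  4. If the cancelled lock request completes with -EACCES (lock
 *     conflict), return -ERESTARTSYS so the system call is restarted.
 */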
int
SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
struct smb_hdr *in_buf, struct smb_hdr *out_buf,
int *pbytes_returned)
{
int rc = 0;
int rstart = 0;
unsigned int receive_len;
struct mid_q_entry *midQ;
struct cifsSesInfo *ses;
if (tcon == NULL || tcon->ses == NULL) {
cERROR(1,("Null smb session"));
return -EIO;
}
ses = tcon->ses;
if(ses->server == NULL) {
cERROR(1,("Null tcp session"));
return -EIO;
}
if(ses->server->tcpStatus == CifsExiting)
return -ENOENT;
/* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or
use ses->maxReq */
rc = wait_for_free_request(ses, 3);
if (rc)
return rc;
/* make sure that we sign in the same order that we send on this socket
and avoid races inside tcp sendmsg code that could cause corruption
of smb data */
down(&ses->server->tcpSem);
rc = allocate_mid(ses, in_buf, &midQ);
if (rc) {
up(&ses->server->tcpSem);
return rc;
}
if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
up(&ses->server->tcpSem);
cERROR(1, ("Illegal length, greater than maximum frame, %d",
in_buf->smb_buf_length));
DeleteMidQEntry(midQ);
return -EIO;
}
rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
midQ->midState = MID_REQUEST_SUBMITTED;
#ifdef CONFIG_CIFS_STATS2
atomic_inc(&ses->server->inSend);
#endif
rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
(struct sockaddr *) &(ses->server->addr.sockAddr));
#ifdef CONFIG_CIFS_STATS2
atomic_dec(&ses->server->inSend);
midQ->when_sent = jiffies;
#endif
up(&ses->server->tcpSem);
if(rc < 0) {
DeleteMidQEntry(midQ);
return rc;
}
/* Wait for a reply - allow signals to interrupt. */
rc = wait_event_interruptible(ses->server->response_q,
(!(midQ->midState == MID_REQUEST_SUBMITTED)) ||
((ses->server->tcpStatus != CifsGood) &&
(ses->server->tcpStatus != CifsNew)));
/* Were we interrupted by a signal ? */
if ((rc == -ERESTARTSYS) &&
(midQ->midState == MID_REQUEST_SUBMITTED) &&
((ses->server->tcpStatus == CifsGood) ||
(ses->server->tcpStatus == CifsNew))) {
if (in_buf->Command == SMB_COM_TRANSACTION2) {
/* POSIX lock. We send a NT_CANCEL SMB to cause the
blocking lock to return. */
rc = send_nt_cancel(tcon, in_buf, midQ);
if (rc) {
DeleteMidQEntry(midQ);
return rc;
}
} else {
/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
to cause the blocking lock to return. */
rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
/* If we get -ENOLCK back the lock may have
already been removed. Don't exit in this case. */
if (rc && rc != -ENOLCK) {
DeleteMidQEntry(midQ);
return rc;
}
}
/* Wait 5 seconds for the response. */
if (wait_for_response(ses, midQ, 5 * HZ, 5 * HZ)==0) {
/* We got the response - restart system call. */
rstart = 1;
}
}
spin_lock(&GlobalMid_Lock);
if (midQ->resp_buf) {
spin_unlock(&GlobalMid_Lock);
receive_len = midQ->resp_buf->smb_buf_length;
} else {
cERROR(1,("No response for cmd %d mid %d",
midQ->command, midQ->mid));
if(midQ->midState == MID_REQUEST_SUBMITTED) {
if(ses->server->tcpStatus == CifsExiting)
rc = -EHOSTDOWN;
else {
ses->server->tcpStatus = CifsNeedReconnect;
midQ->midState = MID_RETRY_NEEDED;
}
}
if (rc != -EHOSTDOWN) {
if(midQ->midState == MID_RETRY_NEEDED) {
rc = -EAGAIN;
cFYI(1,("marking request for retry"));
} else {
rc = -EIO;
}
}
spin_unlock(&GlobalMid_Lock);
DeleteMidQEntry(midQ);
return rc;
}
if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
cERROR(1, ("Frame too large received. Length: %d Xid: %d",
receive_len, xid));
rc = -EIO;
} else { /* rcvd frame is ok */
if (midQ->resp_buf && out_buf
&& (midQ->midState == MID_RESPONSE_RECEIVED)) {
out_buf->smb_buf_length = receive_len;
memcpy((char *)out_buf + 4,
(char *)midQ->resp_buf + 4,
receive_len);
dump_smb(out_buf, 92);
/* convert the length into a more usable form */
if((receive_len > 24) &&
(ses->server->secMode & (SECMODE_SIGN_REQUIRED |
SECMODE_SIGN_ENABLED))) {
rc = cifs_verify_signature(out_buf,
ses->server->mac_signing_key,
midQ->sequence_number+1);
if(rc) {
cERROR(1,("Unexpected SMB signature"));
/* BB FIXME add code to kill session */
}
}
*pbytes_returned = out_buf->smb_buf_length;
/* BB special case reconnect tid and uid here? */
rc = map_smb_to_linux_error(out_buf);
/* convert ByteCount if necessary */
if (receive_len >=
sizeof (struct smb_hdr) -
4 /* do not count RFC1001 header */ +
(2 * out_buf->WordCount) + 2 /* bcc */ )
BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
} else {
rc = -EIO;
cERROR(1,("Bad MID state?"));
}
}
DeleteMidQEntry(midQ);
if (rstart && rc == -EACCES)
return -ERESTARTSYS;
return rc;
}