[PATCH] cifs: Do not init smb requests or block when sending requests

if cifsd thread is no longer running to demultiplex responses.

Do not send FindClose request when FindFirst failed without reaching end
of search. 

Signed-off-by: Steve French (sfrench@us.ibm.com)
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Steve French 2005-04-28 22:41:11 -07:00, committed by Linus Torvalds
Parent 57337e42f1
Commit 31ca3bc3c5
5 changed files with 53 additions and 7 deletions

View File

@@ -1,7 +1,10 @@
Version 1.34
------------
Fix error mapping of the TOO_MANY_LINKS (hardlinks) case.
Do not oops if user kills cifs oplock kernel thread.
Do not oops if root user kills cifs oplock kernel thread or
kills the cifsd thread (NB: killing the cifs kernel threads is not
recommended, unmount and rmmod cifs will kill them when they are
no longer needed).
Version 1.33
------------

View File

@@ -90,7 +90,8 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
check for tcp and smb session status done differently
for those three - in the calling routine */
if(tcon) {
if((tcon->ses) && (tcon->ses->server)){
if((tcon->ses) && (tcon->ses->status != CifsExiting) &&
(tcon->ses->server)){
struct nls_table *nls_codepage;
/* Give Demultiplex thread up to 10 seconds to
reconnect, should be greater than cifs socket
@@ -185,7 +186,8 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
check for tcp and smb session status done differently
for those three - in the calling routine */
if(tcon) {
if((tcon->ses) && (tcon->ses->server)){
if((tcon->ses) && (tcon->ses->status != CifsExiting) &&
(tcon->ses->server)){
struct nls_table *nls_codepage;
/* Give Demultiplex thread up to 10 seconds to
reconnect, should be greater than cifs socket
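
Both hunks above add the same guard to small_smb_init() and smb_init(): in addition to checking that a server pointer exists, they now check that the session is not already marked CifsExiting before trying to reconnect and marshal a request. Below is a minimal userspace sketch of that pattern; the types are simplified stand-ins, not the real cifsTconInfo/cifsSesInfo layouts.

#include <errno.h>

/* Hedged sketch: simplified stand-ins, not the kernel structures. */
enum ses_status { SES_GOOD, SES_NEED_RECONNECT, SES_EXITING };

struct server_info { int unused; };
struct session     { enum ses_status status; struct server_info *server; };
struct tcon        { struct session *ses; };

int init_request(struct tcon *tcon)
{
        /* Only attempt reconnect and request marshalling while the session
           is still alive; once the demultiplex thread has exited the session
           is marked exiting and nobody is left to match a response. */
        if (tcon && tcon->ses &&
            tcon->ses->status != SES_EXITING &&
            tcon->ses->server) {
                /* ... safe to (re)connect and build the SMB header ... */
                return 0;
        }
        return -ENOENT; /* give up instead of waiting on a dead connection */
}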

View File

@@ -604,7 +604,13 @@ multi_t2_fnd:
spin_lock(&GlobalMid_Lock);
server->tcpStatus = CifsExiting;
server->tsk = NULL;
atomic_set(&server->inFlight, 0);
/* check if we have blocked requests that need to free */
/* Note that cifs_max_pending is normally 50, but
can be set at module install time to as little as two */
if(atomic_read(&server->inFlight) >= cifs_max_pending)
atomic_set(&server->inFlight, cifs_max_pending - 1);
/* We do not want to set the max_pending too low or we
could end up with the counter going negative */
spin_unlock(&GlobalMid_Lock);
/* Although there should not be any requests blocked on
this queue it can not hurt to be paranoid and try to wake up requests
@@ -640,6 +646,17 @@ multi_t2_fnd:
}
read_unlock(&GlobalSMBSeslock);
} else {
/* although we can not zero the server struct pointer yet,
since there are active requests which may depend on them,
mark the corresponding SMB sessions as exiting too */
list_for_each(tmp, &GlobalSMBSessionList) {
ses = list_entry(tmp, struct cifsSesInfo,
cifsSessionList);
if (ses->server == server) {
ses->status = CifsExiting;
}
}
spin_lock(&GlobalMid_Lock);
list_for_each(tmp, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
@@ -661,17 +678,34 @@ multi_t2_fnd:
if (list_empty(&server->pending_mid_q)) {
/* mpx threads have not exited yet give them
at least the smb send timeout time for long ops */
/* due to delays on oplock break requests, we need
to wait at least 45 seconds before giving up
on a request getting a response and going ahead
and killing cifsd */
cFYI(1, ("Wait for exit from demultiplex thread"));
msleep(46);
msleep(46000);
/* if threads still have not exited they are probably never
coming home not much else we can do but free the memory */
}
kfree(server);
write_lock(&GlobalSMBSeslock);
atomic_dec(&tcpSesAllocCount);
length = tcpSesAllocCount.counter;
/* last chance to mark ses pointers invalid
if there are any pointing to this (e.g
if a crazy root user tried to kill cifsd
kernel thread explicitly this might happen) */
list_for_each(tmp, &GlobalSMBSessionList) {
ses = list_entry(tmp, struct cifsSesInfo,
cifsSessionList);
if (ses->server == server) {
ses->server = NULL;
}
}
write_unlock(&GlobalSMBSeslock);
kfree(server);
if(length > 0) {
mempool_resize(cifs_req_poolp,
length + cifs_min_rcv,
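
Taken together, the hunks above change what the demultiplex thread does on its way out. Instead of zeroing inFlight it caps the counter just below cifs_max_pending (normally 50, possibly as low as two) so that senders blocked on the request throttle are woken without the counter later going negative; it marks every SMB session on this socket as exiting so new requests fail fast; and, after waiting up to 46 seconds for the remaining threads to exit, it clears the ses->server back-pointers under the lock before freeing the server structure. A rough userspace sketch of that ordering follows; the types are simplified stand-ins and a pthread mutex takes the place of GlobalMid_Lock/GlobalSMBSeslock.

#include <pthread.h>
#include <stdlib.h>

#define MAX_PENDING 50  /* stands in for cifs_max_pending */

/* Hedged sketch: simplified stand-ins, not the kernel structures. */
struct tcp_server { int in_flight; int exiting; };
struct smb_ses    { struct tcp_server *server; int exiting; struct smb_ses *next; };

static pthread_mutex_t ses_lock = PTHREAD_MUTEX_INITIALIZER;

void tcp_thread_exit(struct tcp_server *srv, struct smb_ses *ses_list)
{
        struct smb_ses *s;

        srv->exiting = 1;

        /* 1. Cap, do not zero, the throttle counter: blocked senders are
              woken, yet completions that decrement in_flight afterwards
              cannot drive it negative. */
        if (srv->in_flight >= MAX_PENDING)
                srv->in_flight = MAX_PENDING - 1;

        /* 2. Mark the sessions using this socket as exiting so that new
              requests bail out instead of queueing for a dead thread. */
        for (s = ses_list; s; s = s->next)
                if (s->server == srv)
                        s->exiting = 1;

        /* ... wait for in-flight requests and mpx threads to drain ... */

        /* 3. Only now invalidate the back-pointers, under the lock, and
              free the server structure last. */
        pthread_mutex_lock(&ses_lock);
        for (s = ses_list; s; s = s->next)
                if (s->server == srv)
                        s->server = NULL;
        pthread_mutex_unlock(&ses_lock);
        free(srv);
}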

View File

@@ -512,7 +512,8 @@ int cifs_closedir(struct inode *inode, struct file *file)
pTcon = cifs_sb->tcon;
cFYI(1, ("Freeing private data in close dir"));
if (pCFileStruct->srch_inf.endOfSearch == FALSE) {
if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
(pCFileStruct->invalidHandle == FALSE)) {
pCFileStruct->invalidHandle = TRUE;
rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
cFYI(1, ("Closing uncompleted readdir with rc %d",
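
The cifs_closedir() change above only sends FindClose when the search handle is actually valid: if FindFirst itself failed, invalidHandle is already TRUE and there is no server-side search to close. A small sketch of that check with hypothetical stand-in names; send_find_close() below is only a placeholder for CIFSFindClose().

#include <stdbool.h>

/* Hedged sketch: stand-in structure, not the real cifsFileInfo layout. */
struct search_file {
        bool end_of_search;     /* server reported no more entries */
        bool invalid_handle;    /* FindFirst failed or handle already closed */
        unsigned short netfid;
};

/* Placeholder for CIFSFindClose(); in this sketch it simply succeeds. */
static int send_find_close(unsigned short netfid)
{
        (void)netfid;
        return 0;
}

int close_dir(struct search_file *f)
{
        int rc = 0;

        /* Only ask the server to close the search if the handle was
           actually opened and the search never ran to completion. */
        if (!f->end_of_search && !f->invalid_handle) {
                f->invalid_handle = true;
                rc = send_find_close(f->netfid);
        }
        return rc;
}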

View File

@@ -270,6 +270,9 @@ CIFSSendRcv(const unsigned int xid, struct cifsSesInfo *ses,
if(ses->server->tcpStatus == CifsExiting)
return -ENOENT;
/* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or
use ses->maxReq */
@@ -401,6 +404,9 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
return -EIO;
}
if(ses->server->tcpStatus == CifsExiting)
return -ENOENT;
/* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or
use ses->maxReq */
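
Both send paths now check the connection status before entering the throttle that limits a socket to 50 overlapping requests; without the check, a caller could sleep indefinitely waiting for slots that will never be released once cifsd has exited. A rough sketch of that ordering, simplified and without the kernel wait queue:

#include <errno.h>

#define MAX_PENDING 50  /* stands in for the 50-request limit */

enum tcp_status { TCP_GOOD, TCP_NEED_RECONNECT, TCP_EXITING };

/* Hedged sketch: simplified stand-in for the per-socket server info. */
struct server_conn {
        enum tcp_status status;
        int in_flight;  /* overlapping requests currently outstanding */
};

int send_receive(struct server_conn *srv /* , request, response, ... */)
{
        /* Check for a dying connection *before* throttling; otherwise the
           caller could block on slots that will never be freed. */
        if (srv->status == TCP_EXITING)
                return -ENOENT;

        if (srv->in_flight >= MAX_PENDING) {
                /* ... in the kernel the caller sleeps here until a
                   completed response frees a slot ... */
        }
        srv->in_flight++;

        /* ... build and transmit the SMB, then wait for the response ... */
        return 0;
}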