cifs: avoid re-lookups in dfs_cache_find()
Downgrade the write lock on cache updates in cache_refresh_path() and return the entry with shared (read) access still held, avoiding an unnecessary re-lookup in dfs_cache_find().

Signed-off-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
Signed-off-by: Steve French <stfrench@microsoft.com>
Parent: 9fb0db4051
Commit: 48d240bf00
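The core of the change is the locking convention in cache_refresh_path(): instead of updating the cache under the write lock, releasing it, and forcing callers to re-take htable_rw_lock and look the entry up again, the function now downgrades the write lock and returns the entry with shared (read) access still held. A minimal sketch of that rwsem pattern, assuming hypothetical stand-ins (hashtable_lock, refresh_entry(), get_entry_read_locked()) for the real symbols in the patch:

	#include <linux/rwsem.h>
	#include <linux/err.h>

	struct cache_entry;				/* opaque for this sketch */
	static struct cache_entry *refresh_entry(const char *path);	/* hypothetical update helper */

	static DECLARE_RWSEM(hashtable_lock);		/* hypothetical stand-in for htable_rw_lock */

	/* Sketch only: return the entry with the read lock held on success. */
	static struct cache_entry *get_entry_read_locked(const char *path)
	{
		struct cache_entry *ce;

		down_write(&hashtable_lock);		/* exclusive: may add or update entries */
		ce = refresh_entry(path);
		if (IS_ERR(ce)) {
			up_write(&hashtable_lock);	/* on error, no lock is held */
			return ce;
		}
		downgrade_write(&hashtable_lock);	/* atomically switch to shared access */
		return ce;				/* caller must up_read(&hashtable_lock) */
	}

downgrade_write() converts exclusive ownership to shared ownership without a window where the lock is dropped, which is what makes the caller's re-lookup unnecessary.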
@@ -558,7 +558,8 @@ static void remove_oldest_entry_locked(void)
 }
 
 /* Add a new DFS cache entry */
-static int add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs)
+static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
+						  int numrefs)
 {
 	int rc;
 	struct cache_entry *ce;
@@ -573,11 +574,11 @@ static int add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs)
 
 	rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
 	if (rc)
-		return rc;
+		return ERR_PTR(rc);
 
 	ce = alloc_cache_entry(refs, numrefs);
 	if (IS_ERR(ce))
-		return PTR_ERR(ce);
+		return ce;
 
 	spin_lock(&cache_ttl_lock);
 	if (!cache_ttl) {
@@ -594,7 +595,7 @@ static int add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs)
 
 	atomic_inc(&cache_count);
 
-	return 0;
+	return ce;
 }
 
 /* Check if two DFS paths are equal. @s1 and @s2 are expected to be in @cache_cp's charset */
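The three hunks above convert add_cache_entry_locked() from returning an int to returning the entry itself, using the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers so one pointer-sized return value carries either the new entry or a negative errno. A tiny, hypothetical illustration of the convention (not code from this patch):

	#include <linux/err.h>

	struct cache_entry;			/* opaque for this sketch */

	/* Hypothetical helper: fold an int error and an object into one return value. */
	static struct cache_entry *entry_or_error(struct cache_entry *ce, int rc)
	{
		if (rc)
			return ERR_PTR(rc);	/* rc is a negative errno */
		return ce;			/* valid pointer on success */
	}

Callers test the result with IS_ERR() and, where an int is still needed, recover it with PTR_ERR(), as the later hunks in cache_refresh_path() and its callers do.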
@@ -767,8 +768,12 @@ static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const
  *
  * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
  * handle them properly.
+ *
+ * On success, return entry with acquired lock for reading, otherwise error ptr.
  */
-static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, const char *path)
+static struct cache_entry *cache_refresh_path(const unsigned int xid,
+					      struct cifs_ses *ses,
+					      const char *path)
 {
 	struct dfs_info3_param *refs = NULL;
 	struct cache_entry *ce;
@@ -780,10 +785,9 @@ static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, cons
 	down_read(&htable_rw_lock);
 
 	ce = lookup_cache_entry(path);
-	if (!IS_ERR(ce) && !cache_entry_expired(ce)) {
-		up_read(&htable_rw_lock);
-		return 0;
-	}
+	if (!IS_ERR(ce) && !cache_entry_expired(ce))
+		return ce;
+
 	/*
 	 * Unlock shared access as we don't want to hold any locks while getting
 	 * a new referral. The @ses used for performing the I/O could be
@@ -797,8 +801,10 @@ static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, cons
 	 * Request a new DFS referral in order to create or update a cache entry.
 	 */
 	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
-	if (rc)
+	if (rc) {
+		ce = ERR_PTR(rc);
 		goto out;
+	}
 
 	dump_refs(refs, numrefs);
 
@@ -806,16 +812,24 @@ static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, cons
 	/* Re-check as another task might have it added or refreshed already */
 	ce = lookup_cache_entry(path);
 	if (!IS_ERR(ce)) {
-		if (cache_entry_expired(ce))
+		if (cache_entry_expired(ce)) {
 			rc = update_cache_entry_locked(ce, refs, numrefs);
+			if (rc)
+				ce = ERR_PTR(rc);
+		}
 	} else {
-		rc = add_cache_entry_locked(refs, numrefs);
+		ce = add_cache_entry_locked(refs, numrefs);
 	}
 
-	up_write(&htable_rw_lock);
+	if (IS_ERR(ce)) {
+		up_write(&htable_rw_lock);
+		goto out;
+	}
+
+	downgrade_write(&htable_rw_lock);
 out:
 	free_dfs_info_array(refs, numrefs);
-	return rc;
+	return ce;
 }
 
 /*
@@ -935,15 +949,8 @@ int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nl
 	if (IS_ERR(npath))
 		return PTR_ERR(npath);
 
-	rc = cache_refresh_path(xid, ses, npath);
-	if (rc)
-		goto out_free_path;
-
-	down_read(&htable_rw_lock);
-
-	ce = lookup_cache_entry(npath);
+	ce = cache_refresh_path(xid, ses, npath);
 	if (IS_ERR(ce)) {
-		up_read(&htable_rw_lock);
 		rc = PTR_ERR(ce);
 		goto out_free_path;
 	}
@@ -1039,10 +1046,13 @@ int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
 
 	cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);
 
-	rc = cache_refresh_path(xid, ses, npath);
-	if (rc)
+	ce = cache_refresh_path(xid, ses, npath);
+	if (IS_ERR(ce)) {
+		rc = PTR_ERR(ce);
 		goto out_free_path;
+	}
 
+	up_read(&htable_rw_lock);
 	down_write(&htable_rw_lock);
 
 	ce = lookup_cache_entry(npath);
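Taken together, the last two hunks show the new caller contract: cache_refresh_path() returns either an ERR_PTR() with no lock held, or a valid entry with htable_rw_lock held for reading, which the caller must drop. Condensed into a rough sketch (error labels and surrounding code omitted):

	ce = cache_refresh_path(xid, ses, npath);
	if (IS_ERR(ce))
		return PTR_ERR(ce);		/* no lock is held on the error path */

	/* ... use @ce while htable_rw_lock is held for reading ... */

	up_read(&htable_rw_lock);		/* caller releases shared access when done */

dfs_cache_update_tgthint() additionally trades the read lock for the write lock (up_read() followed by down_write() and a fresh lookup) because it needs to modify the entry's target hint.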