nfs-ganesha/0002-nfs-ganesha_2802.patch

diff -ur nfs-ganesha-2.8.0.1/src/CMakeLists.txt nfs-ganesha-2.8.0.2/src/CMakeLists.txt
--- nfs-ganesha-2.8.0.1/src/CMakeLists.txt 2019-06-07 18:00:00.000000000 -0400
+++ nfs-ganesha-2.8.0.2/src/CMakeLists.txt 2019-06-14 18:23:42.000000000 -0400
@@ -39,7 +39,7 @@
# Patch level is always ".0" for mainline (master). It is blank for development.
# When starting a stable maintenance branch, this becomes ".N"
# where N is monotonically increasing starting at 1. Remember to include the "." !!
-set(GANESHA_PATCH_LEVEL .0.1)
+set(GANESHA_PATCH_LEVEL .0.2)
# Extra version is for naming development/RC. It is blank in master/stable branches
# so it can be available to end-users to name local variants/versions
diff -ur nfs-ganesha-2.8.0.1/src/doc/man/ganesha-cache-config.rst nfs-ganesha-2.8.0.2/src/doc/man/ganesha-cache-config.rst
--- nfs-ganesha-2.8.0.1/src/doc/man/ganesha-cache-config.rst 2019-06-07 18:00:00.000000000 -0400
+++ nfs-ganesha-2.8.0.2/src/doc/man/ganesha-cache-config.rst 2019-06-14 18:23:42.000000000 -0400
@@ -44,6 +44,9 @@
Entries_HWMark(uint32, range 1 to UINT32_MAX, default 100000)
The point at which object cache entries will start being reused.
+Chunks_HWMark(uint32, range 1 to UINT32_MAX, default 100000)
+ The point at which dirent cache chunks will start being reused.
+
LRU_Run_Interval(uint32, range 1 to 24 * 3600, default 90)
Base interval in seconds between runs of the LRU cleaner thread.
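
For reference, the new option sits alongside the existing cache parameters; a minimal ganesha.conf sketch, assuming the MDCACHE config block this man page documents (values illustrative, not recommendations):

    MDCACHE
    {
        # Existing knob: cap on cached object entries
        Entries_HWMark = 100000;
        # New in 2.8.0.2: cap on cached dirent chunks before reuse
        Chunks_HWMark = 100000;
    }
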
diff -ur nfs-ganesha-2.8.0.1/src/FSAL/Stackable_FSALs/FSAL_MDCACHE/mdcache_helpers.c nfs-ganesha-2.8.0.2/src/FSAL/Stackable_FSALs/FSAL_MDCACHE/mdcache_helpers.c
--- nfs-ganesha-2.8.0.1/src/FSAL/Stackable_FSALs/FSAL_MDCACHE/mdcache_helpers.c 2019-06-07 18:00:00.000000000 -0400
+++ nfs-ganesha-2.8.0.2/src/FSAL/Stackable_FSALs/FSAL_MDCACHE/mdcache_helpers.c 2019-06-14 18:23:42.000000000 -0400
@@ -56,6 +56,28 @@
#define mdc_chunk_first_dirent(c) \
glist_first_entry(&(c)->dirents, mdcache_dir_entry_t, chunk_list)
+/**
+ * @brief Drop refs for state chunks
+ *
+ * Drop a ref from each chunk, making sure @a keep retains one.
+ *
+ * @param[in] a First chunk
+ * @param[in] b Second chunk
+ * @param[in] c Third chunk
+ * @param[in] keep Chunk to keep a ref on
+ */
+static void drop_state_chunk_refs(struct dir_chunk *a, struct dir_chunk *b,
+ struct dir_chunk *c, struct dir_chunk *keep)
+{
+ if (keep) {
+ mdcache_lru_ref_chunk(keep);
+ }
+
+ mdcache_lru_unref_chunk(a);
+ mdcache_lru_unref_chunk(b);
+ mdcache_lru_unref_chunk(c);
+}
+
static inline bool trust_negative_cache(mdcache_entry_t *parent)
{
bool trust = op_ctx_export_has_option(
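
The ordering inside drop_state_chunk_refs() is the point: keep gains its extra ref before any unref runs, so it survives even when it is also one of a, b, or c. A caller-side sketch (state fields as used later in this patch; the keep argument here is illustrative):

    /* cur_chunk may be the chunk we intend to hand back. Because keep
     * is ref'd first, the unref of state.cur_chunk below cannot drop
     * the last reference and free it out from under us. */
    drop_state_chunk_refs(state.first_chunk, state.cur_chunk,
                          state.prev_chunk, state.cur_chunk);
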
@@ -223,7 +245,7 @@
*
* @param[in] dirent First ref'd dirent
*/
-static void mdc_unref_chunk(struct dir_chunk *chunk,
+static void mdc_unref_chunk_dirents(struct dir_chunk *chunk,
mdcache_dir_entry_t *dirent)
{
for (;
@@ -2078,6 +2100,8 @@
*/
chunk->next_ck = here->ck;
+ mdcache_lru_unref_chunk(split);
+
LogFullDebugAlt(COMPONENT_NFS_READDIR, COMPONENT_CACHE_INODE,
"Chunk next_ck=%"PRIx64,
chunk->next_ck);
@@ -2127,9 +2151,6 @@
fsal_cookie_t cookie)
{
struct mdcache_populate_cb_state *state = dir_state;
- struct dir_chunk *chunk = state->cur_chunk;
- mdcache_entry_t *mdc_parent = container_of(&state->dir->obj_handle,
- mdcache_entry_t, obj_handle);
struct mdcache_fsal_export *export = mdc_cur_export();
mdcache_entry_t *new_entry = NULL;
mdcache_dir_entry_t *new_dir_entry = NULL, *allocated_dir_entry = NULL;
@@ -2139,27 +2160,27 @@
enum fsal_dir_result result = DIR_CONTINUE;
#ifdef DEBUG_MDCACHE
- assert(mdc_parent->content_lock.__data.__cur_writer);
+ assert(state->dir->content_lock.__data.__cur_writer);
#endif
- if (chunk->num_entries == mdcache_param.dir.avl_chunk) {
+ if (state->cur_chunk->num_entries == mdcache_param.dir.avl_chunk) {
/* We are being called readahead. */
LogFullDebugAlt(COMPONENT_NFS_READDIR, COMPONENT_CACHE_INODE,
"Readdir readahead first entry in new chunk %s",
name);
- state->prev_chunk = chunk;
+ mdcache_lru_unref_chunk(state->prev_chunk);
+ state->prev_chunk = state->cur_chunk;
state->prev_chunk->next_ck = cookie;
/* Chunk is added to the chunks list before being passed in */
/* Now start a new chunk, passing this chunk as prev_chunk. */
- chunk = mdcache_get_chunk(chunk->parent, chunk, 0);
-
- state->cur_chunk = chunk;
+ state->cur_chunk = mdcache_get_chunk(state->prev_chunk->parent,
+ state->prev_chunk, 0);
LogFullDebugAlt(COMPONENT_NFS_READDIR, COMPONENT_CACHE_INODE,
"Chunk %p Prev chunk %p next_ck=%" PRIx64,
- chunk, state->prev_chunk,
+ state->cur_chunk, state->prev_chunk,
state->prev_chunk->next_ck);
/* And start accepting entries into the new chunk. */
}
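
The net effect of this hunk is a rotation of chunk references during readahead: the old prev_chunk ref is dropped, the caller's cur_chunk ref is repurposed as the prev_chunk ref, and mdcache_get_chunk() hands back a new cur_chunk that is already ref'd (per the refcnt change in mdcache_lru.c later in this patch). Condensed:

    mdcache_lru_unref_chunk(state->prev_chunk);  /* drop old prev ref    */
    state->prev_chunk = state->cur_chunk;        /* cur ref becomes prev */
    state->prev_chunk->next_ck = cookie;         /* chain the chunks     */
    state->cur_chunk = mdcache_get_chunk(state->prev_chunk->parent,
                                         state->prev_chunk, 0);
    /* new cur_chunk arrives with a ref already taken for us */
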
@@ -2176,7 +2197,7 @@
*state->status = status;
LogInfoAlt(COMPONENT_NFS_READDIR, COMPONENT_CACHE_INODE,
"mdcache_new_entry failed on %s in dir %p with %s",
- name, mdc_parent, fsal_err_txt(status));
+ name, state->dir, fsal_err_txt(status));
return DIR_TERMINATE;
}
@@ -2187,10 +2208,10 @@
"Add mdcache entry %p for %s for FSAL %s",
new_entry, name, new_entry->sub_handle->fsal->name);
- /* in cache avl, we always insert on mdc_parent */
+ /* in cache avl, we always insert on state->dir */
new_dir_entry = gsh_calloc(1, sizeof(mdcache_dir_entry_t) + namesize);
new_dir_entry->flags = DIR_ENTRY_FLAG_NONE;
- new_dir_entry->chunk = chunk;
+ new_dir_entry->chunk = state->cur_chunk;
new_dir_entry->ck = cookie;
allocated_dir_entry = new_dir_entry;
@@ -2207,7 +2228,7 @@
mdcache_key_dup(&new_dir_entry->ckey, &new_entry->fh_hk.key);
/* add to avl */
- code = mdcache_avl_insert(mdc_parent, &new_dir_entry);
+ code = mdcache_avl_insert(state->dir, &new_dir_entry);
if (code < 0) {
/* We can get here with the following possibilities:
@@ -2244,7 +2265,7 @@
}
/* Note that if this dirent was already in the lookup by name AVL
- * tree (mdc_parent->fsobj.fsdir.avl.t), then mdcache_avl_qp_insert
+ * tree (state->dir->fsobj.fsdir.avl.t), then mdcache_avl_qp_insert
* freed the dirent we allocated above, and returned the one that was
* in tree. It will have set chunk, ck, and nk.
*
@@ -2256,7 +2277,7 @@
"Swapped %s using %p instead of %p, new_dir_entry->chunk=%p chunk=%p",
new_dir_entry->name, new_dir_entry,
allocated_dir_entry, new_dir_entry->chunk,
- chunk);
+ state->cur_chunk);
}
assert(new_dir_entry->chunk);
@@ -2275,7 +2296,7 @@
node = avltree_inline_insert(
&new_dir_entry->node_sorted,
- &mdc_parent->fsobj.fsdir.avl.sorted,
+ &state->dir->fsobj.fsdir.avl.sorted,
avl_dirent_sorted_cmpf);
if (node != NULL) {
@@ -2313,11 +2334,12 @@
* old dirent that was not part of a chunk, but it is NOT the
* same dirent that was already part of some other chunk.
*/
- glist_add_tail(&chunk->dirents, &new_dir_entry->chunk_list);
- chunk->num_entries++;
+ glist_add_tail(&state->cur_chunk->dirents,
+ &new_dir_entry->chunk_list);
+ state->cur_chunk->num_entries++;
}
- if (new_dir_entry->chunk != chunk) {
+ if (new_dir_entry->chunk != state->cur_chunk) {
/* We have the situation where we have collided with a
* previously used chunk (and thus we have a partial chunk).
* Since dirent is pointing to the existing dirent and the one
@@ -2333,25 +2355,25 @@
"Collision old chunk %p next_ck=%"PRIx64
" new chunk %p next_ck=%"PRIx64,
new_dir_entry->chunk,
- new_dir_entry->chunk->next_ck, chunk,
- chunk->next_ck);
- if (chunk->num_entries == 0) {
+ new_dir_entry->chunk->next_ck, state->cur_chunk,
+ state->cur_chunk->next_ck);
+ if (state->cur_chunk->num_entries == 0) {
LogFullDebugAlt(COMPONENT_NFS_READDIR,
COMPONENT_CACHE_INODE,
- "Nuking empty Chunk %p", chunk);
+ "Nuking empty Chunk %p", state->cur_chunk);
/* We read-ahead into an existing chunk, and this chunk
* is empty. Just ditch it now, to avoid any issue. */
- mdcache_lru_unref_chunk(chunk);
- if (state->first_chunk == chunk) {
+ mdcache_lru_unref_chunk(state->cur_chunk);
+ if (state->first_chunk == state->cur_chunk) {
/* Drop the first_chunk ref */
mdcache_lru_unref_chunk(state->first_chunk);
state->first_chunk = new_dir_entry->chunk;
/* And take the first_chunk ref */
mdcache_lru_ref_chunk(state->first_chunk);
}
- chunk = new_dir_entry->chunk;
- state->cur_chunk = chunk;
+ state->cur_chunk = new_dir_entry->chunk;
+ mdcache_lru_ref_chunk(state->cur_chunk);
if (new_dir_entry->entry) {
/* This was ref'd already; drop extra ref */
mdcache_put(new_dir_entry->entry);
@@ -2363,10 +2385,12 @@
} else {
LogFullDebugAlt(COMPONENT_NFS_READDIR,
COMPONENT_CACHE_INODE,
- "keeping non-empty Chunk %p", chunk);
- chunk->next_ck = new_dir_entry->ck;
+ "keeping non-empty Chunk %p",
+ state->cur_chunk);
+ state->cur_chunk->next_ck = new_dir_entry->ck;
}
- } else if (chunk->num_entries == mdcache_param.dir.avl_chunk) {
+ } else if (state->cur_chunk->num_entries ==
+ mdcache_param.dir.avl_chunk) {
/* Chunk is full. Since dirent is pointing to the existing
* dirent and the one we allocated above has been freed we don't
* need to do any cleanup.
@@ -2382,7 +2406,7 @@
if (new_entry->obj_handle.type == DIRECTORY) {
/* Insert Parent's key */
PTHREAD_RWLOCK_wrlock(&new_entry->content_lock);
- mdc_dir_add_parent(new_entry, mdc_parent);
+ mdc_dir_add_parent(new_entry, state->dir);
PTHREAD_RWLOCK_unlock(&new_entry->content_lock);
}
@@ -2391,10 +2415,16 @@
new_entry,
atomic_fetch_int32_t(&new_entry->lru.refcnt));
- /* Note that this entry is ref'd, so that mdcache_readdir_chunked can
- * un-ref it. Pass this ref off to the dir_entry for this purpose. */
- assert(!new_dir_entry->entry);
- new_dir_entry->entry = new_entry;
+ if (new_dir_entry != allocated_dir_entry && new_dir_entry->entry) {
+ /* This was swapped and already has a refcounted entry. Drop our
+ * ref. */
+ mdcache_put(new_entry);
+ } else {
+ /* This is a new dirent, or doesn't have an entry. Pass our ref
+ * on entry off to mdcache_readdir_chunked */
+ assert(!new_dir_entry->entry);
+ new_dir_entry->entry = new_entry;
+ }
return result;
}
@@ -2464,10 +2494,12 @@
* @brief Read the next chunk of a directory
*
* If called for an FSAL that only supports whence as the dirent name to
- * continue from, and prev_chunk is NULL, we must scan the directory from the
- * beginning. If prev_chunk is not NULL, we can scan the directory starting with
- * the last dirent name in prev_chunk, but we must still scan the directory
- * until we find whence.
+ * continue from, and @a prev_chunk is NULL, we must scan the directory from the
+ * beginning. If @a prev_chunk is not NULL, we can scan the directory starting
+ * with the last dirent name in @a prev_chunk, but we must still scan the
+ * directory until we find whence.
+ *
+ * @a prev_chunk must have a ref taken on it, and this ref will be released
*
* @note this returns a ref on the chunk containing @a dirent
*
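
In other words, the caller's ref on prev_chunk is consumed by mdcache_populate_dir_chunk(), and on success the only ref left is the one returned on the chunk containing *dirent. A caller-side sketch (signature inferred from the call sites in this patch):

    /* prev_chunk is ref'd here; the callee releases that ref, so no
     * unref on the caller side after the call. */
    status = mdcache_populate_dir_chunk(directory, whence, &dirent,
                                        prev_chunk, &eod_met);
    if (!FSAL_IS_ERROR(status)) {
        /* ... use dirent; we now own the ref on dirent->chunk ... */
        mdcache_lru_unref_chunk(dirent->chunk);
    }
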
@@ -2489,30 +2521,30 @@
fsal_status_t status = {0, 0};
fsal_status_t readdir_status = {0, 0};
struct mdcache_populate_cb_state state;
- struct dir_chunk *chunk;
attrmask_t attrmask;
fsal_cookie_t *whence_ptr = &whence;
- chunk = mdcache_get_chunk(directory, prev_chunk, whence);
-
attrmask = op_ctx->fsal_export->exp_ops.fs_supported_attrs(
op_ctx->fsal_export) | ATTR_RDATTR_ERR;
- /* Take a ref on the first chunk */
- mdcache_lru_ref_chunk(chunk);
-
state.export = mdc_cur_export();
state.dir = directory;
state.status = &status;
state.cb = NULL; /* We don't use the call back during chunking. */
- state.first_chunk = state.cur_chunk = chunk;
- state.prev_chunk = prev_chunk;
state.cookie = whence;
state.dirent = dirent;
state.whence_is_name = op_ctx->fsal_export->exp_ops.fs_supports(
op_ctx->fsal_export, fso_whence_is_name);
- state.whence_search = state.whence_is_name && whence != 0 &&
- prev_chunk == NULL;
+ state.whence_search = state.whence_is_name && whence != 0;
+
+ /* Set up chunks */
+ state.first_chunk = mdcache_get_chunk(directory, prev_chunk, whence);
+
+ state.cur_chunk = state.first_chunk;
+ mdcache_lru_ref_chunk(state.cur_chunk);
+
+ /* prev_chunk was passed in with a ref on it */
+ state.prev_chunk = prev_chunk;
if (state.whence_is_name) {
LogFullDebugAlt(COMPONENT_NFS_READDIR, COMPONENT_CACHE_INODE,
@@ -2531,12 +2563,12 @@
* prev_chunk has been updated to point to the last cached chunk.
*/
if (state.whence_is_name) {
- if (prev_chunk != NULL) {
+ if (state.prev_chunk != NULL) {
/* Start from end of prev_chunk */
/* If end of directory, mark last dirent as eod. */
mdcache_dir_entry_t *last;
- last = glist_last_entry(&prev_chunk->dirents,
+ last = glist_last_entry(&state.prev_chunk->dirents,
mdcache_dir_entry_t,
chunk_list);
whence_ptr = (fsal_cookie_t *)last->name;
@@ -2590,7 +2622,8 @@
"FSAL readdir status=%s",
fsal_err_txt(readdir_status));
*dirent = NULL;
- mdcache_lru_unref_chunk(chunk);
+ drop_state_chunk_refs(state.first_chunk, state.cur_chunk,
+ state.prev_chunk, NULL);
return readdir_status;
}
@@ -2599,32 +2632,33 @@
"status=%s",
fsal_err_txt(status));
*dirent = NULL;
- mdcache_lru_unref_chunk(chunk);
+ drop_state_chunk_refs(state.first_chunk, state.cur_chunk,
+ state.prev_chunk, NULL);
return status;
}
- /* Recover the most recent chunk from cur_chunk, if we had readahead.
- * it might have changed.
- */
- chunk = state.cur_chunk;
-
- if (chunk->num_entries == 0) {
- /* Chunk is empty - should only happen for an empty directory
- * but could happen if the FSAL failed to indicate end of
- * directory. This COULD happen on a readahead chunk, but it
+ if (state.cur_chunk->num_entries == 0) {
+ /* cur_chunk is empty - should only happen for an empty
+ * directory but could happen if the FSAL failed to indicate end
+ * of directory. This COULD happen on a readahead chunk, but it
* would be unusual.
*/
LogFullDebugAlt(COMPONENT_NFS_READDIR, COMPONENT_CACHE_INODE,
"Empty chunk");
- mdcache_lru_unref_chunk(chunk);
+ /* Put our ref */
+ mdcache_lru_unref_chunk(state.cur_chunk);
+ /* Put sentinel ref */
+ mdcache_lru_unref_chunk(state.cur_chunk);
- if (chunk == state.first_chunk) {
+ if (state.cur_chunk == state.first_chunk) {
/* We really got nothing on this readdir, so don't
* return a dirent.
*/
*dirent = NULL;
- mdcache_lru_unref_chunk(chunk);
+ mdcache_lru_unref_chunk(state.first_chunk);
+ /* cur_chunk was freed */
+ mdcache_lru_unref_chunk(state.prev_chunk);
LogDebugAlt(COMPONENT_NFS_READDIR,
COMPONENT_CACHE_INODE,
"status=%s",
@@ -2633,15 +2667,18 @@
}
/* If the empty chunk wasn't first, then prev_chunk is valid */
- chunk = state.prev_chunk;
+ state.cur_chunk = state.prev_chunk;
+ if (state.cur_chunk) {
+ mdcache_lru_ref_chunk(state.cur_chunk);
+ }
}
if (*eod_met) {
/* If end of directory, mark last dirent as eod. */
mdcache_dir_entry_t *last;
- last = glist_last_entry(&chunk->dirents, mdcache_dir_entry_t,
- chunk_list);
+ last = glist_last_entry(&state.cur_chunk->dirents,
+ mdcache_dir_entry_t, chunk_list);
last->eod = true;
}
@@ -2653,6 +2690,10 @@
if (state.whence_search && *dirent == NULL) {
if (*eod_met) {
/* Did not find cookie. */
+ drop_state_chunk_refs(state.first_chunk,
+ state.cur_chunk,
+ state.prev_chunk,
+ NULL);
status = fsalstat(ERR_FSAL_BADCOOKIE, 0);
LogDebugAlt(COMPONENT_NFS_READDIR,
COMPONENT_CACHE_INODE,
@@ -2679,34 +2720,44 @@
"Rescan dir to find cookie needs to continue search for %"
PRIx64, state.cookie);
- if (chunk->next_ck != 0) {
+ if (state.cur_chunk->next_ck != 0) {
+ fsal_cookie_t next_ck = state.cur_chunk->next_ck;
+
+ mdcache_lru_unref_chunk(state.cur_chunk);
/* In the collision case, chunk->next_ck was set,
* so now start skipping.
*/
LogFullDebugAlt(COMPONENT_NFS_READDIR,
COMPONENT_CACHE_INODE,
"Search skipping from cookie %"PRIx64,
- chunk->next_ck);
- chunk = mdcache_skip_chunks(directory, chunk->next_ck);
+ state.cur_chunk->next_ck);
+ state.cur_chunk = mdcache_skip_chunks(directory,
+ next_ck);
+ mdcache_lru_ref_chunk(state.cur_chunk);
}
+ /* drop refs on state, except cur_chunk which we're saving */
+ drop_state_chunk_refs(state.first_chunk, NULL,
+ state.prev_chunk, NULL);
+
/* We need to start a new FSAL readdir call, but we don't just
* want to call mdcache_populate_dir_chunk raw, so set up a few
* things and jump to again...
*/
+
/* The chunk we just dealt with is now prev_chunk. */
- prev_chunk = chunk;
+ state.prev_chunk = state.cur_chunk;
LogFullDebugAlt(COMPONENT_NFS_READDIR, COMPONENT_CACHE_INODE,
"About to allocate a new chunk to continue search, prev chunk = %p",
- prev_chunk);
+ state.prev_chunk);
/* And we need to allocate a fresh chunk. */
- chunk = mdcache_get_chunk(directory, chunk, 0);
+ state.first_chunk = mdcache_get_chunk(directory,
+ state.prev_chunk, 0);
+ state.cur_chunk = state.first_chunk;
+ mdcache_lru_ref_chunk(state.cur_chunk);
- /* And switch over to new chunk. */
- state.cur_chunk = chunk;
- state.prev_chunk = prev_chunk;
/* And go start a new FSAL readdir call. */
goto again;
@@ -2721,6 +2772,14 @@
chunk_list);
}
+ /* At this point, we have refs on first_chunk, cur_chunk, and
+ * prev_chunk. Drop the refs and make sure the chunk containing dirent
+ * is ref'd */
+ drop_state_chunk_refs(state.first_chunk, state.cur_chunk,
+ state.prev_chunk, (*dirent)->chunk);
+
+ /* At this point, only dirent->chunk should be ref'd */
+
LogDebugAlt(COMPONENT_NFS_READDIR, COMPONENT_CACHE_INODE,
"status=%s",
fsal_err_txt(status));
@@ -3004,12 +3063,6 @@
/* Bump the chunk in the LRU */
lru_bump_chunk(chunk);
- /* We can drop the ref now, we've bumped. This cannot be the last ref
- * drop. To get here, we had at least 2 refs, and we also hold the
- * content_lock for at least read. This means noone holds it for write,
- * and all final ref drops are done with it held for write. */
- mdcache_lru_unref_chunk(chunk);
-
LogFullDebugAlt(COMPONENT_NFS_READDIR, COMPONENT_CACHE_INODE,
"About to read directory=%p cookie=%" PRIx64,
directory, next_ck);
@@ -3076,6 +3129,7 @@
* reloading the chunk.
*/
first_pass = true;
+ mdcache_lru_unref_chunk(chunk);
chunk = NULL;
/* Now we need to look for this dirent again.
@@ -3103,6 +3157,7 @@
/* In order to get here, we passed the has_write
* check above, and took the write lock. */
mdcache_lru_unref_chunk(chunk);
+ mdcache_lru_unref_chunk(chunk);
chunk = NULL;
goto again;
}
@@ -3124,6 +3179,7 @@
if (status.major == ERR_FSAL_STALE)
mdcache_kill_entry(directory);
+ mdcache_lru_unref_chunk(chunk);
return status;
}
@@ -3134,6 +3190,7 @@
* don't match. If the new dirent couldn't be
* placed, we need to restart readdir.
*/
+ mdcache_lru_unref_chunk(chunk);
goto restart;
}
}
@@ -3192,6 +3249,7 @@
fsal_err_txt(status));
mdcache_put(entry);
+ mdcache_lru_unref_chunk(chunk);
return status;
}
@@ -3238,7 +3296,7 @@
* entries in this chunk, so that they don't
* hang around until the directory is
* invalidated. */
- mdc_unref_chunk(chunk, dirent);
+ mdc_unref_chunk_dirents(chunk, dirent);
}
LogDebugAlt(COMPONENT_NFS_READDIR,
@@ -3248,6 +3306,7 @@
PTHREAD_RWLOCK_unlock(&directory->content_lock);
+ mdcache_lru_unref_chunk(chunk);
return status;
}
@@ -3297,6 +3356,10 @@
* lock just in case the next chunk actually
* happens to be populated.
*/
+
+ /* Note: We're passing our ref on chunk into
+ * mdcache_populate_dir_chunk(), so don't drop it here.
+ */
first_pass = false;
goto again;
}
diff -ur nfs-ganesha-2.8.0.1/src/FSAL/Stackable_FSALs/FSAL_MDCACHE/mdcache_lru.c nfs-ganesha-2.8.0.2/src/FSAL/Stackable_FSALs/FSAL_MDCACHE/mdcache_lru.c
--- nfs-ganesha-2.8.0.1/src/FSAL/Stackable_FSALs/FSAL_MDCACHE/mdcache_lru.c 2019-06-07 18:00:00.000000000 -0400
+++ nfs-ganesha-2.8.0.2/src/FSAL/Stackable_FSALs/FSAL_MDCACHE/mdcache_lru.c 2019-06-14 18:23:42.000000000 -0400
@@ -869,7 +869,7 @@
* @param[in] prev_chunk If non-NULL, the previous chunk in this directory
* @param[in] whence If @a prev_chunk is NULL, the starting whence of chunk
*
- * @return reused or allocated chunk
+ * @return reused or allocated chunk with a ref taken for the caller
*/
struct dir_chunk *mdcache_get_chunk(mdcache_entry_t *parent,
struct dir_chunk *prev_chunk,
@@ -920,7 +920,7 @@
chunk->reload_ck = whence;
}
- chunk->chunk_lru.refcnt = 1;
+ chunk->chunk_lru.refcnt = 2;
chunk->chunk_lru.cf = 0;
chunk->chunk_lru.lane = lru_lane_of(chunk);
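
Starting the refcount at 2 gives a fresh chunk two owners: the LRU itself (the sentinel ref, dropped when the chunk is cleaned or reaped) and the caller of mdcache_get_chunk(). A lifetime sketch under that convention:

    struct dir_chunk *chunk = mdcache_get_chunk(parent, NULL, whence);
    /* refcnt == 2: one sentinel ref held by the LRU, one caller ref */
    /* ... populate and use the chunk ... */
    mdcache_lru_unref_chunk(chunk); /* caller ref gone; LRU keeps its own */
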
@@ -2033,7 +2033,7 @@
mdcache_clean_dirent_chunk(chunk);
}
-void mdcache_lru_ref_chunk(struct dir_chunk *chunk)
+void _mdcache_lru_ref_chunk(struct dir_chunk *chunk, const char *func, int line)
{
atomic_inc_int32_t(&chunk->chunk_lru.refcnt);
}
@@ -2043,12 +2043,17 @@
* Should be called with content_lock held in write mode.
* @param [in] chunk The chunk to unref
*/
-void mdcache_lru_unref_chunk(struct dir_chunk *chunk)
+void _mdcache_lru_unref_chunk(struct dir_chunk *chunk, const char *func,
+ int line)
{
int refcnt;
- uint32_t lane = chunk->chunk_lru.lane;
+ uint32_t lane;
struct lru_q_lane *qlane;
+ if (!chunk)
+ return;
+
+ lane = chunk->chunk_lru.lane;
qlane = &CHUNK_LRU[lane];
QLOCK(qlane);
diff -ur nfs-ganesha-2.8.0.1/src/FSAL/Stackable_FSALs/FSAL_MDCACHE/mdcache_lru.h nfs-ganesha-2.8.0.2/src/FSAL/Stackable_FSALs/FSAL_MDCACHE/mdcache_lru.h
--- nfs-ganesha-2.8.0.1/src/FSAL/Stackable_FSALs/FSAL_MDCACHE/mdcache_lru.h 2019-06-07 18:00:00.000000000 -0400
+++ nfs-ganesha-2.8.0.2/src/FSAL/Stackable_FSALs/FSAL_MDCACHE/mdcache_lru.h 2019-06-14 18:23:42.000000000 -0400
@@ -196,8 +196,14 @@
mdcache_lru_unref(entry);
}
-void mdcache_lru_ref_chunk(struct dir_chunk *chunk);
-void mdcache_lru_unref_chunk(struct dir_chunk *chunk);
+#define mdcache_lru_ref_chunk(chunk) \
+ _mdcache_lru_ref_chunk(chunk, __func__, __LINE__)
+void _mdcache_lru_ref_chunk(struct dir_chunk *chunk, const char *func,
+ int line);
+#define mdcache_lru_unref_chunk(chunk) \
+ _mdcache_lru_unref_chunk(chunk, __func__, __LINE__)
+void _mdcache_lru_unref_chunk(struct dir_chunk *chunk, const char *func,
+ int line);
struct dir_chunk *mdcache_get_chunk(mdcache_entry_t *parent,
struct dir_chunk *prev_chunk,
fsal_cookie_t whence);
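
With these wrappers, every ref/unref records its call site, the usual hook for chasing refcount leaks. For example (caller name and line number illustrative):

    /* This call... */
    mdcache_lru_unref_chunk(chunk);
    /* ...expands to, with the caller's __func__ and __LINE__: */
    _mdcache_lru_unref_chunk(chunk, "mdcache_readdir_chunked", 3306);
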
diff -ur nfs-ganesha-2.8.0.1/src/Protocols/NFS/mnt_Export.c nfs-ganesha-2.8.0.2/src/Protocols/NFS/mnt_Export.c
--- nfs-ganesha-2.8.0.1/src/Protocols/NFS/mnt_Export.c 2019-06-07 18:00:00.000000000 -0400
+++ nfs-ganesha-2.8.0.2/src/Protocols/NFS/mnt_Export.c 2019-06-14 18:23:42.000000000 -0400
@@ -201,6 +201,7 @@
gsh_free(grp);
grp = next_grp;
}
+ gsh_free(exp->ex_dir);
gsh_free(exp);
exp = next_exp;
}
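
This plugs a leak in the MOUNT export-list teardown: each exportnode owns its ex_dir string as well as its group list. The loop, for context (shape inferred from the surrounding diff; that ex_dir was separately allocated is an assumption):

    while (exp != NULL) {
        next_exp = exp->ex_next;
        /* ... walk the group list, freeing each grp ... */
        gsh_free(exp->ex_dir);  /* previously leaked */
        gsh_free(exp);
        exp = next_exp;
    }
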
diff -ur nfs-ganesha-2.8.0.1/src/RPCAL/nfs_dupreq.c nfs-ganesha-2.8.0.2/src/RPCAL/nfs_dupreq.c
--- nfs-ganesha-2.8.0.1/src/RPCAL/nfs_dupreq.c 2019-06-07 18:00:00.000000000 -0400
+++ nfs-ganesha-2.8.0.2/src/RPCAL/nfs_dupreq.c 2019-06-14 18:23:42.000000000 -0400
@@ -577,6 +577,7 @@
LogFullDebug(COMPONENT_DUPREQ, "ref shared UDP DRC");
drc = &(drc_st->udp_drc);
DRC_ST_LOCK();
+ req->rq_xprt->xp_u2 = (void *)drc;
(void)nfs_dupreq_ref_drc(drc);
DRC_ST_UNLOCK();
goto out;
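
The shared-UDP path now records its DRC on the transport's xp_u2 user field, so any code that later resolves a request's DRC through the transport will see the shared UDP DRC as well. A sketch of such a lookup (hypothetical consumer; only the hunk above is from the patch):

    /* resolve the DRC bound to this transport, if any */
    drc_t *drc = (drc_t *)req->rq_xprt->xp_u2;
    if (drc != NULL) {
        /* ... release or reuse the DRC found via the transport ... */
    }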