path: root/mm/filemap.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2016-05-27 06:00:28 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-05-27 06:00:28 +0300
commit    478a1469a7d27fe6b2f85fc801ecdeb8afc836e6 (patch)
tree      9b1eb10e1a0567413443281387b09d02b514b5ec /mm/filemap.c
parent    315227f6da389f3a560f27f7777080857278e1b4 (diff)
parent    4d9a2c8746671efbb0c27d3ae28c7474597a7aad (diff)
download  linux-478a1469a7d27fe6b2f85fc801ecdeb8afc836e6.tar.xz
Merge tag 'dax-locking-for-4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull DAX locking updates from Ross Zwisler:
 "Filesystem DAX locking for 4.7:

   - We use a bit in an exceptional radix tree entry as a lock bit and
     use it similarly to how page lock is used for normal faults. This
     fixes races between hole instantiation and read faults of the same
     index.

   - Filesystem DAX PMD faults are disabled, and will be re-enabled when
     PMD locking is implemented"

* tag 'dax-locking-for-4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  dax: Remove i_mmap_lock protection
  dax: Use radix tree entry lock to protect cow faults
  dax: New fault locking
  dax: Allow DAX code to replace exceptional entries
  dax: Define DAX lock bit for radix tree exceptional entry
  dax: Make huge page handling depend of CONFIG_BROKEN
  dax: Fix condition for filling of PMD holes
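Conceptually, the new lock bit behaves like PG_locked on a struct page, but it lives inside the exceptional radix tree entry itself. The sketch below illustrates the idea only; it assumes RADIX_DAX_ENTRY_LOCK is a single bit within the entry value and the helper names are made up for illustration, not the actual fs/dax.c code.

/*
 * Illustrative sketch, not the fs/dax.c implementation.  A DAX
 * exceptional entry carries RADIX_DAX_ENTRY_LOCK while a fault handler
 * owns that index; other faults on the same index wait until the bit
 * is cleared, much like waiting on a locked page.
 */
static inline bool dax_entry_locked(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_ENTRY_LOCK;
}

static inline void *dax_entry_unlocked(void *entry)
{
	/* Callers clear the bit under mapping->tree_lock, then wake waiters. */
	return (void *)((unsigned long)entry &
			~(unsigned long)RADIX_DAX_ENTRY_LOCK);
}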
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	30
1 file changed, 21 insertions, 9 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 9665b1d4f318..00ae878b2a38 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -143,13 +143,15 @@ static void page_cache_tree_delete(struct address_space *mapping,
 			return;
 
 	/*
-	 * Track node that only contains shadow entries.
+	 * Track node that only contains shadow entries. DAX mappings contain
+	 * no shadow entries and may contain other exceptional entries so skip
+	 * those.
 	 *
 	 * Avoid acquiring the list_lru lock if already tracked. The
 	 * list_empty() test is safe as node->private_list is
 	 * protected by mapping->tree_lock.
 	 */
-	if (!workingset_node_pages(node) &&
+	if (!dax_mapping(mapping) && !workingset_node_pages(node) &&
 	    list_empty(&node->private_list)) {
 		node->private_data = mapping;
 		list_lru_add(&workingset_shadow_nodes, &node->private_list);
@@ -580,14 +582,24 @@ static int page_cache_tree_insert(struct address_space *mapping,
 		if (!radix_tree_exceptional_entry(p))
 			return -EEXIST;
 
-		if (WARN_ON(dax_mapping(mapping)))
-			return -EINVAL;
-
-		if (shadowp)
-			*shadowp = p;
 		mapping->nrexceptional--;
-		if (node)
-			workingset_node_shadows_dec(node);
+		if (!dax_mapping(mapping)) {
+			if (shadowp)
+				*shadowp = p;
+			if (node)
+				workingset_node_shadows_dec(node);
+		} else {
+			/* DAX can replace empty locked entry with a hole */
+			WARN_ON_ONCE(p !=
+				(void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
+					 RADIX_DAX_ENTRY_LOCK));
+			/* DAX accounts exceptional entries as normal pages */
+			if (node)
+				workingset_node_pages_dec(node);
+			/* Wakeup waiters for exceptional entry lock */
+			dax_wake_mapping_entry_waiter(mapping, page->index,
+						      false);
+		}
 	}
 	radix_tree_replace_slot(slot, page);
 	mapping->nrpages++;
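For reference, the value the WARN_ON_ONCE() in the DAX branch compares against is an "empty" DAX entry that is still locked: an exceptional entry whose only payload is the lock bit. A hypothetical helper expressing the same check could look like the sketch below; the name is illustrative and not a kernel API.

/* Sketch: true if @entry is a locked, empty DAX exceptional entry. */
static inline bool dax_is_locked_empty_entry(void *entry)
{
	return entry == (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
				 RADIX_DAX_ENTRY_LOCK);
}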