author		Sergey Senozhatsky <senozhatsky@chromium.org>	2022-11-22 06:30:22 +0300
committer	Andrew Morton <akpm@linux-foundation.org>	2022-12-01 02:59:07 +0300
commit		8d9b63708ddd1ac51e0260c7b8f641daf01f4caf (patch)
tree		5b556095bef64db52c2a43904500cedf3e94153d /mm
parent		7ce5f7e16afa82d33dc47d633404b8b1142a5e44 (diff)
download	linux-8d9b63708ddd1ac51e0260c7b8f641daf01f4caf.tar.xz
zswap: do not allocate from atomic pool
zswap_frontswap_load() should be called from preemptible context (we even call mutex_lock() there) and it does not look like we need to do a GFP_ATOMIC allocation for the temp buffer. The same applies to zswap_writeback_entry().

Use GFP_KERNEL for temporary buffer allocation in both cases.

Link: https://lkml.kernel.org/r/Y3xCTr6ikbtcUr/y@google.com
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
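Spelled out, the argument is: a path that takes a mutex is by definition allowed to sleep, so a GFP_KERNEL allocation (which may also sleep to reclaim memory) is safe there, and GFP_ATOMIC buys nothing. The following is a minimal kernel-style sketch of that pattern, not the actual zswap code (the real hunks are in the diff below); demo_lock and demo_load are hypothetical names standing in for zswap's per-cpu acomp context.

#include <linux/gfp.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>

static DEFINE_MUTEX(demo_lock);	/* hypothetical; stands in for zswap's acomp mutex */

static int demo_load(size_t len)
{
	u8 *tmp;

	/*
	 * This context may sleep (we take a mutex right below), so the
	 * temporary buffer can use GFP_KERNEL; GFP_ATOMIC is only needed
	 * where sleeping is not allowed.
	 */
	tmp = kmalloc(len, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&demo_lock);
	/* ... copy the compressed object into tmp and decompress it ... */
	mutex_unlock(&demo_lock);

	kfree(tmp);
	return 0;
}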
Diffstat (limited to 'mm')
-rw-r--r--	mm/zpool.c	7
-rw-r--r--	mm/zswap.c	4
2 files changed, 9 insertions, 2 deletions
diff --git a/mm/zpool.c b/mm/zpool.c
index 68facc193496..f46c0d5e766c 100644
--- a/mm/zpool.c
+++ b/mm/zpool.c
@@ -387,6 +387,13 @@ bool zpool_evictable(struct zpool *zpool)
  * zpool_can_sleep_mapped - Test if zpool can sleep when do mapped.
  * @zpool: The zpool to test
  *
+ * Some allocators enter non-preemptible context in ->map() callback (e.g.
+ * disable pagefaults) and exit that context in ->unmap(), which limits what
+ * we can do with the mapped object. For instance, we cannot wait for
+ * asynchronous crypto API to decompress such an object or take mutexes
+ * since those will call into the scheduler. This function tells us whether
+ * we use such an allocator.
+ *
  * Returns: true if zpool can sleep; false otherwise.
  */
 bool zpool_can_sleep_mapped(struct zpool *zpool)
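In caller terms, the comment added above means: check zpool_can_sleep_mapped() before mapping, and if it returns false, bounce the object into a GFP_KERNEL buffer (allocated before mapping) and unmap before doing anything that can schedule. The sketch below shows that pattern, which is what the zswap hunks further down rely on; zpool_can_sleep_mapped(), zpool_map_handle() and zpool_unmap_handle() are the existing zpool API, while demo_read_compressed() and its arguments are made up for illustration.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/zpool.h>

static int demo_read_compressed(struct zpool *pool, unsigned long handle,
				unsigned int len)
{
	u8 *src, *tmp = NULL;

	if (!zpool_can_sleep_mapped(pool)) {
		/* Still preemptible here, so GFP_KERNEL is fine. */
		tmp = kmalloc(len, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
	}

	src = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	if (tmp) {
		/* Bounce-copy, then leave the non-preemptible section ASAP. */
		memcpy(tmp, src, len);
		zpool_unmap_handle(pool, handle);
		src = tmp;
	}

	/* ... decompress from src; sleeping (mutexes, async crypto) is now safe ... */

	if (tmp)
		kfree(tmp);
	else
		zpool_unmap_handle(pool, handle);
	return 0;
}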
diff --git a/mm/zswap.c b/mm/zswap.c
index 2d48fd59cc7a..3019f0bde194 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -958,7 +958,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 	};
 
 	if (!zpool_can_sleep_mapped(pool)) {
-		tmp = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+		tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
 		if (!tmp)
 			return -ENOMEM;
 	}
@@ -1311,7 +1311,7 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
 	}
 
 	if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
-		tmp = kmalloc(entry->length, GFP_ATOMIC);
+		tmp = kmalloc(entry->length, GFP_KERNEL);
 		if (!tmp) {
 			ret = -ENOMEM;
 			goto freeentry;