From 0130e4e8e49f9bd0342d3fc14102470ea9e7230e Mon Sep 17 00:00:00 2001
From: Jeffle Xu
Date: Thu, 26 May 2022 09:03:44 +0800
Subject: erofs: leave compressed inodes unsupported in fscache mode for now

erofs over fscache doesn't support the compressed layout yet. Accessing
compressed inodes in fscache mode will cause a NULL pointer dereference.

So far, in erofs-based container image distribution scenarios (RAFS v6),
compressed RAFS v6 images are downloaded and then decompressed on demand
into an uncompressed erofs image, which is then mounted in fscache mode
for containers to use. In other words, compressed data is currently
decompressed in userspace, and only uncompressed erofs images end up
being cached.

fscache support for the compressed layout is still under development and
will be used for the runtime decompression feature. To avoid the
potential crash, leave compressed inodes unsupported in fscache mode
until that support lands.

Fixes: 1442b02b66ad ("erofs: implement fscache-based data read for non-inline layout")
Signed-off-by: Jeffle Xu
Reviewed-by: Gao Xiang
Reviewed-by: Chao Yu
Link: https://lore.kernel.org/r/20220526010344.118493-1-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang
---
 fs/erofs/inode.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'fs')

diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index bcc8335b46b3..95a403720e8c 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -288,7 +288,10 @@ static int erofs_fill_inode(struct inode *inode, int isdir)
 	}
 
 	if (erofs_inode_is_data_compressed(vi->datalayout)) {
-		err = z_erofs_fill_inode(inode);
+		if (!erofs_is_fscache_mode(inode->i_sb))
+			err = z_erofs_fill_inode(inode);
+		else
+			err = -EOPNOTSUPP;
 		goto out_unlock;
 	}
 	inode->i_mapping->a_ops = &erofs_raw_access_aops;
-- cgit v1.2.3


From b5cb79dcfd03e7bb8054d38eaaa557d07966a811 Mon Sep 17 00:00:00 2001
From: Xin Yin
Date: Fri, 27 May 2022 18:18:00 +0800
Subject: erofs: fix crash when enabling tracepoint cachefiles_prep_read

 RIP: 0010:trace_event_raw_event_cachefiles_prep_read+0x88/0xe0 [cachefiles]
 Call Trace:
  cachefiles_prepare_read+0x1d7/0x3a0 [cachefiles]
  erofs_fscache_read_folios+0x188/0x220 [erofs]
  erofs_fscache_meta_readpage+0x106/0x160 [erofs]
  do_read_cache_folio+0x42a/0x590
  ? bdi_register_va.part.14+0x1a7/0x210
  ? super_setup_bdi_name+0x76/0xe0
  erofs_bread+0x5b/0x170 [erofs]
  erofs_fc_fill_super+0x12b/0xc50 [erofs]

This tracepoint dereferences rreq->inode, so set it when the request is
allocated.
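For context, a minimal sketch of what the request constructor ends up doing: the field assignments mirror the hunk shown below, while the allocation and error handling around them are assumed rather than quoted from the patch.

```c
static struct netfs_io_request *
erofs_fscache_alloc_request(struct address_space *mapping,
			    loff_t start, size_t len)
{
	struct netfs_io_request *rreq;

	/* assumed allocation/error path; not part of the hunk below */
	rreq = kzalloc(sizeof(*rreq), GFP_KERNEL);
	if (!rreq)
		return ERR_PTR(-ENOMEM);

	rreq->start   = start;
	rreq->len     = len;
	rreq->mapping = mapping;
	rreq->inode   = mapping->host;	/* the fix: tracepoints read rreq->inode */
	INIT_LIST_HEAD(&rreq->subrequests);
	refcount_set(&rreq->ref, 1);
	return rreq;
}
```

With rreq->inode populated at allocation time, the cachefiles_prep_read tracepoint no longer dereferences a NULL inode pointer.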
Fixes: d435d53228dd ("erofs: change to use asynchronous io for fscache readpage/readahead") Signed-off-by: Xin Yin Reviewed-by: Jeffle Xu Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20220527101800.22360-1-yinxin.x@bytedance.com Signed-off-by: Gao Xiang --- fs/erofs/fscache.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c index 7e4417167d0b..355259252943 100644 --- a/fs/erofs/fscache.c +++ b/fs/erofs/fscache.c @@ -17,6 +17,7 @@ static struct netfs_io_request *erofs_fscache_alloc_request(struct address_space rreq->start = start; rreq->len = len; rreq->mapping = mapping; + rreq->inode = mapping->host; INIT_LIST_HEAD(&rreq->subrequests); refcount_set(&rreq->ref, 1); return rreq; -- cgit v1.2.3 From 87ca34a7065db66adbbe882a2be6b04127c26a87 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Sun, 29 May 2022 13:54:23 +0800 Subject: erofs: get rid of `struct z_erofs_collection' It was incompletely introduced for deduplication between different logical extents backed with the same pcluster. We will have a better in-memory representation in the next release cycle for this, as well as partial memory folios support. So get rid of it instead. No logic changes. Link: https://lore.kernel.org/r/20220529055425.226363-2-xiang@kernel.org Acked-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/zdata.c | 111 +++++++++++++++++++++---------------------------------- fs/erofs/zdata.h | 50 +++++++++++-------------- 2 files changed, 65 insertions(+), 96 deletions(-) (limited to 'fs') diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index e6dea6dfca16..4fd66a66c5f9 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -199,7 +199,6 @@ struct z_erofs_decompress_frontend { struct z_erofs_pagevec_ctor vector; struct z_erofs_pcluster *pcl, *tailpcl; - struct z_erofs_collection *cl; /* a pointer used to pick up inplace I/O pages */ struct page **icpage_ptr; z_erofs_next_pcluster_t owned_head; @@ -357,7 +356,7 @@ static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe, return false; } -/* callers must be with collection lock held */ +/* callers must be with pcluster lock held */ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe, struct page *page, enum z_erofs_page_type type, bool pvec_safereuse) @@ -372,7 +371,7 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe, ret = z_erofs_pagevec_enqueue(&fe->vector, page, type, pvec_safereuse); - fe->cl->vcnt += (unsigned int)ret; + fe->pcl->vcnt += (unsigned int)ret; return ret ? 
0 : -EAGAIN; } @@ -405,12 +404,11 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f) f->mode = COLLECT_PRIMARY; } -static int z_erofs_lookup_collection(struct z_erofs_decompress_frontend *fe, - struct inode *inode, - struct erofs_map_blocks *map) +static int z_erofs_lookup_pcluster(struct z_erofs_decompress_frontend *fe, + struct inode *inode, + struct erofs_map_blocks *map) { struct z_erofs_pcluster *pcl = fe->pcl; - struct z_erofs_collection *cl; unsigned int length; /* to avoid unexpected loop formed by corrupted images */ @@ -419,8 +417,7 @@ static int z_erofs_lookup_collection(struct z_erofs_decompress_frontend *fe, return -EFSCORRUPTED; } - cl = z_erofs_primarycollection(pcl); - if (cl->pageofs != (map->m_la & ~PAGE_MASK)) { + if (pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) { DBG_BUGON(1); return -EFSCORRUPTED; } @@ -443,23 +440,21 @@ static int z_erofs_lookup_collection(struct z_erofs_decompress_frontend *fe, length = READ_ONCE(pcl->length); } } - mutex_lock(&cl->lock); + mutex_lock(&pcl->lock); /* used to check tail merging loop due to corrupted images */ if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL) fe->tailpcl = pcl; z_erofs_try_to_claim_pcluster(fe); - fe->cl = cl; return 0; } -static int z_erofs_register_collection(struct z_erofs_decompress_frontend *fe, - struct inode *inode, - struct erofs_map_blocks *map) +static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe, + struct inode *inode, + struct erofs_map_blocks *map) { bool ztailpacking = map->m_flags & EROFS_MAP_META; struct z_erofs_pcluster *pcl; - struct z_erofs_collection *cl; struct erofs_workgroup *grp; int err; @@ -482,17 +477,15 @@ static int z_erofs_register_collection(struct z_erofs_decompress_frontend *fe, /* new pclusters should be claimed as type 1, primary and followed */ pcl->next = fe->owned_head; + pcl->pageofs_out = map->m_la & ~PAGE_MASK; fe->mode = COLLECT_PRIMARY_FOLLOWED; - cl = z_erofs_primarycollection(pcl); - cl->pageofs = map->m_la & ~PAGE_MASK; - /* * lock all primary followed works before visible to others * and mutex_trylock *never* fails for a new pcluster. 
*/ - mutex_init(&cl->lock); - DBG_BUGON(!mutex_trylock(&cl->lock)); + mutex_init(&pcl->lock); + DBG_BUGON(!mutex_trylock(&pcl->lock)); if (ztailpacking) { pcl->obj.index = 0; /* which indicates ztailpacking */ @@ -519,11 +512,10 @@ static int z_erofs_register_collection(struct z_erofs_decompress_frontend *fe, fe->tailpcl = pcl; fe->owned_head = &pcl->next; fe->pcl = pcl; - fe->cl = cl; return 0; err_out: - mutex_unlock(&cl->lock); + mutex_unlock(&pcl->lock); z_erofs_free_pcluster(pcl); return err; } @@ -535,9 +527,9 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe, struct erofs_workgroup *grp; int ret; - DBG_BUGON(fe->cl); + DBG_BUGON(fe->pcl); - /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous collection */ + /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */ DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL); DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); @@ -554,14 +546,14 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe, fe->pcl = container_of(grp, struct z_erofs_pcluster, obj); } else { tailpacking: - ret = z_erofs_register_collection(fe, inode, map); + ret = z_erofs_register_pcluster(fe, inode, map); if (!ret) goto out; if (ret != -EEXIST) return ret; } - ret = z_erofs_lookup_collection(fe, inode, map); + ret = z_erofs_lookup_pcluster(fe, inode, map); if (ret) { erofs_workgroup_put(&fe->pcl->obj); return ret; @@ -569,7 +561,7 @@ tailpacking: out: z_erofs_pagevec_ctor_init(&fe->vector, Z_EROFS_NR_INLINE_PAGEVECS, - fe->cl->pagevec, fe->cl->vcnt); + fe->pcl->pagevec, fe->pcl->vcnt); /* since file-backed online pages are traversed in reverse order */ fe->icpage_ptr = fe->pcl->compressed_pages + z_erofs_pclusterpages(fe->pcl); @@ -582,48 +574,36 @@ out: */ static void z_erofs_rcu_callback(struct rcu_head *head) { - struct z_erofs_collection *const cl = - container_of(head, struct z_erofs_collection, rcu); - - z_erofs_free_pcluster(container_of(cl, struct z_erofs_pcluster, - primary_collection)); + z_erofs_free_pcluster(container_of(head, + struct z_erofs_pcluster, rcu)); } void erofs_workgroup_free_rcu(struct erofs_workgroup *grp) { struct z_erofs_pcluster *const pcl = container_of(grp, struct z_erofs_pcluster, obj); - struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl); - call_rcu(&cl->rcu, z_erofs_rcu_callback); -} - -static void z_erofs_collection_put(struct z_erofs_collection *cl) -{ - struct z_erofs_pcluster *const pcl = - container_of(cl, struct z_erofs_pcluster, primary_collection); - - erofs_workgroup_put(&pcl->obj); + call_rcu(&pcl->rcu, z_erofs_rcu_callback); } static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe) { - struct z_erofs_collection *cl = fe->cl; + struct z_erofs_pcluster *pcl = fe->pcl; - if (!cl) + if (!pcl) return false; z_erofs_pagevec_ctor_exit(&fe->vector, false); - mutex_unlock(&cl->lock); + mutex_unlock(&pcl->lock); /* * if all pending pages are added, don't hold its reference * any longer if the pcluster isn't hosted by ourselves. 
*/ if (fe->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE) - z_erofs_collection_put(cl); + erofs_workgroup_put(&pcl->obj); - fe->cl = NULL; + fe->pcl = NULL; return true; } @@ -666,8 +646,8 @@ repeat: /* lucky, within the range of the current map_blocks */ if (offset + cur >= map->m_la && offset + cur < map->m_la + map->m_llen) { - /* didn't get a valid collection previously (very rare) */ - if (!fe->cl) + /* didn't get a valid pcluster previously (very rare) */ + if (!fe->pcl) goto restart_now; goto hitted; } @@ -766,7 +746,7 @@ retry: /* bump up the number of spiltted parts of a page */ ++spiltted; /* also update nr_pages */ - fe->cl->nr_pages = max_t(pgoff_t, fe->cl->nr_pages, index + 1); + fe->pcl->nr_pages = max_t(pgoff_t, fe->pcl->nr_pages, index + 1); next_part: /* can be used for verification */ map->m_llen = offset + cur - map->m_la; @@ -821,15 +801,13 @@ static int z_erofs_decompress_pcluster(struct super_block *sb, enum z_erofs_page_type page_type; bool overlapped, partial; - struct z_erofs_collection *cl; int err; might_sleep(); - cl = z_erofs_primarycollection(pcl); - DBG_BUGON(!READ_ONCE(cl->nr_pages)); + DBG_BUGON(!READ_ONCE(pcl->nr_pages)); - mutex_lock(&cl->lock); - nr_pages = cl->nr_pages; + mutex_lock(&pcl->lock); + nr_pages = pcl->nr_pages; if (nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES) { pages = pages_onstack; @@ -857,9 +835,9 @@ static int z_erofs_decompress_pcluster(struct super_block *sb, err = 0; z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS, - cl->pagevec, 0); + pcl->pagevec, 0); - for (i = 0; i < cl->vcnt; ++i) { + for (i = 0; i < pcl->vcnt; ++i) { unsigned int pagenr; page = z_erofs_pagevec_dequeue(&ctor, &page_type); @@ -945,11 +923,11 @@ static int z_erofs_decompress_pcluster(struct super_block *sb, goto out; llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT; - if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) { + if (nr_pages << PAGE_SHIFT >= pcl->pageofs_out + llen) { outputsize = llen; partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH); } else { - outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs; + outputsize = (nr_pages << PAGE_SHIFT) - pcl->pageofs_out; partial = true; } @@ -963,7 +941,7 @@ static int z_erofs_decompress_pcluster(struct super_block *sb, .in = compressed_pages, .out = pages, .pageofs_in = pcl->pageofs_in, - .pageofs_out = cl->pageofs, + .pageofs_out = pcl->pageofs_out, .inputsize = inputsize, .outputsize = outputsize, .alg = pcl->algorithmformat, @@ -1012,16 +990,12 @@ out: else if (pages != pages_onstack) kvfree(pages); - cl->nr_pages = 0; - cl->vcnt = 0; + pcl->nr_pages = 0; + pcl->vcnt = 0; - /* all cl locks MUST be taken before the following line */ + /* pcluster lock MUST be taken before the following line */ WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL); - - /* all cl locks SHOULD be released right now */ - mutex_unlock(&cl->lock); - - z_erofs_collection_put(cl); + mutex_unlock(&pcl->lock); return err; } @@ -1043,6 +1017,7 @@ static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io, owned = READ_ONCE(pcl->next); z_erofs_decompress_pcluster(io->sb, pcl, pagepool); + erofs_workgroup_put(&pcl->obj); } } diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h index 800b11c53f57..58053bb5066f 100644 --- a/fs/erofs/zdata.h +++ b/fs/erofs/zdata.h @@ -12,21 +12,40 @@ #define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE) #define Z_EROFS_NR_INLINE_PAGEVECS 3 +#define Z_EROFS_PCLUSTER_FULL_LENGTH 0x00000001 +#define Z_EROFS_PCLUSTER_LENGTH_BIT 1 + +/* + * let's leave a type here in case of 
introducing + * another tagged pointer later. + */ +typedef void *z_erofs_next_pcluster_t; + /* * Structure fields follow one of the following exclusion rules. * * I: Modifiable by initialization/destruction paths and read-only * for everyone else; * - * L: Field should be protected by pageset lock; + * L: Field should be protected by the pcluster lock; * * A: Field should be accessed / updated in atomic for parallelized code. */ -struct z_erofs_collection { +struct z_erofs_pcluster { + struct erofs_workgroup obj; struct mutex lock; + /* A: point to next chained pcluster or TAILs */ + z_erofs_next_pcluster_t next; + + /* A: lower limit of decompressed length and if full length or not */ + unsigned int length; + /* I: page offset of start position of decompression */ - unsigned short pageofs; + unsigned short pageofs_out; + + /* I: page offset of inline compressed data */ + unsigned short pageofs_in; /* L: maximum relative page index in pagevec[] */ unsigned short nr_pages; @@ -41,29 +60,6 @@ struct z_erofs_collection { /* I: can be used to free the pcluster by RCU. */ struct rcu_head rcu; }; -}; - -#define Z_EROFS_PCLUSTER_FULL_LENGTH 0x00000001 -#define Z_EROFS_PCLUSTER_LENGTH_BIT 1 - -/* - * let's leave a type here in case of introducing - * another tagged pointer later. - */ -typedef void *z_erofs_next_pcluster_t; - -struct z_erofs_pcluster { - struct erofs_workgroup obj; - struct z_erofs_collection primary_collection; - - /* A: point to next chained pcluster or TAILs */ - z_erofs_next_pcluster_t next; - - /* A: lower limit of decompressed length and if full length or not */ - unsigned int length; - - /* I: page offset of inline compressed data */ - unsigned short pageofs_in; union { /* I: physical cluster size in pages */ @@ -80,8 +76,6 @@ struct z_erofs_pcluster { struct page *compressed_pages[]; }; -#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection) - /* let's avoid the valid 32-bit kernel addresses */ /* the chained workgroup has't submitted io (still open) */ -- cgit v1.2.3 From 39397a46cff3d7b7d3b45b3283491af05bdfb64b Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Sun, 29 May 2022 13:54:24 +0800 Subject: erofs: get rid of label `restart_now' Simplify this part of code. No logic changes. 
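The net effect is easier to see as a plain if/else; the sketch below is based on the hunk that follows and reuses its identifiers, so it is a fragment of z_erofs_do_read_page() rather than standalone code.

```c
	/* reworked range check in z_erofs_do_read_page() (see the hunk below) */
	if (offset + cur < map->m_la ||
	    offset + cur >= map->m_la + map->m_llen) {
		/* out of the current map_blocks window: end the previous
		 * pcluster and map the new position */
		erofs_dbg("out-of-range map @ pos %llu", offset + cur);

		if (z_erofs_collector_end(fe))
			fe->backmost = false;
		map->m_la = offset + cur;
		map->m_llen = 0;
		err = z_erofs_map_blocks_iter(inode, map, 0);
		if (err)
			goto err_out;
	} else {
		if (fe->pcl)
			goto hitted;
		/* didn't get a valid pcluster previously (very rare) */
	}
```

A miss on the current map_blocks window triggers a fresh lookup, a hit with a live pcluster jumps straight to `hitted`, and the rare no-pcluster case simply falls through, which is what the removed `restart_now` label used to handle.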
Link: https://lore.kernel.org/r/20220529055425.226363-3-xiang@kernel.org Acked-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/zdata.c | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 4fd66a66c5f9..6dd858f94e44 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -643,28 +643,23 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, repeat: cur = end - 1; - /* lucky, within the range of the current map_blocks */ - if (offset + cur >= map->m_la && - offset + cur < map->m_la + map->m_llen) { + if (offset + cur < map->m_la || + offset + cur >= map->m_la + map->m_llen) { + erofs_dbg("out-of-range map @ pos %llu", offset + cur); + + if (z_erofs_collector_end(fe)) + fe->backmost = false; + map->m_la = offset + cur; + map->m_llen = 0; + err = z_erofs_map_blocks_iter(inode, map, 0); + if (err) + goto err_out; + } else { + if (fe->pcl) + goto hitted; /* didn't get a valid pcluster previously (very rare) */ - if (!fe->pcl) - goto restart_now; - goto hitted; } - /* go ahead the next map_blocks */ - erofs_dbg("%s: [out-of-range] pos %llu", __func__, offset + cur); - - if (z_erofs_collector_end(fe)) - fe->backmost = false; - - map->m_la = offset + cur; - map->m_llen = 0; - err = z_erofs_map_blocks_iter(inode, map, 0); - if (err) - goto err_out; - -restart_now: if (!(map->m_flags & EROFS_MAP_MAPPED)) goto hitted; -- cgit v1.2.3 From aa793b46bb9342ae3c6152fc21654b8ade8dd125 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Sun, 29 May 2022 13:54:25 +0800 Subject: erofs: simplify z_erofs_pcluster_readmore() Get rid of unnecessary label `skip'. No logic changes. Link: https://lore.kernel.org/r/20220529055425.226363-4-xiang@kernel.org Acked-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/zdata.c | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 6dd858f94e44..b33fb64b3393 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1436,22 +1436,19 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, struct page *page; page = erofs_grab_cache_page_nowait(inode->i_mapping, index); - if (!page) - goto skip; - - if (PageUptodate(page)) { - unlock_page(page); + if (page) { + if (PageUptodate(page)) { + unlock_page(page); + } else { + err = z_erofs_do_read_page(f, page, pagepool); + if (err) + erofs_err(inode->i_sb, + "readmore error at page %lu @ nid %llu", + index, EROFS_I(inode)->nid); + } put_page(page); - goto skip; } - err = z_erofs_do_read_page(f, page, pagepool); - if (err) - erofs_err(inode->i_sb, - "readmore error at page %lu @ nid %llu", - index, EROFS_I(inode)->nid); - put_page(page); -skip: if (cur < PAGE_SIZE) break; cur = (index << PAGE_SHIFT) - 1; -- cgit v1.2.3 From 4398d3c31b582db0d640b23434bf344a6c8df57c Mon Sep 17 00:00:00 2001 From: Weizhao Ouyang Date: Mon, 30 May 2022 15:51:14 +0800 Subject: erofs: fix 'backmost' member of z_erofs_decompress_frontend Initialize 'backmost' to true in DECOMPRESS_FRONTEND_INIT. 
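The explicit initializer matters because members omitted from a C designated initializer are zero-initialized, so `backmost` silently started out as false. A small self-contained illustration of that rule (hypothetical struct, not the erofs one):

```c
#include <stdbool.h>
#include <stdio.h>

struct frontend {
	int mode;
	bool backmost;
};

int main(void)
{
	/* .backmost is not named, so it is zero-initialized (false) */
	struct frontend broken = { .mode = 1 };
	/* naming the member restores the intended default */
	struct frontend fixed  = { .mode = 1, .backmost = true };

	printf("%d %d\n", broken.backmost, fixed.backmost);	/* prints: 0 1 */
	return 0;
}
```

The one-line change below simply names the member in DECOMPRESS_FRONTEND_INIT so it starts out true.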
Fixes: 5c6dcc57e2e5 ("erofs: get rid of `struct z_erofs_collector'")
Signed-off-by: Weizhao Ouyang
Reviewed-by: Gao Xiang
Reviewed-by: Yue Hu
Reviewed-by: Chao Yu
Link: https://lore.kernel.org/r/20220530075114.918874-1-o451686892@gmail.com
Signed-off-by: Gao Xiang
---
 fs/erofs/zdata.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'fs')

diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index b33fb64b3393..90c5b10c5794 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -213,7 +213,7 @@ struct z_erofs_decompress_frontend {
 
 #define DECOMPRESS_FRONTEND_INIT(__i) { \
 	.inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
-	.mode = COLLECT_PRIMARY_FOLLOWED }
+	.mode = COLLECT_PRIMARY_FOLLOWED, .backmost = true }
 
 static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
 static DEFINE_MUTEX(z_pagemap_global_lock);
-- cgit v1.2.3