Diffstat (limited to 'kernel/module.c')
-rw-r--r--  kernel/module.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/kernel/module.c b/kernel/module.c
index a6e43a5806a1..c9bea7f2b43e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1472,7 +1472,8 @@ static ssize_t module_sect_show(struct module_attribute *mattr,
 {
 	struct module_sect_attr *sattr =
 		container_of(mattr, struct module_sect_attr, mattr);
-	return sprintf(buf, "0x%pK\n", (void *)sattr->address);
+	return sprintf(buf, "0x%px\n", kptr_restrict < 2 ?
+		       (void *)sattr->address : NULL);
 }
 
 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
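
The first hunk switches module_sect_show() from "%pK" to "%px". "%pK" is censored automatically inside vsnprintf() according to the kptr_restrict sysctl, while "%px" always prints the raw pointer value, so with "%px" the policy decision has to live in the caller: here the real section address is printed only while kptr_restrict < 2, and NULL otherwise. Below is a minimal sketch of the same pattern in a generic sysfs attribute; the demo_* names are hypothetical, and kptr_restrict is the real variable declared in <linux/kernel.h> (it is not exported to loadable modules, so this would only link built-in):

	#include <linux/kernel.h>	/* kptr_restrict, sprintf() */
	#include <linux/kobject.h>
	#include <linux/sysfs.h>

	static unsigned long demo_address;	/* some address worth hiding */

	static ssize_t demo_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
	{
		/*
		 * "%px" never censors, so gate the value explicitly:
		 * print the real address only while kptr_restrict < 2,
		 * NULL once the strictest setting is in effect.
		 */
		return sprintf(buf, "0x%px\n",
			       kptr_restrict < 2 ? (void *)demo_address : NULL);
	}

	static struct kobj_attribute demo_attr = __ATTR_RO(demo);

Gating on kptr_restrict < 2 matches the documented "%pK" behaviour at the strictest setting: when kptr_restrict is 2, even a CAP_SYSLOG reader gets a censored value.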
@@ -3516,6 +3517,11 @@ static noinline int do_init_module(struct module *mod)
 	 * walking this with preempt disabled. In all the failure paths, we
 	 * call synchronize_sched(), but we don't want to slow down the success
 	 * path, so use actual RCU here.
+	 * Note that module_alloc() on most architectures creates W+X page
+	 * mappings which won't be cleaned up until do_free_init() runs. Any
+	 * code such as mark_rodata_ro() which depends on those mappings to
+	 * be cleaned up needs to sync with the queued work - ie
+	 * rcu_barrier_sched()
 	 */
 	call_rcu_sched(&freeinit->rcu, do_free_init);
 	mutex_unlock(&module_mutex);
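
The added comment documents an ordering requirement: do_free_init() is what finally frees the module's init sections, so the W+X mappings created by module_alloc() persist until the call_rcu_sched() callback queued above has run. A hedged sketch of a consumer honoring that, loosely modeled on the mark_readonly()/mark_rodata_ro() path in init/main.c (the function name and placement are illustrative, not part of this diff):

	#include <linux/rcupdate.h>	/* rcu_barrier_sched() */

	/* Assumes mark_rodata_ro() is declared here, as in init/main.c. */
	static void mark_readonly_sketch(void)
	{
		/*
		 * Flush the call_rcu_sched() queue so every pending
		 * do_free_init() has torn down its W+X init mappings;
		 * otherwise a W+X scan run under mark_rodata_ro() can
		 * see stale module init mappings and warn spuriously.
		 */
		rcu_barrier_sched();
		mark_rodata_ro();
	}

rcu_barrier_sched() (rather than synchronize_sched()) is the right primitive here because the comment is about already-queued callbacks completing, not about a grace period elapsing.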