author    Ingo Molnar <mingo@kernel.org>    2021-05-07 04:06:47 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>    2021-05-07 10:26:35 +0300
commit    f0953a1bbaca71e1ebbcb9864eb1b273156157ed (patch)
tree      2a5ebeef98d5f1144782a3fe6bb36f2a9118c463 /mm/vmalloc.c
parent    fa60ce2cb4506701c43bd4cf3ca23d970daf1b9c (diff)
download  linux-f0953a1bbaca71e1ebbcb9864eb1b273156157ed.tar.xz
mm: fix typos in comments
Fix ~94 single-word typos in locking code comments, plus a few very obvious grammar mistakes.

Link: https://lkml.kernel.org/r/20210322212624.GA1963421@gmail.com
Link: https://lore.kernel.org/r/20210322205203.GB1959563@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Randy Dunlap <rdunlap@infradead.org>
Cc: Bhaskar Chowdhury <unixbhaskar@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--    mm/vmalloc.c    6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a7f318c9e426..a13ac524f6ff 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1583,7 +1583,7 @@ static unsigned long lazy_max_pages(void)
static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
/*
- * Serialize vmap purging. There is no actual criticial section protected
+ * Serialize vmap purging. There is no actual critical section protected
* by this look, but we want to avoid concurrent calls for performance
* reasons and to make the pcpu_get_vm_areas more deterministic.
*/
@@ -2628,7 +2628,7 @@ static void __vfree(const void *addr)
* May sleep if called *not* from interrupt context.
* Must not be called in NMI context (strictly speaking, it could be
* if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
- * conventions for vfree() arch-depenedent would be a really bad idea).
+ * conventions for vfree() arch-dependent would be a really bad idea).
*/
void vfree(const void *addr)
{
@@ -3141,7 +3141,7 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
/*
* To do safe access to this _mapped_ area, we need
* lock. But adding lock here means that we need to add
- * overhead of vmalloc()/vfree() calles for this _debug_
+ * overhead of vmalloc()/vfree() calls for this _debug_
* interface, rarely used. Instead of that, we'll use
* kmap() and get small overhead in this access function.
*/