-rw-r--r--  include/linux/highmem-internal.h  18
-rw-r--r--  include/linux/highmem.h           22
2 files changed, 23 insertions(+), 17 deletions(-)
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index a77be5630209..a694ca95c4ed 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -236,9 +236,21 @@ static inline unsigned long totalhigh_pages(void) { return 0UL; }
#endif /* CONFIG_HIGHMEM */
-/*
- * Prevent people trying to call kunmap_atomic() as if it were kunmap()
- * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+/**
+ * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated!
+ * @__addr: Virtual address to be unmapped
+ *
+ * Unmaps an address previously mapped by kmap_atomic() and re-enables
+ * pagefaults. Depending on the PREEMPT_RT configuration, it also re-enables
+ * migration and preemption. Users should not count on these side effects.
+ *
+ * Mappings should be unmapped in the reverse order that they were mapped.
+ * See kmap_local_page() for details on nesting.
+ *
+ * @__addr can be any address within the mapped page, so there is no need
+ * to subtract any offset that has been added. In contrast to kunmap(),
+ * this function takes the address returned from kmap_atomic(), not the
+ * page passed to it. The compiler will warn you if you pass the page.
*/
#define kunmap_atomic(__addr) \
do { \
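
For illustration, a minimal usage sketch of the API documented above (not part of this patch; copy_two_pages() is a hypothetical helper, and in-tree code would simply use copy_highpage() for this job): kunmap_atomic() takes the address returned by kmap_atomic(), and nested mappings are released in reverse order.

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_two_pages(struct page *dst, struct page *src)
{
	void *vdst = kmap_atomic(dst);
	void *vsrc = kmap_atomic(src);

	memcpy(vdst, vsrc, PAGE_SIZE);

	/* Reverse order: the last mapping taken is the first released. */
	kunmap_atomic(vsrc);
	kunmap_atomic(vdst);
}
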
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 67720bfd6ade..c05ae395898a 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -37,7 +37,7 @@ static inline void *kmap(struct page *page);
/**
* kunmap - Unmap the virtual address mapped by kmap()
- * @addr: Virtual address to be unmapped
+ * @page: Pointer to the page which was mapped by kmap()
*
* Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
* pages in the low memory area.
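
A short sketch of the pairing the corrected @page line describes (zero_page_sleepable() is a hypothetical helper; clear_highpage() already exists for exactly this task): unlike kunmap_atomic(), kunmap() takes the page itself, not the virtual address returned by kmap().

#include <linux/highmem.h>
#include <linux/string.h>

static void zero_page_sleepable(struct page *page)
{
	void *vaddr = kmap(page);	/* may sleep, process context only */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);			/* takes the page, not vaddr */
}
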
@@ -138,24 +138,16 @@ static inline void *kmap_local_folio(struct folio *folio, size_t offset);
*
* Returns: The virtual address of the mapping
*
- * Effectively a wrapper around kmap_local_page() which disables pagefaults
- * and preemption.
+ * In fact a wrapper around kmap_local_page() which additionally disables
+ * pagefaults and, depending on the PREEMPT_RT configuration, CPU migration
+ * and preemption. Users should not count on the latter two side effects.
+ *
+ * Mappings should always be released by kunmap_atomic().
*
* Do not use in new code. Use kmap_local_page() instead.
*/
static inline void *kmap_atomic(struct page *page);
-/**
- * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic()
- * @addr: Virtual address to be unmapped
- *
- * Counterpart to kmap_atomic().
- *
- * Effectively a wrapper around kunmap_local() which additionally undoes
- * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
- * preemption.
- */
-
/* Highmem related interfaces for management code */
static inline unsigned int nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);
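
Since the kmap_atomic() kernel-doc above now points new code at kmap_local_page(), here is a hedged sketch of that replacement (read_page_to_user() is a hypothetical helper, not a kernel API): kmap_local_page() leaves pagefaults and preemption enabled, so the mapping may be held across a sleeping copy_to_user().

#include <linux/highmem.h>
#include <linux/uaccess.h>

static int read_page_to_user(struct page *page, char __user *buf)
{
	void *vaddr = kmap_local_page(page);
	int ret = copy_to_user(buf, vaddr, PAGE_SIZE) ? -EFAULT : 0;

	kunmap_local(vaddr);
	return ret;
}
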
@@ -191,6 +183,8 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
* @vma: The VMA the page is to be allocated for
* @vaddr: The virtual address the page will be inserted into
*
+ * Returns: The allocated and zeroed HIGHMEM page
+ *
* This function will allocate a page for a VMA that the caller knows will
* be able to migrate in the future using move_pages() or be reclaimed
*
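
To illustrate the contract documented in this hunk, a minimal sketch (demo_anon_fault() is hypothetical and omits the real fault-path locking and cleanup): the returned page is already zeroed, so no clear_user_highpage() call is needed afterwards.

#include <linux/highmem.h>
#include <linux/mm.h>

static vm_fault_t demo_anon_fault(struct vm_area_struct *vma,
				  unsigned long vaddr)
{
	struct page *page = alloc_zeroed_user_highpage_movable(vma, vaddr);

	if (!page)
		return VM_FAULT_OOM;

	/* The page is zero-filled and allocated movable; inserting it
	 * into the process page tables is omitted here. */
	return 0;
}
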