author	Heiko Carstens <hca@linux.ibm.com>	2023-10-27 15:12:37 +0300
committer	Vasily Gorbik <gor@linux.ibm.com>	2023-11-06 00:34:58 +0300
commit	a3e89e20fe00209779eb3e419621070b9158acdb
tree	408b99b38141eb30c1392d16e6f2e43038298736
parent	468a3bc2b7b955a7cf97d47c6022bf1ae4a538a3
s390/cmma: move set_page_stable() and friends to header file
In order to be usable for early boot code, move the simple set_page_xxx() functions to a header file. Also change the parameters and the function names slightly. This is required since there aren't any struct pages available in early boot code, and the functions are renamed to make sure that all users are converted to the new API: instead of a pointer to a struct page, a virtual address is passed, and instead of an order, the number of pages for which the page state needs to be set.

Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
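(Illustration only, not part of the patch: the calling-convention change for a caller that still holds a struct page; some_page and order below are placeholder names.)

/* before this patch: struct page pointer plus allocation order */
set_page_stable_dat(some_page, order);

/* after this patch: virtual address plus explicit number of pages */
__set_page_stable_dat(page_to_virt(some_page), 1UL << order);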
-rw-r--r--	arch/s390/include/asm/page-states.h	38
-rw-r--r--	arch/s390/mm/page-states.c	47
2 files changed, 43 insertions(+), 42 deletions(-)
diff --git a/arch/s390/include/asm/page-states.h b/arch/s390/include/asm/page-states.h
index 659e3c963ce6..1eae4046c07d 100644
--- a/arch/s390/include/asm/page-states.h
+++ b/arch/s390/include/asm/page-states.h
@@ -8,6 +8,7 @@
#define PAGE_STATES_H
#include <asm/sections.h>
+#include <asm/page.h>
#define ESSA_GET_STATE 0
#define ESSA_SET_STABLE 1
@@ -22,4 +23,41 @@
extern int __bootdata_preserved(cmma_flag);
+static __always_inline unsigned long essa(unsigned long paddr, unsigned char cmd)
+{
+ unsigned long rc;
+
+ asm volatile(
+ " .insn rrf,0xb9ab0000,%[rc],%[paddr],%[cmd],0"
+ : [rc] "=d" (rc)
+ : [paddr] "d" (paddr),
+ [cmd] "i" (cmd));
+ return rc;
+}
+
+static __always_inline void __set_page_state(void *addr, unsigned long num_pages, unsigned char cmd)
+{
+ unsigned long paddr = __pa(addr) & PAGE_MASK;
+
+ while (num_pages--) {
+ essa(paddr, cmd);
+ paddr += PAGE_SIZE;
+ }
+}
+
+static inline void __set_page_unused(void *addr, unsigned long num_pages)
+{
+ __set_page_state(addr, num_pages, ESSA_SET_UNUSED);
+}
+
+static inline void __set_page_stable_dat(void *addr, unsigned long num_pages)
+{
+ __set_page_state(addr, num_pages, ESSA_SET_STABLE);
+}
+
+static inline void __set_page_stable_nodat(void *addr, unsigned long num_pages)
+{
+ __set_page_state(addr, num_pages, ESSA_SET_STABLE_NODAT);
+}
+
#endif
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index b68fef3d1230..7dc75dd05f48 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -20,43 +20,6 @@
int __bootdata_preserved(cmma_flag);
-static __always_inline void essa(unsigned long paddr, unsigned char cmd)
-{
- unsigned long rc;
-
- asm volatile(
- " .insn rrf,0xb9ab0000,%[rc],%[paddr],%[cmd],0"
- : [rc] "=d" (rc)
- : [paddr] "d" (paddr),
- [cmd] "i" (cmd));
-}
-
-static __always_inline void __set_page_state(struct page *page, int order, unsigned char cmd)
-{
- unsigned long paddr = page_to_phys(page);
- unsigned long num_pages = 1UL << order;
-
- while (num_pages--) {
- essa(paddr, cmd);
- paddr += PAGE_SIZE;
- }
-}
-
-static inline void set_page_unused(struct page *page, int order)
-{
- __set_page_state(page, order, ESSA_SET_UNUSED);
-}
-
-static inline void set_page_stable_dat(struct page *page, int order)
-{
- __set_page_state(page, order, ESSA_SET_STABLE);
-}
-
-static inline void set_page_stable_nodat(struct page *page, int order)
-{
- __set_page_state(page, order, ESSA_SET_STABLE_NODAT);
-}
-
static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
unsigned long next;
@@ -169,7 +132,7 @@ void __init cmma_init_nodat(void)
continue; /* skip page table pages */
if (!list_empty(&page->lru))
continue; /* skip free pages */
- set_page_stable_nodat(page, 0);
+ __set_page_stable_nodat(page_to_virt(page), 1);
}
}
}
@@ -178,7 +141,7 @@ void arch_free_page(struct page *page, int order)
{
if (!cmma_flag)
return;
- set_page_unused(page, order);
+ __set_page_unused(page_to_virt(page), 1UL << order);
}
void arch_alloc_page(struct page *page, int order)
@@ -186,14 +149,14 @@ void arch_alloc_page(struct page *page, int order)
if (!cmma_flag)
return;
if (cmma_flag < 2)
- set_page_stable_dat(page, order);
+ __set_page_stable_dat(page_to_virt(page), 1UL << order);
else
- set_page_stable_nodat(page, order);
+ __set_page_stable_nodat(page_to_virt(page), 1UL << order);
}
void arch_set_page_dat(struct page *page, int order)
{
if (!cmma_flag)
return;
- set_page_stable_dat(page, order);
+ __set_page_stable_dat(page_to_virt(page), 1UL << order);
}