Diffstat (limited to 'arch/arc/include')

-rw-r--r--  arch/arc/include/asm/atomic.h           | 60
-rw-r--r--  arch/arc/include/asm/cmpxchg.h          | 14
-rw-r--r--  arch/arc/include/asm/mmzone.h           | 40
-rw-r--r--  arch/arc/include/asm/page.h             | 12
-rw-r--r--  arch/arc/include/asm/pgalloc.h          |  2
-rw-r--r--  arch/arc/include/asm/pgtable.h          | 20
-rw-r--r--  arch/arc/include/uapi/asm/page.h        |  1
-rw-r--r--  arch/arc/include/uapi/asm/sigcontext.h  |  1

8 files changed, 55 insertions(+), 95 deletions(-)
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 5afc79c9b2f5..7a36d79b5b2f 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -14,14 +14,14 @@
 #include <asm/barrier.h>
 #include <asm/smp.h>
 
-#define atomic_read(v)			READ_ONCE((v)->counter)
+#define arch_atomic_read(v)		READ_ONCE((v)->counter)
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
-#define atomic_set(v, i)		WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i)		WRITE_ONCE(((v)->counter), (i))
 
 #define ATOMIC_OP(op, c_op, asm_op)					\
-static inline void atomic_##op(int i, atomic_t *v)			\
+static inline void arch_atomic_##op(int i, atomic_t *v)		\
 {									\
 	unsigned int val;						\
 									\
@@ -37,7 +37,7 @@ static inline void atomic_##op(int i, atomic_t *v)		\
 }									\
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
-static inline int atomic_##op##_return(int i, atomic_t *v)		\
+static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
 {									\
 	unsigned int val;						\
 									\
@@ -63,7 +63,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 }
 
 #define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
-static inline int atomic_fetch_##op(int i, atomic_t *v)		\
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
 {									\
 	unsigned int val, orig;						\
 									\
@@ -94,11 +94,11 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)	\
 #ifndef CONFIG_SMP
 
 /* violating atomic_xxx API locking protocol in UP for optimization sake */
-#define atomic_set(v, i)		WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i)		WRITE_ONCE(((v)->counter), (i))
 
 #else
 
-static inline void atomic_set(atomic_t *v, int i)
+static inline void arch_atomic_set(atomic_t *v, int i)
 {
 	/*
 	 * Independent of hardware support, all of the atomic_xxx() APIs need
@@ -116,7 +116,7 @@ static inline void atomic_set(atomic_t *v, int i)
 	atomic_ops_unlock(flags);
 }
 
-#define atomic_set_release(v, i)	atomic_set((v), (i))
+#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))
 
 #endif
 
@@ -126,7 +126,7 @@ static inline void atomic_set(atomic_t *v, int i)
  */
 
 #define ATOMIC_OP(op, c_op, asm_op)					\
-static inline void atomic_##op(int i, atomic_t *v)			\
+static inline void arch_atomic_##op(int i, atomic_t *v)		\
 {									\
 	unsigned long flags;						\
 									\
@@ -136,7 +136,7 @@ static inline void atomic_##op(int i, atomic_t *v)
 }
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
-static inline int atomic_##op##_return(int i, atomic_t *v)		\
+static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
 {									\
 	unsigned long flags;						\
 	unsigned long temp;						\
@@ -154,7 +154,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 }
 
 #define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
-static inline int atomic_fetch_##op(int i, atomic_t *v)		\
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
 {									\
 	unsigned long flags;						\
 	unsigned long orig;						\
@@ -180,9 +180,6 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)	\
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
-#define atomic_andnot		atomic_andnot
-#define atomic_fetch_andnot	atomic_fetch_andnot
-
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op, c_op, asm_op)					\
 	ATOMIC_OP(op, c_op, asm_op)					\
@@ -193,6 +190,9 @@ ATOMIC_OPS(andnot, &= ~, bic)
 ATOMIC_OPS(or, |=, or)
 ATOMIC_OPS(xor, ^=, xor)
 
+#define arch_atomic_andnot		arch_atomic_andnot
+#define arch_atomic_fetch_andnot	arch_atomic_fetch_andnot
+
 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
@@ -220,7 +220,7 @@ typedef struct {
 
 #define ATOMIC64_INIT(a)	{ (a) }
 
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
 {
 	s64 val;
 
@@ -232,7 +232,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
 	return val;
 }
 
-static inline void atomic64_set(atomic64_t *v, s64 a)
+static inline void arch_atomic64_set(atomic64_t *v, s64 a)
 {
 	/*
 	 * This could have been a simple assignment in "C" but would need
@@ -253,7 +253,7 @@ static inline void atomic64_set(atomic64_t *v, s64 a)
 }
 
 #define ATOMIC64_OP(op, op1, op2)					\
-static inline void atomic64_##op(s64 a, atomic64_t *v)			\
+static inline void arch_atomic64_##op(s64 a, atomic64_t *v)		\
 {									\
 	s64 val;							\
 									\
@@ -270,7 +270,7 @@ static inline void atomic64_##op(s64 a, atomic64_t *v)	\
 }									\
 
 #define ATOMIC64_OP_RETURN(op, op1, op2)				\
-static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v)		\
+static inline s64 arch_atomic64_##op##_return(s64 a, atomic64_t *v)	\
 {									\
 	s64 val;							\
 									\
@@ -293,7 +293,7 @@ static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v)	\
 }
 
 #define ATOMIC64_FETCH_OP(op, op1, op2)					\
-static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v)		\
+static inline s64 arch_atomic64_fetch_##op(s64 a, atomic64_t *v)	\
 {									\
 	s64 val, orig;							\
 									\
@@ -320,9 +320,6 @@ static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v)	\
 	ATOMIC64_OP_RETURN(op, op1, op2)				\
 	ATOMIC64_FETCH_OP(op, op1, op2)
 
-#define atomic64_andnot		atomic64_andnot
-#define atomic64_fetch_andnot	atomic64_fetch_andnot
-
 ATOMIC64_OPS(add, add.f, adc)
 ATOMIC64_OPS(sub, sub.f, sbc)
 ATOMIC64_OPS(and, and, and)
@@ -330,13 +327,16 @@ ATOMIC64_OPS(andnot, bic, bic)
 ATOMIC64_OPS(or, or, or)
 ATOMIC64_OPS(xor, xor, xor)
 
+#define arch_atomic64_andnot		arch_atomic64_andnot
+#define arch_atomic64_fetch_andnot	arch_atomic64_fetch_andnot
+
 #undef ATOMIC64_OPS
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
 static inline s64
-atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
+arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
 {
 	s64 prev;
 
@@ -358,7 +358,7 @@ atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
 	return prev;
 }
 
-static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
+static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
 {
 	s64 prev;
 
@@ -379,14 +379,14 @@ static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
 }
 
 /**
- * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
  * @v: pointer of type atomic64_t
  *
  * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
 	s64 val;
 
@@ -408,10 +408,10 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
 	return val;
 }
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 
 /**
- * atomic64_fetch_add_unless - add unless the number is a given value
+ * arch_atomic64_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
  * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
@@ -419,7 +419,7 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
  * Atomically adds @a to @v, if it was not @u.
  * Returns the old value of @v
  */
-static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	s64 old, temp;
 
@@ -443,7 +443,7 @@ static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 	return old;
 }
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
 
 #endif	/* !CONFIG_GENERIC_ATOMIC64 */
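Note on the !LLSC half of atomic.h above: without LLOCK/SCOND the 32-bit atomics fall back to a spinlock, and per the comment retained in the hunks even arch_atomic_set() must take it, so a plain store cannot race a lock-protected read-modify-write. Hand-expanding ATOMIC_OP(add, +=, add) from those hunks gives roughly the following (a sketch; the c_op body itself is elided in the context lines shown above):

static inline void arch_atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	atomic_ops_lock(flags);		/* irq-save on UP, plus arch spinlock on SMP */
	v->counter += i;		/* the c_op supplied to the macro */
	atomic_ops_unlock(flags);	/* unlock / restore irqs */
}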
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index 9b87e162e539..d42917e803e1 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -63,7 +63,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
 
 #endif
 
-#define cmpxchg(ptr, o, n) ({				\
+#define arch_cmpxchg(ptr, o, n) ({			\
 	(typeof(*(ptr)))__cmpxchg((ptr),		\
 				  (unsigned long)(o),	\
 				  (unsigned long)(n));	\
@@ -75,7 +75,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
 
  * !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee
  * semantics, and this lock also happens to be used by atomic_*()
  */
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
 
 /*
@@ -116,14 +116,14 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
 *
 * Technically the lock is also needed for UP (boils down to irq save/restore)
 * but we can cheat a bit since cmpxchg() atomic_ops_lock() would cause irqs to
- * be disabled thus can't possibly be interrpted/preempted/clobbered by xchg()
+ * be disabled thus can't possibly be interrupted/preempted/clobbered by xchg()
 * Other way around, xchg is one instruction anyways, so can't be interrupted
 * as such
 */
 
 #if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
 
-#define xchg(ptr, with)		\
+#define arch_xchg(ptr, with)	\
 ({				\
 	unsigned long flags;	\
 	typeof(*(ptr)) old_val;	\
@@ -136,14 +136,14 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
 
 #else
 
-#define xchg(ptr, with) _xchg(ptr, with)
+#define arch_xchg(ptr, with) _xchg(ptr, with)
 
 #endif
 
 /*
 * "atomic" variant of xchg()
 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
- * Since xchg() doesn't always do that, it would seem that following defintion
+ * Since xchg() doesn't always do that, it would seem that following definition
 * is incorrect. But here's the rationale:
 *  SMP : Even xchg() takes the atomic_ops_lock, so OK.
 *  LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
@@ -153,6 +153,6 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
 *  can't be clobbered by others. Thus no serialization required when
 *  atomic_xchg is involved.
 */
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
 
 #endif
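Why the mechanical atomic_* to arch_atomic_* rename in these two files: once an architecture exports arch_-prefixed entry points, the generic layer can interpose sanitizer instrumentation in one place instead of every arch open-coding it. A minimal sketch of the wrapper shape (illustrative, not the kernel's verbatim include/linux/atomic-instrumented.h):

static __always_inline int atomic_read(const atomic_t *v)
{
	instrument_atomic_read(v, sizeof(*v));	/* KASAN/KCSAN hook */
	return arch_atomic_read(v);		/* arch-provided primitive */
}

static __always_inline void atomic_set(atomic_t *v, int i)
{
	instrument_atomic_write(v, sizeof(*v));
	arch_atomic_set(v, i);
}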
diff --git a/arch/arc/include/asm/mmzone.h b/arch/arc/include/asm/mmzone.h
deleted file mode 100644
index b86b9d1e54dc..000000000000
--- a/arch/arc/include/asm/mmzone.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
- */
-
-#ifndef _ASM_ARC_MMZONE_H
-#define _ASM_ARC_MMZONE_H
-
-#ifdef CONFIG_DISCONTIGMEM
-
-extern struct pglist_data node_data[];
-#define NODE_DATA(nid) (&node_data[nid])
-
-static inline int pfn_to_nid(unsigned long pfn)
-{
-	int is_end_low = 1;
-
-	if (IS_ENABLED(CONFIG_ARC_HAS_PAE40))
-		is_end_low = pfn <= virt_to_pfn(0xFFFFFFFFUL);
-
-	/*
-	 * node 0: lowmem: 0x8000_0000 to 0xFFFF_FFFF
-	 * node 1: HIGHMEM w/o  PAE40: 0x0 to 0x7FFF_FFFF
-	 *         HIGHMEM with PAE40: 0x1_0000_0000 to ...
-	 */
-	if (pfn >= ARCH_PFN_OFFSET && is_end_low)
-		return 0;
-
-	return 1;
-}
-
-static inline int pfn_valid(unsigned long pfn)
-{
-	int nid = pfn_to_nid(pfn);
-
-	return (pfn <= node_end_pfn(nid));
-}
-#endif /* CONFIG_DISCONTIGMEM */
-
-#endif
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index ad9b7fe4dba3..4a9d33372fe2 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -7,6 +7,18 @@
 
 #include <uapi/asm/page.h>
 
+#ifdef CONFIG_ARC_HAS_PAE40
+
+#define MAX_POSSIBLE_PHYSMEM_BITS	40
+#define PAGE_MASK_PHYS			(0xff00000000ull | PAGE_MASK)
+
+#else /* CONFIG_ARC_HAS_PAE40 */
+
+#define MAX_POSSIBLE_PHYSMEM_BITS	32
+#define PAGE_MASK_PHYS			PAGE_MASK
+
+#endif /* CONFIG_ARC_HAS_PAE40 */
+
 #ifndef __ASSEMBLY__
 
 #define clear_page(paddr)		memset((paddr), 0, PAGE_SIZE)
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 6147db925248..a32ca3104ced 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -129,6 +129,4 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
 
 #define __pte_free_tlb(tlb, pte, addr)  pte_free((tlb)->mm, pte)
 
-#define pmd_pgtable(pmd)	((pgtable_t) pmd_page_vaddr(pmd))
-
 #endif /* _ASM_ARC_PGALLOC_H */
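The new PAGE_MASK_PHYS in page.h exists because with PAE40 a physical frame address carries bits [39:32], which the 32-bit-wide PAGE_MASK would truncate. A self-contained user-space sketch of the arithmetic (PAGE_SHIFT and the PTE value are made up for illustration; 8 KiB is one of ARC's page-size options):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	13			/* 8 KiB pages */
#define PAGE_SIZE	(1u << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))	/* 32 bits wide, as on the kernel side */
#define PAGE_MASK_PHYS	(0xff00000000ull | PAGE_MASK)

int main(void)
{
	uint64_t pte = 0x9f80002000ull | 0x3;	/* 40-bit frame address + low flag bits */

	/* plain PAGE_MASK zero-extends and drops PA bits [39:32]: prints 0x80002000 */
	printf("PAGE_MASK      -> %#llx\n", (unsigned long long)(pte & PAGE_MASK));
	/* PAGE_MASK_PHYS keeps them while still clearing the flags: 0x9f80002000 */
	printf("PAGE_MASK_PHYS -> %#llx\n", (unsigned long long)(pte & PAGE_MASK_PHYS));
	return 0;
}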
- */ -#define FIRST_USER_ADDRESS 0UL - /**************************************************************** * Bucket load of VM Helpers @@ -362,6 +350,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, #define kern_addr_valid(addr) (1) +#define pmd_pgtable(pmd) ((pgtable_t) pmd_page_vaddr(pmd)) + /* * remap a physical page `pfn' of size `size' with page protection `prot' * into virtual address `from' diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h index 2a97e2718a21..2a4ad619abfb 100644 --- a/arch/arc/include/uapi/asm/page.h +++ b/arch/arc/include/uapi/asm/page.h @@ -33,5 +33,4 @@ #define PAGE_MASK (~(PAGE_SIZE-1)) - #endif /* _UAPI__ASM_ARC_PAGE_H */ diff --git a/arch/arc/include/uapi/asm/sigcontext.h b/arch/arc/include/uapi/asm/sigcontext.h index 95f8a4380e11..7a5449dfcb29 100644 --- a/arch/arc/include/uapi/asm/sigcontext.h +++ b/arch/arc/include/uapi/asm/sigcontext.h @@ -18,6 +18,7 @@ */ struct sigcontext { struct user_regs_struct regs; + struct user_regs_arcv2 v2abi; }; #endif /* _ASM_ARC_SIGCONTEXT_H */ |