Diffstat (limited to 'arch/csky')
-rw-r--r--  arch/csky/Kconfig                  3
-rw-r--r--  arch/csky/abiv1/alignment.c        4
-rw-r--r--  arch/csky/include/asm/bitops.h     2
-rw-r--r--  arch/csky/include/asm/uaccess.h   16
-rw-r--r--  arch/csky/kernel/signal.c          2
-rw-r--r--  arch/csky/lib/usercopy.c           8
-rw-r--r--  arch/csky/mm/dma-mapping.c       142
-rw-r--r--  arch/csky/mm/init.c                4
8 files changed, 17 insertions, 164 deletions
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 7689b54d4236..398113c845f5 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -7,8 +7,7 @@ config CSKY
select COMMON_CLK
select CLKSRC_MMIO
select CLKSRC_OF
- select DMA_DIRECT_OPS
- select DMA_NONCOHERENT_OPS
+ select DMA_DIRECT_REMAP
select IRQ_DOMAIN
select HANDLE_DOMAIN_IRQ
select DW_APB_TIMER_OF
diff --git a/arch/csky/abiv1/alignment.c b/arch/csky/abiv1/alignment.c
index 60205e98fb87..d789be36eb4f 100644
--- a/arch/csky/abiv1/alignment.c
+++ b/arch/csky/abiv1/alignment.c
@@ -32,7 +32,7 @@ static int ldb_asm(uint32_t addr, uint32_t *valp)
uint32_t val;
int err;
- if (!access_ok(VERIFY_READ, (void *)addr, 1))
+ if (!access_ok((void *)addr, 1))
return 1;
asm volatile (
@@ -67,7 +67,7 @@ static int stb_asm(uint32_t addr, uint32_t val)
{
int err;
- if (!access_ok(VERIFY_WRITE, (void *)addr, 1))
+ if (!access_ok((void *)addr, 1))
return 1;
asm volatile (
diff --git a/arch/csky/include/asm/bitops.h b/arch/csky/include/asm/bitops.h
index 335f2883fb1e..43b9838bff63 100644
--- a/arch/csky/include/asm/bitops.h
+++ b/arch/csky/include/asm/bitops.h
@@ -40,7 +40,7 @@ static __always_inline unsigned long __ffs(unsigned long x)
/*
* asm-generic/bitops/fls.h
*/
-static __always_inline int fls(int x)
+static __always_inline int fls(unsigned int x)
{
asm volatile(
"ff1 %0\n"
diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h
index acaf0e210d81..eaa1c3403a42 100644
--- a/arch/csky/include/asm/uaccess.h
+++ b/arch/csky/include/asm/uaccess.h
@@ -16,10 +16,7 @@
#include <linux/version.h>
#include <asm/segment.h>
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-static inline int access_ok(int type, const void *addr, unsigned long size)
+static inline int access_ok(const void *addr, unsigned long size)
{
unsigned long limit = current_thread_info()->addr_limit.seg;
@@ -27,12 +24,7 @@ static inline int access_ok(int type, const void *addr, unsigned long size)
((unsigned long)(addr + size) < limit));
}
-static inline int verify_area(int type, const void *addr, unsigned long size)
-{
- return access_ok(type, addr, size) ? 0 : -EFAULT;
-}
-
-#define __addr_ok(addr) (access_ok(VERIFY_READ, addr, 0))
+#define __addr_ok(addr) (access_ok(addr, 0))
extern int __put_user_bad(void);
@@ -91,7 +83,7 @@ extern int __put_user_bad(void);
long __pu_err = -EFAULT; \
typeof(*(ptr)) *__pu_addr = (ptr); \
typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \
- if (access_ok(VERIFY_WRITE, __pu_addr, size) && __pu_addr) \
+ if (access_ok(__pu_addr, size) && __pu_addr) \
__put_user_size(__pu_val, __pu_addr, (size), __pu_err); \
__pu_err; \
})
@@ -217,7 +209,7 @@ do { \
({ \
int __gu_err = -EFAULT; \
const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
- if (access_ok(VERIFY_READ, __gu_ptr, size) && __gu_ptr) \
+ if (access_ok(__gu_ptr, size) && __gu_ptr) \
__get_user_size(x, __gu_ptr, size, __gu_err); \
__gu_err; \
})
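These uaccess.h hunks track the kernel-wide removal of the unused "type" argument from access_ok(): VERIFY_READ/VERIFY_WRITE no longer exist, access_ok() takes only an address and a size, and the long-deprecated verify_area() wrapper (which returned 0 on success) is dropped, so its callers switch to an inverted !access_ok() test as in the signal.c hunk below. A minimal sketch of the caller pattern after the conversion; copy_flags_to_user() is a hypothetical example, not code from this patch:

static int copy_flags_to_user(unsigned int __user *uptr, unsigned int flags)
{
	/* Two-argument form: only the user range is checked, no VERIFY_WRITE. */
	if (!access_ok(uptr, sizeof(*uptr)))
		return -EFAULT;

	return __put_user(flags, uptr) ? -EFAULT : 0;
}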
diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c
index e680dc127f40..207a891479d2 100644
--- a/arch/csky/kernel/signal.c
+++ b/arch/csky/kernel/signal.c
@@ -88,7 +88,7 @@ do_rt_sigreturn(void)
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe *frame = (struct rt_sigframe *)(regs->usp);
- if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+ if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
diff --git a/arch/csky/lib/usercopy.c b/arch/csky/lib/usercopy.c
index ac9170e2cbb8..647a23986fb5 100644
--- a/arch/csky/lib/usercopy.c
+++ b/arch/csky/lib/usercopy.c
@@ -7,7 +7,7 @@
unsigned long raw_copy_from_user(void *to, const void *from,
unsigned long n)
{
- if (access_ok(VERIFY_READ, from, n))
+ if (access_ok(from, n))
__copy_user_zeroing(to, from, n);
else
memset(to, 0, n);
@@ -18,7 +18,7 @@ EXPORT_SYMBOL(raw_copy_from_user);
unsigned long raw_copy_to_user(void *to, const void *from,
unsigned long n)
{
- if (access_ok(VERIFY_WRITE, to, n))
+ if (access_ok(to, n))
__copy_user(to, from, n);
return n;
}
@@ -113,7 +113,7 @@ long strncpy_from_user(char *dst, const char *src, long count)
{
long res = -EFAULT;
- if (access_ok(VERIFY_READ, src, 1))
+ if (access_ok(src, 1))
__do_strncpy_from_user(dst, src, count, res);
return res;
}
@@ -236,7 +236,7 @@ do { \
unsigned long
clear_user(void __user *to, unsigned long n)
{
- if (access_ok(VERIFY_WRITE, to, n))
+ if (access_ok(to, n))
__do_clear_user(to, n);
return n;
}
diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c
index 85437b21e045..80783bb71c5c 100644
--- a/arch/csky/mm/dma-mapping.c
+++ b/arch/csky/mm/dma-mapping.c
@@ -14,73 +14,13 @@
#include <linux/version.h>
#include <asm/cache.h>
-static struct gen_pool *atomic_pool;
-static size_t atomic_pool_size __initdata = SZ_256K;
-
-static int __init early_coherent_pool(char *p)
-{
- atomic_pool_size = memparse(p, &p);
- return 0;
-}
-early_param("coherent_pool", early_coherent_pool);
-
static int __init atomic_pool_init(void)
{
- struct page *page;
- size_t size = atomic_pool_size;
- void *ptr;
- int ret;
-
- atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
- if (!atomic_pool)
- BUG();
-
- page = alloc_pages(GFP_KERNEL | GFP_DMA, get_order(size));
- if (!page)
- BUG();
-
- ptr = dma_common_contiguous_remap(page, size, VM_ALLOC,
- pgprot_noncached(PAGE_KERNEL),
- __builtin_return_address(0));
- if (!ptr)
- BUG();
-
- ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
- page_to_phys(page), atomic_pool_size, -1);
- if (ret)
- BUG();
-
- gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);
-
- pr_info("DMA: preallocated %zu KiB pool for atomic coherent pool\n",
- atomic_pool_size / 1024);
-
- pr_info("DMA: vaddr: 0x%x phy: 0x%lx,\n", (unsigned int)ptr,
- page_to_phys(page));
-
- return 0;
+ return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
}
postcore_initcall(atomic_pool_init);
-static void *csky_dma_alloc_atomic(struct device *dev, size_t size,
- dma_addr_t *dma_handle)
-{
- unsigned long addr;
-
- addr = gen_pool_alloc(atomic_pool, size);
- if (addr)
- *dma_handle = gen_pool_virt_to_phys(atomic_pool, addr);
-
- return (void *)addr;
-}
-
-static void csky_dma_free_atomic(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
-{
- gen_pool_free(atomic_pool, (unsigned long)vaddr, size);
-}
-
-static void __dma_clear_buffer(struct page *page, size_t size)
+void arch_dma_prep_coherent(struct page *page, size_t size)
{
if (PageHighMem(page)) {
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -107,84 +47,6 @@ static void __dma_clear_buffer(struct page *page, size_t size)
}
}
-static void *csky_dma_alloc_nonatomic(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- void *vaddr;
- struct page *page;
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- if (DMA_ATTR_NON_CONSISTENT & attrs) {
- pr_err("csky %s can't support DMA_ATTR_NON_CONSISTENT.\n", __func__);
- return NULL;
- }
-
- if (IS_ENABLED(CONFIG_DMA_CMA))
- page = dma_alloc_from_contiguous(dev, count, get_order(size),
- gfp);
- else
- page = alloc_pages(gfp, get_order(size));
-
- if (!page) {
- pr_err("csky %s no more free pages.\n", __func__);
- return NULL;
- }
-
- *dma_handle = page_to_phys(page);
-
- __dma_clear_buffer(page, size);
-
- if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
- return page;
-
- vaddr = dma_common_contiguous_remap(page, PAGE_ALIGN(size), VM_USERMAP,
- pgprot_noncached(PAGE_KERNEL), __builtin_return_address(0));
- if (!vaddr)
- BUG();
-
- return vaddr;
-}
-
-static void csky_dma_free_nonatomic(
- struct device *dev,
- size_t size,
- void *vaddr,
- dma_addr_t dma_handle,
- unsigned long attrs
- )
-{
- struct page *page = phys_to_page(dma_handle);
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- if ((unsigned int)vaddr >= VMALLOC_START)
- dma_common_free_remap(vaddr, size, VM_USERMAP);
-
- if (IS_ENABLED(CONFIG_DMA_CMA))
- dma_release_from_contiguous(dev, page, count);
- else
- __free_pages(page, get_order(size));
-}
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp, unsigned long attrs)
-{
- if (gfpflags_allow_blocking(gfp))
- return csky_dma_alloc_nonatomic(dev, size, dma_handle, gfp,
- attrs);
- else
- return csky_dma_alloc_atomic(dev, size, dma_handle);
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
-{
- if (!addr_in_gen_pool(atomic_pool, (unsigned int) vaddr, size))
- csky_dma_free_nonatomic(dev, size, vaddr, dma_handle, attrs);
- else
- csky_dma_free_atomic(dev, size, vaddr, dma_handle, attrs);
-}
-
static inline void cache_op(phys_addr_t paddr, size_t size,
void (*fn)(unsigned long start, unsigned long end))
{
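Together with the Kconfig switch from DMA_DIRECT_OPS/DMA_NONCOHERENT_OPS to DMA_DIRECT_REMAP, the dma-mapping.c changes drop csky's hand-rolled atomic pool and remapping allocator in favour of the generic DMA remap code: atomic_pool_init() now just calls dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL)), and the old __dma_clear_buffer() is renamed to arch_dma_prep_coherent(), the hook the generic allocator invokes to push a freshly allocated buffer out of the caches before remapping it uncached. A rough sketch of that contract for a lowmem-only configuration; the cache helper name is an assumption, not the actual csky API:

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	void *ptr = page_address(page);

	/* Zero the buffer, then write back and invalidate it so the device
	 * never sees stale cache lines through the uncached remapping set
	 * up by the generic DMA_DIRECT_REMAP code.
	 */
	memset(ptr, 0, size);
	cache_wbinv_range((unsigned long)ptr, (unsigned long)ptr + size); /* assumed helper */
}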
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index dc07c078f9b8..66e597053488 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -71,7 +71,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
ClearPageReserved(virt_to_page(start));
init_page_count(virt_to_page(start));
free_page(start);
- totalram_pages++;
+ totalram_pages_inc();
}
}
#endif
@@ -88,7 +88,7 @@ void free_initmem(void)
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
free_page(addr);
- totalram_pages++;
+ totalram_pages_inc();
addr += PAGE_SIZE;
}