Diffstat (limited to 'arch/mips/mm')
-rw-r--r--	arch/mips/mm/Makefile		|   2
-rw-r--r--	arch/mips/mm/dma-default.c	|   7
-rw-r--r--	arch/mips/mm/fault.c		|  16
-rw-r--r--	arch/mips/mm/highmem.c		|   4
-rw-r--r--	arch/mips/mm/sc-rm7k.c		| 163
-rw-r--r--	arch/mips/mm/tlbex.c		|   5
-rw-r--r--	arch/mips/mm/uasm.c		| 162
7 files changed, 260 insertions, 99 deletions
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index f0e435599707..d679c772d082 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -34,5 +34,3 @@ obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
-
-EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 9547bc0cf188..7ba890860d98 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -357,13 +357,6 @@ int dma_supported(struct device *dev, u64 mask)
EXPORT_SYMBOL(dma_supported);
-int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
-{
- return plat_device_is_coherent(dev);
-}
-
-EXPORT_SYMBOL(dma_is_consistent);
-
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
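
dma_is_consistent() is dropped here as part of its tree-wide removal; per-device coherence can no longer be queried, so cache maintenance is left to dma_cache_sync(), which the MIPS implementation already makes a no-op for coherent devices. A hedged sketch of the caller pattern this obsoletes; mydrv_push_buffer is hypothetical:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical driver fragment. The conditional in the comment stops
 * compiling once dma_is_consistent() is gone; calling dma_cache_sync()
 * unconditionally is safe because it only flushes when the platform
 * reports the device as non-coherent. */
static void mydrv_push_buffer(struct device *dev, void *buf, size_t len)
{
	/* Old pattern, now impossible:
	 *	if (!dma_is_consistent(dev, dma_handle))
	 *		dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
	 */
	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
}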
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index b78f7d913ca4..783ad0065fdf 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -16,8 +16,8 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/vt_kern.h> /* For unblank_screen() */
#include <linux/module.h>
+#include <linux/kprobes.h>
#include <asm/branch.h>
#include <asm/mmu_context.h>
@@ -25,13 +25,14 @@
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/highmem.h> /* For VMALLOC_END */
+#include <linux/kdebug.h>
/*
* This routine handles page faults. It determines the address and the
* problem, and then passes it off to one of the appropriate
* routines.
*/
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long write,
unsigned long address)
{
struct vm_area_struct * vma = NULL;
@@ -47,6 +48,17 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
field, regs->cp0_epc);
#endif
+#ifdef CONFIG_KPROBES
+ /*
+ * This notifies the kprobes fault handler. The exception code is
+ * redundant, as it is also carried in regs, but we pass it anyway.
+ */
+ if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
+ (regs->cp0_cause >> 2) & 0x1f, SIGSEGV) == NOTIFY_STOP)
+ return;
+#endif
+
info.si_code = SEGV_MAPERR;
/*
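
The NOTIFY_STOP early return is only useful because something listens for DIE_PAGE_FAULT on the die chain. A minimal sketch of such a consumer, assuming the generic die-notifier API; my_fault_notify and my_fault_nb are hypothetical names:

#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>

/* Hypothetical die notifier: claim the fault while a kprobe is active
 * so do_page_fault() bails out before touching mm state. */
static int my_fault_notify(struct notifier_block *self, unsigned long val,
			   void *data)
{
	struct die_args *args = data;

	if (val == DIE_PAGE_FAULT && kprobe_running() &&
	    kprobe_fault_handler(args->regs, args->trapnr))
		return NOTIFY_STOP;	/* handled; skip the normal path */

	return NOTIFY_DONE;
}

static struct notifier_block my_fault_nb = {
	.notifier_call = my_fault_notify,
};

/* register_die_notifier(&my_fault_nb) from an init function. */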
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 127d732474bf..6a2b1bf9ef11 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -64,7 +64,7 @@ void *__kmap_atomic(struct page *page, enum km_type type)
}
EXPORT_SYMBOL(__kmap_atomic);
-void __kunmap_atomic(void *kvaddr, enum km_type type)
+void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
@@ -87,7 +87,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
pagefault_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(__kunmap_atomic_notypecheck);
/*
* This is the same as kmap_atomic() but can map memory that doesn't
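
The _notypecheck rename serves a generic wrapper introduced in the same series: kunmap_atomic() becomes a macro that rejects a struct page * argument at build time, catching callers that pass the page instead of the mapped virtual address. A sketch of that wrapper; the exact shape in linux/highmem.h may differ slightly:

/* Generic wrapper (sketch): the arch supplies the _notypecheck variant
 * and the header adds the compile-time type check. */
#define kunmap_atomic(addr, idx) do { \
		BUILD_BUG_ON(__same_type((addr), struct page *)); \
		kunmap_atomic_notypecheck((addr), (idx)); \
	} while (0)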
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index de69bfbf506e..1ef75cd80a0d 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -16,6 +16,7 @@
#include <asm/cacheops.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
+#include <asm/sections.h>
#include <asm/cacheflush.h> /* for run_uncached() */
/* Primary cache parameters. */
@@ -25,11 +26,15 @@
/* Secondary cache parameters. */
#define scache_size (256*1024) /* Fixed to 256KiB on RM7000 */
+/* Tertiary cache parameters */
+#define tc_lsize 32
+
extern unsigned long icache_way_size, dcache_way_size;
+unsigned long tcache_size;
#include <asm/r4kcache.h>
-static int rm7k_tcache_enabled;
+static int rm7k_tcache_init;
/*
* Writeback and invalidate the primary cache dcache before DMA.
@@ -46,7 +51,7 @@ static void rm7k_sc_wback_inv(unsigned long addr, unsigned long size)
blast_scache_range(addr, addr + size);
- if (!rm7k_tcache_enabled)
+ if (!rm7k_tcache_init)
return;
a = addr & ~(tc_pagesize - 1);
@@ -70,7 +75,7 @@ static void rm7k_sc_inv(unsigned long addr, unsigned long size)
blast_inv_scache_range(addr, addr + size);
- if (!rm7k_tcache_enabled)
+ if (!rm7k_tcache_init)
return;
a = addr & ~(tc_pagesize - 1);
@@ -83,6 +88,45 @@ static void rm7k_sc_inv(unsigned long addr, unsigned long size)
}
}
+static void blast_rm7k_tcache(void)
+{
+ unsigned long start = CKSEG0ADDR(0);
+ unsigned long end = start + tcache_size;
+
+ write_c0_taglo(0);
+
+ while (start < end) {
+ cache_op(Page_Invalidate_T, start);
+ start += tc_pagesize;
+ }
+}
+
+/*
+ * This function is executed in uncached address space.
+ */
+static __cpuinit void __rm7k_tc_enable(void)
+{
+ int i;
+
+ set_c0_config(RM7K_CONF_TE);
+
+ write_c0_taglo(0);
+ write_c0_taghi(0);
+
+ for (i = 0; i < tcache_size; i += tc_lsize)
+ cache_op(Index_Store_Tag_T, CKSEG0ADDR(i));
+}
+
+static __cpuinit void rm7k_tc_enable(void)
+{
+ if (read_c0_config() & RM7K_CONF_TE)
+ return;
+
+ BUG_ON(tcache_size == 0);
+
+ run_uncached(__rm7k_tc_enable);
+}
+
/*
* This function is executed in uncached address space.
*/
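
Both the tertiary and secondary enable paths run through run_uncached() because they flip enable bits of a cache the CPU may be fetching instructions through. A conceptual sketch of the trick; the real helper lives elsewhere in arch/mips, returns a value, and handles more address spaces:

#include <asm/addrspace.h>

/* Conceptual sketch of run_uncached(): re-enter the function through
 * its KSEG1 (uncached) alias so no cached fetches are in flight while
 * RM7K_CONF_SE/RM7K_CONF_TE are toggled. Assumes the code sits in
 * KSEG0. */
static void sketch_run_uncached(void (*func)(void))
{
	void (*uncached)(void) = (void (*)(void))CKSEG1ADDR((long)func);

	uncached();
}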
@@ -95,16 +139,8 @@ static __cpuinit void __rm7k_sc_enable(void)
write_c0_taglo(0);
write_c0_taghi(0);
- for (i = 0; i < scache_size; i += sc_lsize) {
- __asm__ __volatile__ (
- ".set noreorder\n\t"
- ".set mips3\n\t"
- "cache %1, (%0)\n\t"
- ".set mips0\n\t"
- ".set reorder"
- :
- : "r" (CKSEG0ADDR(i)), "i" (Index_Store_Tag_SD));
- }
+ for (i = 0; i < scache_size; i += sc_lsize)
+ cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i));
}
static __cpuinit void rm7k_sc_enable(void)
@@ -112,13 +148,29 @@ static __cpuinit void rm7k_sc_enable(void)
if (read_c0_config() & RM7K_CONF_SE)
return;
- printk(KERN_INFO "Enabling secondary cache...\n");
+ pr_info("Enabling secondary cache...\n");
run_uncached(__rm7k_sc_enable);
+
+ if (rm7k_tcache_init)
+ rm7k_tc_enable();
+}
+
+static void rm7k_tc_disable(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ blast_rm7k_tcache();
+ clear_c0_config(RM7K_CONF_TE);
+ local_irq_restore(flags);
}
static void rm7k_sc_disable(void)
{
clear_c0_config(RM7K_CONF_SE);
+
+ if (rm7k_tcache_init)
+ rm7k_tc_disable();
}
static struct bcache_ops rm7k_sc_ops = {
@@ -128,6 +180,52 @@ static struct bcache_ops rm7k_sc_ops = {
.bc_inv = rm7k_sc_inv
};
+/*
+ * This is a probing function like the one found in c-r4k.c, we look for the
+ * wrap around point with different addresses.
+ */
+static __cpuinit void __probe_tcache(void)
+{
+ unsigned long flags, addr, begin, end, pow2;
+
+ begin = (unsigned long) &_stext;
+ begin &= ~((8 * 1024 * 1024) - 1);
+ end = begin + (8 * 1024 * 1024);
+
+ local_irq_save(flags);
+
+ set_c0_config(RM7K_CONF_TE);
+
+ /* Fill size-multiple lines with a valid tag */
+ pow2 = (256 * 1024);
+ for (addr = begin; addr <= end; addr = (begin + pow2)) {
+ unsigned long *p = (unsigned long *) addr;
+ __asm__ __volatile__("nop" : : "r" (*p));
+ pow2 <<= 1;
+ }
+
+ /* Load first line with a 0 tag, to check after */
+ write_c0_taglo(0);
+ write_c0_taghi(0);
+ cache_op(Index_Store_Tag_T, begin);
+
+ /* Look for the wrap-around */
+ pow2 = (512 * 1024);
+ for (addr = begin + (512 * 1024); addr <= end; addr = begin + pow2) {
+ cache_op(Index_Load_Tag_T, addr);
+ if (!read_c0_taglo())
+ break;
+ pow2 <<= 1;
+ }
+
+ addr -= begin;
+ tcache_size = addr;
+
+ clear_c0_config(RM7K_CONF_TE);
+
+ local_irq_restore(flags);
+}
+
void __cpuinit rm7k_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -147,27 +245,26 @@ void __cpuinit rm7k_sc_init(void)
if (!(config & RM7K_CONF_SE))
rm7k_sc_enable();
+ bcops = &rm7k_sc_ops;
+
/*
* While we're at it, let's deal with the tertiary cache.
*/
- if (!(config & RM7K_CONF_TC)) {
-
- /*
- * We can't enable the L3 cache yet. There may be board-specific
- * magic necessary to turn it on, and blindly asking the CPU to
- * start using it may give cache errors.
- *
- * Also, board-specific knowledge may allow us to use the
- * CACHE Flash_Invalidate_T instruction if the tag RAM supports
- * it, and may specify the size of the L3 cache so we don't have
- * to probe it.
- */
- printk(KERN_INFO "Tertiary cache present, %s enabled\n",
- (config & RM7K_CONF_TE) ? "already" : "not (yet)");
-
- if ((config & RM7K_CONF_TE))
- rm7k_tcache_enabled = 1;
- }
- bcops = &rm7k_sc_ops;
+ rm7k_tcache_init = 0;
+ tcache_size = 0;
+
+ if (config & RM7K_CONF_TC)
+ return;
+
+ /*
+ * There is no efficient way to ask the hardware for the size of the
+ * tcache, so we must probe for it.
+ */
+ run_uncached(__probe_tcache);
+ rm7k_tc_enable();
+ rm7k_tcache_init = 1;
+ c->tcache.linesz = tc_lsize;
+ c->tcache.ways = 1;
+ pr_info("Tertiary cache size %ldK.\n", (tcache_size >> 10));
}
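
The probe in __probe_tcache() relies on direct-mapped indexing wrapping modulo the cache size: after planting a zero tag at begin, the first power-of-two offset whose Index_Load_Tag_T readback is zero indexes the same line again, and that offset is the tcache size. A hardware-free illustration of the sizing loop; aliases_line0 is a hypothetical stand-in for the tag readback:

/* Illustration only: double the offset from 512 KiB up to the 8 MiB
 * probe window until it indexes the same line as offset 0; the first
 * aliasing offset equals the cache size. */
static unsigned long tcache_size_from_alias(int (*aliases_line0)(unsigned long))
{
	unsigned long pow2;

	for (pow2 = 512 * 1024; pow2 <= 8 * 1024 * 1024; pow2 <<= 1)
		if (aliases_line0(pow2))
			return pow2;	/* wrap-around found */

	return 0;	/* no tertiary cache detected */
}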
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 86f004dc8355..4510e61883eb 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -409,6 +409,11 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
tlbw(p);
break;
+ case CPU_JZRISC:
+ tlbw(p);
+ uasm_i_nop(p);
+ break;
+
default:
panic("No TLB refill handler yet (CPU type: %d)",
current_cpu_data.cputype);
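
A hedged reading of the new CPU_JZRISC case: tlbw() resolves to tlbwr on the refill path (tlbwi for wired writes), and the extra nop appears intended to keep the next instruction clear of a TLB-write hazard on Ingenic JZ47xx cores. Sketch of the emitted sequence; emit_jzrisc_tlb_write is a hypothetical wrapper:

#include <asm/uasm.h>

/* Hypothetical expansion for the refill (random-replacement) case;
 * p points into the handler being assembled. */
static void emit_jzrisc_tlb_write(u32 **p)
{
	uasm_i_tlbwr(p);	/* what tlbw(p) emits here */
	uasm_i_nop(p);		/* assumed hazard padding for JZ47xx */
}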
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 611d564fdcf1..d2647a4e012b 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -62,12 +62,13 @@ enum opcode {
insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
insn_bne, insn_cache, insn_daddu, insn_daddiu, insn_dmfc0,
insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
- insn_dsrl32, insn_drotr, insn_dsubu, insn_eret, insn_j, insn_jal,
- insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0,
- insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
- insn_sd, insn_sll, insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw,
- insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
- insn_dins, insn_syscall
+ insn_dsrl32, insn_drotr, insn_drotr32, insn_dsubu, insn_eret,
+ insn_j, insn_jal, insn_jr, insn_ld, insn_ll, insn_lld,
+ insn_lui, insn_lw, insn_mfc0, insn_mtc0, insn_or, insn_ori,
+ insn_pref, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll,
+ insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw, insn_tlbp,
+ insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
+ insn_dins, insn_syscall, insn_bbit0, insn_bbit1
};
struct insn {
@@ -85,7 +86,7 @@ struct insn {
| (e) << RE_SH \
| (f) << FUNC_SH)
-static struct insn insn_table[] __cpuinitdata = {
+static struct insn insn_table[] __uasminitdata = {
{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
{ insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
@@ -108,6 +109,7 @@ static struct insn insn_table[] __cpuinitdata = {
{ insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
{ insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
{ insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
+ { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
{ insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
{ insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
@@ -141,12 +143,14 @@ static struct insn insn_table[] __cpuinitdata = {
{ insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
{ insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
{ insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
+ { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
{ insn_invalid, 0, 0 }
};
#undef M
-static inline __cpuinit u32 build_rs(u32 arg)
+static inline __uasminit u32 build_rs(u32 arg)
{
if (arg & ~RS_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -154,7 +158,7 @@ static inline __cpuinit u32 build_rs(u32 arg)
return (arg & RS_MASK) << RS_SH;
}
-static inline __cpuinit u32 build_rt(u32 arg)
+static inline __uasminit u32 build_rt(u32 arg)
{
if (arg & ~RT_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -162,7 +166,7 @@ static inline __cpuinit u32 build_rt(u32 arg)
return (arg & RT_MASK) << RT_SH;
}
-static inline __cpuinit u32 build_rd(u32 arg)
+static inline __uasminit u32 build_rd(u32 arg)
{
if (arg & ~RD_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -170,7 +174,7 @@ static inline __cpuinit u32 build_rd(u32 arg)
return (arg & RD_MASK) << RD_SH;
}
-static inline __cpuinit u32 build_re(u32 arg)
+static inline __uasminit u32 build_re(u32 arg)
{
if (arg & ~RE_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -178,7 +182,7 @@ static inline __cpuinit u32 build_re(u32 arg)
return (arg & RE_MASK) << RE_SH;
}
-static inline __cpuinit u32 build_simm(s32 arg)
+static inline __uasminit u32 build_simm(s32 arg)
{
if (arg > 0x7fff || arg < -0x8000)
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -186,7 +190,7 @@ static inline __cpuinit u32 build_simm(s32 arg)
return arg & 0xffff;
}
-static inline __cpuinit u32 build_uimm(u32 arg)
+static inline __uasminit u32 build_uimm(u32 arg)
{
if (arg & ~IMM_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -194,7 +198,7 @@ static inline __cpuinit u32 build_uimm(u32 arg)
return arg & IMM_MASK;
}
-static inline __cpuinit u32 build_bimm(s32 arg)
+static inline __uasminit u32 build_bimm(s32 arg)
{
if (arg > 0x1ffff || arg < -0x20000)
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -205,7 +209,7 @@ static inline __cpuinit u32 build_bimm(s32 arg)
return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
}
-static inline __cpuinit u32 build_jimm(u32 arg)
+static inline __uasminit u32 build_jimm(u32 arg)
{
if (arg & ~((JIMM_MASK) << 2))
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -213,7 +217,7 @@ static inline __cpuinit u32 build_jimm(u32 arg)
return (arg >> 2) & JIMM_MASK;
}
-static inline __cpuinit u32 build_scimm(u32 arg)
+static inline __uasminit u32 build_scimm(u32 arg)
{
if (arg & ~SCIMM_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -221,7 +225,7 @@ static inline __cpuinit u32 build_scimm(u32 arg)
return (arg & SCIMM_MASK) << SCIMM_SH;
}
-static inline __cpuinit u32 build_func(u32 arg)
+static inline __uasminit u32 build_func(u32 arg)
{
if (arg & ~FUNC_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -229,7 +233,7 @@ static inline __cpuinit u32 build_func(u32 arg)
return arg & FUNC_MASK;
}
-static inline __cpuinit u32 build_set(u32 arg)
+static inline __uasminit u32 build_set(u32 arg)
{
if (arg & ~SET_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -241,7 +245,7 @@ static inline __cpuinit u32 build_set(u32 arg)
* The order of opcode arguments is implicitly left to right,
* starting with RS and ending with FUNC or IMM.
*/
-static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...)
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
{
struct insn *ip = NULL;
unsigned int i;
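
The left-to-right argument convention above is what the Ip_*/I_* macro pairs encode into the generated uasm_i_* emitters. A hedged usage sketch; the registers, immediate, and scratch buffer are arbitrary:

#include <asm/uasm.h>

/* Hypothetical fragment: emit "addiu k0, k1, 8", "jr k0" and a
 * delay-slot nop into a scratch buffer; each call advances p. */
static u32 scratch[8];

static void emit_demo(void)
{
	u32 *p = scratch;

	uasm_i_addiu(&p, 26, 27, 8);	/* k0 = k1 + 8 (rt, rs, imm) */
	uasm_i_jr(&p, 26);		/* jump through k0 */
	uasm_i_nop(&p);			/* branch delay slot */
}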
@@ -291,67 +295,78 @@ static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...)
Ip_u1u2u3(op) \
{ \
build_insn(buf, insn##op, a, b, c); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u2u1u3(op) \
Ip_u2u1u3(op) \
{ \
build_insn(buf, insn##op, b, a, c); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u3u1u2(op) \
Ip_u3u1u2(op) \
{ \
build_insn(buf, insn##op, b, c, a); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u1u2s3(op) \
Ip_u1u2s3(op) \
{ \
build_insn(buf, insn##op, a, b, c); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u2s3u1(op) \
Ip_u2s3u1(op) \
{ \
build_insn(buf, insn##op, c, a, b); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u2u1s3(op) \
Ip_u2u1s3(op) \
{ \
build_insn(buf, insn##op, b, a, c); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u2u1msbu3(op) \
Ip_u2u1msbu3(op) \
{ \
build_insn(buf, insn##op, b, a, c+d-1, c); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u1u2(op) \
Ip_u1u2(op) \
{ \
build_insn(buf, insn##op, a, b); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u1s2(op) \
Ip_u1s2(op) \
{ \
build_insn(buf, insn##op, a, b); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_u1(op) \
Ip_u1(op) \
{ \
build_insn(buf, insn##op, a); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
#define I_0(op) \
Ip_0(op) \
{ \
build_insn(buf, insn##op); \
-}
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
I_u2u1s3(_addiu)
I_u3u1u2(_addu)
@@ -375,6 +390,7 @@ I_u2u1u3(_dsra)
I_u2u1u3(_dsrl)
I_u2u1u3(_dsrl32)
I_u2u1u3(_drotr)
+I_u2u1u3(_drotr32)
I_u3u1u2(_dsubu)
I_0(_eret)
I_u1(_j)
@@ -408,16 +424,19 @@ I_u3u1u2(_xor)
I_u2u1u3(_xori)
I_u2u1msbu3(_dins);
I_u1(_syscall);
+I_u1u2s3(_bbit0);
+I_u1u2s3(_bbit1);
/* Handle labels. */
-void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
+void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
{
(*lab)->addr = addr;
(*lab)->lab = lid;
(*lab)++;
}
+UASM_EXPORT_SYMBOL(uasm_build_label);
-int __cpuinit uasm_in_compat_space_p(long addr)
+int __uasminit uasm_in_compat_space_p(long addr)
{
/* Is this address in 32bit compat space? */
#ifdef CONFIG_64BIT
@@ -426,8 +445,9 @@ int __cpuinit uasm_in_compat_space_p(long addr)
return 1;
#endif
}
+UASM_EXPORT_SYMBOL(uasm_in_compat_space_p);
-static int __cpuinit uasm_rel_highest(long val)
+static int __uasminit uasm_rel_highest(long val)
{
#ifdef CONFIG_64BIT
return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
@@ -436,7 +456,7 @@ static int __cpuinit uasm_rel_highest(long val)
#endif
}
-static int __cpuinit uasm_rel_higher(long val)
+static int __uasminit uasm_rel_higher(long val)
{
#ifdef CONFIG_64BIT
return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
@@ -445,17 +465,19 @@ static int __cpuinit uasm_rel_higher(long val)
#endif
}
-int __cpuinit uasm_rel_hi(long val)
+int __uasminit uasm_rel_hi(long val)
{
return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}
+UASM_EXPORT_SYMBOL(uasm_rel_hi);
-int __cpuinit uasm_rel_lo(long val)
+int __uasminit uasm_rel_lo(long val)
{
return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
+UASM_EXPORT_SYMBOL(uasm_rel_lo);
-void __cpuinit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
+void __uasminit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
if (!uasm_in_compat_space_p(addr)) {
uasm_i_lui(buf, rs, uasm_rel_highest(addr));
@@ -470,8 +492,9 @@ void __cpuinit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
} else
uasm_i_lui(buf, rs, uasm_rel_hi(addr));
}
+UASM_EXPORT_SYMBOL(UASM_i_LA_mostly);
-void __cpuinit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
+void __uasminit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
{
UASM_i_LA_mostly(buf, rs, addr);
if (uasm_rel_lo(addr)) {
@@ -481,9 +504,10 @@ void __cpuinit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr));
}
}
+UASM_EXPORT_SYMBOL(UASM_i_LA);
/* Handle relocations. */
-void __cpuinit
+void __uasminit
uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
{
(*rel)->addr = addr;
@@ -491,8 +515,9 @@ uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
(*rel)->lab = lid;
(*rel)++;
}
+UASM_EXPORT_SYMBOL(uasm_r_mips_pc16);
-static inline void __cpuinit
+static inline void __uasminit
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
long laddr = (long)lab->addr;
@@ -509,7 +534,7 @@ __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
}
}
-void __cpuinit
+void __uasminit
uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
struct uasm_label *l;
@@ -519,24 +544,27 @@ uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
if (rel->lab == l->lab)
__resolve_relocs(rel, l);
}
+UASM_EXPORT_SYMBOL(uasm_resolve_relocs);
-void __cpuinit
+void __uasminit
uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++)
if (rel->addr >= first && rel->addr < end)
rel->addr += off;
}
+UASM_EXPORT_SYMBOL(uasm_move_relocs);
-void __cpuinit
+void __uasminit
uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
{
for (; lab->lab != UASM_LABEL_INVALID; lab++)
if (lab->addr >= first && lab->addr < end)
lab->addr += off;
}
+UASM_EXPORT_SYMBOL(uasm_move_labels);
-void __cpuinit
+void __uasminit
uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
u32 *end, u32 *target)
{
@@ -547,8 +575,9 @@ uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
uasm_move_relocs(rel, first, end, off);
uasm_move_labels(lab, first, end, off);
}
+UASM_EXPORT_SYMBOL(uasm_copy_handler);
-int __cpuinit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
+int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++) {
if (rel->addr == addr
@@ -559,61 +588,88 @@ int __cpuinit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
return 0;
}
+UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay);
/* Convenience functions for labeled branches. */
-void __cpuinit
+void __uasminit
uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bltz(p, reg, 0);
}
+UASM_EXPORT_SYMBOL(uasm_il_bltz);
-void __cpuinit
+void __uasminit
uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_b(p, 0);
}
+UASM_EXPORT_SYMBOL(uasm_il_b);
-void __cpuinit
+void __uasminit
uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_beqz(p, reg, 0);
}
+UASM_EXPORT_SYMBOL(uasm_il_beqz);
-void __cpuinit
+void __uasminit
uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_beqzl(p, reg, 0);
}
+UASM_EXPORT_SYMBOL(uasm_il_beqzl);
-void __cpuinit
+void __uasminit
uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
unsigned int reg2, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bne(p, reg1, reg2, 0);
}
+UASM_EXPORT_SYMBOL(uasm_il_bne);
-void __cpuinit
+void __uasminit
uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bnez(p, reg, 0);
}
+UASM_EXPORT_SYMBOL(uasm_il_bnez);
-void __cpuinit
+void __uasminit
uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bgezl(p, reg, 0);
}
+UASM_EXPORT_SYMBOL(uasm_il_bgezl);
-void __cpuinit
+void __uasminit
uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bgez(p, reg, 0);
}
+UASM_EXPORT_SYMBOL(uasm_il_bgez);
+
+void __uasminit
+uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ unsigned int bit, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_bbit0(p, reg, bit, 0);
+}
+UASM_EXPORT_SYMBOL(uasm_il_bbit0);
+
+void __uasminit
+uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ unsigned int bit, int lid)
+{
+ uasm_r_mips_pc16(r, *p, lid);
+ uasm_i_bbit1(p, reg, bit, 0);
+}
+UASM_EXPORT_SYMBOL(uasm_il_bbit1);
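
To close, a hedged sketch of the new labeled branch-on-bit emitters together with the label/relocation machinery this patch starts exporting. bbit0/bbit1 are Cavium Octeon instructions; the label id, registers, and function name are arbitrary:

#include <asm/uasm.h>

/* Hypothetical fragment: skip a block when bit 5 of k0 is clear.
 * uasm_il_bbit0() records a relocation, uasm_build_label() plants the
 * target, and uasm_resolve_relocs() patches the branch offset. The
 * static arrays are zero-initialized, which terminates the lists at
 * UASM_LABEL_INVALID. */
static struct uasm_label labels[4];
static struct uasm_reloc relocs[4];

static void build_fragment(u32 **p)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	uasm_il_bbit0(p, &r, 26, 5, 1);	/* if (!(k0 & (1 << 5))) goto 1 */
	uasm_i_nop(p);			/* delay slot */
	/* ... code executed only when bit 5 is set ... */
	uasm_build_label(&l, *p, 1);	/* label 1 resolves here */

	uasm_resolve_relocs(relocs, labels);
}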