From f5c3c22f21b6a002e371afdcc9180a2fa47dc267 Mon Sep 17 00:00:00 2001
From: WANG Xuerui
Date: Tue, 26 Jul 2022 23:57:22 +0800
Subject: LoongArch: Re-tab the assembly files

Reflow the *.S files for better stylistic consistency, namely hard tabs
after mnemonic position, and vertical alignment of the first operand
with hard tabs. Tab width is obviously 8. Some pre-existing intra-block
vertical alignments are preserved.

Signed-off-by: WANG Xuerui
Signed-off-by: Huacai Chen
---
 arch/loongarch/kernel/fpu.S | 170 ++++++++++++++++++++++----------------------
 1 file changed, 85 insertions(+), 85 deletions(-)

(limited to 'arch/loongarch/kernel/fpu.S')

diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
index e14f096d40bd..576b3370a296 100644
--- a/arch/loongarch/kernel/fpu.S
+++ b/arch/loongarch/kernel/fpu.S
@@ -27,78 +27,78 @@
 	.endm
 
 	.macro sc_save_fp base
-	EX	fst.d $f0, \base, (0 * FPU_REG_WIDTH)
-	EX	fst.d $f1, \base, (1 * FPU_REG_WIDTH)
-	EX	fst.d $f2, \base, (2 * FPU_REG_WIDTH)
-	EX	fst.d $f3, \base, (3 * FPU_REG_WIDTH)
-	EX	fst.d $f4, \base, (4 * FPU_REG_WIDTH)
-	EX	fst.d $f5, \base, (5 * FPU_REG_WIDTH)
-	EX	fst.d $f6, \base, (6 * FPU_REG_WIDTH)
-	EX	fst.d $f7, \base, (7 * FPU_REG_WIDTH)
-	EX	fst.d $f8, \base, (8 * FPU_REG_WIDTH)
-	EX	fst.d $f9, \base, (9 * FPU_REG_WIDTH)
-	EX	fst.d $f10, \base, (10 * FPU_REG_WIDTH)
-	EX	fst.d $f11, \base, (11 * FPU_REG_WIDTH)
-	EX	fst.d $f12, \base, (12 * FPU_REG_WIDTH)
-	EX	fst.d $f13, \base, (13 * FPU_REG_WIDTH)
-	EX	fst.d $f14, \base, (14 * FPU_REG_WIDTH)
-	EX	fst.d $f15, \base, (15 * FPU_REG_WIDTH)
-	EX	fst.d $f16, \base, (16 * FPU_REG_WIDTH)
-	EX	fst.d $f17, \base, (17 * FPU_REG_WIDTH)
-	EX	fst.d $f18, \base, (18 * FPU_REG_WIDTH)
-	EX	fst.d $f19, \base, (19 * FPU_REG_WIDTH)
-	EX	fst.d $f20, \base, (20 * FPU_REG_WIDTH)
-	EX	fst.d $f21, \base, (21 * FPU_REG_WIDTH)
-	EX	fst.d $f22, \base, (22 * FPU_REG_WIDTH)
-	EX	fst.d $f23, \base, (23 * FPU_REG_WIDTH)
-	EX	fst.d $f24, \base, (24 * FPU_REG_WIDTH)
-	EX	fst.d $f25, \base, (25 * FPU_REG_WIDTH)
-	EX	fst.d $f26, \base, (26 * FPU_REG_WIDTH)
-	EX	fst.d $f27, \base, (27 * FPU_REG_WIDTH)
-	EX	fst.d $f28, \base, (28 * FPU_REG_WIDTH)
-	EX	fst.d $f29, \base, (29 * FPU_REG_WIDTH)
-	EX	fst.d $f30, \base, (30 * FPU_REG_WIDTH)
-	EX	fst.d $f31, \base, (31 * FPU_REG_WIDTH)
+	EX	fst.d	$f0, \base, (0 * FPU_REG_WIDTH)
+	EX	fst.d	$f1, \base, (1 * FPU_REG_WIDTH)
+	EX	fst.d	$f2, \base, (2 * FPU_REG_WIDTH)
+	EX	fst.d	$f3, \base, (3 * FPU_REG_WIDTH)
+	EX	fst.d	$f4, \base, (4 * FPU_REG_WIDTH)
+	EX	fst.d	$f5, \base, (5 * FPU_REG_WIDTH)
+	EX	fst.d	$f6, \base, (6 * FPU_REG_WIDTH)
+	EX	fst.d	$f7, \base, (7 * FPU_REG_WIDTH)
+	EX	fst.d	$f8, \base, (8 * FPU_REG_WIDTH)
+	EX	fst.d	$f9, \base, (9 * FPU_REG_WIDTH)
+	EX	fst.d	$f10, \base, (10 * FPU_REG_WIDTH)
+	EX	fst.d	$f11, \base, (11 * FPU_REG_WIDTH)
+	EX	fst.d	$f12, \base, (12 * FPU_REG_WIDTH)
+	EX	fst.d	$f13, \base, (13 * FPU_REG_WIDTH)
+	EX	fst.d	$f14, \base, (14 * FPU_REG_WIDTH)
+	EX	fst.d	$f15, \base, (15 * FPU_REG_WIDTH)
+	EX	fst.d	$f16, \base, (16 * FPU_REG_WIDTH)
+	EX	fst.d	$f17, \base, (17 * FPU_REG_WIDTH)
+	EX	fst.d	$f18, \base, (18 * FPU_REG_WIDTH)
+	EX	fst.d	$f19, \base, (19 * FPU_REG_WIDTH)
+	EX	fst.d	$f20, \base, (20 * FPU_REG_WIDTH)
+	EX	fst.d	$f21, \base, (21 * FPU_REG_WIDTH)
+	EX	fst.d	$f22, \base, (22 * FPU_REG_WIDTH)
+	EX	fst.d	$f23, \base, (23 * FPU_REG_WIDTH)
+	EX	fst.d	$f24, \base, (24 * FPU_REG_WIDTH)
+	EX	fst.d	$f25, \base, (25 * FPU_REG_WIDTH)
+	EX	fst.d	$f26, \base, (26 * FPU_REG_WIDTH)
+	EX	fst.d	$f27, \base, (27 * FPU_REG_WIDTH)
+	EX	fst.d	$f28, \base, (28 * FPU_REG_WIDTH)
+	EX	fst.d	$f29, \base, (29 * FPU_REG_WIDTH)
+	EX	fst.d	$f30, \base, (30 * FPU_REG_WIDTH)
+	EX	fst.d	$f31, \base, (31 * FPU_REG_WIDTH)
 	.endm
 
 	.macro sc_restore_fp base
-	EX	fld.d $f0, \base, (0 * FPU_REG_WIDTH)
-	EX	fld.d $f1, \base, (1 * FPU_REG_WIDTH)
-	EX	fld.d $f2, \base, (2 * FPU_REG_WIDTH)
-	EX	fld.d $f3, \base, (3 * FPU_REG_WIDTH)
-	EX	fld.d $f4, \base, (4 * FPU_REG_WIDTH)
-	EX	fld.d $f5, \base, (5 * FPU_REG_WIDTH)
-	EX	fld.d $f6, \base, (6 * FPU_REG_WIDTH)
-	EX	fld.d $f7, \base, (7 * FPU_REG_WIDTH)
-	EX	fld.d $f8, \base, (8 * FPU_REG_WIDTH)
-	EX	fld.d $f9, \base, (9 * FPU_REG_WIDTH)
-	EX	fld.d $f10, \base, (10 * FPU_REG_WIDTH)
-	EX	fld.d $f11, \base, (11 * FPU_REG_WIDTH)
-	EX	fld.d $f12, \base, (12 * FPU_REG_WIDTH)
-	EX	fld.d $f13, \base, (13 * FPU_REG_WIDTH)
-	EX	fld.d $f14, \base, (14 * FPU_REG_WIDTH)
-	EX	fld.d $f15, \base, (15 * FPU_REG_WIDTH)
-	EX	fld.d $f16, \base, (16 * FPU_REG_WIDTH)
-	EX	fld.d $f17, \base, (17 * FPU_REG_WIDTH)
-	EX	fld.d $f18, \base, (18 * FPU_REG_WIDTH)
-	EX	fld.d $f19, \base, (19 * FPU_REG_WIDTH)
-	EX	fld.d $f20, \base, (20 * FPU_REG_WIDTH)
-	EX	fld.d $f21, \base, (21 * FPU_REG_WIDTH)
-	EX	fld.d $f22, \base, (22 * FPU_REG_WIDTH)
-	EX	fld.d $f23, \base, (23 * FPU_REG_WIDTH)
-	EX	fld.d $f24, \base, (24 * FPU_REG_WIDTH)
-	EX	fld.d $f25, \base, (25 * FPU_REG_WIDTH)
-	EX	fld.d $f26, \base, (26 * FPU_REG_WIDTH)
-	EX	fld.d $f27, \base, (27 * FPU_REG_WIDTH)
-	EX	fld.d $f28, \base, (28 * FPU_REG_WIDTH)
-	EX	fld.d $f29, \base, (29 * FPU_REG_WIDTH)
-	EX	fld.d $f30, \base, (30 * FPU_REG_WIDTH)
-	EX	fld.d $f31, \base, (31 * FPU_REG_WIDTH)
+	EX	fld.d	$f0, \base, (0 * FPU_REG_WIDTH)
+	EX	fld.d	$f1, \base, (1 * FPU_REG_WIDTH)
+	EX	fld.d	$f2, \base, (2 * FPU_REG_WIDTH)
+	EX	fld.d	$f3, \base, (3 * FPU_REG_WIDTH)
+	EX	fld.d	$f4, \base, (4 * FPU_REG_WIDTH)
+	EX	fld.d	$f5, \base, (5 * FPU_REG_WIDTH)
+	EX	fld.d	$f6, \base, (6 * FPU_REG_WIDTH)
+	EX	fld.d	$f7, \base, (7 * FPU_REG_WIDTH)
+	EX	fld.d	$f8, \base, (8 * FPU_REG_WIDTH)
+	EX	fld.d	$f9, \base, (9 * FPU_REG_WIDTH)
+	EX	fld.d	$f10, \base, (10 * FPU_REG_WIDTH)
+	EX	fld.d	$f11, \base, (11 * FPU_REG_WIDTH)
+	EX	fld.d	$f12, \base, (12 * FPU_REG_WIDTH)
+	EX	fld.d	$f13, \base, (13 * FPU_REG_WIDTH)
+	EX	fld.d	$f14, \base, (14 * FPU_REG_WIDTH)
+	EX	fld.d	$f15, \base, (15 * FPU_REG_WIDTH)
+	EX	fld.d	$f16, \base, (16 * FPU_REG_WIDTH)
+	EX	fld.d	$f17, \base, (17 * FPU_REG_WIDTH)
+	EX	fld.d	$f18, \base, (18 * FPU_REG_WIDTH)
+	EX	fld.d	$f19, \base, (19 * FPU_REG_WIDTH)
+	EX	fld.d	$f20, \base, (20 * FPU_REG_WIDTH)
+	EX	fld.d	$f21, \base, (21 * FPU_REG_WIDTH)
+	EX	fld.d	$f22, \base, (22 * FPU_REG_WIDTH)
+	EX	fld.d	$f23, \base, (23 * FPU_REG_WIDTH)
+	EX	fld.d	$f24, \base, (24 * FPU_REG_WIDTH)
+	EX	fld.d	$f25, \base, (25 * FPU_REG_WIDTH)
+	EX	fld.d	$f26, \base, (26 * FPU_REG_WIDTH)
+	EX	fld.d	$f27, \base, (27 * FPU_REG_WIDTH)
+	EX	fld.d	$f28, \base, (28 * FPU_REG_WIDTH)
+	EX	fld.d	$f29, \base, (29 * FPU_REG_WIDTH)
+	EX	fld.d	$f30, \base, (30 * FPU_REG_WIDTH)
+	EX	fld.d	$f31, \base, (31 * FPU_REG_WIDTH)
 	.endm
 
 	.macro sc_save_fcc base, tmp0, tmp1
 	movcf2gr	\tmp0, $fcc0
-	move	\tmp1, \tmp0
+	move		\tmp1, \tmp0
 	movcf2gr	\tmp0, $fcc1
 	bstrins.d	\tmp1, \tmp0, 15, 8
 	movcf2gr	\tmp0, $fcc2
@@ -113,11 +113,11 @@
 	bstrins.d	\tmp1, \tmp0, 55, 48
 	movcf2gr	\tmp0, $fcc7
 	bstrins.d	\tmp1, \tmp0, 63, 56
-	EX	st.d \tmp1, \base, 0
+	EX	st.d	\tmp1, \base, 0
 	.endm
 
 	.macro sc_restore_fcc base, tmp0, tmp1
-	EX	ld.d \tmp0, \base, 0
+	EX	ld.d	\tmp0, \base, 0
 	bstrpick.d	\tmp1, \tmp0, 7, 0
 	movgr2cf	$fcc0, \tmp1
 	bstrpick.d	\tmp1, \tmp0, 15, 8
@@ -138,11 +138,11 @@
 
 	.macro sc_save_fcsr base, tmp0
 	movfcsr2gr	\tmp0, fcsr0
-	EX	st.w \tmp0, \base, 0
+	EX	st.w	\tmp0, \base, 0
 	.endm
 
 	.macro sc_restore_fcsr base, tmp0
-	EX	ld.w \tmp0, \base, 0
+	EX	ld.w	\tmp0, \base, 0
 	movgr2fcsr	fcsr0, \tmp0
 	.endm
 
@@ -151,9 +151,9 @@
  */
 SYM_FUNC_START(_save_fp)
 	fpu_save_csr	a0 t1
-	fpu_save_double a0 t1		# clobbers t1
+	fpu_save_double	a0 t1		# clobbers t1
 	fpu_save_cc	a0 t1 t2	# clobbers t1, t2
-	jr	ra
+	jr		ra
SYM_FUNC_END(_save_fp)
 
 EXPORT_SYMBOL(_save_fp)
@@ -161,10 +161,10 @@ EXPORT_SYMBOL(_save_fp)
  * Restore a thread's fp context.
  */
 SYM_FUNC_START(_restore_fp)
-	fpu_restore_double a0 t1	# clobbers t1
-	fpu_restore_csr a0 t1
-	fpu_restore_cc	a0 t1 t2	# clobbers t1, t2
-	jr	ra
+	fpu_restore_double	a0 t1		# clobbers t1
+	fpu_restore_csr		a0 t1
+	fpu_restore_cc		a0 t1 t2	# clobbers t1, t2
+	jr			ra
 SYM_FUNC_END(_restore_fp)
 
 /*
@@ -225,11 +225,11 @@ SYM_FUNC_END(_init_fpu)
  * a2: fcsr
  */
 SYM_FUNC_START(_save_fp_context)
-	sc_save_fcc a1 t1 t2
-	sc_save_fcsr a2 t1
-	sc_save_fp a0
-	li.w	a0, 0		# success
-	jr	ra
+	sc_save_fcc	a1 t1 t2
+	sc_save_fcsr	a2 t1
+	sc_save_fp	a0
+	li.w		a0, 0		# success
+	jr		ra
 SYM_FUNC_END(_save_fp_context)
 
 /*
@@ -238,11 +238,11 @@ SYM_FUNC_END(_save_fp_context)
  * a2: fcsr
  */
 SYM_FUNC_START(_restore_fp_context)
-	sc_restore_fp a0
-	sc_restore_fcc a1 t1 t2
-	sc_restore_fcsr a2 t1
-	li.w	a0, 0		# success
-	jr	ra
+	sc_restore_fp	a0
+	sc_restore_fcc	a1 t1 t2
+	sc_restore_fcsr	a2 t1
+	li.w		a0, 0		# success
+	jr		ra
 SYM_FUNC_END(_restore_fp_context)
 
 SYM_FUNC_START(fault)
--
cgit v1.2.3