author		Heiko Carstens <hca@linux.ibm.com>	2024-02-03 13:45:07 +0300
committer	Heiko Carstens <hca@linux.ibm.com>	2024-02-16 16:30:15 +0300
commit		3a5866a001e83e1aa143fc0aeba0248247483962 (patch)
tree		9439898f3b6e59c909d28315e5af297b4817bc13 /arch/s390/include/asm/fpu.h
parent		f4e3de75d0c4ebe9bbbfef19d7845ee70cb017bd (diff)
download	linux-3a5866a001e83e1aa143fc0aeba0248247483962.tar.xz
s390/fpu: provide and use vlm and vstm inline assemblies
Instead of open-coding vlm and vstm inline assemblies at several locations, provide an fpu_* function for each instruction, and use them in the new save_vx_regs() and load_vx_regs() helper functions.

Note that "O" and "R" inline assembly operand modifiers are used in order to pass the displacement and base register of the memory operands to the existing VLM and VSTM macros. The two operand modifiers are not available for clang. Therefore provide two variants of each inline assembly.

The clang variant always uses and clobbers general purpose register 1, like in the previous inline assemblies, so it can be used as base register with a zero displacement. This generates slightly less efficient code, but can be removed as soon as clang has support for the used operand modifiers.

Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
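For illustration, a minimal sketch of what the two fpu_vstm() variants described above could look like. The fpu_vstm() name, the VSTM assembler macro, the "O"/"R" operand modifiers, and the clang variant's use of general purpose register 1 come from the commit message; the exact constraint strings, the CONFIG_CC_IS_CLANG switch, and the struct cast used to size the memory operand are assumptions, since the real definitions live in a header that is not part of this hunk.

#ifdef CONFIG_CC_IS_CLANG

/* clang: the "O"/"R" operand modifiers are not available, so load the
 * address into GPR 1 and use it as base register with zero displacement.
 */
#define fpu_vstm(_v1, _v3, _vxrs)					\
({									\
	struct {							\
		__vector128 _v[(_v3) - (_v1) + 1];			\
	} *_v = (void *)(_vxrs);					\
									\
	asm volatile(							\
		"	la	1,%[vxrs]\n"				\
		"	VSTM	%[v1],%[v3],0,1\n"			\
		: [vxrs] "=R" (*_v)					\
		: [v1] "I" (_v1), [v3] "I" (_v3)			\
		: "memory", "1");					\
})

#else /* CONFIG_CC_IS_CLANG */

/* gcc: "%O"/"%R" expand to the displacement and base register of the
 * "Q" memory operand, so no extra register is clobbered.
 */
#define fpu_vstm(_v1, _v3, _vxrs)					\
({									\
	struct {							\
		__vector128 _v[(_v3) - (_v1) + 1];			\
	} *_v = (void *)(_vxrs);					\
									\
	asm volatile(							\
		"	VSTM	%[v1],%[v3],%O[vxrs],%R[vxrs]\n"	\
		: [vxrs] "=Q" (*_v)					\
		: [v1] "I" (_v1), [v3] "I" (_v3)			\
		: "memory");						\
})

#endif /* CONFIG_CC_IS_CLANG */

fpu_vlm() would mirror this with the VLM macro and the memory operand used as an input rather than an output.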
Diffstat (limited to 'arch/s390/include/asm/fpu.h')
-rw-r--r--	arch/s390/include/asm/fpu.h	21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/arch/s390/include/asm/fpu.h b/arch/s390/include/asm/fpu.h
index 626695de6085..6a0a23a28ce8 100644
--- a/arch/s390/include/asm/fpu.h
+++ b/arch/s390/include/asm/fpu.h
@@ -84,6 +84,18 @@ void __load_fpu_regs(void);
void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags);
void __kernel_fpu_end(struct kernel_fpu *state, u32 flags);
+static __always_inline void save_vx_regs(__vector128 *vxrs)
+{
+ fpu_vstm(0, 15, &vxrs[0]);
+ fpu_vstm(16, 31, &vxrs[16]);
+}
+
+static __always_inline void load_vx_regs(__vector128 *vxrs)
+{
+ fpu_vlm(0, 15, &vxrs[0]);
+ fpu_vlm(16, 31, &vxrs[16]);
+}
+
static __always_inline void save_fp_regs(freg_t *fprs)
{
fpu_std(0, &fprs[0]);
@@ -148,15 +160,6 @@ static inline void kernel_fpu_end(struct kernel_fpu *state, u32 flags)
preempt_enable();
}
-static inline void save_vx_regs(__vector128 *vxrs)
-{
- asm volatile("\n"
- " la 1,%0\n"
- " .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
- " .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */
- : "=Q" (*(struct vx_array *)vxrs) : : "1");
-}
-
static inline void convert_vx_to_fp(freg_t *fprs, __vector128 *vxrs)
{
int i;