Diffstat (limited to 'arch/s390/include/asm')
-rw-r--r--  arch/s390/include/asm/bugs.h         |  21
-rw-r--r--  arch/s390/include/asm/checksum.h     |   7
-rw-r--r--  arch/s390/include/asm/ftrace.h       |  29
-rw-r--r--  arch/s390/include/asm/ipl.h          |  11
-rw-r--r--  arch/s390/include/asm/kvm_host.h     |  14
-rw-r--r--  arch/s390/include/asm/mem_encrypt.h  |   4
-rw-r--r--  arch/s390/include/asm/pai.h          |   6
-rw-r--r--  arch/s390/include/asm/pgtable.h      |   3
-rw-r--r--  arch/s390/include/asm/sclp.h         |   2
-rw-r--r--  arch/s390/include/asm/serial.h       |   7
-rw-r--r--  arch/s390/include/asm/shmparam.h     |  12
-rw-r--r--  arch/s390/include/asm/stacktrace.h   |   1
-rw-r--r--  arch/s390/include/asm/tlb.h          |  11
-rw-r--r--  arch/s390/include/asm/uv.h           |  10
-rw-r--r--  arch/s390/include/asm/vga.h          |   7
-rw-r--r--  arch/s390/include/asm/vx-insn-asm.h  | 681
-rw-r--r--  arch/s390/include/asm/vx-insn.h      | 671
17 files changed, 772 insertions, 725 deletions
diff --git a/arch/s390/include/asm/bugs.h b/arch/s390/include/asm/bugs.h
deleted file mode 100644
index aa42a179be33..000000000000
--- a/arch/s390/include/asm/bugs.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * S390 version
- * Copyright IBM Corp. 1999
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * Derived from "include/asm-i386/bugs.h"
- * Copyright (C) 1994 Linus Torvalds
- */
-
-/*
- * This is included by init/main.c to check for architecture-dependent bugs.
- *
- * Needs:
- * void check_bugs(void);
- */
-
-static inline void check_bugs(void)
-{
- /* s390 has no bugs ... */
-}
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index cdd19d326345..d977a3a2f619 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -12,6 +12,12 @@
#ifndef _S390_CHECKSUM_H
#define _S390_CHECKSUM_H
+#ifdef CONFIG_GENERIC_CSUM
+
+#include <asm-generic/checksum.h>
+
+#else /* CONFIG_GENERIC_CSUM */
+
#include <linux/uaccess.h>
#include <linux/in6.h>
@@ -129,4 +135,5 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
return csum_fold((__force __wsum)(sum >> 32));
}
+#endif /* CONFIG_GENERIC_CSUM */
#endif /* _S390_CHECKSUM_H */
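
[Annotation: with CONFIG_GENERIC_CSUM the generic C implementation from asm-generic/checksum.h replaces the s390-specific routines; callers compile unchanged either way. A minimal hedged sketch of such a caller, assuming the usual csum_partial()/csum_fold() kernel API (function name illustrative):]

#include <asm/checksum.h>

/* Fold a running 32-bit checksum over an IP header down to 16 bits;
 * works with both the generic and the s390 implementation. */
static __sum16 example_ip_checksum(const void *iph, unsigned int ihl)
{
	return csum_fold(csum_partial(iph, ihl * 4, 0));
}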
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 6f80ec9c04be..e5c5cb1207e2 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -54,12 +54,33 @@ static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *
return NULL;
}
-static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *fregs,
- unsigned long ip)
+static __always_inline unsigned long
+ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs)
+{
+ return fregs->regs.psw.addr;
+}
+
+static __always_inline void
+ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
+ unsigned long ip)
{
fregs->regs.psw.addr = ip;
}
+#define ftrace_regs_get_argument(fregs, n) \
+ regs_get_kernel_argument(&(fregs)->regs, n)
+#define ftrace_regs_get_stack_pointer(fregs) \
+ kernel_stack_pointer(&(fregs)->regs)
+#define ftrace_regs_return_value(fregs) \
+ regs_return_value(&(fregs)->regs)
+#define ftrace_regs_set_return_value(fregs, ret) \
+ regs_set_return_value(&(fregs)->regs, ret)
+#define ftrace_override_function_with_return(fregs) \
+ override_function_with_return(&(fregs)->regs)
+#define ftrace_regs_query_register_offset(name) \
+ regs_query_register_offset(name)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
* When an ftrace registered caller is tracing a function that is
* also set by a register_ftrace_direct() call, it needs to be
@@ -67,10 +88,12 @@ static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *f
* place the direct caller in the ORIG_GPR2 part of pt_regs. This
* tells the ftrace_caller that there's a direct caller.
*/
-static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
+static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr)
{
+ struct pt_regs *regs = &fregs->regs;
regs->orig_gpr2 = addr;
}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
/*
* Even though the system call numbers are identical for s390/s390x a
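
[Annotation: the new ftrace_regs accessors above replace direct psw.addr manipulation. A hedged usage sketch of an ftrace_ops handler that redirects the traced instruction pointer; everything except the two accessors is illustrative:]

extern void old_function(void);	/* hypothetical traced function */
extern void new_function(void);	/* hypothetical replacement */

static void example_handler(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* Read the traced PC through the new accessor ... */
	unsigned long pc = ftrace_regs_get_instruction_pointer(fregs);

	/* ... and redirect execution, e.g. to a live-patched variant. */
	if (pc == (unsigned long)old_function)
		ftrace_regs_set_instruction_pointer(fregs,
						    (unsigned long)new_function);
}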
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index a405b6bb89fb..b0d00032479d 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -22,6 +22,7 @@ struct ipl_parameter_block {
struct ipl_pb0_common common;
struct ipl_pb0_fcp fcp;
struct ipl_pb0_ccw ccw;
+ struct ipl_pb0_eckd eckd;
struct ipl_pb0_nvme nvme;
char raw[PAGE_SIZE - sizeof(struct ipl_pl_hdr)];
};
@@ -41,6 +42,10 @@ struct ipl_parameter_block {
sizeof(struct ipl_pb0_ccw))
#define IPL_BP0_CCW_LEN (sizeof(struct ipl_pb0_ccw))
+#define IPL_BP_ECKD_LEN (sizeof(struct ipl_pl_hdr) + \
+ sizeof(struct ipl_pb0_eckd))
+#define IPL_BP0_ECKD_LEN (sizeof(struct ipl_pb0_eckd))
+
#define IPL_MAX_SUPPORTED_VERSION (0)
#define IPL_RB_CERT_UNKNOWN ((unsigned short)-1)
@@ -68,6 +73,8 @@ enum ipl_type {
IPL_TYPE_NSS = 16,
IPL_TYPE_NVME = 32,
IPL_TYPE_NVME_DUMP = 64,
+ IPL_TYPE_ECKD = 128,
+ IPL_TYPE_ECKD_DUMP = 256,
};
struct ipl_info
@@ -79,6 +86,9 @@ struct ipl_info
} ccw;
struct {
struct ccw_dev_id dev_id;
+ } eckd;
+ struct {
+ struct ccw_dev_id dev_id;
u64 wwpn;
u64 lun;
} fcp;
@@ -99,6 +109,7 @@ extern void set_os_info_reipl_block(void);
static inline bool is_ipl_type_dump(void)
{
return (ipl_info.type == IPL_TYPE_FCP_DUMP) ||
+ (ipl_info.type == IPL_TYPE_ECKD_DUMP) ||
(ipl_info.type == IPL_TYPE_NVME_DUMP);
}
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index b1e98a9ed152..d67ce719d16a 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -142,8 +142,7 @@ struct mcck_volatile_info {
CR14_EXTERNAL_DAMAGE_SUBMASK)
#define SIDAD_SIZE_MASK 0xff
-#define sida_origin(sie_block) \
- ((sie_block)->sidad & PAGE_MASK)
+#define sida_addr(sie_block) phys_to_virt((sie_block)->sidad & PAGE_MASK)
#define sida_size(sie_block) \
((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE)
@@ -276,6 +275,7 @@ struct kvm_s390_sie_block {
#define ECB3_AES 0x04
#define ECB3_RI 0x01
__u8 ecb3; /* 0x0063 */
+#define ESCA_SCAOL_MASK ~0x3fU
__u32 scaol; /* 0x0064 */
__u8 sdf; /* 0x0068 */
__u8 epdx; /* 0x0069 */
@@ -942,6 +942,8 @@ struct kvm_s390_pv {
unsigned long stor_base;
void *stor_var;
bool dumping;
+ void *set_aside;
+ struct list_head need_cleanup;
struct mmu_notifier mmu_notifier;
};
@@ -1017,7 +1019,13 @@ void kvm_arch_crypto_clear_masks(struct kvm *kvm);
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
unsigned long *aqm, unsigned long *adm);
-extern int sie64a(struct kvm_s390_sie_block *, u64 *);
+int __sie64a(phys_addr_t sie_block_phys, struct kvm_s390_sie_block *sie_block, u64 *rsa);
+
+static inline int sie64a(struct kvm_s390_sie_block *sie_block, u64 *rsa)
+{
+ return __sie64a(virt_to_phys(sie_block), sie_block, rsa);
+}
+
extern char sie_exit;
extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
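
[Annotation: the sie64a() wrapper keeps existing call sites unchanged while handing the physical address of the SIE control block to the low-level entry code. A hedged sketch of the unchanged calling convention (function name illustrative, fields as in s390 KVM):]

static int example_enter_sie(struct kvm_vcpu *vcpu)
{
	/* The wrapper resolves virt_to_phys(sie_block) once per entry. */
	return sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
}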
diff --git a/arch/s390/include/asm/mem_encrypt.h b/arch/s390/include/asm/mem_encrypt.h
index 08a8b96606d7..b85e13505a0f 100644
--- a/arch/s390/include/asm/mem_encrypt.h
+++ b/arch/s390/include/asm/mem_encrypt.h
@@ -4,8 +4,8 @@
#ifndef __ASSEMBLY__
-int set_memory_encrypted(unsigned long addr, int numpages);
-int set_memory_decrypted(unsigned long addr, int numpages);
+int set_memory_encrypted(unsigned long vaddr, int numpages);
+int set_memory_decrypted(unsigned long vaddr, int numpages);
#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/pai.h b/arch/s390/include/asm/pai.h
index 1a8a6b15d121..7d1888e3dee6 100644
--- a/arch/s390/include/asm/pai.h
+++ b/arch/s390/include/asm/pai.h
@@ -75,4 +75,10 @@ static __always_inline void pai_kernel_exit(struct pt_regs *regs)
WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd & ~PAI_CRYPTO_KERNEL_OFFSET);
}
+enum paievt_mode {
+ PAI_MODE_NONE,
+ PAI_MODE_SAMPLING,
+ PAI_MODE_COUNTING,
+};
+
#endif
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index f1cb9391190d..b26cbf1c533c 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -763,6 +763,7 @@ static inline int pmd_dirty(pmd_t pmd)
return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}
+#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
@@ -1773,8 +1774,6 @@ static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#define kern_addr_valid(addr) (1)
-
extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern void vmem_remove_mapping(unsigned long start, unsigned long size);
extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc);
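
[Annotation: defining pmd_young to its own name lets generic code detect that the architecture provides the helper. Roughly, and as a hedged sketch of the generic-side pattern rather than a quote of it:]

#ifndef pmd_young
/* Generic fallback, used only when the arch did not define pmd_young */
static inline int pmd_young(pmd_t pmd)
{
	return 0;
}
#endif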
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 9d4c7f71e070..dac7da88f61f 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -87,6 +87,7 @@ struct sclp_info {
unsigned char has_gisaf : 1;
unsigned char has_diag318 : 1;
unsigned char has_sipl : 1;
+ unsigned char has_sipl_eckd : 1;
unsigned char has_dirq : 1;
unsigned char has_iplcc : 1;
unsigned char has_zpci_lsi : 1;
@@ -131,6 +132,7 @@ void sclp_early_get_ipl_info(struct sclp_ipl_info *info);
void sclp_early_detect(void);
void sclp_early_printk(const char *s);
void __sclp_early_printk(const char *s, unsigned int len);
+void sclp_emergency_printk(const char *s);
int sclp_early_get_memsize(unsigned long *mem);
int sclp_early_get_hsa_size(unsigned long *hsa_size);
diff --git a/arch/s390/include/asm/serial.h b/arch/s390/include/asm/serial.h
deleted file mode 100644
index aaf85a69061c..000000000000
--- a/arch/s390/include/asm/serial.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_S390_SERIAL_H
-#define _ASM_S390_SERIAL_H
-
-#define BASE_BAUD 0
-
-#endif /* _ASM_S390_SERIAL_H */
diff --git a/arch/s390/include/asm/shmparam.h b/arch/s390/include/asm/shmparam.h
deleted file mode 100644
index e75d45649c54..000000000000
--- a/arch/s390/include/asm/shmparam.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * S390 version
- *
- * Derived from "include/asm-i386/shmparam.h"
- */
-#ifndef _ASM_S390_SHMPARAM_H
-#define _ASM_S390_SHMPARAM_H
-
-#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
-
-#endif /* _ASM_S390_SHMPARAM_H */
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index b23c658dce77..1802be5abb5d 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -46,6 +46,7 @@ struct stack_frame {
unsigned long sie_savearea;
unsigned long sie_reason;
unsigned long sie_flags;
+ unsigned long sie_control_block_phys;
};
};
unsigned long gprs[10];
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 3a5c8fb590e5..b91f4a9b044c 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -25,7 +25,8 @@
void __tlb_remove_table(void *_table);
static inline void tlb_flush(struct mmu_gather *tlb);
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size);
+ struct encoded_page *page,
+ int page_size);
#define tlb_flush tlb_flush
#define pte_free_tlb pte_free_tlb
@@ -40,11 +41,15 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
* Release the page cache reference for a pte removed by
* tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
* has already been freed, so just do free_page_and_swap_cache.
+ *
+ * s390 doesn't delay rmap removal, so there is nothing encoded in
+ * the page pointer.
*/
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
+ struct encoded_page *page,
+ int page_size)
{
- free_page_and_swap_cache(page);
+ free_page_and_swap_cache(encoded_page_ptr(page));
return false;
}
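
[Annotation: since s390 never defers rmap removal, no flag bits are ever encoded in the page pointer. A hedged illustration of the round-trip the new encoded_page signature implies (wrapper name illustrative):]

/* encode_page() with flags == 0 stores no extra bits on s390, so
 * encoded_page_ptr() simply recovers the original struct page. */
static void example_remove(struct mmu_gather *tlb, struct page *page)
{
	__tlb_remove_page_size(tlb, encode_page(page, 0), PAGE_SIZE);
}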
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index be3ef9dd6972..28a9ad57b6f1 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -34,6 +34,7 @@
#define UVC_CMD_INIT_UV 0x000f
#define UVC_CMD_CREATE_SEC_CONF 0x0100
#define UVC_CMD_DESTROY_SEC_CONF 0x0101
+#define UVC_CMD_DESTROY_SEC_CONF_FAST 0x0102
#define UVC_CMD_CREATE_SEC_CPU 0x0120
#define UVC_CMD_DESTROY_SEC_CPU 0x0121
#define UVC_CMD_CONV_TO_SEC_STOR 0x0200
@@ -81,6 +82,7 @@ enum uv_cmds_inst {
BIT_UVC_CMD_UNSHARE_ALL = 20,
BIT_UVC_CMD_PIN_PAGE_SHARED = 21,
BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
+ BIT_UVC_CMD_DESTROY_SEC_CONF_FAST = 23,
BIT_UVC_CMD_DUMP_INIT = 24,
BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE = 25,
BIT_UVC_CMD_DUMP_CPU = 26,
@@ -230,6 +232,14 @@ struct uv_cb_nodata {
u64 reserved20[4];
} __packed __aligned(8);
+/* Destroy Configuration Fast */
+struct uv_cb_destroy_fast {
+ struct uv_cb_header header;
+ u64 reserved08[2];
+ u64 handle;
+ u64 reserved20[5];
+} __packed __aligned(8);
+
/* Set Shared Access */
struct uv_cb_share {
struct uv_cb_header header;
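
[Annotation: a hedged sketch of issuing the new fast-destroy call through the existing uv_call() primitive; the function name and error mapping are illustrative:]

static int example_destroy_fast(u64 handle)
{
	struct uv_cb_destroy_fast uvcb = {
		.header.cmd = UVC_CMD_DESTROY_SEC_CONF_FAST,
		.header.len = sizeof(uvcb),
		.handle	    = handle,
	};

	/* Non-zero condition code: the ultravisor rejected the call */
	return uv_call(0, (u64)&uvcb) ? -EINVAL : 0;
}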
diff --git a/arch/s390/include/asm/vga.h b/arch/s390/include/asm/vga.h
deleted file mode 100644
index 605dc46bac5e..000000000000
--- a/arch/s390/include/asm/vga.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_S390_VGA_H
-#define _ASM_S390_VGA_H
-
-/* Avoid compile errors due to missing asm/vga.h */
-
-#endif /* _ASM_S390_VGA_H */
diff --git a/arch/s390/include/asm/vx-insn-asm.h b/arch/s390/include/asm/vx-insn-asm.h
new file mode 100644
index 000000000000..360f8b36d962
--- /dev/null
+++ b/arch/s390/include/asm/vx-insn-asm.h
@@ -0,0 +1,681 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for Vector Instructions
+ *
+ * Assembler macros to generate .byte/.word code for particular
+ * vector instructions that are supported by recent binutils (>= 2.26) only.
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+
+#ifndef __ASM_S390_VX_INSN_INTERNAL_H
+#define __ASM_S390_VX_INSN_INTERNAL_H
+
+#ifndef __ASM_S390_VX_INSN_H
+#error only <asm/vx-insn.h> can be included directly
+#endif
+
+#ifdef __ASSEMBLY__
+
+/* Macros to generate vector instruction byte code */
+
+/* GR_NUM - Retrieve general-purpose register number
+ *
+ * @opd: Operand to store the register number
+ * @gr: String designation of the register in the format "%rN"
+ */
+.macro GR_NUM opd gr
+ \opd = 255
+ .ifc \gr,%r0
+ \opd = 0
+ .endif
+ .ifc \gr,%r1
+ \opd = 1
+ .endif
+ .ifc \gr,%r2
+ \opd = 2
+ .endif
+ .ifc \gr,%r3
+ \opd = 3
+ .endif
+ .ifc \gr,%r4
+ \opd = 4
+ .endif
+ .ifc \gr,%r5
+ \opd = 5
+ .endif
+ .ifc \gr,%r6
+ \opd = 6
+ .endif
+ .ifc \gr,%r7
+ \opd = 7
+ .endif
+ .ifc \gr,%r8
+ \opd = 8
+ .endif
+ .ifc \gr,%r9
+ \opd = 9
+ .endif
+ .ifc \gr,%r10
+ \opd = 10
+ .endif
+ .ifc \gr,%r11
+ \opd = 11
+ .endif
+ .ifc \gr,%r12
+ \opd = 12
+ .endif
+ .ifc \gr,%r13
+ \opd = 13
+ .endif
+ .ifc \gr,%r14
+ \opd = 14
+ .endif
+ .ifc \gr,%r15
+ \opd = 15
+ .endif
+ .if \opd == 255
+ \opd = \gr
+ .endif
+.endm
+
+/* VX_NUM - Retrieve vector register number
+ *
+ * @opd: Operand to store the register number
+ * @vxr: String designation of the register in the format "%vN"
+ *
+ * The vector register number is used as an input to the instruction
+ * and to compute the RXB field of the instruction.
+ */
+.macro VX_NUM opd vxr
+ \opd = 255
+ .ifc \vxr,%v0
+ \opd = 0
+ .endif
+ .ifc \vxr,%v1
+ \opd = 1
+ .endif
+ .ifc \vxr,%v2
+ \opd = 2
+ .endif
+ .ifc \vxr,%v3
+ \opd = 3
+ .endif
+ .ifc \vxr,%v4
+ \opd = 4
+ .endif
+ .ifc \vxr,%v5
+ \opd = 5
+ .endif
+ .ifc \vxr,%v6
+ \opd = 6
+ .endif
+ .ifc \vxr,%v7
+ \opd = 7
+ .endif
+ .ifc \vxr,%v8
+ \opd = 8
+ .endif
+ .ifc \vxr,%v9
+ \opd = 9
+ .endif
+ .ifc \vxr,%v10
+ \opd = 10
+ .endif
+ .ifc \vxr,%v11
+ \opd = 11
+ .endif
+ .ifc \vxr,%v12
+ \opd = 12
+ .endif
+ .ifc \vxr,%v13
+ \opd = 13
+ .endif
+ .ifc \vxr,%v14
+ \opd = 14
+ .endif
+ .ifc \vxr,%v15
+ \opd = 15
+ .endif
+ .ifc \vxr,%v16
+ \opd = 16
+ .endif
+ .ifc \vxr,%v17
+ \opd = 17
+ .endif
+ .ifc \vxr,%v18
+ \opd = 18
+ .endif
+ .ifc \vxr,%v19
+ \opd = 19
+ .endif
+ .ifc \vxr,%v20
+ \opd = 20
+ .endif
+ .ifc \vxr,%v21
+ \opd = 21
+ .endif
+ .ifc \vxr,%v22
+ \opd = 22
+ .endif
+ .ifc \vxr,%v23
+ \opd = 23
+ .endif
+ .ifc \vxr,%v24
+ \opd = 24
+ .endif
+ .ifc \vxr,%v25
+ \opd = 25
+ .endif
+ .ifc \vxr,%v26
+ \opd = 26
+ .endif
+ .ifc \vxr,%v27
+ \opd = 27
+ .endif
+ .ifc \vxr,%v28
+ \opd = 28
+ .endif
+ .ifc \vxr,%v29
+ \opd = 29
+ .endif
+ .ifc \vxr,%v30
+ \opd = 30
+ .endif
+ .ifc \vxr,%v31
+ \opd = 31
+ .endif
+ .if \opd == 255
+ \opd = \vxr
+ .endif
+.endm
+
+/* RXB - Compute the RXB field from the most significant vector register bits
+ *
+ * @rxb: Operand to store computed RXB value
+ * @v1: First vector register designated operand
+ * @v2: Second vector register designated operand
+ * @v3: Third vector register designated operand
+ * @v4: Fourth vector register designated operand
+ */
+.macro RXB rxb v1 v2=0 v3=0 v4=0
+ \rxb = 0
+ .if \v1 & 0x10
+ \rxb = \rxb | 0x08
+ .endif
+ .if \v2 & 0x10
+ \rxb = \rxb | 0x04
+ .endif
+ .if \v3 & 0x10
+ \rxb = \rxb | 0x02
+ .endif
+ .if \v4 & 0x10
+ \rxb = \rxb | 0x01
+ .endif
+.endm
+
+/* MRXB - Generate Element Size Control and RXB value
+ *
+ * @m: Element size control
+ * @v1: First vector register designated operand (for RXB)
+ * @v2: Second vector register designated operand (for RXB)
+ * @v3: Third vector register designated operand (for RXB)
+ * @v4: Fourth vector register designated operand (for RXB)
+ */
+.macro MRXB m v1 v2=0 v3=0 v4=0
+ rxb = 0
+ RXB rxb, \v1, \v2, \v3, \v4
+ .byte (\m << 4) | rxb
+.endm
+
+/* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields
+ *
+ * @m: Element size control
+ * @opc: Opcode
+ * @v1: First vector register designated operand (for RXB)
+ * @v2: Second vector register designated operand (for RXB)
+ * @v3: Third vector register designated operand (for RXB)
+ * @v4: Fourth vector register designated operand (for RXB)
+ */
+.macro MRXBOPC m opc v1 v2=0 v3=0 v4=0
+ MRXB \m, \v1, \v2, \v3, \v4
+ .byte \opc
+.endm
+
+/* Vector support instructions */
+
+/* VECTOR GENERATE BYTE MASK */
+.macro VGBM vr imm2
+ VX_NUM v1, \vr
+ .word (0xE700 | ((v1&15) << 4))
+ .word \imm2
+ MRXBOPC 0, 0x44, v1
+.endm
+.macro VZERO vxr
+ VGBM \vxr, 0
+.endm
+.macro VONE vxr
+ VGBM \vxr, 0xFFFF
+.endm
+
+/* VECTOR LOAD VR ELEMENT FROM GR */
+.macro VLVG v, gr, disp, m
+ VX_NUM v1, \v
+ GR_NUM b2, "%r0"
+ GR_NUM r3, \gr
+ .word 0xE700 | ((v1&15) << 4) | r3
+ .word (b2 << 12) | (\disp)
+ MRXBOPC \m, 0x22, v1
+.endm
+.macro VLVGB v, gr, index
+ VLVG \v, \gr, \index, 0
+.endm
+.macro VLVGH v, gr, index
+ VLVG \v, \gr, \index, 1
+.endm
+.macro VLVGF v, gr, index
+ VLVG \v, \gr, \index, 2
+.endm
+.macro VLVGG v, gr, index
+ VLVG \v, \gr, \index, 3
+.endm
+
+/* VECTOR LOAD REGISTER */
+.macro VLR v1, v2
+ VX_NUM v1, \v1
+ VX_NUM v2, \v2
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word 0
+ MRXBOPC 0, 0x56, v1, v2
+.endm
+
+/* VECTOR LOAD */
+.macro VL v, disp, index="%r0", base
+ VX_NUM v1, \v
+ GR_NUM x2, \index
+ GR_NUM b2, \base
+ .word 0xE700 | ((v1&15) << 4) | x2
+ .word (b2 << 12) | (\disp)
+ MRXBOPC 0, 0x06, v1
+.endm
+
+/* VECTOR LOAD ELEMENT */
+.macro VLEx vr1, disp, index="%r0", base, m3, opc
+ VX_NUM v1, \vr1
+ GR_NUM x2, \index
+ GR_NUM b2, \base
+ .word 0xE700 | ((v1&15) << 4) | x2
+ .word (b2 << 12) | (\disp)
+ MRXBOPC \m3, \opc, v1
+.endm
+.macro VLEB vr1, disp, index="%r0", base, m3
+ VLEx \vr1, \disp, \index, \base, \m3, 0x00
+.endm
+.macro VLEH vr1, disp, index="%r0", base, m3
+ VLEx \vr1, \disp, \index, \base, \m3, 0x01
+.endm
+.macro VLEF vr1, disp, index="%r0", base, m3
+ VLEx \vr1, \disp, \index, \base, \m3, 0x03
+.endm
+.macro VLEG vr1, disp, index="%r0", base, m3
+ VLEx \vr1, \disp, \index, \base, \m3, 0x02
+.endm
+
+/* VECTOR LOAD ELEMENT IMMEDIATE */
+.macro VLEIx vr1, imm2, m3, opc
+ VX_NUM v1, \vr1
+ .word 0xE700 | ((v1&15) << 4)
+ .word \imm2
+ MRXBOPC \m3, \opc, v1
+.endm
+.macro VLEIB vr1, imm2, index
+ VLEIx \vr1, \imm2, \index, 0x40
+.endm
+.macro VLEIH vr1, imm2, index
+ VLEIx \vr1, \imm2, \index, 0x41
+.endm
+.macro VLEIF vr1, imm2, index
+ VLEIx \vr1, \imm2, \index, 0x43
+.endm
+.macro VLEIG vr1, imm2, index
+ VLEIx \vr1, \imm2, \index, 0x42
+.endm
+
+/* VECTOR LOAD GR FROM VR ELEMENT */
+.macro VLGV gr, vr, disp, base="%r0", m
+ GR_NUM r1, \gr
+ GR_NUM b2, \base
+ VX_NUM v3, \vr
+ .word 0xE700 | (r1 << 4) | (v3&15)
+ .word (b2 << 12) | (\disp)
+ MRXBOPC \m, 0x21, v3
+.endm
+.macro VLGVB gr, vr, disp, base="%r0"
+ VLGV \gr, \vr, \disp, \base, 0
+.endm
+.macro VLGVH gr, vr, disp, base="%r0"
+ VLGV \gr, \vr, \disp, \base, 1
+.endm
+.macro VLGVF gr, vr, disp, base="%r0"
+ VLGV \gr, \vr, \disp, \base, 2
+.endm
+.macro VLGVG gr, vr, disp, base="%r0"
+ VLGV \gr, \vr, \disp, \base, 3
+.endm
+
+/* VECTOR LOAD MULTIPLE */
+.macro VLM vfrom, vto, disp, base, hint=3
+ VX_NUM v1, \vfrom
+ VX_NUM v3, \vto
+ GR_NUM b2, \base
+ .word 0xE700 | ((v1&15) << 4) | (v3&15)
+ .word (b2 << 12) | (\disp)
+ MRXBOPC \hint, 0x36, v1, v3
+.endm
+
+/* VECTOR STORE */
+.macro VST vr1, disp, index="%r0", base
+ VX_NUM v1, \vr1
+ GR_NUM x2, \index
+ GR_NUM b2, \base
+ .word 0xE700 | ((v1&15) << 4) | (x2&15)
+ .word (b2 << 12) | (\disp)
+ MRXBOPC 0, 0x0E, v1
+.endm
+
+/* VECTOR STORE MULTIPLE */
+.macro VSTM vfrom, vto, disp, base, hint=3
+ VX_NUM v1, \vfrom
+ VX_NUM v3, \vto
+ GR_NUM b2, \base
+ .word 0xE700 | ((v1&15) << 4) | (v3&15)
+ .word (b2 << 12) | (\disp)
+ MRXBOPC \hint, 0x3E, v1, v3
+.endm
+
+/* VECTOR PERMUTE */
+.macro VPERM vr1, vr2, vr3, vr4
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ VX_NUM v4, \vr4
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC (v4&15), 0x8C, v1, v2, v3, v4
+.endm
+
+/* VECTOR UNPACK LOGICAL LOW */
+.macro VUPLL vr1, vr2, m3
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word 0x0000
+ MRXBOPC \m3, 0xD4, v1, v2
+.endm
+.macro VUPLLB vr1, vr2
+ VUPLL \vr1, \vr2, 0
+.endm
+.macro VUPLLH vr1, vr2
+ VUPLL \vr1, \vr2, 1
+.endm
+.macro VUPLLF vr1, vr2
+ VUPLL \vr1, \vr2, 2
+.endm
+
+/* VECTOR PERMUTE DOUBLEWORD IMMEDIATE */
+.macro VPDI vr1, vr2, vr3, m4
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC \m4, 0x84, v1, v2, v3
+.endm
+
+/* VECTOR REPLICATE */
+.macro VREP vr1, vr3, imm2, m4
+ VX_NUM v1, \vr1
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v3&15)
+ .word \imm2
+ MRXBOPC \m4, 0x4D, v1, v3
+.endm
+.macro VREPB vr1, vr3, imm2
+ VREP \vr1, \vr3, \imm2, 0
+.endm
+.macro VREPH vr1, vr3, imm2
+ VREP \vr1, \vr3, \imm2, 1
+.endm
+.macro VREPF vr1, vr3, imm2
+ VREP \vr1, \vr3, \imm2, 2
+.endm
+.macro VREPG vr1, vr3, imm2
+ VREP \vr1, \vr3, \imm2, 3
+.endm
+
+/* VECTOR MERGE HIGH */
+.macro VMRH vr1, vr2, vr3, m4
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC \m4, 0x61, v1, v2, v3
+.endm
+.macro VMRHB vr1, vr2, vr3
+ VMRH \vr1, \vr2, \vr3, 0
+.endm
+.macro VMRHH vr1, vr2, vr3
+ VMRH \vr1, \vr2, \vr3, 1
+.endm
+.macro VMRHF vr1, vr2, vr3
+ VMRH \vr1, \vr2, \vr3, 2
+.endm
+.macro VMRHG vr1, vr2, vr3
+ VMRH \vr1, \vr2, \vr3, 3
+.endm
+
+/* VECTOR MERGE LOW */
+.macro VMRL vr1, vr2, vr3, m4
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC \m4, 0x60, v1, v2, v3
+.endm
+.macro VMRLB vr1, vr2, vr3
+ VMRL \vr1, \vr2, \vr3, 0
+.endm
+.macro VMRLH vr1, vr2, vr3
+ VMRL \vr1, \vr2, \vr3, 1
+.endm
+.macro VMRLF vr1, vr2, vr3
+ VMRL \vr1, \vr2, \vr3, 2
+.endm
+.macro VMRLG vr1, vr2, vr3
+ VMRL \vr1, \vr2, \vr3, 3
+.endm
+
+
+/* Vector integer instructions */
+
+/* VECTOR AND */
+.macro VN vr1, vr2, vr3
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC 0, 0x68, v1, v2, v3
+.endm
+
+/* VECTOR EXCLUSIVE OR */
+.macro VX vr1, vr2, vr3
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC 0, 0x6D, v1, v2, v3
+.endm
+
+/* VECTOR GALOIS FIELD MULTIPLY SUM */
+.macro VGFM vr1, vr2, vr3, m4
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC \m4, 0xB4, v1, v2, v3
+.endm
+.macro VGFMB vr1, vr2, vr3
+ VGFM \vr1, \vr2, \vr3, 0
+.endm
+.macro VGFMH vr1, vr2, vr3
+ VGFM \vr1, \vr2, \vr3, 1
+.endm
+.macro VGFMF vr1, vr2, vr3
+ VGFM \vr1, \vr2, \vr3, 2
+.endm
+.macro VGFMG vr1, vr2, vr3
+ VGFM \vr1, \vr2, \vr3, 3
+.endm
+
+/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */
+.macro VGFMA vr1, vr2, vr3, vr4, m5
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ VX_NUM v4, \vr4
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12) | (\m5 << 8)
+ MRXBOPC (v4&15), 0xBC, v1, v2, v3, v4
+.endm
+.macro VGFMAB vr1, vr2, vr3, vr4
+ VGFMA \vr1, \vr2, \vr3, \vr4, 0
+.endm
+.macro VGFMAH vr1, vr2, vr3, vr4
+ VGFMA \vr1, \vr2, \vr3, \vr4, 1
+.endm
+.macro VGFMAF vr1, vr2, vr3, vr4
+ VGFMA \vr1, \vr2, \vr3, \vr4, 2
+.endm
+.macro VGFMAG vr1, vr2, vr3, vr4
+ VGFMA \vr1, \vr2, \vr3, \vr4, 3
+.endm
+
+/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */
+.macro VSRLB vr1, vr2, vr3
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC 0, 0x7D, v1, v2, v3
+.endm
+
+/* VECTOR REPLICATE IMMEDIATE */
+.macro VREPI vr1, imm2, m3
+ VX_NUM v1, \vr1
+ .word 0xE700 | ((v1&15) << 4)
+ .word \imm2
+ MRXBOPC \m3, 0x45, v1
+.endm
+.macro VREPIB vr1, imm2
+ VREPI \vr1, \imm2, 0
+.endm
+.macro VREPIH vr1, imm2
+ VREPI \vr1, \imm2, 1
+.endm
+.macro VREPIF vr1, imm2
+ VREPI \vr1, \imm2, 2
+.endm
+.macro VREPIG vr1, imm2
+ VREPI \vr1, \imm2, 3
+.endm
+
+/* VECTOR ADD */
+.macro VA vr1, vr2, vr3, m4
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC \m4, 0xF3, v1, v2, v3
+.endm
+.macro VAB vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 0
+.endm
+.macro VAH vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 1
+.endm
+.macro VAF vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 2
+.endm
+.macro VAG vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 3
+.endm
+.macro VAQ vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 4
+.endm
+
+/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */
+.macro VESRAV vr1, vr2, vr3, m4
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC \m4, 0x7A, v1, v2, v3
+.endm
+
+.macro VESRAVB vr1, vr2, vr3
+ VESRAV \vr1, \vr2, \vr3, 0
+.endm
+.macro VESRAVH vr1, vr2, vr3
+ VESRAV \vr1, \vr2, \vr3, 1
+.endm
+.macro VESRAVF vr1, vr2, vr3
+ VESRAV \vr1, \vr2, \vr3, 2
+.endm
+.macro VESRAVG vr1, vr2, vr3
+ VESRAV \vr1, \vr2, \vr3, 3
+.endm
+
+/* VECTOR ELEMENT ROTATE LEFT LOGICAL */
+.macro VERLL vr1, vr3, disp, base="%r0", m4
+ VX_NUM v1, \vr1
+ VX_NUM v3, \vr3
+ GR_NUM b2, \base
+ .word 0xE700 | ((v1&15) << 4) | (v3&15)
+ .word (b2 << 12) | (\disp)
+ MRXBOPC \m4, 0x33, v1, v3
+.endm
+.macro VERLLB vr1, vr3, disp, base="%r0"
+ VERLL \vr1, \vr3, \disp, \base, 0
+.endm
+.macro VERLLH vr1, vr3, disp, base="%r0"
+ VERLL \vr1, \vr3, \disp, \base, 1
+.endm
+.macro VERLLF vr1, vr3, disp, base="%r0"
+ VERLL \vr1, \vr3, \disp, \base, 2
+.endm
+.macro VERLLG vr1, vr3, disp, base="%r0"
+ VERLL \vr1, \vr3, \disp, \base, 3
+.endm
+
+/* VECTOR SHIFT LEFT DOUBLE BY BYTE */
+.macro VSLDB vr1, vr2, vr3, imm4
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12) | (\imm4)
+ MRXBOPC 0, 0x77, v1, v2, v3
+.endm
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_S390_VX_INSN_INTERNAL_H */
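
[Annotation: for reference, the bit shuffling the RXB macro performs, mirrored in C. This is an illustration, not part of the patch: each operand contributes bit 0x10 of its 5-bit register number to one RXB bit.]

static inline unsigned int rxb(unsigned int v1, unsigned int v2,
			       unsigned int v3, unsigned int v4)
{
	/* Bit 4 of each register number maps to RXB bits 0x08..0x01 */
	return ((v1 & 0x10) >> 1) | ((v2 & 0x10) >> 2) |
	       ((v3 & 0x10) >> 3) | ((v4 & 0x10) >> 4);
}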
diff --git a/arch/s390/include/asm/vx-insn.h b/arch/s390/include/asm/vx-insn.h
index 95480ed9149e..8c188f1c6d27 100644
--- a/arch/s390/include/asm/vx-insn.h
+++ b/arch/s390/include/asm/vx-insn.h
@@ -2,677 +2,18 @@
/*
* Support for Vector Instructions
*
- * Assembler macros to generate .byte/.word code for particular
- * vector instructions that are supported by recent binutils (>= 2.26) only.
- *
- * Copyright IBM Corp. 2015
- * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ * This wrapper header file makes the vector instruction macros usable
+ * both in assembler files and in inline assemblies in C files.
*/
#ifndef __ASM_S390_VX_INSN_H
#define __ASM_S390_VX_INSN_H
-#ifdef __ASSEMBLY__
-
-
-/* Macros to generate vector instruction byte code */
-
-/* GR_NUM - Retrieve general-purpose register number
- *
- * @opd: Operand to store register number
- * @r64: String designation register in the format "%rN"
- */
-.macro GR_NUM opd gr
- \opd = 255
- .ifc \gr,%r0
- \opd = 0
- .endif
- .ifc \gr,%r1
- \opd = 1
- .endif
- .ifc \gr,%r2
- \opd = 2
- .endif
- .ifc \gr,%r3
- \opd = 3
- .endif
- .ifc \gr,%r4
- \opd = 4
- .endif
- .ifc \gr,%r5
- \opd = 5
- .endif
- .ifc \gr,%r6
- \opd = 6
- .endif
- .ifc \gr,%r7
- \opd = 7
- .endif
- .ifc \gr,%r8
- \opd = 8
- .endif
- .ifc \gr,%r9
- \opd = 9
- .endif
- .ifc \gr,%r10
- \opd = 10
- .endif
- .ifc \gr,%r11
- \opd = 11
- .endif
- .ifc \gr,%r12
- \opd = 12
- .endif
- .ifc \gr,%r13
- \opd = 13
- .endif
- .ifc \gr,%r14
- \opd = 14
- .endif
- .ifc \gr,%r15
- \opd = 15
- .endif
- .if \opd == 255
- \opd = \gr
- .endif
-.endm
-
-/* VX_NUM - Retrieve vector register number
- *
- * @opd: Operand to store register number
- * @vxr: String designation register in the format "%vN"
- *
- * The vector register number is used for as input number to the
- * instruction and, as well as, to compute the RXB field of the
- * instruction.
- */
-.macro VX_NUM opd vxr
- \opd = 255
- .ifc \vxr,%v0
- \opd = 0
- .endif
- .ifc \vxr,%v1
- \opd = 1
- .endif
- .ifc \vxr,%v2
- \opd = 2
- .endif
- .ifc \vxr,%v3
- \opd = 3
- .endif
- .ifc \vxr,%v4
- \opd = 4
- .endif
- .ifc \vxr,%v5
- \opd = 5
- .endif
- .ifc \vxr,%v6
- \opd = 6
- .endif
- .ifc \vxr,%v7
- \opd = 7
- .endif
- .ifc \vxr,%v8
- \opd = 8
- .endif
- .ifc \vxr,%v9
- \opd = 9
- .endif
- .ifc \vxr,%v10
- \opd = 10
- .endif
- .ifc \vxr,%v11
- \opd = 11
- .endif
- .ifc \vxr,%v12
- \opd = 12
- .endif
- .ifc \vxr,%v13
- \opd = 13
- .endif
- .ifc \vxr,%v14
- \opd = 14
- .endif
- .ifc \vxr,%v15
- \opd = 15
- .endif
- .ifc \vxr,%v16
- \opd = 16
- .endif
- .ifc \vxr,%v17
- \opd = 17
- .endif
- .ifc \vxr,%v18
- \opd = 18
- .endif
- .ifc \vxr,%v19
- \opd = 19
- .endif
- .ifc \vxr,%v20
- \opd = 20
- .endif
- .ifc \vxr,%v21
- \opd = 21
- .endif
- .ifc \vxr,%v22
- \opd = 22
- .endif
- .ifc \vxr,%v23
- \opd = 23
- .endif
- .ifc \vxr,%v24
- \opd = 24
- .endif
- .ifc \vxr,%v25
- \opd = 25
- .endif
- .ifc \vxr,%v26
- \opd = 26
- .endif
- .ifc \vxr,%v27
- \opd = 27
- .endif
- .ifc \vxr,%v28
- \opd = 28
- .endif
- .ifc \vxr,%v29
- \opd = 29
- .endif
- .ifc \vxr,%v30
- \opd = 30
- .endif
- .ifc \vxr,%v31
- \opd = 31
- .endif
- .if \opd == 255
- \opd = \vxr
- .endif
-.endm
-
-/* RXB - Compute most significant bit used vector registers
- *
- * @rxb: Operand to store computed RXB value
- * @v1: First vector register designated operand
- * @v2: Second vector register designated operand
- * @v3: Third vector register designated operand
- * @v4: Fourth vector register designated operand
- */
-.macro RXB rxb v1 v2=0 v3=0 v4=0
- \rxb = 0
- .if \v1 & 0x10
- \rxb = \rxb | 0x08
- .endif
- .if \v2 & 0x10
- \rxb = \rxb | 0x04
- .endif
- .if \v3 & 0x10
- \rxb = \rxb | 0x02
- .endif
- .if \v4 & 0x10
- \rxb = \rxb | 0x01
- .endif
-.endm
-
-/* MRXB - Generate Element Size Control and RXB value
- *
- * @m: Element size control
- * @v1: First vector register designated operand (for RXB)
- * @v2: Second vector register designated operand (for RXB)
- * @v3: Third vector register designated operand (for RXB)
- * @v4: Fourth vector register designated operand (for RXB)
- */
-.macro MRXB m v1 v2=0 v3=0 v4=0
- rxb = 0
- RXB rxb, \v1, \v2, \v3, \v4
- .byte (\m << 4) | rxb
-.endm
-
-/* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields
- *
- * @m: Element size control
- * @opc: Opcode
- * @v1: First vector register designated operand (for RXB)
- * @v2: Second vector register designated operand (for RXB)
- * @v3: Third vector register designated operand (for RXB)
- * @v4: Fourth vector register designated operand (for RXB)
- */
-.macro MRXBOPC m opc v1 v2=0 v3=0 v4=0
- MRXB \m, \v1, \v2, \v3, \v4
- .byte \opc
-.endm
-
-/* Vector support instructions */
-
-/* VECTOR GENERATE BYTE MASK */
-.macro VGBM vr imm2
- VX_NUM v1, \vr
- .word (0xE700 | ((v1&15) << 4))
- .word \imm2
- MRXBOPC 0, 0x44, v1
-.endm
-.macro VZERO vxr
- VGBM \vxr, 0
-.endm
-.macro VONE vxr
- VGBM \vxr, 0xFFFF
-.endm
-
-/* VECTOR LOAD VR ELEMENT FROM GR */
-.macro VLVG v, gr, disp, m
- VX_NUM v1, \v
- GR_NUM b2, "%r0"
- GR_NUM r3, \gr
- .word 0xE700 | ((v1&15) << 4) | r3
- .word (b2 << 12) | (\disp)
- MRXBOPC \m, 0x22, v1
-.endm
-.macro VLVGB v, gr, index, base
- VLVG \v, \gr, \index, \base, 0
-.endm
-.macro VLVGH v, gr, index
- VLVG \v, \gr, \index, 1
-.endm
-.macro VLVGF v, gr, index
- VLVG \v, \gr, \index, 2
-.endm
-.macro VLVGG v, gr, index
- VLVG \v, \gr, \index, 3
-.endm
-
-/* VECTOR LOAD REGISTER */
-.macro VLR v1, v2
- VX_NUM v1, \v1
- VX_NUM v2, \v2
- .word 0xE700 | ((v1&15) << 4) | (v2&15)
- .word 0
- MRXBOPC 0, 0x56, v1, v2
-.endm
-
-/* VECTOR LOAD */
-.macro VL v, disp, index="%r0", base
- VX_NUM v1, \v
- GR_NUM x2, \index
- GR_NUM b2, \base
- .word 0xE700 | ((v1&15) << 4) | x2
- .word (b2 << 12) | (\disp)
- MRXBOPC 0, 0x06, v1
-.endm
-
-/* VECTOR LOAD ELEMENT */
-.macro VLEx vr1, disp, index="%r0", base, m3, opc
- VX_NUM v1, \vr1
- GR_NUM x2, \index
- GR_NUM b2, \base
- .word 0xE700 | ((v1&15) << 4) | x2
- .word (b2 << 12) | (\disp)
- MRXBOPC \m3, \opc, v1
-.endm
-.macro VLEB vr1, disp, index="%r0", base, m3
- VLEx \vr1, \disp, \index, \base, \m3, 0x00
-.endm
-.macro VLEH vr1, disp, index="%r0", base, m3
- VLEx \vr1, \disp, \index, \base, \m3, 0x01
-.endm
-.macro VLEF vr1, disp, index="%r0", base, m3
- VLEx \vr1, \disp, \index, \base, \m3, 0x03
-.endm
-.macro VLEG vr1, disp, index="%r0", base, m3
- VLEx \vr1, \disp, \index, \base, \m3, 0x02
-.endm
-
-/* VECTOR LOAD ELEMENT IMMEDIATE */
-.macro VLEIx vr1, imm2, m3, opc
- VX_NUM v1, \vr1
- .word 0xE700 | ((v1&15) << 4)
- .word \imm2
- MRXBOPC \m3, \opc, v1
-.endm
-.macro VLEIB vr1, imm2, index
- VLEIx \vr1, \imm2, \index, 0x40
-.endm
-.macro VLEIH vr1, imm2, index
- VLEIx \vr1, \imm2, \index, 0x41
-.endm
-.macro VLEIF vr1, imm2, index
- VLEIx \vr1, \imm2, \index, 0x43
-.endm
-.macro VLEIG vr1, imm2, index
- VLEIx \vr1, \imm2, \index, 0x42
-.endm
-
-/* VECTOR LOAD GR FROM VR ELEMENT */
-.macro VLGV gr, vr, disp, base="%r0", m
- GR_NUM r1, \gr
- GR_NUM b2, \base
- VX_NUM v3, \vr
- .word 0xE700 | (r1 << 4) | (v3&15)
- .word (b2 << 12) | (\disp)
- MRXBOPC \m, 0x21, v3
-.endm
-.macro VLGVB gr, vr, disp, base="%r0"
- VLGV \gr, \vr, \disp, \base, 0
-.endm
-.macro VLGVH gr, vr, disp, base="%r0"
- VLGV \gr, \vr, \disp, \base, 1
-.endm
-.macro VLGVF gr, vr, disp, base="%r0"
- VLGV \gr, \vr, \disp, \base, 2
-.endm
-.macro VLGVG gr, vr, disp, base="%r0"
- VLGV \gr, \vr, \disp, \base, 3
-.endm
-
-/* VECTOR LOAD MULTIPLE */
-.macro VLM vfrom, vto, disp, base, hint=3
- VX_NUM v1, \vfrom
- VX_NUM v3, \vto
- GR_NUM b2, \base
- .word 0xE700 | ((v1&15) << 4) | (v3&15)
- .word (b2 << 12) | (\disp)
- MRXBOPC \hint, 0x36, v1, v3
-.endm
-
-/* VECTOR STORE */
-.macro VST vr1, disp, index="%r0", base
- VX_NUM v1, \vr1
- GR_NUM x2, \index
- GR_NUM b2, \base
- .word 0xE700 | ((v1&15) << 4) | (x2&15)
- .word (b2 << 12) | (\disp)
- MRXBOPC 0, 0x0E, v1
-.endm
-
-/* VECTOR STORE MULTIPLE */
-.macro VSTM vfrom, vto, disp, base, hint=3
- VX_NUM v1, \vfrom
- VX_NUM v3, \vto
- GR_NUM b2, \base
- .word 0xE700 | ((v1&15) << 4) | (v3&15)
- .word (b2 << 12) | (\disp)
- MRXBOPC \hint, 0x3E, v1, v3
-.endm
-
-/* VECTOR PERMUTE */
-.macro VPERM vr1, vr2, vr3, vr4
- VX_NUM v1, \vr1
- VX_NUM v2, \vr2
- VX_NUM v3, \vr3
- VX_NUM v4, \vr4
- .word 0xE700 | ((v1&15) << 4) | (v2&15)
- .word ((v3&15) << 12)
- MRXBOPC (v4&15), 0x8C, v1, v2, v3, v4
-.endm
-
-/* VECTOR UNPACK LOGICAL LOW */
-.macro VUPLL vr1, vr2, m3
- VX_NUM v1, \vr1
- VX_NUM v2, \vr2
- .word 0xE700 | ((v1&15) << 4) | (v2&15)
- .word 0x0000
- MRXBOPC \m3, 0xD4, v1, v2
-.endm
-.macro VUPLLB vr1, vr2
- VUPLL \vr1, \vr2, 0
-.endm
-.macro VUPLLH vr1, vr2
- VUPLL \vr1, \vr2, 1
-.endm
-.macro VUPLLF vr1, vr2
- VUPLL \vr1, \vr2, 2
-.endm
-
-/* VECTOR PERMUTE DOUBLEWORD IMMEDIATE */
-.macro VPDI vr1, vr2, vr3, m4
- VX_NUM v1, \vr1
- VX_NUM v2, \vr2
- VX_NUM v3, \vr3
- .word 0xE700 | ((v1&15) << 4) | (v2&15)
- .word ((v3&15) << 12)
- MRXBOPC \m4, 0x84, v1, v2, v3
-.endm
-
-/* VECTOR REPLICATE */
-.macro VREP vr1, vr3, imm2, m4
- VX_NUM v1, \vr1
- VX_NUM v3, \vr3
- .word 0xE700 | ((v1&15) << 4) | (v3&15)
- .word \imm2
- MRXBOPC \m4, 0x4D, v1, v3
-.endm
-.macro VREPB vr1, vr3, imm2
- VREP \vr1, \vr3, \imm2, 0
-.endm
-.macro VREPH vr1, vr3, imm2
- VREP \vr1, \vr3, \imm2, 1
-.endm
-.macro VREPF vr1, vr3, imm2
- VREP \vr1, \vr3, \imm2, 2
-.endm
-.macro VREPG vr1, vr3, imm2
- VREP \vr1, \vr3, \imm2, 3
-.endm
-
-/* VECTOR MERGE HIGH */
-.macro VMRH vr1, vr2, vr3, m4
- VX_NUM v1, \vr1
- VX_NUM v2, \vr2
- VX_NUM v3, \vr3
- .word 0xE700 | ((v1&15) << 4) | (v2&15)
- .word ((v3&15) << 12)
- MRXBOPC \m4, 0x61, v1, v2, v3
-.endm
-.macro VMRHB vr1, vr2, vr3
- VMRH \vr1, \vr2, \vr3, 0
-.endm
-.macro VMRHH vr1, vr2, vr3
- VMRH \vr1, \vr2, \vr3, 1
-.endm
-.macro VMRHF vr1, vr2, vr3
- VMRH \vr1, \vr2, \vr3, 2
-.endm
-.macro VMRHG vr1, vr2, vr3
- VMRH \vr1, \vr2, \vr3, 3
-.endm
-
-/* VECTOR MERGE LOW */
-.macro VMRL vr1, vr2, vr3, m4
- VX_NUM v1, \vr1
- VX_NUM v2, \vr2
- VX_NUM v3, \vr3
- .word 0xE700 | ((v1&15) << 4) | (v2&15)
- .word ((v3&15) << 12)
- MRXBOPC \m4, 0x60, v1, v2, v3
-.endm
-.macro VMRLB vr1, vr2, vr3
- VMRL \vr1, \vr2, \vr3, 0
-.endm
-.macro VMRLH vr1, vr2, vr3
- VMRL \vr1, \vr2, \vr3, 1
-.endm
-.macro VMRLF vr1, vr2, vr3
- VMRL \vr1, \vr2, \vr3, 2
-.endm
-.macro VMRLG vr1, vr2, vr3
- VMRL \vr1, \vr2, \vr3, 3
-.endm
-
-
-/* Vector integer instructions */
-
-/* VECTOR AND */
-.macro VN vr1, vr2, vr3
- VX_NUM v1, \vr1
- VX_NUM v2, \vr2
- VX_NUM v3, \vr3
- .word 0xE700 | ((v1&15) << 4) | (v2&15)
- .word ((v3&15) << 12)
- MRXBOPC 0, 0x68, v1, v2, v3
-.endm
-
-/* VECTOR EXCLUSIVE OR */
-.macro VX vr1, vr2, vr3
- VX_NUM v1, \vr1
- VX_NUM v2, \vr2
- VX_NUM v3, \vr3
- .word 0xE700 | ((v1&15) << 4) | (v2&15)
- .word ((v3&15) << 12)
- MRXBOPC 0, 0x6D, v1, v2, v3
-.endm
-
-/* VECTOR GALOIS FIELD MULTIPLY SUM */
-.macro VGFM vr1, vr2, vr3, m4
- VX_NUM v1, \vr1
- VX_NUM v2, \vr2
- VX_NUM v3, \vr3
- .word 0xE700 | ((v1&15) << 4) | (v2&15)
- .word ((v3&15) << 12)
- MRXBOPC \m4, 0xB4, v1, v2, v3
-.endm
-.macro VGFMB vr1, vr2, vr3
- VGFM \vr1, \vr2, \vr3, 0
-.endm
-.macro VGFMH vr1, vr2, vr3
- VGFM \vr1, \vr2, \vr3, 1
-.endm
-.macro VGFMF vr1, vr2, vr3
- VGFM \vr1, \vr2, \vr3, 2
-.endm
-.macro VGFMG vr1, vr2, vr3
- VGFM \vr1, \vr2, \vr3, 3
-.endm
-
-/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */
-.macro VGFMA vr1, vr2, vr3, vr4, m5
- VX_NUM v1, \vr1
- VX_NUM v2, \vr2
- VX_NUM v3, \vr3
- VX_NUM v4, \vr4
- .word 0xE700 | ((v1&15) << 4) | (v2&15)
- .word ((v3&15) << 12) | (\m5 << 8)
- MRXBOPC (v4&15), 0xBC, v1, v2, v3, v4
-.endm
-.macro VGFMAB vr1, vr2, vr3, vr4
- VGFMA \vr1, \vr2, \vr3, \vr4, 0
-.endm
-.macro VGFMAH vr1, vr2, vr3, vr4
- VGFMA \vr1, \vr2, \vr3, \vr4, 1
-.endm
-.macro VGFMAF vr1, vr2, vr3, vr4
- VGFMA \vr1, \vr2, \vr3, \vr4, 2
-.endm
-.macro VGFMAG vr1, vr2, vr3, vr4
- VGFMA \vr1, \vr2, \vr3, \vr4, 3
-.endm
-
-/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */
-.macro VSRLB vr1, vr2, vr3
- VX_NUM v1, \vr1
- VX_NUM v2, \vr2
- VX_NUM v3, \vr3
- .word 0xE700 | ((v1&15) << 4) | (v2&15)
- .word ((v3&15) << 12)
- MRXBOPC 0, 0x7D, v1, v2, v3
-.endm
-
-/* VECTOR REPLICATE IMMEDIATE */
-.macro VREPI vr1, imm2, m3
- VX_NUM v1, \vr1
- .word 0xE700 | ((v1&15) << 4)
- .word \imm2
- MRXBOPC \m3, 0x45, v1
-.endm
-.macro VREPIB vr1, imm2
- VREPI \vr1, \imm2, 0
-.endm
-.macro VREPIH vr1, imm2
- VREPI \vr1, \imm2, 1
-.endm
-.macro VREPIF vr1, imm2
- VREPI \vr1, \imm2, 2
-.endm
-.macro VREPIG vr1, imm2
- VREP \vr1, \imm2, 3
-.endm
-
-/* VECTOR ADD */
-.macro VA vr1, vr2, vr3, m4
- VX_NUM v1, \vr1
- VX_NUM v2, \vr2
- VX_NUM v3, \vr3
- .word 0xE700 | ((v1&15) << 4) | (v2&15)
- .word ((v3&15) << 12)
- MRXBOPC \m4, 0xF3, v1, v2, v3
-.endm
-.macro VAB vr1, vr2, vr3
- VA \vr1, \vr2, \vr3, 0
-.endm
-.macro VAH vr1, vr2, vr3
- VA \vr1, \vr2, \vr3, 1
-.endm
-.macro VAF vr1, vr2, vr3
- VA \vr1, \vr2, \vr3, 2
-.endm
-.macro VAG vr1, vr2, vr3
- VA \vr1, \vr2, \vr3, 3
-.endm
-.macro VAQ vr1, vr2, vr3
- VA \vr1, \vr2, \vr3, 4
-.endm
-
-/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */
-.macro VESRAV vr1, vr2, vr3, m4
- VX_NUM v1, \vr1
- VX_NUM v2, \vr2
- VX_NUM v3, \vr3
- .word 0xE700 | ((v1&15) << 4) | (v2&15)
- .word ((v3&15) << 12)
- MRXBOPC \m4, 0x7A, v1, v2, v3
-.endm
-
-.macro VESRAVB vr1, vr2, vr3
- VESRAV \vr1, \vr2, \vr3, 0
-.endm
-.macro VESRAVH vr1, vr2, vr3
- VESRAV \vr1, \vr2, \vr3, 1
-.endm
-.macro VESRAVF vr1, vr2, vr3
- VESRAV \vr1, \vr2, \vr3, 2
-.endm
-.macro VESRAVG vr1, vr2, vr3
- VESRAV \vr1, \vr2, \vr3, 3
-.endm
+#include <asm/vx-insn-asm.h>
-/* VECTOR ELEMENT ROTATE LEFT LOGICAL */
-.macro VERLL vr1, vr3, disp, base="%r0", m4
- VX_NUM v1, \vr1
- VX_NUM v3, \vr3
- GR_NUM b2, \base
- .word 0xE700 | ((v1&15) << 4) | (v3&15)
- .word (b2 << 12) | (\disp)
- MRXBOPC \m4, 0x33, v1, v3
-.endm
-.macro VERLLB vr1, vr3, disp, base="%r0"
- VERLL \vr1, \vr3, \disp, \base, 0
-.endm
-.macro VERLLH vr1, vr3, disp, base="%r0"
- VERLL \vr1, \vr3, \disp, \base, 1
-.endm
-.macro VERLLF vr1, vr3, disp, base="%r0"
- VERLL \vr1, \vr3, \disp, \base, 2
-.endm
-.macro VERLLG vr1, vr3, disp, base="%r0"
- VERLL \vr1, \vr3, \disp, \base, 3
-.endm
+#ifndef __ASSEMBLY__
-/* VECTOR SHIFT LEFT DOUBLE BY BYTE */
-.macro VSLDB vr1, vr2, vr3, imm4
- VX_NUM v1, \vr1
- VX_NUM v2, \vr2
- VX_NUM v3, \vr3
- .word 0xE700 | ((v1&15) << 4) | (v2&15)
- .word ((v3&15) << 12) | (\imm4)
- MRXBOPC 0, 0x77, v1, v2, v3
-.endm
+asm(".include \"asm/vx-insn-asm.h\"\n");
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_VX_INSN_H */
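
[Annotation: with the wrapper in place, C code can use the macros inside inline assembly after including <asm/vx-insn.h>. A hedged sketch, storing vector registers 0-15 to a 256-byte buffer; the function name and buffer handling are illustrative:]

#include <asm/vx-insn.h>

static inline void example_save_vx_low(void *buf)
{
	/* VSTM expands to raw opcode bytes via the vx-insn-asm.h macros */
	asm volatile("lgr	1,%[buf]\n"
		     "VSTM	0,15,0,1\n"
		     : : [buf] "d" (buf) : "1", "memory");
}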