From c808a6ec7166105818e698be9ead396233eb91b8 Mon Sep 17 00:00:00 2001 From: Xiu Jianfeng Date: Mon, 6 Jun 2022 18:10:42 +0800 Subject: evm: Use IS_ENABLED to initialize .enabled Use IS_ENABLED(CONFIG_XXX) instead of #ifdef/#endif statements to initialize .enabled, minor simplicity improvement. Signed-off-by: Xiu Jianfeng Reviewed-by: Ahmad Fatoum Signed-off-by: Mimi Zohar --- security/integrity/evm/evm_main.c | 52 +++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 29 deletions(-) diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c index cc88f02c7562..397fea5b3fa6 100644 --- a/security/integrity/evm/evm_main.c +++ b/security/integrity/evm/evm_main.c @@ -36,42 +36,36 @@ static const char * const integrity_status_msg[] = { int evm_hmac_attrs; static struct xattr_list evm_config_default_xattrnames[] = { - {.name = XATTR_NAME_SELINUX, -#ifdef CONFIG_SECURITY_SELINUX - .enabled = true -#endif + { + .name = XATTR_NAME_SELINUX, + .enabled = IS_ENABLED(CONFIG_SECURITY_SELINUX) }, - {.name = XATTR_NAME_SMACK, -#ifdef CONFIG_SECURITY_SMACK - .enabled = true -#endif + { + .name = XATTR_NAME_SMACK, + .enabled = IS_ENABLED(CONFIG_SECURITY_SMACK) }, - {.name = XATTR_NAME_SMACKEXEC, -#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS - .enabled = true -#endif + { + .name = XATTR_NAME_SMACKEXEC, + .enabled = IS_ENABLED(CONFIG_EVM_EXTRA_SMACK_XATTRS) }, - {.name = XATTR_NAME_SMACKTRANSMUTE, -#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS - .enabled = true -#endif + { + .name = XATTR_NAME_SMACKTRANSMUTE, + .enabled = IS_ENABLED(CONFIG_EVM_EXTRA_SMACK_XATTRS) }, - {.name = XATTR_NAME_SMACKMMAP, -#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS - .enabled = true -#endif + { + .name = XATTR_NAME_SMACKMMAP, + .enabled = IS_ENABLED(CONFIG_EVM_EXTRA_SMACK_XATTRS) }, - {.name = XATTR_NAME_APPARMOR, -#ifdef CONFIG_SECURITY_APPARMOR - .enabled = true -#endif + { + .name = XATTR_NAME_APPARMOR, + .enabled = IS_ENABLED(CONFIG_SECURITY_APPARMOR) }, - {.name = XATTR_NAME_IMA, -#ifdef CONFIG_IMA_APPRAISE - .enabled = true -#endif + { + .name = XATTR_NAME_IMA, + .enabled = IS_ENABLED(CONFIG_IMA_APPRAISE) }, - {.name = XATTR_NAME_CAPS, + { + .name = XATTR_NAME_CAPS, .enabled = true }, }; -- cgit v1.2.3 From 65d9a9a60fd71be964effb2e94747a6acb6e7015 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Fri, 1 Jul 2022 13:04:04 +0530 Subject: kexec_file: drop weak attribute from functions As requested (http://lkml.kernel.org/r/87ee0q7b92.fsf@email.froward.int.ebiederm.org), this series converts weak functions in kexec to use the #ifdef approach. Quoting the 3e35142ef99fe ("kexec_file: drop weak attribute from arch_kexec_apply_relocations[_add]") changelog: : Since commit d1bcae833b32f1 ("ELF: Don't generate unused section symbols") : [1], binutils (v2.36+) started dropping section symbols that it thought : were unused. This isn't an issue in general, but with kexec_file.c, gcc : is placing kexec_arch_apply_relocations[_add] into a separate : .text.unlikely section and the section symbol ".text.unlikely" is being : dropped. Due to this, recordmcount is unable to find a non-weak symbol in : .text.unlikely to generate a relocation record against. This patch (of 2); Drop __weak attribute from functions in kexec_file.c: - arch_kexec_kernel_image_probe() - arch_kimage_file_post_load_cleanup() - arch_kexec_kernel_image_load() - arch_kexec_locate_mem_hole() - arch_kexec_kernel_verify_sig() arch_kexec_kernel_image_load() calls into kexec_image_load_default(), so drop the static attribute for the latter. 
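The conversion follows the same pattern already used for arch_kexec_apply_relocations[_add]: the generic header keeps a static inline fallback guarded by #ifndef, and an architecture that wants its own implementation declares the function and defines a same-named macro in its asm/kexec.h, which linux/kexec.h includes before the fallbacks are considered. A rough sketch of the mechanism, with a placeholder name rather than the real kexec hooks:

/*
 * Placeholder name (arch_do_thing) -- not a real kexec hook, only the
 * shape of the override mechanism this series switches to.
 */

/* arch/<arch>/include/asm/kexec.h: architecture provides its own version. */
int arch_do_thing(void);
#define arch_do_thing arch_do_thing	/* hides the generic fallback below */

/* include/linux/kexec.h: generic default, used only when no arch override. */
#ifndef arch_do_thing
static inline int arch_do_thing(void)
{
	return 0;			/* common default behaviour */
}
#endif

Because the override is resolved by the preprocessor instead of by weak linkage, no weak symbol is emitted for the compiler to place in .text.unlikely, which is what tripped up recordmcount in the first place.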
arch_kexec_kernel_verify_sig() is not overridden by any architecture, so drop the __weak attribute. Link: https://lkml.kernel.org/r/cover.1656659357.git.naveen.n.rao@linux.vnet.ibm.com Link: https://lkml.kernel.org/r/2cd7ca1fe4d6bb6ca38e3283c717878388ed6788.1656659357.git.naveen.n.rao@linux.vnet.ibm.com Signed-off-by: Naveen N. Rao Suggested-by: Eric Biederman Signed-off-by: Andrew Morton Signed-off-by: Mimi Zohar --- arch/arm64/include/asm/kexec.h | 4 +++- arch/powerpc/include/asm/kexec.h | 9 ++++++++ arch/s390/include/asm/kexec.h | 3 +++ arch/x86/include/asm/kexec.h | 6 ++++++ include/linux/kexec.h | 44 ++++++++++++++++++++++++++++++++++------ kernel/kexec_file.c | 35 ++------------------------------ 6 files changed, 61 insertions(+), 40 deletions(-) diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index 9839bfc163d7..78d272b26ebd 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -115,7 +115,9 @@ extern const struct kexec_file_ops kexec_image_ops; struct kimage; -extern int arch_kimage_file_post_load_cleanup(struct kimage *image); +int arch_kimage_file_post_load_cleanup(struct kimage *image); +#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup + extern int load_other_segments(struct kimage *image, unsigned long kernel_load_addr, unsigned long kernel_size, char *initrd, unsigned long initrd_len, diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index 2aefe14e1442..1e5e9b6ec78d 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -120,6 +120,15 @@ int setup_purgatory(struct kimage *image, const void *slave_code, #ifdef CONFIG_PPC64 struct kexec_buf; +int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len); +#define arch_kexec_kernel_image_probe arch_kexec_kernel_image_probe + +int arch_kimage_file_post_load_cleanup(struct kimage *image); +#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup + +int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf); +#define arch_kexec_locate_mem_hole arch_kexec_locate_mem_hole + int load_crashdump_segments_ppc64(struct kimage *image, struct kexec_buf *kbuf); int setup_purgatory_ppc64(struct kimage *image, const void *slave_code, diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h index 649ecdcc8734..8886aadc11a3 100644 --- a/arch/s390/include/asm/kexec.h +++ b/arch/s390/include/asm/kexec.h @@ -92,5 +92,8 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi, const Elf_Shdr *relsec, const Elf_Shdr *symtab); #define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add + +int arch_kimage_file_post_load_cleanup(struct kimage *image); +#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup #endif #endif /*_S390_KEXEC_H */ diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h index 6ad8d946cd3e..5ec359c1b50c 100644 --- a/arch/x86/include/asm/kexec.h +++ b/arch/x86/include/asm/kexec.h @@ -193,6 +193,12 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi, const Elf_Shdr *relsec, const Elf_Shdr *symtab); #define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add + +void *arch_kexec_kernel_image_load(struct kimage *image); +#define arch_kexec_kernel_image_load arch_kexec_kernel_image_load + +int arch_kimage_file_post_load_cleanup(struct kimage *image); +#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup #endif 
#endif diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 475683cd67f1..6958c6b471f4 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -188,21 +188,53 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, void *buf, unsigned int size, bool get_value); void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name); +void *kexec_image_load_default(struct kimage *image); + +#ifndef arch_kexec_kernel_image_probe +static inline int +arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len) +{ + return kexec_image_probe_default(image, buf, buf_len); +} +#endif + +#ifndef arch_kimage_file_post_load_cleanup +static inline int arch_kimage_file_post_load_cleanup(struct kimage *image) +{ + return kexec_image_post_load_cleanup_default(image); +} +#endif + +#ifndef arch_kexec_kernel_image_load +static inline void *arch_kexec_kernel_image_load(struct kimage *image) +{ + return kexec_image_load_default(image); +} +#endif -/* Architectures may override the below functions */ -int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, - unsigned long buf_len); -void *arch_kexec_kernel_image_load(struct kimage *image); -int arch_kimage_file_post_load_cleanup(struct kimage *image); #ifdef CONFIG_KEXEC_SIG int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, unsigned long buf_len); #endif -int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf); extern int kexec_add_buffer(struct kexec_buf *kbuf); int kexec_locate_mem_hole(struct kexec_buf *kbuf); +#ifndef arch_kexec_locate_mem_hole +/** + * arch_kexec_locate_mem_hole - Find free memory to place the segments. + * @kbuf: Parameters for the memory search. + * + * On success, kbuf->mem will have the start address of the memory region found. + * + * Return: 0 on success, negative errno on error. 
+ */ +static inline int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf) +{ + return kexec_locate_mem_hole(kbuf); +} +#endif + /* Alignment required for elf header segment */ #define ELF_CORE_HEADER_ALIGN 4096 diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index f9261c07b048..0c27c81351ee 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -62,14 +62,7 @@ int kexec_image_probe_default(struct kimage *image, void *buf, return ret; } -/* Architectures can provide this probe function */ -int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf, - unsigned long buf_len) -{ - return kexec_image_probe_default(image, buf, buf_len); -} - -static void *kexec_image_load_default(struct kimage *image) +void *kexec_image_load_default(struct kimage *image) { if (!image->fops || !image->fops->load) return ERR_PTR(-ENOEXEC); @@ -80,11 +73,6 @@ static void *kexec_image_load_default(struct kimage *image) image->cmdline_buf_len); } -void * __weak arch_kexec_kernel_image_load(struct kimage *image) -{ - return kexec_image_load_default(image); -} - int kexec_image_post_load_cleanup_default(struct kimage *image) { if (!image->fops || !image->fops->cleanup) @@ -93,11 +81,6 @@ int kexec_image_post_load_cleanup_default(struct kimage *image) return image->fops->cleanup(image->image_loader_data); } -int __weak arch_kimage_file_post_load_cleanup(struct kimage *image) -{ - return kexec_image_post_load_cleanup_default(image); -} - #ifdef CONFIG_KEXEC_SIG static int kexec_image_verify_sig_default(struct kimage *image, void *buf, unsigned long buf_len) @@ -110,8 +93,7 @@ static int kexec_image_verify_sig_default(struct kimage *image, void *buf, return image->fops->verify_sig(buf, buf_len); } -int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, - unsigned long buf_len) +int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, unsigned long buf_len) { return kexec_image_verify_sig_default(image, buf, buf_len); } @@ -621,19 +603,6 @@ int kexec_locate_mem_hole(struct kexec_buf *kbuf) return ret == 1 ? 0 : -EADDRNOTAVAIL; } -/** - * arch_kexec_locate_mem_hole - Find free memory to place the segments. - * @kbuf: Parameters for the memory search. - * - * On success, kbuf->mem will have the start address of the memory region found. - * - * Return: 0 on success, negative errno on error. - */ -int __weak arch_kexec_locate_mem_hole(struct kexec_buf *kbuf) -{ - return kexec_locate_mem_hole(kbuf); -} - /** * kexec_add_buffer - place a buffer in a kexec segment * @kbuf: Buffer contents and memory parameters. -- cgit v1.2.3 From 0738eceb6201691534df07e0928d0a6168a35787 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Fri, 1 Jul 2022 13:04:05 +0530 Subject: kexec: drop weak attribute from functions Drop __weak attribute from functions in kexec_core.c: - machine_kexec_post_load() - arch_kexec_protect_crashkres() - arch_kexec_unprotect_crashkres() - crash_free_reserved_phys_range() Link: https://lkml.kernel.org/r/c0f6219e03cb399d166d518ab505095218a902dd.1656659357.git.naveen.n.rao@linux.vnet.ibm.com Signed-off-by: Naveen N. 
Rao Suggested-by: Eric Biederman Signed-off-by: Andrew Morton Signed-off-by: Mimi Zohar --- arch/arm64/include/asm/kexec.h | 16 ++++++++++++++-- arch/powerpc/include/asm/kexec.h | 5 +++++ arch/s390/include/asm/kexec.h | 11 +++++++++++ arch/x86/include/asm/kexec.h | 6 ++++++ include/linux/kexec.h | 32 ++++++++++++++++++++++++++++---- kernel/kexec_core.c | 27 --------------------------- 6 files changed, 64 insertions(+), 33 deletions(-) diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index 78d272b26ebd..559bfae26715 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -84,16 +84,30 @@ static inline void crash_setup_regs(struct pt_regs *newregs, extern bool crash_is_nosave(unsigned long pfn); extern void crash_prepare_suspend(void); extern void crash_post_resume(void); + +void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); +#define crash_free_reserved_phys_range crash_free_reserved_phys_range #else static inline bool crash_is_nosave(unsigned long pfn) {return false; } static inline void crash_prepare_suspend(void) {} static inline void crash_post_resume(void) {} #endif +struct kimage; + #if defined(CONFIG_KEXEC_CORE) void cpu_soft_restart(unsigned long el2_switch, unsigned long entry, unsigned long arg0, unsigned long arg1, unsigned long arg2); + +int machine_kexec_post_load(struct kimage *image); +#define machine_kexec_post_load machine_kexec_post_load + +void arch_kexec_protect_crashkres(void); +#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres + +void arch_kexec_unprotect_crashkres(void); +#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres #endif #define ARCH_HAS_KIMAGE_ARCH @@ -113,8 +127,6 @@ struct kimage_arch { #ifdef CONFIG_KEXEC_FILE extern const struct kexec_file_ops kexec_image_ops; -struct kimage; - int arch_kimage_file_post_load_cleanup(struct kimage *image); #define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index 1e5e9b6ec78d..d6f4edfe4737 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -98,6 +98,11 @@ void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_co void kexec_copy_flush(struct kimage *image); +#if defined(CONFIG_CRASH_DUMP) && defined(CONFIG_PPC_RTAS) +void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); +#define crash_free_reserved_phys_range crash_free_reserved_phys_range +#endif + #ifdef CONFIG_KEXEC_FILE extern const struct kexec_file_ops kexec_elf64_ops; diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h index 8886aadc11a3..1bd08eb56d5f 100644 --- a/arch/s390/include/asm/kexec.h +++ b/arch/s390/include/asm/kexec.h @@ -85,6 +85,17 @@ struct kimage_arch { extern const struct kexec_file_ops s390_kexec_image_ops; extern const struct kexec_file_ops s390_kexec_elf_ops; +#ifdef CONFIG_CRASH_DUMP +void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); +#define crash_free_reserved_phys_range crash_free_reserved_phys_range + +void arch_kexec_protect_crashkres(void); +#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres + +void arch_kexec_unprotect_crashkres(void); +#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres +#endif + #ifdef CONFIG_KEXEC_FILE struct purgatory_info; int arch_kexec_apply_relocations_add(struct purgatory_info *pi, diff --git a/arch/x86/include/asm/kexec.h 
b/arch/x86/include/asm/kexec.h index 5ec359c1b50c..a3760ca796aa 100644 --- a/arch/x86/include/asm/kexec.h +++ b/arch/x86/include/asm/kexec.h @@ -186,6 +186,12 @@ extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages); #define arch_kexec_pre_free_pages arch_kexec_pre_free_pages +void arch_kexec_protect_crashkres(void); +#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres + +void arch_kexec_unprotect_crashkres(void); +#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres + #ifdef CONFIG_KEXEC_FILE struct purgatory_info; int arch_kexec_apply_relocations_add(struct purgatory_info *pi, diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 6958c6b471f4..8107606ad1e8 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -390,7 +390,10 @@ extern void machine_kexec_cleanup(struct kimage *image); extern int kernel_kexec(void); extern struct page *kimage_alloc_control_pages(struct kimage *image, unsigned int order); -int machine_kexec_post_load(struct kimage *image); + +#ifndef machine_kexec_post_load +static inline int machine_kexec_post_load(struct kimage *image) { return 0; } +#endif extern void __crash_kexec(struct pt_regs *); extern void crash_kexec(struct pt_regs *); @@ -423,10 +426,21 @@ extern bool kexec_in_progress; int crash_shrink_memory(unsigned long new_size); size_t crash_get_memory_size(void); -void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); -void arch_kexec_protect_crashkres(void); -void arch_kexec_unprotect_crashkres(void); +#ifndef arch_kexec_protect_crashkres +/* + * Protection mechanism for crashkernel reserved memory after + * the kdump kernel is loaded. + * + * Provide an empty default implementation here -- architecture + * code may override this + */ +static inline void arch_kexec_protect_crashkres(void) { } +#endif + +#ifndef arch_kexec_unprotect_crashkres +static inline void arch_kexec_unprotect_crashkres(void) { } +#endif #ifndef page_to_boot_pfn static inline unsigned long page_to_boot_pfn(struct page *page) @@ -456,6 +470,16 @@ static inline phys_addr_t boot_phys_to_phys(unsigned long boot_phys) } #endif +#ifndef crash_free_reserved_phys_range +static inline void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) +{ + unsigned long addr; + + for (addr = begin; addr < end; addr += PAGE_SIZE) + free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT)); +} +#endif + static inline unsigned long virt_to_boot_phys(void *addr) { return phys_to_boot_phys(__pa((unsigned long)addr)); diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 4d34c78334ce..acd029b307e4 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -591,11 +591,6 @@ static void kimage_free_extra_pages(struct kimage *image) } -int __weak machine_kexec_post_load(struct kimage *image) -{ - return 0; -} - void kimage_terminate(struct kimage *image) { if (*image->entry != 0) @@ -1020,15 +1015,6 @@ size_t crash_get_memory_size(void) return size; } -void __weak crash_free_reserved_phys_range(unsigned long begin, - unsigned long end) -{ - unsigned long addr; - - for (addr = begin; addr < end; addr += PAGE_SIZE) - free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT)); -} - int crash_shrink_memory(unsigned long new_size) { int ret = 0; @@ -1225,16 +1211,3 @@ int kernel_kexec(void) mutex_unlock(&kexec_mutex); return error; } - -/* - * Protection mechanism for crashkernel reserved memory after - * the kdump kernel is 
loaded. - * - * Provide an empty default implementation here -- architecture - * code may override this - */ -void __weak arch_kexec_protect_crashkres(void) -{} - -void __weak arch_kexec_unprotect_crashkres(void) -{} -- cgit v1.2.3 From 689a71493bd2f31c024f8c0395f85a1fd4b2138e Mon Sep 17 00:00:00 2001 From: Coiby Xu Date: Thu, 14 Jul 2022 21:40:24 +0800 Subject: kexec: clean up arch_kexec_kernel_verify_sig Before commit 105e10e2cf1c ("kexec_file: drop weak attribute from functions"), there was already no arch-specific implementation of arch_kexec_kernel_verify_sig. With weak attribute dropped by that commit, arch_kexec_kernel_verify_sig is completely useless. So clean it up. Note later patches are dependent on this patch so it should be backported to the stable tree as well. Cc: stable@vger.kernel.org Suggested-by: Eric W. Biederman Reviewed-by: Michal Suchanek Acked-by: Baoquan He Signed-off-by: Coiby Xu [zohar@linux.ibm.com: reworded patch description "Note"] Link: https://lore.kernel.org/linux-integrity/20220714134027.394370-1-coxu@redhat.com/ Signed-off-by: Mimi Zohar --- include/linux/kexec.h | 5 ----- kernel/kexec_file.c | 33 +++++++++++++-------------------- 2 files changed, 13 insertions(+), 25 deletions(-) diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 8107606ad1e8..7f710fb3712b 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -212,11 +212,6 @@ static inline void *arch_kexec_kernel_image_load(struct kimage *image) } #endif -#ifdef CONFIG_KEXEC_SIG -int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, - unsigned long buf_len); -#endif - extern int kexec_add_buffer(struct kexec_buf *kbuf); int kexec_locate_mem_hole(struct kexec_buf *kbuf); diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index 0c27c81351ee..6dc1294c90fc 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -81,24 +81,6 @@ int kexec_image_post_load_cleanup_default(struct kimage *image) return image->fops->cleanup(image->image_loader_data); } -#ifdef CONFIG_KEXEC_SIG -static int kexec_image_verify_sig_default(struct kimage *image, void *buf, - unsigned long buf_len) -{ - if (!image->fops || !image->fops->verify_sig) { - pr_debug("kernel loader does not support signature verification.\n"); - return -EKEYREJECTED; - } - - return image->fops->verify_sig(buf, buf_len); -} - -int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, unsigned long buf_len) -{ - return kexec_image_verify_sig_default(image, buf, buf_len); -} -#endif - /* * Free up memory used by kernel, initrd, and command line. 
This is temporary * memory allocation which is not needed any more after these buffers have @@ -141,13 +123,24 @@ void kimage_file_post_load_cleanup(struct kimage *image) } #ifdef CONFIG_KEXEC_SIG +static int kexec_image_verify_sig(struct kimage *image, void *buf, + unsigned long buf_len) +{ + if (!image->fops || !image->fops->verify_sig) { + pr_debug("kernel loader does not support signature verification.\n"); + return -EKEYREJECTED; + } + + return image->fops->verify_sig(buf, buf_len); +} + static int kimage_validate_signature(struct kimage *image) { int ret; - ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf, - image->kernel_buf_len); + ret = kexec_image_verify_sig(image, image->kernel_buf, + image->kernel_buf_len); if (ret) { if (sig_enforce) { -- cgit v1.2.3 From c903dae8941deb55043ee46ded29e84e97cd84bb Mon Sep 17 00:00:00 2001 From: Coiby Xu Date: Thu, 14 Jul 2022 21:40:25 +0800 Subject: kexec, KEYS: make the code in bzImage64_verify_sig generic commit 278311e417be ("kexec, KEYS: Make use of platform keyring for signature verify") adds platform keyring support on x86 kexec but not arm64. The code in bzImage64_verify_sig uses the keys on the .builtin_trusted_keys, .machine, if configured and enabled, .secondary_trusted_keys, also if configured, and .platform keyrings to verify the signed kernel image as PE file. Cc: kexec@lists.infradead.org Cc: keyrings@vger.kernel.org Cc: linux-security-module@vger.kernel.org Reviewed-by: Michal Suchanek Signed-off-by: Coiby Xu Signed-off-by: Mimi Zohar --- arch/x86/kernel/kexec-bzimage64.c | 20 +------------------- include/linux/kexec.h | 7 +++++++ kernel/kexec_file.c | 17 +++++++++++++++++ 3 files changed, 25 insertions(+), 19 deletions(-) diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index 170d0fd68b1f..f299b48f9c9f 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include @@ -528,28 +527,11 @@ static int bzImage64_cleanup(void *loader_data) return 0; } -#ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG -static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len) -{ - int ret; - - ret = verify_pefile_signature(kernel, kernel_len, - VERIFY_USE_SECONDARY_KEYRING, - VERIFYING_KEXEC_PE_SIGNATURE); - if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING)) { - ret = verify_pefile_signature(kernel, kernel_len, - VERIFY_USE_PLATFORM_KEYRING, - VERIFYING_KEXEC_PE_SIGNATURE); - } - return ret; -} -#endif - const struct kexec_file_ops kexec_bzImage64_ops = { .probe = bzImage64_probe, .load = bzImage64_load, .cleanup = bzImage64_cleanup, #ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG - .verify_sig = bzImage64_verify_sig, + .verify_sig = kexec_kernel_verify_pe_sig, #endif }; diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 7f710fb3712b..13e6c4b58f07 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -19,6 +19,7 @@ #include #include +#include /* Location of a reserved region to hold the crash kernel. 
*/ @@ -212,6 +213,12 @@ static inline void *arch_kexec_kernel_image_load(struct kimage *image) } #endif +#ifdef CONFIG_KEXEC_SIG +#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION +int kexec_kernel_verify_pe_sig(const char *kernel, unsigned long kernel_len); +#endif +#endif + extern int kexec_add_buffer(struct kexec_buf *kbuf); int kexec_locate_mem_hole(struct kexec_buf *kbuf); diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index 6dc1294c90fc..a7b411c22f19 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -123,6 +123,23 @@ void kimage_file_post_load_cleanup(struct kimage *image) } #ifdef CONFIG_KEXEC_SIG +#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION +int kexec_kernel_verify_pe_sig(const char *kernel, unsigned long kernel_len) +{ + int ret; + + ret = verify_pefile_signature(kernel, kernel_len, + VERIFY_USE_SECONDARY_KEYRING, + VERIFYING_KEXEC_PE_SIGNATURE); + if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING)) { + ret = verify_pefile_signature(kernel, kernel_len, + VERIFY_USE_PLATFORM_KEYRING, + VERIFYING_KEXEC_PE_SIGNATURE); + } + return ret; +} +#endif + static int kexec_image_verify_sig(struct kimage *image, void *buf, unsigned long buf_len) { -- cgit v1.2.3 From 0d519cadf75184a24313568e7f489a7fc9b1be3b Mon Sep 17 00:00:00 2001 From: Coiby Xu Date: Thu, 14 Jul 2022 21:40:26 +0800 Subject: arm64: kexec_file: use more system keyrings to verify kernel image signature Currently, when loading a kernel image via the kexec_file_load() system call, arm64 can only use the .builtin_trusted_keys keyring to verify a signature whereas x86 can use three more keyrings i.e. .secondary_trusted_keys, .machine and .platform keyrings. For example, one resulting problem is kexec'ing a kernel image would be rejected with the error "Lockdown: kexec: kexec of unsigned images is restricted; see man kernel_lockdown.7". This patch set enables arm64 to make use of the same keyrings as x86 to verify the signature kexec'ed kernel image. 
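For orientation, a much-simplified sketch of the verification path once this series is applied (intermediate helpers elided and bodies paraphrased -- see kernel/kexec_file.c for the authoritative code):

/*
 * kexec_file_load() syscall
 *   -> kimage_validate_signature()           kernel/kexec_file.c, CONFIG_KEXEC_SIG
 *        -> kexec_image_verify_sig()
 *             -> image->fops->verify_sig()   per image-format hook
 *                  == kexec_kernel_verify_pe_sig()  shared by x86 bzImage64 and,
 *                                                   with this patch, arm64 Image
 *                       -> verify_pefile_signature(..., VERIFY_USE_SECONDARY_KEYRING, ...)
 *                       -> on -ENOKEY, retried with VERIFY_USE_PLATFORM_KEYRING
 *                          (only if CONFIG_INTEGRITY_PLATFORM_KEYRING is enabled)
 */

Where arm64 previously passed NULL as the trusted keyring and so consulted only .builtin_trusted_keys, going through the shared helper brings in .secondary_trusted_keys (and the .machine keyring linked into it, when configured) plus the .platform keyring fallback.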
Fixes: 732b7b93d849 ("arm64: kexec_file: add kernel signature verification support") Cc: stable@vger.kernel.org # 105e10e2cf1c: kexec_file: drop weak attribute from functions Cc: stable@vger.kernel.org # 34d5960af253: kexec: clean up arch_kexec_kernel_verify_sig Cc: stable@vger.kernel.org # 83b7bb2d49ae: kexec, KEYS: make the code in bzImage64_verify_sig generic Acked-by: Baoquan He Cc: kexec@lists.infradead.org Cc: keyrings@vger.kernel.org Cc: linux-security-module@vger.kernel.org Co-developed-by: Michal Suchanek Signed-off-by: Michal Suchanek Acked-by: Will Deacon Signed-off-by: Coiby Xu Signed-off-by: Mimi Zohar --- arch/arm64/kernel/kexec_image.c | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/arch/arm64/kernel/kexec_image.c b/arch/arm64/kernel/kexec_image.c index 9ec34690e255..5ed6a585f21f 100644 --- a/arch/arm64/kernel/kexec_image.c +++ b/arch/arm64/kernel/kexec_image.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -130,18 +129,10 @@ static void *image_load(struct kimage *image, return NULL; } -#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG -static int image_verify_sig(const char *kernel, unsigned long kernel_len) -{ - return verify_pefile_signature(kernel, kernel_len, NULL, - VERIFYING_KEXEC_PE_SIGNATURE); -} -#endif - const struct kexec_file_ops kexec_image_ops = { .probe = image_probe, .load = image_load, #ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG - .verify_sig = image_verify_sig, + .verify_sig = kexec_kernel_verify_pe_sig, #endif }; -- cgit v1.2.3 From 0828c4a39be57768b8788e8cbd0d84683ea757e5 Mon Sep 17 00:00:00 2001 From: Michal Suchanek Date: Thu, 14 Jul 2022 21:40:27 +0800 Subject: kexec, KEYS, s390: Make use of built-in and secondary keyring for signature verification commit e23a8020ce4e ("s390/kexec_file: Signature verification prototype") adds support for KEXEC_SIG verification with keys from platform keyring but the built-in keys and secondary keyring are not used. Add support for the built-in keys and secondary keyring as x86 does. Fixes: e23a8020ce4e ("s390/kexec_file: Signature verification prototype") Cc: stable@vger.kernel.org Cc: Philipp Rudo Cc: kexec@lists.infradead.org Cc: keyrings@vger.kernel.org Cc: linux-security-module@vger.kernel.org Signed-off-by: Michal Suchanek Reviewed-by: "Lee, Chun-Yi" Acked-by: Baoquan He Signed-off-by: Coiby Xu Acked-by: Heiko Carstens Signed-off-by: Mimi Zohar --- arch/s390/kernel/machine_kexec_file.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c index 8f43575a4dd3..fc6d5f58debe 100644 --- a/arch/s390/kernel/machine_kexec_file.c +++ b/arch/s390/kernel/machine_kexec_file.c @@ -31,6 +31,7 @@ int s390_verify_sig(const char *kernel, unsigned long kernel_len) const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1; struct module_signature *ms; unsigned long sig_len; + int ret; /* Skip signature verification when not secure IPLed. 
*/ if (!ipl_secure_flag) @@ -65,11 +66,18 @@ int s390_verify_sig(const char *kernel, unsigned long kernel_len) return -EBADMSG; } - return verify_pkcs7_signature(kernel, kernel_len, - kernel + kernel_len, sig_len, - VERIFY_USE_PLATFORM_KEYRING, - VERIFYING_MODULE_SIGNATURE, - NULL, NULL); + ret = verify_pkcs7_signature(kernel, kernel_len, + kernel + kernel_len, sig_len, + VERIFY_USE_SECONDARY_KEYRING, + VERIFYING_MODULE_SIGNATURE, + NULL, NULL); + if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING)) + ret = verify_pkcs7_signature(kernel, kernel_len, + kernel + kernel_len, sig_len, + VERIFY_USE_PLATFORM_KEYRING, + VERIFYING_MODULE_SIGNATURE, + NULL, NULL); + return ret; } #endif /* CONFIG_KEXEC_SIG */ -- cgit v1.2.3
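The try-the-trusted-keyrings-then-fall-back-to-.platform idiom used here is the same one kexec_kernel_verify_pe_sig applies to PE images on x86 and arm64; s390 applies it to the appended PKCS#7 signature instead. Purely as an illustration -- this helper does not exist in the tree -- the shared shape of the idiom is:

#include <linux/errno.h>
#include <linux/kconfig.h>
#include <linux/verification.h>

/*
 * Hypothetical helper, NOT from the kernel tree: run a verification
 * callback against the built-in/secondary keyrings first and retry
 * against the firmware-provided .platform keyring only when the key
 * was simply not found there.
 */
static int verify_with_platform_fallback(int (*do_verify)(struct key *trusted_keys,
							   void *ctx),
					 void *ctx)
{
	int ret;

	/* .builtin_trusted_keys + .secondary_trusted_keys (+ linked .machine). */
	ret = do_verify(VERIFY_USE_SECONDARY_KEYRING, ctx);

	/* IS_ENABLED() lets the fallback compile away when the .platform
	 * keyring is not configured, without an #ifdef at the call site. */
	if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING))
		ret = do_verify(VERIFY_USE_PLATFORM_KEYRING, ctx);

	return ret;
}

Gating the retry on -ENOKEY keeps hard failures (e.g. -EBADMSG or -EKEYREJECTED) fatal: only "key not found in this keyring" is treated as a reason to consult the next keyring.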