Diffstat (limited to 'security')
 security/apparmor/domain.c                |   6
 security/integrity/digsig.c               |   2
 security/integrity/evm/evm_crypto.c       |  12
 security/integrity/evm/evm_main.c         |   4
 security/integrity/ima/Kconfig            |  12
 security/integrity/ima/Makefile           |   1
 security/integrity/ima/ima.h              |  31
 security/integrity/ima/ima_appraise.c     |  13
 security/integrity/ima/ima_crypto.c       |   6
 security/integrity/ima/ima_fs.c           |  32
 security/integrity/ima/ima_init.c         |   5
 security/integrity/ima/ima_kexec.c        | 168
 security/integrity/ima/ima_main.c         |   1
 security/integrity/ima/ima_queue.c        |  77
 security/integrity/ima/ima_template.c     | 297
 security/integrity/ima/ima_template_lib.c |   7
 security/keys/Kconfig                     |   2
 security/keys/big_key.c                   |  59
 security/keys/keyctl.c                    |   2
 security/keys/proc.c                      |   2
 security/selinux/hooks.c                  |  11
 security/smack/smack.h                    |   1
 security/smack/smack_access.c             |   7
 security/smack/smack_lsm.c                | 119
 security/smack/smackfs.c                  |   3
 security/tomoyo/domain.c                  |   2
 security/yama/yama_lsm.c                  |  16
 27 files changed, 740 insertions(+), 158 deletions(-)
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index fc3036b34e51..a4d90aa1045a 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -621,8 +621,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
/* released below */
cred = get_current_cred();
cxt = cred_cxt(cred);
- profile = aa_cred_profile(cred);
- previous_profile = cxt->previous;
+ profile = aa_get_newest_profile(aa_cred_profile(cred));
+ previous_profile = aa_get_newest_profile(cxt->previous);
if (unconfined(profile)) {
info = "unconfined";
@@ -718,6 +718,8 @@ audit:
out:
aa_put_profile(hat);
kfree(name);
+ aa_put_profile(profile);
+ aa_put_profile(previous_profile);
put_cred(cred);
return error;
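The change above takes counted references on the current and previous profiles (aa_get_newest_profile) and releases them on the exit path, so neither object can be freed while aa_change_hat() is still using it. A minimal user-space sketch of that get/put discipline, with an illustrative profile struct rather than the AppArmor API:

#include <stdio.h>
#include <stdlib.h>

struct profile {
    int refcount;           /* simplified, non-atomic refcount */
    const char *name;
};

/* take a reference so the object cannot be freed under us */
static struct profile *profile_get(struct profile *p)
{
    if (p)
        p->refcount++;
    return p;
}

/* drop the reference; free on last put */
static void profile_put(struct profile *p)
{
    if (p && --p->refcount == 0)
        free(p);
}

int main(void)
{
    struct profile *p = malloc(sizeof(*p));

    p->refcount = 1;
    p->name = "unconfined";

    struct profile *held = profile_get(p);   /* pin while we work */
    printf("working with %s\n", held->name);
    profile_put(held);                        /* release our pin */
    profile_put(p);                           /* original owner's reference */
    return 0;
}
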
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 4304372b323f..106e855e2d9d 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -51,7 +51,7 @@ static bool init_keyring __initdata;
int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
const char *digest, int digestlen)
{
- if (id >= INTEGRITY_KEYRING_MAX)
+ if (id >= INTEGRITY_KEYRING_MAX || siglen < 2)
return -EINVAL;
if (!keyring[id]) {
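The added siglen < 2 check rejects signatures too short to hold even the leading type/version bytes that the verification code reads next. A small sketch of the same guard against an untrusted buffer; the sig_hdr layout here is a stand-in, not the kernel's signature header:

#include <stdint.h>
#include <stdio.h>

struct sig_hdr {                /* hypothetical 2-byte header */
    uint8_t type;
    uint8_t version;
};

static int verify_sig(const uint8_t *sig, int siglen)
{
    if (siglen < (int)sizeof(struct sig_hdr))
        return -1;              /* too short to even dispatch on */

    const struct sig_hdr *hdr = (const void *)sig;
    printf("type=%u version=%u\n", hdr->type, hdr->version);
    return 0;
}

int main(void)
{
    uint8_t good[] = { 0x03, 0x02, 0xaa };
    uint8_t bad[]  = { 0x03 };

    printf("%d\n", verify_sig(good, sizeof(good)));  /* 0: long enough */
    printf("%d\n", verify_sig(bad, sizeof(bad)));    /* -1: rejected */
    return 0;
}
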
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index bf663915412e..d7f282d75cc1 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -151,8 +151,16 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
memset(&hmac_misc, 0, sizeof(hmac_misc));
hmac_misc.ino = inode->i_ino;
hmac_misc.generation = inode->i_generation;
- hmac_misc.uid = from_kuid(inode->i_sb->s_user_ns, inode->i_uid);
- hmac_misc.gid = from_kgid(inode->i_sb->s_user_ns, inode->i_gid);
+ /* The hmac uid and gid must be encoded in the initial user
+ * namespace (not the filesystem's user namespace) as encoding
+ * them in the filesystem's user namespace allows an attack
+ * where first they are written in an unprivileged fuse mount
+ * of a filesystem and then the system is tricked to mount the
+ * filesystem for real on next boot and trust it because
+ * everything is signed.
+ */
+ hmac_misc.uid = from_kuid(&init_user_ns, inode->i_uid);
+ hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid);
hmac_misc.mode = inode->i_mode;
crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc));
if (evm_hmac_attrs & EVM_ATTR_FSUUID)
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index ba8615576d4d..e2ed498c0f5f 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -145,6 +145,10 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
/* check value type */
switch (xattr_data->type) {
case EVM_XATTR_HMAC:
+ if (xattr_len != sizeof(struct evm_ima_xattr_data)) {
+ evm_status = INTEGRITY_FAIL;
+ goto out;
+ }
rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
xattr_value_len, calc.digest);
if (rc)
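The new length check ensures the EVM_XATTR_HMAC case only ever operates on a blob that is exactly one fixed-size HMAC record, so the later digest comparison cannot read past a short xattr. A hedged sketch of the exact-size validation, with an illustrative struct in place of evm_ima_xattr_data:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DIGEST_SIZE 20

struct xattr_hmac {             /* illustrative stand-in, not the kernel struct */
    uint8_t type;
    uint8_t digest[DIGEST_SIZE];
};

static int verify_hmac(const void *xattr, size_t xattr_len,
                       const uint8_t *calc_digest)
{
    const struct xattr_hmac *x = xattr;

    /* reject anything that is not exactly one fixed-size HMAC record */
    if (xattr_len != sizeof(*x))
        return -1;
    return memcmp(x->digest, calc_digest, DIGEST_SIZE) ? -1 : 0;
}

int main(void)
{
    struct xattr_hmac x = { .type = 1 };
    uint8_t calc[DIGEST_SIZE] = { 0 };

    printf("%d\n", verify_hmac(&x, sizeof(x), calc));      /* 0: size ok */
    printf("%d\n", verify_hmac(&x, sizeof(x) - 1, calc));  /* -1: short blob */
    return 0;
}
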
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 5487827fa86c..370eb2f4dd37 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -27,6 +27,18 @@ config IMA
to learn more about IMA.
If unsure, say N.
+config IMA_KEXEC
+ bool "Enable carrying the IMA measurement list across a soft boot"
+ depends on IMA && TCG_TPM && HAVE_IMA_KEXEC
+ default n
+ help
+ TPM PCRs are only reset on a hard reboot. In order to validate
+ a TPM's quote after a soft boot, the IMA measurement list of the
+ running kernel must be saved and restored on boot.
+
+ Depending on the IMA policy, the measurement list can grow to
+ be very large.
+
config IMA_MEASURE_PCR_IDX
int
depends on IMA
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile
index 9aeaedad1e2b..29f198bde02b 100644
--- a/security/integrity/ima/Makefile
+++ b/security/integrity/ima/Makefile
@@ -8,4 +8,5 @@ obj-$(CONFIG_IMA) += ima.o
ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
ima_policy.o ima_template.o ima_template_lib.o
ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o
+ima-$(CONFIG_HAVE_IMA_KEXEC) += ima_kexec.o
obj-$(CONFIG_IMA_BLACKLIST_KEYRING) += ima_mok.o
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index db25f54a04fe..5e6180a4da7d 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -28,6 +28,10 @@
#include "../integrity.h"
+#ifdef CONFIG_HAVE_IMA_KEXEC
+#include <asm/ima.h>
+#endif
+
enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
@@ -81,6 +85,7 @@ struct ima_template_field {
/* IMA template descriptor definition */
struct ima_template_desc {
+ struct list_head list;
char *name;
char *fmt;
int num_fields;
@@ -102,6 +107,27 @@ struct ima_queue_entry {
};
extern struct list_head ima_measurements; /* list of all measurements */
+/* Some details preceding the binary serialized measurement list */
+struct ima_kexec_hdr {
+ u16 version;
+ u16 _reserved0;
+ u32 _reserved1;
+ u64 buffer_size;
+ u64 count;
+};
+
+#ifdef CONFIG_HAVE_IMA_KEXEC
+void ima_load_kexec_buffer(void);
+#else
+static inline void ima_load_kexec_buffer(void) {}
+#endif /* CONFIG_HAVE_IMA_KEXEC */
+
+/*
+ * The default binary_runtime_measurements list format is defined as the
+ * platform native format. The canonical format is defined as little-endian.
+ */
+extern bool ima_canonical_fmt;
+
/* Internal IMA function definitions */
int ima_init(void);
int ima_fs_init(void);
@@ -122,7 +148,12 @@ int ima_init_crypto(void);
void ima_putc(struct seq_file *m, void *data, int datalen);
void ima_print_digest(struct seq_file *m, u8 *digest, u32 size);
struct ima_template_desc *ima_template_desc_current(void);
+int ima_restore_measurement_entry(struct ima_template_entry *entry);
+int ima_restore_measurement_list(loff_t bufsize, void *buf);
+int ima_measurements_show(struct seq_file *m, void *v);
+unsigned long ima_get_binary_runtime_size(void);
int ima_init_template(void);
+void ima_init_template_list(void);
/*
* used to protect h_table and sha_table
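The new struct ima_kexec_hdr is a fixed 24-byte prefix (version, reserved padding, buffer size, entry count) written ahead of the serialized measurement list. A small compilable sketch mirroring that layout with user-space fixed-width types:

#include <stdint.h>
#include <stdio.h>

/* mirrors struct ima_kexec_hdr from ima.h (field names copied for clarity) */
struct ima_kexec_hdr {
    uint16_t version;
    uint16_t _reserved0;
    uint32_t _reserved1;
    uint64_t buffer_size;
    uint64_t count;
};

int main(void)
{
    struct ima_kexec_hdr khdr = {
        .version = 1,
        .buffer_size = 4096,
        .count = 42,
    };

    /* 2 + 2 + 4 + 8 + 8 = 24 bytes, naturally aligned, no padding */
    printf("header is %zu bytes, %llu entries in %llu bytes of buffer\n",
           sizeof(khdr), (unsigned long long)khdr.count,
           (unsigned long long)khdr.buffer_size);
    return 0;
}
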
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 389325ac6067..1fd9539a969d 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -130,6 +130,7 @@ enum hash_algo ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value,
int xattr_len)
{
struct signature_v2_hdr *sig;
+ enum hash_algo ret;
if (!xattr_value || xattr_len < 2)
/* return default hash algo */
@@ -143,7 +144,9 @@ enum hash_algo ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value,
return sig->hash_algo;
break;
case IMA_XATTR_DIGEST_NG:
- return xattr_value->digest[0];
+ ret = xattr_value->digest[0];
+ if (ret < HASH_ALGO__LAST)
+ return ret;
break;
case IMA_XATTR_DIGEST:
/* this is for backward compatibility */
@@ -384,14 +387,10 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
result = ima_protect_xattr(dentry, xattr_name, xattr_value,
xattr_value_len);
if (result == 1) {
- bool digsig;
-
if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST))
return -EINVAL;
- digsig = (xvalue->type == EVM_IMA_XATTR_DIGSIG);
- if (!digsig && (ima_appraise & IMA_APPRAISE_ENFORCE))
- return -EPERM;
- ima_reset_appraise_flags(d_backing_inode(dentry), digsig);
+ ima_reset_appraise_flags(d_backing_inode(dentry),
+ (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 1 : 0);
result = 0;
}
return result;
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 38f2ed830dd6..802d5d20f36f 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -477,11 +477,13 @@ static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
u8 *data_to_hash = field_data[i].data;
u32 datalen = field_data[i].len;
+ u32 datalen_to_hash =
+ !ima_canonical_fmt ? datalen : cpu_to_le32(datalen);
if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
rc = crypto_shash_update(shash,
- (const u8 *) &field_data[i].len,
- sizeof(field_data[i].len));
+ (const u8 *) &datalen_to_hash,
+ sizeof(datalen_to_hash));
if (rc)
break;
} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
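Converting the field length to little endian before hashing means big- and little-endian kernels compute the same template digest when ima_canonical_fmt is set. A portable sketch of that canonicalization using explicit byte placement instead of the kernel's cpu_to_le32():

#include <stdint.h>
#include <stdio.h>

/* explicit little-endian encoding, independent of host byte order */
static void put_le32(uint8_t out[4], uint32_t v)
{
    out[0] = v & 0xff;
    out[1] = (v >> 8) & 0xff;
    out[2] = (v >> 16) & 0xff;
    out[3] = (v >> 24) & 0xff;
}

int main(void)
{
    uint32_t datalen = 260;   /* 0x104 */
    uint8_t canonical[4];

    put_le32(canonical, datalen);
    /* this byte sequence is what gets fed to the hash in canonical format */
    printf("%02x %02x %02x %02x\n",
           canonical[0], canonical[1], canonical[2], canonical[3]);
    return 0;
}
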
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index c07a3844ea0a..ca303e5d2b94 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -28,6 +28,16 @@
static DEFINE_MUTEX(ima_write_mutex);
+bool ima_canonical_fmt;
+static int __init default_canonical_fmt_setup(char *str)
+{
+#ifdef __BIG_ENDIAN
+ ima_canonical_fmt = 1;
+#endif
+ return 1;
+}
+__setup("ima_canonical_fmt", default_canonical_fmt_setup);
+
static int valid_policy = 1;
#define TMPBUFLEN 12
static ssize_t ima_show_htable_value(char __user *buf, size_t count,
@@ -116,13 +126,13 @@ void ima_putc(struct seq_file *m, void *data, int datalen)
* [eventdata length]
* eventdata[n]=template specific data
*/
-static int ima_measurements_show(struct seq_file *m, void *v)
+int ima_measurements_show(struct seq_file *m, void *v)
{
/* the list never shrinks, so we don't need a lock here */
struct ima_queue_entry *qe = v;
struct ima_template_entry *e;
char *template_name;
- int namelen;
+ u32 pcr, namelen, template_data_len; /* temporary fields */
bool is_ima_template = false;
int i;
@@ -139,25 +149,29 @@ static int ima_measurements_show(struct seq_file *m, void *v)
* PCR used defaults to the same (config option) in
* little-endian format, unless set in policy
*/
- ima_putc(m, &e->pcr, sizeof(e->pcr));
+ pcr = !ima_canonical_fmt ? e->pcr : cpu_to_le32(e->pcr);
+ ima_putc(m, &pcr, sizeof(e->pcr));
/* 2nd: template digest */
ima_putc(m, e->digest, TPM_DIGEST_SIZE);
/* 3rd: template name size */
- namelen = strlen(template_name);
+ namelen = !ima_canonical_fmt ? strlen(template_name) :
+ cpu_to_le32(strlen(template_name));
ima_putc(m, &namelen, sizeof(namelen));
/* 4th: template name */
- ima_putc(m, template_name, namelen);
+ ima_putc(m, template_name, strlen(template_name));
/* 5th: template length (except for 'ima' template) */
if (strcmp(template_name, IMA_TEMPLATE_IMA_NAME) == 0)
is_ima_template = true;
- if (!is_ima_template)
- ima_putc(m, &e->template_data_len,
- sizeof(e->template_data_len));
+ if (!is_ima_template) {
+ template_data_len = !ima_canonical_fmt ? e->template_data_len :
+ cpu_to_le32(e->template_data_len);
+ ima_putc(m, &template_data_len, sizeof(e->template_data_len));
+ }
/* 6th: template specific data */
for (i = 0; i < e->template_desc->num_fields; i++) {
@@ -401,7 +415,7 @@ static int ima_release_policy(struct inode *inode, struct file *file)
const char *cause = valid_policy ? "completed" : "failed";
if ((file->f_flags & O_ACCMODE) == O_RDONLY)
- return 0;
+ return seq_release(inode, file);
if (valid_policy && ima_check_policy() < 0) {
cause = "failed";
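With these changes, each record written by ima_measurements_show() is, in order: a 4-byte PCR, the TPM digest, a 4-byte template-name length, the name bytes, then (for everything but the legacy 'ima' template) a 4-byte data length and the data, with the integer fields little endian when ima_canonical_fmt is set. A hedged user-space sketch of a writer for that layout (emit_record and put_u32 are illustrative helpers, not kernel functions):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TPM_DIGEST_SIZE 20

static void put_u32(FILE *f, uint32_t v)
{
    uint8_t b[4] = { v, v >> 8, v >> 16, v >> 24 };   /* little endian */
    fwrite(b, 1, sizeof(b), f);
}

static void emit_record(FILE *f, uint32_t pcr, const uint8_t *digest,
                        const char *name, const void *data, uint32_t datalen)
{
    put_u32(f, pcr);                         /* 1st: PCR */
    fwrite(digest, 1, TPM_DIGEST_SIZE, f);   /* 2nd: template digest */
    put_u32(f, strlen(name));                /* 3rd: name length */
    fwrite(name, 1, strlen(name), f);        /* 4th: name (not NUL terminated) */
    put_u32(f, datalen);                     /* 5th: data length (skipped for 'ima') */
    fwrite(data, 1, datalen, f);             /* 6th: template data */
}

int main(void)
{
    uint8_t digest[TPM_DIGEST_SIZE] = { 0 };
    const char data[] = "example";

    emit_record(stdout, 10, digest, "ima-ng", data, sizeof(data));
    return 0;
}
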
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 32912bd54ead..2967d497a665 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -115,7 +115,8 @@ int __init ima_init(void)
ima_used_chip = 1;
if (!ima_used_chip)
- pr_info("No TPM chip found, activating TPM-bypass!\n");
+ pr_info("No TPM chip found, activating TPM-bypass! (rc=%d)\n",
+ rc);
rc = integrity_init_keyring(INTEGRITY_KEYRING_IMA);
if (rc)
@@ -128,6 +129,8 @@ int __init ima_init(void)
if (rc != 0)
return rc;
+ ima_load_kexec_buffer();
+
rc = ima_add_boot_aggregate(); /* boot aggregate must be first entry */
if (rc != 0)
return rc;
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
new file mode 100644
index 000000000000..e473eee913cb
--- /dev/null
+++ b/security/integrity/ima/ima_kexec.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2016 IBM Corporation
+ *
+ * Authors:
+ * Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>
+ * Mimi Zohar <zohar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/kexec.h>
+#include "ima.h"
+
+#ifdef CONFIG_IMA_KEXEC
+static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer,
+ unsigned long segment_size)
+{
+ struct ima_queue_entry *qe;
+ struct seq_file file;
+ struct ima_kexec_hdr khdr;
+ int ret = 0;
+
+ /* segment size can't change between kexec load and execute */
+ file.buf = vmalloc(segment_size);
+ if (!file.buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ file.size = segment_size;
+ file.read_pos = 0;
+ file.count = sizeof(khdr); /* reserved space */
+
+ memset(&khdr, 0, sizeof(khdr));
+ khdr.version = 1;
+ list_for_each_entry_rcu(qe, &ima_measurements, later) {
+ if (file.count < file.size) {
+ khdr.count++;
+ ima_measurements_show(&file, qe);
+ } else {
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ if (ret < 0)
+ goto out;
+
+ /*
+ * fill in reserved space with some buffer details
+ * (eg. version, buffer size, number of measurements)
+ */
+ khdr.buffer_size = file.count;
+ if (ima_canonical_fmt) {
+ khdr.version = cpu_to_le16(khdr.version);
+ khdr.count = cpu_to_le64(khdr.count);
+ khdr.buffer_size = cpu_to_le64(khdr.buffer_size);
+ }
+ memcpy(file.buf, &khdr, sizeof(khdr));
+
+ print_hex_dump(KERN_DEBUG, "ima dump: ", DUMP_PREFIX_NONE,
+ 16, 1, file.buf,
+ file.count < 100 ? file.count : 100, true);
+
+ *buffer_size = file.count;
+ *buffer = file.buf;
+out:
+ if (ret == -EINVAL)
+ vfree(file.buf);
+ return ret;
+}
+
+/*
+ * Called during kexec_file_load so that IMA can add a segment to the kexec
+ * image for the measurement list for the next kernel.
+ *
+ * This function assumes that kexec_mutex is held.
+ */
+void ima_add_kexec_buffer(struct kimage *image)
+{
+ struct kexec_buf kbuf = { .image = image, .buf_align = PAGE_SIZE,
+ .buf_min = 0, .buf_max = ULONG_MAX,
+ .top_down = true };
+ unsigned long binary_runtime_size;
+
+ /* use more understandable variable names than defined in kbuf */
+ void *kexec_buffer = NULL;
+ size_t kexec_buffer_size;
+ size_t kexec_segment_size;
+ int ret;
+
+ /*
+ * Reserve an extra half page of memory for additional measurements
+ * added during the kexec load.
+ */
+ binary_runtime_size = ima_get_binary_runtime_size();
+ if (binary_runtime_size >= ULONG_MAX - PAGE_SIZE)
+ kexec_segment_size = ULONG_MAX;
+ else
+ kexec_segment_size = ALIGN(ima_get_binary_runtime_size() +
+ PAGE_SIZE / 2, PAGE_SIZE);
+ if ((kexec_segment_size == ULONG_MAX) ||
+ ((kexec_segment_size >> PAGE_SHIFT) > totalram_pages / 2)) {
+ pr_err("Binary measurement list too large.\n");
+ return;
+ }
+
+ ima_dump_measurement_list(&kexec_buffer_size, &kexec_buffer,
+ kexec_segment_size);
+ if (!kexec_buffer) {
+ pr_err("Not enough memory for the kexec measurement buffer.\n");
+ return;
+ }
+
+ kbuf.buffer = kexec_buffer;
+ kbuf.bufsz = kexec_buffer_size;
+ kbuf.memsz = kexec_segment_size;
+ ret = kexec_add_buffer(&kbuf);
+ if (ret) {
+ pr_err("Error passing over kexec measurement buffer.\n");
+ return;
+ }
+
+ ret = arch_ima_add_kexec_buffer(image, kbuf.mem, kexec_segment_size);
+ if (ret) {
+ pr_err("Error passing over kexec measurement buffer.\n");
+ return;
+ }
+
+ pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
+ kbuf.mem);
+}
+#endif /* IMA_KEXEC */
+
+/*
+ * Restore the measurement list from the previous kernel.
+ */
+void ima_load_kexec_buffer(void)
+{
+ void *kexec_buffer = NULL;
+ size_t kexec_buffer_size = 0;
+ int rc;
+
+ rc = ima_get_kexec_buffer(&kexec_buffer, &kexec_buffer_size);
+ switch (rc) {
+ case 0:
+ rc = ima_restore_measurement_list(kexec_buffer_size,
+ kexec_buffer);
+ if (rc != 0)
+ pr_err("Failed to restore the measurement list: %d\n",
+ rc);
+
+ ima_free_kexec_buffer();
+ break;
+ case -ENOTSUPP:
+ pr_debug("Restoring the measurement list not supported\n");
+ break;
+ case -ENOENT:
+ pr_debug("No measurement list to restore\n");
+ break;
+ default:
+ pr_debug("Error restoring the measurement list: %d\n", rc);
+ }
+}
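ima_add_kexec_buffer() above sizes the segment as the current binary runtime size plus half a page of headroom, rounded up to a page, and refuses to proceed on overflow or if the result would consume more than half of RAM. A user-space sketch of that arithmetic; the PAGE_SIZE value and helper names are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>
#include <limits.h>

#define PAGE_SIZE 4096UL
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* returns 0 and the segment size, or -1 if the list is too large */
static int size_kexec_segment(unsigned long runtime_size,
                              unsigned long totalram_pages,
                              unsigned long *segment_size)
{
    unsigned long seg;

    if (runtime_size >= ULONG_MAX - PAGE_SIZE)
        seg = ULONG_MAX;                 /* overflow: mark as too big */
    else
        seg = ALIGN_UP(runtime_size + PAGE_SIZE / 2, PAGE_SIZE);

    if (seg == ULONG_MAX || (seg / PAGE_SIZE) > totalram_pages / 2)
        return -1;                       /* refuse to eat half of RAM */

    *segment_size = seg;
    return 0;
}

int main(void)
{
    unsigned long seg;

    if (size_kexec_segment(10000, 1UL << 20, &seg) == 0)
        printf("segment: %lu bytes\n", seg);   /* 12288 */
    return 0;
}
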
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 423d111b3b94..50818c60538b 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -418,6 +418,7 @@ static int __init init_ima(void)
{
int error;
+ ima_init_template_list();
hash_setup(CONFIG_IMA_DEFAULT_HASH);
error = ima_init();
if (!error) {
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index 32f6ac0f96df..d9aa5ab71204 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -29,6 +29,11 @@
#define AUDIT_CAUSE_LEN_MAX 32
LIST_HEAD(ima_measurements); /* list of all measurements */
+#ifdef CONFIG_IMA_KEXEC
+static unsigned long binary_runtime_size;
+#else
+static unsigned long binary_runtime_size = ULONG_MAX;
+#endif
/* key: inode (before secure-hashing a file) */
struct ima_h_table ima_htable = {
@@ -64,12 +69,32 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,
return ret;
}
+/*
+ * Calculate the memory required for serializing a single
+ * binary_runtime_measurement list entry, which contains a
+ * couple of variable length fields (e.g template name and data).
+ */
+static int get_binary_runtime_size(struct ima_template_entry *entry)
+{
+ int size = 0;
+
+ size += sizeof(u32); /* pcr */
+ size += sizeof(entry->digest);
+ size += sizeof(int); /* template name size field */
+ size += strlen(entry->template_desc->name) + 1;
+ size += sizeof(entry->template_data_len);
+ size += entry->template_data_len;
+ return size;
+}
+
/* ima_add_template_entry helper function:
- * - Add template entry to measurement list and hash table.
+ * - Add template entry to the measurement list and hash table, for
+ * all entries except those carried across kexec.
*
* (Called with ima_extend_list_mutex held.)
*/
-static int ima_add_digest_entry(struct ima_template_entry *entry)
+static int ima_add_digest_entry(struct ima_template_entry *entry,
+ bool update_htable)
{
struct ima_queue_entry *qe;
unsigned int key;
@@ -85,11 +110,34 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
list_add_tail_rcu(&qe->later, &ima_measurements);
atomic_long_inc(&ima_htable.len);
- key = ima_hash_key(entry->digest);
- hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
+ if (update_htable) {
+ key = ima_hash_key(entry->digest);
+ hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
+ }
+
+ if (binary_runtime_size != ULONG_MAX) {
+ int size;
+
+ size = get_binary_runtime_size(entry);
+ binary_runtime_size = (binary_runtime_size < ULONG_MAX - size) ?
+ binary_runtime_size + size : ULONG_MAX;
+ }
return 0;
}
+/*
+ * Return the amount of memory required for serializing the
+ * entire binary_runtime_measurement list, including the ima_kexec_hdr
+ * structure.
+ */
+unsigned long ima_get_binary_runtime_size(void)
+{
+ if (binary_runtime_size >= (ULONG_MAX - sizeof(struct ima_kexec_hdr)))
+ return ULONG_MAX;
+ else
+ return binary_runtime_size + sizeof(struct ima_kexec_hdr);
+};
+
static int ima_pcr_extend(const u8 *hash, int pcr)
{
int result = 0;
@@ -103,8 +151,13 @@ static int ima_pcr_extend(const u8 *hash, int pcr)
return result;
}
-/* Add template entry to the measurement list and hash table,
- * and extend the pcr.
+/*
+ * Add template entry to the measurement list and hash table, and
+ * extend the pcr.
+ *
+ * On systems which support carrying the IMA measurement list across
+ * kexec, maintain the total memory size required for serializing the
+ * binary_runtime_measurements.
*/
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
const char *op, struct inode *inode,
@@ -126,7 +179,7 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
}
}
- result = ima_add_digest_entry(entry);
+ result = ima_add_digest_entry(entry, 1);
if (result < 0) {
audit_cause = "ENOMEM";
audit_info = 0;
@@ -149,3 +202,13 @@ out:
op, audit_cause, result, audit_info);
return result;
}
+
+int ima_restore_measurement_entry(struct ima_template_entry *entry)
+{
+ int result = 0;
+
+ mutex_lock(&ima_extend_list_mutex);
+ result = ima_add_digest_entry(entry, 0);
+ mutex_unlock(&ima_extend_list_mutex);
+ return result;
+}
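ima_add_digest_entry() now keeps a running total of the bytes needed to serialize the list and saturates at ULONG_MAX rather than wrapping. A sketch of the per-entry sizing and the saturating add, with approximate field sizes for illustration:

#include <limits.h>
#include <stdio.h>
#include <string.h>

#define DIGEST_SIZE 20

static unsigned long binary_runtime_size;

/* bytes to serialize one entry: pcr + digest + namelen + name + datalen + data */
static unsigned long entry_size(const char *template_name,
                                unsigned long template_data_len)
{
    return sizeof(unsigned int) + DIGEST_SIZE + sizeof(int) +
           strlen(template_name) + 1 + sizeof(unsigned int) +
           template_data_len;
}

static void account_entry(const char *template_name, unsigned long data_len)
{
    unsigned long size = entry_size(template_name, data_len);

    /* saturate instead of overflowing */
    binary_runtime_size = (binary_runtime_size < ULONG_MAX - size) ?
                          binary_runtime_size + size : ULONG_MAX;
}

int main(void)
{
    account_entry("ima-ng", 64);
    account_entry("ima-sig", 96);
    printf("running total: %lu bytes\n", binary_runtime_size);
    return 0;
}
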
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
index febd12ed9b55..cebb37c63629 100644
--- a/security/integrity/ima/ima_template.c
+++ b/security/integrity/ima/ima_template.c
@@ -15,16 +15,20 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/rculist.h>
#include "ima.h"
#include "ima_template_lib.h"
-static struct ima_template_desc defined_templates[] = {
+static struct ima_template_desc builtin_templates[] = {
{.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT},
{.name = "ima-ng", .fmt = "d-ng|n-ng"},
{.name = "ima-sig", .fmt = "d-ng|n-ng|sig"},
{.name = "", .fmt = ""}, /* placeholder for a custom format */
};
+static LIST_HEAD(defined_templates);
+static DEFINE_SPINLOCK(template_list);
+
static struct ima_template_field supported_fields[] = {
{.field_id = "d", .field_init = ima_eventdigest_init,
.field_show = ima_show_template_digest},
@@ -37,6 +41,7 @@ static struct ima_template_field supported_fields[] = {
{.field_id = "sig", .field_init = ima_eventsig_init,
.field_show = ima_show_template_sig},
};
+#define MAX_TEMPLATE_NAME_LEN 15
static struct ima_template_desc *ima_template;
static struct ima_template_desc *lookup_template_desc(const char *name);
@@ -52,6 +57,8 @@ static int __init ima_template_setup(char *str)
if (ima_template)
return 1;
+ ima_init_template_list();
+
/*
* Verify that a template with the supplied name exists.
* If not, use CONFIG_IMA_DEFAULT_TEMPLATE.
@@ -80,7 +87,7 @@ __setup("ima_template=", ima_template_setup);
static int __init ima_template_fmt_setup(char *str)
{
- int num_templates = ARRAY_SIZE(defined_templates);
+ int num_templates = ARRAY_SIZE(builtin_templates);
if (ima_template)
return 1;
@@ -91,22 +98,28 @@ static int __init ima_template_fmt_setup(char *str)
return 1;
}
- defined_templates[num_templates - 1].fmt = str;
- ima_template = defined_templates + num_templates - 1;
+ builtin_templates[num_templates - 1].fmt = str;
+ ima_template = builtin_templates + num_templates - 1;
+
return 1;
}
__setup("ima_template_fmt=", ima_template_fmt_setup);
static struct ima_template_desc *lookup_template_desc(const char *name)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(defined_templates); i++) {
- if (strcmp(defined_templates[i].name, name) == 0)
- return defined_templates + i;
+ struct ima_template_desc *template_desc;
+ int found = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(template_desc, &defined_templates, list) {
+ if ((strcmp(template_desc->name, name) == 0) ||
+ (strcmp(template_desc->fmt, name) == 0)) {
+ found = 1;
+ break;
+ }
}
-
- return NULL;
+ rcu_read_unlock();
+ return found ? template_desc : NULL;
}
static struct ima_template_field *lookup_template_field(const char *field_id)
@@ -142,9 +155,14 @@ static int template_desc_init_fields(const char *template_fmt,
{
const char *template_fmt_ptr;
struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX];
- int template_num_fields = template_fmt_size(template_fmt);
+ int template_num_fields;
int i, len;
+ if (num_fields && *num_fields > 0) /* already initialized? */
+ return 0;
+
+ template_num_fields = template_fmt_size(template_fmt);
+
if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX) {
pr_err("format string '%s' contains too many fields\n",
template_fmt);
@@ -182,11 +200,28 @@ static int template_desc_init_fields(const char *template_fmt,
return 0;
}
+void ima_init_template_list(void)
+{
+ int i;
+
+ if (!list_empty(&defined_templates))
+ return;
+
+ spin_lock(&template_list);
+ for (i = 0; i < ARRAY_SIZE(builtin_templates); i++) {
+ list_add_tail_rcu(&builtin_templates[i].list,
+ &defined_templates);
+ }
+ spin_unlock(&template_list);
+}
+
struct ima_template_desc *ima_template_desc_current(void)
{
- if (!ima_template)
+ if (!ima_template) {
+ ima_init_template_list();
ima_template =
lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE);
+ }
return ima_template;
}
@@ -205,3 +240,239 @@ int __init ima_init_template(void)
return result;
}
+
+static struct ima_template_desc *restore_template_fmt(char *template_name)
+{
+ struct ima_template_desc *template_desc = NULL;
+ int ret;
+
+ ret = template_desc_init_fields(template_name, NULL, NULL);
+ if (ret < 0) {
+ pr_err("attempting to initialize the template \"%s\" failed\n",
+ template_name);
+ goto out;
+ }
+
+ template_desc = kzalloc(sizeof(*template_desc), GFP_KERNEL);
+ if (!template_desc)
+ goto out;
+
+ template_desc->name = "";
+ template_desc->fmt = kstrdup(template_name, GFP_KERNEL);
+ if (!template_desc->fmt)
+ goto out;
+
+ spin_lock(&template_list);
+ list_add_tail_rcu(&template_desc->list, &defined_templates);
+ spin_unlock(&template_list);
+out:
+ return template_desc;
+}
+
+static int ima_restore_template_data(struct ima_template_desc *template_desc,
+ void *template_data,
+ int template_data_size,
+ struct ima_template_entry **entry)
+{
+ struct binary_field_data {
+ u32 len;
+ u8 data[0];
+ } __packed;
+
+ struct binary_field_data *field_data;
+ int offset = 0;
+ int ret = 0;
+ int i;
+
+ *entry = kzalloc(sizeof(**entry) +
+ template_desc->num_fields * sizeof(struct ima_field_data),
+ GFP_NOFS);
+ if (!*entry)
+ return -ENOMEM;
+
+ (*entry)->template_desc = template_desc;
+ for (i = 0; i < template_desc->num_fields; i++) {
+ field_data = template_data + offset;
+
+ /* Each field of the template data is prefixed with a length. */
+ if (offset > (template_data_size - sizeof(*field_data))) {
+ pr_err("Restoring the template field failed\n");
+ ret = -EINVAL;
+ break;
+ }
+ offset += sizeof(*field_data);
+
+ if (ima_canonical_fmt)
+ field_data->len = le32_to_cpu(field_data->len);
+
+ if (offset > (template_data_size - field_data->len)) {
+ pr_err("Restoring the template field data failed\n");
+ ret = -EINVAL;
+ break;
+ }
+ offset += field_data->len;
+
+ (*entry)->template_data[i].len = field_data->len;
+ (*entry)->template_data_len += sizeof(field_data->len);
+
+ (*entry)->template_data[i].data =
+ kzalloc(field_data->len + 1, GFP_KERNEL);
+ if (!(*entry)->template_data[i].data) {
+ ret = -ENOMEM;
+ break;
+ }
+ memcpy((*entry)->template_data[i].data, field_data->data,
+ field_data->len);
+ (*entry)->template_data_len += field_data->len;
+ }
+
+ if (ret < 0) {
+ ima_free_template_entry(*entry);
+ *entry = NULL;
+ }
+
+ return ret;
+}
+
+/* Restore the serialized binary measurement list without extending PCRs. */
+int ima_restore_measurement_list(loff_t size, void *buf)
+{
+ struct binary_hdr_v1 {
+ u32 pcr;
+ u8 digest[TPM_DIGEST_SIZE];
+ u32 template_name_len;
+ char template_name[0];
+ } __packed;
+ char template_name[MAX_TEMPLATE_NAME_LEN];
+
+ struct binary_data_v1 {
+ u32 template_data_size;
+ char template_data[0];
+ } __packed;
+
+ struct ima_kexec_hdr *khdr = buf;
+ struct binary_hdr_v1 *hdr_v1;
+ struct binary_data_v1 *data_v1;
+
+ void *bufp = buf + sizeof(*khdr);
+ void *bufendp;
+ struct ima_template_entry *entry;
+ struct ima_template_desc *template_desc;
+ unsigned long count = 0;
+ int ret = 0;
+
+ if (!buf || size < sizeof(*khdr))
+ return 0;
+
+ if (ima_canonical_fmt) {
+ khdr->version = le16_to_cpu(khdr->version);
+ khdr->count = le64_to_cpu(khdr->count);
+ khdr->buffer_size = le64_to_cpu(khdr->buffer_size);
+ }
+
+ if (khdr->version != 1) {
+ pr_err("attempting to restore a incompatible measurement list");
+ return -EINVAL;
+ }
+
+ if (khdr->count > ULONG_MAX - 1) {
+ pr_err("attempting to restore too many measurements");
+ return -EINVAL;
+ }
+
+ /*
+ * ima kexec buffer prefix: version, buffer size, count
+ * v1 format: pcr, digest, template-name-len, template-name,
+ * template-data-size, template-data
+ */
+ bufendp = buf + khdr->buffer_size;
+ while ((bufp < bufendp) && (count++ < khdr->count)) {
+ hdr_v1 = bufp;
+ if (bufp > (bufendp - sizeof(*hdr_v1))) {
+ pr_err("attempting to restore partial measurement\n");
+ ret = -EINVAL;
+ break;
+ }
+ bufp += sizeof(*hdr_v1);
+
+ if (ima_canonical_fmt)
+ hdr_v1->template_name_len =
+ le32_to_cpu(hdr_v1->template_name_len);
+
+ if ((hdr_v1->template_name_len >= MAX_TEMPLATE_NAME_LEN) ||
+ (bufp > (bufendp - hdr_v1->template_name_len))) {
+ pr_err("attempting to restore a template name \
+ that is too long\n");
+ ret = -EINVAL;
+ break;
+ }
+ data_v1 = bufp += (u_int8_t)hdr_v1->template_name_len;
+
+ /* template name is not null terminated */
+ memcpy(template_name, hdr_v1->template_name,
+ hdr_v1->template_name_len);
+ template_name[hdr_v1->template_name_len] = 0;
+
+ if (strcmp(template_name, "ima") == 0) {
+ pr_err("attempting to restore an unsupported \
+ template \"%s\" failed\n", template_name);
+ ret = -EINVAL;
+ break;
+ }
+
+ template_desc = lookup_template_desc(template_name);
+ if (!template_desc) {
+ template_desc = restore_template_fmt(template_name);
+ if (!template_desc)
+ break;
+ }
+
+ /*
+ * Only the running system's template format is initialized
+ * on boot. As needed, initialize the other template formats.
+ */
+ ret = template_desc_init_fields(template_desc->fmt,
+ &(template_desc->fields),
+ &(template_desc->num_fields));
+ if (ret < 0) {
+ pr_err("attempting to restore the template fmt \"%s\" \
+ failed\n", template_desc->fmt);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (bufp > (bufendp - sizeof(data_v1->template_data_size))) {
+ pr_err("restoring the template data size failed\n");
+ ret = -EINVAL;
+ break;
+ }
+ bufp += (u_int8_t) sizeof(data_v1->template_data_size);
+
+ if (ima_canonical_fmt)
+ data_v1->template_data_size =
+ le32_to_cpu(data_v1->template_data_size);
+
+ if (bufp > (bufendp - data_v1->template_data_size)) {
+ pr_err("restoring the template data failed\n");
+ ret = -EINVAL;
+ break;
+ }
+ bufp += data_v1->template_data_size;
+
+ ret = ima_restore_template_data(template_desc,
+ data_v1->template_data,
+ data_v1->template_data_size,
+ &entry);
+ if (ret < 0)
+ break;
+
+ memcpy(entry->digest, hdr_v1->digest, TPM_DIGEST_SIZE);
+ entry->pcr =
+ !ima_canonical_fmt ? hdr_v1->pcr : le32_to_cpu(hdr_v1->pcr);
+ ret = ima_restore_measurement_entry(entry);
+ if (ret < 0)
+ break;
+
+ }
+ return ret;
+}
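ima_restore_measurement_list() treats the carried-over buffer as untrusted: every variable-length field is preceded by a length, and each length is checked against the space remaining before the cursor advances. A condensed user-space sketch of that bounds-checked walk, with the record format reduced to plain length-prefixed blobs:

#include <stdint.h>
#include <stdio.h>

/* walk length-prefixed records in an untrusted buffer, checking every step */
static int parse_records(const uint8_t *buf, size_t size)
{
    const uint8_t *bufp = buf, *bufendp = buf + size;

    while (bufp < bufendp) {
        uint32_t len;

        /* room for the length field itself? */
        if ((size_t)(bufendp - bufp) < sizeof(uint32_t))
            return -1;
        len = (uint32_t)bufp[0] | (uint32_t)bufp[1] << 8 |
              (uint32_t)bufp[2] << 16 | (uint32_t)bufp[3] << 24;
        bufp += sizeof(uint32_t);

        /* room for the payload the length claims? */
        if ((size_t)(bufendp - bufp) < len)
            return -1;
        printf("record of %u bytes\n", len);
        bufp += len;
    }
    return 0;
}

int main(void)
{
    /* one record: little-endian length 3, then "abc" */
    uint8_t buf[] = { 3, 0, 0, 0, 'a', 'b', 'c' };

    printf("%d\n", parse_records(buf, sizeof(buf)));       /* 0: ok */
    printf("%d\n", parse_records(buf, sizeof(buf) - 2));   /* -1: truncated */
    return 0;
}
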
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
index f9bae04ba176..f9ba37b3928d 100644
--- a/security/integrity/ima/ima_template_lib.c
+++ b/security/integrity/ima/ima_template_lib.c
@@ -103,8 +103,11 @@ static void ima_show_template_data_binary(struct seq_file *m,
u32 len = (show == IMA_SHOW_BINARY_OLD_STRING_FMT) ?
strlen(field_data->data) : field_data->len;
- if (show != IMA_SHOW_BINARY_NO_FIELD_LEN)
- ima_putc(m, &len, sizeof(len));
+ if (show != IMA_SHOW_BINARY_NO_FIELD_LEN) {
+ u32 field_len = !ima_canonical_fmt ? len : cpu_to_le32(len);
+
+ ima_putc(m, &field_len, sizeof(field_len));
+ }
if (!len)
return;
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index f826e8739023..d942c7c2bc0a 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -41,7 +41,7 @@ config BIG_KEYS
bool "Large payload keys"
depends on KEYS
depends on TMPFS
- select CRYPTO
+ depends on (CRYPTO_ANSI_CPRNG = y || CRYPTO_DRBG = y)
select CRYPTO_AES
select CRYPTO_ECB
select CRYPTO_RNG
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index c0b3030b5634..835c1ab30d01 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -9,6 +9,7 @@
* 2 of the Licence, or (at your option) any later version.
*/
+#define pr_fmt(fmt) "big_key: "fmt
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/file.h>
@@ -341,44 +342,48 @@ error:
*/
static int __init big_key_init(void)
{
- return register_key_type(&key_type_big_key);
-}
-
-/*
- * Initialize big_key crypto and RNG algorithms
- */
-static int __init big_key_crypto_init(void)
-{
- int ret = -EINVAL;
+ struct crypto_skcipher *cipher;
+ struct crypto_rng *rng;
+ int ret;
- /* init RNG */
- big_key_rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
- if (IS_ERR(big_key_rng)) {
- big_key_rng = NULL;
- return -EFAULT;
+ rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
+ if (IS_ERR(rng)) {
+ pr_err("Can't alloc rng: %ld\n", PTR_ERR(rng));
+ return PTR_ERR(rng);
}
+ big_key_rng = rng;
+
/* seed RNG */
- ret = crypto_rng_reset(big_key_rng, NULL, crypto_rng_seedsize(big_key_rng));
- if (ret)
- goto error;
+ ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
+ if (ret) {
+ pr_err("Can't reset rng: %d\n", ret);
+ goto error_rng;
+ }
/* init block cipher */
- big_key_skcipher = crypto_alloc_skcipher(big_key_alg_name,
- 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(big_key_skcipher)) {
- big_key_skcipher = NULL;
- ret = -EFAULT;
- goto error;
+ cipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(cipher)) {
+ ret = PTR_ERR(cipher);
+ pr_err("Can't alloc crypto: %d\n", ret);
+ goto error_rng;
+ }
+
+ big_key_skcipher = cipher;
+
+ ret = register_key_type(&key_type_big_key);
+ if (ret < 0) {
+ pr_err("Can't register type: %d\n", ret);
+ goto error_cipher;
}
return 0;
-error:
+error_cipher:
+ crypto_free_skcipher(big_key_skcipher);
+error_rng:
crypto_free_rng(big_key_rng);
- big_key_rng = NULL;
return ret;
}
-device_initcall(big_key_init);
-late_initcall(big_key_crypto_init);
+late_initcall(big_key_init);
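The reworked big_key_init() allocates the RNG and cipher first and registers the key type only once both exist, unwinding in reverse order on any failure so the type is never visible half-initialized. A sketch of that register-last, goto-unwind pattern; the resource and helper names are placeholders, not the keys API:

#include <stdio.h>
#include <stdlib.h>

static void *rng, *cipher;

static void *alloc_resource(const char *name) { (void)name; return malloc(1); }
static int register_type(void) { return 0; }   /* placeholder for registration */

static int module_init_example(void)
{
    int ret;

    rng = alloc_resource("rng");
    if (!rng)
        return -1;

    cipher = alloc_resource("cipher");
    if (!cipher) {
        ret = -1;
        goto error_rng;
    }

    /* register the user-visible interface last, once everything it needs exists */
    ret = register_type();
    if (ret < 0)
        goto error_cipher;

    return 0;

error_cipher:
    free(cipher);
error_rng:
    free(rng);
    return ret;
}

int main(void)
{
    printf("init: %d\n", module_init_example());
    return 0;
}
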
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index d580ad06b792..f89f1900e58d 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1074,7 +1074,7 @@ long keyctl_instantiate_key_common(key_serial_t id,
}
ret = -EFAULT;
- if (copy_from_iter(payload, plen, from) != plen)
+ if (!copy_from_iter_full(payload, plen, from))
goto error2;
}
diff --git a/security/keys/proc.c b/security/keys/proc.c
index f0611a6368cd..b9f531c9e4fa 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -181,7 +181,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
struct timespec now;
unsigned long timo;
key_ref_t key_ref, skey_ref;
- char xbuf[12];
+ char xbuf[16];
int rc;
struct keyring_search_context ctx = {
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 98a2e92b3168..c7c6619431d5 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2538,7 +2538,8 @@ static void selinux_bprm_committing_creds(struct linux_binprm *bprm)
rlim->rlim_cur = min(rlim->rlim_max, initrlim->rlim_cur);
}
task_unlock(current);
- update_rlimit_cpu(current, rlimit(RLIMIT_CPU));
+ if (IS_ENABLED(CONFIG_POSIX_TIMERS))
+ update_rlimit_cpu(current, rlimit(RLIMIT_CPU));
}
}
@@ -2568,9 +2569,11 @@ static void selinux_bprm_committed_creds(struct linux_binprm *bprm)
*/
rc = avc_has_perm(osid, sid, SECCLASS_PROCESS, PROCESS__SIGINH, NULL);
if (rc) {
- memset(&itimer, 0, sizeof itimer);
- for (i = 0; i < 3; i++)
- do_setitimer(i, &itimer, NULL);
+ if (IS_ENABLED(CONFIG_POSIX_TIMERS)) {
+ memset(&itimer, 0, sizeof itimer);
+ for (i = 0; i < 3; i++)
+ do_setitimer(i, &itimer, NULL);
+ }
spin_lock_irq(&current->sighand->siglock);
if (!fatal_signal_pending(current)) {
flush_sigqueue(&current->pending);
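IS_ENABLED(CONFIG_POSIX_TIMERS) expands to a constant 0 or 1, so the guarded calls disappear at compile time when the option is off while still being parsed and type-checked. A user-space sketch of the same pattern with a plain macro standing in for the kconfig machinery:

#include <stdio.h>

/* stand-in for the kconfig-generated macro; flip to 0 to compile the call out */
#define IS_ENABLED_POSIX_TIMERS 1

static void update_rlimit_cpu(unsigned long limit)
{
    printf("new CPU rlimit: %lu\n", limit);
}

int main(void)
{
    unsigned long rlim_cur = 3600;

    /* constant condition: the branch folds away at compile time,
     * but the call is still parsed and type-checked either way */
    if (IS_ENABLED_POSIX_TIMERS)
        update_rlimit_cpu(rlim_cur);
    return 0;
}
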
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 51fd30192c08..77abe2efacae 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -336,7 +336,6 @@ extern int smack_ptrace_rule;
extern struct smack_known smack_known_floor;
extern struct smack_known smack_known_hat;
extern struct smack_known smack_known_huh;
-extern struct smack_known smack_known_invalid;
extern struct smack_known smack_known_star;
extern struct smack_known smack_known_web;
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 23e5808a0970..356e3764cad9 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -36,11 +36,6 @@ struct smack_known smack_known_floor = {
.smk_secid = 5,
};
-struct smack_known smack_known_invalid = {
- .smk_known = "",
- .smk_secid = 6,
-};
-
struct smack_known smack_known_web = {
.smk_known = "@",
.smk_secid = 7,
@@ -615,7 +610,7 @@ struct smack_known *smack_from_secid(const u32 secid)
* of a secid that is not on the list.
*/
rcu_read_unlock();
- return &smack_known_invalid;
+ return &smack_known_huh;
}
/*
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 1cb060293505..94dc9d406ce3 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -225,7 +225,7 @@ static int smk_bu_credfile(const struct cred *cred, struct file *file,
{
struct task_smack *tsp = cred->security;
struct smack_known *sskp = tsp->smk_task;
- struct inode *inode = file->f_inode;
+ struct inode *inode = file_inode(file);
struct inode_smack *isp = inode->i_security;
char acc[SMK_NUM_ACCESS_TYPE + 1];
@@ -692,12 +692,12 @@ static int smack_parse_opts_str(char *options,
}
}
- opts->mnt_opts = kcalloc(NUM_SMK_MNT_OPTS, sizeof(char *), GFP_ATOMIC);
+ opts->mnt_opts = kcalloc(NUM_SMK_MNT_OPTS, sizeof(char *), GFP_KERNEL);
if (!opts->mnt_opts)
goto out_err;
opts->mnt_opts_flags = kcalloc(NUM_SMK_MNT_OPTS, sizeof(int),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!opts->mnt_opts_flags) {
kfree(opts->mnt_opts);
goto out_err;
@@ -769,6 +769,31 @@ static int smack_set_mnt_opts(struct super_block *sb,
if (sp->smk_flags & SMK_SB_INITIALIZED)
return 0;
+ if (!smack_privileged(CAP_MAC_ADMIN)) {
+ /*
+ * Unprivileged mounts don't get to specify Smack values.
+ */
+ if (num_opts)
+ return -EPERM;
+ /*
+ * Unprivileged mounts get root and default from the caller.
+ */
+ skp = smk_of_current();
+ sp->smk_root = skp;
+ sp->smk_default = skp;
+ /*
+ * For a handful of fs types with no user-controlled
+ * backing store it's okay to trust security labels
+ * in the filesystem. The rest are untrusted.
+ */
+ if (sb->s_user_ns != &init_user_ns &&
+ sb->s_magic != SYSFS_MAGIC && sb->s_magic != TMPFS_MAGIC &&
+ sb->s_magic != RAMFS_MAGIC) {
+ transmute = 1;
+ sp->smk_flags |= SMK_SB_UNTRUSTED;
+ }
+ }
+
sp->smk_flags |= SMK_SB_INITIALIZED;
for (i = 0; i < num_opts; i++) {
@@ -809,31 +834,6 @@ static int smack_set_mnt_opts(struct super_block *sb,
}
}
- if (!smack_privileged(CAP_MAC_ADMIN)) {
- /*
- * Unprivileged mounts don't get to specify Smack values.
- */
- if (num_opts)
- return -EPERM;
- /*
- * Unprivileged mounts get root and default from the caller.
- */
- skp = smk_of_current();
- sp->smk_root = skp;
- sp->smk_default = skp;
- /*
- * For a handful of fs types with no user-controlled
- * backing store it's okay to trust security labels
- * in the filesystem. The rest are untrusted.
- */
- if (sb->s_user_ns != &init_user_ns &&
- sb->s_magic != SYSFS_MAGIC && sb->s_magic != TMPFS_MAGIC &&
- sb->s_magic != RAMFS_MAGIC) {
- transmute = 1;
- sp->smk_flags |= SMK_SB_UNTRUSTED;
- }
- }
-
/*
* Initialize the root inode.
*/
@@ -1384,20 +1384,14 @@ static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,
skp = smk_import_entry(value, size);
if (!IS_ERR(skp))
isp->smk_inode = skp;
- else
- isp->smk_inode = &smack_known_invalid;
} else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0) {
skp = smk_import_entry(value, size);
if (!IS_ERR(skp))
isp->smk_task = skp;
- else
- isp->smk_task = &smack_known_invalid;
} else if (strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
skp = smk_import_entry(value, size);
if (!IS_ERR(skp))
isp->smk_mmap = skp;
- else
- isp->smk_mmap = &smack_known_invalid;
}
return;
@@ -2023,6 +2017,8 @@ static int smack_cred_prepare(struct cred *new, const struct cred *old,
if (new_tsp == NULL)
return -ENOMEM;
+ new->security = new_tsp;
+
rc = smk_copy_rules(&new_tsp->smk_rules, &old_tsp->smk_rules, gfp);
if (rc != 0)
return rc;
@@ -2032,7 +2028,6 @@ static int smack_cred_prepare(struct cred *new, const struct cred *old,
if (rc != 0)
return rc;
- new->security = new_tsp;
return 0;
}
@@ -2067,12 +2062,8 @@ static void smack_cred_transfer(struct cred *new, const struct cred *old)
static int smack_kernel_act_as(struct cred *new, u32 secid)
{
struct task_smack *new_tsp = new->security;
- struct smack_known *skp = smack_from_secid(secid);
- if (skp == NULL)
- return -EINVAL;
-
- new_tsp->smk_task = skp;
+ new_tsp->smk_task = smack_from_secid(secid);
return 0;
}
@@ -2337,8 +2328,16 @@ static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags)
if (ssp == NULL)
return -ENOMEM;
- ssp->smk_in = skp;
- ssp->smk_out = skp;
+ /*
+ * Sockets created by kernel threads receive web label.
+ */
+ if (unlikely(current->flags & PF_KTHREAD)) {
+ ssp->smk_in = &smack_known_web;
+ ssp->smk_out = &smack_known_web;
+ } else {
+ ssp->smk_in = skp;
+ ssp->smk_out = skp;
+ }
ssp->smk_packet = NULL;
sk->sk_security = ssp;
@@ -2435,17 +2434,17 @@ static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip)
list_for_each_entry_rcu(snp, &smk_net6addr_list, list) {
/*
+ * If the label is NULL the entry has
+ * been renounced. Ignore it.
+ */
+ if (snp->smk_label == NULL)
+ continue;
+ /*
* we break after finding the first match because
* the list is sorted from longest to shortest mask
* so we have found the most specific match
*/
for (found = 1, i = 0; i < 8; i++) {
- /*
- * If the label is NULL the entry has
- * been renounced. Ignore it.
- */
- if (snp->smk_label == NULL)
- continue;
if ((sap->s6_addr16[i] & snp->smk_mask.s6_addr16[i]) !=
snp->smk_host.s6_addr16[i]) {
found = 0;
@@ -3661,10 +3660,11 @@ static int smack_setprocattr(struct task_struct *p, char *name,
return PTR_ERR(skp);
/*
- * No process is ever allowed the web ("@") label.
+ * No process is ever allowed the web ("@") label
+ * and the star ("*") label.
*/
- if (skp == &smack_known_web)
- return -EPERM;
+ if (skp == &smack_known_web || skp == &smack_known_star)
+ return -EINVAL;
if (!smack_privileged(CAP_MAC_ADMIN)) {
rc = -EPERM;
@@ -3884,21 +3884,11 @@ static struct smack_known *smack_from_secattr(struct netlbl_lsm_secattr *sap,
return &smack_known_web;
return &smack_known_star;
}
- if ((sap->flags & NETLBL_SECATTR_SECID) != 0) {
+ if ((sap->flags & NETLBL_SECATTR_SECID) != 0)
/*
* Looks like a fallback, which gives us a secid.
*/
- skp = smack_from_secid(sap->attr.secid);
- /*
- * This has got to be a bug because it is
- * impossible to specify a fallback without
- * specifying the label, which will ensure
- * it has a secid, and the only way to get a
- * secid is from a fallback.
- */
- BUG_ON(skp == NULL);
- return skp;
- }
+ return smack_from_secid(sap->attr.secid);
/*
* Without guidance regarding the smack value
* for the packet fall back on the network
@@ -4761,7 +4751,6 @@ static __init void init_smack_known_list(void)
mutex_init(&smack_known_hat.smk_rules_lock);
mutex_init(&smack_known_floor.smk_rules_lock);
mutex_init(&smack_known_star.smk_rules_lock);
- mutex_init(&smack_known_invalid.smk_rules_lock);
mutex_init(&smack_known_web.smk_rules_lock);
/*
* Initialize rule lists
@@ -4770,7 +4759,6 @@ static __init void init_smack_known_list(void)
INIT_LIST_HEAD(&smack_known_hat.smk_rules);
INIT_LIST_HEAD(&smack_known_star.smk_rules);
INIT_LIST_HEAD(&smack_known_floor.smk_rules);
- INIT_LIST_HEAD(&smack_known_invalid.smk_rules);
INIT_LIST_HEAD(&smack_known_web.smk_rules);
/*
* Create the known labels list
@@ -4779,7 +4767,6 @@ static __init void init_smack_known_list(void)
smk_insert_entry(&smack_known_hat);
smk_insert_entry(&smack_known_star);
smk_insert_entry(&smack_known_floor);
- smk_insert_entry(&smack_known_invalid);
smk_insert_entry(&smack_known_web);
}
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 6492fe96cae4..13743a01b35b 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -2998,9 +2998,6 @@ static int __init init_smk_fs(void)
rc = smk_preset_netlabel(&smack_known_huh);
if (err == 0 && rc < 0)
err = rc;
- rc = smk_preset_netlabel(&smack_known_invalid);
- if (err == 0 && rc < 0)
- err = rc;
rc = smk_preset_netlabel(&smack_known_star);
if (err == 0 && rc < 0)
err = rc;
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index 682b73af7766..838ffa78cfda 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -881,7 +881,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
* the execve().
*/
if (get_user_pages_remote(current, bprm->mm, pos, 1,
- FOLL_FORCE, &page, NULL) <= 0)
+ FOLL_FORCE, &page, NULL, NULL) <= 0)
return false;
#else
page = bprm->page[pos / PAGE_SIZE];
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 0309f2111c70..968e5e0a3f81 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -309,7 +309,7 @@ static int task_is_descendant(struct task_struct *parent,
* @tracer: the task_struct of the process attempting ptrace
* @tracee: the task_struct of the process to be ptraced
*
- * Returns 1 if tracer has is ptracer exception ancestor for tracee.
+ * Returns 1 if tracer has a ptracer exception ancestor for tracee.
*/
static int ptracer_exception_found(struct task_struct *tracer,
struct task_struct *tracee)
@@ -320,6 +320,18 @@ static int ptracer_exception_found(struct task_struct *tracer,
bool found = false;
rcu_read_lock();
+
+ /*
+ * If there's already an active tracing relationship, then make an
+ * exception for the sake of other accesses, like process_vm_rw().
+ */
+ parent = ptrace_parent(tracee);
+ if (parent != NULL && same_thread_group(parent, tracer)) {
+ rc = 1;
+ goto unlock;
+ }
+
+ /* Look for a PR_SET_PTRACER relationship. */
if (!thread_group_leader(tracee))
tracee = rcu_dereference(tracee->group_leader);
list_for_each_entry_rcu(relation, &ptracer_relations, node) {
@@ -334,6 +346,8 @@ static int ptracer_exception_found(struct task_struct *tracer,
if (found && (parent == NULL || task_is_descendant(parent, tracer)))
rc = 1;
+
+unlock:
rcu_read_unlock();
return rc;