author		Linus Torvalds <torvalds@linux-foundation.org>	2014-12-15 03:10:09 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-15 03:10:09 +0300
commit		e6b5be2be4e30037eb551e0ed09dd97bd00d85d3 (patch)
tree		88801365987a0dc64d62d47e8a11f3b44691c37f /drivers/base
parent		37da7bbbe84fe9e8862940d3f9194fd27dce59bb (diff)
parent		f1c488a78d9f1a22cdb15648c15e70fd82ed229a (diff)
download	linux-e6b5be2be4e30037eb551e0ed09dd97bd00d85d3.tar.xz
Merge tag 'driver-core-3.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core
Pull driver core update from Greg KH:
 "Here's the set of driver core patches for 3.19-rc1.  They are
  dominated by the removal of the .owner field in platform drivers.
  They touch a lot of files, but they are "simple" changes, just
  removing a line in a structure.

  Other than that, a few minor driver core and debugfs changes.  There
  are some ath9k patches coming in through this tree that have been
  acked by the wireless maintainers as they relied on the debugfs
  changes.

  Everything has been in linux-next for a while"

* tag 'driver-core-3.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core: (324 commits)
  Revert "ath: ath9k: use debugfs_create_devm_seqfile() helper for seq_file entries"
  fs: debugfs: add forward declaration for struct device type
  firmware class: Deletion of an unnecessary check before the function call "vunmap"
  firmware loader: fix hung task warning dump
  devcoredump: provide a one-way disable function
  device: Add dev_<level>_once variants
  ath: ath9k: use debugfs_create_devm_seqfile() helper for seq_file entries
  ath: use seq_file api for ath9k debugfs files
  debugfs: add helper function to create device related seq_file
  drivers/base: cacheinfo: remove noisy error boot message
  Revert "core: platform: add warning if driver has no owner"
  drivers: base: support cpu cache information interface to userspace via sysfs
  drivers: base: add cpu_device_create to support per-cpu devices
  topology: replace custom attribute macros with standard DEVICE_ATTR*
  cpumask: factor out show_cpumap into separate helper function
  driver core: Fix unbalanced device reference in drivers_probe
  driver core: fix race with userland in device_add()
  sysfs/kernfs: make read requests on pre-alloc files use the buffer.
  sysfs/kernfs: allow attributes to request write buffer be pre-allocated.
  fs: sysfs: return EFBIG on write if offset is larger than file size
  ...
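The bulk of those 324 commits is the mechanical .owner cleanup. As a hedged illustration only (foo_driver and its callbacks are hypothetical, not taken from this pull), a platform driver after the cleanup looks roughly like this; the registration path now supplies THIS_MODULE itself via the __platform_driver_register() wrapper:

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)  { return 0; }
static int foo_remove(struct platform_device *pdev) { return 0; }

/* Hypothetical driver after the cleanup: the ".owner = THIS_MODULE,"
 * line that used to sit next to .name is gone; platform_driver_register()
 * (and module_platform_driver() below) pass THIS_MODULE to
 * __platform_driver_register() on the driver's behalf. */
static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= {
		.name	= "foo",
	},
};
module_platform_driver(foo_driver);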
Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/Makefile		2
-rw-r--r--	drivers/base/bus.c		8
-rw-r--r--	drivers/base/cacheinfo.c	539
-rw-r--r--	drivers/base/core.c		38
-rw-r--r--	drivers/base/cpu.c		59
-rw-r--r--	drivers/base/devcoredump.c	56
-rw-r--r--	drivers/base/firmware_class.c	7
-rw-r--r--	drivers/base/node.c		14
-rw-r--r--	drivers/base/platform.c		22
-rw-r--r--	drivers/base/topology.c		71
10 files changed, 709 insertions, 107 deletions
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 53c3fe1aeb29..527d291706e8 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -4,7 +4,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \
driver.o class.o platform.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o \
- topology.o container.o property.o
+ topology.o container.o property.o cacheinfo.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
obj-y += power/
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 83e910a57563..876bae5ade33 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -254,13 +254,15 @@ static ssize_t store_drivers_probe(struct bus_type *bus,
const char *buf, size_t count)
{
struct device *dev;
+ int err = -EINVAL;
dev = bus_find_device_by_name(bus, NULL, buf);
if (!dev)
return -ENODEV;
- if (bus_rescan_devices_helper(dev, NULL) != 0)
- return -EINVAL;
- return count;
+ if (bus_rescan_devices_helper(dev, NULL) == 0)
+ err = count;
+ put_device(dev);
+ return err;
}
static struct device *next_device(struct klist_iter *i)
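The store_drivers_probe() change above is a plain reference-count fix: bus_find_device_by_name() returns its device with a reference held, and the old code leaked it on both the success and the -EINVAL paths. A hedged sketch of the general pattern (poke_one_device() and its callback are illustrative only):

#include <linux/device.h>

/* Any bus/class lookup that hands back a struct device pins it; the
 * caller must drop that reference with put_device() on every path. */
static int poke_one_device(struct bus_type *bus, const char *name,
			   int (*action)(struct device *dev))
{
	struct device *dev = bus_find_device_by_name(bus, NULL, name);
	int err = -EINVAL;

	if (!dev)
		return -ENODEV;
	if (action(dev) == 0)
		err = 0;
	put_device(dev);	/* balance bus_find_device_by_name() */
	return err;
}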
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
new file mode 100644
index 000000000000..6e64563361f0
--- /dev/null
+++ b/drivers/base/cacheinfo.c
@@ -0,0 +1,539 @@
+/*
+ * cacheinfo support - processor cache information via sysfs
+ *
+ * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
+ * Author: Sudeep Holla <sudeep.holla@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/bitops.h>
+#include <linux/cacheinfo.h>
+#include <linux/compiler.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/sysfs.h>
+
+/* pointer to per cpu cacheinfo */
+static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
+#define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu))
+#define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves)
+#define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list)
+
+struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
+{
+ return ci_cacheinfo(cpu);
+}
+
+#ifdef CONFIG_OF
+static int cache_setup_of_node(unsigned int cpu)
+{
+ struct device_node *np;
+ struct cacheinfo *this_leaf;
+ struct device *cpu_dev = get_cpu_device(cpu);
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ unsigned int index = 0;
+
+ /* skip if of_node is already populated */
+ if (this_cpu_ci->info_list->of_node)
+ return 0;
+
+ if (!cpu_dev) {
+ pr_err("No cpu device for CPU %d\n", cpu);
+ return -ENODEV;
+ }
+ np = cpu_dev->of_node;
+ if (!np) {
+ pr_err("Failed to find cpu%d device node\n", cpu);
+ return -ENOENT;
+ }
+
+ while (np && index < cache_leaves(cpu)) {
+ this_leaf = this_cpu_ci->info_list + index;
+ if (this_leaf->level != 1)
+ np = of_find_next_cache_node(np);
+ else
+ np = of_node_get(np);/* cpu node itself */
+ this_leaf->of_node = np;
+ index++;
+ }
+ return 0;
+}
+
+static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
+ struct cacheinfo *sib_leaf)
+{
+ return sib_leaf->of_node == this_leaf->of_node;
+}
+#else
+static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
+static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
+ struct cacheinfo *sib_leaf)
+{
+ /*
+ * For non-DT systems, assume unique level 1 cache, system-wide
+ * shared caches for all other levels. This will be used only if
+ * arch specific code has not populated shared_cpu_map
+ */
+ return !(this_leaf->level == 1);
+}
+#endif
+
+static int cache_shared_cpu_map_setup(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf, *sib_leaf;
+ unsigned int index;
+ int ret;
+
+ ret = cache_setup_of_node(cpu);
+ if (ret)
+ return ret;
+
+ for (index = 0; index < cache_leaves(cpu); index++) {
+ unsigned int i;
+
+ this_leaf = this_cpu_ci->info_list + index;
+ /* skip if shared_cpu_map is already populated */
+ if (!cpumask_empty(&this_leaf->shared_cpu_map))
+ continue;
+
+ cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
+ for_each_online_cpu(i) {
+ struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
+
+ if (i == cpu || !sib_cpu_ci->info_list)
+ continue;/* skip if itself or no cacheinfo */
+ sib_leaf = sib_cpu_ci->info_list + index;
+ if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+ cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
+ cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void cache_shared_cpu_map_remove(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf, *sib_leaf;
+ unsigned int sibling, index;
+
+ for (index = 0; index < cache_leaves(cpu); index++) {
+ this_leaf = this_cpu_ci->info_list + index;
+ for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
+ struct cpu_cacheinfo *sib_cpu_ci;
+
+ if (sibling == cpu) /* skip itself */
+ continue;
+ sib_cpu_ci = get_cpu_cacheinfo(sibling);
+ sib_leaf = sib_cpu_ci->info_list + index;
+ cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
+ cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
+ }
+ of_node_put(this_leaf->of_node);
+ }
+}
+
+static void free_cache_attributes(unsigned int cpu)
+{
+ cache_shared_cpu_map_remove(cpu);
+
+ kfree(per_cpu_cacheinfo(cpu));
+ per_cpu_cacheinfo(cpu) = NULL;
+}
+
+int __weak init_cache_level(unsigned int cpu)
+{
+ return -ENOENT;
+}
+
+int __weak populate_cache_leaves(unsigned int cpu)
+{
+ return -ENOENT;
+}
+
+static int detect_cache_attributes(unsigned int cpu)
+{
+ int ret;
+
+ if (init_cache_level(cpu))
+ return -ENOENT;
+
+ per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
+ sizeof(struct cacheinfo), GFP_KERNEL);
+ if (per_cpu_cacheinfo(cpu) == NULL)
+ return -ENOMEM;
+
+ ret = populate_cache_leaves(cpu);
+ if (ret)
+ goto free_ci;
+ /*
+ * For systems using DT for cache hierarchy, of_node and shared_cpu_map
+ * will be set up here only if they are not populated already
+ */
+ ret = cache_shared_cpu_map_setup(cpu);
+ if (ret)
+ goto free_ci;
+ return 0;
+
+free_ci:
+ free_cache_attributes(cpu);
+ return ret;
+}
+
+/* pointer to cpuX/cache device */
+static DEFINE_PER_CPU(struct device *, ci_cache_dev);
+#define per_cpu_cache_dev(cpu) (per_cpu(ci_cache_dev, cpu))
+
+static cpumask_t cache_dev_map;
+
+/* pointer to array of devices for cpuX/cache/indexY */
+static DEFINE_PER_CPU(struct device **, ci_index_dev);
+#define per_cpu_index_dev(cpu) (per_cpu(ci_index_dev, cpu))
+#define per_cache_index_dev(cpu, idx) ((per_cpu_index_dev(cpu))[idx])
+
+#define show_one(file_name, object) \
+static ssize_t file_name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
+ return sprintf(buf, "%u\n", this_leaf->object); \
+}
+
+show_one(level, level);
+show_one(coherency_line_size, coherency_line_size);
+show_one(number_of_sets, number_of_sets);
+show_one(physical_line_partition, physical_line_partition);
+show_one(ways_of_associativity, ways_of_associativity);
+
+static ssize_t size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%uK\n", this_leaf->size >> 10);
+}
+
+static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
+{
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ const struct cpumask *mask = &this_leaf->shared_cpu_map;
+
+ return cpumap_print_to_pagebuf(list, buf, mask);
+}
+
+static ssize_t shared_cpu_map_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return shared_cpumap_show_func(dev, false, buf);
+}
+
+static ssize_t shared_cpu_list_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return shared_cpumap_show_func(dev, true, buf);
+}
+
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+
+ switch (this_leaf->type) {
+ case CACHE_TYPE_DATA:
+ return sprintf(buf, "Data\n");
+ case CACHE_TYPE_INST:
+ return sprintf(buf, "Instruction\n");
+ case CACHE_TYPE_UNIFIED:
+ return sprintf(buf, "Unified\n");
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t allocation_policy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ unsigned int ci_attr = this_leaf->attributes;
+ int n = 0;
+
+ if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
+ n = sprintf(buf, "ReadWriteAllocate\n");
+ else if (ci_attr & CACHE_READ_ALLOCATE)
+ n = sprintf(buf, "ReadAllocate\n");
+ else if (ci_attr & CACHE_WRITE_ALLOCATE)
+ n = sprintf(buf, "WriteAllocate\n");
+ return n;
+}
+
+static ssize_t write_policy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ unsigned int ci_attr = this_leaf->attributes;
+ int n = 0;
+
+ if (ci_attr & CACHE_WRITE_THROUGH)
+ n = sprintf(buf, "WriteThrough\n");
+ else if (ci_attr & CACHE_WRITE_BACK)
+ n = sprintf(buf, "WriteBack\n");
+ return n;
+}
+
+static DEVICE_ATTR_RO(level);
+static DEVICE_ATTR_RO(type);
+static DEVICE_ATTR_RO(coherency_line_size);
+static DEVICE_ATTR_RO(ways_of_associativity);
+static DEVICE_ATTR_RO(number_of_sets);
+static DEVICE_ATTR_RO(size);
+static DEVICE_ATTR_RO(allocation_policy);
+static DEVICE_ATTR_RO(write_policy);
+static DEVICE_ATTR_RO(shared_cpu_map);
+static DEVICE_ATTR_RO(shared_cpu_list);
+static DEVICE_ATTR_RO(physical_line_partition);
+
+static struct attribute *cache_default_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_level.attr,
+ &dev_attr_shared_cpu_map.attr,
+ &dev_attr_shared_cpu_list.attr,
+ &dev_attr_coherency_line_size.attr,
+ &dev_attr_ways_of_associativity.attr,
+ &dev_attr_number_of_sets.attr,
+ &dev_attr_size.attr,
+ &dev_attr_allocation_policy.attr,
+ &dev_attr_write_policy.attr,
+ &dev_attr_physical_line_partition.attr,
+ NULL
+};
+
+static umode_t
+cache_default_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ const struct cpumask *mask = &this_leaf->shared_cpu_map;
+ umode_t mode = attr->mode;
+
+ if ((attr == &dev_attr_type.attr) && this_leaf->type)
+ return mode;
+ if ((attr == &dev_attr_level.attr) && this_leaf->level)
+ return mode;
+ if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
+ return mode;
+ if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
+ return mode;
+ if ((attr == &dev_attr_coherency_line_size.attr) &&
+ this_leaf->coherency_line_size)
+ return mode;
+ if ((attr == &dev_attr_ways_of_associativity.attr) &&
+ this_leaf->size) /* allow 0 = full associativity */
+ return mode;
+ if ((attr == &dev_attr_number_of_sets.attr) &&
+ this_leaf->number_of_sets)
+ return mode;
+ if ((attr == &dev_attr_size.attr) && this_leaf->size)
+ return mode;
+ if ((attr == &dev_attr_write_policy.attr) &&
+ (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
+ return mode;
+ if ((attr == &dev_attr_allocation_policy.attr) &&
+ (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
+ return mode;
+ if ((attr == &dev_attr_physical_line_partition.attr) &&
+ this_leaf->physical_line_partition)
+ return mode;
+
+ return 0;
+}
+
+static const struct attribute_group cache_default_group = {
+ .attrs = cache_default_attrs,
+ .is_visible = cache_default_attrs_is_visible,
+};
+
+static const struct attribute_group *cache_default_groups[] = {
+ &cache_default_group,
+ NULL,
+};
+
+static const struct attribute_group *cache_private_groups[] = {
+ &cache_default_group,
+ NULL, /* Place holder for private group */
+ NULL,
+};
+
+const struct attribute_group *
+__weak cache_get_priv_group(struct cacheinfo *this_leaf)
+{
+ return NULL;
+}
+
+static const struct attribute_group **
+cache_get_attribute_groups(struct cacheinfo *this_leaf)
+{
+ const struct attribute_group *priv_group =
+ cache_get_priv_group(this_leaf);
+
+ if (!priv_group)
+ return cache_default_groups;
+
+ if (!cache_private_groups[1])
+ cache_private_groups[1] = priv_group;
+
+ return cache_private_groups;
+}
+
+/* Add/Remove cache interface for CPU device */
+static void cpu_cache_sysfs_exit(unsigned int cpu)
+{
+ int i;
+ struct device *ci_dev;
+
+ if (per_cpu_index_dev(cpu)) {
+ for (i = 0; i < cache_leaves(cpu); i++) {
+ ci_dev = per_cache_index_dev(cpu, i);
+ if (!ci_dev)
+ continue;
+ device_unregister(ci_dev);
+ }
+ kfree(per_cpu_index_dev(cpu));
+ per_cpu_index_dev(cpu) = NULL;
+ }
+ device_unregister(per_cpu_cache_dev(cpu));
+ per_cpu_cache_dev(cpu) = NULL;
+}
+
+static int cpu_cache_sysfs_init(unsigned int cpu)
+{
+ struct device *dev = get_cpu_device(cpu);
+
+ if (per_cpu_cacheinfo(cpu) == NULL)
+ return -ENOENT;
+
+ per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
+ if (IS_ERR(per_cpu_cache_dev(cpu)))
+ return PTR_ERR(per_cpu_cache_dev(cpu));
+
+ /* Allocate all required memory */
+ per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
+ sizeof(struct device *), GFP_KERNEL);
+ if (unlikely(per_cpu_index_dev(cpu) == NULL))
+ goto err_out;
+
+ return 0;
+
+err_out:
+ cpu_cache_sysfs_exit(cpu);
+ return -ENOMEM;
+}
+
+static int cache_add_dev(unsigned int cpu)
+{
+ unsigned int i;
+ int rc;
+ struct device *ci_dev, *parent;
+ struct cacheinfo *this_leaf;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ const struct attribute_group **cache_groups;
+
+ rc = cpu_cache_sysfs_init(cpu);
+ if (unlikely(rc < 0))
+ return rc;
+
+ parent = per_cpu_cache_dev(cpu);
+ for (i = 0; i < cache_leaves(cpu); i++) {
+ this_leaf = this_cpu_ci->info_list + i;
+ if (this_leaf->disable_sysfs)
+ continue;
+ cache_groups = cache_get_attribute_groups(this_leaf);
+ ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
+ "index%1u", i);
+ if (IS_ERR(ci_dev)) {
+ rc = PTR_ERR(ci_dev);
+ goto err;
+ }
+ per_cache_index_dev(cpu, i) = ci_dev;
+ }
+ cpumask_set_cpu(cpu, &cache_dev_map);
+
+ return 0;
+err:
+ cpu_cache_sysfs_exit(cpu);
+ return rc;
+}
+
+static void cache_remove_dev(unsigned int cpu)
+{
+ if (!cpumask_test_cpu(cpu, &cache_dev_map))
+ return;
+ cpumask_clear_cpu(cpu, &cache_dev_map);
+
+ cpu_cache_sysfs_exit(cpu);
+}
+
+static int cacheinfo_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ int rc = 0;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ rc = detect_cache_attributes(cpu);
+ if (!rc)
+ rc = cache_add_dev(cpu);
+ break;
+ case CPU_DEAD:
+ cache_remove_dev(cpu);
+ if (per_cpu_cacheinfo(cpu))
+ free_cache_attributes(cpu);
+ break;
+ }
+ return notifier_from_errno(rc);
+}
+
+static int __init cacheinfo_sysfs_init(void)
+{
+ int cpu, rc = 0;
+
+ cpu_notifier_register_begin();
+
+ for_each_online_cpu(cpu) {
+ rc = detect_cache_attributes(cpu);
+ if (rc)
+ goto out;
+ rc = cache_add_dev(cpu);
+ if (rc) {
+ free_cache_attributes(cpu);
+ pr_err("error populating cacheinfo..cpu%d\n", cpu);
+ goto out;
+ }
+ }
+ __hotcpu_notifier(cacheinfo_cpu_callback, 0);
+
+out:
+ cpu_notifier_register_done();
+ return rc;
+}
+
+device_initcall(cacheinfo_sysfs_init);
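The generic file above does nothing on its own; an architecture enables the cpuN/cache/indexY sysfs nodes by overriding the two __weak hooks, init_cache_level() and populate_cache_leaves(). A hedged, made-up sketch of what an arch-side implementation could look like (one L1-D, one L1-I and one unified L2 per CPU; every value here is invented for illustration):

#include <linux/cacheinfo.h>

int init_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	this_cpu_ci->num_levels = 2;	/* L1 + L2 */
	this_cpu_ci->num_leaves = 3;	/* L1D, L1I, unified L2 */
	return 0;
}

int populate_cache_leaves(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *leaf = this_cpu_ci->info_list;

	leaf[0].level = 1; leaf[0].type = CACHE_TYPE_DATA;
	leaf[1].level = 1; leaf[1].type = CACHE_TYPE_INST;
	leaf[2].level = 2; leaf[2].type = CACHE_TYPE_UNIFIED;
	/* line size, sets, ways, cache size etc. would be read from ID
	 * registers or DT here; shared_cpu_map is filled in by the
	 * generic cache_shared_cpu_map_setup() above. */
	return 0;
}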
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 842d04707de6..97e2baf6e5d8 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1021,18 +1021,6 @@ int device_add(struct device *dev)
if (error)
goto attrError;
- if (MAJOR(dev->devt)) {
- error = device_create_file(dev, &dev_attr_dev);
- if (error)
- goto ueventattrError;
-
- error = device_create_sys_dev_entry(dev);
- if (error)
- goto devtattrError;
-
- devtmpfs_create_node(dev);
- }
-
error = device_add_class_symlinks(dev);
if (error)
goto SymlinkError;
@@ -1047,6 +1035,18 @@ int device_add(struct device *dev)
goto DPMError;
device_pm_add(dev);
+ if (MAJOR(dev->devt)) {
+ error = device_create_file(dev, &dev_attr_dev);
+ if (error)
+ goto DevAttrError;
+
+ error = device_create_sys_dev_entry(dev);
+ if (error)
+ goto SysEntryError;
+
+ devtmpfs_create_node(dev);
+ }
+
/* Notify clients of device addition. This call must come
* after dpm_sysfs_add() and before kobject_uevent().
*/
@@ -1076,6 +1076,12 @@ int device_add(struct device *dev)
done:
put_device(dev);
return error;
+ SysEntryError:
+ if (MAJOR(dev->devt))
+ device_remove_file(dev, &dev_attr_dev);
+ DevAttrError:
+ device_pm_remove(dev);
+ dpm_sysfs_remove(dev);
DPMError:
bus_remove_device(dev);
BusError:
@@ -1083,14 +1089,6 @@ done:
AttrsError:
device_remove_class_symlinks(dev);
SymlinkError:
- if (MAJOR(dev->devt))
- devtmpfs_delete_node(dev);
- if (MAJOR(dev->devt))
- device_remove_sys_dev_entry(dev);
- devtattrError:
- if (MAJOR(dev->devt))
- device_remove_file(dev, &dev_attr_dev);
- ueventattrError:
device_remove_file(dev, &dev_attr_uevent);
attrError:
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 006b1bc5297d..f829a4c71749 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -207,11 +207,8 @@ static ssize_t show_cpus_attr(struct device *dev,
char *buf)
{
struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
- int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map));
- buf[n++] = '\n';
- buf[n] = '\0';
- return n;
+ return cpumap_print_to_pagebuf(true, buf, *ca->map);
}
#define _CPU_ATTR(name, map) \
@@ -366,6 +363,60 @@ struct device *get_cpu_device(unsigned cpu)
}
EXPORT_SYMBOL_GPL(get_cpu_device);
+static void device_create_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static struct device *
+__cpu_device_create(struct device *parent, void *drvdata,
+ const struct attribute_group **groups,
+ const char *fmt, va_list args)
+{
+ struct device *dev = NULL;
+ int retval = -ENODEV;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ device_initialize(dev);
+ dev->parent = parent;
+ dev->groups = groups;
+ dev->release = device_create_release;
+ dev_set_drvdata(dev, drvdata);
+
+ retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
+ if (retval)
+ goto error;
+
+ retval = device_add(dev);
+ if (retval)
+ goto error;
+
+ return dev;
+
+error:
+ put_device(dev);
+ return ERR_PTR(retval);
+}
+
+struct device *cpu_device_create(struct device *parent, void *drvdata,
+ const struct attribute_group **groups,
+ const char *fmt, ...)
+{
+ va_list vargs;
+ struct device *dev;
+
+ va_start(vargs, fmt);
+ dev = __cpu_device_create(parent, drvdata, groups, fmt, vargs);
+ va_end(vargs);
+ return dev;
+}
+EXPORT_SYMBOL_GPL(cpu_device_create);
+
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
#endif
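cpu_device_create() is what lets cacheinfo (and potentially other per-CPU subsystems) hang extra devices below /sys/devices/system/cpu/cpuN without going through device_create(), which would force a class. A hedged usage sketch (the "widget" name is invented):

#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical: create /sys/devices/system/cpu/cpuN/widget with the
 * given attribute groups, mirroring what cacheinfo does for "cache". */
static struct device *add_widget_dev(unsigned int cpu,
				     const struct attribute_group **groups,
				     void *drvdata)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	if (!cpu_dev)
		return ERR_PTR(-ENODEV);
	return cpu_device_create(cpu_dev, drvdata, groups, "widget");
}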
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 96614b04544c..1bd120a0b084 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -31,6 +31,11 @@
#include <linux/fs.h>
#include <linux/workqueue.h>
+static struct class devcd_class;
+
+/* global disable flag, for security purposes */
+static bool devcd_disabled;
+
/* if data isn't read by userspace after 5 minutes then delete it */
#define DEVCD_TIMEOUT (HZ * 60 * 5)
@@ -121,11 +126,51 @@ static const struct attribute_group *devcd_dev_groups[] = {
&devcd_dev_group, NULL,
};
+static int devcd_free(struct device *dev, void *data)
+{
+ struct devcd_entry *devcd = dev_to_devcd(dev);
+
+ flush_delayed_work(&devcd->del_wk);
+ return 0;
+}
+
+static ssize_t disabled_show(struct class *class, struct class_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", devcd_disabled);
+}
+
+static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ long tmp = simple_strtol(buf, NULL, 10);
+
+ /*
+ * This essentially makes the attribute write-once, since you can't
+ * go back to not having it disabled. This is intentional, it serves
+ * as a system lockdown feature.
+ */
+ if (tmp != 1)
+ return -EINVAL;
+
+ devcd_disabled = true;
+
+ class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
+
+ return count;
+}
+
+static struct class_attribute devcd_class_attrs[] = {
+ __ATTR_RW(disabled),
+ __ATTR_NULL
+};
+
static struct class devcd_class = {
.name = "devcoredump",
.owner = THIS_MODULE,
.dev_release = devcd_dev_release,
.dev_groups = devcd_dev_groups,
+ .class_attrs = devcd_class_attrs,
};
static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
@@ -192,6 +237,9 @@ void dev_coredumpm(struct device *dev, struct module *owner,
struct devcd_entry *devcd;
struct device *existing;
+ if (devcd_disabled)
+ goto free;
+
existing = class_find_device(&devcd_class, NULL, dev,
devcd_match_failing);
if (existing) {
@@ -249,14 +297,6 @@ static int __init devcoredump_init(void)
}
__initcall(devcoredump_init);
-static int devcd_free(struct device *dev, void *data)
-{
- struct devcd_entry *devcd = dev_to_devcd(dev);
-
- flush_delayed_work(&devcd->del_wk);
- return 0;
-}
-
static void __exit devcoredump_exit(void)
{
class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
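The new class attribute turns /sys/class/devcoredump/disabled into a write-once kill switch: writing 1 flushes every pending dump and makes later dev_coredump calls discard their data instead of exposing it; the attribute intentionally cannot be cleared again. On the producer side nothing changes. A hedged sketch of a hypothetical driver error path feeding devcoredump (foo_report_crash() is invented):

#include <linux/devcoredump.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Hand a crash dump to devcoredump; it takes ownership of the vmalloc'ed
 * buffer and either exposes it under /sys/class/devcoredump/ for up to
 * 5 minutes (DEVCD_TIMEOUT) or, once "disabled" has been written, frees
 * it immediately. */
static void foo_report_crash(struct device *dev, const void *dump, size_t len)
{
	void *copy = vmalloc(len);

	if (!copy)
		return;
	memcpy(copy, dump, len);
	dev_coredumpv(dev, copy, len, GFP_KERNEL);
}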
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 3d785ebb48d3..58470c395301 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -591,8 +591,7 @@ static int fw_map_pages_buf(struct firmware_buf *buf)
if (!buf->is_paged_buf)
return 0;
- if (buf->data)
- vunmap(buf->data);
+ vunmap(buf->data);
buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
if (!buf->data)
return -ENOMEM;
@@ -925,7 +924,7 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
}
- wait_for_completion(&buf->completion);
+ retval = wait_for_completion_interruptible(&buf->completion);
cancel_delayed_work_sync(&fw_priv->timeout_work);
if (is_fw_load_aborted(buf))
@@ -1004,7 +1003,7 @@ static int sync_cached_firmware_buf(struct firmware_buf *buf)
break;
}
mutex_unlock(&fw_lock);
- wait_for_completion(&buf->completion);
+ ret = wait_for_completion_interruptible(&buf->completion);
mutex_lock(&fw_lock);
}
mutex_unlock(&fw_lock);
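The two firmware_class hunks replace uninterruptible waits with wait_for_completion_interruptible(), which is what silences the hung-task warnings: a task stuck waiting for userspace to feed the firmware loader can now be woken by a signal. The callers (outside these hunks) have to act on the new return value; a hedged sketch of the pattern, not the actual firmware_class code:

#include <linux/completion.h>
#include <linux/errno.h>

/* Wait for an event, but give up cleanly if the task is signalled;
 * wait_for_completion_interruptible() returns -ERESTARTSYS in that case. */
static int wait_for_loader(struct completion *done)
{
	int ret = wait_for_completion_interruptible(done);

	if (ret)
		return -ERESTARTSYS;	/* interrupted: abort the load */
	return 0;
}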
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 472168cd0c97..a3b82e9c7f20 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -25,32 +25,26 @@ static struct bus_type node_subsys = {
};
-static ssize_t node_read_cpumap(struct device *dev, int type, char *buf)
+static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
{
struct node *node_dev = to_node(dev);
const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
- int len;
/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
- len = type?
- cpulist_scnprintf(buf, PAGE_SIZE-2, mask) :
- cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
- buf[len++] = '\n';
- buf[len] = '\0';
- return len;
+ return cpumap_print_to_pagebuf(list, buf, mask);
}
static inline ssize_t node_read_cpumask(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return node_read_cpumap(dev, 0, buf);
+ return node_read_cpumap(dev, false, buf);
}
static inline ssize_t node_read_cpulist(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return node_read_cpumap(dev, 1, buf);
+ return node_read_cpumap(dev, true, buf);
}
static DEVICE_ATTR(cpumap, S_IRUGO, node_read_cpumask, NULL);
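node.c, cpu.c and topology.c all converge on the new cpumap_print_to_pagebuf() helper, which formats a cpumask into a sysfs page buffer either as a hex map or as a human-readable list and takes care of the trailing newline. A hedged sketch of a pair of show methods using it (the my_cpus attributes are invented):

#include <linux/cpumask.h>
#include <linux/device.h>

static ssize_t my_cpus_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* e.g. "f\n" on a 4-CPU system */
	return cpumap_print_to_pagebuf(false, buf, cpu_online_mask);
}

static ssize_t my_cpus_list_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	/* e.g. "0-3\n" on the same system */
	return cpumap_print_to_pagebuf(true, buf, cpu_online_mask);
}

static DEVICE_ATTR_RO(my_cpus);
static DEVICE_ATTR_RO(my_cpus_list);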
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 233ececd15a3..9421fed40905 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -580,9 +580,10 @@ void platform_driver_unregister(struct platform_driver *drv)
EXPORT_SYMBOL_GPL(platform_driver_unregister);
/**
- * platform_driver_probe - register driver for non-hotpluggable device
+ * __platform_driver_probe - register driver for non-hotpluggable device
* @drv: platform driver structure
* @probe: the driver probe routine, probably from an __init section
+ * @module: module which will be the owner of the driver
*
* Use this instead of platform_driver_register() when you know the device
* is not hotpluggable and has already been registered, and you want to
@@ -598,8 +599,8 @@ EXPORT_SYMBOL_GPL(platform_driver_unregister);
* Returns zero if the driver registered and bound to a device, else returns
* a negative error code and with the driver not registered.
*/
-int __init_or_module platform_driver_probe(struct platform_driver *drv,
- int (*probe)(struct platform_device *))
+int __init_or_module __platform_driver_probe(struct platform_driver *drv,
+ int (*probe)(struct platform_device *), struct module *module)
{
int retval, code;
@@ -614,7 +615,7 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv,
/* temporary section violation during probe() */
drv->probe = probe;
- retval = code = platform_driver_register(drv);
+ retval = code = __platform_driver_register(drv, module);
/*
* Fixup that section violation, being paranoid about code scanning
@@ -633,27 +634,28 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv,
platform_driver_unregister(drv);
return retval;
}
-EXPORT_SYMBOL_GPL(platform_driver_probe);
+EXPORT_SYMBOL_GPL(__platform_driver_probe);
/**
- * platform_create_bundle - register driver and create corresponding device
+ * __platform_create_bundle - register driver and create corresponding device
* @driver: platform driver structure
* @probe: the driver probe routine, probably from an __init section
* @res: set of resources that needs to be allocated for the device
* @n_res: number of resources
* @data: platform specific data for this platform device
* @size: size of platform specific data
+ * @module: module which will be the owner of the driver
*
* Use this in legacy-style modules that probe hardware directly and
* register a single platform device and corresponding platform driver.
*
* Returns &struct platform_device pointer on success, or ERR_PTR() on error.
*/
-struct platform_device * __init_or_module platform_create_bundle(
+struct platform_device * __init_or_module __platform_create_bundle(
struct platform_driver *driver,
int (*probe)(struct platform_device *),
struct resource *res, unsigned int n_res,
- const void *data, size_t size)
+ const void *data, size_t size, struct module *module)
{
struct platform_device *pdev;
int error;
@@ -676,7 +678,7 @@ struct platform_device * __init_or_module platform_create_bundle(
if (error)
goto err_pdev_put;
- error = platform_driver_probe(driver, probe);
+ error = __platform_driver_probe(driver, probe, module);
if (error)
goto err_pdev_del;
@@ -689,7 +691,7 @@ err_pdev_put:
err_out:
return ERR_PTR(error);
}
-EXPORT_SYMBOL_GPL(platform_create_bundle);
+EXPORT_SYMBOL_GPL(__platform_create_bundle);
/* modalias support enables more hands-off userspace setup:
* (a) environment variable lets new-style hotplug events work once system is
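The renames to __platform_driver_probe() and __platform_create_bundle() follow the same pattern as __platform_driver_register(): the public names become macros that append THIS_MODULE, so existing callers build unchanged while the owner is captured at the call site. Roughly (a paraphrase of the include/linux/platform_device.h side, not a verbatim quote):

/* platform_device.h side of the change, paraphrased: */
#define platform_driver_probe(drv, probe) \
	__platform_driver_probe(drv, probe, THIS_MODULE)

#define platform_create_bundle(driver, probe, res, n_res, data, size) \
	__platform_create_bundle(driver, probe, res, n_res, data, size, \
				 THIS_MODULE)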
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index be7c1fb7c0c9..6491f45200a7 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -29,75 +29,52 @@
#include <linux/hardirq.h>
#include <linux/topology.h>
-#define define_one_ro_named(_name, _func) \
- static DEVICE_ATTR(_name, 0444, _func, NULL)
-
-#define define_one_ro(_name) \
- static DEVICE_ATTR(_name, 0444, show_##_name, NULL)
-
#define define_id_show_func(name) \
-static ssize_t show_##name(struct device *dev, \
+static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
return sprintf(buf, "%d\n", topology_##name(dev->id)); \
}
-#if defined(topology_thread_cpumask) || defined(topology_core_cpumask) || \
- defined(topology_book_cpumask)
-static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf)
-{
- ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
- int n = 0;
-
- if (len > 1) {
- n = type?
- cpulist_scnprintf(buf, len-2, mask) :
- cpumask_scnprintf(buf, len-2, mask);
- buf[n++] = '\n';
- buf[n] = '\0';
- }
- return n;
-}
-#endif
-
-#define define_siblings_show_map(name) \
-static ssize_t show_##name(struct device *dev, \
+#define define_siblings_show_map(name, mask) \
+static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
- return show_cpumap(0, topology_##name(dev->id), buf); \
+ return cpumap_print_to_pagebuf(false, buf, topology_##mask(dev->id));\
}
-#define define_siblings_show_list(name) \
-static ssize_t show_##name##_list(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
+#define define_siblings_show_list(name, mask) \
+static ssize_t name##_list_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
{ \
- return show_cpumap(1, topology_##name(dev->id), buf); \
+ return cpumap_print_to_pagebuf(true, buf, topology_##mask(dev->id));\
}
-#define define_siblings_show_func(name) \
- define_siblings_show_map(name); define_siblings_show_list(name)
+#define define_siblings_show_func(name, mask) \
+ define_siblings_show_map(name, mask); \
+ define_siblings_show_list(name, mask)
define_id_show_func(physical_package_id);
-define_one_ro(physical_package_id);
+static DEVICE_ATTR_RO(physical_package_id);
define_id_show_func(core_id);
-define_one_ro(core_id);
+static DEVICE_ATTR_RO(core_id);
-define_siblings_show_func(thread_cpumask);
-define_one_ro_named(thread_siblings, show_thread_cpumask);
-define_one_ro_named(thread_siblings_list, show_thread_cpumask_list);
+define_siblings_show_func(thread_siblings, thread_cpumask);
+static DEVICE_ATTR_RO(thread_siblings);
+static DEVICE_ATTR_RO(thread_siblings_list);
-define_siblings_show_func(core_cpumask);
-define_one_ro_named(core_siblings, show_core_cpumask);
-define_one_ro_named(core_siblings_list, show_core_cpumask_list);
+define_siblings_show_func(core_siblings, core_cpumask);
+static DEVICE_ATTR_RO(core_siblings);
+static DEVICE_ATTR_RO(core_siblings_list);
#ifdef CONFIG_SCHED_BOOK
define_id_show_func(book_id);
-define_one_ro(book_id);
-define_siblings_show_func(book_cpumask);
-define_one_ro_named(book_siblings, show_book_cpumask);
-define_one_ro_named(book_siblings_list, show_book_cpumask_list);
+static DEVICE_ATTR_RO(book_id);
+define_siblings_show_func(book_siblings, book_cpumask);
+static DEVICE_ATTR_RO(book_siblings);
+static DEVICE_ATTR_RO(book_siblings_list);
#endif
static struct attribute *default_attrs[] = {