author     Dou Liyang <douliyangs@gmail.com>        2018-12-04 18:51:20 +0300
committer  Thomas Gleixner <tglx@linutronix.de>     2018-12-19 13:32:08 +0300
commit     bec04037e4e484f41ee4d9409e40616874169d20 (patch)
tree       5fde4bc9e597250dbdac3a4c0741b13766da8d4e /kernel/irq
parent     c2899c3470de04823870b28646981695c0046efe (diff)
download   linux-bec04037e4e484f41ee4d9409e40616874169d20.tar.xz
genirq/core: Introduce struct irq_affinity_desc

The interrupt affinity management uses straight cpumask pointers to convey the automatically assigned affinity masks for managed interrupts. The core interrupt descriptor allocation also decides, based on the pointer being non-NULL, whether an interrupt is managed or not.

Devices which use managed interrupts usually have two classes of interrupts:

  - Interrupts for multiple device queues
  - Interrupts for general device management

Currently both classes are treated the same way, i.e. as managed interrupts. The general interrupts get the default affinity mask assigned while the device queue interrupts are spread out over the possible CPUs.

Treating the general interrupts as managed is both a limitation and under certain circumstances a bug. Assume the following situation:

   default_irq_affinity = 4..7

If CPUs 4-7 are offlined, then the core code will shut down the device management interrupts because the last CPU in their affinity mask went offline.

It's also a limitation because it's desired to allow manual placement of the general device interrupts for various reasons. If they are marked managed, then the interrupt affinity setting from both user and kernel space is disabled.

To remedy that situation it's required to convey more information than the cpumasks through various interfaces related to interrupt descriptor allocation.

Instead of adding yet another argument, create a new data structure 'irq_affinity_desc' which for now just contains the cpumask. This struct can be expanded to convey auxiliary information in the next step.

No functional change, just preparatory work.

[ tglx: Simplified logic and clarified changelog ]

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Suggested-by: Bjorn Helgaas <bhelgaas@google.com>
Signed-off-by: Dou Liyang <douliyangs@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-pci@vger.kernel.org
Cc: kashyap.desai@broadcom.com
Cc: shivasharan.srikanteshwara@broadcom.com
Cc: sumit.saxena@broadcom.com
Cc: ming.lei@redhat.com
Cc: hch@lst.de
Cc: douliyang1@huawei.com
Link: https://lkml.kernel.org/r/20181204155122.6327-2-douliyangs@gmail.com
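[Editor's note: the structure itself is introduced in include/linux/interrupt.h and therefore does not appear in the kernel/irq diff below; as the changelog says, at this point it is just a thin wrapper around a cpumask, roughly:

	struct irq_affinity_desc {
		struct cpumask	mask;
	};
]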
Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/affinity.c   | 22
-rw-r--r--  kernel/irq/devres.c     |  4
-rw-r--r--  kernel/irq/irqdesc.c    | 17
-rw-r--r--  kernel/irq/irqdomain.c  |  4
-rw-r--r--  kernel/irq/msi.c        |  8
5 files changed, 29 insertions, 26 deletions
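[Editor's note: the recurring mechanical change in the hunks below is that code which previously did pointer arithmetic on a plain struct cpumask array now indexes the descriptor array and takes its mask member, e.g. from the affinity.c hunk:

	/* before: masks is a struct cpumask array */
	cpumask_copy(masks + curvec, irq_default_affinity);

	/* after: masks is a struct irq_affinity_desc array */
	cpumask_copy(&masks[curvec].mask, irq_default_affinity);
]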
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index e423bff1928c..c0fe591b0dc9 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -99,7 +99,7 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
cpumask_var_t *node_to_cpumask,
const struct cpumask *cpu_mask,
struct cpumask *nmsk,
- struct cpumask *masks)
+ struct irq_affinity_desc *masks)
{
int n, nodes, cpus_per_vec, extra_vecs, done = 0;
int last_affv = firstvec + numvecs;
@@ -117,7 +117,9 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
*/
if (numvecs <= nodes) {
for_each_node_mask(n, nodemsk) {
- cpumask_or(masks + curvec, masks + curvec, node_to_cpumask[n]);
+ cpumask_or(&masks[curvec].mask,
+ &masks[curvec].mask,
+ node_to_cpumask[n]);
if (++curvec == last_affv)
curvec = firstvec;
}
@@ -150,7 +152,8 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
cpus_per_vec++;
--extra_vecs;
}
- irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
+ irq_spread_init_one(&masks[curvec].mask, nmsk,
+ cpus_per_vec);
}
done += v;
@@ -173,7 +176,7 @@ out:
static int irq_build_affinity_masks(const struct irq_affinity *affd,
int startvec, int numvecs, int firstvec,
cpumask_var_t *node_to_cpumask,
- struct cpumask *masks)
+ struct irq_affinity_desc *masks)
{
int curvec = startvec, nr_present, nr_others;
int ret = -ENOMEM;
@@ -226,15 +229,15 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
* @nvecs: The total number of vectors
* @affd: Description of the affinity requirements
*
- * Returns the masks pointer or NULL if allocation failed.
+ * Returns the irq_affinity_desc pointer or NULL if allocation failed.
*/
-struct cpumask *
+struct irq_affinity_desc *
irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
{
int affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
int curvec, usedvecs;
cpumask_var_t *node_to_cpumask;
- struct cpumask *masks = NULL;
+ struct irq_affinity_desc *masks = NULL;
int i, nr_sets;
/*
@@ -254,8 +257,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
/* Fill out vectors at the beginning that don't need affinity */
for (curvec = 0; curvec < affd->pre_vectors; curvec++)
- cpumask_copy(masks + curvec, irq_default_affinity);
-
+ cpumask_copy(&masks[curvec].mask, irq_default_affinity);
/*
* Spread on present CPUs starting from affd->pre_vectors. If we
* have multiple sets, build each sets affinity mask separately.
@@ -285,7 +287,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
else
curvec = affd->pre_vectors + usedvecs;
for (; curvec < nvecs; curvec++)
- cpumask_copy(masks + curvec, irq_default_affinity);
+ cpumask_copy(&masks[curvec].mask, irq_default_affinity);
outnodemsk:
free_node_to_cpumask(node_to_cpumask);
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index 6a682c229e10..5d5378ea0afe 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -169,7 +169,7 @@ static void devm_irq_desc_release(struct device *dev, void *res)
* @cnt: Number of consecutive irqs to allocate
* @node: Preferred node on which the irq descriptor should be allocated
* @owner: Owning module (can be NULL)
- * @affinity: Optional pointer to an affinity mask array of size @cnt
+ * @affinity: Optional pointer to an irq_affinity_desc array of size @cnt
* which hints where the irq descriptors should be allocated
* and which default affinities to use
*
@@ -179,7 +179,7 @@ static void devm_irq_desc_release(struct device *dev, void *res)
*/
int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
unsigned int cnt, int node, struct module *owner,
- const struct cpumask *affinity)
+ const struct irq_affinity_desc *affinity)
{
struct irq_desc_devres *dr;
int base;
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 578d0e5f1b5b..cb401d6c5040 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -449,28 +449,29 @@ static void free_desc(unsigned int irq)
}
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
- const struct cpumask *affinity, struct module *owner)
+ const struct irq_affinity_desc *affinity,
+ struct module *owner)
{
- const struct cpumask *mask = NULL;
struct irq_desc *desc;
unsigned int flags;
int i;
/* Validate affinity mask(s) */
if (affinity) {
- for (i = 0, mask = affinity; i < cnt; i++, mask++) {
- if (cpumask_empty(mask))
+ for (i = 0; i < cnt; i++) {
+ if (cpumask_empty(&affinity[i].mask))
return -EINVAL;
}
}
flags = affinity ? IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN : 0;
- mask = NULL;
for (i = 0; i < cnt; i++) {
+ const struct cpumask *mask = NULL;
+
if (affinity) {
node = cpu_to_node(cpumask_first(affinity));
- mask = affinity;
+ mask = &affinity->mask;
affinity++;
}
desc = alloc_desc(start + i, node, flags, mask, owner);
@@ -575,7 +576,7 @@ static void free_desc(unsigned int irq)
}
static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
- const struct cpumask *affinity,
+ const struct irq_affinity_desc *affinity,
struct module *owner)
{
u32 i;
@@ -705,7 +706,7 @@ EXPORT_SYMBOL_GPL(irq_free_descs);
*/
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
- struct module *owner, const struct cpumask *affinity)
+ struct module *owner, const struct irq_affinity_desc *affinity)
{
int start, ret;
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 3366d11c3e02..8b0be4bd6565 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -969,7 +969,7 @@ const struct irq_domain_ops irq_domain_simple_ops = {
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
- int node, const struct cpumask *affinity)
+ int node, const struct irq_affinity_desc *affinity)
{
unsigned int hint;
@@ -1281,7 +1281,7 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
*/
int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
unsigned int nr_irqs, int node, void *arg,
- bool realloc, const struct cpumask *affinity)
+ bool realloc, const struct irq_affinity_desc *affinity)
{
int i, ret, virq;
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 4ca2fd46645d..ad26fbcfbfc8 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -23,11 +23,11 @@
* @nvec: The number of vectors used in this entry
* @affinity: Optional pointer to an affinity mask array size of @nvec
*
- * If @affinity is not NULL then a an affinity array[@nvec] is allocated
- * and the affinity masks from @affinity are copied.
+ * If @affinity is not NULL then an affinity array[@nvec] is allocated
+ * and the affinity masks and flags from @affinity are copied.
*/
-struct msi_desc *
-alloc_msi_entry(struct device *dev, int nvec, const struct cpumask *affinity)
+struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
+ const struct irq_affinity_desc *affinity)
{
struct msi_desc *desc;