author    Linus Torvalds <torvalds@linux-foundation.org>  2009-12-14 20:58:24 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-12-14 20:58:24 +0300
commit    d0316554d3586cbea60592a41391b5def2553d6f (patch)
tree      5e7418f0bacbc68cec5dfd1541e03eb56870aa02 /drivers
parent    fb0bbb92d42d5bd0ab224605444efdfed06d6934 (diff)
parent    51e99be00ce2713cbb841cedc997cafa6e26c7f4 (diff)
download  linux-d0316554d3586cbea60592a41391b5def2553d6f.tar.xz
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (34 commits)
  m68k: rename global variable vmalloc_end to m68k_vmalloc_end
  percpu: add missing per_cpu_ptr_to_phys() definition for UP
  percpu: Fix kdump failure if booted with percpu_alloc=page
  percpu: make misc percpu symbols unique
  percpu: make percpu symbols in ia64 unique
  percpu: make percpu symbols in powerpc unique
  percpu: make percpu symbols in x86 unique
  percpu: make percpu symbols in xen unique
  percpu: make percpu symbols in cpufreq unique
  percpu: make percpu symbols in oprofile unique
  percpu: make percpu symbols in tracer unique
  percpu: make percpu symbols under kernel/ and mm/ unique
  percpu: remove some sparse warnings
  percpu: make alloc_percpu() handle array types
  vmalloc: fix use of non-existent percpu variable in put_cpu_var()
  this_cpu: Use this_cpu_xx in trace_functions_graph.c
  this_cpu: Use this_cpu_xx for ftrace
  this_cpu: Use this_cpu_xx in nmi handling
  this_cpu: Use this_cpu operations in RCU
  this_cpu: Use this_cpu ops for VM statistics
  ...

Fix up trivial (famous last words) global per-cpu naming conflicts in
  arch/x86/kvm/svm.c
  mm/slab.c
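The drivers/ portion of this merge applies the series' two recurring conversions: per-cpu symbols gain a subsystem prefix so their names stay unique in the flat global namespace (hence the svm.c/slab.c conflict fix-ups noted above), and open-coded per_cpu_ptr(ptr, smp_processor_id()) lookups become this_cpu_ptr() / this_cpu_* operations. A minimal sketch of the pattern, using made-up demo_* names rather than code from this merge:

#include <linux/percpu.h>

struct demo_stats {
        unsigned long packets;
        unsigned long bytes;
};

/*
 * The prefix plays the same role as cpufreq_policy_cpu, op_cpu_buffer,
 * paes_last_cword, ... in the diff below: per-cpu symbol names must now
 * be unique across the whole kernel.
 */
static DEFINE_PER_CPU(struct demo_stats, demo_stats);

void demo_account(unsigned int len)
{
        struct demo_stats *st;

        /* Before: st = &per_cpu(demo_stats, smp_processor_id()); */
        st = this_cpu_ptr(&demo_stats); /* callers keep preemption/BHs off */

        st->packets++;
        st->bytes += len;
}

The this_cpu forms compute the per-cpu address (or perform the read/update) in one step, so the explicit smp_processor_id()/get_cpu() bookkeeping in the old code disappears.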
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/cpu.c                     |  2
-rw-r--r--  drivers/cpufreq/cpufreq.c              | 16
-rw-r--r--  drivers/cpufreq/freq_table.c           | 12
-rw-r--r--  drivers/crypto/padlock-aes.c           | 12
-rw-r--r--  drivers/dma/dmaengine.c                | 36
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c  |  3
-rw-r--r--  drivers/lguest/x86/core.c              |  6
-rw-r--r--  drivers/net/chelsio/sge.c              |  5
-rw-r--r--  drivers/net/loopback.c                 |  2
-rw-r--r--  drivers/net/veth.c                     |  7
-rw-r--r--  drivers/oprofile/cpu_buffer.c          | 19
-rw-r--r--  drivers/oprofile/cpu_buffer.h          |  4
-rw-r--r--  drivers/oprofile/oprofile_stats.c      |  4
-rw-r--r--  drivers/s390/net/netiucv.c             |  8
14 files changed, 60 insertions, 76 deletions
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 27fd775375b0..958bd1540c30 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -131,7 +131,7 @@ static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute
* boot up and this data does not change there after. Hence this
* operation should be safe. No locking required.
*/
- addr = __pa(per_cpu_ptr(crash_notes, cpunum));
+ addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
rc = sprintf(buf, "%Lx\n", addr);
return rc;
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index f20668c09ce0..67bc2ece7b4b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -64,14 +64,14 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
* - Lock should not be held across
* __cpufreq_governor(data, CPUFREQ_GOV_STOP);
*/
-static DEFINE_PER_CPU(int, policy_cpu);
+static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
#define lock_policy_rwsem(mode, cpu) \
int lock_policy_rwsem_##mode \
(int cpu) \
{ \
- int policy_cpu = per_cpu(policy_cpu, cpu); \
+ int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
BUG_ON(policy_cpu == -1); \
down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
if (unlikely(!cpu_online(cpu))) { \
@@ -90,7 +90,7 @@ EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
void unlock_policy_rwsem_read(int cpu)
{
- int policy_cpu = per_cpu(policy_cpu, cpu);
+ int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
BUG_ON(policy_cpu == -1);
up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
@@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
void unlock_policy_rwsem_write(int cpu)
{
- int policy_cpu = per_cpu(policy_cpu, cpu);
+ int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
BUG_ON(policy_cpu == -1);
up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
@@ -818,7 +818,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu,
/* Set proper policy_cpu */
unlock_policy_rwsem_write(cpu);
- per_cpu(policy_cpu, cpu) = managed_policy->cpu;
+ per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
if (lock_policy_rwsem_write(cpu) < 0) {
/* Should not go through policy unlock path */
@@ -932,7 +932,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
if (!cpu_online(j))
continue;
per_cpu(cpufreq_cpu_data, j) = policy;
- per_cpu(policy_cpu, j) = policy->cpu;
+ per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
}
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -1020,7 +1020,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
cpumask_copy(policy->cpus, cpumask_of(cpu));
/* Initially set CPU itself as the policy_cpu */
- per_cpu(policy_cpu, cpu) = cpu;
+ per_cpu(cpufreq_policy_cpu, cpu) = cpu;
ret = (lock_policy_rwsem_write(cpu) < 0);
WARN_ON(ret);
@@ -2002,7 +2002,7 @@ static int __init cpufreq_core_init(void)
int cpu;
for_each_possible_cpu(cpu) {
- per_cpu(policy_cpu, cpu) = -1;
+ per_cpu(cpufreq_policy_cpu, cpu) = -1;
init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
}
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index a9bd3a05a684..05432216e224 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -174,7 +174,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
-static DEFINE_PER_CPU(struct cpufreq_frequency_table *, show_table);
+static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
/**
* show_available_freqs - show available frequencies for the specified CPU
*/
@@ -185,10 +185,10 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
ssize_t count = 0;
struct cpufreq_frequency_table *table;
- if (!per_cpu(show_table, cpu))
+ if (!per_cpu(cpufreq_show_table, cpu))
return -ENODEV;
- table = per_cpu(show_table, cpu);
+ table = per_cpu(cpufreq_show_table, cpu);
for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
@@ -217,20 +217,20 @@ void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
unsigned int cpu)
{
dprintk("setting show_table for cpu %u to %p\n", cpu, table);
- per_cpu(show_table, cpu) = table;
+ per_cpu(cpufreq_show_table, cpu) = table;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);
void cpufreq_frequency_table_put_attr(unsigned int cpu)
{
dprintk("clearing show_table for cpu %u\n", cpu);
- per_cpu(show_table, cpu) = NULL;
+ per_cpu(cpufreq_show_table, cpu) = NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
- return per_cpu(show_table, cpu);
+ return per_cpu(cpufreq_show_table, cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 84c51e177269..8c2f3703ec85 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -64,7 +64,7 @@ struct aes_ctx {
u32 *D;
};
-static DEFINE_PER_CPU(struct cword *, last_cword);
+static DEFINE_PER_CPU(struct cword *, paes_last_cword);
/* Tells whether the ACE is capable to generate
the extended key for a given key_len. */
@@ -152,9 +152,9 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
ok:
for_each_online_cpu(cpu)
- if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) ||
- &ctx->cword.decrypt == per_cpu(last_cword, cpu))
- per_cpu(last_cword, cpu) = NULL;
+ if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
+ &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
+ per_cpu(paes_last_cword, cpu) = NULL;
return 0;
}
@@ -166,7 +166,7 @@ static inline void padlock_reset_key(struct cword *cword)
{
int cpu = raw_smp_processor_id();
- if (cword != per_cpu(last_cword, cpu))
+ if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
asm volatile ("pushfl; popfl");
#else
@@ -176,7 +176,7 @@ static inline void padlock_reset_key(struct cword *cword)
static inline void padlock_store_cword(struct cword *cword)
{
- per_cpu(last_cword, raw_smp_processor_id()) = cword;
+ per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}
/*
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8f99354082ce..6f51a0a7a8bb 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -326,14 +326,7 @@ arch_initcall(dma_channel_table_init);
*/
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
- struct dma_chan *chan;
- int cpu;
-
- cpu = get_cpu();
- chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
- put_cpu();
-
- return chan;
+ return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);
@@ -857,7 +850,6 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
struct dma_async_tx_descriptor *tx;
dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
- int cpu;
unsigned long flags;
dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
@@ -876,10 +868,10 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
tx->callback = NULL;
cookie = tx->tx_submit(tx);
- cpu = get_cpu();
- per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
- per_cpu_ptr(chan->local, cpu)->memcpy_count++;
- put_cpu();
+ preempt_disable();
+ __this_cpu_add(chan->local->bytes_transferred, len);
+ __this_cpu_inc(chan->local->memcpy_count);
+ preempt_enable();
return cookie;
}
@@ -906,7 +898,6 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
struct dma_async_tx_descriptor *tx;
dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
- int cpu;
unsigned long flags;
dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
@@ -923,10 +914,10 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
tx->callback = NULL;
cookie = tx->tx_submit(tx);
- cpu = get_cpu();
- per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
- per_cpu_ptr(chan->local, cpu)->memcpy_count++;
- put_cpu();
+ preempt_disable();
+ __this_cpu_add(chan->local->bytes_transferred, len);
+ __this_cpu_inc(chan->local->memcpy_count);
+ preempt_enable();
return cookie;
}
@@ -955,7 +946,6 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
struct dma_async_tx_descriptor *tx;
dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
- int cpu;
unsigned long flags;
dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
@@ -973,10 +963,10 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
tx->callback = NULL;
cookie = tx->tx_submit(tx);
- cpu = get_cpu();
- per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
- per_cpu_ptr(chan->local, cpu)->memcpy_count++;
- put_cpu();
+ preempt_disable();
+ __this_cpu_add(chan->local->bytes_transferred, len);
+ __this_cpu_inc(chan->local->memcpy_count);
+ preempt_enable();
return cookie;
}
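The dmaengine hunks above show the two flavours of the conversion for dynamically allocated per-cpu data: a single this_cpu_read() replaces the get_cpu()/per_cpu_ptr()/put_cpu() dance in dma_find_channel(), while the statistics updates keep an explicit preempt_disable()/preempt_enable() bracket because the double-underscore __this_cpu_add()/__this_cpu_inc() forms assume preemption is already off. A standalone sketch of both, with demo_* stand-ins rather than the real dmaengine types:

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/types.h>

struct demo_chan_stats {
        unsigned long bytes_transferred;
        unsigned long memcpy_count;
};

struct demo_chan {
        struct demo_chan_stats *local;  /* per-cpu, from alloc_percpu() */
};

/* One channel pointer per cpu, standing in for channel_table[]->chan. */
static DEFINE_PER_CPU(struct demo_chan *, demo_channel);

struct demo_chan *demo_find_channel(void)
{
        /* Single preempt-safe load; no get_cpu()/put_cpu() pair needed. */
        return this_cpu_read(demo_channel);
}

void demo_account(struct demo_chan *chan, size_t len)
{
        preempt_disable();
        __this_cpu_add(chan->local->bytes_transferred, len);
        __this_cpu_inc(chan->local->memcpy_count);
        preempt_enable();
}

The chelsio, loopback and veth hunks further down apply the same this_cpu_ptr() simplification where a pointer to the per-cpu statistics is actually needed.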
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 4b89b791be6a..42be0b15084b 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -826,8 +826,7 @@ static void __cpuinit take_over_work(struct ehca_comp_pool *pool, int cpu)
cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
list_del(&cq->entry);
- __queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
- smp_processor_id()));
+ __queue_comp_task(cq, this_cpu_ptr(pool->cpu_comp_tasks));
}
spin_unlock_irqrestore(&cct->task_lock, flags_cct);
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 6ae388849a3b..fb2b7ef7868e 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -69,7 +69,7 @@ static struct lguest_pages *lguest_pages(unsigned int cpu)
(SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
}
-static DEFINE_PER_CPU(struct lg_cpu *, last_cpu);
+static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu);
/*S:010
* We approach the Switcher.
@@ -90,8 +90,8 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
* meanwhile). If that's not the case, we pretend everything in the
* Guest has changed.
*/
- if (__get_cpu_var(last_cpu) != cpu || cpu->last_pages != pages) {
- __get_cpu_var(last_cpu) = cpu;
+ if (__get_cpu_var(lg_last_cpu) != cpu || cpu->last_pages != pages) {
+ __get_cpu_var(lg_last_cpu) = cpu;
cpu->last_pages = pages;
cpu->changed = CHANGED_ALL;
}
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 8c658cf6f62f..109d2783e4d8 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1378,7 +1378,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
}
__skb_pull(skb, sizeof(*p));
- st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
+ st = this_cpu_ptr(sge->port_stats[p->iff]);
skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
@@ -1780,8 +1780,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct adapter *adapter = dev->ml_priv;
struct sge *sge = adapter->sge;
- struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port],
- smp_processor_id());
+ struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
struct cpl_tx_pkt *cpl;
struct sk_buff *orig_skb = skb;
int ret;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index eae4ad749e9d..b9fcc9819837 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -81,7 +81,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
/* it's OK to use per_cpu_ptr() because BHs are off */
pcpu_lstats = dev->ml_priv;
- lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id());
+ lb_stats = this_cpu_ptr(pcpu_lstats);
len = skb->len;
if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 63099c58a6dd..3a15de56df9c 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -153,15 +153,14 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_device *rcv = NULL;
struct veth_priv *priv, *rcv_priv;
struct veth_net_stats *stats, *rcv_stats;
- int length, cpu;
+ int length;
priv = netdev_priv(dev);
rcv = priv->peer;
rcv_priv = netdev_priv(rcv);
- cpu = smp_processor_id();
- stats = per_cpu_ptr(priv->stats, cpu);
- rcv_stats = per_cpu_ptr(rcv_priv->stats, cpu);
+ stats = this_cpu_ptr(priv->stats);
+ rcv_stats = this_cpu_ptr(rcv_priv->stats);
if (!(rcv->flags & IFF_UP))
goto tx_drop;
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index a7aae24f2889..166b67ea622f 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -47,7 +47,7 @@
*/
static struct ring_buffer *op_ring_buffer_read;
static struct ring_buffer *op_ring_buffer_write;
-DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
+DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
static void wq_sync_buffer(struct work_struct *work);
@@ -61,8 +61,7 @@ unsigned long oprofile_get_cpu_buffer_size(void)
void oprofile_cpu_buffer_inc_smpl_lost(void)
{
- struct oprofile_cpu_buffer *cpu_buf
- = &__get_cpu_var(cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
cpu_buf->sample_lost_overflow++;
}
@@ -95,7 +94,7 @@ int alloc_cpu_buffers(void)
goto fail;
for_each_possible_cpu(i) {
- struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+ struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
b->last_task = NULL;
b->last_is_kernel = -1;
@@ -122,7 +121,7 @@ void start_cpu_work(void)
work_enabled = 1;
for_each_online_cpu(i) {
- struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+ struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
/*
* Spread the work by 1 jiffy per cpu so they dont all
@@ -139,7 +138,7 @@ void end_cpu_work(void)
work_enabled = 0;
for_each_online_cpu(i) {
- struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+ struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
cancel_delayed_work(&b->work);
}
@@ -330,7 +329,7 @@ static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
unsigned long event, int is_kernel)
{
- struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
unsigned long backtrace = oprofile_backtrace_depth;
/*
@@ -375,7 +374,7 @@ oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
{
struct op_sample *sample;
int is_kernel = !user_mode(regs);
- struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
cpu_buf->sample_received++;
@@ -430,13 +429,13 @@ int oprofile_write_commit(struct op_entry *entry)
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
- struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
log_sample(cpu_buf, pc, 0, is_kernel, event);
}
void oprofile_add_trace(unsigned long pc)
{
- struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
if (!cpu_buf->tracing)
return;
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 272995d20293..68ea16ab645f 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -50,7 +50,7 @@ struct oprofile_cpu_buffer {
struct delayed_work work;
};
-DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
+DECLARE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
/*
* Resets the cpu buffer to a sane state.
@@ -60,7 +60,7 @@ DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
*/
static inline void op_cpu_buffer_reset(int cpu)
{
- struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
+ struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu);
cpu_buf->last_is_kernel = -1;
cpu_buf->last_task = NULL;
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index 61689e814d46..917d28ebeacd 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -23,7 +23,7 @@ void oprofile_reset_stats(void)
int i;
for_each_possible_cpu(i) {
- cpu_buf = &per_cpu(cpu_buffer, i);
+ cpu_buf = &per_cpu(op_cpu_buffer, i);
cpu_buf->sample_received = 0;
cpu_buf->sample_lost_overflow = 0;
cpu_buf->backtrace_aborted = 0;
@@ -51,7 +51,7 @@ void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
return;
for_each_possible_cpu(i) {
- cpu_buf = &per_cpu(cpu_buffer, i);
+ cpu_buf = &per_cpu(op_cpu_buffer, i);
snprintf(buf, 10, "cpu%d", i);
cpudir = oprofilefs_mkdir(sb, dir, buf);
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 395c04c2b00f..98c04cac43c1 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -113,11 +113,9 @@ static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
#define IUCV_DBF_TEXT_(name, level, text...) \
do { \
if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
- char* iucv_dbf_txt_buf = \
- get_cpu_var(iucv_dbf_txt_buf); \
- sprintf(iucv_dbf_txt_buf, text); \
- debug_text_event(iucv_dbf_##name, level, \
- iucv_dbf_txt_buf); \
+ char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
+ sprintf(__buf, text); \
+ debug_text_event(iucv_dbf_##name, level, __buf); \
put_cpu_var(iucv_dbf_txt_buf); \
} \
} while (0)