[PATCH 27/28] atomic: Replace atomic_{set,clear}_mask() usage

2015-07-16 Thread Peter Zijlstra
Replace the deprecated atomic_{set,clear}_mask() usage with the now
ubiquitous atomic_{or,andnot}() functions.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/blackfin/mach-common/smp.c |2 -
 arch/m32r/kernel/smp.c  |4 +-
 arch/mn10300/mm/tlb-smp.c   |2 -
 arch/s390/kernel/time.c |4 +-
 arch/s390/kvm/interrupt.c   |   30 +--
 arch/s390/kvm/kvm-s390.c|   32 ++--
 drivers/gpu/drm/i915/i915_drv.c |2 -
 drivers/gpu/drm/i915/i915_gem.c |2 -
 drivers/gpu/drm/i915/i915_irq.c |4 +-
 drivers/s390/scsi/zfcp_aux.c|2 -
 drivers/s390/scsi/zfcp_erp.c|   62 
 drivers/s390/scsi/zfcp_fc.c |8 ++---
 drivers/s390/scsi/zfcp_fsf.c|   26 
 drivers/s390/scsi/zfcp_qdio.c   |   14 -
 14 files changed, 97 insertions(+), 97 deletions(-)

--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -195,7 +195,7 @@ void send_ipi(const struct cpumask *cpum
local_irq_save(flags);
for_each_cpu(cpu, cpumask) {
bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
-   atomic_set_mask((1 << msg), &bfin_ipi_data->bits);
+   atomic_or((1 << msg), &bfin_ipi_data->bits);
atomic_inc(&bfin_ipi_data->count);
}
local_irq_restore(flags);
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -156,7 +156,7 @@ void smp_flush_cache_all(void)
cpumask_clear_cpu(smp_processor_id(), &cpumask);
spin_lock(&flushcache_lock);
mask=cpumask_bits(&cpumask);
-   atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
+   atomic_or(*mask, (atomic_t *)&flushcache_cpumask);
send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
_flush_cache_copyback_all();
while (flushcache_cpumask)
@@ -407,7 +407,7 @@ static void flush_tlb_others(cpumask_t c
flush_vma = vma;
flush_va = va;
mask=cpumask_bits(&cpumask);
-   atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
+   atomic_or(*mask, (atomic_t *)&flush_cpumask);
 
/*
 * We have to send the IPI only to
--- a/arch/mn10300/mm/tlb-smp.c
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -119,7 +119,7 @@ static void flush_tlb_others(cpumask_t c
flush_mm = mm;
flush_va = va;
 #if NR_CPUS <= BITS_PER_LONG
-   atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+   atomic_or(cpumask.bits[0], (atomic_t *)&flush_cpumask.bits[0]);
 #else
 #error Not supported.
 #endif
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -381,7 +381,7 @@ static void disable_sync_clock(void *dum
 * increase the "sequence" counter to avoid the race of an
 * etr event and the complete recovery against get_sync_clock.
 */
-   atomic_clear_mask(0x8000, sw_ptr);
+   atomic_andnot(0x8000, sw_ptr);
atomic_inc(sw_ptr);
 }
 
@@ -392,7 +392,7 @@ static void disable_sync_clock(void *dum
 static void enable_sync_clock(void)
 {
atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
-   atomic_set_mask(0x8000, sw_ptr);
+   atomic_or(0x8000, sw_ptr);
 }
 
 /*
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -170,20 +170,20 @@ static unsigned long deliverable_irqs(st
 
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
-   atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+   atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
-   atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+   atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 {
-   atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
- &vcpu->arch.sie_block->cpuflags);
+   atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
+   &vcpu->arch.sie_block->cpuflags);
vcpu->arch.sie_block->lctl = 0x0000;
vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
 
@@ -196,7 +196,7 @@ static void __reset_intercept_indicators
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 {
-   atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
+   atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
 }
 
 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
@@ -919,7 +919,7 @@ void kvm_s390_clear_local_irqs(struct kv
spin_unlock(&li->lock);
 
/* clear pending external calls set by sigp interpretation facility */
-   atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
+   atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
 }
 
@@ -1020,7 +1020,7 @@ static int __inject_pfault_init(struct k
 
li->irq.ext = irq->u.ext;

[PATCH 27/28] atomic: Replace atomic_{set,clear}_mask() usage

2015-07-16 Thread Peter Zijlstra
Replace the deprecated atomic_{set,clear}_mask() usage with the now
ubiquitous atomic_{or,andnot}() functions.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/blackfin/mach-common/smp.c |2 -
 arch/m32r/kernel/smp.c  |4 +-
 arch/mn10300/mm/tlb-smp.c   |2 -
 arch/s390/kernel/time.c |4 +-
 arch/s390/kvm/interrupt.c   |   30 +--
 arch/s390/kvm/kvm-s390.c|   32 ++--
 drivers/gpu/drm/i915/i915_drv.c |2 -
 drivers/gpu/drm/i915/i915_gem.c |2 -
 drivers/gpu/drm/i915/i915_irq.c |4 +-
 drivers/s390/scsi/zfcp_aux.c|2 -
 drivers/s390/scsi/zfcp_erp.c|   62 
 drivers/s390/scsi/zfcp_fc.c |8 ++---
 drivers/s390/scsi/zfcp_fsf.c|   26 
 drivers/s390/scsi/zfcp_qdio.c   |   14 -
 14 files changed, 97 insertions(+), 97 deletions(-)

--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -195,7 +195,7 @@ void send_ipi(const struct cpumask *cpum
local_irq_save(flags);
for_each_cpu(cpu, cpumask) {
bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
-   atomic_set_mask((1 << msg), &bfin_ipi_data->bits);
+   atomic_or((1 << msg), &bfin_ipi_data->bits);
atomic_inc(&bfin_ipi_data->count);
}
local_irq_restore(flags);
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -156,7 +156,7 @@ void smp_flush_cache_all(void)
cpumask_clear_cpu(smp_processor_id(), &cpumask);
spin_lock(&flushcache_lock);
mask=cpumask_bits(&cpumask);
-   atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
+   atomic_or(*mask, (atomic_t *)&flushcache_cpumask);
send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
_flush_cache_copyback_all();
while (flushcache_cpumask)
@@ -407,7 +407,7 @@ static void flush_tlb_others(cpumask_t c
flush_vma = vma;
flush_va = va;
mask=cpumask_bits(&cpumask);
-   atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
+   atomic_or(*mask, (atomic_t *)&flush_cpumask);
 
/*
 * We have to send the IPI only to
--- a/arch/mn10300/mm/tlb-smp.c
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -119,7 +119,7 @@ static void flush_tlb_others(cpumask_t c
flush_mm = mm;
flush_va = va;
 #if NR_CPUS <= BITS_PER_LONG
-   atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+   atomic_or(cpumask.bits[0], (atomic_t *)&flush_cpumask.bits[0]);
 #else
 #error Not supported.
 #endif
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -381,7 +381,7 @@ static void disable_sync_clock(void *dum
 * increase the sequence counter to avoid the race of an
 * etr event and the complete recovery against get_sync_clock.
 */
-   atomic_clear_mask(0x8000, sw_ptr);
+   atomic_andnot(0x8000, sw_ptr);
atomic_inc(sw_ptr);
 }
 
@@ -392,7 +392,7 @@ static void disable_sync_clock(void *dum
 static void enable_sync_clock(void)
 {
atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
-   atomic_set_mask(0x8000, sw_ptr);
+   atomic_or(0x8000, sw_ptr);
 }
 
 /*
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -170,20 +170,20 @@ static unsigned long deliverable_irqs(st
 
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
-   atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+   atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
-   atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+   atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 {
-   atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
- &vcpu->arch.sie_block->cpuflags);
+   atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
+   &vcpu->arch.sie_block->cpuflags);
vcpu->arch.sie_block->lctl = 0x0000;
vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
 
@@ -196,7 +196,7 @@ static void __reset_intercept_indicators
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 {
-   atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
+   atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
 }
 
 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
@@ -919,7 +919,7 @@ void kvm_s390_clear_local_irqs(struct kv
spin_unlock(&li->lock);
 
/* clear pending external calls set by sigp interpretation facility */
-   atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
+   atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
 }
 
@@