Merge tag 'mvebu-soc-3.19' of git://git.infradead.org/linux-mvebu into next/soc

Pull "mvebu SoC changes for v3.19" from Jason Cooper:

 - Armada 38x
    - Implement CPU hotplug support

 - Armada 375
    - Remove Z1 stepping support (the Z1 stepping had only limited distribution)

* tag 'mvebu-soc-3.19' of git://git.infradead.org/linux-mvebu:
  ARM: mvebu: Implement the CPU hotplug support for the Armada 38x SoCs
  ARM: mvebu: Fix the secondary startup for Cortex A9 SoC
  ARM: mvebu: Move SCU power up in a function
  ARM: mvebu: Clean-up the Armada XP support
  ARM: mvebu: update comments in coherency.c
  ARM: mvebu: remove Armada 375 Z1 workaround for I/O coherency
  ARM: mvebu: remove unused register offset definition
  ARM: mvebu: disable I/O coherency on non-SMP situations on Armada 370/375/38x/XP
  ARM: mvebu: make the coherency_ll.S functions work with no coherency fabric
  ARM: mvebu: Remove thermal quirk for A375 Z1 revision
  ARM: mvebu: add missing of_node_put() call in coherency.c
  ARM: orion: Fix for certain sequence of request_irq can cause irq storm
  ARM: mvebu: armada xp: Generalize use of i2c quirk

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
commit 756f80cee7 (Arnd Bergmann, 2014-11-28 22:25:11 +01:00)
12 changed files with 166 additions and 275 deletions


@@ -16,14 +16,8 @@
#define __MACH_ARMADA_370_XP_H
#ifdef CONFIG_SMP
#include <linux/cpumask.h>
#define ARMADA_XP_MAX_CPUS 4
void armada_xp_secondary_startup(void);
extern struct smp_operations armada_xp_smp_ops;
#endif
int armada_370_xp_pmsu_idle_enter(unsigned long deepidle);
#endif /* __MACH_ARMADA_370_XP_H */


@@ -124,76 +124,12 @@ static void __init i2c_quirk(void)
return;
}
#define A375_Z1_THERMAL_FIXUP_OFFSET 0xc
static void __init thermal_quirk(void)
{
struct device_node *np;
u32 dev, rev;
int res;
/*
* The early SoC Z1 revision needs a quirk to be applied in order
* for the thermal controller to work properly. This quirk breaks
* the thermal support if applied on a SoC that doesn't need it,
* so we enforce the SoC revision to be known.
*/
res = mvebu_get_soc_id(&dev, &rev);
if (res < 0 || (res == 0 && rev > ARMADA_375_Z1_REV))
return;
for_each_compatible_node(np, NULL, "marvell,armada375-thermal") {
struct property *prop;
__be32 newval, *newprop, *oldprop;
int len;
/*
* The register offset is at a wrong location. This quirk
* creates a new reg property as a clone of the previous
* one and corrects the offset.
*/
oldprop = (__be32 *)of_get_property(np, "reg", &len);
if (!oldprop)
continue;
/* Create a duplicate of the 'reg' property */
prop = kzalloc(sizeof(*prop), GFP_KERNEL);
prop->length = len;
prop->name = kstrdup("reg", GFP_KERNEL);
prop->value = kzalloc(len, GFP_KERNEL);
memcpy(prop->value, oldprop, len);
/* Fixup the register offset of the second entry */
oldprop += 2;
newprop = (__be32 *)prop->value + 2;
newval = cpu_to_be32(be32_to_cpu(*oldprop) -
A375_Z1_THERMAL_FIXUP_OFFSET);
*newprop = newval;
of_update_property(np, prop);
/*
* The thermal controller needs some quirk too, so let's change
* the compatible string to reflect this and allow the driver
* the take the necessary action.
*/
prop = kzalloc(sizeof(*prop), GFP_KERNEL);
prop->name = kstrdup("compatible", GFP_KERNEL);
prop->length = sizeof("marvell,armada375-z1-thermal");
prop->value = kstrdup("marvell,armada375-z1-thermal",
GFP_KERNEL);
of_update_property(np, prop);
}
return;
}
static void __init mvebu_dt_init(void)
{
if (of_machine_is_compatible("plathome,openblocks-ax3-4"))
if (of_machine_is_compatible("marvell,armadaxp"))
i2c_quirk();
if (of_machine_is_compatible("marvell,a375-db")) {
if (of_machine_is_compatible("marvell,a375-db"))
external_abort_quirk();
thermal_quirk();
}
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
@@ -206,6 +142,11 @@ static const char * const armada_370_xp_dt_compat[] = {
DT_MACHINE_START(ARMADA_370_XP_DT, "Marvell Armada 370/XP (Device Tree)")
.l2c_aux_val = 0,
.l2c_aux_mask = ~0,
/*
* The following field (.smp) is still needed to ensure backward
* compatibility with old Device Trees that were not specifying the
* cpus enable-method property.
*/
.smp = smp_ops(armada_xp_smp_ops),
.init_machine = mvebu_dt_init,
.init_irq = mvebu_init_irq,


@@ -1,5 +1,6 @@
/*
* Coherency fabric (Aurora) support for Armada 370 and XP platforms.
* Coherency fabric (Aurora) support for Armada 370, 375, 38x and XP
* platforms.
*
* Copyright (C) 2012 Marvell
*
@@ -11,7 +12,7 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*
* The Armada 370 and Armada XP SOCs have a coherency fabric which is
* The Armada 370, 375, 38x and XP SOCs have a coherency fabric which is
* responsible for ensuring hardware coherency between all CPUs and between
* CPUs and I/O masters. This file initializes the coherency fabric and
* supplies basic routines for configuring and controlling hardware coherency
@@ -28,12 +29,10 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mbus.h>
#include <linux/clk.h>
#include <linux/pci.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/mach/map.h>
#include "armada-370-xp.h"
#include "coherency.h"
#include "mvebu-soc-id.h"
@@ -42,8 +41,6 @@ void __iomem *coherency_base;
static void __iomem *coherency_cpu_base;
/* Coherency fabric registers */
#define COHERENCY_FABRIC_CFG_OFFSET 0x4
#define IO_SYNC_BARRIER_CTL_OFFSET 0x0
enum {
@@ -79,157 +76,8 @@ int set_cpu_coherent(void)
return ll_enable_coherency();
}
/*
* The below code implements the I/O coherency workaround on Armada
* 375. This workaround consists in using the two channels of the
* first XOR engine to trigger a XOR transaction that serves as the
* I/O coherency barrier.
*/
static void __iomem *xor_base, *xor_high_base;
static dma_addr_t coherency_wa_buf_phys[CONFIG_NR_CPUS];
static void *coherency_wa_buf[CONFIG_NR_CPUS];
static bool coherency_wa_enabled;
#define XOR_CONFIG(chan) (0x10 + (chan * 4))
#define XOR_ACTIVATION(chan) (0x20 + (chan * 4))
#define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2))
#define WINDOW_BASE(w) (0x250 + ((w) << 2))
#define WINDOW_SIZE(w) (0x270 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2))
#define WINDOW_OVERRIDE_CTRL(chan) (0x2A0 + ((chan) << 2))
#define XOR_DEST_POINTER(chan) (0x2B0 + (chan * 4))
#define XOR_BLOCK_SIZE(chan) (0x2C0 + (chan * 4))
#define XOR_INIT_VALUE_LOW 0x2E0
#define XOR_INIT_VALUE_HIGH 0x2E4
static inline void mvebu_hwcc_armada375_sync_io_barrier_wa(void)
{
int idx = smp_processor_id();
/* Write '1' to the first word of the buffer */
writel(0x1, coherency_wa_buf[idx]);
/* Wait until the engine is idle */
while ((readl(xor_base + XOR_ACTIVATION(idx)) >> 4) & 0x3)
;
dmb();
/* Trigger channel */
writel(0x1, xor_base + XOR_ACTIVATION(idx));
/* Poll the data until it is cleared by the XOR transaction */
while (readl(coherency_wa_buf[idx]))
;
}
static void __init armada_375_coherency_init_wa(void)
{
const struct mbus_dram_target_info *dram;
struct device_node *xor_node;
struct property *xor_status;
struct clk *xor_clk;
u32 win_enable = 0;
int i;
pr_warn("enabling coherency workaround for Armada 375 Z1, one XOR engine disabled\n");
/*
* Since the workaround uses one XOR engine, we grab a
* reference to its Device Tree node first.
*/
xor_node = of_find_compatible_node(NULL, NULL, "marvell,orion-xor");
BUG_ON(!xor_node);
/*
* Then we mark it as disabled so that the real XOR driver
* will not use it.
*/
xor_status = kzalloc(sizeof(struct property), GFP_KERNEL);
BUG_ON(!xor_status);
xor_status->value = kstrdup("disabled", GFP_KERNEL);
BUG_ON(!xor_status->value);
xor_status->length = 8;
xor_status->name = kstrdup("status", GFP_KERNEL);
BUG_ON(!xor_status->name);
of_update_property(xor_node, xor_status);
/*
* And we remap the registers, get the clock, and do the
* initial configuration of the XOR engine.
*/
xor_base = of_iomap(xor_node, 0);
xor_high_base = of_iomap(xor_node, 1);
xor_clk = of_clk_get_by_name(xor_node, NULL);
BUG_ON(!xor_clk);
clk_prepare_enable(xor_clk);
dram = mv_mbus_dram_info();
for (i = 0; i < 8; i++) {
writel(0, xor_base + WINDOW_BASE(i));
writel(0, xor_base + WINDOW_SIZE(i));
if (i < 4)
writel(0, xor_base + WINDOW_REMAP_HIGH(i));
}
for (i = 0; i < dram->num_cs; i++) {
const struct mbus_dram_window *cs = dram->cs + i;
writel((cs->base & 0xffff0000) |
(cs->mbus_attr << 8) |
dram->mbus_dram_target_id, xor_base + WINDOW_BASE(i));
writel((cs->size - 1) & 0xffff0000, xor_base + WINDOW_SIZE(i));
win_enable |= (1 << i);
win_enable |= 3 << (16 + (2 * i));
}
writel(win_enable, xor_base + WINDOW_BAR_ENABLE(0));
writel(win_enable, xor_base + WINDOW_BAR_ENABLE(1));
writel(0, xor_base + WINDOW_OVERRIDE_CTRL(0));
writel(0, xor_base + WINDOW_OVERRIDE_CTRL(1));
for (i = 0; i < CONFIG_NR_CPUS; i++) {
coherency_wa_buf[i] = kzalloc(PAGE_SIZE, GFP_KERNEL);
BUG_ON(!coherency_wa_buf[i]);
/*
* We can't use the DMA mapping API, since we don't
* have a valid 'struct device' pointer
*/
coherency_wa_buf_phys[i] =
virt_to_phys(coherency_wa_buf[i]);
BUG_ON(!coherency_wa_buf_phys[i]);
/*
* Configure the XOR engine for memset operation, with
* a 128 bytes block size
*/
writel(0x444, xor_base + XOR_CONFIG(i));
writel(128, xor_base + XOR_BLOCK_SIZE(i));
writel(coherency_wa_buf_phys[i],
xor_base + XOR_DEST_POINTER(i));
}
writel(0x0, xor_base + XOR_INIT_VALUE_LOW);
writel(0x0, xor_base + XOR_INIT_VALUE_HIGH);
coherency_wa_enabled = true;
}
static inline void mvebu_hwcc_sync_io_barrier(void)
{
if (coherency_wa_enabled) {
mvebu_hwcc_armada375_sync_io_barrier_wa();
return;
}
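/*
 * Trigger the coherency fabric's I/O sync barrier and spin until the
 * fabric reports completion.
 */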
writel(0x1, coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET);
while (readl(coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET) & 0x1);
}
@@ -361,25 +209,41 @@ static int coherency_type(void)
{
struct device_node *np;
const struct of_device_id *match;
int type;
/*
* The coherency fabric is needed:
* - For coherency between processors on Armada XP, so only
* when SMP is enabled.
* - For coherency between the processor and I/O devices, but
* this coherency requires many pre-requisites (write
* allocate cache policy, shareable pages, SMP bit set) that
* are only meant in SMP situations.
*
* Note that this means that on Armada 370, there is currently
* no way to use hardware I/O coherency, because even when
* CONFIG_SMP is enabled, is_smp() returns false due to the
* Armada 370 being a single-core processor. To lift this
* limitation, we would have to find a way to make the cache
* policy set to write-allocate (on all Armada SoCs), and to
* set the shareable attribute in page tables (on all Armada
* SoCs except the Armada 370). Unfortunately, such decisions
* are taken very early in the kernel boot process, at a point
* where we don't know yet on which SoC we are running.
*/
if (!is_smp())
return COHERENCY_FABRIC_TYPE_NONE;
np = of_find_matching_node_and_match(NULL, of_coherency_table, &match);
if (np) {
int type = (int) match->data;
if (!np)
return COHERENCY_FABRIC_TYPE_NONE;
/* Armada 370/XP coherency works in both UP and SMP */
if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP)
return type;
type = (int) match->data;
/* Armada 375 coherency works only on SMP */
else if (type == COHERENCY_FABRIC_TYPE_ARMADA_375 && is_smp())
return type;
of_node_put(np);
/* Armada 380 coherency works only on SMP */
else if (type == COHERENCY_FABRIC_TYPE_ARMADA_380 && is_smp())
return type;
}
return COHERENCY_FABRIC_TYPE_NONE;
return type;
}
int coherency_available(void)
@@ -400,27 +264,16 @@ int __init coherency_init(void)
type == COHERENCY_FABRIC_TYPE_ARMADA_380)
armada_375_380_coherency_init(np);
of_node_put(np);
return 0;
}
static int __init coherency_late_init(void)
{
int type = coherency_type();
if (type == COHERENCY_FABRIC_TYPE_NONE)
return 0;
if (type == COHERENCY_FABRIC_TYPE_ARMADA_375) {
u32 dev, rev;
if (mvebu_get_soc_id(&dev, &rev) == 0 &&
rev == ARMADA_375_Z1_REV)
armada_375_coherency_init_wa();
}
bus_register_notifier(&platform_bus_type,
&mvebu_hwcc_nb);
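/*
 * Register the hardware I/O coherency bus notifier only when
 * coherency is actually available on this system.
 */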
if (coherency_available())
bus_register_notifier(&platform_bus_type,
&mvebu_hwcc_nb);
return 0;
}


@@ -24,7 +24,10 @@
#include <asm/cp15.h>
.text
/* Returns the coherency base address in r1 (r0 is untouched) */
/*
* Returns the coherency base address in r1 (r0 is untouched), or 0 if
* the coherency fabric is not enabled.
*/
ENTRY(ll_get_coherency_base)
mrc p15, 0, r1, c1, c0, 0
tst r1, #CR_M @ Check MMU bit enabled
@@ -32,8 +35,13 @@ ENTRY(ll_get_coherency_base)
/*
* MMU is disabled, use the physical address of the coherency
* base address.
* base address. However, if the coherency fabric isn't mapped
* (i.e its virtual address is zero), it means coherency is
* not enabled, so we return 0.
*/
ldr r1, =coherency_base
cmp r1, #0
beq 2f
adr r1, 3f
ldr r3, [r1]
ldr r1, [r1, r3]
@@ -85,6 +93,9 @@ ENTRY(ll_add_cpu_to_smp_group)
*/
mov r0, lr
bl ll_get_coherency_base
/* Bail out if the coherency is not enabled */
cmp r1, #0
reteq r0
bl ll_get_coherency_cpumask
mov lr, r0
add r0, r1, #ARMADA_XP_CFB_CFG_REG_OFFSET
@@ -107,6 +118,9 @@ ENTRY(ll_enable_coherency)
*/
mov r0, lr
bl ll_get_coherency_base
/* Bail out if the coherency is not enabled */
cmp r1, #0
reteq r0
bl ll_get_coherency_cpumask
mov lr, r0
add r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
@@ -131,6 +145,9 @@ ENTRY(ll_disable_coherency)
*/
mov r0, lr
bl ll_get_coherency_base
/* Bail out if the coherency is not enabled */
cmp r1, #0
reteq r0
bl ll_get_coherency_cpumask
mov lr, r0
add r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET


@@ -15,7 +15,6 @@
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/resource.h>
#include "armada-370-xp.h"
static void __iomem *cpu_reset_base;
static size_t cpu_reset_size;


@@ -22,5 +22,6 @@
ENTRY(mvebu_cortex_a9_secondary_startup)
ARM_BE8(setend be)
bl v7_invalidate_l1
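@ put this CPU's SCU power status back into Normal mode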
bl armada_38x_scu_power_up
b secondary_startup
ENDPROC(mvebu_cortex_a9_secondary_startup)


@@ -43,21 +43,70 @@ static int __cpuinit mvebu_cortex_a9_boot_secondary(unsigned int cpu,
else
mvebu_pmsu_set_cpu_boot_addr(hw_cpu, mvebu_cortex_a9_secondary_startup);
smp_wmb();
/*
* Doing this before deasserting the CPUs is needed to wake up CPUs
* in the offline state after using CPU hotplug.
*/
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
ret = mvebu_cpu_reset_deassert(hw_cpu);
if (ret) {
pr_err("Could not start the secondary CPU: %d\n", ret);
return ret;
}
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
return 0;
}
/*
* When a CPU is brought back online, either through CPU hotplug, or
* because of the boot of a kexec'ed kernel, the PMSU configuration
* for this CPU might be in the deep idle state, preventing this CPU
* from receiving interrupts. Here, we therefore take out the current
* CPU from this state, which was entered by armada_38x_cpu_die()
* below.
*/
static void armada_38x_secondary_init(unsigned int cpu)
{
mvebu_v7_pmsu_idle_exit();
}
#ifdef CONFIG_HOTPLUG_CPU
static void armada_38x_cpu_die(unsigned int cpu)
{
/*
* CPU hotplug is implemented by putting offline CPUs into the
* deep idle sleep state.
*/
armada_38x_do_cpu_suspend(true);
}
/*
* We need a dummy function, so that platform_can_cpu_hotplug() knows
* we support CPU hotplug. However, the function does not need to do
* anything, because CPUs going offline can enter the deep idle state
* by themselves, without any help from a still alive CPU.
*/
static int armada_38x_cpu_kill(unsigned int cpu)
{
return 1;
}
#endif
static struct smp_operations mvebu_cortex_a9_smp_ops __initdata = {
.smp_boot_secondary = mvebu_cortex_a9_boot_secondary,
};
static struct smp_operations armada_38x_smp_ops __initdata = {
.smp_boot_secondary = mvebu_cortex_a9_boot_secondary,
.smp_secondary_init = armada_38x_secondary_init,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_die = armada_38x_cpu_die,
.cpu_kill = armada_38x_cpu_kill,
#endif
};
CPU_METHOD_OF_DECLARE(mvebu_armada_375_smp, "marvell,armada-375-smp",
&mvebu_cortex_a9_smp_ops);
CPU_METHOD_OF_DECLARE(mvebu_armada_380_smp, "marvell,armada-380-smp",
&mvebu_cortex_a9_smp_ops);
&armada_38x_smp_ops);


@@ -30,6 +30,8 @@
#include "pmsu.h"
#include "coherency.h"
#define ARMADA_XP_MAX_CPUS 4
#define AXP_BOOTROM_BASE 0xfff00000
#define AXP_BOOTROM_SIZE 0x100000


@@ -39,7 +39,6 @@
#include <asm/suspend.h>
#include <asm/tlbflush.h>
#include "common.h"
#include "armada-370-xp.h"
#define PMSU_BASE_OFFSET 0x100
@@ -312,7 +311,7 @@ static int armada_370_xp_cpu_suspend(unsigned long deepidle)
return cpu_suspend(deepidle, armada_370_xp_pmsu_idle_enter);
}
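/*
 * Also used by the Armada 38x CPU hotplug code in platsmp-a9.c to
 * park a dying CPU in deep idle (hence no longer static).
 */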
static int armada_38x_do_cpu_suspend(unsigned long deepidle)
int armada_38x_do_cpu_suspend(unsigned long deepidle)
{
unsigned long flags = 0;


@@ -18,4 +18,6 @@ int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target,
void mvebu_v7_pmsu_idle_exit(void);
int armada_370_xp_pmsu_idle_enter(unsigned long deepidle);
int armada_38x_do_cpu_suspend(unsigned long deepidle);
#endif /* __MACH_370_XP_PMSU_H */


@@ -12,6 +12,18 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
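/*
 * Put the calling CPU's SCU power status back into Normal mode.
 * Shared by the secondary cold-boot path (headsmp-a9.S) and the
 * deep-idle resume path (armada_38x_cpu_resume) below.
 */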
ENTRY(armada_38x_scu_power_up)
mrc p15, 4, r1, c15, c0 @ get SCU base address
orr r1, r1, #0x8 @ SCU CPU Power Status Register
mrc 15, 0, r0, cr0, cr0, 5 @ get the CPU ID
and r0, r0, #15
add r1, r1, r0
mov r0, #0x0
strb r0, [r1] @ switch SCU power state to Normal mode
ret lr
ENDPROC(armada_38x_scu_power_up)
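For reference, the sequence above is roughly what the kernel's generic SCU helper already does; a minimal C sketch, using scu_power_mode() and SCU_PM_NORMAL from <asm/smp_scu.h> (the wrapper name is illustrative only; the real routine stays in assembly because it runs before a C stack is available on these paths):

#include <asm/smp_scu.h>	/* scu_power_mode(), SCU_PM_NORMAL */

/* Illustrative equivalent of armada_38x_scu_power_up: put the calling
 * CPU's entry in the SCU CPU Power Status register back into Normal mode.
 */
static void scu_power_up_sketch(void __iomem *scu_base)
{
	scu_power_mode(scu_base, SCU_PM_NORMAL);
}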
/*
* This is the entry point through which CPUs exiting cpuidle deep
* idle state are going.
@@ -27,13 +39,7 @@ ENTRY(armada_38x_cpu_resume)
/* do we need it for Armada 38x*/
ARM_BE8(setend be ) @ go BE8 if entered LE
bl v7_invalidate_l1
mrc p15, 4, r1, c15, c0 @ get SCU base address
orr r1, r1, #0x8 @ SCU CPU Power Status Register
mrc 15, 0, r0, cr0, cr0, 5 @ get the CPU ID
and r0, r0, #15
add r1, r1, r0
mov r0, #0x0
strb r0, [r1] @ switch SCU power state to Normal mode
bl armada_38x_scu_power_up
b cpu_resume
ENDPROC(armada_38x_cpu_resume)


@@ -497,6 +497,34 @@ static void orion_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
#define orion_gpio_dbg_show NULL
#endif
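/*
 * Mask/unmask the GPIO interrupt with a locked read-modify-write of
 * the mask register itself, rather than through the generic
 * irq_gc_mask_set_bit()/irq_gc_mask_clr_bit() helpers, which update
 * the register from the generic chip's cached mask value.
 */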
static void orion_gpio_unmask_irq(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 reg_val;
u32 mask = d->mask;
irq_gc_lock(gc);
reg_val = irq_reg_readl(gc->reg_base + ct->regs.mask);
reg_val |= mask;
irq_reg_writel(reg_val, gc->reg_base + ct->regs.mask);
irq_gc_unlock(gc);
}
static void orion_gpio_mask_irq(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
u32 reg_val;
irq_gc_lock(gc);
reg_val = irq_reg_readl(gc->reg_base + ct->regs.mask);
reg_val &= ~mask;
irq_reg_writel(reg_val, gc->reg_base + ct->regs.mask);
irq_gc_unlock(gc);
}
void __init orion_gpio_init(struct device_node *np,
int gpio_base, int ngpio,
void __iomem *base, int mask_offset,
@@ -565,8 +593,8 @@ void __init orion_gpio_init(struct device_node *np,
ct = gc->chip_types;
ct->regs.mask = ochip->mask_offset + GPIO_LEVEL_MASK_OFF;
ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
ct->chip.irq_mask = irq_gc_mask_clr_bit;
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_mask = orion_gpio_mask_irq;
ct->chip.irq_unmask = orion_gpio_unmask_irq;
ct->chip.irq_set_type = gpio_irq_set_type;
ct->chip.name = ochip->chip.label;
@@ -575,8 +603,8 @@ void __init orion_gpio_init(struct device_node *np,
ct->regs.ack = GPIO_EDGE_CAUSE_OFF;
ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
ct->chip.irq_ack = irq_gc_ack_clr_bit;
ct->chip.irq_mask = irq_gc_mask_clr_bit;
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_mask = orion_gpio_mask_irq;
ct->chip.irq_unmask = orion_gpio_unmask_irq;
ct->chip.irq_set_type = gpio_irq_set_type;
ct->handler = handle_edge_irq;
ct->chip.name = ochip->chip.label;