Merge branches 'at91', 'ep93xx', 'errata', 'footbridge', 'fncpy', 'gemini', 'irqdata', 'pm', 'sh', 'smp', 'spear', 'ux500' and 'via' into devel

688 changed files with 11722 additions and 6253 deletions

.gitignore vendored
View file

@ -28,6 +28,7 @@ modules.builtin
*.gz
*.bz2
*.lzma
*.xz
*.lzo
*.patch
*.gcno

View file

@ -82,6 +82,11 @@
</sect1>
</chapter>
<chapter id="fs_events">
<title>Events based on file descriptors</title>
!Efs/eventfd.c
</chapter>
<chapter id="sysfs">
<title>The Filesystem for Exporting Kernel Objects</title>
!Efs/sysfs/file.c

View file

@ -0,0 +1,8 @@
BIN := vrl4
.PHONY: all
all: $(BIN)
.PHONY: clean
clean:
rm -f *.o $(BIN)

View file

@ -0,0 +1,169 @@
/*
* vrl4 format generator
*
* Copyright (C) 2010 Simon Horman
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
/*
* usage: vrl4 < zImage > out
* dd if=out of=/dev/sdx bs=512 seek=1 # Write the image to sector 1
*
* Reads a zImage from stdin and writes a vrl4 image to stdout.
* In practice this means writing a padded vrl4 header to stdout followed
* by the zImage.
*
* The padding places the zImage at ALIGN bytes into the output.
* The vrl4 uses ALIGN + START_BASE as the start_address.
* This is where the mask ROM will jump to after verifying the header.
*
* The header sets copy_size to min(sizeof(zImage), MAX_BOOT_PROG_LEN) + ALIGN.
* That is, the mask ROM will load the padded header (ALIGN bytes)
* and then MAX_BOOT_PROG_LEN bytes of the image, or the entire image,
* whichever is smaller.
*
* The zImage is not modified in any way.
*/
#define _BSD_SOURCE
#include <endian.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <errno.h>
struct hdr {
uint32_t magic1;
uint32_t reserved1;
uint32_t magic2;
uint32_t reserved2;
uint16_t copy_size;
uint16_t boot_options;
uint32_t reserved3;
uint32_t start_address;
uint32_t reserved4;
uint32_t reserved5;
char reserved6[308];
};
#define DECLARE_HDR(h) \
struct hdr (h) = { \
.magic1 = htole32(0xea000000), \
.reserved1 = htole32(0x56), \
.magic2 = htole32(0xe59ff008), \
.reserved3 = htole16(0x1) }
/* Align to 512 bytes, the MMCIF sector size */
#define ALIGN_BITS 9
#define ALIGN (1 << ALIGN_BITS)
#define START_BASE 0xe55b0000
/*
* With an alignment of 512 the header uses the first sector.
* There is a 128 sector (64kbyte) limit on the data loaded by the mask ROM.
* So there are 127 sectors left for the boot program, but in practice
* only a small portion of a zImage is needed; 16 sectors should be more
* than enough.
*
* Note that this sets how much of the zImage is copied by the mask ROM.
* The entire zImage is present after the header and is loaded
* by the code in the boot program (which is the first portion of the zImage).
*/
#define MAX_BOOT_PROG_LEN (16 * 512)
#define ROUND_UP(x) ((x + ALIGN - 1) & ~(ALIGN - 1))
ssize_t do_read(int fd, void *buf, size_t count)
{
size_t offset = 0;
ssize_t l;
while (offset < count) {
l = read(fd, buf + offset, count - offset);
if (!l)
break;
if (l < 0) {
if (errno == EAGAIN || errno == EWOULDBLOCK)
continue;
perror("read");
return -1;
}
offset += l;
}
return offset;
}
ssize_t do_write(int fd, const void *buf, size_t count)
{
size_t offset = 0;
ssize_t l;
while (offset < count) {
l = write(fd, buf + offset, count - offset);
if (l < 0) {
if (errno == EAGAIN || errno == EWOULDBLOCK)
continue;
perror("write");
return -1;
}
offset += l;
}
return offset;
}
ssize_t write_zero(int fd, size_t len)
{
size_t i = len;
while (i--) {
const char x = 0;
if (do_write(fd, &x, 1) < 0)
return -1;
}
return len;
}
int main(void)
{
DECLARE_HDR(hdr);
char boot_program[MAX_BOOT_PROG_LEN];
size_t aligned_hdr_len, aligned_prog_len;
ssize_t prog_len;
prog_len = do_read(0, boot_program, sizeof(boot_program));
if (prog_len <= 0)
return -1;
aligned_hdr_len = ROUND_UP(sizeof(hdr));
hdr.start_address = htole32(START_BASE + aligned_hdr_len);
aligned_prog_len = ROUND_UP(prog_len);
hdr.copy_size = htole16(aligned_hdr_len + aligned_prog_len);
if (do_write(1, &hdr, sizeof(hdr)) < 0)
return -1;
if (write_zero(1, aligned_hdr_len - sizeof(hdr)) < 0)
return -1;
if (do_write(1, boot_program, prog_len) < 0)
return 1;
/* Write out the rest of the kernel */
while (1) {
prog_len = do_read(0, boot_program, sizeof(boot_program));
if (prog_len < 0)
return 1;
if (prog_len == 0)
break;
if (do_write(1, boot_program, prog_len) < 0)
return 1;
}
return 0;
}

View file

@ -0,0 +1,29 @@
ROM-able zImage boot from MMC
-----------------------------
A ROM-able zImage compiled with ZBOOT_ROM_MMCIF may be written to MMC, and
SuperH Mobile ARM will then boot directly from the MMCIF hardware block.
This is achieved by the mask ROM loading the first portion of the image into
MERAM and then jumping to it. This portion contains loader code which
copies the entire image to SDRAM and jumps to it. From there the zImage
boot code proceeds as normal, uncompressing the image into its final
location and then jumping to it.
This code has been tested on an AP4EB board using the developer 1A eMMC
boot mode which is configured using the following jumper settings.
The board used for testing required a patched mask ROM in order for
this mode to function.
8 7 6 5 4 3 2 1
x|x|x|x|x| |x|
S4 -+-+-+-+-+-+-+-
| | | | |x| |x on
The zImage must be written to the MMC card at sector 1 (512 bytes) in
vrl4 format. A utility vrl4 is supplied to accomplish this.
e.g.
vrl4 < zImage | dd of=/dev/sdX bs=512 seek=1
A dual-voltage MMC 4.0 card was used for testing.

View file

@ -51,7 +51,8 @@ Supported chips:
* JEDEC JC 42.4 compliant temperature sensor chips
Prefix: 'jc42'
Addresses scanned: I2C 0x18 - 0x1f
Datasheet: -
Datasheet:
http://www.jedec.org/sites/default/files/docs/4_01_04R19.pdf
Author:
Guenter Roeck <guenter.roeck@ericsson.com>
@ -60,7 +61,11 @@ Author:
Description
-----------
This driver implements support for JEDEC JC 42.4 compliant temperature sensors.
This driver implements support for JEDEC JC 42.4 compliant temperature sensors,
which are used on many DDR3 memory modules for mobile devices and servers. Some
systems use the sensor to prevent memory overheating by automatically throttling
the memory controller.
The driver auto-detects the chips listed above, but can be manually instantiated
to support other JC 42.4 compliant chips.
@ -81,15 +86,19 @@ limits. The chip supports only a single register to configure the hysteresis,
which applies to all limits. This register can be written by writing into
temp1_crit_hyst. Other hysteresis attributes are read-only.
If the BIOS has configured the sensor for automatic temperature management, it
is likely that it has locked the registers, i.e., that the temperature limits
cannot be changed.
Sysfs entries
-------------
temp1_input Temperature (RO)
temp1_min Minimum temperature (RW)
temp1_max Maximum temperature (RW)
temp1_crit Critical high temperature (RW)
temp1_min Minimum temperature (RO or RW)
temp1_max Maximum temperature (RO or RW)
temp1_crit Critical high temperature (RO or RW)
temp1_crit_hyst Critical hysteresis temperature (RW)
temp1_crit_hyst Critical hysteresis temperature (RO or RW)
temp1_max_hyst Maximum hysteresis temperature (RO)
temp1_min_alarm Temperature low alarm
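For orientation, a minimal userspace sketch (not part of this commit) of reading one of the attributes listed above through sysfs. The hwmon index in the path is an assumption and differs between systems; hwmon temperatures are reported in millidegrees Celsius.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* Path is an assumption; pick the hwmonN node that belongs to jc42. */
        const char *path = "/sys/class/hwmon/hwmon0/temp1_input";
        FILE *f = fopen(path, "r");
        long mdeg;

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fscanf(f, "%ld", &mdeg) != 1) {
                fclose(f);
                return 1;
        }
        fclose(f);
        /* hwmon reports millidegrees Celsius */
        printf("temp1_input = %ld.%03ld degC\n", mdeg / 1000, labs(mdeg) % 1000);
        return 0;
}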

View file

@ -9,6 +9,8 @@ Supported chips:
Socket S1G3: Athlon II, Sempron, Turion II
* AMD Family 11h processors:
Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
* AMD Family 12h processors: "Llano"
* AMD Family 14h processors: "Brazos" (C/E/G-Series)
Prefix: 'k10temp'
Addresses scanned: PCI space
@ -17,10 +19,14 @@ Supported chips:
http://support.amd.com/us/Processor_TechDocs/31116.pdf
BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors:
http://support.amd.com/us/Processor_TechDocs/41256.pdf
BIOS and Kernel Developer's Guide (BKDG) for AMD Family 14h Models 00h-0Fh Processors:
http://support.amd.com/us/Processor_TechDocs/43170.pdf
Revision Guide for AMD Family 10h Processors:
http://support.amd.com/us/Processor_TechDocs/41322.pdf
Revision Guide for AMD Family 11h Processors:
http://support.amd.com/us/Processor_TechDocs/41788.pdf
Revision Guide for AMD Family 14h Models 00h-0Fh Processors:
http://support.amd.com/us/Processor_TechDocs/47534.pdf
AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks:
http://support.amd.com/us/Processor_TechDocs/43373.pdf
AMD Family 10h Server and Workstation Processor Power and Thermal Data Sheet:
@ -34,7 +40,7 @@ Description
-----------
This driver permits reading of the internal temperature sensor of AMD
Family 10h and 11h processors.
Family 10h/11h/12h/14h processors.
All these processors have a sensor, but on those for Socket F or AM2+,
the sensor may return inconsistent values (erratum 319). The driver

View file

@ -144,6 +144,11 @@ a fixed number of characters. This limit depends on the architecture
and is between 256 and 4096 characters. It is defined in the file
./include/asm/setup.h as COMMAND_LINE_SIZE.
Finally, the [KMG] suffix is commonly described after a number of kernel
parameter values. These 'K', 'M', and 'G' letters represent the _binary_
multipliers 'Kilo', 'Mega', and 'Giga', equalling 2^10, 2^20, and 2^30
bytes respectively. Such letter suffixes can also be entirely omitted.
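As a quick illustration of the [KMG] arithmetic described above, a small stand-alone sketch (not kernel code; the helper name is made up) that maps a suffixed value to bytes using the binary multipliers:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: K = 2^10, M = 2^20, G = 2^30; no suffix means bytes. */
static unsigned long long parse_kmg(const char *s)
{
        char *end;
        unsigned long long val = strtoull(s, &end, 0);

        switch (*end) {
        case 'G': case 'g': val <<= 10; /* fall through */
        case 'M': case 'm': val <<= 10; /* fall through */
        case 'K': case 'k': val <<= 10; break;
        default: break;
        }
        return val;
}

int main(void)
{
        printf("64M -> %llu bytes\n", parse_kmg("64M")); /* 67108864 */
        printf("2G  -> %llu bytes\n", parse_kmg("2G"));  /* 2147483648 */
        return 0;
}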
acpi= [HW,ACPI,X86]
Advanced Configuration and Power Interface
@ -545,16 +550,20 @@ and is between 256 and 4096 characters. It is defined in the file
Format:
<first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>]
crashkernel=nn[KMG]@ss[KMG]
[KNL] Reserve a chunk of physical memory to
hold a kernel to switch to with kexec on panic.
crashkernel=size[KMG][@offset[KMG]]
[KNL] Using kexec, Linux can switch to a 'crash kernel'
upon panic. This parameter reserves the physical
memory region [offset, offset + size] for that kernel
image. If '@offset' is omitted, then a suitable offset
is selected automatically. Check
Documentation/kdump/kdump.txt for further details.
crashkernel=range1:size1[,range2:size2,...][@offset]
[KNL] Same as above, but depends on the memory
in the running system. The syntax of range is
start-[end] where start and end are both
a memory unit (amount[KMG]). See also
Documentation/kdump/kdump.txt for a example.
Documentation/kdump/kdump.txt for an example.
cs89x0_dma= [HW,NET]
Format: <dma>
@ -1262,10 +1271,9 @@ and is between 256 and 4096 characters. It is defined in the file
6 (KERN_INFO) informational
7 (KERN_DEBUG) debug-level messages
log_buf_len=n Sets the size of the printk ring buffer, in bytes.
Format: { n | nk | nM }
n must be a power of two. The default size
is set in the kernel config file.
log_buf_len=n[KMG] Sets the size of the printk ring buffer,
in bytes. n must be a power of two. The default
size is set in the kernel config file.
logo.nologo [FB] Disables display of the built-in Linux logo.
This may be used to provide more screen space for

View file

@ -40,8 +40,6 @@ decnet.txt
- info on using the DECnet networking layer in Linux.
depca.txt
- the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver
dgrs.txt
- the Digi International RightSwitch SE-X Ethernet driver
dmfe.txt
- info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
e100.txt
@ -50,8 +48,6 @@ e1000.txt
- info on Intel's E1000 line of gigabit ethernet boards
eql.txt
- serial IP load balancing
ethertap.txt
- the Ethertap user space packet reception and transmission driver
ewrk3.txt
- the Digital EtherWORKS 3 DE203/4/5 Ethernet driver
filter.txt
@ -104,8 +100,6 @@ tuntap.txt
- TUN/TAP device driver, allowing user space Rx/Tx of packets.
vortex.txt
- info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
wavelan.txt
- AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver
x25.txt
- general info on X.25 development.
x25-iface.txt

View file

@ -4,6 +4,8 @@ obj- := dummy.o
# List of programs to build
hostprogs-y := ifenslave
HOSTCFLAGS_ifenslave.o += -I$(objtree)/usr/include
# Tell kbuild to always build the programs
always := $(hostprogs-y)

View file

@ -61,7 +61,6 @@ before the more general line given above as the first match is the one taken.
create dns_resolver foo:* * /usr/sbin/dns.foo %k
=====
USAGE
=====
@ -104,6 +103,14 @@ implemented in the module can be called after doing:
returned also.
===============================
READING DNS KEYS FROM USERSPACE
===============================
Keys of dns_resolver type can be read from userspace using keyctl_read() or
"keyctl read/print/pipe".
=========
MECHANISM
=========

View file

@ -190,9 +190,9 @@ resources, scheduled and executed.
* Long running CPU intensive workloads which can be better
managed by the system scheduler.
WQ_FREEZEABLE
WQ_FREEZABLE
A freezeable wq participates in the freeze phase of the system
A freezable wq participates in the freeze phase of the system
suspend operations. Work items on the wq are drained and no
new work item starts execution until thawed.
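A minimal in-kernel sketch using the renamed flag; the workqueue and function names here are illustrative, not from this commit:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static void example_fn(struct work_struct *work)
{
        pr_info("example work ran\n");
}

static DECLARE_WORK(example_work, example_fn);
static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
        /* freezable: drained during system suspend, resumed after thaw */
        example_wq = alloc_workqueue("example_wq", WQ_FREEZABLE, 0);
        if (!example_wq)
                return -ENOMEM;
        queue_work(example_wq, &example_work);
        return 0;
}

static void __exit example_exit(void)
{
        destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");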

View file

@ -885,7 +885,7 @@ S: Supported
ARM/QUALCOMM MSM MACHINE SUPPORT
M: David Brown <davidb@codeaurora.org>
M: Daniel Walker <dwalker@codeaurora.org>
M: Daniel Walker <dwalker@fifo99.com>
M: Bryan Huntsman <bryanh@codeaurora.org>
L: linux-arm-msm@vger.kernel.org
F: arch/arm/mach-msm/
@ -1010,6 +1010,15 @@ L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-s5p*/
ARM/SAMSUNG MOBILE MACHINE SUPPORT
M: Kyungmin Park <kyungmin.park@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-s5pv210/mach-aquila.c
F: arch/arm/mach-s5pv210/mach-goni.c
F: arch/arm/mach-exynos4/mach-universal_c210.c
F: arch/arm/mach-exynos4/mach-nuri.c
ARM/SAMSUNG S5P SERIES FIMC SUPPORT
M: Kyungmin Park <kyungmin.park@samsung.com>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
@ -1467,6 +1476,7 @@ F: include/net/bluetooth/
BONDING DRIVER
M: Jay Vosburgh <fubar@us.ibm.com>
M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
W: http://sourceforge.net/projects/bonding/
S: Supported
@ -1692,6 +1702,13 @@ M: Andy Whitcroft <apw@canonical.com>
S: Supported
F: scripts/checkpatch.pl
CHINESE DOCUMENTATION
M: Harry Wei <harryxiyou@gmail.com>
L: xiyoulinuxkernelgroup@googlegroups.com
L: linux-kernel@zh-kernel.org (moderated for non-subscribers)
S: Maintained
F: Documentation/zh_CN/
CISCO VIC ETHERNET NIC DRIVER
M: Vasanthy Kolluri <vkolluri@cisco.com>
M: Roopa Prabhu <roprabhu@cisco.com>
@ -2026,7 +2043,7 @@ F: Documentation/scsi/dc395x.txt
F: drivers/scsi/dc395x.*
DCCP PROTOCOL
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
M: Gerrit Renker <gerrit@erg.abdn.ac.uk>
L: dccp@vger.kernel.org
W: http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
S: Maintained
@ -2873,7 +2890,6 @@ M: Guenter Roeck <guenter.roeck@ericsson.com>
L: lm-sensors@lm-sensors.org
W: http://www.lm-sensors.org/
T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
S: Maintained
F: Documentation/hwmon/
@ -3513,7 +3529,7 @@ F: drivers/hwmon/jc42.c
F: Documentation/hwmon/jc42
JFS FILESYSTEM
M: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
M: Dave Kleikamp <shaggy@kernel.org>
L: jfs-discussion@lists.sourceforge.net
W: http://jfs.sourceforge.net/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
@ -5165,6 +5181,7 @@ F: drivers/char/random.c
RAPIDIO SUBSYSTEM
M: Matt Porter <mporter@kernel.crashing.org>
M: Alexandre Bounine <alexandre.bounine@idt.com>
S: Maintained
F: drivers/rapidio/
@ -5267,7 +5284,7 @@ S: Maintained
F: drivers/net/wireless/rtl818x/rtl8180/
RTL8187 WIRELESS DRIVER
M: Herton Ronaldo Krzesinski <herton@mandriva.com.br>
M: Herton Ronaldo Krzesinski <herton@canonical.com>
M: Hin-Tak Leung <htl10@users.sourceforge.net>
M: Larry Finger <Larry.Finger@lwfinger.net>
L: linux-wireless@vger.kernel.org
@ -6105,7 +6122,7 @@ S: Maintained
F: security/tomoyo/
TOPSTAR LAPTOP EXTRAS DRIVER
M: Herton Ronaldo Krzesinski <herton@mandriva.com.br>
M: Herton Ronaldo Krzesinski <herton@canonical.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/topstar-laptop.c

View file

@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 38
EXTRAVERSION = -rc5
EXTRAVERSION = -rc8
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*

View file

@ -11,6 +11,7 @@ config ALPHA
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
select AUTO_IRQ_AFFINITY if SMP
select GENERIC_HARDIRQS_NO_DEPRECATED
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,

View file

@ -44,11 +44,16 @@ static char irq_user_affinity[NR_IRQS];
int irq_select_affinity(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc[irq];
struct irq_data *data = irq_get_irq_data(irq);
struct irq_chip *chip;
static int last_cpu;
int cpu = last_cpu + 1;
if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq])
if (!data)
return 1;
chip = irq_data_get_irq_chip(data);
if (!chip->irq_set_affinity || irq_user_affinity[irq])
return 1;
while (!cpu_possible(cpu) ||
@ -56,8 +61,8 @@ int irq_select_affinity(unsigned int irq)
cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
last_cpu = cpu;
cpumask_copy(desc->affinity, cpumask_of(cpu));
get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu));
cpumask_copy(data->affinity, cpumask_of(cpu));
chip->irq_set_affinity(data, cpumask_of(cpu), false);
return 0;
}
#endif /* CONFIG_SMP */
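The hunk above, and most of the Alpha system files that follow, move from the old irq-number based irq_chip callbacks to the struct irq_data based ones. A hedged sketch of the overall shape of such a conversion, with a made-up "foo" controller standing in for the real ones:

#include <linux/irq.h>

/* Hypothetical hardware accessor, standing in for the real register pokes. */
static void foo_hw_set_masked(unsigned int hwirq, int masked)
{
}

static void foo_mask(struct irq_data *d)
{
        /* the Linux irq number now comes from irq_data */
        foo_hw_set_masked(d->irq, 1);
}

static void foo_unmask(struct irq_data *d)
{
        foo_hw_set_masked(d->irq, 0);
}

static struct irq_chip foo_irq_type = {
        .name           = "FOO",
        .irq_unmask     = foo_unmask,
        .irq_mask       = foo_mask,
        .irq_mask_ack   = foo_mask,
};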

View file

@ -228,14 +228,9 @@ struct irqaction timer_irqaction = {
void __init
init_rtc_irq(void)
{
struct irq_desc *desc = irq_to_desc(RTC_IRQ);
if (desc) {
desc->status |= IRQ_DISABLED;
set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
handle_simple_irq, "RTC");
setup_irq(RTC_IRQ, &timer_irqaction);
}
set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
handle_simple_irq, "RTC");
setup_irq(RTC_IRQ, &timer_irqaction);
}
/* Dummy irqactions. */

View file

@ -33,10 +33,10 @@ i8259_update_irq_hw(unsigned int irq, unsigned long mask)
}
inline void
i8259a_enable_irq(unsigned int irq)
i8259a_enable_irq(struct irq_data *d)
{
spin_lock(&i8259_irq_lock);
i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
spin_unlock(&i8259_irq_lock);
}
@ -47,16 +47,18 @@ __i8259a_disable_irq(unsigned int irq)
}
void
i8259a_disable_irq(unsigned int irq)
i8259a_disable_irq(struct irq_data *d)
{
spin_lock(&i8259_irq_lock);
__i8259a_disable_irq(irq);
__i8259a_disable_irq(d->irq);
spin_unlock(&i8259_irq_lock);
}
void
i8259a_mask_and_ack_irq(unsigned int irq)
i8259a_mask_and_ack_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
spin_lock(&i8259_irq_lock);
__i8259a_disable_irq(irq);
@ -71,9 +73,9 @@ i8259a_mask_and_ack_irq(unsigned int irq)
struct irq_chip i8259a_irq_type = {
.name = "XT-PIC",
.unmask = i8259a_enable_irq,
.mask = i8259a_disable_irq,
.mask_ack = i8259a_mask_and_ack_irq,
.irq_unmask = i8259a_enable_irq,
.irq_mask = i8259a_disable_irq,
.irq_mask_ack = i8259a_mask_and_ack_irq,
};
void __init

View file

@ -31,11 +31,9 @@ extern void init_rtc_irq(void);
extern void common_init_isa_dma(void);
extern void i8259a_enable_irq(unsigned int);
extern void i8259a_disable_irq(unsigned int);
extern void i8259a_mask_and_ack_irq(unsigned int);
extern unsigned int i8259a_startup_irq(unsigned int);
extern void i8259a_end_irq(unsigned int);
extern void i8259a_enable_irq(struct irq_data *d);
extern void i8259a_disable_irq(struct irq_data *d);
extern void i8259a_mask_and_ack_irq(struct irq_data *d);
extern struct irq_chip i8259a_irq_type;
extern void init_i8259a_irqs(void);

View file

@ -29,21 +29,21 @@ pyxis_update_irq_hw(unsigned long mask)
}
static inline void
pyxis_enable_irq(unsigned int irq)
pyxis_enable_irq(struct irq_data *d)
{
pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
}
static void
pyxis_disable_irq(unsigned int irq)
pyxis_disable_irq(struct irq_data *d)
{
pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
}
static void
pyxis_mask_and_ack_irq(unsigned int irq)
pyxis_mask_and_ack_irq(struct irq_data *d)
{
unsigned long bit = 1UL << (irq - 16);
unsigned long bit = 1UL << (d->irq - 16);
unsigned long mask = cached_irq_mask &= ~bit;
/* Disable the interrupt. */
@ -58,9 +58,9 @@ pyxis_mask_and_ack_irq(unsigned int irq)
static struct irq_chip pyxis_irq_type = {
.name = "PYXIS",
.mask_ack = pyxis_mask_and_ack_irq,
.mask = pyxis_disable_irq,
.unmask = pyxis_enable_irq,
.irq_mask_ack = pyxis_mask_and_ack_irq,
.irq_mask = pyxis_disable_irq,
.irq_unmask = pyxis_enable_irq,
};
void
@ -103,7 +103,7 @@ init_pyxis_irqs(unsigned long ignore_mask)
if ((ignore_mask >> i) & 1)
continue;
set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
irq_to_desc(i)->status |= IRQ_LEVEL;
irq_set_status_flags(i, IRQ_LEVEL);
}
setup_irq(16+7, &isa_cascade_irqaction);

View file

@ -18,27 +18,27 @@
DEFINE_SPINLOCK(srm_irq_lock);
static inline void
srm_enable_irq(unsigned int irq)
srm_enable_irq(struct irq_data *d)
{
spin_lock(&srm_irq_lock);
cserve_ena(irq - 16);
cserve_ena(d->irq - 16);
spin_unlock(&srm_irq_lock);
}
static void
srm_disable_irq(unsigned int irq)
srm_disable_irq(struct irq_data *d)
{
spin_lock(&srm_irq_lock);
cserve_dis(irq - 16);
cserve_dis(d->irq - 16);
spin_unlock(&srm_irq_lock);
}
/* Handle interrupts from the SRM, assuming no additional weirdness. */
static struct irq_chip srm_irq_type = {
.name = "SRM",
.unmask = srm_enable_irq,
.mask = srm_disable_irq,
.mask_ack = srm_disable_irq,
.irq_unmask = srm_enable_irq,
.irq_mask = srm_disable_irq,
.irq_mask_ack = srm_disable_irq,
};
void __init
@ -52,7 +52,7 @@ init_srm_irqs(long max, unsigned long ignore_mask)
if (i < 64 && ((ignore_mask >> i) & 1))
continue;
set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
irq_to_desc(i)->status |= IRQ_LEVEL;
irq_set_status_flags(i, IRQ_LEVEL);
}
}

View file

@ -44,31 +44,31 @@ alcor_update_irq_hw(unsigned long mask)
}
static inline void
alcor_enable_irq(unsigned int irq)
alcor_enable_irq(struct irq_data *d)
{
alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
}
static void
alcor_disable_irq(unsigned int irq)
alcor_disable_irq(struct irq_data *d)
{
alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
}
static void
alcor_mask_and_ack_irq(unsigned int irq)
alcor_mask_and_ack_irq(struct irq_data *d)
{
alcor_disable_irq(irq);
alcor_disable_irq(d);
/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
*(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb();
*(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb();
*(vuip)GRU_INT_CLEAR = 0; mb();
}
static void
alcor_isa_mask_and_ack_irq(unsigned int irq)
alcor_isa_mask_and_ack_irq(struct irq_data *d)
{
i8259a_mask_and_ack_irq(irq);
i8259a_mask_and_ack_irq(d);
/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
*(vuip)GRU_INT_CLEAR = 0x80000000; mb();
@ -77,9 +77,9 @@ alcor_isa_mask_and_ack_irq(unsigned int irq)
static struct irq_chip alcor_irq_type = {
.name = "ALCOR",
.unmask = alcor_enable_irq,
.mask = alcor_disable_irq,
.mask_ack = alcor_mask_and_ack_irq,
.irq_unmask = alcor_enable_irq,
.irq_mask = alcor_disable_irq,
.irq_mask_ack = alcor_mask_and_ack_irq,
};
static void
@ -126,9 +126,9 @@ alcor_init_irq(void)
if (i >= 16+20 && i <= 16+30)
continue;
set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
irq_to_desc(i)->status |= IRQ_LEVEL;
irq_set_status_flags(i, IRQ_LEVEL);
}
i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq;
i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;
init_i8259a_irqs();
common_init_isa_dma();

View file

@ -46,22 +46,22 @@ cabriolet_update_irq_hw(unsigned int irq, unsigned long mask)
}
static inline void
cabriolet_enable_irq(unsigned int irq)
cabriolet_enable_irq(struct irq_data *d)
{
cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq));
cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq));
}
static void
cabriolet_disable_irq(unsigned int irq)
cabriolet_disable_irq(struct irq_data *d)
{
cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq);
cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq);
}
static struct irq_chip cabriolet_irq_type = {
.name = "CABRIOLET",
.unmask = cabriolet_enable_irq,
.mask = cabriolet_disable_irq,
.mask_ack = cabriolet_disable_irq,
.irq_unmask = cabriolet_enable_irq,
.irq_mask = cabriolet_disable_irq,
.irq_mask_ack = cabriolet_disable_irq,
};
static void
@ -107,7 +107,7 @@ common_init_irq(void (*srm_dev_int)(unsigned long v))
for (i = 16; i < 35; ++i) {
set_irq_chip_and_handler(i, &cabriolet_irq_type,
handle_level_irq);
irq_to_desc(i)->status |= IRQ_LEVEL;
irq_set_status_flags(i, IRQ_LEVEL);
}
}

View file

@ -98,37 +98,37 @@ tsunami_update_irq_hw(unsigned long mask)
}
static void
dp264_enable_irq(unsigned int irq)
dp264_enable_irq(struct irq_data *d)
{
spin_lock(&dp264_irq_lock);
cached_irq_mask |= 1UL << irq;
cached_irq_mask |= 1UL << d->irq;
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
}
static void
dp264_disable_irq(unsigned int irq)
dp264_disable_irq(struct irq_data *d)
{
spin_lock(&dp264_irq_lock);
cached_irq_mask &= ~(1UL << irq);
cached_irq_mask &= ~(1UL << d->irq);
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
}
static void
clipper_enable_irq(unsigned int irq)
clipper_enable_irq(struct irq_data *d)
{
spin_lock(&dp264_irq_lock);
cached_irq_mask |= 1UL << (irq - 16);
cached_irq_mask |= 1UL << (d->irq - 16);
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
}
static void
clipper_disable_irq(unsigned int irq)
clipper_disable_irq(struct irq_data *d)
{
spin_lock(&dp264_irq_lock);
cached_irq_mask &= ~(1UL << (irq - 16));
cached_irq_mask &= ~(1UL << (d->irq - 16));
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
}
@ -149,10 +149,11 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
}
static int
dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
{
dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,
bool force)
{
spin_lock(&dp264_irq_lock);
cpu_set_irq_affinity(irq, *affinity);
cpu_set_irq_affinity(d->irq, *affinity);
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
@ -160,10 +161,11 @@ dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
}
static int
clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
{
clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,
bool force)
{
spin_lock(&dp264_irq_lock);
cpu_set_irq_affinity(irq - 16, *affinity);
cpu_set_irq_affinity(d->irq - 16, *affinity);
tsunami_update_irq_hw(cached_irq_mask);
spin_unlock(&dp264_irq_lock);
@ -171,19 +173,19 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
}
static struct irq_chip dp264_irq_type = {
.name = "DP264",
.unmask = dp264_enable_irq,
.mask = dp264_disable_irq,
.mask_ack = dp264_disable_irq,
.set_affinity = dp264_set_affinity,
.name = "DP264",
.irq_unmask = dp264_enable_irq,
.irq_mask = dp264_disable_irq,
.irq_mask_ack = dp264_disable_irq,
.irq_set_affinity = dp264_set_affinity,
};
static struct irq_chip clipper_irq_type = {
.name = "CLIPPER",
.unmask = clipper_enable_irq,
.mask = clipper_disable_irq,
.mask_ack = clipper_disable_irq,
.set_affinity = clipper_set_affinity,
.name = "CLIPPER",
.irq_unmask = clipper_enable_irq,
.irq_mask = clipper_disable_irq,
.irq_mask_ack = clipper_disable_irq,
.irq_set_affinity = clipper_set_affinity,
};
static void
@ -268,8 +270,8 @@ init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
{
long i;
for (i = imin; i <= imax; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, ops, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}

View file

@ -44,22 +44,22 @@ eb64p_update_irq_hw(unsigned int irq, unsigned long mask)
}
static inline void
eb64p_enable_irq(unsigned int irq)
eb64p_enable_irq(struct irq_data *d)
{
eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
}
static void
eb64p_disable_irq(unsigned int irq)
eb64p_disable_irq(struct irq_data *d)
{
eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq);
}
static struct irq_chip eb64p_irq_type = {
.name = "EB64P",
.unmask = eb64p_enable_irq,
.mask = eb64p_disable_irq,
.mask_ack = eb64p_disable_irq,
.irq_unmask = eb64p_enable_irq,
.irq_mask = eb64p_disable_irq,
.irq_mask_ack = eb64p_disable_irq,
};
static void
@ -118,9 +118,9 @@ eb64p_init_irq(void)
init_i8259a_irqs();
for (i = 16; i < 32; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
}
irq_set_status_flags(i, IRQ_LEVEL);
}
common_init_isa_dma();
setup_irq(16+5, &isa_cascade_irqaction);

View file

@ -51,16 +51,18 @@ eiger_update_irq_hw(unsigned long irq, unsigned long mask)
}
static inline void
eiger_enable_irq(unsigned int irq)
eiger_enable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
unsigned long mask;
mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
eiger_update_irq_hw(irq, mask);
}
static void
eiger_disable_irq(unsigned int irq)
eiger_disable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
unsigned long mask;
mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
eiger_update_irq_hw(irq, mask);
@ -68,9 +70,9 @@ eiger_disable_irq(unsigned int irq)
static struct irq_chip eiger_irq_type = {
.name = "EIGER",
.unmask = eiger_enable_irq,
.mask = eiger_disable_irq,
.mask_ack = eiger_disable_irq,
.irq_unmask = eiger_enable_irq,
.irq_mask = eiger_disable_irq,
.irq_mask_ack = eiger_disable_irq,
};
static void
@ -136,8 +138,8 @@ eiger_init_irq(void)
init_i8259a_irqs();
for (i = 16; i < 128; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}

View file

@ -63,34 +63,34 @@
*/
static void
jensen_local_enable(unsigned int irq)
jensen_local_enable(struct irq_data *d)
{
/* the parport is really hw IRQ 1, silly Jensen. */
if (irq == 7)
i8259a_enable_irq(1);
if (d->irq == 7)
i8259a_enable_irq(d);
}
static void
jensen_local_disable(unsigned int irq)
jensen_local_disable(struct irq_data *d)
{
/* the parport is really hw IRQ 1, silly Jensen. */
if (irq == 7)
i8259a_disable_irq(1);
if (d->irq == 7)
i8259a_disable_irq(d);
}
static void
jensen_local_mask_ack(unsigned int irq)
jensen_local_mask_ack(struct irq_data *d)
{
/* the parport is really hw IRQ 1, silly Jensen. */
if (irq == 7)
i8259a_mask_and_ack_irq(1);
if (d->irq == 7)
i8259a_mask_and_ack_irq(d);
}
static struct irq_chip jensen_local_irq_type = {
.name = "LOCAL",
.unmask = jensen_local_enable,
.mask = jensen_local_disable,
.mask_ack = jensen_local_mask_ack,
.irq_unmask = jensen_local_enable,
.irq_mask = jensen_local_disable,
.irq_mask_ack = jensen_local_mask_ack,
};
static void

View file

@ -104,9 +104,10 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
}
static void
io7_enable_irq(unsigned int irq)
io7_enable_irq(struct irq_data *d)
{
volatile unsigned long *ctl;
unsigned int irq = d->irq;
struct io7 *io7;
ctl = io7_get_irq_ctl(irq, &io7);
@ -115,7 +116,7 @@ io7_enable_irq(unsigned int irq)
__func__, irq);
return;
}
spin_lock(&io7->irq_lock);
*ctl |= 1UL << 24;
mb();
@ -124,9 +125,10 @@ io7_enable_irq(unsigned int irq)
}
static void
io7_disable_irq(unsigned int irq)
io7_disable_irq(struct irq_data *d)
{
volatile unsigned long *ctl;
unsigned int irq = d->irq;
struct io7 *io7;
ctl = io7_get_irq_ctl(irq, &io7);
@ -135,7 +137,7 @@ io7_disable_irq(unsigned int irq)
__func__, irq);
return;
}
spin_lock(&io7->irq_lock);
*ctl &= ~(1UL << 24);
mb();
@ -144,35 +146,29 @@ io7_disable_irq(unsigned int irq)
}
static void
marvel_irq_noop(unsigned int irq)
{
return;
}
static unsigned int
marvel_irq_noop_return(unsigned int irq)
{
return 0;
marvel_irq_noop(struct irq_data *d)
{
return;
}
static struct irq_chip marvel_legacy_irq_type = {
.name = "LEGACY",
.mask = marvel_irq_noop,
.unmask = marvel_irq_noop,
.irq_mask = marvel_irq_noop,
.irq_unmask = marvel_irq_noop,
};
static struct irq_chip io7_lsi_irq_type = {
.name = "LSI",
.unmask = io7_enable_irq,
.mask = io7_disable_irq,
.mask_ack = io7_disable_irq,
.irq_unmask = io7_enable_irq,
.irq_mask = io7_disable_irq,
.irq_mask_ack = io7_disable_irq,
};
static struct irq_chip io7_msi_irq_type = {
.name = "MSI",
.unmask = io7_enable_irq,
.mask = io7_disable_irq,
.ack = marvel_irq_noop,
.irq_unmask = io7_enable_irq,
.irq_mask = io7_disable_irq,
.irq_ack = marvel_irq_noop,
};
static void
@ -280,8 +276,8 @@ init_io7_irqs(struct io7 *io7,
/* Set up the lsi irqs. */
for (i = 0; i < 128; ++i) {
irq_to_desc(base + i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
irq_set_status_flags(base + i, IRQ_LEVEL);
}
/* Disable the implemented irqs in hardware. */
@ -294,8 +290,8 @@ init_io7_irqs(struct io7 *io7,
/* Set up the msi irqs. */
for (i = 128; i < (128 + 512); ++i) {
irq_to_desc(base + i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
irq_set_status_flags(base + i, IRQ_LEVEL);
}
for (i = 0; i < 16; ++i)

View file

@ -43,22 +43,22 @@ mikasa_update_irq_hw(int mask)
}
static inline void
mikasa_enable_irq(unsigned int irq)
mikasa_enable_irq(struct irq_data *d)
{
mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16));
mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16));
}
static void
mikasa_disable_irq(unsigned int irq)
mikasa_disable_irq(struct irq_data *d)
{
mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16)));
mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16)));
}
static struct irq_chip mikasa_irq_type = {
.name = "MIKASA",
.unmask = mikasa_enable_irq,
.mask = mikasa_disable_irq,
.mask_ack = mikasa_disable_irq,
.irq_unmask = mikasa_enable_irq,
.irq_mask = mikasa_disable_irq,
.irq_mask_ack = mikasa_disable_irq,
};
static void
@ -98,8 +98,8 @@ mikasa_init_irq(void)
mikasa_update_irq_hw(0);
for (i = 16; i < 32; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
init_i8259a_irqs();

View file

@ -48,22 +48,22 @@ noritake_update_irq_hw(int irq, int mask)
}
static void
noritake_enable_irq(unsigned int irq)
noritake_enable_irq(struct irq_data *d)
{
noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16));
noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16));
}
static void
noritake_disable_irq(unsigned int irq)
noritake_disable_irq(struct irq_data *d)
{
noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16)));
noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16)));
}
static struct irq_chip noritake_irq_type = {
.name = "NORITAKE",
.unmask = noritake_enable_irq,
.mask = noritake_disable_irq,
.mask_ack = noritake_disable_irq,
.irq_unmask = noritake_enable_irq,
.irq_mask = noritake_disable_irq,
.irq_mask_ack = noritake_disable_irq,
};
static void
@ -127,8 +127,8 @@ noritake_init_irq(void)
outw(0, 0x54c);
for (i = 16; i < 48; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
init_i8259a_irqs();

View file

@ -56,9 +56,10 @@ rawhide_update_irq_hw(int hose, int mask)
(((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0))
static inline void
rawhide_enable_irq(unsigned int irq)
rawhide_enable_irq(struct irq_data *d)
{
unsigned int mask, hose;
unsigned int irq = d->irq;
irq -= 16;
hose = irq / 24;
@ -76,9 +77,10 @@ rawhide_enable_irq(unsigned int irq)
}
static void
rawhide_disable_irq(unsigned int irq)
rawhide_disable_irq(struct irq_data *d)
{
unsigned int mask, hose;
unsigned int irq = d->irq;
irq -= 16;
hose = irq / 24;
@ -96,9 +98,10 @@ rawhide_disable_irq(unsigned int irq)
}
static void
rawhide_mask_and_ack_irq(unsigned int irq)
rawhide_mask_and_ack_irq(struct irq_data *d)
{
unsigned int mask, mask1, hose;
unsigned int irq = d->irq;
irq -= 16;
hose = irq / 24;
@ -123,9 +126,9 @@ rawhide_mask_and_ack_irq(unsigned int irq)
static struct irq_chip rawhide_irq_type = {
.name = "RAWHIDE",
.unmask = rawhide_enable_irq,
.mask = rawhide_disable_irq,
.mask_ack = rawhide_mask_and_ack_irq,
.irq_unmask = rawhide_enable_irq,
.irq_mask = rawhide_disable_irq,
.irq_mask_ack = rawhide_mask_and_ack_irq,
};
static void
@ -177,8 +180,8 @@ rawhide_init_irq(void)
}
for (i = 16; i < 128; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
init_i8259a_irqs();

View file

@ -47,22 +47,22 @@ rx164_update_irq_hw(unsigned long mask)
}
static inline void
rx164_enable_irq(unsigned int irq)
rx164_enable_irq(struct irq_data *d)
{
rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
}
static void
rx164_disable_irq(unsigned int irq)
rx164_disable_irq(struct irq_data *d)
{
rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
}
static struct irq_chip rx164_irq_type = {
.name = "RX164",
.unmask = rx164_enable_irq,
.mask = rx164_disable_irq,
.mask_ack = rx164_disable_irq,
.irq_unmask = rx164_enable_irq,
.irq_mask = rx164_disable_irq,
.irq_mask_ack = rx164_disable_irq,
};
static void
@ -99,8 +99,8 @@ rx164_init_irq(void)
rx164_update_irq_hw(0);
for (i = 16; i < 40; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
init_i8259a_irqs();

View file

@ -443,11 +443,11 @@ lynx_swizzle(struct pci_dev *dev, u8 *pinp)
/* GENERIC irq routines */
static inline void
sable_lynx_enable_irq(unsigned int irq)
sable_lynx_enable_irq(struct irq_data *d)
{
unsigned long bit, mask;
bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
spin_lock(&sable_lynx_irq_lock);
mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@ -459,11 +459,11 @@ sable_lynx_enable_irq(unsigned int irq)
}
static void
sable_lynx_disable_irq(unsigned int irq)
sable_lynx_disable_irq(struct irq_data *d)
{
unsigned long bit, mask;
bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
spin_lock(&sable_lynx_irq_lock);
mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@ -475,11 +475,11 @@ sable_lynx_disable_irq(unsigned int irq)
}
static void
sable_lynx_mask_and_ack_irq(unsigned int irq)
sable_lynx_mask_and_ack_irq(struct irq_data *d)
{
unsigned long bit, mask;
bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
spin_lock(&sable_lynx_irq_lock);
mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@ -489,9 +489,9 @@ sable_lynx_mask_and_ack_irq(unsigned int irq)
static struct irq_chip sable_lynx_irq_type = {
.name = "SABLE/LYNX",
.unmask = sable_lynx_enable_irq,
.mask = sable_lynx_disable_irq,
.mask_ack = sable_lynx_mask_and_ack_irq,
.irq_unmask = sable_lynx_enable_irq,
.irq_mask = sable_lynx_disable_irq,
.irq_mask_ack = sable_lynx_mask_and_ack_irq,
};
static void
@ -518,9 +518,9 @@ sable_lynx_init_irq(int nr_of_irqs)
long i;
for (i = 0; i < nr_of_irqs; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &sable_lynx_irq_type,
handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
common_init_isa_dma();

View file

@ -45,16 +45,18 @@ takara_update_irq_hw(unsigned long irq, unsigned long mask)
}
static inline void
takara_enable_irq(unsigned int irq)
takara_enable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
unsigned long mask;
mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
takara_update_irq_hw(irq, mask);
}
static void
takara_disable_irq(unsigned int irq)
takara_disable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
unsigned long mask;
mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
takara_update_irq_hw(irq, mask);
@ -62,9 +64,9 @@ takara_disable_irq(unsigned int irq)
static struct irq_chip takara_irq_type = {
.name = "TAKARA",
.unmask = takara_enable_irq,
.mask = takara_disable_irq,
.mask_ack = takara_disable_irq,
.irq_unmask = takara_enable_irq,
.irq_mask = takara_disable_irq,
.irq_mask_ack = takara_disable_irq,
};
static void
@ -136,8 +138,8 @@ takara_init_irq(void)
takara_update_irq_hw(i, -1);
for (i = 16; i < 128; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
common_init_isa_dma();

View file

@ -112,8 +112,9 @@ titan_update_irq_hw(unsigned long mask)
}
static inline void
titan_enable_irq(unsigned int irq)
titan_enable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
spin_lock(&titan_irq_lock);
titan_cached_irq_mask |= 1UL << (irq - 16);
titan_update_irq_hw(titan_cached_irq_mask);
@ -121,8 +122,9 @@ titan_enable_irq(unsigned int irq)
}
static inline void
titan_disable_irq(unsigned int irq)
titan_disable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
spin_lock(&titan_irq_lock);
titan_cached_irq_mask &= ~(1UL << (irq - 16));
titan_update_irq_hw(titan_cached_irq_mask);
@ -144,7 +146,8 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
}
static int
titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
bool force)
{
spin_lock(&titan_irq_lock);
titan_cpu_set_irq_affinity(irq - 16, *affinity);
@ -175,17 +178,17 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax)
{
long i;
for (i = imin; i <= imax; ++i) {
irq_to_desc(i)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i, ops, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}
static struct irq_chip titan_irq_type = {
.name = "TITAN",
.unmask = titan_enable_irq,
.mask = titan_disable_irq,
.mask_ack = titan_disable_irq,
.set_affinity = titan_set_irq_affinity,
.name = "TITAN",
.irq_unmask = titan_enable_irq,
.irq_mask = titan_disable_irq,
.irq_mask_ack = titan_disable_irq,
.irq_set_affinity = titan_set_irq_affinity,
};
static irqreturn_t

View file

@ -104,10 +104,12 @@ wildfire_init_irq_hw(void)
}
static void
wildfire_enable_irq(unsigned int irq)
wildfire_enable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
if (irq < 16)
i8259a_enable_irq(irq);
i8259a_enable_irq(d);
spin_lock(&wildfire_irq_lock);
set_bit(irq, &cached_irq_mask);
@ -116,10 +118,12 @@ wildfire_enable_irq(unsigned int irq)
}
static void
wildfire_disable_irq(unsigned int irq)
wildfire_disable_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
if (irq < 16)
i8259a_disable_irq(irq);
i8259a_disable_irq(d);
spin_lock(&wildfire_irq_lock);
clear_bit(irq, &cached_irq_mask);
@ -128,10 +132,12 @@ wildfire_disable_irq(unsigned int irq)
}
static void
wildfire_mask_and_ack_irq(unsigned int irq)
wildfire_mask_and_ack_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
if (irq < 16)
i8259a_mask_and_ack_irq(irq);
i8259a_mask_and_ack_irq(d);
spin_lock(&wildfire_irq_lock);
clear_bit(irq, &cached_irq_mask);
@ -141,9 +147,9 @@ wildfire_mask_and_ack_irq(unsigned int irq)
static struct irq_chip wildfire_irq_type = {
.name = "WILDFIRE",
.unmask = wildfire_enable_irq,
.mask = wildfire_disable_irq,
.mask_ack = wildfire_mask_and_ack_irq,
.irq_unmask = wildfire_enable_irq,
.irq_mask = wildfire_disable_irq,
.irq_mask_ack = wildfire_mask_and_ack_irq,
};
static void __init
@ -177,21 +183,21 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
for (i = 0; i < 16; ++i) {
if (i == 2)
continue;
irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
handle_level_irq);
irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
}
irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type,
handle_level_irq);
irq_set_status_flags(36 + irq_bias, IRQ_LEVEL);
for (i = 40; i < 64; ++i) {
irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
handle_level_irq);
irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
}
setup_irq(32+irq_bias, &isa_enable);
setup_irq(32+irq_bias, &isa_enable);
}
static void __init

View file

@ -346,7 +346,7 @@ config ARCH_FOOTBRIDGE
bool "FootBridge"
select CPU_SA110
select FOOTBRIDGE
select ARCH_USES_GETTIMEOFFSET
select GENERIC_CLOCKEVENTS
help
Support for systems based on the DC21285 companion chip
("FootBridge"), such as the Simtec CATS and the Rebel NetWinder.
@ -875,6 +875,16 @@ config PLAT_SPEAR
help
Support for ST's SPEAr platform (SPEAr3xx, SPEAr6xx and SPEAr13xx).
config ARCH_VT8500
bool "VIA/WonderMedia 85xx"
select CPU_ARM926T
select GENERIC_GPIO
select ARCH_HAS_CPUFREQ
select GENERIC_CLOCKEVENTS
select ARCH_REQUIRE_GPIOLIB
select HAVE_PWM
help
Support for VIA/WonderMedia VT8500/WM85xx System-on-Chip.
endchoice
#
@ -1007,6 +1017,8 @@ source "arch/arm/mach-versatile/Kconfig"
source "arch/arm/mach-vexpress/Kconfig"
source "arch/arm/mach-vt8500/Kconfig"
source "arch/arm/mach-w90x900/Kconfig"
# Definitions to make life easier
@ -1177,6 +1189,53 @@ config ARM_ERRATA_743622
visible impact on the overall performance or power consumption of the
processor.
config ARM_ERRATA_751472
bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation"
depends on CPU_V7 && SMP
help
This option enables the workaround for the 751472 Cortex-A9 (prior
to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the
completion of a following broadcasted operation if the second
operation is received by a CPU before the ICIALLUIS has completed,
potentially leading to corrupted entries in the cache or TLB.
config ARM_ERRATA_753970
bool "ARM errata: cache sync operation may be faulty"
depends on CACHE_PL310
help
This option enables the workaround for the 753970 PL310 (r3p0) erratum.
Under some conditions the effect of the cache sync operation on
the store buffer still remains when the operation completes.
This means that the store buffer is always asked to drain and
this prevents it from merging any further writes. The workaround
is to replace the normal offset of cache sync operation (0x730)
by another offset targeting an unmapped PL310 register 0x740.
This has the same effect as the cache sync operation: store buffer
drain and waiting for all buffers to empty.
config ARM_ERRATA_754322
bool "ARM errata: possible faulty MMU translations following an ASID switch"
depends on CPU_V7
help
This option enables the workaround for the 754322 Cortex-A9 (r2p*,
r3p*) erratum. A speculative memory access may cause a page table walk
which starts prior to an ASID switch but completes afterwards. This
can populate the micro-TLB with a stale entry which may be hit with
the new ASID. This workaround places two dsb instructions in the mm
switching code so that no page table walks can cross the ASID switch.
config ARM_ERRATA_754327
bool "ARM errata: no automatic Store Buffer drain"
depends on CPU_V7 && SMP
help
This option enables the workaround for the 754327 Cortex-A9 (prior to
r2p0) erratum. The Store Buffer does not have any automatic draining
mechanism and therefore a livelock may occur if an external agent
continuously polls a memory location waiting to observe an update.
This workaround defines cpu_relax() as smp_mb(), preventing correctly
written polling loops from denying visibility of updates to memory.
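To illustrate the kind of loop the 754327 text above refers to, a hedged sketch of a correctly written polling loop; with the workaround enabled, cpu_relax() implies smp_mb(), so stores made by this CPU before entering the loop are drained and become visible to the agent it is waiting on:

#include <linux/compiler.h>
#include <asm/processor.h>

/* Illustrative only: spin until another CPU or DMA agent sets *flag. */
static void wait_for_flag(int *flag)
{
        while (!ACCESS_ONCE(*flag))
                cpu_relax();    /* smp_mb() here drains this CPU's store buffer */
}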
endmenu
source "arch/arm/common/Kconfig"
@ -1619,6 +1678,18 @@ config ZBOOT_ROM
Say Y here if you intend to execute your compressed kernel image
(zImage) directly from ROM or flash. If unsure, say N.
config ZBOOT_ROM_MMCIF
bool "Include MMCIF loader in zImage (EXPERIMENTAL)"
depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL
help
Say Y here to include experimental MMCIF loading code in the
ROM-able zImage. With this enabled it is possible to write the
ROM-able zImage kernel image to an MMC card and boot the
kernel straight from the reset vector. At reset the processor
mask ROM will load the first part of the ROM-able zImage,
which in turn loads the rest of the kernel image to RAM using the
MMCIF hardware block.
config CMDLINE
string "Default kernel command string"
default ""

View file

@ -15,7 +15,7 @@ ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
endif
OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
OBJCOPYFLAGS :=-O binary -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
# Explicitly specify 32-bit ARM ISA since toolchain default can be -mthumb:
@ -190,6 +190,7 @@ machine-$(CONFIG_ARCH_U300) := u300
machine-$(CONFIG_ARCH_U8500) := ux500
machine-$(CONFIG_ARCH_VERSATILE) := versatile
machine-$(CONFIG_ARCH_VEXPRESS) := vexpress
machine-$(CONFIG_ARCH_VT8500) := vt8500
machine-$(CONFIG_ARCH_W90X900) := w90x900
machine-$(CONFIG_ARCH_NUC93X) := nuc93x
machine-$(CONFIG_FOOTBRIDGE) := footbridge

View file

@ -1,3 +1,7 @@
font.c
piggy.gz
lib1funcs.S
piggy.gzip
piggy.lzo
piggy.lzma
vmlinux
vmlinux.lds

View file

@ -4,9 +4,20 @@
# create a compressed vmlinuz image from the original vmlinux
#
OBJS =
# Ensure that mmcif loader code appears early in the image
# to minimise the number of blocks that have to be read in
# order to load it.
ifeq ($(CONFIG_ZBOOT_ROM_MMCIF),y)
ifeq ($(CONFIG_ARCH_SH7372),y)
OBJS += mmcif-sh7372.o
endif
endif
AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET)
HEAD = head.o
OBJS = misc.o decompress.o
OBJS += misc.o decompress.o
FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c
#
@ -29,6 +40,10 @@ ifeq ($(CONFIG_ARCH_SA1100),y)
OBJS += head-sa1100.o
endif
ifeq ($(CONFIG_ARCH_VT8500),y)
OBJS += head-vt8500.o
endif
ifeq ($(CONFIG_CPU_XSCALE),y)
OBJS += head-xscale.o
endif

View file

@ -25,6 +25,36 @@
/* load board-specific initialization code */
#include <mach/zboot.h>
#ifdef CONFIG_ZBOOT_ROM_MMCIF
/* Load image from MMC */
adr sp, __tmp_stack + 128
ldr r0, __image_start
ldr r1, __image_end
subs r1, r1, r0
ldr r0, __load_base
bl mmcif_loader
/* Jump to loaded code */
ldr r0, __loaded
ldr r1, __image_start
sub r0, r0, r1
ldr r1, __load_base
add pc, r0, r1
__image_start:
.long _start
__image_end:
.long _got_end
__load_base:
.long CONFIG_MEMORY_START + 0x02000000 @ Load at 32Mb into SDRAM
__loaded:
.long __continue
.align
__tmp_stack:
.space 128
__continue:
#endif /* CONFIG_ZBOOT_ROM_MMCIF */
b 1f
__atags:@ tag #1
.long 12 @ tag->hdr.size = tag_size(tag_core);

View file

@ -0,0 +1,46 @@
/*
* linux/arch/arm/boot/compressed/head-vt8500.S
*
* Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
*
* VIA VT8500 specific tweaks. This is merged into head.S by the linker.
*
*/
#include <linux/linkage.h>
#include <asm/mach-types.h>
.section ".start", "ax"
__VT8500_start:
@ Compare the SCC ID register against a list of known values
ldr r1, .SCCID
ldr r3, [r1]
@ VT8500 override
ldr r4, .VT8500SCC
cmp r3, r4
ldreq r7, .ID_BV07
beq .Lendvt8500
@ WM8505 override
ldr r4, .WM8505SCC
cmp r3, r4
ldreq r7, .ID_8505
beq .Lendvt8500
@ Otherwise, leave the bootloader's machine id untouched
.SCCID:
.word 0xd8120000
.VT8500SCC:
.word 0x34000102
.WM8505SCC:
.word 0x34260103
.ID_BV07:
.word MACH_TYPE_BV07
.ID_8505:
.word MACH_TYPE_WM8505_7IN_NETBOOK
.Lendvt8500:

View file

@ -0,0 +1,87 @@
/*
* sh7372 MMCIF loader
*
* Copyright (C) 2010 Magnus Damm
* Copyright (C) 2010 Simon Horman
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/mmc/sh_mmcif.h>
#include <mach/mmcif.h>
#define MMCIF_BASE (void __iomem *)0xe6bd0000
#define PORT84CR (void __iomem *)0xe6050054
#define PORT85CR (void __iomem *)0xe6050055
#define PORT86CR (void __iomem *)0xe6050056
#define PORT87CR (void __iomem *)0xe6050057
#define PORT88CR (void __iomem *)0xe6050058
#define PORT89CR (void __iomem *)0xe6050059
#define PORT90CR (void __iomem *)0xe605005a
#define PORT91CR (void __iomem *)0xe605005b
#define PORT92CR (void __iomem *)0xe605005c
#define PORT99CR (void __iomem *)0xe6050063
#define SMSTPCR3 (void __iomem *)0xe615013c
/* SH7372 specific MMCIF loader
*
* loads the zImage from an MMC card starting from block 1.
*
* The image must start with a vrl4 header and
* the zImage must start at offset 512 of the image. That is,
* at block 2 (=byte 1024) on the media
*
* Use the following line to write the vrl4 formatted zImage
* to an MMC card
* # dd if=vrl4.out of=/dev/sdx bs=512 seek=1
*/
asmlinkage void mmcif_loader(unsigned char *buf, unsigned long len)
{
mmcif_init_progress();
mmcif_update_progress(MMCIF_PROGRESS_ENTER);
/* Initialise MMC
* registers: PORT84CR-PORT92CR
* (MMCD0_0-MMCD0_7,MMCCMD0 Control)
* value: 0x04 - select function 4
*/
__raw_writeb(0x04, PORT84CR);
__raw_writeb(0x04, PORT85CR);
__raw_writeb(0x04, PORT86CR);
__raw_writeb(0x04, PORT87CR);
__raw_writeb(0x04, PORT88CR);
__raw_writeb(0x04, PORT89CR);
__raw_writeb(0x04, PORT90CR);
__raw_writeb(0x04, PORT91CR);
__raw_writeb(0x04, PORT92CR);
/* Initialise MMC
* registers: PORT99CR (MMCCLK0 Control)
* value: 0x10 | 0x04 - enable output | select function 4
*/
__raw_writeb(0x14, PORT99CR);
/* Enable clock to MMC hardware block */
__raw_writel(__raw_readl(SMSTPCR3) & ~(1 << 12), SMSTPCR3);
mmcif_update_progress(MMCIF_PROGRESS_INIT);
/* setup MMCIF hardware */
sh_mmcif_boot_init(MMCIF_BASE);
mmcif_update_progress(MMCIF_PROGRESS_LOAD);
/* load kernel via MMCIF interface */
sh_mmcif_boot_do_read(MMCIF_BASE, 2, /* Kernel is at block 2 */
(len + SH_MMCIF_BBS - 1) / SH_MMCIF_BBS, buf);
/* Disable clock to MMC hardware block */
__raw_writel(__raw_readl(SMSTPCR3) | (1 << 12), SMSTPCR3);
mmcif_update_progress(MMCIF_PROGRESS_DONE);
}

View file

@ -6,6 +6,8 @@ config ARM_VIC
config ARM_VIC_NR
int
default 4 if ARCH_S5PV210
default 3 if ARCH_S5P6442 || ARCH_S5PC100
default 2
depends on ARM_VIC
help

View file

@ -12,130 +12,13 @@
#include <linux/mm.h>
#include <asm/glue.h>
#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>
#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
/*
* Cache Model
* ===========
*/
#undef _CACHE
#undef MULTI_CACHE
#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v3
# endif
#endif
#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4
# endif
#endif
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
defined(CONFIG_CPU_ARM1026)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_FA526)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE fa
# endif
#endif
#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm926
# endif
#endif
#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm940
# endif
#endif
#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm946
# endif
#endif
#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4wb
# endif
#endif
#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xscale
# endif
#endif
#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xsc3
# endif
#endif
#if defined(CONFIG_CPU_MOHAWK)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE mohawk
# endif
#endif
#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v6
//# endif
#endif
#if defined(CONFIG_CPU_V7)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v7
//# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintainence model
#endif
/*
* This flag is used to indicate that the page pointed to by a pte is clean
* and does not require cleaning before returning it to the user.
@ -249,19 +132,11 @@ extern struct cpu_cache_fns cpu_cache;
* visible to the CPU.
*/
#define dmac_map_area cpu_cache.dma_map_area
#define dmac_unmap_area cpu_cache.dma_unmap_area
#define dmac_unmap_area cpu_cache.dma_unmap_area
#define dmac_flush_range cpu_cache.dma_flush_range
#else
#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
@ -276,10 +151,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
#define dmac_map_area __glue(_CACHE,_dma_map_area)
#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);

View file

@ -1,69 +0,0 @@
/*
* arch/arm/include/asm/cpu-multi32.h
*
* Copyright (C) 2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/page.h>
struct mm_struct;
/*
* Don't change this structure - ASM code
* relies on it.
*/
extern struct processor {
/* MISC
* get data abort address/flags
*/
void (*_data_abort)(unsigned long pc);
/*
* Retrieve prefetch fault address
*/
unsigned long (*_prefetch_abort)(unsigned long lr);
/*
* Set up any processor specifics
*/
void (*_proc_init)(void);
/*
* Disable any processor specifics
*/
void (*_proc_fin)(void);
/*
* Special stuff for a reset
*/
void (*reset)(unsigned long addr) __attribute__((noreturn));
/*
* Idle the processor
*/
int (*_do_idle)(void);
/*
* Processor architecture specific
*/
/*
* clean a virtual address range from the
* D-cache without flushing the cache.
*/
void (*dcache_clean_area)(void *addr, int size);
/*
* Set the page table
*/
void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
/*
* Set a possibly extended PTE. Non-extended PTEs should
* ignore 'ext'.
*/
void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
} processor;
#define cpu_proc_init() processor._proc_init()
#define cpu_proc_fin() processor._proc_fin()
#define cpu_reset(addr) processor.reset(addr)
#define cpu_do_idle() processor._do_idle()
#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz)
#define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext)
#define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm)

View file

@ -1,44 +0,0 @@
/*
* arch/arm/include/asm/cpu-single.h
*
* Copyright (C) 2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* Single CPU
*/
#ifdef __STDC__
#define __catify_fn(name,x) name##x
#else
#define __catify_fn(name,x) name/**/x
#endif
#define __cpu_fn(name,x) __catify_fn(name,x)
/*
* If we are supporting multiple CPUs, then we must use a table of
* function pointers for this lot. Otherwise, we can optimise the
* table away.
*/
#define cpu_proc_init __cpu_fn(CPU_NAME,_proc_init)
#define cpu_proc_fin __cpu_fn(CPU_NAME,_proc_fin)
#define cpu_reset __cpu_fn(CPU_NAME,_reset)
#define cpu_do_idle __cpu_fn(CPU_NAME,_do_idle)
#define cpu_dcache_clean_area __cpu_fn(CPU_NAME,_dcache_clean_area)
#define cpu_do_switch_mm __cpu_fn(CPU_NAME,_switch_mm)
#define cpu_set_pte_ext __cpu_fn(CPU_NAME,_set_pte_ext)
#include <asm/page.h>
struct mm_struct;
/* declare all the functions as extern */
extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
extern void cpu_dcache_clean_area(void *, int);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));

View file

@ -0,0 +1,94 @@
/*
* arch/arm/include/asm/fncpy.h - helper macros for function body copying
*
* Copyright (C) 2011 Linaro Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* These macros are intended for use when there is a need to copy a low-level
* function body into special memory.
*
* For example, when reconfiguring the SDRAM controller, the code doing the
* reconfiguration may need to run from SRAM.
*
* NOTE: that the copied function body must be entirely self-contained and
* position-independent in order for this to work properly.
*
* NOTE: in order for embedded literals and data to get referenced correctly,
* the alignment of functions must be preserved when copying. To ensure this,
* the source and destination addresses for fncpy() must be aligned to a
* multiple of 8 bytes: you will get a BUG() if this condition is not met.
* You will typically need a ".align 3" directive in the assembler where the
* function to be copied is defined, and ensure that your allocator for the
* destination buffer returns 8-byte-aligned pointers.
*
* Typical usage example:
*
* extern int f(args);
* extern uint32_t size_of_f;
* int (*copied_f)(args);
* void *sram_buffer;
*
* copied_f = fncpy(sram_buffer, &f, size_of_f);
*
* ... later, call the function: ...
*
* copied_f(args);
*
* The size of the function to be copied can't be determined from C:
* this must be determined by other means, such as adding assembler directives
* in the file where f is defined.
*/
#ifndef __ASM_FNCPY_H
#define __ASM_FNCPY_H
#include <linux/types.h>
#include <linux/string.h>
#include <asm/bug.h>
#include <asm/cacheflush.h>
/*
* Minimum alignment requirement for the source and destination addresses
* for function copying.
*/
#define FNCPY_ALIGN 8
#define fncpy(dest_buf, funcp, size) ({ \
uintptr_t __funcp_address; \
typeof(funcp) __result; \
\
asm("" : "=r" (__funcp_address) : "0" (funcp)); \
\
/* \
* Ensure alignment of source and destination addresses, \
* disregarding the function's Thumb bit: \
*/ \
BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
(__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
\
memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
flush_icache_range((unsigned long)(dest_buf), \
(unsigned long)(dest_buf) + (size)); \
\
asm("" : "=r" (__result) \
: "0" ((uintptr_t)(dest_buf) | (__funcp_address & 1))); \
\
__result; \
})
#endif /* !__ASM_FNCPY_H */
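To make the calling convention documented above concrete, here is a minimal usage sketch. Everything named sram_* is a hypothetical placeholder (the routine would be written in assembly with a ".align 3" and its size exported alongside it); only fncpy() and the 8-byte alignment rule come from the header itself.

/* Hypothetical caller: copy a self-contained routine into SRAM and run it. */
#include <asm/fncpy.h>

extern int sram_fn(unsigned long arg);	/* position-independent, .align 3 */
extern unsigned int sram_fn_sz;		/* size emitted next to sram_fn */

static int run_from_sram(void *sram_buf, unsigned long arg)
{
	int (*fn)(unsigned long);

	/* sram_buf must also be 8-byte aligned or the BUG_ON() above fires */
	fn = fncpy(sram_buf, &sram_fn, sram_fn_sz);

	return fn(arg);
}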

View file

@ -0,0 +1,146 @@
/*
* arch/arm/include/asm/glue-cache.h
*
* Copyright (C) 1999-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_CACHE_H
#define ASM_GLUE_CACHE_H
#include <asm/glue.h>
/*
* Cache Model
* ===========
*/
#undef _CACHE
#undef MULTI_CACHE
#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v3
# endif
#endif
#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4
# endif
#endif
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
defined(CONFIG_CPU_ARM1026)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_FA526)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE fa
# endif
#endif
#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm926
# endif
#endif
#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm940
# endif
#endif
#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm946
# endif
#endif
#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4wb
# endif
#endif
#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xscale
# endif
#endif
#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xsc3
# endif
#endif
#if defined(CONFIG_CPU_MOHAWK)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE mohawk
# endif
#endif
#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v6
//# endif
#endif
#if defined(CONFIG_CPU_V7)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v7
//# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
#ifndef MULTI_CACHE
#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
#define dmac_map_area __glue(_CACHE,_dma_map_area)
#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
#endif
#endif

View file

@ -0,0 +1,110 @@
/*
* arch/arm/include/asm/glue-df.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_DF_H
#define ASM_GLUE_DF_H
#include <asm/glue.h>
/*
* Data Abort Model
* ================
*
* We have the following to choose from:
* arm6 - ARM6 style
* arm7 - ARM7 style
* v4_early - ARMv4 without Thumb early abort handler
* v4t_late - ARMv4 with Thumb late abort handler
* v4t_early - ARMv4 with Thumb early abort handler
* v5tej_early - ARMv5 with Thumb and Java early abort handler
* xscale - ARMv5 with Thumb with Xscale extensions
* v6_early - ARMv6 generic early abort handler
* v7_early - ARMv7 generic early abort handler
*/
#undef CPU_DABORT_HANDLER
#undef MULTI_DABORT
#if defined(CONFIG_CPU_ARM610)
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER cpu_arm6_data_abort
# endif
#endif
#if defined(CONFIG_CPU_ARM710)
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER cpu_arm7_data_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_LV4T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4t_late_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV4
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV4T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4t_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV5TJ
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v5tj_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV5T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v5t_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV6
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v6_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV7
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v7_early_abort
# endif
#endif
#ifndef CPU_DABORT_HANDLER
#error Unknown data abort handler type
#endif
#endif

View file

@ -0,0 +1,57 @@
/*
* arch/arm/include/asm/glue-pf.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_PF_H
#define ASM_GLUE_PF_H
#include <asm/glue.h>
/*
* Prefetch Abort Model
* ================
*
* We have the following to choose from:
* legacy - no IFSR, no IFAR
* v6 - ARMv6: IFSR, no IFAR
* v7 - ARMv7: IFSR and IFAR
*/
#undef CPU_PABORT_HANDLER
#undef MULTI_PABORT
#ifdef CONFIG_CPU_PABRT_LEGACY
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER legacy_pabort
# endif
#endif
#ifdef CONFIG_CPU_PABRT_V6
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER v6_pabort
# endif
#endif
#ifdef CONFIG_CPU_PABRT_V7
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER v7_pabort
# endif
#endif
#ifndef CPU_PABORT_HANDLER
#error Unknown prefetch abort handler type
#endif
#endif

View file

@ -0,0 +1,264 @@
/*
* arch/arm/include/asm/glue-proc.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_PROC_H
#define ASM_GLUE_PROC_H
#include <asm/glue.h>
/*
* Work out if we need multiple CPU support
*/
#undef MULTI_CPU
#undef CPU_NAME
/*
* CPU_NAME - the prefix for CPU related functions
*/
#ifdef CONFIG_CPU_ARM610
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm6
# endif
#endif
#ifdef CONFIG_CPU_ARM7TDMI
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm7tdmi
# endif
#endif
#ifdef CONFIG_CPU_ARM710
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm7
# endif
#endif
#ifdef CONFIG_CPU_ARM720T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm720
# endif
#endif
#ifdef CONFIG_CPU_ARM740T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm740
# endif
#endif
#ifdef CONFIG_CPU_ARM9TDMI
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm9tdmi
# endif
#endif
#ifdef CONFIG_CPU_ARM920T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm920
# endif
#endif
#ifdef CONFIG_CPU_ARM922T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm922
# endif
#endif
#ifdef CONFIG_CPU_FA526
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_fa526
# endif
#endif
#ifdef CONFIG_CPU_ARM925T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm925
# endif
#endif
#ifdef CONFIG_CPU_ARM926T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm926
# endif
#endif
#ifdef CONFIG_CPU_ARM940T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm940
# endif
#endif
#ifdef CONFIG_CPU_ARM946E
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm946
# endif
#endif
#ifdef CONFIG_CPU_SA110
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_sa110
# endif
#endif
#ifdef CONFIG_CPU_SA1100
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_sa1100
# endif
#endif
#ifdef CONFIG_CPU_ARM1020
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1020
# endif
#endif
#ifdef CONFIG_CPU_ARM1020E
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1020e
# endif
#endif
#ifdef CONFIG_CPU_ARM1022
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1022
# endif
#endif
#ifdef CONFIG_CPU_ARM1026
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1026
# endif
#endif
#ifdef CONFIG_CPU_XSCALE
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_xscale
# endif
#endif
#ifdef CONFIG_CPU_XSC3
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_xsc3
# endif
#endif
#ifdef CONFIG_CPU_MOHAWK
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_mohawk
# endif
#endif
#ifdef CONFIG_CPU_FEROCEON
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_feroceon
# endif
#endif
#ifdef CONFIG_CPU_V6
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_v6
# endif
#endif
#ifdef CONFIG_CPU_V7
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_v7
# endif
#endif
#ifndef MULTI_CPU
#define cpu_proc_init __glue(CPU_NAME,_proc_init)
#define cpu_proc_fin __glue(CPU_NAME,_proc_fin)
#define cpu_reset __glue(CPU_NAME,_reset)
#define cpu_do_idle __glue(CPU_NAME,_do_idle)
#define cpu_dcache_clean_area __glue(CPU_NAME,_dcache_clean_area)
#define cpu_do_switch_mm __glue(CPU_NAME,_switch_mm)
#define cpu_set_pte_ext __glue(CPU_NAME,_set_pte_ext)
#define cpu_suspend_size __glue(CPU_NAME,_suspend_size)
#define cpu_do_suspend __glue(CPU_NAME,_do_suspend)
#define cpu_do_resume __glue(CPU_NAME,_do_resume)
#endif
#endif

View file

@ -15,7 +15,6 @@
*/
#ifdef __KERNEL__
#ifdef __STDC__
#define ____glue(name,fn) name##fn
#else
@ -23,141 +22,4 @@
#endif
#define __glue(name,fn) ____glue(name,fn)
/*
* Data Abort Model
* ================
*
* We have the following to choose from:
* arm6 - ARM6 style
* arm7 - ARM7 style
* v4_early - ARMv4 without Thumb early abort handler
* v4t_late - ARMv4 with Thumb late abort handler
* v4t_early - ARMv4 with Thumb early abort handler
* v5tej_early - ARMv5 with Thumb and Java early abort handler
* xscale - ARMv5 with Thumb with Xscale extensions
* v6_early - ARMv6 generic early abort handler
* v7_early - ARMv7 generic early abort handler
*/
#undef CPU_DABORT_HANDLER
#undef MULTI_DABORT
#if defined(CONFIG_CPU_ARM610)
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER cpu_arm6_data_abort
# endif
#endif
#if defined(CONFIG_CPU_ARM710)
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER cpu_arm7_data_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_LV4T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4t_late_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV4
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV4T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4t_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV5TJ
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v5tj_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV5T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v5t_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV6
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v6_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV7
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v7_early_abort
# endif
#endif
#ifndef CPU_DABORT_HANDLER
#error Unknown data abort handler type
#endif
/*
* Prefetch Abort Model
* ================
*
* We have the following to choose from:
* legacy - no IFSR, no IFAR
* v6 - ARMv6: IFSR, no IFAR
* v7 - ARMv7: IFSR and IFAR
*/
#undef CPU_PABORT_HANDLER
#undef MULTI_PABORT
#ifdef CONFIG_CPU_PABRT_LEGACY
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER legacy_pabort
# endif
#endif
#ifdef CONFIG_CPU_PABRT_V6
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER v6_pabort
# endif
#endif
#ifdef CONFIG_CPU_PABRT_V7
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER v7_pabort
# endif
#endif
#ifndef CPU_PABORT_HANDLER
#error Unknown prefetch abort handler type
#endif
#endif

View file

@ -36,6 +36,7 @@
#define L2X0_RAW_INTR_STAT 0x21C
#define L2X0_INTR_CLEAR 0x220
#define L2X0_CACHE_SYNC 0x730
#define L2X0_DUMMY_REG 0x740
#define L2X0_INV_LINE_PA 0x770
#define L2X0_INV_WAY 0x77C
#define L2X0_CLEAN_LINE_PA 0x7B0

View file

@ -58,6 +58,9 @@
static inline void sysctl_soft_reset(void __iomem *base)
{
/* switch to slow mode */
writel(0x2, base + SCCTRL);
/* writing any value to SCSYSSTAT reg will reset system */
writel(0, base + SCSYSSTAT);
}

View file

@ -15,10 +15,6 @@ struct meminfo;
struct sys_timer;
struct machine_desc {
/*
* Note! The first two elements are used
* by assembler code in head.S, head-common.S
*/
unsigned int nr; /* architecture number */
const char *name; /* architecture name */
unsigned long boot_params; /* tagged list */

View file

@ -10,6 +10,8 @@
#ifndef _ASMARM_PGALLOC_H
#define _ASMARM_PGALLOC_H
#include <linux/pagemap.h>
#include <asm/domain.h>
#include <asm/pgtable-hwdef.h>
#include <asm/processor.h>

View file

@ -13,250 +13,86 @@
#ifdef __KERNEL__
/*
* Work out if we need multiple CPU support
*/
#undef MULTI_CPU
#undef CPU_NAME
/*
* CPU_NAME - the prefix for CPU related functions
*/
#ifdef CONFIG_CPU_ARM610
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm6
# endif
#endif
#ifdef CONFIG_CPU_ARM7TDMI
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm7tdmi
# endif
#endif
#ifdef CONFIG_CPU_ARM710
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm7
# endif
#endif
#ifdef CONFIG_CPU_ARM720T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm720
# endif
#endif
#ifdef CONFIG_CPU_ARM740T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm740
# endif
#endif
#ifdef CONFIG_CPU_ARM9TDMI
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm9tdmi
# endif
#endif
#ifdef CONFIG_CPU_ARM920T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm920
# endif
#endif
#ifdef CONFIG_CPU_ARM922T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm922
# endif
#endif
#ifdef CONFIG_CPU_FA526
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_fa526
# endif
#endif
#ifdef CONFIG_CPU_ARM925T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm925
# endif
#endif
#ifdef CONFIG_CPU_ARM926T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm926
# endif
#endif
#ifdef CONFIG_CPU_ARM940T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm940
# endif
#endif
#ifdef CONFIG_CPU_ARM946E
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm946
# endif
#endif
#ifdef CONFIG_CPU_SA110
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_sa110
# endif
#endif
#ifdef CONFIG_CPU_SA1100
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_sa1100
# endif
#endif
#ifdef CONFIG_CPU_ARM1020
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1020
# endif
#endif
#ifdef CONFIG_CPU_ARM1020E
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1020e
# endif
#endif
#ifdef CONFIG_CPU_ARM1022
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1022
# endif
#endif
#ifdef CONFIG_CPU_ARM1026
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1026
# endif
#endif
#ifdef CONFIG_CPU_XSCALE
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_xscale
# endif
#endif
#ifdef CONFIG_CPU_XSC3
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_xsc3
# endif
#endif
#ifdef CONFIG_CPU_MOHAWK
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_mohawk
# endif
#endif
#ifdef CONFIG_CPU_FEROCEON
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_feroceon
# endif
#endif
#ifdef CONFIG_CPU_V6
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_v6
# endif
#endif
#ifdef CONFIG_CPU_V7
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_v7
# endif
#endif
#include <asm/glue-proc.h>
#include <asm/page.h>
#ifndef __ASSEMBLY__
struct mm_struct;
/*
* Don't change this structure - ASM code relies on it.
*/
extern struct processor {
/* MISC
* get data abort address/flags
*/
void (*_data_abort)(unsigned long pc);
/*
* Retrieve prefetch fault address
*/
unsigned long (*_prefetch_abort)(unsigned long lr);
/*
* Set up any processor specifics
*/
void (*_proc_init)(void);
/*
* Disable any processor specifics
*/
void (*_proc_fin)(void);
/*
* Special stuff for a reset
*/
void (*reset)(unsigned long addr) __attribute__((noreturn));
/*
* Idle the processor
*/
int (*_do_idle)(void);
/*
* Processor architecture specific
*/
/*
* clean a virtual address range from the
* D-cache without flushing the cache.
*/
void (*dcache_clean_area)(void *addr, int size);
/*
* Set the page table
*/
void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
/*
* Set a possibly extended PTE. Non-extended PTEs should
* ignore 'ext'.
*/
void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
/* Suspend/resume */
unsigned int suspend_size;
void (*do_suspend)(void *);
void (*do_resume)(void *);
} processor;
#ifndef MULTI_CPU
#include <asm/cpu-single.h>
extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
extern void cpu_dcache_clean_area(void *, int);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
#else
#include <asm/cpu-multi32.h>
#define cpu_proc_init() processor._proc_init()
#define cpu_proc_fin() processor._proc_fin()
#define cpu_reset(addr) processor.reset(addr)
#define cpu_do_idle() processor._do_idle()
#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz)
#define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext)
#define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm)
#endif
extern void cpu_resume(void);
#include <asm/memory.h>
#ifdef CONFIG_MMU

View file

@ -95,7 +95,7 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
#if __LINUX_ARM_ARCH__ == 6
#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
#define cpu_relax() smp_mb()
#else
#define cpu_relax() barrier()

View file

@ -1,7 +1,14 @@
#ifndef __ASMARM_ARCH_SCU_H
#define __ASMARM_ARCH_SCU_H
#define SCU_PM_NORMAL 0
#define SCU_PM_DORMANT 2
#define SCU_PM_POWEROFF 3
#ifndef __ASSEMBLER__
unsigned int scu_get_core_count(void __iomem *);
void scu_enable(void __iomem *);
int scu_power_mode(void __iomem *, unsigned int);
#endif
#endif

View file

@ -18,16 +18,34 @@
#define __ASMARM_TLB_H
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#ifndef CONFIG_MMU
#include <linux/pagemap.h>
#define tlb_flush(tlb) ((void) tlb)
#include <asm-generic/tlb.h>
#else /* !CONFIG_MMU */
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
/*
* We need to delay page freeing for SMP as other CPUs can access pages
* which have been removed but not yet had their TLB entries invalidated.
* Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
* we need to apply this same delaying tactic to ensure correct operation.
*/
#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
#define tlb_fast_mode(tlb) 0
#define FREE_PTE_NR 500
#else
#define tlb_fast_mode(tlb) 1
#define FREE_PTE_NR 0
#endif
/*
* TLB handling. This allows us to remove pages from the page
@ -36,12 +54,58 @@
struct mmu_gather {
struct mm_struct *mm;
unsigned int fullmm;
struct vm_area_struct *vma;
unsigned long range_start;
unsigned long range_end;
unsigned int nr;
struct page *pages[FREE_PTE_NR];
};
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
/*
* This is unnecessarily complex. There are three ways the TLB shootdown
* code is used:
* 1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
* tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
* tlb->vma will be non-NULL.
* 2. Unmapping all vmas. See exit_mmap().
* tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
* tlb->vma will be non-NULL. Additionally, page tables will be freed.
* 3. Unmapping argument pages. See shift_arg_pages().
* tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
* tlb->vma will be NULL.
*/
static inline void tlb_flush(struct mmu_gather *tlb)
{
if (tlb->fullmm || !tlb->vma)
flush_tlb_mm(tlb->mm);
else if (tlb->range_end > 0) {
flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
tlb->range_start = TASK_SIZE;
tlb->range_end = 0;
}
}
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
if (!tlb->fullmm) {
if (addr < tlb->range_start)
tlb->range_start = addr;
if (addr + PAGE_SIZE > tlb->range_end)
tlb->range_end = addr + PAGE_SIZE;
}
}
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
tlb_flush(tlb);
if (!tlb_fast_mode(tlb)) {
free_pages_and_swap_cache(tlb->pages, tlb->nr);
tlb->nr = 0;
}
}
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
@ -49,6 +113,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
tlb->mm = mm;
tlb->fullmm = full_mm_flush;
tlb->vma = NULL;
tlb->nr = 0;
return tlb;
}
@ -56,8 +122,7 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
if (tlb->fullmm)
flush_tlb_mm(tlb->mm);
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
check_pgt_cache();
@ -71,12 +136,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
if (!tlb->fullmm) {
if (addr < tlb->range_start)
tlb->range_start = addr;
if (addr + PAGE_SIZE > tlb->range_end)
tlb->range_end = addr + PAGE_SIZE;
}
tlb_add_flush(tlb, addr);
}
/*
@ -89,6 +149,7 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (!tlb->fullmm) {
flush_cache_range(vma, vma->vm_start, vma->vm_end);
tlb->vma = vma;
tlb->range_start = TASK_SIZE;
tlb->range_end = 0;
}
@ -97,12 +158,30 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (!tlb->fullmm && tlb->range_end > 0)
flush_tlb_range(vma, tlb->range_start, tlb->range_end);
if (!tlb->fullmm)
tlb_flush(tlb);
}
#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
if (tlb_fast_mode(tlb)) {
free_page_and_swap_cache(page);
} else {
tlb->pages[tlb->nr++] = page;
if (tlb->nr >= FREE_PTE_NR)
tlb_flush_mmu(tlb);
}
}
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long addr)
{
pgtable_page_dtor(pte);
tlb_add_flush(tlb, addr);
tlb_remove_page(tlb, pte);
}
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
#define tlb_migrate_finish(mm) do { } while (0)

View file

@ -10,12 +10,7 @@
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H
#ifndef CONFIG_MMU
#define tlb_flush(tlb) ((void) tlb)
#else /* CONFIG_MMU */
#ifdef CONFIG_MMU
#include <asm/glue.h>

View file

@ -29,6 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
obj-$(CONFIG_PM) += sleep.o
obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
obj-$(CONFIG_SMP) += smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o

View file

@ -13,6 +13,9 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/mach/arch.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
@ -113,6 +116,14 @@ int main(void)
#endif
#ifdef MULTI_PABORT
DEFINE(PROCESSOR_PABT_FUNC, offsetof(struct processor, _prefetch_abort));
#endif
#ifdef MULTI_CPU
DEFINE(CPU_SLEEP_SIZE, offsetof(struct processor, suspend_size));
DEFINE(CPU_DO_SUSPEND, offsetof(struct processor, do_suspend));
DEFINE(CPU_DO_RESUME, offsetof(struct processor, do_resume));
#endif
#ifdef MULTI_CACHE
DEFINE(CACHE_FLUSH_KERN_ALL, offsetof(struct cpu_cache_fns, flush_kern_all));
#endif
BLANK();
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);

View file

@ -16,7 +16,8 @@
*/
#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>

View file

@ -836,9 +836,11 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
/*
* One-time initialisation.
*/
static void reset_ctrl_regs(void *unused)
static void reset_ctrl_regs(void *info)
{
int i;
int i, cpu = smp_processor_id();
u32 dbg_power;
cpumask_t *cpumask = info;
/*
* v7 debug contains save and restore registers so that debug state
@ -849,6 +851,17 @@ static void reset_ctrl_regs(void *unused)
* later on.
*/
if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
/*
* Ensure sticky power-down is clear (i.e. debug logic is
* powered up).
*/
asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
if ((dbg_power & 0x1) == 0) {
pr_warning("CPU %d debug is powered down!\n", cpu);
cpumask_or(cpumask, cpumask, cpumask_of(cpu));
return;
}
/*
* Unconditionally clear the lock by writing a value
* other than 0xC5ACCE55 to the access register.
@ -887,6 +900,7 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
static int __init arch_hw_breakpoint_init(void)
{
u32 dscr;
cpumask_t cpumask = { CPU_BITS_NONE };
debug_arch = get_debug_arch();
@ -911,7 +925,13 @@ static int __init arch_hw_breakpoint_init(void)
* Reset the breakpoint resources. We assume that a halting
* debugger will leave the world in a nice state for us.
*/
on_each_cpu(reset_ctrl_regs, NULL, 1);
on_each_cpu(reset_ctrl_regs, &cpumask, 1);
if (!cpumask_empty(&cpumask)) {
core_num_brps = 0;
core_num_reserved_brps = 0;
core_num_wrps = 0;
return 0;
}
ARM_DBG_READ(c1, 0, dscr);
if (dscr & ARM_DSCR_HDBGEN) {

View file

@ -1437,7 +1437,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
return space_cccc_1100_010x(insn, asi);
} else if ((insn & 0x0e000000) == 0x0c400000) {
} else if ((insn & 0x0e000000) == 0x0c000000) {
return space_cccc_110x(insn, asi);

View file

@ -97,28 +97,34 @@ set_irq_affinity(int irq,
irq, cpu);
return err;
#else
return 0;
return -EINVAL;
#endif
}
static int
init_cpu_pmu(void)
{
int i, err = 0;
int i, irqs, err = 0;
struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
if (!pdev) {
err = -ENODEV;
goto out;
}
if (!pdev)
return -ENODEV;
for (i = 0; i < pdev->num_resources; ++i) {
irqs = pdev->num_resources;
/*
* If we have a single PMU interrupt that we can't shift, assume that
* we're running on a uniprocessor machine and continue.
*/
if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
return 0;
for (i = 0; i < irqs; ++i) {
err = set_irq_affinity(platform_get_irq(pdev, i), i);
if (err)
break;
}
out:
return err;
}

View file

@ -996,10 +996,10 @@ static int ptrace_gethbpregs(struct task_struct *tsk, long num,
while (!(arch_ctrl.len & 0x1))
arch_ctrl.len >>= 1;
if (idx & 0x1)
reg = encode_ctrl_reg(arch_ctrl);
else
if (num & 0x1)
reg = bp->attr.bp_addr;
else
reg = encode_ctrl_reg(arch_ctrl);
}
put:

View file

@ -226,8 +226,8 @@ int cpu_architecture(void)
* Register 0 and check for VMSAv7 or PMSAv7 */
asm("mrc p15, 0, %0, c0, c1, 4"
: "=r" (mmfr0));
if ((mmfr0 & 0x0000000f) == 0x00000003 ||
(mmfr0 & 0x000000f0) == 0x00000030)
if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
(mmfr0 & 0x000000f0) >= 0x00000030)
cpu_arch = CPU_ARCH_ARMv7;
else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
(mmfr0 & 0x000000f0) == 0x00000020)

View file

@ -474,7 +474,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
unsigned long handler = (unsigned long)ka->sa.sa_handler;
unsigned long retcode;
int thumb = 0;
unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
cpsr |= PSR_ENDSTATE;
/*
* Maybe we need to deliver a 32-bit signal to a 26-bit task.

arch/arm/kernel/sleep.S (new file, 134 lines)
View file

@ -0,0 +1,134 @@
#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
#include <asm/system.h>
.text
/*
* Save CPU state for a suspend
* r1 = v:p offset
* r3 = virtual return function
* Note: sp is decremented to allocate space for CPU state on stack
* r0-r3,r9,r10,lr corrupted
*/
ENTRY(cpu_suspend)
mov r9, lr
#ifdef MULTI_CPU
ldr r10, =processor
mov r2, sp @ current virtual SP
ldr r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function
sub sp, sp, r0 @ allocate CPU state on stack
mov r0, sp @ save pointer
add ip, ip, r1 @ convert resume fn to phys
stmfd sp!, {r1, r2, r3, ip} @ save v:p, virt SP, retfn, phys resume fn
ldr r3, =sleep_save_sp
add r2, sp, r1 @ convert SP to phys
#ifdef CONFIG_SMP
ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
ALT_UP(mov lr, #0)
and lr, lr, #15
str r2, [r3, lr, lsl #2] @ save phys SP
#else
str r2, [r3] @ save phys SP
#endif
mov lr, pc
ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
#else
mov r2, sp @ current virtual SP
ldr r0, =cpu_suspend_size
sub sp, sp, r0 @ allocate CPU state on stack
mov r0, sp @ save pointer
stmfd sp!, {r1, r2, r3} @ save v:p, virt SP, return fn
ldr r3, =sleep_save_sp
add r2, sp, r1 @ convert SP to phys
#ifdef CONFIG_SMP
ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
ALT_UP(mov lr, #0)
and lr, lr, #15
str r2, [r3, lr, lsl #2] @ save phys SP
#else
str r2, [r3] @ save phys SP
#endif
bl cpu_do_suspend
#endif
@ flush data cache
#ifdef MULTI_CACHE
ldr r10, =cpu_cache
mov lr, r9
ldr pc, [r10, #CACHE_FLUSH_KERN_ALL]
#else
mov lr, r9
b __cpuc_flush_kern_all
#endif
ENDPROC(cpu_suspend)
.ltorg
/*
* r0 = control register value
* r1 = v:p offset (preserved by cpu_do_resume)
* r2 = phys page table base
* r3 = L1 section flags
*/
ENTRY(cpu_resume_mmu)
adr r4, cpu_resume_turn_mmu_on
mov r4, r4, lsr #20
orr r3, r3, r4, lsl #20
ldr r5, [r2, r4, lsl #2] @ save old mapping
str r3, [r2, r4, lsl #2] @ setup 1:1 mapping for mmu code
sub r2, r2, r1
ldr r3, =cpu_resume_after_mmu
bic r1, r0, #CR_C @ ensure D-cache is disabled
b cpu_resume_turn_mmu_on
ENDPROC(cpu_resume_mmu)
.ltorg
.align 5
cpu_resume_turn_mmu_on:
mcr p15, 0, r1, c1, c0, 0 @ turn on MMU, I-cache, etc
mrc p15, 0, r1, c0, c0, 0 @ read id reg
mov r1, r1
mov r1, r1
mov pc, r3 @ jump to virtual address
ENDPROC(cpu_resume_turn_mmu_on)
cpu_resume_after_mmu:
str r5, [r2, r4, lsl #2] @ restore old mapping
mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache
mov pc, lr
ENDPROC(cpu_resume_after_mmu)
/*
* Note: Yes, part of the following code is located into the .data section.
* This is to allow sleep_save_sp to be accessed with a relative load
* while we can't rely on any MMU translation. We could have put
* sleep_save_sp in the .text section as well, but some setups might
* insist on it to be truly read-only.
*/
.data
.align
ENTRY(cpu_resume)
#ifdef CONFIG_SMP
adr r0, sleep_save_sp
ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
ALT_UP(mov r1, #0)
and r1, r1, #15
ldr r0, [r0, r1, lsl #2] @ stack phys addr
#else
ldr r0, sleep_save_sp @ stack phys addr
#endif
msr cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
#ifdef MULTI_CPU
ldmia r0!, {r1, sp, lr, pc} @ load v:p, stack, return fn, resume fn
#else
ldmia r0!, {r1, sp, lr} @ load v:p, stack, return fn
b cpu_do_resume
#endif
ENDPROC(cpu_resume)
sleep_save_sp:
.rept CONFIG_NR_CPUS
.long 0 @ preserve stack phys ptr here
.endr

View file

@ -50,3 +50,26 @@ void __init scu_enable(void __iomem *scu_base)
*/
flush_cache_all();
}
/*
* Set the executing CPU's power mode as defined. This will be in
* preparation for it executing a WFI instruction.
*
* This function must be called with preemption disabled, and as it
* has the side effect of disabling coherency, caches must have been
* flushed. Interrupts must also have been disabled.
*/
int scu_power_mode(void __iomem *scu_base, unsigned int mode)
{
unsigned int val;
int cpu = smp_processor_id();
if (mode > 3 || mode == 1 || cpu > 3)
return -EINVAL;
val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
val |= mode;
__raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu);
return 0;
}
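For context, a platform's CPU hotplug or idle path would typically call this right before the core executes WFI, after the cache flush and with interrupts already off, as the comment above requires. The sketch below is hypothetical: scu_base_addr and cpu_enter_lowpower() are placeholders, while scu_power_mode(), SCU_PM_POWEROFF, flush_cache_all() and cpu_do_idle() are existing kernel interfaces.

/* Hypothetical low-power path for a secondary CPU on an SCU-based SoC. */
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/smp_scu.h>

extern void __iomem *scu_base_addr;	/* platform-provided SCU mapping */

static void cpu_enter_lowpower(void)
{
	/* caller has already disabled interrupts and preemption */
	flush_cache_all();		/* must precede the loss of coherency */
	scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);

	for (;;)
		cpu_do_idle();		/* WFI; the core is powered down here */
}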

View file

@ -21,6 +21,12 @@
#define ARM_CPU_KEEP(x)
#endif
#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
#define ARM_EXIT_KEEP(x) x
#else
#define ARM_EXIT_KEEP(x)
#endif
OUTPUT_ARCH(arm)
ENTRY(stext)
@ -43,6 +49,7 @@ SECTIONS
_sinittext = .;
HEAD_TEXT
INIT_TEXT
ARM_EXIT_KEEP(EXIT_TEXT)
_einittext = .;
ARM_CPU_DISCARD(PROC_INFO)
__arch_info_begin = .;
@ -67,6 +74,7 @@ SECTIONS
#ifndef CONFIG_XIP_KERNEL
__init_begin = _stext;
INIT_DATA
ARM_EXIT_KEEP(EXIT_DATA)
#endif
}
@ -162,6 +170,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_begin = .;
INIT_DATA
ARM_EXIT_KEEP(EXIT_DATA)
. = ALIGN(PAGE_SIZE);
__init_end = .;
#endif
@ -247,6 +256,8 @@ SECTIONS
}
#endif
NOTES
BSS_SECTION(0, 0, 0)
_end = .;

View file

@ -132,7 +132,7 @@ static int davinci_target(struct cpufreq_policy *policy,
return ret;
}
static int __init davinci_cpu_init(struct cpufreq_policy *policy)
static int davinci_cpu_init(struct cpufreq_policy *policy)
{
int result = 0;
struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;

View file

@ -480,8 +480,15 @@ static struct platform_device da850_mcasp_device = {
.resource = da850_mcasp_resources,
};
struct platform_device davinci_pcm_device = {
.name = "davinci-pcm-audio",
.id = -1,
};
void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata)
{
platform_device_register(&davinci_pcm_device);
/* DA830/OMAP-L137 has 3 instances of McASP */
if (cpu_is_davinci_da830() && id == 1) {
da830_mcasp1_device.dev.platform_data = pdata;

View file

@ -58,7 +58,7 @@ static int tnetv107x_gpio_request(struct gpio_chip *chip, unsigned offset)
spin_lock_irqsave(&ctlr->lock, flags);
gpio_reg_set_bit(&regs->enable, gpio);
gpio_reg_set_bit(regs->enable, gpio);
spin_unlock_irqrestore(&ctlr->lock, flags);
@ -74,7 +74,7 @@ static void tnetv107x_gpio_free(struct gpio_chip *chip, unsigned offset)
spin_lock_irqsave(&ctlr->lock, flags);
gpio_reg_clear_bit(&regs->enable, gpio);
gpio_reg_clear_bit(regs->enable, gpio);
spin_unlock_irqrestore(&ctlr->lock, flags);
}
@ -88,7 +88,7 @@ static int tnetv107x_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
spin_lock_irqsave(&ctlr->lock, flags);
gpio_reg_set_bit(&regs->direction, gpio);
gpio_reg_set_bit(regs->direction, gpio);
spin_unlock_irqrestore(&ctlr->lock, flags);
@ -106,11 +106,11 @@ static int tnetv107x_gpio_dir_out(struct gpio_chip *chip,
spin_lock_irqsave(&ctlr->lock, flags);
if (value)
gpio_reg_set_bit(&regs->data_out, gpio);
gpio_reg_set_bit(regs->data_out, gpio);
else
gpio_reg_clear_bit(&regs->data_out, gpio);
gpio_reg_clear_bit(regs->data_out, gpio);
gpio_reg_clear_bit(&regs->direction, gpio);
gpio_reg_clear_bit(regs->direction, gpio);
spin_unlock_irqrestore(&ctlr->lock, flags);
@ -124,7 +124,7 @@ static int tnetv107x_gpio_get(struct gpio_chip *chip, unsigned offset)
unsigned gpio = chip->base + offset;
int ret;
ret = gpio_reg_get_bit(&regs->data_in, gpio);
ret = gpio_reg_get_bit(regs->data_in, gpio);
return ret ? 1 : 0;
}
@ -140,9 +140,9 @@ static void tnetv107x_gpio_set(struct gpio_chip *chip,
spin_lock_irqsave(&ctlr->lock, flags);
if (value)
gpio_reg_set_bit(&regs->data_out, gpio);
gpio_reg_set_bit(regs->data_out, gpio);
else
gpio_reg_clear_bit(&regs->data_out, gpio);
gpio_reg_clear_bit(regs->data_out, gpio);
spin_unlock_irqrestore(&ctlr->lock, flags);
}

View file

@ -1,6 +1,8 @@
#ifndef __MACH_CLKDEV_H
#define __MACH_CLKDEV_H
struct clk;
static inline int __clk_get(struct clk *clk)
{
return 1;

View file

@ -30,8 +30,13 @@
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
#include <linux/spi/spi.h>
#include <sound/cs4271.h>
#include <mach/hardware.h>
#include <mach/fb.h>
#include <mach/ep93xx_spi.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@ -92,6 +97,83 @@ static void __init edb93xx_register_i2c(void)
}
/*************************************************************************
* EDB93xx SPI peripheral handling
*************************************************************************/
static struct cs4271_platform_data edb93xx_cs4271_data = {
.gpio_nreset = -EINVAL, /* filled in later */
};
static int edb93xx_cs4271_hw_setup(struct spi_device *spi)
{
return gpio_request_one(EP93XX_GPIO_LINE_EGPIO6,
GPIOF_OUT_INIT_HIGH, spi->modalias);
}
static void edb93xx_cs4271_hw_cleanup(struct spi_device *spi)
{
gpio_free(EP93XX_GPIO_LINE_EGPIO6);
}
static void edb93xx_cs4271_hw_cs_control(struct spi_device *spi, int value)
{
gpio_set_value(EP93XX_GPIO_LINE_EGPIO6, value);
}
static struct ep93xx_spi_chip_ops edb93xx_cs4271_hw = {
.setup = edb93xx_cs4271_hw_setup,
.cleanup = edb93xx_cs4271_hw_cleanup,
.cs_control = edb93xx_cs4271_hw_cs_control,
};
static struct spi_board_info edb93xx_spi_board_info[] __initdata = {
{
.modalias = "cs4271",
.platform_data = &edb93xx_cs4271_data,
.controller_data = &edb93xx_cs4271_hw,
.max_speed_hz = 6000000,
.bus_num = 0,
.chip_select = 0,
.mode = SPI_MODE_3,
},
};
static struct ep93xx_spi_info edb93xx_spi_info __initdata = {
.num_chipselect = ARRAY_SIZE(edb93xx_spi_board_info),
};
static void __init edb93xx_register_spi(void)
{
if (machine_is_edb9301() || machine_is_edb9302())
edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_EGPIO1;
else if (machine_is_edb9302a() || machine_is_edb9307a())
edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_H(2);
else if (machine_is_edb9315a())
edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_EGPIO14;
ep93xx_register_spi(&edb93xx_spi_info, edb93xx_spi_board_info,
ARRAY_SIZE(edb93xx_spi_board_info));
}
/*************************************************************************
* EDB93xx I2S
*************************************************************************/
static int __init edb93xx_has_audio(void)
{
return (machine_is_edb9301() || machine_is_edb9302() ||
machine_is_edb9302a() || machine_is_edb9307a() ||
machine_is_edb9315a());
}
static void __init edb93xx_register_i2s(void)
{
if (edb93xx_has_audio()) {
ep93xx_register_i2s();
}
}
/*************************************************************************
* EDB93xx pwm
*************************************************************************/
@ -111,13 +193,47 @@ static void __init edb93xx_register_pwm(void)
}
/*************************************************************************
* EDB93xx framebuffer
*************************************************************************/
static struct ep93xxfb_mach_info __initdata edb93xxfb_info = {
.num_modes = EP93XXFB_USE_MODEDB,
.bpp = 16,
.flags = 0,
};
static int __init edb93xx_has_fb(void)
{
/* These platforms have an ep93xx with video capability */
return machine_is_edb9307() || machine_is_edb9307a() ||
machine_is_edb9312() || machine_is_edb9315() ||
machine_is_edb9315a();
}
static void __init edb93xx_register_fb(void)
{
if (!edb93xx_has_fb())
return;
if (machine_is_edb9307a() || machine_is_edb9315a())
edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN0;
else
edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN3;
ep93xx_register_fb(&edb93xxfb_info);
}
static void __init edb93xx_init_machine(void)
{
ep93xx_init_devices();
edb93xx_register_flash();
ep93xx_register_eth(&edb93xx_eth_data, 1);
edb93xx_register_i2c();
edb93xx_register_spi();
edb93xx_register_i2s();
edb93xx_register_pwm();
edb93xx_register_fb();
}

View file

@ -61,7 +61,7 @@ static inline void ep93xx_gpio_int_mask(unsigned line)
gpio_int_unmasked[line >> 3] &= ~(1 << (line & 7));
}
void ep93xx_gpio_int_debounce(unsigned int irq, int enable)
static void ep93xx_gpio_int_debounce(unsigned int irq, bool enable)
{
int line = irq_to_gpio(irq);
int port = line >> 3;
@ -75,7 +75,6 @@ void ep93xx_gpio_int_debounce(unsigned int irq, int enable)
__raw_writeb(gpio_int_debounce[port],
EP93XX_GPIO_REG(int_debounce_register_offset[port]));
}
EXPORT_SYMBOL(ep93xx_gpio_int_debounce);
static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc)
{
@ -335,6 +334,20 @@ static void ep93xx_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
local_irq_restore(flags);
}
static int ep93xx_gpio_set_debounce(struct gpio_chip *chip,
unsigned offset, unsigned debounce)
{
int gpio = chip->base + offset;
int irq = gpio_to_irq(gpio);
if (irq < 0)
return -EINVAL;
ep93xx_gpio_int_debounce(irq, debounce ? true : false);
return 0;
}
static void ep93xx_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
@ -434,6 +447,18 @@ void __init ep93xx_gpio_init(void)
EP93XX_SYSCON_DEVCFG_GONIDE |
EP93XX_SYSCON_DEVCFG_HONIDE);
for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++)
gpiochip_add(&ep93xx_gpio_banks[i].chip);
for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
struct gpio_chip *chip = &ep93xx_gpio_banks[i].chip;
/*
* Ports A, B, and F support input debouncing when
* used as interrupts.
*/
if (!strcmp(chip->label, "A") ||
!strcmp(chip->label, "B") ||
!strcmp(chip->label, "F"))
chip->set_debounce = ep93xx_gpio_set_debounce;
gpiochip_add(chip);
}
}
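As a usage note, once set_debounce is hooked up as above, board or driver code reaches it through the generic gpiolib call gpio_set_debounce(); the EP93xx callback only toggles the fixed hardware debounce, so anything non-zero simply enables it. The sketch below is illustrative only: the button line, handler and names are placeholders, while gpio_request(), gpio_direction_input(), gpio_set_debounce(), gpio_to_irq() and request_irq() are standard kernel APIs.

/* Hypothetical consumer: debounce a button on an IRQ-capable EP93xx line. */
#include <linux/gpio.h>
#include <linux/interrupt.h>

static int example_request_button(irq_handler_t handler, void *dev)
{
	unsigned gpio = EP93XX_GPIO_LINE_F(1);	/* ports A, B and F are IRQ-capable */
	int err;

	err = gpio_request(gpio, "button");
	if (err)
		return err;

	gpio_direction_input(gpio);
	gpio_set_debounce(gpio, 5000);		/* value in us; EP93xx just enables debounce */

	return request_irq(gpio_to_irq(gpio), handler,
			   IRQF_TRIGGER_FALLING, "button", dev);
}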

View file

@ -99,8 +99,6 @@
/* maximum value for irq capable line identifiers */
#define EP93XX_GPIO_LINE_MAX_IRQ EP93XX_GPIO_LINE_F(7)
extern void ep93xx_gpio_int_debounce(unsigned int irq, int enable);
/* new generic GPIO API - see Documentation/gpio.txt */
#include <asm-generic/gpio.h>

View file

@ -4,10 +4,11 @@
* Copyright (C) 1998 Russell King.
* Copyright (C) 1998 Phil Blundell
*/
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <asm/irq.h>
@ -16,32 +17,76 @@
#include "common.h"
/*
* Footbridge timer 1 support.
*/
static unsigned long timer1_latch;
static unsigned long timer1_gettimeoffset (void)
static cycle_t cksrc_dc21285_read(struct clocksource *cs)
{
unsigned long value = timer1_latch - *CSR_TIMER1_VALUE;
return ((tick_nsec / 1000) * value) / timer1_latch;
return cs->mask - *CSR_TIMER2_VALUE;
}
static irqreturn_t
timer1_interrupt(int irq, void *dev_id)
static int cksrc_dc21285_enable(struct clocksource *cs)
{
*CSR_TIMER2_LOAD = cs->mask;
*CSR_TIMER2_CLR = 0;
*CSR_TIMER2_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_DIV16;
return 0;
}
static void cksrc_dc21285_disable(struct clocksource *cs)
{
*CSR_TIMER2_CNTL = 0;
}
static struct clocksource cksrc_dc21285 = {
.name = "dc21285_timer2",
.rating = 200,
.read = cksrc_dc21285_read,
.enable = cksrc_dc21285_enable,
.disable = cksrc_dc21285_disable,
.mask = CLOCKSOURCE_MASK(24),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void ckevt_dc21285_set_mode(enum clock_event_mode mode,
struct clock_event_device *c)
{
switch (mode) {
case CLOCK_EVT_MODE_RESUME:
case CLOCK_EVT_MODE_PERIODIC:
*CSR_TIMER1_CLR = 0;
*CSR_TIMER1_LOAD = (mem_fclk_21285 + 8 * HZ) / (16 * HZ);
*CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD |
TIMER_CNTL_DIV16;
break;
default:
*CSR_TIMER1_CNTL = 0;
break;
}
}
static struct clock_event_device ckevt_dc21285 = {
.name = "dc21285_timer1",
.features = CLOCK_EVT_FEAT_PERIODIC,
.rating = 200,
.irq = IRQ_TIMER1,
.set_mode = ckevt_dc21285_set_mode,
};
static irqreturn_t timer1_interrupt(int irq, void *dev_id)
{
struct clock_event_device *ce = dev_id;
*CSR_TIMER1_CLR = 0;
timer_tick();
ce->event_handler(ce);
return IRQ_HANDLED;
}
static struct irqaction footbridge_timer_irq = {
.name = "Timer1 timer tick",
.name = "dc21285_timer1",
.handler = timer1_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
.dev_id = &ckevt_dc21285,
};
/*
@ -49,16 +94,19 @@ static struct irqaction footbridge_timer_irq = {
*/
static void __init footbridge_timer_init(void)
{
timer1_latch = (mem_fclk_21285 + 8 * HZ) / (16 * HZ);
struct clock_event_device *ce = &ckevt_dc21285;
*CSR_TIMER1_CLR = 0;
*CSR_TIMER1_LOAD = timer1_latch;
*CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD | TIMER_CNTL_DIV16;
clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16);
setup_irq(IRQ_TIMER1, &footbridge_timer_irq);
setup_irq(ce->irq, &footbridge_timer_irq);
clockevents_calc_mult_shift(ce, mem_fclk_21285, 5);
ce->max_delta_ns = clockevent_delta2ns(0xffffff, ce);
ce->min_delta_ns = clockevent_delta2ns(0x000004, ce);
clockevents_register_device(ce);
}
struct sys_timer footbridge_timer = {
.init = footbridge_timer_init,
.offset = timer1_gettimeoffset,
};

View file

@ -4,10 +4,13 @@
* Copyright (C) 1998 Russell King.
* Copyright (C) 1998 Phil Blundell
*/
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/timex.h>
#include <asm/irq.h>
@ -15,77 +18,115 @@
#include "common.h"
/*
* ISA timer tick support
*/
#define mSEC_10_from_14 ((14318180 + 100) / 200)
#define PIT_MODE 0x43
#define PIT_CH0 0x40
static unsigned long isa_gettimeoffset(void)
#define PIT_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ)
static cycle_t pit_read(struct clocksource *cs)
{
unsigned long flags;
static int old_count;
static u32 old_jifs;
int count;
u32 jifs;
static int count_p = (mSEC_10_from_14/6); /* for the first call after boot */
static unsigned long jiffies_p = 0;
raw_local_irq_save(flags);
/*
* cache volatile jiffies temporarily; we have IRQs turned off.
*/
unsigned long jiffies_t;
jifs = jiffies;
outb_p(0x00, PIT_MODE); /* latch the count */
count = inb_p(PIT_CH0); /* read the latched count */
count |= inb_p(PIT_CH0) << 8;
/* timer count may underflow right here */
outb_p(0x00, 0x43); /* latch the count ASAP */
if (count > old_count && jifs == old_jifs)
count = old_count;
count = inb_p(0x40); /* read the latched count */
old_count = count;
old_jifs = jifs;
/*
* We do this guaranteed double memory access instead of a _p
* postfix in the previous port access. Wheee, hackady hack
*/
jiffies_t = jiffies;
raw_local_irq_restore(flags);
count |= inb_p(0x40) << 8;
count = (PIT_LATCH - 1) - count;
/* Detect timer underflows. If we haven't had a timer tick since
the last time we were called, and time is apparently going
backwards, the counter must have wrapped during this routine. */
if ((jiffies_t == jiffies_p) && (count > count_p))
count -= (mSEC_10_from_14/6);
else
jiffies_p = jiffies_t;
count_p = count;
count = (((mSEC_10_from_14/6)-1) - count) * (tick_nsec / 1000);
count = (count + (mSEC_10_from_14/6)/2) / (mSEC_10_from_14/6);
return count;
return (cycle_t)(jifs * PIT_LATCH) + count;
}
static irqreturn_t
isa_timer_interrupt(int irq, void *dev_id)
static struct clocksource pit_cs = {
.name = "pit",
.rating = 110,
.read = pit_read,
.mask = CLOCKSOURCE_MASK(32),
};
static void pit_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
timer_tick();
unsigned long flags;
raw_local_irq_save(flags);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
outb_p(0x34, PIT_MODE);
outb_p(PIT_LATCH & 0xff, PIT_CH0);
outb_p(PIT_LATCH >> 8, PIT_CH0);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
outb_p(0x30, PIT_MODE);
outb_p(0, PIT_CH0);
outb_p(0, PIT_CH0);
break;
case CLOCK_EVT_MODE_ONESHOT:
case CLOCK_EVT_MODE_RESUME:
break;
}
local_irq_restore(flags);
}
static int pit_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
return 0;
}
static struct clock_event_device pit_ce = {
.name = "pit",
.features = CLOCK_EVT_FEAT_PERIODIC,
.set_mode = pit_set_mode,
.set_next_event = pit_set_next_event,
.shift = 32,
};
static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *ce = dev_id;
ce->event_handler(ce);
return IRQ_HANDLED;
}
static struct irqaction isa_timer_irq = {
.name = "ISA timer tick",
.handler = isa_timer_interrupt,
static struct irqaction pit_timer_irq = {
.name = "pit",
.handler = pit_timer_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
.dev_id = &pit_ce,
};
static void __init isa_timer_init(void)
{
/* enable PIT timer */
/* set for periodic (4) and LSB/MSB write (0x30) */
outb(0x34, 0x43);
outb((mSEC_10_from_14/6) & 0xFF, 0x40);
outb((mSEC_10_from_14/6) >> 8, 0x40);
pit_ce.cpumask = cpumask_of(smp_processor_id());
pit_ce.mult = div_sc(PIT_TICK_RATE, NSEC_PER_SEC, pit_ce.shift);
pit_ce.max_delta_ns = clockevent_delta2ns(0x7fff, &pit_ce);
pit_ce.min_delta_ns = clockevent_delta2ns(0x000f, &pit_ce);
setup_irq(IRQ_ISA_TIMER, &isa_timer_irq);
clocksource_register_hz(&pit_cs, PIT_TICK_RATE);
setup_irq(pit_ce.irq, &pit_timer_irq);
clockevents_register_device(&pit_ce);
}
struct sys_timer isa_timer = {
.init = isa_timer_init,
.offset = isa_gettimeoffset,
};
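PIT_LATCH above is a rounded integer division: the 8254 PIT counts down at PIT_TICK_RATE (1193182 Hz), so a HZ-periodic interrupt needs a reload value of PIT_TICK_RATE/HZ rounded to the nearest count, which pit_set_mode() writes low byte first after command 0x34 (channel 0, LSB/MSB access, mode 2, rate generator). A small standalone check of that arithmetic, with HZ=100 assumed purely for illustration:

#include <stdio.h>

#define PIT_TICK_RATE   1193182                         /* 8254 input clock, Hz */
#define HZ              100                             /* assumed for this example */
#define PIT_LATCH       ((PIT_TICK_RATE + HZ / 2) / HZ) /* rounded divide */

int main(void)
{
        printf("PIT_LATCH = %d -> low byte 0x%02x, high byte 0x%02x\n",
               PIT_LATCH, PIT_LATCH & 0xff, PIT_LATCH >> 8);
        printf("resulting tick rate ~ %.4f Hz\n", (double)PIT_TICK_RATE / PIT_LATCH);
        return 0;
}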

View file

@@ -98,6 +98,7 @@ static void __init ib4220b_init(void)
platform_register_pflash(SZ_16M, NULL, 0);
platform_device_register(&ib4220b_led_device);
platform_device_register(&ib4220b_key_device);
platform_register_rtc();
}
MACHINE_START(NAS4220B, "Raidsonic NAS IB-4220-B")

View file

@@ -82,6 +82,7 @@ static void __init rut1xx_init(void)
platform_register_pflash(SZ_8M, NULL, 0);
platform_device_register(&rut1xx_leds);
platform_device_register(&rut1xx_keys_device);
platform_register_rtc();
}
MACHINE_START(RUT100, "Teltonika RUT100")

View file

@@ -130,6 +130,7 @@ static void __init wbd111_init(void)
wbd111_num_partitions);
platform_device_register(&wbd111_leds_device);
platform_device_register(&wbd111_keys_device);
platform_register_rtc();
}
MACHINE_START(WBD111, "Wiliboard WBD-111")

View file

@@ -130,6 +130,7 @@ static void __init wbd222_init(void)
wbd222_num_partitions);
platform_device_register(&wbd222_leds_device);
platform_device_register(&wbd222_keys_device);
platform_register_rtc();
}
MACHINE_START(WBD222, "Wiliboard WBD-222")

View file

@@ -18,6 +18,7 @@ extern void gemini_map_io(void);
extern void gemini_init_irq(void);
extern void gemini_timer_init(void);
extern void gemini_gpio_init(void);
extern void platform_register_rtc(void);
/* Common platform devices registration functions */
extern int platform_register_uart(void);

View file

@@ -90,3 +90,29 @@ int platform_register_pflash(unsigned int size, struct mtd_partition *parts,
return platform_device_register(&pflash_device);
}
static struct resource gemini_rtc_resources[] = {
[0] = {
.start = GEMINI_RTC_BASE,
.end = GEMINI_RTC_BASE + 0x24,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_RTC,
.end = IRQ_RTC,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device gemini_rtc_device = {
.name = "rtc-gemini",
.id = 0,
.num_resources = ARRAY_SIZE(gemini_rtc_resources),
.resource = gemini_rtc_resources,
};
int __init platform_register_rtc(void)
{
return platform_device_register(&gemini_rtc_device);
}
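For context, a platform driver binds to this device purely by the "rtc-gemini" name and then retrieves the MEM and IRQ resources from it. The skeleton below is an illustrative sketch of that pattern only, not the actual rtc-gemini driver; the probe body and error handling are placeholders.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>

static int example_rtc_probe(struct platform_device *pdev)
{
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        int irq = platform_get_irq(pdev, 0);
        void __iomem *base;

        if (!res || irq < 0)
                return -ENODEV;

        base = ioremap(res->start, resource_size(res));
        if (!base)
                return -ENOMEM;

        /* ...register with the RTC class and request the interrupt here... */
        platform_set_drvdata(pdev, base);
        return 0;
}

static struct platform_driver example_rtc_driver = {
        .probe  = example_rtc_probe,
        .driver = {
                .name   = "rtc-gemini", /* must match the device registered above */
                .owner  = THIS_MODULE,
        },
};

static int __init example_rtc_init(void)
{
        return platform_driver_register(&example_rtc_driver);
}
module_init(example_rtc_init);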

View file

@@ -68,29 +68,29 @@ static void set_gpio_irqenable(struct mxs_gpio_port *port, u32 index,
}
}
static void mxs_gpio_ack_irq(u32 irq)
static void mxs_gpio_ack_irq(struct irq_data *d)
{
u32 gpio = irq_to_gpio(irq);
u32 gpio = irq_to_gpio(d->irq);
clear_gpio_irqstatus(&mxs_gpio_ports[gpio / 32], gpio & 0x1f);
}
static void mxs_gpio_mask_irq(u32 irq)
static void mxs_gpio_mask_irq(struct irq_data *d)
{
u32 gpio = irq_to_gpio(irq);
u32 gpio = irq_to_gpio(d->irq);
set_gpio_irqenable(&mxs_gpio_ports[gpio / 32], gpio & 0x1f, 0);
}
static void mxs_gpio_unmask_irq(u32 irq)
static void mxs_gpio_unmask_irq(struct irq_data *d)
{
u32 gpio = irq_to_gpio(irq);
u32 gpio = irq_to_gpio(d->irq);
set_gpio_irqenable(&mxs_gpio_ports[gpio / 32], gpio & 0x1f, 1);
}
static int mxs_gpio_get(struct gpio_chip *chip, unsigned offset);
static int mxs_gpio_set_irq_type(u32 irq, u32 type)
static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
{
u32 gpio = irq_to_gpio(irq);
u32 gpio = irq_to_gpio(d->irq);
u32 pin_mask = 1 << (gpio & 31);
struct mxs_gpio_port *port = &mxs_gpio_ports[gpio / 32];
void __iomem *pin_addr;
@@ -160,9 +160,9 @@ static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
* @param enable enable as wake-up if equal to non-zero
* @return This function returns 0 on success.
*/
static int mxs_gpio_set_wake_irq(u32 irq, u32 enable)
static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable)
{
u32 gpio = irq_to_gpio(irq);
u32 gpio = irq_to_gpio(d->irq);
u32 gpio_idx = gpio & 0x1f;
struct mxs_gpio_port *port = &mxs_gpio_ports[gpio / 32];
@@ -182,11 +182,11 @@ static int mxs_gpio_set_wake_irq(u32 irq, u32 enable)
}
static struct irq_chip gpio_irq_chip = {
.ack = mxs_gpio_ack_irq,
.mask = mxs_gpio_mask_irq,
.unmask = mxs_gpio_unmask_irq,
.set_type = mxs_gpio_set_irq_type,
.set_wake = mxs_gpio_set_wake_irq,
.irq_ack = mxs_gpio_ack_irq,
.irq_mask = mxs_gpio_mask_irq,
.irq_unmask = mxs_gpio_unmask_irq,
.irq_set_type = mxs_gpio_set_irq_type,
.irq_set_wake = mxs_gpio_set_wake_irq,
};
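The hunks above follow the generic genirq conversion pattern: each irq_chip callback now receives a struct irq_data * and reads the interrupt number from it, and the corresponding struct fields gain an irq_ prefix. The userspace mock below only demonstrates that calling convention; struct irq_data and hw_mask() here are stand-ins, not the kernel definitions.

#include <stdio.h>

struct irq_data { unsigned int irq; };          /* stand-in for the kernel's */

static void hw_mask(unsigned int hwirq)         /* hypothetical hardware op */
{
        printf("mask hwirq %u\n", hwirq);
}

/* old style: static void my_mask_irq(unsigned int irq) { hw_mask(irq); } */

/* new style, as in the hunks above */
static void my_mask_irq(struct irq_data *d)
{
        hw_mask(d->irq);
}

int main(void)
{
        struct irq_data d = { .irq = 42 };
        my_mask_irq(&d);
        return 0;
}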
static void mxs_set_gpio_direction(struct gpio_chip *chip, unsigned offset,

View file

@@ -34,7 +34,7 @@
static void __iomem *icoll_base = MXS_IO_ADDRESS(MXS_ICOLL_BASE_ADDR);
static void icoll_ack_irq(unsigned int irq)
static void icoll_ack_irq(struct irq_data *d)
{
/*
* The Interrupt Collector is able to prioritize irqs.
@@ -45,22 +45,22 @@ static void icoll_ack_irq(unsigned int irq)
icoll_base + HW_ICOLL_LEVELACK);
}
static void icoll_mask_irq(unsigned int irq)
static void icoll_mask_irq(struct irq_data *d)
{
__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
icoll_base + HW_ICOLL_INTERRUPTn_CLR(irq));
icoll_base + HW_ICOLL_INTERRUPTn_CLR(d->irq));
}
static void icoll_unmask_irq(unsigned int irq)
static void icoll_unmask_irq(struct irq_data *d)
{
__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
icoll_base + HW_ICOLL_INTERRUPTn_SET(irq));
icoll_base + HW_ICOLL_INTERRUPTn_SET(d->irq));
}
static struct irq_chip mxs_icoll_chip = {
.ack = icoll_ack_irq,
.mask = icoll_mask_irq,
.unmask = icoll_unmask_irq,
.irq_ack = icoll_ack_irq,
.irq_mask = icoll_mask_irq,
.irq_unmask = icoll_unmask_irq,
};
void __init icoll_init_irq(void)

View file

@@ -123,9 +123,9 @@ extern void allow_idle_sleep(void);
extern void omap1_pm_idle(void);
extern void omap1_pm_suspend(void);
extern void omap7xx_cpu_suspend(unsigned short, unsigned short);
extern void omap1510_cpu_suspend(unsigned short, unsigned short);
extern void omap1610_cpu_suspend(unsigned short, unsigned short);
extern void omap7xx_cpu_suspend(unsigned long, unsigned long);
extern void omap1510_cpu_suspend(unsigned long, unsigned long);
extern void omap1610_cpu_suspend(unsigned long, unsigned long);
extern void omap7xx_idle_loop_suspend(void);
extern void omap1510_idle_loop_suspend(void);
extern void omap1610_idle_loop_suspend(void);

View file

@@ -58,6 +58,7 @@
*/
#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
.align 3
ENTRY(omap7xx_cpu_suspend)
@ save registers on stack
@@ -137,6 +138,7 @@ ENTRY(omap7xx_cpu_suspend_sz)
#endif /* CONFIG_ARCH_OMAP730 || CONFIG_ARCH_OMAP850 */
#ifdef CONFIG_ARCH_OMAP15XX
.align 3
ENTRY(omap1510_cpu_suspend)
@ save registers on stack
@@ -211,6 +213,7 @@ ENTRY(omap1510_cpu_suspend_sz)
#endif /* CONFIG_ARCH_OMAP15XX */
#if defined(CONFIG_ARCH_OMAP16XX)
.align 3
ENTRY(omap1610_cpu_suspend)
@ save registers on stack

View file

@@ -18,6 +18,7 @@
/*
* Reprograms ULPD and CKCTL.
*/
.align 3
ENTRY(omap1_sram_reprogram_clock)
stmfd sp!, {r0 - r12, lr} @ save registers on stack
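The .align 3 directives added here and in the previous file pad each of these SRAM-resident routines to an 8-byte boundary, which appears to be what the new fncpy() copy helper demands of both its source and destination. The trivial check below restates that constraint as an assumption; FNCPY_ALIGN is not taken from the kernel header here.

#include <stdint.h>
#include <stdio.h>

#define FNCPY_ALIGN 8   /* assumed alignment requirement */

static int fncpy_aligned(uintptr_t addr)
{
        return (addr & (FNCPY_ALIGN - 1)) == 0;
}

int main(void)
{
        printf("0x40000000 aligned: %d\n", fncpy_aligned(0x40000000));
        printf("0x40000004 aligned: %d\n", fncpy_aligned(0x40000004));
        return 0;
}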

View file

@@ -77,7 +77,7 @@ static int _dpll_test_fint(struct clk *clk, u8 n)
dd = clk->dpll_data;
/* DPLL divider must result in a valid jitter correction val */
fint = clk->parent->rate / (n + 1);
fint = clk->parent->rate / n;
if (fint < DPLL_FINT_BAND1_MIN) {
pr_debug("rejecting n=%d due to Fint failure, "

View file

@@ -193,10 +193,12 @@ static void omap2_mbox_disable_irq(
omap_mbox_type_t irq)
{
struct omap_mbox2_priv *p = mbox->priv;
u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
l = mbox_read_reg(p->irqdisable);
l &= ~bit;
mbox_write_reg(l, p->irqdisable);
u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
if (!cpu_is_omap44xx())
bit = mbox_read_reg(p->irqdisable) & ~bit;
mbox_write_reg(bit, p->irqdisable);
}
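The rewritten omap2_mbox_disable_irq() above skips the read-modify-write on OMAP4, which suggests the OMAP4 irqdisable register behaves as write-to-clear (set bits clear the matching enables) rather than as a plain enable mask. The mock below sketches why that distinction matters; the register semantics are an assumption, not taken from the TRM.

#include <stdint.h>
#include <stdio.h>

static uint32_t enable_mask = 0x0f;     /* stand-in for the hardware enable state */

/* plain enable register: the written value becomes the new mask */
static void write_enable(uint32_t val)          { enable_mask = val; }
static uint32_t read_reg(void)                  { return enable_mask; }

/* write-to-clear disable register: set bits clear the matching enables */
static void write_disable_w1c(uint32_t val)     { enable_mask &= ~val; }

int main(void)
{
        uint32_t bit = 1 << 1;

        /* pre-OMAP4 path: read-modify-write a plain enable mask */
        write_enable(read_reg() & ~bit);
        printf("rmw on plain reg : %#x\n", enable_mask);        /* 0xd */

        /* OMAP4 path: write just the bit to the w1c disable register */
        enable_mask = 0x0f;
        write_disable_w1c(bit);
        printf("write on w1c reg : %#x\n", enable_mask);        /* 0xd */

        /* the old read-modify-write applied to a w1c register goes wrong */
        enable_mask = 0x0f;
        write_disable_w1c(read_reg() & ~bit);
        printf("rmw on w1c reg   : %#x\n", enable_mask);        /* 0x2 */
        return 0;
}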
static void omap2_mbox_ack_irq(struct omap_mbox *mbox,
@@ -334,7 +336,7 @@ static struct omap_mbox mbox_iva_info = {
.priv = &omap2_mbox_iva_priv,
};
struct omap_mbox *omap2_mboxes[] = { &mbox_iva_info, &mbox_dsp_info, NULL };
struct omap_mbox *omap2_mboxes[] = { &mbox_dsp_info, &mbox_iva_info, NULL };
#endif
#if defined(CONFIG_ARCH_OMAP4)

View file

@@ -605,7 +605,7 @@ static void __init omap_mux_dbg_create_entry(
list_for_each_entry(e, &partition->muxmodes, node) {
struct omap_mux *m = &e->mux;
(void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir,
(void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir,
m, &omap_mux_dbg_signal_fops);
}
}

View file

@@ -637,14 +637,14 @@ static int __init pm_dbg_init(void)
}
(void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUGO, d,
(void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d,
&enable_off_mode, &pm_dbg_option_fops);
(void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUGO, d,
(void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d,
&sleep_while_idle, &pm_dbg_option_fops);
(void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUGO, d,
(void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d,
&wakeup_timer_seconds, &pm_dbg_option_fops);
(void) debugfs_create_file("wakeup_timer_milliseconds",
S_IRUGO | S_IWUGO, d, &wakeup_timer_milliseconds,
S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds,
&pm_dbg_option_fops);
pm_dbg_init_done = 1;
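The permission changes above (and in the mux debugfs hunk before this file) drop the group/other write bits, so these debugfs control files go from mode 0666 to 0644, i.e. writable by the owner (root) only. A small userspace check of the octal values, with S_IRUGO/S_IWUGO expanded because they are kernel-only macros:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        unsigned int irugo = S_IRUSR | S_IRGRP | S_IROTH;       /* kernel S_IRUGO */
        unsigned int iwugo = S_IWUSR | S_IWGRP | S_IWOTH;       /* kernel S_IWUGO */

        printf("S_IRUGO | S_IWUGO = %04o\n", irugo | iwugo);    /* 0666 */
        printf("S_IRUGO | S_IWUSR = %04o\n", irugo | S_IWUSR);  /* 0644 */
        return 0;
}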

View file

@@ -92,7 +92,7 @@ extern void omap24xx_idle_loop_suspend(void);
extern void omap24xx_cpu_suspend(u32 dll_ctrl, void __iomem *sdrc_dlla_ctrl,
void __iomem *sdrc_power);
extern void omap34xx_cpu_suspend(u32 *addr, int save_state);
extern void save_secure_ram_context(u32 *addr);
extern int save_secure_ram_context(u32 *addr);
extern void omap3_save_scratchpad_contents(void);
extern unsigned int omap24xx_idle_loop_suspend_sz;

View file

@@ -38,8 +38,8 @@
#define OMAP4430_PRCM_MPU_CPU1_INST 0x0800
/* PRCM_MPU clockdomain register offsets (from instance start) */
#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0000
#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0000
#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0018
#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0018
/*

Some files were not shown because too many files have changed in this diff.