Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:

	drivers/net/e1000e/ich8lan.c
David S. Miller 2008-12-15 20:03:50 -08:00
commit eb14f01959
197 changed files with 3874 additions and 2665 deletions

@ -24,7 +24,7 @@ real bad - it changes the behaviour of all unaligned instructions in user
space, and might cause programs to fail unexpectedly.
To change the alignment trap behavior, simply echo a number into
/proc/sys/debug/alignment. The number is made up from various bits:
/proc/cpu/alignment. The number is made up from various bits:
bit behavior when set
--- -----------------
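
The bits written to this file select how the kernel reacts to an unaligned user-space access; they correspond to the UM_WARN (1), UM_FIXUP (2) and UM_SIGNAL (4) flags introduced further down in this merge. A minimal user-space sketch of driving the renamed file (everything except the path is illustrative):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/cpu/alignment", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* 1 = warn, 2 = fix up, 4 = send SIGBUS; e.g. 3 = warn + fix up */
	fputs("3\n", f);
	fclose(f);
	return 0;
}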

@ -149,7 +149,7 @@ static void do_test_timer(unsigned long data)
int cpu;
/* Increment the counters */
on_each_cpu(test_each, NULL, 0, 1);
on_each_cpu(test_each, NULL, 1);
/* Read all the counters */
printk("Counters read from CPU %d\n", smp_processor_id());
for_each_online_cpu(cpu) {
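
The hunk above follows the on_each_cpu() signature change: the unused "retry" argument was dropped, leaving the callback, its argument, and a wait flag. A sketch of the new calling convention, reusing the test_each() callback from this hunk:

#include <linux/smp.h>

static void test_each(void *info)
{
	/* runs once on every online CPU */
}

static void run_on_all_cpus(void)
{
	/* final argument: 1 = wait until every CPU has run the callback */
	on_each_cpu(test_each, NULL, 1);
}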

@ -1527,10 +1527,10 @@ W: http://ebtables.sourceforge.net/
S: Maintained
ECRYPT FILE SYSTEM
P: Mike Halcrow, Phillip Hellewell
M: mhalcrow@us.ibm.com, phillip@hellewell.homeip.net
L: ecryptfs-devel@lists.sourceforge.net
W: http://ecryptfs.sourceforge.net/
P: Tyler Hicks, Dustin Kirkland
M: tyhicks@linux.vnet.ibm.com, kirkland@canonical.com
L: ecryptfs-devel@lists.launchpad.net
W: https://launchpad.net/ecryptfs
S: Supported
EDAC-CORE
@ -3764,6 +3764,15 @@ M: drzeus-sdhci@drzeus.cx
L: sdhci-devel@list.drzeus.cx
S: Maintained
SECURITY SUBSYSTEM
F: security/
P: James Morris
M: jmorris@namei.org
L: linux-kernel@vger.kernel.org
L: linux-security-module@vger.kernel.org (suggested Cc:)
T: git kernel.org:pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git
S: Supported
SECURITY CONTACT
P: Security Officers
M: security@kernel.org

@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 28
EXTRAVERSION = -rc7
EXTRAVERSION = -rc8
NAME = Erotic Pickled Herring
# *DOCUMENTATION*

@ -630,7 +630,7 @@ __sa1111_probe(struct device *me, struct resource *mem, int irq)
return -ENOMEM;
sachip->clk = clk_get(me, "SA1111_CLK");
if (!sachip->clk) {
if (IS_ERR(sachip->clk)) {
ret = PTR_ERR(sachip->clk);
goto err_free;
}

@ -237,6 +237,7 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#if __LINUX_ARM_ARCH__ < 5
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/ffs.h>
@ -277,16 +278,19 @@ static inline int constant_fls(int x)
* the clz instruction for much better code efficiency.
*/
#define __fls(x) \
( __builtin_constant_p(x) ? constant_fls(x) : \
({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) )
/* Implement fls() in C so that 64-bit args are suitably truncated */
static inline int fls(int x)
{
return __fls(x);
int ret;
if (__builtin_constant_p(x))
return constant_fls(x);
asm("clz\t%0, %1" : "=r" (ret) : "r" (x) : "cc");
ret = 32 - ret;
return ret;
}
#define __fls(x) (fls(x) - 1)
#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
#define __ffs(x) (ffs(x) - 1)
#define ffz(x) __ffs( ~(x) )
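
With fls() now a real C function (so 64-bit arguments are truncated to int), the derived macros keep the usual kernel conventions: fls()/ffs() are 1-based and return 0 when no bit qualifies, while __fls()/__ffs() are 0-based. An illustrative self-check, not part of the patch:

#include <linux/bitops.h>
#include <linux/bug.h>

static void bitops_sanity_check(void)
{
	BUG_ON(fls(0x10) != 5);		/* highest set bit, 1-based */
	BUG_ON(__fls(0x10) != 4);	/* highest set bit, 0-based */
	BUG_ON(ffs(0x18) != 4);		/* lowest set bit, 1-based */
	BUG_ON(__ffs(0x18) != 3);	/* lowest set bit, 0-based */
	BUG_ON(ffz(0x07) != 3);		/* lowest clear bit, 0-based */
}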

@ -23,7 +23,7 @@
#include <asm/types.h>
#ifdef __KERNEL__
#define STACK_TOP ((current->personality == PER_LINUX_32BIT) ? \
#define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \
TASK_SIZE : TASK_SIZE_26)
#define STACK_TOP_MAX TASK_SIZE
#endif

@ -115,6 +115,8 @@ EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL(__strncpy_from_user);
#ifdef CONFIG_MMU
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__clear_user);
@ -181,8 +183,6 @@ EXPORT_SYMBOL(_find_first_bit_be);
EXPORT_SYMBOL(_find_next_bit_be);
#endif
EXPORT_SYMBOL(copy_page);
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(mcount);
#endif

@ -18,6 +18,7 @@
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/uaccess.h>

@ -128,7 +128,7 @@ void __init omap1_map_common_io(void)
* Common low-level hardware init for omap1. This should only get called from
* board specific init.
*/
void __init omap1_init_common_hw()
void __init omap1_init_common_hw(void)
{
/* REVISIT: Refer to OMAP5910 Errata, Advisory SYS_1: "Timeout Abort
* on a Posted Write in the TIPB Bridge".

@ -12,9 +12,8 @@ extern void clear_reset_status(unsigned int mask);
/**
* init_gpio_reset() - register GPIO as reset generator
*
* @gpio - gpio nr
* @output - set gpio as out/low instead of input during normal work
* @gpio: gpio nr
* @output: set gpio as out/low instead of input during normal work
*/
extern int init_gpio_reset(int gpio, int output);

@ -70,6 +70,10 @@ static unsigned long ai_dword;
static unsigned long ai_multi;
static int ai_usermode;
#define UM_WARN (1 << 0)
#define UM_FIXUP (1 << 1)
#define UM_SIGNAL (1 << 2)
#ifdef CONFIG_PROC_FS
static const char *usermode_action[] = {
"ignored",
@ -754,7 +758,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
user:
ai_user += 1;
if (ai_usermode & 1)
if (ai_usermode & UM_WARN)
printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
"Address=0x%08lx FSR 0x%03x\n", current->comm,
task_pid_nr(current), instrptr,
@ -762,10 +766,10 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
thumb_mode(regs) ? tinstr : instr,
addr, fsr);
if (ai_usermode & 2)
if (ai_usermode & UM_FIXUP)
goto fixup;
if (ai_usermode & 4)
if (ai_usermode & UM_SIGNAL)
force_sig(SIGBUS, current);
else
set_cr(cr_no_alignment);
@ -796,6 +800,22 @@ static int __init alignment_init(void)
res->write_proc = proc_alignment_write;
#endif
/*
* ARMv6 and later CPUs can perform unaligned accesses for
* most single load and store instructions up to word size.
* LDM, STM, LDRD and STRD still need to be handled.
*
* Ignoring the alignment fault is not an option on these
* CPUs since we spin re-faulting the instruction without
* making any progress.
*/
if (cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U)) {
cr_alignment &= ~CR_A;
cr_no_alignment &= ~CR_A;
set_cr(cr_alignment);
ai_usermode = UM_FIXUP;
}
hook_fault_code(1, do_alignment, SIGILL, "alignment exception");
hook_fault_code(3, do_alignment, SIGILL, "alignment exception");

@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>

@ -353,8 +353,8 @@ struct omapfb_device {
u32 pseudo_palette[17];
struct lcd_panel *panel; /* LCD panel */
struct lcd_ctrl *ctrl; /* LCD controller */
struct lcd_ctrl *int_ctrl; /* internal LCD ctrl */
const struct lcd_ctrl *ctrl; /* LCD controller */
const struct lcd_ctrl *int_ctrl; /* internal LCD ctrl */
struct lcd_ctrl_extif *ext_if; /* LCD ctrl external
interface */
struct device *dev;

@ -255,7 +255,7 @@ void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl)
if (!_omap_sram_reprogram_clock)
omap_sram_error();
return _omap_sram_reprogram_clock(dpllctl, ckctl);
_omap_sram_reprogram_clock(dpllctl, ckctl);
}
int __init omap1_sram_init(void)
@ -282,8 +282,8 @@ void omap2_sram_ddr_init(u32 *slow_dll_ctrl, u32 fast_dll_ctrl,
if (!_omap2_sram_ddr_init)
omap_sram_error();
return _omap2_sram_ddr_init(slow_dll_ctrl, fast_dll_ctrl,
base_cs, force_unlock);
_omap2_sram_ddr_init(slow_dll_ctrl, fast_dll_ctrl,
base_cs, force_unlock);
}
static void (*_omap2_sram_reprogram_sdrc)(u32 perf_level, u32 dll_val,
@ -294,7 +294,7 @@ void omap2_sram_reprogram_sdrc(u32 perf_level, u32 dll_val, u32 mem_type)
if (!_omap2_sram_reprogram_sdrc)
omap_sram_error();
return _omap2_sram_reprogram_sdrc(perf_level, dll_val, mem_type);
_omap2_sram_reprogram_sdrc(perf_level, dll_val, mem_type);
}
static u32 (*_omap2_set_prcm)(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass);

@ -35,7 +35,7 @@
#define PCIE_CONF_REG(r) ((((r) & 0xf00) << 16) | ((r) & 0xfc))
#define PCIE_CONF_BUS(b) (((b) & 0xff) << 16)
#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 11)
#define PCIE_CONF_FUNC(f) (((f) & 0x3) << 8)
#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 8)
#define PCIE_CONF_DATA_OFF 0x18fc
#define PCIE_MASK_OFF 0x1910
#define PCIE_CTRL_OFF 0x1a00
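
PCI functions are numbered 0-7 and need all three bits, so the old 0x3 mask silently aliased functions 4-7 onto 0-3. Worked arithmetic as a stand-alone sketch (macros copied from the hunk):

#include <stdio.h>

#define PCIE_CONF_FUNC_OLD(f)	(((f) & 0x3) << 8)
#define PCIE_CONF_FUNC_NEW(f)	(((f) & 0x7) << 8)

int main(void)
{
	/* function 5: the old macro encoded it as function 1 */
	printf("old: 0x%x\n", PCIE_CONF_FUNC_OLD(5));	/* prints 0x100 */
	printf("new: 0x%x\n", PCIE_CONF_FUNC_NEW(5));	/* prints 0x500 */
	return 0;
}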

@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.27-rc1
# Mon Aug 4 15:38:01 2008
# Linux kernel version: 2.6.28-rc7
# Mon Dec 8 08:12:07 2008
#
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
@ -26,6 +26,7 @@ CONFIG_LOG_BUF_SHIFT=20
CONFIG_CGROUPS=y
# CONFIG_CGROUP_DEBUG is not set
# CONFIG_CGROUP_NS is not set
# CONFIG_CGROUP_FREEZER is not set
# CONFIG_CGROUP_DEVICE is not set
CONFIG_CPUSETS=y
# CONFIG_GROUP_SCHED is not set
@ -46,7 +47,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
# CONFIG_EMBEDDED is not set
CONFIG_SYSCTL_SYSCALL=y
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
@ -63,7 +63,9 @@ CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
CONFIG_SLUB_DEBUG=y
# CONFIG_SLAB is not set
CONFIG_SLUB=y
@ -72,15 +74,11 @@ CONFIG_SLUB=y
# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
# CONFIG_HAVE_IOREMAP_PROT is not set
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
# CONFIG_HAVE_ARCH_TRACEHOOK is not set
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_USE_GENERIC_SMP_HELPERS=y
# CONFIG_HAVE_CLK is not set
CONFIG_PROC_PAGE_MONITOR=y
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
@ -113,6 +111,7 @@ CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="anticipatory"
CONFIG_CLASSIC_RCU=y
# CONFIG_FREEZER is not set
#
# Processor type and features
@ -125,8 +124,6 @@ CONFIG_MMU=y
CONFIG_SWIOTLB=y
CONFIG_IOMMU_HELPER=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_HUGETLB_PAGE_SIZE_VARIABLE=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
@ -139,13 +136,16 @@ CONFIG_GENERIC_IOMAP=y
CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
CONFIG_IA64_UNCACHED_ALLOCATOR=y
CONFIG_AUDIT_ARCH=y
# CONFIG_PARAVIRT_GUEST is not set
CONFIG_IA64_GENERIC=y
# CONFIG_IA64_DIG is not set
# CONFIG_IA64_DIG_VTD is not set
# CONFIG_IA64_HP_ZX1 is not set
# CONFIG_IA64_HP_ZX1_SWIOTLB is not set
# CONFIG_IA64_SGI_SN2 is not set
# CONFIG_IA64_SGI_UV is not set
# CONFIG_IA64_HP_SIM is not set
# CONFIG_IA64_XEN_GUEST is not set
# CONFIG_ITANIUM is not set
CONFIG_MCKINLEY=y
# CONFIG_IA64_PAGE_SIZE_4KB is not set
@ -182,16 +182,17 @@ CONFIG_DISCONTIGMEM_MANUAL=y
CONFIG_DISCONTIGMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_NEED_MULTIPLE_NODES=y
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
CONFIG_RESOURCES_64BIT=y
CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
CONFIG_NR_QUICK=1
CONFIG_VIRT_TO_BUS=y
CONFIG_UNEVICTABLE_LRU=y
CONFIG_MMU_NOTIFIER=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
@ -231,12 +232,12 @@ CONFIG_EFI_VARS=y
CONFIG_EFI_PCDP=y
CONFIG_DMIID=y
CONFIG_BINFMT_ELF=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_HAVE_AOUT is not set
CONFIG_BINFMT_MISC=m
# CONFIG_DMAR is not set
#
# Power management and ACPI
# Power management and ACPI options
#
CONFIG_PM=y
# CONFIG_PM_DEBUG is not set
@ -248,7 +249,6 @@ CONFIG_ACPI_PROC_EVENT=y
CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_DOCK=y
# CONFIG_ACPI_BAY is not set
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_HOTPLUG_CPU=y
CONFIG_ACPI_THERMAL=m
@ -256,9 +256,7 @@ CONFIG_ACPI_NUMA=y
# CONFIG_ACPI_CUSTOM_DSDT is not set
CONFIG_ACPI_BLACKLIST_YEAR=0
# CONFIG_ACPI_DEBUG is not set
CONFIG_ACPI_EC=y
# CONFIG_ACPI_PCI_SLOT is not set
CONFIG_ACPI_POWER=y
CONFIG_ACPI_SYSTEM=y
CONFIG_ACPI_CONTAINER=m
@ -275,7 +273,7 @@ CONFIG_PCI_DOMAINS=y
CONFIG_PCI_SYSCALL=y
# CONFIG_PCIEPORTBUS is not set
CONFIG_ARCH_SUPPORTS_MSI=y
# CONFIG_PCI_MSI is not set
CONFIG_PCI_MSI=y
CONFIG_PCI_LEGACY=y
# CONFIG_PCI_DEBUG is not set
CONFIG_HOTPLUG_PCI=m
@ -286,6 +284,7 @@ CONFIG_HOTPLUG_PCI_ACPI=m
# CONFIG_HOTPLUG_PCI_SHPC is not set
# CONFIG_HOTPLUG_PCI_SGI is not set
# CONFIG_PCCARD is not set
CONFIG_DMAR=y
CONFIG_NET=y
#
@ -333,6 +332,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
@ -353,11 +353,10 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
#
# Wireless
#
# CONFIG_PHONET is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
CONFIG_WIRELESS_OLD_REGULATORY=y
# CONFIG_WIRELESS_EXT is not set
# CONFIG_MAC80211 is not set
# CONFIG_IEEE80211 is not set
@ -385,7 +384,7 @@ CONFIG_PROC_EVENTS=y
# CONFIG_MTD is not set
# CONFIG_PARPORT is not set
CONFIG_PNP=y
# CONFIG_PNP_DEBUG is not set
# CONFIG_PNP_DEBUG_MESSAGES is not set
#
# Protocols
@ -419,10 +418,9 @@ CONFIG_SGI_XP=m
# CONFIG_HP_ILO is not set
CONFIG_SGI_GRU=m
# CONFIG_SGI_GRU_DEBUG is not set
# CONFIG_C2PORT is not set
CONFIG_HAVE_IDE=y
CONFIG_IDE=y
CONFIG_IDE_MAX_HWIFS=4
CONFIG_BLK_DEV_IDE=y
#
# Please see Documentation/ide/ide.txt for help/info on IDE drives
@ -430,12 +428,12 @@ CONFIG_BLK_DEV_IDE=y
CONFIG_IDE_TIMINGS=y
CONFIG_IDE_ATAPI=y
# CONFIG_BLK_DEV_IDE_SATA is not set
CONFIG_BLK_DEV_IDEDISK=y
# CONFIG_IDEDISK_MULTI_MODE is not set
CONFIG_IDE_GD=y
CONFIG_IDE_GD_ATA=y
# CONFIG_IDE_GD_ATAPI is not set
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y
# CONFIG_BLK_DEV_IDETAPE is not set
CONFIG_BLK_DEV_IDEFLOPPY=y
CONFIG_BLK_DEV_IDESCSI=m
# CONFIG_BLK_DEV_IDEACPI is not set
# CONFIG_IDE_TASK_IOCTL is not set
@ -705,6 +703,9 @@ CONFIG_TULIP=m
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
@ -725,11 +726,11 @@ CONFIG_E100=m
# CONFIG_TLAN is not set
# CONFIG_VIA_RHINE is not set
# CONFIG_SC92031 is not set
# CONFIG_ATL2 is not set
CONFIG_NETDEV_1000=y
# CONFIG_ACENIC is not set
# CONFIG_DL2K is not set
CONFIG_E1000=y
# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
# CONFIG_E1000E is not set
# CONFIG_IP1000 is not set
CONFIG_IGB=y
@ -747,18 +748,22 @@ CONFIG_TIGON3=y
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
# CONFIG_ATL1E is not set
# CONFIG_JME is not set
CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
# CONFIG_CHELSIO_T3 is not set
# CONFIG_ENIC is not set
# CONFIG_IXGBE is not set
# CONFIG_IXGB is not set
# CONFIG_S2IO is not set
# CONFIG_MYRI10GE is not set
# CONFIG_NETXEN_NIC is not set
# CONFIG_NIU is not set
# CONFIG_MLX4_EN is not set
# CONFIG_MLX4_CORE is not set
# CONFIG_TEHUTI is not set
# CONFIG_BNX2X is not set
# CONFIG_QLGE is not set
# CONFIG_SFC is not set
# CONFIG_TR is not set
@ -826,9 +831,11 @@ CONFIG_MOUSE_PS2_LOGIPS2PP=y
CONFIG_MOUSE_PS2_SYNAPTICS=y
CONFIG_MOUSE_PS2_LIFEBOOK=y
CONFIG_MOUSE_PS2_TRACKPOINT=y
# CONFIG_MOUSE_PS2_ELANTECH is not set
# CONFIG_MOUSE_PS2_TOUCHKIT is not set
# CONFIG_MOUSE_SERIAL is not set
# CONFIG_MOUSE_APPLETOUCH is not set
# CONFIG_MOUSE_BCM5974 is not set
# CONFIG_MOUSE_VSXXXAA is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
@ -942,15 +949,16 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_VT8231 is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_SENSORS_LIS3LV02D is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
CONFIG_THERMAL=m
# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set
#
@ -959,6 +967,8 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_REGULATOR is not set
#
# Multimedia devices
@ -1009,6 +1019,7 @@ CONFIG_VGA_CONSOLE=y
# CONFIG_VGACON_SOFT_SCROLLBACK is not set
CONFIG_DUMMY_CONSOLE=y
CONFIG_SOUND=m
CONFIG_SOUND_OSS_CORE=y
CONFIG_SND=m
CONFIG_SND_TIMER=m
CONFIG_SND_PCM=m
@ -1113,8 +1124,7 @@ CONFIG_HID=y
# USB Input Devices
#
CONFIG_USB_HID=m
# CONFIG_USB_HIDINPUT_POWERBOOK is not set
# CONFIG_HID_FF is not set
# CONFIG_HID_PID is not set
# CONFIG_USB_HIDDEV is not set
#
@ -1122,6 +1132,34 @@ CONFIG_USB_HID=m
#
# CONFIG_USB_KBD is not set
# CONFIG_USB_MOUSE is not set
#
# Special HID drivers
#
CONFIG_HID_COMPAT=y
CONFIG_HID_A4TECH=m
CONFIG_HID_APPLE=m
CONFIG_HID_BELKIN=m
CONFIG_HID_BRIGHT=m
CONFIG_HID_CHERRY=m
CONFIG_HID_CHICONY=m
CONFIG_HID_CYPRESS=m
CONFIG_HID_DELL=m
CONFIG_HID_EZKEY=m
CONFIG_HID_GYRATION=m
CONFIG_HID_LOGITECH=m
# CONFIG_LOGITECH_FF is not set
# CONFIG_LOGIRUMBLEPAD2_FF is not set
CONFIG_HID_MICROSOFT=m
CONFIG_HID_MONTEREY=m
CONFIG_HID_PANTHERLORD=m
# CONFIG_PANTHERLORD_FF is not set
CONFIG_HID_PETALYNX=m
CONFIG_HID_SAMSUNG=m
CONFIG_HID_SONY=m
CONFIG_HID_SUNPLUS=m
# CONFIG_THRUSTMASTER_FF is not set
# CONFIG_ZEROPLUS_FF is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
@ -1138,6 +1176,9 @@ CONFIG_USB_DEVICE_CLASS=y
# CONFIG_USB_DYNAMIC_MINORS is not set
# CONFIG_USB_SUSPEND is not set
# CONFIG_USB_OTG is not set
CONFIG_USB_MON=y
# CONFIG_USB_WUSB is not set
# CONFIG_USB_WUSB_CBAF is not set
#
# USB Host Controller Drivers
@ -1155,6 +1196,12 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
CONFIG_USB_UHCI_HCD=m
# CONFIG_USB_SL811_HCD is not set
# CONFIG_USB_R8A66597_HCD is not set
# CONFIG_USB_WHCI_HCD is not set
# CONFIG_USB_HWA_HCD is not set
#
# Enable Host or Gadget support to see Inventra options
#
#
# USB Device Class drivers
@ -1162,13 +1209,14 @@ CONFIG_USB_UHCI_HCD=m
# CONFIG_USB_ACM is not set
# CONFIG_USB_PRINTER is not set
# CONFIG_USB_WDM is not set
# CONFIG_USB_TMC is not set
#
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
#
#
# may also be needed; see USB_STORAGE Help for more information
# see USB_STORAGE Help for more information
#
CONFIG_USB_STORAGE=m
# CONFIG_USB_STORAGE_DEBUG is not set
@ -1191,7 +1239,6 @@ CONFIG_USB_STORAGE=m
#
# CONFIG_USB_MDC800 is not set
# CONFIG_USB_MICROTEK is not set
CONFIG_USB_MON=y
#
# USB port drivers
@ -1204,7 +1251,7 @@ CONFIG_USB_MON=y
# CONFIG_USB_EMI62 is not set
# CONFIG_USB_EMI26 is not set
# CONFIG_USB_ADUTUX is not set
# CONFIG_USB_AUERSWALD is not set
# CONFIG_USB_SEVSEG is not set
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
@ -1222,7 +1269,9 @@ CONFIG_USB_MON=y
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
# CONFIG_UWB is not set
# CONFIG_MMC is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
@ -1246,6 +1295,15 @@ CONFIG_INFINIBAND_IPOIB_DEBUG=y
# CONFIG_RTC_CLASS is not set
# CONFIG_DMADEVICES is not set
# CONFIG_UIO is not set
# CONFIG_STAGING is not set
CONFIG_STAGING_EXCLUDE_BUILD=y
#
# HP Simulator drivers
#
# CONFIG_HP_SIMETH is not set
# CONFIG_HP_SIMSERIAL is not set
# CONFIG_HP_SIMSCSI is not set
CONFIG_MSPEC=m
#
@ -1260,7 +1318,7 @@ CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_XATTR=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
# CONFIG_EXT4DEV_FS is not set
# CONFIG_EXT4_FS is not set
CONFIG_JBD=y
CONFIG_FS_MBCACHE=y
CONFIG_REISERFS_FS=y
@ -1271,6 +1329,7 @@ CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
CONFIG_FILE_LOCKING=y
CONFIG_XFS_FS=y
# CONFIG_XFS_QUOTA is not set
# CONFIG_XFS_POSIX_ACL is not set
@ -1282,8 +1341,8 @@ CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
CONFIG_AUTOFS_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
# CONFIG_FUSE_FS is not set
#
@ -1314,6 +1373,7 @@ CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_PROC_VMCORE=y
CONFIG_PROC_SYSCTL=y
CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
@ -1356,6 +1416,7 @@ CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=m
CONFIG_SUNRPC_GSS=m
CONFIG_SUNRPC_XPRT_RDMA=m
# CONFIG_SUNRPC_REGISTER_V4 is not set
CONFIG_RPCSEC_GSS_KRB5=m
# CONFIG_RPCSEC_GSS_SPKM3 is not set
CONFIG_SMB_FS=m
@ -1433,38 +1494,6 @@ CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
# CONFIG_DLM is not set
CONFIG_HAVE_KVM=y
CONFIG_VIRTUALIZATION=y
# CONFIG_KVM is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
# CONFIG_GENERIC_FIND_FIRST_BIT is not set
# CONFIG_CRC_CCITT is not set
# CONFIG_CRC16 is not set
CONFIG_CRC_T10DIF=y
CONFIG_CRC_ITU_T=m
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
# CONFIG_LIBCRC32C is not set
CONFIG_GENERIC_ALLOCATOR=y
CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_GENERIC_PENDING_IRQ=y
CONFIG_IRQ_PER_CPU=y
#
# HP Simulator drivers
#
# CONFIG_HP_SIMETH is not set
# CONFIG_HP_SIMSERIAL is not set
# CONFIG_HP_SIMSCSI is not set
#
# Kernel hacking
@ -1503,8 +1532,19 @@ CONFIG_DEBUG_MEMORY_INIT=y
# CONFIG_DEBUG_SG is not set
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_FAULT_INJECTION is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
#
# Tracers
#
# CONFIG_SCHED_TRACER is not set
# CONFIG_CONTEXT_SWITCH_TRACER is not set
# CONFIG_BOOT_TRACER is not set
# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_IA64_GRANULE_16MB=y
# CONFIG_IA64_GRANULE_64MB is not set
@ -1519,14 +1559,19 @@ CONFIG_SYSVIPC_COMPAT=y
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
CONFIG_CRYPTO=y
#
# Crypto core or helper
#
# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_AEAD=m
CONFIG_CRYPTO_BLKCIPHER=m
CONFIG_CRYPTO_HASH=m
CONFIG_CRYPTO_RNG=m
CONFIG_CRYPTO_MANAGER=m
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
@ -1599,5 +1644,36 @@ CONFIG_CRYPTO_DES=m
#
# CONFIG_CRYPTO_DEFLATE is not set
# CONFIG_CRYPTO_LZO is not set
#
# Random Number Generation
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_DEV_HIFN_795X is not set
CONFIG_HAVE_KVM=y
CONFIG_VIRTUALIZATION=y
# CONFIG_KVM is not set
# CONFIG_VIRTIO_PCI is not set
# CONFIG_VIRTIO_BALLOON is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
# CONFIG_CRC_CCITT is not set
# CONFIG_CRC16 is not set
CONFIG_CRC_T10DIF=y
CONFIG_CRC_ITU_T=m
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
# CONFIG_LIBCRC32C is not set
CONFIG_GENERIC_ALLOCATOR=y
CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_GENERIC_PENDING_IRQ=y
CONFIG_IRQ_PER_CPU=y

@ -4,6 +4,7 @@ menu "HP Simulator drivers"
config HP_SIMETH
bool "Simulated Ethernet "
depends on NET
config HP_SIMSERIAL
bool "Simulated serial driver support"

@ -83,7 +83,6 @@ extern unsigned long ia64_native_getreg_func(int regnum);
#define paravirt_getreg(reg) \
({ \
unsigned long res; \
BUILD_BUG_ON(!__builtin_constant_p(reg)); \
if ((reg) == _IA64_REG_IP) \
res = ia64_native_getreg(_IA64_REG_IP); \
else \

@ -53,10 +53,12 @@ int __ref arch_register_cpu(int num)
}
EXPORT_SYMBOL(arch_register_cpu);
void arch_unregister_cpu(int num)
void __ref arch_unregister_cpu(int num)
{
unregister_cpu(&sysfs_cpus[num].cpu);
#ifdef CONFIG_ACPI
unmap_cpu_from_node(num, cpu_to_node(num));
#endif
}
EXPORT_SYMBOL(arch_unregister_cpu);
#else

@ -5,7 +5,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/irq.h>
@ -375,6 +375,7 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
int cpu = nasid_slice_to_cpuid(nasid, slice);
#ifdef CONFIG_SMP
int cpuphys;
irq_desc_t *desc;
#endif
pci_dev_get(pci_dev);
@ -391,6 +392,12 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
#ifdef CONFIG_SMP
cpuphys = cpu_physical_id(cpu);
set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0);
desc = irq_to_desc(sn_irq_info->irq_irq);
/*
* Affinity was set by the PROM, prevent it from
* being reset by the request_irq() path.
*/
desc->status |= IRQ_AFFINITY_SET;
#endif
}

@ -200,7 +200,7 @@ static int __cpuinitdata shub_1_1_found;
* Set flag for enabling shub specific wars
*/
static inline int __init is_shub_1_1(int nasid)
static inline int __cpuinit is_shub_1_1(int nasid)
{
unsigned long id;
int rev;
@ -212,7 +212,7 @@ static inline int __init is_shub_1_1(int nasid)
return rev <= 2;
}
static void __init sn_check_for_wars(void)
static void __cpuinit sn_check_for_wars(void)
{
int cnode;
@ -512,7 +512,6 @@ static void __init sn_init_pdas(char **cmdline_p)
for_each_online_node(cnode) {
nodepdaindr[cnode] =
alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t));
memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
memset(nodepdaindr[cnode]->phys_cpuid, -1,
sizeof(nodepdaindr[cnode]->phys_cpuid));
spin_lock_init(&nodepdaindr[cnode]->ptc_lock);
@ -521,11 +520,9 @@ static void __init sn_init_pdas(char **cmdline_p)
/*
* Allocate & initialize nodepda for TIOs. For now, put them on node 0.
*/
for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) {
for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++)
nodepdaindr[cnode] =
alloc_bootmem_node(NODE_DATA(0), sizeof(nodepda_t));
memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
}
/*
* Now copy the array of nodepda pointers to each nodepda.

@ -48,7 +48,7 @@ config RUNTIME_DEBUG
help
If you say Y here, some debugging macros will do run-time checking.
If you say N here, those macros will mostly turn to no-ops. See
include/asm-mips/debug.h for debuging macros.
arch/mips/include/asm/debug.h for debugging macros.
If unsure, say N.
endmenu

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@ -35,6 +35,16 @@
mtc0 \reg, CP0_TCSTATUS
_ehb
.endm
#elif defined(CONFIG_CPU_MIPSR2)
.macro local_irq_enable reg=t0
ei
irq_enable_hazard
.endm
.macro local_irq_disable reg=t0
di
irq_disable_hazard
.endm
#else
.macro local_irq_enable reg=t0
mfc0 \reg, CP0_STATUS

@ -79,6 +79,11 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
/* We don't do dynamic PCI IRQ allocation */
}
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
/*
* Dynamic DMA mapping stuff.
* MIPS has everything mapped statically.

@ -262,14 +262,11 @@ bad_alignment:
LEAF(sys_syscall)
subu t0, a0, __NR_O32_Linux # check syscall number
sltiu v0, t0, __NR_O32_Linux_syscalls + 1
beqz t0, einval # do not recurse
sll t1, t0, 3
beqz v0, einval
lw t2, sys_call_table(t1) # syscall routine
li v1, 4000 - __NR_O32_Linux # index of sys_syscall
beq t0, v1, einval # do not recurse
/* Some syscalls like execve get their arguments from struct pt_regs
and claim zero arguments in the syscall table. Thus we have to
assume the worst case and shuffle around all potential arguments.
@ -627,7 +624,7 @@ einval: li v0, -ENOSYS
sys sys_pselect6 6
sys sys_ppoll 5
sys sys_unshare 1
sys sys_splice 4
sys sys_splice 6
sys sys_sync_file_range 7 /* 4305 */
sys sys_tee 4
sys sys_vmsplice 4

@ -390,7 +390,7 @@ EXPORT(sysn32_call_table)
PTR sys_splice
PTR sys_sync_file_range
PTR sys_tee
PTR sys_vmsplice /* 6270 */
PTR compat_sys_vmsplice /* 6270 */
PTR sys_move_pages
PTR compat_sys_set_robust_list
PTR compat_sys_get_robust_list

@ -174,14 +174,12 @@ not_o32_scall:
END(handle_sys)
LEAF(sys32_syscall)
sltu v0, a0, __NR_O32_Linux + __NR_O32_Linux_syscalls + 1
subu t0, a0, __NR_O32_Linux # check syscall number
sltiu v0, t0, __NR_O32_Linux_syscalls + 1
beqz t0, einval # do not recurse
dsll t1, t0, 3
beqz v0, einval
dsll v0, a0, 3
ld t2, (sys_call_table - (__NR_O32_Linux * 8))(v0)
li v1, 4000 # indirect syscall number
beq a0, v1, einval # do not recurse
ld t2, sys_call_table(t1) # syscall routine
move a0, a1 # shift argument registers
move a1, a2
@ -198,7 +196,7 @@ LEAF(sys32_syscall)
jr t2
/* Unreached */
einval: li v0, -EINVAL
einval: li v0, -ENOSYS
jr ra
END(sys32_syscall)
@ -512,7 +510,7 @@ sys_call_table:
PTR sys_splice
PTR sys32_sync_file_range /* 4305 */
PTR sys_tee
PTR sys_vmsplice
PTR compat_sys_vmsplice
PTR compat_sys_move_pages
PTR compat_sys_set_robust_list
PTR compat_sys_get_robust_list /* 4310 */

@ -1134,7 +1134,7 @@ static int vpe_release(struct inode *inode, struct file *filp)
/* It's good to be able to run the SP and if it chokes have a look at
the /dev/rt?. But if we reset the pointer to the shared struct we
loose what has happened. So perhaps if garbage is sent to the vpe
lose what has happened. So perhaps if garbage is sent to the vpe
device, use it as a trigger for the reset. Hopefully a nice
executable will be along shortly. */
if (ret < 0)

@ -111,6 +111,7 @@ EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
plat_unmap_dma_mem(dma_handle);
free_pages((unsigned long) vaddr, get_order(size));
}
@ -121,6 +122,8 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
{
unsigned long addr = (unsigned long) vaddr;
plat_unmap_dma_mem(dma_handle);
if (!plat_device_is_coherent(dev))
addr = CAC_ADDR(addr);

@ -7,9 +7,8 @@
#
obj-y := malta-amon.o malta-cmdline.o \
malta-display.o malta-init.o malta-int.o \
malta-memory.o malta-mtd.o \
malta-platform.o malta-reset.o \
malta-setup.o malta-time.o
malta-memory.o malta-platform.o \
malta-reset.o malta-setup.o malta-time.o
obj-$(CONFIG_EARLY_PRINTK) += malta-console.o
obj-$(CONFIG_PCI) += malta-pci.o

@ -1,63 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 MIPS Technologies, Inc.
* written by Ralf Baechle <ralf@linux-mips.org>
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <mtd/mtd-abi.h>
static struct mtd_partition malta_mtd_partitions[] = {
{
.name = "YAMON",
.offset = 0x0,
.size = 0x100000,
.mask_flags = MTD_WRITEABLE
}, {
.name = "User FS",
.offset = 0x100000,
.size = 0x2e0000
}, {
.name = "Board Config",
.offset = 0x3e0000,
.size = 0x020000,
.mask_flags = MTD_WRITEABLE
}
};
static struct physmap_flash_data malta_flash_data = {
.width = 4,
.nr_parts = ARRAY_SIZE(malta_mtd_partitions),
.parts = malta_mtd_partitions
};
static struct resource malta_flash_resource = {
.start = 0x1e000000,
.end = 0x1e3fffff,
.flags = IORESOURCE_MEM
};
static struct platform_device malta_flash = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &malta_flash_data,
},
.num_resources = 1,
.resource = &malta_flash_resource,
};
static int __init malta_mtd_init(void)
{
platform_device_register(&malta_flash);
return 0;
}
module_init(malta_mtd_init)

@ -3,10 +3,14 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 MIPS Technologies, Inc.
* Copyright (C) 2006, 07 MIPS Technologies, Inc.
* written by Ralf Baechle (ralf@linux-mips.org)
* written by Ralf Baechle <ralf@linux-mips.org>
*
* Probe driver for the Malta's UART ports:
* Copyright (C) 2008 Wind River Systems, Inc.
* updated by Tiejun Chen <tiejun.chen@windriver.com>
*
* 1. Probe driver for the Malta's UART ports:
*
* o 2 ports in the SMC SuperIO
* o 1 port in the CBUS UART, a discrete 16550 which normally is only used
@ -14,10 +18,17 @@
*
* We don't use 8250_platform.c on Malta as it would result in the CBUS
* UART becoming ttyS0.
*
* 2. Register RTC-CMOS platform device on Malta.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/serial_8250.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
#include <mtd/mtd-abi.h>
#define SMC_PORT(base, int) \
{ \
@ -45,21 +56,93 @@ static struct plat_serial8250_port uart8250_data[] = {
{ },
};
static struct platform_device uart8250_device = {
static struct platform_device malta_uart8250_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM2,
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = uart8250_data,
},
};
static int __init uart8250_init(void)
struct resource malta_rtc_resources[] = {
{
.start = RTC_PORT(0),
.end = RTC_PORT(7),
.flags = IORESOURCE_IO,
}, {
.start = RTC_IRQ,
.end = RTC_IRQ,
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device malta_rtc_device = {
.name = "rtc_cmos",
.id = -1,
.resource = malta_rtc_resources,
.num_resources = ARRAY_SIZE(malta_rtc_resources),
};
static struct mtd_partition malta_mtd_partitions[] = {
{
.name = "YAMON",
.offset = 0x0,
.size = 0x100000,
.mask_flags = MTD_WRITEABLE
}, {
.name = "User FS",
.offset = 0x100000,
.size = 0x2e0000
}, {
.name = "Board Config",
.offset = 0x3e0000,
.size = 0x020000,
.mask_flags = MTD_WRITEABLE
}
};
static struct physmap_flash_data malta_flash_data = {
.width = 4,
.nr_parts = ARRAY_SIZE(malta_mtd_partitions),
.parts = malta_mtd_partitions
};
static struct resource malta_flash_resource = {
.start = 0x1e000000,
.end = 0x1e3fffff,
.flags = IORESOURCE_MEM
};
static struct platform_device malta_flash_device = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &malta_flash_data,
},
.num_resources = 1,
.resource = &malta_flash_resource,
};
static struct platform_device *malta_devices[] __initdata = {
&malta_uart8250_device,
&malta_rtc_device,
&malta_flash_device,
};
static int __init malta_add_devices(void)
{
return platform_device_register(&uart8250_device);
int err;
err = platform_add_devices(malta_devices, ARRAY_SIZE(malta_devices));
if (err)
return err;
/*
* Set RTC to BCD mode to support current alarm code.
*/
CMOS_WRITE(CMOS_READ(RTC_CONTROL) & ~RTC_DM_BINARY, RTC_CONTROL);
return 0;
}
module_init(uart8250_init);
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("8250 UART probe driver for the Malta CBUS UART");
device_initcall(malta_add_devices);

@ -354,6 +354,30 @@ EXPORT_SYMBOL(PCIBIOS_MIN_IO);
EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
#endif
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
unsigned long prot;
/*
* I/O space can be accessed via normal processor loads and stores on
* this platform but for now we elect not to do this and portable
* drivers should not do this anyway.
*/
if (mmap_state == pci_mmap_io)
return -EINVAL;
/*
* Ignore write-combine; for now only return uncached mappings.
*/
prot = pgprot_val(vma->vm_page_prot);
prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
vma->vm_page_prot = __pgprot(prot);
return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
char * (*pcibios_plat_setup)(char *str) __devinitdata;
char *__devinit pcibios_setup(char *str)

@ -180,6 +180,7 @@ ENTRY(resume_userspace)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
__cli
mov (TI_preempt_count,a2),d0 # non-zero preempt_count ?
cmp 0,d0
bne restore_all
@ -190,7 +191,7 @@ need_resched:
mov (REG_EPSW,fp),d0
and EPSW_IM,d0
cmp EPSW_IM_7,d0 # interrupts off (exception path) ?
beq restore_all
bne restore_all
call preempt_schedule_irq[],0
jmp need_resched
#endif

@ -99,6 +99,7 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
try_again:
/* pull chars out of the buffer */
ix = gdbstub_rx_outp;
barrier();
if (ix == gdbstub_rx_inp) {
if (nonblock)
return -EAGAIN;
@ -110,6 +111,7 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
ch = gdbstub_rx_buffer[ix++];
st = gdbstub_rx_buffer[ix++];
barrier();
gdbstub_rx_outp = ix & 0x00000fff;
if (st & UART_LSR_BI) {

@ -566,6 +566,11 @@ static void mn10300_serial_transmit_interrupt(struct mn10300_serial_port *port)
{
_enter("%s", port->name);
if (!port->uart.info || !port->uart.info->port.tty) {
mn10300_serial_dis_tx_intr(port);
return;
}
if (uart_tx_stopped(&port->uart) ||
uart_circ_empty(&port->uart.info->xmit))
mn10300_serial_dis_tx_intr(port);

@ -161,7 +161,7 @@ void __init setup_arch(char **cmdline_p)
reserve the page it is occupying. */
if (CONFIG_INTERRUPT_VECTOR_BASE >= CONFIG_KERNEL_RAM_BASE_ADDRESS &&
CONFIG_INTERRUPT_VECTOR_BASE < memory_end)
reserve_bootmem(CONFIG_INTERRUPT_VECTOR_BASE, 1,
reserve_bootmem(CONFIG_INTERRUPT_VECTOR_BASE, PAGE_SIZE,
BOOTMEM_DEFAULT);
reserve_bootmem(PAGE_ALIGN(PFN_PHYS(free_pfn)), bootmap_size,

@ -11,6 +11,7 @@
#define __VMLINUX_LDS__
#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
#include <asm/page.h>
OUTPUT_FORMAT("elf32-am33lin", "elf32-am33lin", "elf32-am33lin")
OUTPUT_ARCH(mn10300)
@ -55,13 +56,13 @@ SECTIONS
CONSTRUCTORS
}
. = ALIGN(4096);
. = ALIGN(PAGE_SIZE);
__nosave_begin = .;
.data_nosave : { *(.data.nosave) }
. = ALIGN(4096);
. = ALIGN(PAGE_SIZE);
__nosave_end = .;
. = ALIGN(4096);
. = ALIGN(PAGE_SIZE);
.data.page_aligned : { *(.data.idt) }
. = ALIGN(32);
@ -78,7 +79,7 @@ SECTIONS
.data.init_task : { *(.data.init_task) }
/* might get freed after init */
. = ALIGN(4096);
. = ALIGN(PAGE_SIZE);
.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
__smp_locks = .;
*(.smp_locks)
@ -86,7 +87,7 @@ SECTIONS
}
/* will be freed after init */
. = ALIGN(4096); /* Init code and data */
. = ALIGN(PAGE_SIZE); /* Init code and data */
__init_begin = .;
.init.text : {
_sinittext = .;
@ -120,17 +121,14 @@ SECTIONS
.exit.data : { *(.exit.data) }
#ifdef CONFIG_BLK_DEV_INITRD
. = ALIGN(4096);
. = ALIGN(PAGE_SIZE);
__initramfs_start = .;
.init.ramfs : { *(.init.ramfs) }
__initramfs_end = .;
#endif
. = ALIGN(32);
__per_cpu_start = .;
.data.percpu : { *(.data.percpu) }
__per_cpu_end = .;
. = ALIGN(4096);
PERCPU(32)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
@ -145,7 +143,7 @@ SECTIONS
_end = . ;
/* This is where the kernel creates the early boot page tables */
. = ALIGN(4096);
. = ALIGN(PAGE_SIZE);
pg0 = .;
/* Sections to be discarded */

@ -41,6 +41,7 @@ $(obj)/4xx.o: BOOTCFLAGS += -mcpu=405
$(obj)/ebony.o: BOOTCFLAGS += -mcpu=405
$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=405
$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405
$(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
$(obj)/virtex405-head.o: BOOTAFLAGS += -mcpu=405

@ -91,6 +91,14 @@
interrupts = <18 0x8>;
interrupt-parent = <&ipic>;
};
mcu_pio: mcu@a {
#gpio-cells = <2>;
compatible = "fsl,mc9s08qg8-mpc8349emitx",
"fsl,mcu-mpc8349emitx";
reg = <0x0a>;
gpio-controller;
};
};
spi@7000 {
@ -139,14 +147,6 @@
interrupt-parent = <&ipic>;
interrupts = <71 8>;
};
mcu_pio: mcu@a {
#gpio-cells = <2>;
compatible = "fsl,mc9s08qg8-mpc8349emitx",
"fsl,mcu-mpc8349emitx";
reg = <0x0a>;
gpio-controller;
};
};
usb@22000 {

@ -723,7 +723,7 @@ CONFIG_CICADA_PHY=y
# CONFIG_BROADCOM_PHY is not set
# CONFIG_ICPLUS_PHY is not set
# CONFIG_REALTEK_PHY is not set
# CONFIG_FIXED_PHY is not set
CONFIG_FIXED_PHY=y
# CONFIG_MDIO_BITBANG is not set
# CONFIG_NET_ETHERNET is not set
CONFIG_NETDEV_1000=y

@ -682,7 +682,7 @@ CONFIG_VITESSE_PHY=y
# CONFIG_BROADCOM_PHY is not set
CONFIG_ICPLUS_PHY=y
# CONFIG_REALTEK_PHY is not set
# CONFIG_FIXED_PHY is not set
CONFIG_FIXED_PHY=y
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
CONFIG_MII=y

@ -40,6 +40,7 @@ _GLOBAL(__setup_cpu_460gt)
mtlr r4
blr
_GLOBAL(__setup_cpu_440x5)
_GLOBAL(__setup_cpu_440gx)
_GLOBAL(__setup_cpu_440spe)
b __fixup_440A_mcheck

@ -39,6 +39,7 @@ extern void __setup_cpu_440epx(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_440gx(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_440spe(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_440x5(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
@ -1500,6 +1501,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440x5,
.machine_check = machine_check_440A,
.platform = "ppc440",
},
{ /* 460EX */

@ -75,6 +75,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
for_each_sg(sgl, sg, nents, i) {
sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
sg->dma_length = sg->length;
__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}
return nents;

@ -479,6 +479,8 @@ _GLOBAL(_tlbil_pid)
* (no broadcast)
*/
_GLOBAL(_tlbil_va)
mfmsr r10
wrteei 0
slwi r4,r4,16
mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
tlbsx 0,r3
@ -490,6 +492,7 @@ _GLOBAL(_tlbil_va)
tlbwe
msync
isync
wrtee r10
blr
#endif /* CONFIG_FSL_BOOKE */

@ -98,6 +98,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

@ -1014,7 +1014,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_SETFPREGS64:
ret = copy_regset_to_user(child, view, REGSET_FP,
ret = copy_regset_from_user(child, view, REGSET_FP,
0 * sizeof(u64),
33 * sizeof(u64),
fps);

@ -131,7 +131,7 @@
#define VIS_OPF_SHIFT 5
#define VIS_OPF_MASK (0x1ff << VIS_OPF_SHIFT)
#define RS1(INSN) (((INSN) >> 24) & 0x1f)
#define RS1(INSN) (((INSN) >> 14) & 0x1f)
#define RS2(INSN) (((INSN) >> 0) & 0x1f)
#define RD(INSN) (((INSN) >> 25) & 0x1f)
@ -445,7 +445,7 @@ static void pdist(struct pt_regs *regs, unsigned int insn)
unsigned long i;
rs1 = fpd_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd = fpd_regaddr(f, RD(insn));
rd_val = *rd;
@ -807,6 +807,8 @@ int vis_emul(struct pt_regs *regs, unsigned int insn)
if (get_user(insn, (u32 __user *) pc))
return -EFAULT;
save_and_clear_fpu();
opf = (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT;
switch (opf) {
default:

@ -16,6 +16,8 @@
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
@ -785,7 +787,7 @@ static int __init mconsole_init(void)
/* long to avoid size mismatch warnings from gcc */
long sock;
int err;
char file[256];
char file[UNIX_PATH_MAX];
if (umid_file_name("mconsole", file, sizeof(file)))
return -1;

@ -251,13 +251,6 @@ struct amd_iommu {
/* Pointer to PCI device of this IOMMU */
struct pci_dev *dev;
/*
* Capability pointer. There could be more than one IOMMU per PCI
* device function if there are more than one AMD IOMMU capability
* pointers.
*/
u16 cap_ptr;
/* physical address of MMIO space */
u64 mmio_phys;
/* virtual address of MMIO space */
@ -266,6 +259,13 @@ struct amd_iommu {
/* capabilities of that IOMMU read from ACPI */
u32 cap;
/*
* Capability pointer. There could be more than one IOMMU per PCI
* device function if there are more than one AMD IOMMU capability
* pointers.
*/
u16 cap_ptr;
/* pci domain of this IOMMU */
u16 pci_seg;
@ -284,19 +284,19 @@ struct amd_iommu {
/* size of command buffer */
u32 cmd_buf_size;
/* event buffer virtual address */
u8 *evt_buf;
/* size of event buffer */
u32 evt_buf_size;
/* event buffer virtual address */
u8 *evt_buf;
/* MSI number for event interrupt */
u16 evt_msi_num;
/* if one, we need to send a completion wait command */
int need_sync;
/* true if interrupts for this IOMMU are already enabled */
bool int_enabled;
/* if one, we need to send a completion wait command */
int need_sync;
/* default dma_ops domain for that IOMMU */
struct dma_ops_domain *default_dom;
};

@ -71,15 +71,13 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_32
return 0;
#else
#ifdef CONFIG_X86_64
struct dma_mapping_ops *ops = get_dma_ops(dev);
if (ops->mapping_error)
return ops->mapping_error(dev, dma_addr);
return (dma_addr == bad_dma_address);
#endif
return (dma_addr == bad_dma_address);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
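
Both configurations now fall through to the bad_dma_address comparison instead of 32-bit builds unconditionally returning 0. The caller pattern this helper serves, as a generic sketch (device and buffer names are hypothetical):

static int map_one_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;
	/* ... program the device with addr, dma_unmap_single() when done ... */
	return 0;
}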

@ -239,7 +239,7 @@ struct pci_bus;
void set_pci_bus_resources_arch_default(struct pci_bus *b);
#ifdef CONFIG_SMP
#define mc_capable() (boot_cpu_data.x86_max_cores > 1)
#define mc_capable() (cpus_weight(per_cpu(cpu_core_map, 0)) != nr_cpu_ids)
#define smt_capable() (smp_num_siblings > 1)
#endif

@ -223,9 +223,15 @@ struct pci_header {
} __attribute__((packed));
/* Function prototypes for bootstrapping */
#ifdef CONFIG_VMI
extern void vmi_init(void);
extern void vmi_activate(void);
extern void vmi_bringup(void);
extern void vmi_apply_boot_page_allocations(void);
#else
static inline void vmi_init(void) {}
static inline void vmi_activate(void) {}
static inline void vmi_bringup(void) {}
#endif
/* State needed to start an application processor in an SMP system. */
struct vmi_ap_state {

@ -187,6 +187,8 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
spin_lock_irqsave(&iommu->lock, flags);
ret = __iommu_queue_command(iommu, cmd);
if (!ret)
iommu->need_sync = 1;
spin_unlock_irqrestore(&iommu->lock, flags);
return ret;
@ -210,10 +212,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
iommu->need_sync = 0;
spin_lock_irqsave(&iommu->lock, flags);
if (!iommu->need_sync)
goto out;
iommu->need_sync = 0;
ret = __iommu_queue_command(iommu, &cmd);
if (ret)
@ -254,8 +259,6 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
ret = iommu_queue_command(iommu, &cmd);
iommu->need_sync = 1;
return ret;
}
@ -281,8 +284,6 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
ret = iommu_queue_command(iommu, &cmd);
iommu->need_sync = 1;
return ret;
}
@ -343,7 +344,7 @@ static int iommu_map(struct protection_domain *dom,
u64 __pte, *pte, *page;
bus_addr = PAGE_ALIGN(bus_addr);
phys_addr = PAGE_ALIGN(bus_addr);
phys_addr = PAGE_ALIGN(phys_addr);
/* only support 512GB address spaces for now */
if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
@ -599,7 +600,7 @@ static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
continue;
p2 = IOMMU_PTE_PAGE(p1[i]);
for (j = 0; j < 512; ++i) {
for (j = 0; j < 512; ++j) {
if (!IOMMU_PTE_PRESENT(p2[j]))
continue;
p3 = IOMMU_PTE_PAGE(p2[j]);
@ -762,8 +763,6 @@ static void set_device_domain(struct amd_iommu *iommu,
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
iommu_queue_inv_dev_entry(iommu, devid);
iommu->need_sync = 1;
}
/*****************************************************************************
@ -858,6 +857,9 @@ static int get_device_resources(struct device *dev,
print_devid(_bdf, 1);
}
if (domain_for_device(_bdf) == NULL)
set_device_domain(*iommu, *domain, _bdf);
return 1;
}
@ -908,7 +910,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
if (address >= dom->aperture_size)
return;
WARN_ON(address & 0xfffULL || address > dom->aperture_size);
WARN_ON(address & ~PAGE_MASK || address >= dom->aperture_size);
pte = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
pte += IOMMU_PTE_L0_INDEX(address);
@ -920,8 +922,8 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
/*
* This function contains common code for mapping of a physically
* contiguous memory region into DMA address space. It is uses by all
* mapping functions provided by this IOMMU driver.
* contiguous memory region into DMA address space. It is used by all
* mapping functions provided with this IOMMU driver.
* Must be called with the domain lock held.
*/
static dma_addr_t __map_single(struct device *dev,
@ -981,7 +983,8 @@ static void __unmap_single(struct amd_iommu *iommu,
dma_addr_t i, start;
unsigned int pages;
if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
if ((dma_addr == bad_dma_address) ||
(dma_addr + size > dma_dom->aperture_size))
return;
pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
@ -1031,8 +1034,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
if (addr == bad_dma_address)
goto out;
if (unlikely(iommu->need_sync))
iommu_completion_wait(iommu);
iommu_completion_wait(iommu);
out:
spin_unlock_irqrestore(&domain->lock, flags);
@ -1060,8 +1062,7 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
__unmap_single(iommu, domain->priv, dma_addr, size, dir);
if (unlikely(iommu->need_sync))
iommu_completion_wait(iommu);
iommu_completion_wait(iommu);
spin_unlock_irqrestore(&domain->lock, flags);
}
@ -1127,8 +1128,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
goto unmap;
}
if (unlikely(iommu->need_sync))
iommu_completion_wait(iommu);
iommu_completion_wait(iommu);
out:
spin_unlock_irqrestore(&domain->lock, flags);
@ -1173,8 +1173,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
s->dma_address = s->dma_length = 0;
}
if (unlikely(iommu->need_sync))
iommu_completion_wait(iommu);
iommu_completion_wait(iommu);
spin_unlock_irqrestore(&domain->lock, flags);
}
@ -1225,8 +1224,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
goto out;
}
if (unlikely(iommu->need_sync))
iommu_completion_wait(iommu);
iommu_completion_wait(iommu);
out:
spin_unlock_irqrestore(&domain->lock, flags);
@ -1257,8 +1255,7 @@ static void free_coherent(struct device *dev, size_t size,
__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
if (unlikely(iommu->need_sync))
iommu_completion_wait(iommu);
iommu_completion_wait(iommu);
spin_unlock_irqrestore(&domain->lock, flags);

@ -604,6 +604,9 @@ static void __init __get_smp_config(unsigned int early)
printk(KERN_INFO "Using ACPI for processor (LAPIC) "
"configuration information\n");
if (!mpf)
return;
printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
mpf->mpf_specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)

@ -7,7 +7,8 @@
#include <asm/paravirt.h>
static void default_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
static inline void
default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
__raw_spin_lock(lock);
}

@ -123,6 +123,8 @@ static void free_iommu(unsigned long offset, int size)
spin_lock_irqsave(&iommu_bitmap_lock, flags);
iommu_area_free(iommu_gart_bitmap, offset, size);
if (offset >= next_bit)
next_bit = offset + size;
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

@ -794,6 +794,9 @@ void __init setup_arch(char **cmdline_p)
printk(KERN_INFO "Command line: %s\n", boot_command_line);
#endif
/* VMI may relocate the fixmap; do this before touching ioremap area */
vmi_init();
early_cpu_init();
early_ioremap_init();
@ -880,13 +883,8 @@ void __init setup_arch(char **cmdline_p)
check_efer();
#endif
#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
/*
* Must be before kernel pagetables are setup
* or fixmap area is touched.
*/
vmi_init();
#endif
/* Must be before kernel pagetables are setup */
vmi_activate();
/* after early param, so could get panic from serial */
reserve_early_setup_data();

@ -294,9 +294,7 @@ static void __cpuinit start_secondary(void *unused)
* fragile that we want to limit the things done here to the
* most necessary things.
*/
#ifdef CONFIG_VMI
vmi_bringup();
#endif
cpu_init();
preempt_disable();
smp_callin();

@ -960,8 +960,6 @@ static inline int __init activate_vmi(void)
void __init vmi_init(void)
{
unsigned long flags;
if (!vmi_rom)
probe_vmi_rom();
else
@ -973,13 +971,21 @@ void __init vmi_init(void)
reserve_top_address(-vmi_rom->virtual_top);
local_irq_save(flags);
activate_vmi();
#ifdef CONFIG_X86_IO_APIC
/* This is virtual hardware; timer routing is wired correctly */
no_timer_check = 1;
#endif
}
void vmi_activate(void)
{
unsigned long flags;
if (!vmi_rom)
return;
local_irq_save(flags);
activate_vmi();
local_irq_restore(flags & X86_EFLAGS_IF);
}

@ -401,14 +401,13 @@ static int __init ppro_init(char **cpu_type)
*cpu_type = "i386/pii";
break;
case 6 ... 8:
case 10 ... 11:
*cpu_type = "i386/piii";
break;
case 9:
case 13:
*cpu_type = "i386/p6_mobile";
break;
case 10 ... 13:
*cpu_type = "i386/p6";
break;
case 14:
*cpu_type = "i386/core";
break;

@ -156,6 +156,8 @@ static void ppro_start(struct op_msrs const * const msrs)
unsigned int low, high;
int i;
if (!reset_value)
return;
for (i = 0; i < num_counters; ++i) {
if (reset_value[i]) {
CTRL_READ(low, high, msrs, i);
@ -171,6 +173,8 @@ static void ppro_stop(struct op_msrs const * const msrs)
unsigned int low, high;
int i;
if (!reset_value)
return;
for (i = 0; i < num_counters; ++i) {
if (!reset_value[i])
continue;

@ -161,7 +161,7 @@ static inline struct request *start_ordered(struct request_queue *q,
/*
* Prep proxy barrier request.
*/
blkdev_dequeue_request(rq);
elv_dequeue_request(q, rq);
q->orig_bar_rq = rq;
rq = &q->bar_rq;
blk_rq_init(q, rq);
@ -219,7 +219,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
* This can happen when the queue switches to
* ORDERED_NONE while this request is on it.
*/
blkdev_dequeue_request(rq);
elv_dequeue_request(q, rq);
if (__blk_end_request(rq, -EOPNOTSUPP,
blk_rq_bytes(rq)))
BUG();

@ -592,7 +592,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
1 << QUEUE_FLAG_STACKABLE);
q->queue_lock = lock;
blk_queue_segment_boundary(q, 0xffffffff);
blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
blk_queue_make_request(q, __make_request);
blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
@ -1636,6 +1636,28 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
/**
* blkdev_dequeue_request - dequeue request and start timeout timer
* @req: request to dequeue
*
* Dequeue @req and start timeout timer on it. This hands off the
* request to the driver.
*
* Block internal functions which don't want to start timer should
* call elv_dequeue_request().
*/
void blkdev_dequeue_request(struct request *req)
{
elv_dequeue_request(req->q, req);
/*
* We are now handing the request to the hardware, add the
* timeout handler.
*/
blk_add_timer(req);
}
EXPORT_SYMBOL(blkdev_dequeue_request);
/**
* __end_that_request_first - end I/O on a request
* @req: the request being processed
@ -1774,7 +1796,7 @@ static void end_that_request_last(struct request *req, int error)
blk_queue_end_tag(req->q, req);
if (blk_queued_rq(req))
blkdev_dequeue_request(req);
elv_dequeue_request(req->q, req);
if (unlikely(laptop_mode) && blk_fs_request(req))
laptop_io_completion();
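
Driver-side effect of the split, as a hedged sketch (hypothetical driver built on this kernel's elv_next_request() interface): a single call now both dequeues the request and arms its timeout.

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		blkdev_dequeue_request(rq);	/* dequeue + start timeout timer */
		/* ... hand rq to the hardware; complete it from the IRQ handler ... */
	}
}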

@ -224,7 +224,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
*/
bio_get(bio);
bio_endio(bio, 0);
bio_unmap_user(bio);
__blk_rq_unmap_user(bio);
return -EINVAL;
}

@ -125,6 +125,9 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
q->nr_requests = BLKDEV_MAX_RQ;
blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
q->make_request_fn = mfn;
q->backing_dev_info.ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
@ -314,6 +317,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
/* zero is "infinity" */
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);
t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
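
min_not_zero(), used for the new seg_boundary_mask line above, picks the smaller of two limits while treating zero as "unlimited"; the block layer defines it essentially as:

#include <linux/kernel.h>	/* for min() */

/* 0 means "no limit", so it never wins the comparison. */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))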

View File

@ -202,6 +202,8 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
rq->timeout = q->sg_timeout;
if (!rq->timeout)
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
if (rq->timeout < BLK_MIN_SG_TIMEOUT)
rq->timeout = BLK_MIN_SG_TIMEOUT;
return 0;
}
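
Both this bsg hunk and the identical sg_io change later in the diff implement the same rule; as a hypothetical standalone helper it would read:

#include <linux/blkdev.h>
#include <linux/kernel.h>

/* Sketch: fall back to the default when no timeout was given, then
 * enforce the new BLK_MIN_SG_TIMEOUT floor. */
static unsigned int sg_clamp_timeout(unsigned int timeout)
{
	if (!timeout)
		timeout = BLK_DEFAULT_SG_TIMEOUT;
	return max_t(unsigned int, timeout, BLK_MIN_SG_TIMEOUT);
}

so that each caller reduces to rq->timeout = sg_clamp_timeout(rq->timeout);.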

View File

@ -677,6 +677,29 @@ static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
case DVD_WRITE_STRUCT:
case DVD_AUTH:
arg = (unsigned long)compat_ptr(arg);
/* These interpret arg as an unsigned long, not as a pointer,
* so we must not do compat_ptr() conversion. */
case HDIO_SET_MULTCOUNT:
case HDIO_SET_UNMASKINTR:
case HDIO_SET_KEEPSETTINGS:
case HDIO_SET_32BIT:
case HDIO_SET_NOWERR:
case HDIO_SET_DMA:
case HDIO_SET_PIO_MODE:
case HDIO_SET_NICE:
case HDIO_SET_WCACHE:
case HDIO_SET_ACOUSTIC:
case HDIO_SET_BUSSTATE:
case HDIO_SET_ADDRESS:
case CDROMEJECT_SW:
case CDROM_SET_OPTIONS:
case CDROM_CLEAR_OPTIONS:
case CDROM_SELECT_SPEED:
case CDROM_SELECT_DISC:
case CDROM_MEDIA_CHANGED:
case CDROM_DRIVE_STATUS:
case CDROM_LOCKDOOR:
case CDROM_DEBUG:
break;
default:
/* unknown ioctl number */
@ -699,8 +722,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
struct backing_dev_info *bdi;
loff_t size;
/*
* O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
* to update it before every ioctl.
*/
if (file->f_flags & O_NDELAY)
mode |= FMODE_NDELAY_NOW;
mode |= FMODE_NDELAY;
else
mode &= ~FMODE_NDELAY;
switch (cmd) {
case HDIO_GETGEO:
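
To illustrate the rule in the comment earlier in this file's changes (pointer-taking commands need compat_ptr(), plain-number commands must not), here is a sketch of a compat handler for a hypothetical driver; the MYDRV_* commands and both functions are invented for illustration:

#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/ioctl.h>

/* Hypothetical ioctl numbers: one takes a user pointer, one a plain int. */
#define MYDRV_SET_BUF	_IOW('M', 0, void *)
#define MYDRV_SET_SPEED	_IO('M', 1)

/* Hypothetical native handler stub. */
static long mydrv_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	return 0;
}

static long mydrv_compat_ioctl(struct file *f, unsigned int cmd,
			       unsigned long arg)
{
	switch (cmd) {
	case MYDRV_SET_BUF:
		/* arg is a 32-bit user pointer: widen it with compat_ptr(). */
		return mydrv_ioctl(f, cmd, (unsigned long)compat_ptr(arg));
	case MYDRV_SET_SPEED:
		/* arg is a plain number: pass it through untouched. */
		return mydrv_ioctl(f, cmd, arg);
	default:
		return -ENOIOCTLCMD;
	}
}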

View File

@ -844,14 +844,7 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq)
*/
if (blk_account_rq(rq))
q->in_flight++;
/*
* We are now handing the request to the hardware, add the
* timeout handler.
*/
blk_add_timer(rq);
}
EXPORT_SYMBOL(elv_dequeue_request);
int elv_queue_empty(struct request_queue *q)
{

View File

@ -1102,6 +1102,7 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
kfree(disk);
return NULL;
}
disk->node_id = node_id;
if (disk_expand_part_tbl(disk, 0)) {
free_part_stats(&disk->part0);
kfree(disk);
@ -1116,7 +1117,6 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
device_initialize(disk_to_dev(disk));
INIT_WORK(&disk->async_notify,
media_change_notify_thread);
disk->node_id = node_id;
}
return disk;
}

View File

@ -208,6 +208,8 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
rq->timeout = q->sg_timeout;
if (!rq->timeout)
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
if (rq->timeout < BLK_MIN_SG_TIMEOUT)
rq->timeout = BLK_MIN_SG_TIMEOUT;
return 0;
}

View File

@ -31,35 +31,63 @@ config CRYPTO_FIPS
config CRYPTO_ALGAPI
tristate
select CRYPTO_ALGAPI2
help
This option provides the API for cryptographic algorithms.
config CRYPTO_ALGAPI2
tristate
config CRYPTO_AEAD
tristate
select CRYPTO_AEAD2
select CRYPTO_ALGAPI
config CRYPTO_AEAD2
tristate
select CRYPTO_ALGAPI2
config CRYPTO_BLKCIPHER
tristate
select CRYPTO_BLKCIPHER2
select CRYPTO_ALGAPI
select CRYPTO_RNG
config CRYPTO_BLKCIPHER2
tristate
select CRYPTO_ALGAPI2
select CRYPTO_RNG2
config CRYPTO_HASH
tristate
select CRYPTO_HASH2
select CRYPTO_ALGAPI
config CRYPTO_HASH2
tristate
select CRYPTO_ALGAPI2
config CRYPTO_RNG
tristate
select CRYPTO_RNG2
select CRYPTO_ALGAPI
config CRYPTO_RNG2
tristate
select CRYPTO_ALGAPI2
config CRYPTO_MANAGER
tristate "Cryptographic algorithm manager"
select CRYPTO_AEAD
select CRYPTO_HASH
select CRYPTO_BLKCIPHER
select CRYPTO_MANAGER2
help
Create default cryptographic template instantiations such as
cbc(aes).
config CRYPTO_MANAGER2
def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y)
select CRYPTO_AEAD2
select CRYPTO_HASH2
select CRYPTO_BLKCIPHER2
config CRYPTO_GF128MUL
tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
depends on EXPERIMENTAL

View File

@ -9,24 +9,24 @@ obj-$(CONFIG_CRYPTO_FIPS) += fips.o
crypto_algapi-$(CONFIG_PROC_FS) += proc.o
crypto_algapi-objs := algapi.o scatterwalk.o $(crypto_algapi-y)
obj-$(CONFIG_CRYPTO_ALGAPI) += crypto_algapi.o
obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o
obj-$(CONFIG_CRYPTO_AEAD) += aead.o
obj-$(CONFIG_CRYPTO_AEAD2) += aead.o
crypto_blkcipher-objs := ablkcipher.o
crypto_blkcipher-objs += blkcipher.o
obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o
obj-$(CONFIG_CRYPTO_BLKCIPHER) += chainiv.o
obj-$(CONFIG_CRYPTO_BLKCIPHER) += eseqiv.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
crypto_hash-objs := hash.o
crypto_hash-objs += ahash.o
obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o
obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
cryptomgr-objs := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
@ -73,8 +73,8 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
obj-$(CONFIG_CRYPTO_RNG) += rng.o
obj-$(CONFIG_CRYPTO_RNG) += krng.o
obj-$(CONFIG_CRYPTO_RNG2) += rng.o
obj-$(CONFIG_CRYPTO_RNG2) += krng.o
obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o

View File

@ -174,15 +174,6 @@ static int acpi_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
val->intval = battery->current_now * 1000;
/* if power units are mW, convert to mA by
dividing by current voltage (mV/1000) */
if (!battery->power_unit) {
if (battery->voltage_now) {
val->intval /= battery->voltage_now;
val->intval *= 1000;
} else
val->intval = -1;
}
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:

View File

@ -824,32 +824,36 @@ static int __init toshiba_acpi_init(void)
toshiba_acpi_exit();
return -ENOMEM;
}
}
/* Register input device for kill switch */
toshiba_acpi.poll_dev = input_allocate_polled_device();
if (!toshiba_acpi.poll_dev) {
printk(MY_ERR "unable to allocate kill-switch input device\n");
toshiba_acpi_exit();
return -ENOMEM;
}
toshiba_acpi.poll_dev->private = &toshiba_acpi;
toshiba_acpi.poll_dev->poll = bt_poll_rfkill;
toshiba_acpi.poll_dev->poll_interval = 1000; /* msecs */
/* Register input device for kill switch */
toshiba_acpi.poll_dev = input_allocate_polled_device();
if (!toshiba_acpi.poll_dev) {
printk(MY_ERR
"unable to allocate kill-switch input device\n");
toshiba_acpi_exit();
return -ENOMEM;
}
toshiba_acpi.poll_dev->private = &toshiba_acpi;
toshiba_acpi.poll_dev->poll = bt_poll_rfkill;
toshiba_acpi.poll_dev->poll_interval = 1000; /* msecs */
toshiba_acpi.poll_dev->input->name = toshiba_acpi.rfk_name;
toshiba_acpi.poll_dev->input->id.bustype = BUS_HOST;
toshiba_acpi.poll_dev->input->id.vendor = 0x0930; /* Toshiba USB ID */
set_bit(EV_SW, toshiba_acpi.poll_dev->input->evbit);
set_bit(SW_RFKILL_ALL, toshiba_acpi.poll_dev->input->swbit);
input_report_switch(toshiba_acpi.poll_dev->input, SW_RFKILL_ALL, TRUE);
input_sync(toshiba_acpi.poll_dev->input);
toshiba_acpi.poll_dev->input->name = toshiba_acpi.rfk_name;
toshiba_acpi.poll_dev->input->id.bustype = BUS_HOST;
/* Toshiba USB ID */
toshiba_acpi.poll_dev->input->id.vendor = 0x0930;
set_bit(EV_SW, toshiba_acpi.poll_dev->input->evbit);
set_bit(SW_RFKILL_ALL, toshiba_acpi.poll_dev->input->swbit);
input_report_switch(toshiba_acpi.poll_dev->input,
SW_RFKILL_ALL, TRUE);
input_sync(toshiba_acpi.poll_dev->input);
ret = input_register_polled_device(toshiba_acpi.poll_dev);
if (ret) {
printk(MY_ERR "unable to register kill-switch input device\n");
toshiba_acpi_exit();
return ret;
ret = input_register_polled_device(toshiba_acpi.poll_dev);
if (ret) {
printk(MY_ERR
"unable to register kill-switch input device\n");
toshiba_acpi_exit();
return ret;
}
}
return 0;

View File

@ -153,7 +153,7 @@ config SATA_PROMISE
If unsure, say N.
config SATA_SX4
tristate "Promise SATA SX4 support"
tristate "Promise SATA SX4 support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for Promise Serial ATA SX4.
@ -219,8 +219,8 @@ config PATA_ACPI
otherwise unsupported hardware.
config PATA_ALI
tristate "ALi PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
tristate "ALi PATA support"
depends on PCI
help
This option enables support for the ALi ATA interfaces
found on the many ALi chipsets.
@ -263,7 +263,7 @@ config PATA_ATIIXP
If unsure, say N.
config PATA_CMD640_PCI
tristate "CMD640 PCI PATA support (Very Experimental)"
tristate "CMD640 PCI PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the CMD640 PCI IDE
@ -291,8 +291,8 @@ config PATA_CS5520
If unsure, say N.
config PATA_CS5530
tristate "CS5530 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
tristate "CS5530 PATA support"
depends on PCI
help
This option enables support for the Cyrix/NatSemi/AMD CS5530
companion chip used with the MediaGX/Geode processor family.
@ -309,8 +309,8 @@ config PATA_CS5535
If unsure, say N.
config PATA_CS5536
tristate "CS5536 PATA support (Experimental)"
depends on PCI && X86 && !X86_64 && EXPERIMENTAL
tristate "CS5536 PATA support"
depends on PCI && X86 && !X86_64
help
This option enables support for the AMD CS5536
companion chip used with the Geode LX processor family.
@ -363,7 +363,7 @@ config PATA_HPT37X
If unsure, say N.
config PATA_HPT3X2N
tristate "HPT 372N/302N PATA support (Very Experimental)"
tristate "HPT 372N/302N PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the N variant HPT PATA
@ -389,8 +389,8 @@ config PATA_HPT3X3_DMA
problems with DMA on this chipset.
config PATA_ISAPNP
tristate "ISA Plug and Play PATA support (Experimental)"
depends on EXPERIMENTAL && ISAPNP
tristate "ISA Plug and Play PATA support"
depends on ISAPNP
help
This option enables support for ISA plug & play ATA
controllers such as those found on old soundcards.
@ -498,8 +498,8 @@ config PATA_NINJA32
If unsure, say N.
config PATA_NS87410
tristate "Nat Semi NS87410 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
tristate "Nat Semi NS87410 PATA support"
depends on PCI
help
This option enables support for the National Semiconductor
NS87410 PCI-IDE controller.
@ -507,8 +507,8 @@ config PATA_NS87410
If unsure, say N.
config PATA_NS87415
tristate "Nat Semi NS87415 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
tristate "Nat Semi NS87415 PATA support"
depends on PCI
help
This option enables support for the National Semiconductor
NS87415 PCI-IDE controller.
@ -544,8 +544,8 @@ config PATA_PCMCIA
If unsure, say N.
config PATA_PDC_OLD
tristate "Older Promise PATA controller support (Experimental)"
depends on PCI && EXPERIMENTAL
tristate "Older Promise PATA controller support"
depends on PCI
help
This option enables support for the Promise 20246, 20262, 20263,
20265 and 20267 adapters.
@ -559,7 +559,7 @@ config PATA_QDI
Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
config PATA_RADISYS
tristate "RADISYS 82600 PATA support (Very Experimental)"
tristate "RADISYS 82600 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the RADISYS 82600
@ -586,8 +586,8 @@ config PATA_RZ1000
If unsure, say N.
config PATA_SC1200
tristate "SC1200 PATA support (Very Experimental)"
depends on PCI && EXPERIMENTAL
tristate "SC1200 PATA support"
depends on PCI
help
This option enables support for the NatSemi/AMD SC1200 SoC
companion chip used with the Geode processor family.
@ -620,8 +620,8 @@ config PATA_SIL680
If unsure, say N.
config PATA_SIS
tristate "SiS PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
tristate "SiS PATA support"
depends on PCI
help
This option enables support for SiS PATA controllers

View File

@ -1072,7 +1072,14 @@ static int piix_broken_suspend(void)
* matching is necessary because dmi_system_id.matches is
* limited to four entries.
*/
if (!strcmp(dmi_get_system_info(DMI_SYS_VENDOR), "TOSHIBA") &&
if (dmi_get_system_info(DMI_SYS_VENDOR) &&
dmi_get_system_info(DMI_PRODUCT_NAME) &&
dmi_get_system_info(DMI_PRODUCT_VERSION) &&
dmi_get_system_info(DMI_PRODUCT_SERIAL) &&
dmi_get_system_info(DMI_BOARD_VENDOR) &&
dmi_get_system_info(DMI_BOARD_NAME) &&
dmi_get_system_info(DMI_BOARD_VERSION) &&
!strcmp(dmi_get_system_info(DMI_SYS_VENDOR), "TOSHIBA") &&
!strcmp(dmi_get_system_info(DMI_PRODUCT_NAME), "000000") &&
!strcmp(dmi_get_system_info(DMI_PRODUCT_VERSION), "000000") &&
!strcmp(dmi_get_system_info(DMI_PRODUCT_SERIAL), "000000") &&
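
dmi_get_system_info() returns NULL for any field the BIOS leaves out, which is what the bare strcmp() chain tripped over. The added NULL checks can be read as the following hypothetical helper:

#include <linux/dmi.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical helper: true only when the DMI field exists and matches. */
static bool dmi_field_is(int field, const char *value)
{
	const char *s = dmi_get_system_info(field);

	return s && !strcmp(s, value);
}

With it, the quirk test would read dmi_field_is(DMI_SYS_VENDOR, "TOSHIBA") && dmi_field_is(DMI_PRODUCT_NAME, "000000") and so on.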

View File

@ -382,10 +382,10 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
/* PCI clocking determines the ATA timing values to use */
/* info_hpt366 is safe against re-entry so we can scribble on it */
switch((reg1 & 0x700) >> 8) {
case 5:
case 9:
hpriv = &hpt366_40;
break;
case 9:
case 5:
hpriv = &hpt366_25;
break;
default:

View File

@ -44,7 +44,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_ninja32"
#define DRV_VERSION "0.1.1"
#define DRV_VERSION "0.1.3"
/**
@ -130,7 +130,8 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
return rc;
pci_set_master(dev);
/* Set up the register mappings */
/* Set up the register mappings. We use the I/O mapping as only the
older chips also have MMIO on BAR 1 */
base = host->iomap[0];
if (!base)
return -ENOMEM;
@ -167,8 +168,12 @@ static int ninja32_reinit_one(struct pci_dev *pdev)
#endif
static const struct pci_device_id ninja32[] = {
{ 0x10FC, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0x8008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0xf008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1145, 0xf02C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ },
};

View File

@ -56,7 +56,6 @@ static const struct sis_laptop sis_laptop[] = {
{ 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */
{ 0x5513, 0x1734, 0x105F }, /* FSC Amilo A1630 */
{ 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */
{ 0x5513, 0x1039, 0x5513 }, /* Targa Visionary 1000 */
/* end marker */
{ 0, }
};

View File

@ -302,7 +302,7 @@ static struct kobj_type kobj_pkt_type_wqueue = {
static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
if (class_pktcdvd) {
pd->dev = device_create(class_pktcdvd, NULL, pd->pkt_dev, NULL,
pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
"%s", pd->name);
if (IS_ERR(pd->dev))
pd->dev = NULL;
@ -2790,7 +2790,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
return 0;
out_mem:
blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
return ret;
@ -2975,7 +2975,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
pkt_debugfs_dev_remove(pd);
pkt_sysfs_dev_remove(pd);
blkdev_put(pd->bdev, FMODE_READ|FMODE_WRITE);
blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
remove_proc_entry(pd->name, pkt_proc);
DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);

View File

@ -2081,10 +2081,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
if (!q)
return -ENXIO;
rq = blk_get_request(q, READ, GFP_KERNEL);
if (!rq)
return -ENOMEM;
cdi->last_sense = 0;
while (nframes) {
@ -2096,9 +2092,17 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
len = nr * CD_FRAMESIZE_RAW;
ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
if (ret)
rq = blk_get_request(q, READ, GFP_KERNEL);
if (!rq) {
ret = -ENOMEM;
break;
}
ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
if (ret) {
blk_put_request(rq);
break;
}
rq->cmd[0] = GPCMD_READ_CD;
rq->cmd[1] = 1 << 2;
@ -2124,6 +2128,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
if (blk_rq_unmap_user(bio))
ret = -EFAULT;
blk_put_request(rq);
if (ret)
break;
@ -2133,7 +2138,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
ubuf += len;
}
blk_put_request(rq);
return ret;
}
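
Reduced to a sketch, the shape of this fix: a struct request is single-use once issued, so each chunk must allocate, map, issue, unmap, and free its own request. The mydrv_ and read_in_chunks names are hypothetical:

#include <linux/blkdev.h>

/* Hypothetical stand-in for the GPCMD_READ_CD setup and execution. */
static int mydrv_issue_chunk(struct request *rq)
{
	return 0;
}

static int read_in_chunks(struct request_queue *q, char __user *ubuf,
			  size_t chunk, int nchunks)
{
	int ret = 0;

	while (nchunks--) {
		struct request *rq;
		struct bio *bio;

		rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!rq)
			return -ENOMEM;

		ret = blk_rq_map_user(q, rq, NULL, ubuf, chunk, GFP_KERNEL);
		if (ret) {
			blk_put_request(rq);
			break;
		}
		bio = rq->bio;		/* saved so we can unmap afterwards */

		ret = mydrv_issue_chunk(rq);

		if (blk_rq_unmap_user(bio))
			ret = -EFAULT;
		blk_put_request(rq);	/* the request is freed every pass */

		if (ret)
			break;
		ubuf += chunk;
	}
	return ret;
}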

View File

@ -27,7 +27,7 @@
0x0c U+2640
0x0d U+266a
0x0e U+266b
0x0f U+263c
0x0f U+263c U+00a4
0x10 U+25b6 U+25ba
0x11 U+25c0 U+25c4
0x12 U+2195
@ -55,7 +55,7 @@
0x24 U+0024
0x25 U+0025
0x26 U+0026
0x27 U+0027
0x27 U+0027 U+00b4
0x28 U+0028
0x29 U+0029
0x2a U+002a
@ -84,7 +84,7 @@
0x41 U+0041 U+00c0 U+00c1 U+00c2 U+00c3
0x42 U+0042
0x43 U+0043 U+00a9
0x44 U+0044
0x44 U+0044 U+00d0
0x45 U+0045 U+00c8 U+00ca U+00cb
0x46 U+0046
0x47 U+0047
@ -140,7 +140,7 @@
0x79 U+0079 U+00fd
0x7a U+007a
0x7b U+007b
0x7c U+007c U+00a5
0x7c U+007c U+00a6
0x7d U+007d
0x7e U+007e
#
@ -263,10 +263,10 @@
0xe8 U+03a6 U+00d8
0xe9 U+0398
0xea U+03a9 U+2126
0xeb U+03b4
0xeb U+03b4 U+00f0
0xec U+221e
0xed U+03c6 U+00f8
0xee U+03b5
0xee U+03b5 U+2208
0xef U+2229
0xf0 U+2261
0xf1 U+00b1

View File

@ -418,7 +418,7 @@ static irqreturn_t cd2401_rxerr_interrupt(int irq, void *dev_id)
TTY_OVERRUN);
/*
If the flip buffer itself is
overflowing, we still loose
overflowing, we still lose
the next incoming character.
*/
if (tty_buffer_request_room(tty, 1) !=

View File

@ -2274,7 +2274,7 @@ rescan_last_byte:
continue; /* nothing to display */
}
/* Glyph not found */
if ((!(vc->vc_utf && !vc->vc_disp_ctrl) || c < 128) && !(c & ~charmask)) {
if ((!(vc->vc_utf && !vc->vc_disp_ctrl) && c < 128) && !(c & ~charmask)) {
/* In legacy mode use the glyph we get by a 1:1 mapping.
This would make absolutely no sense with Unicode in mind,
but do this for ASCII characters since a font may lack

View File

@ -974,6 +974,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
packet->ack = RCODE_SEND_ERROR;
return -1;
}
packet->payload_bus = payload_bus;
d[2].req_count = cpu_to_le16(packet->payload_length);
d[2].data_address = cpu_to_le32(payload_bus);
@ -1025,7 +1026,6 @@ static int handle_at_packet(struct context *context,
struct driver_data *driver_data;
struct fw_packet *packet;
struct fw_ohci *ohci = context->ohci;
dma_addr_t payload_bus;
int evt;
if (last->transfer_status == 0)
@ -1038,9 +1038,8 @@ static int handle_at_packet(struct context *context,
/* This packet was cancelled, just continue. */
return 1;
payload_bus = le32_to_cpu(last->data_address);
if (payload_bus != 0)
dma_unmap_single(ohci->card.device, payload_bus,
if (packet->payload_bus)
dma_unmap_single(ohci->card.device, packet->payload_bus,
packet->payload_length, DMA_TO_DEVICE);
evt = le16_to_cpu(last->transfer_status) & 0x1f;
@ -1697,6 +1696,10 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
if (packet->ack != 0)
goto out;
if (packet->payload_bus)
dma_unmap_single(ohci->card.device, packet->payload_bus,
packet->payload_length, DMA_TO_DEVICE);
log_ar_at_event('T', packet->speed, packet->header, 0x20);
driver_data->packet = NULL;
packet->ack = RCODE_CANCELLED;

View File

@ -207,6 +207,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
packet->speed = speed;
packet->generation = generation;
packet->ack = 0;
packet->payload_bus = 0;
}
/**
@ -581,6 +582,8 @@ fw_fill_response(struct fw_packet *response, u32 *request_header,
BUG();
return;
}
response->payload_bus = 0;
}
EXPORT_SYMBOL(fw_fill_response);
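
In isolation, the pattern this change adopts: record the DMA handle in the packet at map time so that every completion or cancellation path can unmap exactly once, with 0 as the not-mapped sentinel just as the patch uses. A sketch with hypothetical names:

#include <linux/dma-mapping.h>

struct tx_pkt {
	void *payload;
	size_t length;
	dma_addr_t payload_bus;		/* 0 means "not mapped" */
};

static int tx_pkt_map(struct device *dev, struct tx_pkt *p)
{
	p->payload_bus = dma_map_single(dev, p->payload, p->length,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, p->payload_bus))
		return -ENOMEM;
	return 0;
}

static void tx_pkt_unmap(struct device *dev, struct tx_pkt *p)
{
	if (p->payload_bus) {
		dma_unmap_single(dev, p->payload_bus, p->length,
				 DMA_TO_DEVICE);
		p->payload_bus = 0;	/* safe to call on every exit path */
	}
}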

View File

@ -27,6 +27,7 @@
#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
@ -153,6 +154,7 @@ struct fw_packet {
size_t header_length;
void *payload;
size_t payload_length;
dma_addr_t payload_bus;
u32 timestamp;
/*

View File

@ -847,9 +847,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
* and the registers being closely associated.
*
* According to chipset errata, on the 965GM, MSI interrupts may
* be lost or delayed
* be lost or delayed, but we use them anyway to avoid
* stuck interrupts on some machines.
*/
if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev))
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
intel_opregion_init(dev);

View File

@ -244,6 +244,10 @@ typedef struct drm_i915_private {
* List of objects currently involved in rendering from the
* ringbuffer.
*
* Includes buffers having the contents of their GPU caches
* flushed, not necessarily primitives. last_rendering_seqno
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
*/
struct list_head active_list;
@ -253,6 +257,8 @@ typedef struct drm_i915_private {
* still have a write_domain which needs to be flushed before
* unbinding.
*
* last_rendering_seqno is 0 while an object is in this list.
*
* A reference is held on the buffer while on this list.
*/
struct list_head flushing_list;
@ -261,6 +267,8 @@ typedef struct drm_i915_private {
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
* last_rendering_seqno is 0 while an object is in this list.
*
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
* freed, and we'll pull it off the list in the free path.
@ -371,8 +379,8 @@ struct drm_i915_gem_object {
uint32_t agp_type;
/**
* Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
* GEM_DOMAIN_CPU is not in the object's read domain.
* If present, while GEM_DOMAIN_CPU is in the read domain this array
* flags which individual pages are valid.
*/
uint8_t *page_cpu_valid;
};
@ -394,9 +402,6 @@ struct drm_i915_gem_request {
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
/** Cache domains that were flushed at the start of the request. */
uint32_t flush_domains;
struct list_head list;
};

View File

@ -33,21 +33,21 @@
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
static int
i915_gem_object_set_domain_range(struct drm_gem_object *obj,
uint64_t offset,
uint64_t size,
uint32_t read_domains,
uint32_t write_domain);
static int
i915_gem_set_domain(struct drm_gem_object *obj,
struct drm_file *file_priv,
uint32_t read_domains,
uint32_t write_domain);
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset,
uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
@ -162,8 +162,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
I915_GEM_DOMAIN_CPU, 0);
ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
args->size);
if (ret != 0) {
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@ -260,8 +260,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
mutex_unlock(&dev->struct_mutex);
return ret;
}
ret = i915_gem_set_domain(obj, file_priv,
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret)
goto fail;
@ -320,8 +319,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
mutex_lock(&dev->struct_mutex);
ret = i915_gem_set_domain(obj, file_priv,
I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
@ -397,7 +395,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
}
/**
* Called when user space prepares to use an object
* Called when user space prepares to use an object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
*/
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@ -405,11 +404,26 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_set_domain *args = data;
struct drm_gem_object *obj;
uint32_t read_domains = args->read_domains;
uint32_t write_domain = args->write_domain;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
/* Only handle setting domains to types used by the CPU. */
if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
return -EINVAL;
if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
return -EINVAL;
/* Having something in the write domain implies it's in the read
* domain, and only that read domain. Enforce that in the request.
*/
if (write_domain != 0 && read_domains != write_domain)
return -EINVAL;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
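
The sanity checks above enforce a simple invariant; restated as a self-contained predicate (the helper is hypothetical, the bit values match i915_drm.h):

#include <stdint.h>

#define I915_GEM_DOMAIN_CPU	0x00000001
#define I915_GEM_DOMAIN_GTT	0x00000040

/* Hypothetical predicate: only CPU and GTT domains are settable from
 * user space, and a non-zero write domain must equal the read domain. */
static int domains_valid(uint32_t read_domains, uint32_t write_domain)
{
	const uint32_t cpu_gtt = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;

	if (write_domain & ~cpu_gtt)
		return 0;
	if (read_domains & ~cpu_gtt)
		return 0;
	if (write_domain != 0 && read_domains != write_domain)
		return 0;
	return 1;
}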
@ -417,10 +431,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
obj, obj->size, args->read_domains, args->write_domain);
obj, obj->size, read_domains, write_domain);
#endif
ret = i915_gem_set_domain(obj, file_priv,
args->read_domains, args->write_domain);
if (read_domains & I915_GEM_DOMAIN_GTT) {
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
/* Silently promote "you're not bound, there was nothing to do"
* to success, since the client was just asking us to
* make sure everything was done.
*/
if (ret == -EINVAL)
ret = 0;
} else {
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
@ -455,10 +480,9 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
obj_priv = obj->driver_private;
/* Pinned buffers may be scanout, so flush the cache */
if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
i915_gem_clflush_object(obj);
drm_agp_chipset_flush(dev);
}
if (obj_priv->pin_count)
i915_gem_object_flush_cpu_write_domain(obj);
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
@ -532,7 +556,7 @@ i915_gem_object_free_page_list(struct drm_gem_object *obj)
}
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj)
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@ -546,8 +570,20 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
/* Move from whatever list we were on to the tail of execution. */
list_move_tail(&obj_priv->list,
&dev_priv->mm.active_list);
obj_priv->last_rendering_seqno = seqno;
}
static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
BUG_ON(!obj_priv->active);
list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
obj_priv->last_rendering_seqno = 0;
}
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
@ -562,6 +598,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
else
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
obj_priv->last_rendering_seqno = 0;
if (obj_priv->active) {
obj_priv->active = 0;
drm_gem_object_unreference(obj);
@ -610,10 +647,28 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
request->seqno = seqno;
request->emitted_jiffies = jiffies;
request->flush_domains = flush_domains;
was_empty = list_empty(&dev_priv->mm.request_list);
list_add_tail(&request->list, &dev_priv->mm.request_list);
/* Associate any objects on the flushing list matching the write
* domain we're flushing with our flush.
*/
if (flush_domains != 0) {
struct drm_i915_gem_object *obj_priv, *next;
list_for_each_entry_safe(obj_priv, next,
&dev_priv->mm.flushing_list, list) {
struct drm_gem_object *obj = obj_priv->obj;
if ((obj->write_domain & flush_domains) ==
obj->write_domain) {
obj->write_domain = 0;
i915_gem_object_move_to_active(obj, seqno);
}
}
}
if (was_empty && !dev_priv->mm.suspended)
schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
return seqno;
@ -676,30 +731,10 @@ i915_gem_retire_request(struct drm_device *dev,
__func__, request->seqno, obj);
#endif
if (obj->write_domain != 0) {
list_move_tail(&obj_priv->list,
&dev_priv->mm.flushing_list);
} else {
if (obj->write_domain != 0)
i915_gem_object_move_to_flushing(obj);
else
i915_gem_object_move_to_inactive(obj);
}
}
if (request->flush_domains != 0) {
struct drm_i915_gem_object *obj_priv, *next;
/* Clear the write domain and activity from any buffers
* that are just waiting for a flush matching the one retired.
*/
list_for_each_entry_safe(obj_priv, next,
&dev_priv->mm.flushing_list, list) {
struct drm_gem_object *obj = obj_priv->obj;
if (obj->write_domain & request->flush_domains) {
obj->write_domain = 0;
i915_gem_object_move_to_inactive(obj);
}
}
}
}
@ -892,25 +927,10 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int ret;
/* If there are writes queued to the buffer, flush and
* create a new seqno to wait for.
/* This function only exists to support waiting for existing rendering,
* not for emitting required flushes.
*/
if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
uint32_t write_domain = obj->write_domain;
#if WATCH_BUF
DRM_INFO("%s: flushing object %p from write domain %08x\n",
__func__, obj, write_domain);
#endif
i915_gem_flush(dev, 0, write_domain);
i915_gem_object_move_to_active(obj);
obj_priv->last_rendering_seqno = i915_add_request(dev,
write_domain);
BUG_ON(obj_priv->last_rendering_seqno == 0);
#if WATCH_LRU
DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
#endif
}
BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
/* If there is rendering queued on the buffer being evicted, wait for
* it.
@ -950,24 +970,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return -EINVAL;
}
/* Wait for any rendering to complete
*/
ret = i915_gem_object_wait_rendering(obj);
if (ret) {
DRM_ERROR("wait_rendering failed: %d\n", ret);
return ret;
}
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it. This will
* also ensure that all pending GPU writes are finished
* before we unbind.
*/
ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
I915_GEM_DOMAIN_CPU);
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret) {
DRM_ERROR("set_domain failed: %d\n", ret);
if (ret != -ERESTARTSYS)
DRM_ERROR("set_domain failed: %d\n", ret);
return ret;
}
@ -1082,6 +1094,21 @@ i915_gem_evict_something(struct drm_device *dev)
return ret;
}
static int
i915_gem_evict_everything(struct drm_device *dev)
{
int ret;
for (;;) {
ret = i915_gem_evict_something(dev);
if (ret != 0)
break;
}
if (ret == -ENOMEM)
return 0;
return ret;
}
static int
i915_gem_object_get_page_list(struct drm_gem_object *obj)
{
@ -1168,7 +1195,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
ret = i915_gem_evict_something(dev);
if (ret != 0) {
DRM_ERROR("Failed to evict a buffer %d\n", ret);
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to evict a buffer %d\n", ret);
return ret;
}
goto search_free;
@ -1228,6 +1256,143 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
}
/** Flushes any GPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
uint32_t seqno;
if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
return;
/* Queue the GPU write cache flushing we need. */
i915_gem_flush(dev, 0, obj->write_domain);
seqno = i915_add_request(dev, obj->write_domain);
obj->write_domain = 0;
i915_gem_object_move_to_active(obj, seqno);
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
if (obj->write_domain != I915_GEM_DOMAIN_GTT)
return;
/* No actual flushing is required for the GTT write domain. Writes
* to it immediately go to main memory as far as we know, so there's
* no chipset flush. It also doesn't land in render cache.
*/
obj->write_domain = 0;
}
/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
if (obj->write_domain != I915_GEM_DOMAIN_CPU)
return;
i915_gem_clflush_object(obj);
drm_agp_chipset_flush(dev);
obj->write_domain = 0;
}
/**
* Moves a single object to the GTT read, and possibly write domain.
*
* This function returns when the move is complete, including waiting on
* flushes to occur.
*/
static int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int ret;
/* Not valid to be called on unbound objects. */
if (obj_priv->gtt_space == NULL)
return -EINVAL;
i915_gem_object_flush_gpu_write_domain(obj);
/* Wait on any GPU rendering and flushing to occur. */
ret = i915_gem_object_wait_rendering(obj);
if (ret != 0)
return ret;
/* If we're writing through the GTT domain, then CPU and GPU caches
* will need to be invalidated at next use.
*/
if (write)
obj->read_domains &= I915_GEM_DOMAIN_GTT;
i915_gem_object_flush_cpu_write_domain(obj);
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
obj->write_domain = I915_GEM_DOMAIN_GTT;
obj_priv->dirty = 1;
}
return 0;
}
/**
* Moves a single object to the CPU read, and possibly write domain.
*
* This function returns when the move is complete, including waiting on
* flushes to occur.
*/
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
struct drm_device *dev = obj->dev;
int ret;
i915_gem_object_flush_gpu_write_domain(obj);
/* Wait on any GPU rendering and flushing to occur. */
ret = i915_gem_object_wait_rendering(obj);
if (ret != 0)
return ret;
i915_gem_object_flush_gtt_write_domain(obj);
/* If we have a partially-valid cache of the object in the CPU,
* finish invalidating it and free the per-page flags.
*/
i915_gem_object_set_to_full_cpu_read_domain(obj);
/* Flush the CPU cache if it's still invalid. */
if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj);
drm_agp_chipset_flush(dev);
obj->read_domains |= I915_GEM_DOMAIN_CPU;
}
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
*/
if (write) {
obj->read_domains &= I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
}
return 0;
}
/*
* Set the next domain for the specified object. This
* may not actually perform the necessary flushing/invalidating though,
@ -1339,16 +1504,18 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
* MI_FLUSH
* drm_agp_chipset_flush
*/
static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain)
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
int ret;
BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
#if WATCH_BUF
DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
@ -1385,34 +1552,11 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
__func__, flush_domains, invalidate_domains);
#endif
/*
* If we're invalidating the CPU cache and flushing a GPU cache,
* then pause for rendering so that the GPU caches will be
* flushed before the cpu cache is invalidated
*/
if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
(flush_domains & ~(I915_GEM_DOMAIN_CPU |
I915_GEM_DOMAIN_GTT))) {
ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
}
i915_gem_clflush_object(obj);
}
if ((write_domain | flush_domains) != 0)
obj->write_domain = write_domain;
/* If we're invalidating the CPU domain, clear the per-page CPU
* domain list as well.
*/
if (obj_priv->page_cpu_valid != NULL &&
(write_domain != 0 ||
read_domains & I915_GEM_DOMAIN_CPU)) {
drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
DRM_MEM_DRIVER);
obj_priv->page_cpu_valid = NULL;
}
obj->read_domains = read_domains;
dev->invalidate_domains |= invalidate_domains;
@ -1423,47 +1567,94 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
obj->read_domains, obj->write_domain,
dev->invalidate_domains, dev->flush_domains);
#endif
return 0;
}
/**
* Set the read/write domain on a range of the object.
* Moves the object from a partially CPU read to a full one.
*
* Currently only implemented for CPU reads, otherwise drops to normal
* i915_gem_object_set_domain().
* Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
* and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
*/
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
if (!obj_priv->page_cpu_valid)
return;
/* If we're partially in the CPU read domain, finish moving it in.
*/
if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
int i;
for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
if (obj_priv->page_cpu_valid[i])
continue;
drm_clflush_pages(obj_priv->page_list + i, 1);
}
drm_agp_chipset_flush(dev);
}
/* Free the page_cpu_valid mappings which are now stale, whether
* or not we've got I915_GEM_DOMAIN_CPU.
*/
drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
DRM_MEM_DRIVER);
obj_priv->page_cpu_valid = NULL;
}
/**
* Set the CPU read domain on a range of the object.
*
* The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
* not entirely valid. The page_cpu_valid member of the object flags which
* pages have been flushed, and will be respected by
* i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
* of the whole object.
*
* This function returns when the move is complete, including waiting on
* flushes to occur.
*/
static int
i915_gem_object_set_domain_range(struct drm_gem_object *obj,
uint64_t offset,
uint64_t size,
uint32_t read_domains,
uint32_t write_domain)
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset, uint64_t size)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int ret, i;
int i, ret;
if (obj->read_domains & I915_GEM_DOMAIN_CPU)
if (offset == 0 && size == obj->size)
return i915_gem_object_set_to_cpu_domain(obj, 0);
i915_gem_object_flush_gpu_write_domain(obj);
/* Wait on any GPU rendering and flushing to occur. */
ret = i915_gem_object_wait_rendering(obj);
if (ret != 0)
return ret;
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're already fully in the CPU read domain, we're done. */
if (obj_priv->page_cpu_valid == NULL &&
(obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
return 0;
if (read_domains != I915_GEM_DOMAIN_CPU ||
write_domain != 0)
return i915_gem_object_set_domain(obj,
read_domains, write_domain);
/* Wait on any GPU rendering to the object to be flushed. */
ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
/* Otherwise, create/clear the per-page CPU read domain flag if we're
* newly adding I915_GEM_DOMAIN_CPU
*/
if (obj_priv->page_cpu_valid == NULL) {
obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
DRM_MEM_DRIVER);
}
if (obj_priv->page_cpu_valid == NULL)
return -ENOMEM;
} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
/* Flush the cache on any pages that are still invalid from the CPU's
* perspective.
*/
for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
i++) {
if (obj_priv->page_cpu_valid[i])
continue;
@ -1472,39 +1663,14 @@ i915_gem_object_set_domain_range(struct drm_gem_object *obj,
obj_priv->page_cpu_valid[i] = 1;
}
return 0;
}
/**
* Once all of the objects have been set in the proper domain,
* perform the necessary flush and invalidate operations.
*
* Returns the write domains flushed, for use in flush tracking.
*/
static uint32_t
i915_gem_dev_set_domain(struct drm_device *dev)
{
uint32_t flush_domains = dev->flush_domains;
/*
* Now that all the buffers are synced to the proper domains,
* flush and invalidate the collected domains
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
__func__,
dev->invalidate_domains,
dev->flush_domains);
#endif
i915_gem_flush(dev,
dev->invalidate_domains,
dev->flush_domains);
dev->invalidate_domains = 0;
dev->flush_domains = 0;
}
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
return flush_domains;
obj->read_domains |= I915_GEM_DOMAIN_CPU;
return 0;
}
/**
@ -1585,6 +1751,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
return -EINVAL;
}
if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
reloc.read_domains & I915_GEM_DOMAIN_CPU) {
DRM_ERROR("reloc with read/write CPU domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc.target_handle,
(int) reloc.offset,
reloc.read_domains,
reloc.write_domain);
return -EINVAL;
}
if (reloc.write_domain && target_obj->pending_write_domain &&
reloc.write_domain != target_obj->pending_write_domain) {
DRM_ERROR("Write domain conflict: "
@ -1625,19 +1803,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
continue;
}
/* Now that we're going to actually write some data in,
* make sure that any rendering using this buffer's contents
* is completed.
*/
i915_gem_object_wait_rendering(obj);
/* As we're writing through the gtt, flush
* any CPU writes before we write the relocations
*/
if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
drm_agp_chipset_flush(dev);
obj->write_domain = 0;
ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret != 0) {
drm_gem_object_unreference(target_obj);
i915_gem_object_unpin(obj);
return -EINVAL;
}
/* Map the page containing the relocation we're going to
@ -1779,6 +1949,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
int ret, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains;
int pin_tries;
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@ -1827,14 +1998,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -EBUSY;
}
/* Zero the global flush/invalidate flags. These
* will be modified as each object is bound to the
* gtt
*/
dev->invalidate_domains = 0;
dev->flush_domains = 0;
/* Look up object handles and perform the relocations */
/* Look up object handles */
for (i = 0; i < args->buffer_count; i++) {
object_list[i] = drm_gem_object_lookup(dev, file_priv,
exec_list[i].handle);
@ -1844,17 +2008,39 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
ret = -EBADF;
goto err;
}
}
object_list[i]->pending_read_domains = 0;
object_list[i]->pending_write_domain = 0;
ret = i915_gem_object_pin_and_relocate(object_list[i],
file_priv,
&exec_list[i]);
if (ret) {
DRM_ERROR("object bind and relocate failed %d\n", ret);
/* Pin and relocate */
for (pin_tries = 0; ; pin_tries++) {
ret = 0;
for (i = 0; i < args->buffer_count; i++) {
object_list[i]->pending_read_domains = 0;
object_list[i]->pending_write_domain = 0;
ret = i915_gem_object_pin_and_relocate(object_list[i],
file_priv,
&exec_list[i]);
if (ret)
break;
pinned = i + 1;
}
/* success */
if (ret == 0)
break;
/* error other than GTT full, or we've already tried again */
if (ret != -ENOMEM || pin_tries >= 1) {
DRM_ERROR("Failed to pin buffers %d\n", ret);
goto err;
}
pinned = i + 1;
/* unpin all of our buffers */
for (i = 0; i < pinned; i++)
i915_gem_object_unpin(object_list[i]);
/* evict everyone we can from the aperture */
ret = i915_gem_evict_everything(dev);
if (ret)
goto err;
}
/* Set the pending read domains for the batch buffer to COMMAND */
@ -1864,21 +2050,37 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_verify_inactive(dev, __FILE__, __LINE__);
/* Zero the global flush/invalidate flags. These
* will be modified as new domains are computed
* for each object
*/
dev->invalidate_domains = 0;
dev->flush_domains = 0;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
/* make sure all previous memory operations have passed */
ret = i915_gem_object_set_domain(obj,
obj->pending_read_domains,
obj->pending_write_domain);
if (ret)
goto err;
/* Compute new gpu domains and update invalidate/flush */
i915_gem_object_set_to_gpu_domain(obj,
obj->pending_read_domains,
obj->pending_write_domain);
}
i915_verify_inactive(dev, __FILE__, __LINE__);
/* Flush/invalidate caches and chipset buffer */
flush_domains = i915_gem_dev_set_domain(dev);
if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
__func__,
dev->invalidate_domains,
dev->flush_domains);
#endif
i915_gem_flush(dev,
dev->invalidate_domains,
dev->flush_domains);
if (dev->flush_domains)
(void)i915_add_request(dev, dev->flush_domains);
}
i915_verify_inactive(dev, __FILE__, __LINE__);
@ -1898,8 +2100,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
~0);
#endif
(void)i915_add_request(dev, flush_domains);
/* Exec the batchbuffer */
ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
if (ret) {
@ -1927,10 +2127,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_file_priv->mm.last_gem_seqno = seqno;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
struct drm_i915_gem_object *obj_priv = obj->driver_private;
i915_gem_object_move_to_active(obj);
obj_priv->last_rendering_seqno = seqno;
i915_gem_object_move_to_active(obj, seqno);
#if WATCH_LRU
DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
@ -2061,11 +2259,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
/* XXX - flush the CPU caches for pinned objects
* as the X server doesn't manage domains yet
*/
if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
drm_agp_chipset_flush(dev);
obj->write_domain = 0;
}
i915_gem_object_flush_cpu_write_domain(obj);
args->offset = obj_priv->gtt_offset;
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@ -2167,29 +2361,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}
static int
i915_gem_set_domain(struct drm_gem_object *obj,
struct drm_file *file_priv,
uint32_t read_domains,
uint32_t write_domain)
{
struct drm_device *dev = obj->dev;
int ret;
uint32_t flush_domains;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
if (ret)
return ret;
flush_domains = i915_gem_dev_set_domain(obj->dev);
if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
(void) i915_add_request(dev, flush_domains);
return 0;
}
/** Unbinds all objects that are on the given buffer list. */
static int
i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)

View File

@ -166,10 +166,9 @@ static int i915_gem_request_info(char *buf, char **start, off_t offset,
list_for_each_entry(gem_request, &dev_priv->mm.request_list,
list)
{
DRM_PROC_PRINT(" %d @ %d %08x\n",
DRM_PROC_PRINT(" %d @ %d\n",
gem_request->seqno,
(int) (jiffies - gem_request->emitted_jiffies),
gem_request->flush_domains);
(int) (jiffies - gem_request->emitted_jiffies));
}
if (len > request + offset)
return request;

View File

@ -119,9 +119,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
dcc & DCC_CHANNEL_XOR_DISABLE) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if (IS_I965GM(dev) || IS_GM45(dev)) {
/* GM965 only does bit 11-based channel
* randomization
} else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
(dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
/* GM965/GM45 does either bit 11 or bit 17
* swizzling.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
swizzle_y = I915_BIT_6_SWIZZLE_9_11;

View File

@ -522,6 +522,7 @@
#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
#define DCC_ADDRESSING_MODE_MASK (3 << 0)
#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
/** 965 MCH register controlling DRAM channel configuration */
#define C0DRB3 0x10206

Some files were not shown because too many files have changed in this diff