ASoC: Updates for v4.3

Not many updates to the core here, but an awful lot of driver updates
 this time round:
 
  - Factoring out of AC'97 reset code into the core
  - New drivers for Cirrus CS4349, GTM601, InvenSense ICS43432, Realtek
    RT298 and ST STI controllers.
  - Machine drivers for Rockchip systems with MAX98090 and RT5645 and
    RT5650.
  - Initial driver support for Intel Skylake devices.
  - A large number of cleanups from Lars-Peter Clausen and Axel Lin.
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJV4xxpAAoJECTWi3JdVIfQjuUH/3xuZSROdiVUit9zWHepebqR
 4e9bgu9vN9RfFZ3BqYC/0TIv8PwtwvDFu3xJoCxy52wcXWuz/H4itH9xh21EiL9l
 4s14Sfkj8BA332aQMXKMFBtpB5S1feMFhBSATihfXlfLCLby9Gm0GDoLiiDklX47
 svysCy9G9CmCSDufZ6TcO58PUJvjrQaI0SY5oAG2JnHW7XmnG6a7s5gJR4bu/FXs
 oJfkLbpfIl03JdJ/q1IoTm0UUyRaTx7cWKoOtp8icoza8cfnsUG8uoCAvbYcG3Li
 /1JtZrVjZPM+ZpM0+5gl9mhRpj8zJksDgTPT2mlzWfsgJiuo48w9xqlqItvJjG8=
 =92/y
 -----END PGP SIGNATURE-----

Merge tag 'asoc-v4.2-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus

ASoC: Updates for v4.3

Not many updates to the core here, but an awful lot of driver updates
this time round:

 - Factoring out of AC'97 reset code into the core
 - New drivers for Cirrus CS4349, GTM601, InvenSense ICS43432, Realtek
   RT298 and ST STI controllers.
 - Machine drivers for Rockchip systems with MAX98090 and RT5645 and
   RT5650.
 - Initial driver support for Intel Skylake devices.
 - A large number of cleanups from Lars-Peter Clausen and Axel Lin.

Takashi Iwai committed 2015-08-31 16:25:22 +02:00
commit 08ceab9d87
699 changed files with 19561 additions and 6757 deletions

.get_maintainer.ignore (new file)

@ -0,0 +1 @@
Christoph Hellwig <hch@lst.de>


@ -17,6 +17,7 @@ Aleksey Gorelov <aleksey_gorelov@phoenix.com>
Al Viro <viro@ftp.linux.org.uk>
Al Viro <viro@zenIV.linux.org.uk>
Andreas Herrmann <aherrman@de.ibm.com>
Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
Andrew Morton <akpm@linux-foundation.org>
Andrew Vasquez <andrew.vasquez@qlogic.com>
Andy Adamson <andros@citi.umich.edu>


@ -108,7 +108,7 @@
<sect1><title>ASoC Core API</title>
!Iinclude/sound/soc.h
!Esound/soc/soc-core.c
!Esound/soc/soc-cache.c
<!-- !Esound/soc/soc-cache.c no docbook comments here -->
!Esound/soc/soc-devres.c
!Esound/soc/soc-io.c
!Esound/soc/soc-pcm.c


@ -199,6 +199,7 @@ nodes to be present and contain the properties described below.
"qcom,kpss-acc-v1"
"qcom,kpss-acc-v2"
"rockchip,rk3066-smp"
"ste,dbx500-smp"
- cpu-release-addr
Usage: required for systems that have an "enable-method"


@ -82,6 +82,9 @@ Optional properties:
- id: If there are multiple instance of the same type, in order to
differentiate between each instance "id" can be used (e.g., multi-lane PCIe
PHY). If "id" is not provided, it is set to default value of '1'.
- syscon-pllreset: Handle to system control region that contains the
CTRL_CORE_SMA_SW_0 register and register offset to the CTRL_CORE_SMA_SW_0
register that contains the SATA_PLL_SOFT_RESET bit. Only valid for sata_phy.
This is usually a subnode of ocp2scp to which it is connected.
@ -100,3 +103,16 @@ usb3phy@4a084400 {
"sysclk",
"refclk";
};
sata_phy: phy@4A096000 {
compatible = "ti,phy-pipe3-sata";
reg = <0x4A096000 0x80>, /* phy_rx */
<0x4A096400 0x64>, /* phy_tx */
<0x4A096800 0x40>; /* pll_ctrl */
reg-names = "phy_rx", "phy_tx", "pll_ctrl";
ctrl-module = <&omap_control_sata>;
clocks = <&sys_clkin1>, <&sata_ref_clk>;
clock-names = "sysclk", "refclk";
syscon-pllreset = <&scm_conf 0x3fc>;
#phy-cells = <0>;
};


@ -0,0 +1,19 @@
CS4349 audio CODEC
Required properties:
- compatible : "cirrus,cs4349"
- reg : the I2C address of the device for I2C
Optional properties:
- reset-gpios : a GPIO spec for the reset pin.
Example:
codec: cs4349@48 {
compatible = "cirrus,cs4349";
reg = <0x48>;
reset-gpios = <&gpio 54 0>;
};


@ -0,0 +1,17 @@
Invensense ICS-43432 MEMS microphone with I2S output.
There are no software configuration options for this device; the only
host connection is the I2S interface. Apart from the requirement on clock
frequency (460 kHz to 3.379 MHz according to the data sheet), there must be
64 clock cycles in each stereo output frame; 24 of the 32 available bits
contain audio data. A hardware pin determines whether the device outputs data
on the left or right channel of the I2S frame.
Required properties:
- compatible : Must be "invensense,ics43432"
Example:
ics43432: ics43432 {
compatible = "invensense,ics43432";
};
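
Side note (editorial illustration, not part of the binding): since each stereo
frame occupies 64 SCK cycles, the data-sheet clock window quoted above fixes
the usable sample-rate range. A minimal standalone C sketch of that arithmetic:

#include <stdio.h>

int main(void)
{
	const double sck_min_hz = 460000.0;   /* data-sheet minimum SCK */
	const double sck_max_hz = 3379000.0;  /* data-sheet maximum SCK */
	const double cycles_per_frame = 64.0; /* 2 channels x 32 bits */

	/* sample rate = SCK / cycles per stereo frame */
	printf("min rate: %.1f Hz\n", sck_min_hz / cycles_per_frame); /* ~7187.5  */
	printf("max rate: %.1f Hz\n", sck_max_hz / cycles_per_frame); /* ~52796.9 */

	/* the common 8 kHz to 48 kHz rates all fall inside this window */
	return 0;
}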


@ -4,7 +4,11 @@ This node models the Maxim MAX98357A DAC.
Required properties:
- compatible : "maxim,max98357a"
- sdmode-gpios : GPIO specifier for the GPIO -> DAC SDMODE pin
Optional properties:
- sdmode-gpios : GPIO specifier for the chip's SD_MODE pin.
If this option is not specified, the driver does not manage
the pin state (e.g. the chip is always on).
Example:


@ -18,6 +18,12 @@ Required properties:
- rcar_sound,src : Should contain SRC feature.
The number of SRC subnode should be same as HW.
see below for detail.
- rcar_sound,ctu : Should contain CTU feature.
The number of CTU subnode should be same as HW.
see below for detail.
- rcar_sound,mix : Should contain MIX feature.
The number of MIX subnode should be same as HW.
see below for detail.
- rcar_sound,dvc : Should contain DVC feature.
The number of DVC subnode should be same as HW.
see below for detail.
@ -90,6 +96,22 @@ rcar_sound: sound@ec500000 {
};
};
rcar_sound,mix {
mix0: mix@0 { };
mix1: mix@1 { };
};
rcar_sound,ctu {
ctu00: ctu@0 { };
ctu01: ctu@1 { };
ctu02: ctu@2 { };
ctu03: ctu@3 { };
ctu10: ctu@4 { };
ctu11: ctu@5 { };
ctu12: ctu@6 { };
ctu13: ctu@7 { };
};
rcar_sound,src {
src0: src@0 {
interrupts = <0 352 IRQ_TYPE_LEVEL_HIGH>;


@ -6,6 +6,7 @@ Required properties:
- compatible : "renesas,rsrc-card,<board>"
Examples with soctypes are:
- "renesas,rsrc-card"
- "renesas,rsrc-card,lager"
- "renesas,rsrc-card,koelsch"
Optional properties:
@ -29,6 +30,12 @@ Optional subnode properties:
- frame-inversion : bool property. Add this if the
dai-link uses frame clock inversion.
- convert-rate : platform specified sampling rate convert
- audio-prefix : see audio-routing
- audio-routing : A list of the connections between audio components.
Each entry is a pair of strings, the first being the connection's sink,
the second being the connection's source. Valid names for sources and sinks
depend on the connected components; use audio-prefix if some components use
the same sink/source names. audio-routing can be used when compatible is "renesas,rsrc-card".
Required CPU/CODEC subnodes properties:


@ -0,0 +1,19 @@
ROCKCHIP with MAX98090 CODEC
Required properties:
- compatible: "rockchip,rockchip-audio-max98090"
- rockchip,model: The user-visible name of this sound complex
- rockchip,i2s-controller: The phandle of the Rockchip I2S controller that's
connected to the CODEC
- rockchip,audio-codec: The phandle of the MAX98090 audio codec
- rockchip,headset-codec: The phandle of Ext chip for jack detection
Example:
sound {
compatible = "rockchip,rockchip-audio-max98090";
rockchip,model = "ROCKCHIP-I2S";
rockchip,i2s-controller = <&i2s>;
rockchip,audio-codec = <&max98090>;
rockchip,headset-codec = <&headsetcodec>;
};


@ -0,0 +1,17 @@
ROCKCHIP with RT5645/RT5650 CODECS
Required properties:
- compatible: "rockchip,rockchip-audio-rt5645"
- rockchip,model: The user-visible name of this sound complex
- rockchip,i2s-controller: The phandle of the Rockchip I2S controller that's
connected to the CODEC
- rockchip,audio-codec: The phandle of the RT5645/RT5650 audio codec
Example:
sound {
compatible = "rockchip,rockchip-audio-rt5645";
rockchip,model = "ROCKCHIP-I2S";
rockchip,i2s-controller = <&i2s>;
rockchip,audio-codec = <&rt5645>;
};


@ -0,0 +1,155 @@
STMicroelectronics sti ASoC cards
The sti ASoC Sound Card can be used for all sti SoCs using the internal sti-sas
codec or external codecs.
The sti sound drivers allow exposing the sti SoC audio interfaces through the
generic ASoC simple card. For details about the sound card declaration, please
refer to Documentation/devicetree/bindings/sound/simple-card.txt.
1) sti-uniperiph-dai: audio dai device.
---------------------------------------
Required properties:
- compatible: "st,sti-uni-player" or "st,sti-uni-reader"
- st,syscfg: phandle to boot-device system configuration registers
- clock-names: name of the clocks listed in clocks property in the same order
- reg: CPU DAI IP Base address and size entries, listed in same
order than the CPU_DAI properties.
- reg-names: names of the mapped memory regions listed in regs property in
the same order.
- interrupts: CPU_DAI interrupt line, listed in the same order than the
CPU_DAI properties.
- dma: CPU_DAI DMA controller phandle and DMA request line, listed in the same
order than the CPU_DAI properties.
- dma-names: identifier string for each DMA request line in the dmas property.
"tx" for "st,sti-uni-player" compatibility
"rx" for "st,sti-uni-reader" compatibility
- version: IP version integrated in SOC.
- dai-name: DAI name that describes the IP.
Required properties ("st,sti-uni-player" compatibility only):
- clocks: CPU_DAI IP clock source, listed in the same order than the
CPU_DAI properties.
- uniperiph-id: internal SOC IP instance ID.
- IP mode: IP working mode depending on associated codec.
"HDMI" connected to HDMI codec IP and IEC HDMI formats.
"SPDIF" connected to SPDIF codec and supports SPDIF formats.
"PCM" PCM standard mode for I2S or TDM bus.
Optional properties:
- pinctrl-0: defined for CPU_DAI@1 and CPU_DAI@4 to describe I2S PIOs for
external codecs connection.
- pinctrl-names: should contain only one value - "default".
Example:
sti_uni_player2: sti-uni-player@2 {
compatible = "st,sti-uni-player";
status = "okay";
#sound-dai-cells = <0>;
st,syscfg = <&syscfg_core>;
clocks = <&clk_s_d0_flexgen CLK_PCM_2>;
reg = <0x8D82000 0x158>;
interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
dmas = <&fdma0 4 0 1>;
dai-name = "Uni Player #1 (DAC)";
dma-names = "tx";
uniperiph-id = <2>;
version = <5>;
mode = "PCM";
};
sti_uni_player3: sti-uni-player@3 {
compatible = "st,sti-uni-player";
status = "okay";
#sound-dai-cells = <0>;
st,syscfg = <&syscfg_core>;
clocks = <&clk_s_d0_flexgen CLK_SPDIFF>;
reg = <0x8D85000 0x158>;
interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
dmas = <&fdma0 7 0 1>;
dma-names = "tx";
dai-name = "Uni Player #1 (PIO)";
uniperiph-id = <3>;
version = <5>;
mode = "SPDIF";
};
sti_uni_reader1: sti-uni-reader@1 {
compatible = "st,sti-uni-reader";
status = "disabled";
#sound-dai-cells = <0>;
st,syscfg = <&syscfg_core>;
reg = <0x8D84000 0x158>;
interrupts = <GIC_SPI 88 IRQ_TYPE_NONE>;
dmas = <&fdma0 6 0 1>;
dma-names = "rx";
dai-name = "Uni Reader #1 (HDMI RX)";
version = <3>;
};
2) sti-sas-codec: internal audio codec IPs driver
-------------------------------------------------
Required properties:
- compatible: "st,sti<chip>-sas-codec" .
Should be chip "st,stih416-sas-codec" or "st,stih407-sas-codec"
- st,syscfg: phandle to boot-device system configuration registers.
- pinctrl-0: SPDIF PIO description.
- pinctrl-names: should contain only one value - "default".
Example:
sti_sas_codec: sti-sas-codec {
compatible = "st,stih407-sas-codec";
#sound-dai-cells = <1>;
st,reg_audio = <&syscfg_core>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spdif_out >;
};
Example of audio card declaration:
sound {
compatible = "simple-audio-card";
simple-audio-card,name = "sti audio card";
status = "okay";
simple-audio-card,dai-link@0 {
/* DAC */
format = "i2s";
dai-tdm-slot-width = <32>;
cpu {
sound-dai = <&sti_uni_player2>;
};
codec {
sound-dai = <&sti_sasg_codec 1>;
};
};
simple-audio-card,dai-link@1 {
/* SPDIF */
format = "left_j";
cpu {
sound-dai = <&sti_uni_player3>;
};
codec {
sound-dai = <&sti_sasg_codec 0>;
};
};
};


@ -110,6 +110,7 @@ ingenic Ingenic Semiconductor
innolux Innolux Corporation
intel Intel Corporation
intercontrol Inter Control Group
invensense InvenSense Inc.
isee ISEE 2007 S.L.
isil Intersil
karo Ka-Ro electronics GmbH
@ -150,6 +151,7 @@ nvidia NVIDIA
nxp NXP Semiconductors
onnn ON Semiconductor Corp.
opencores OpenCores.org
option Option NV
ortustech Ortus Technology Co., Ltd.
ovti OmniVision Technologies
panasonic Panasonic Corporation


@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2
byte 5: 0 z6 z5 z4 z3 z2 z1 z0
Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
the DualPoint Stick. For non interleaved dualpoint devices the pointingstick
buttons get reported separately in the PSM, PSR and PSL bits.
the DualPoint Stick. The M, R and L bits signal the combined status of both
the pointingstick and touchpad buttons, except for Dell dualpoint devices
where the pointingstick buttons get reported separately in the PSM, PSR
and PSL bits.
Dualpoint device -- interleaved packet format
---------------------------------------------


@ -3587,6 +3587,15 @@ S: Maintained
F: drivers/gpu/drm/rockchip/
F: Documentation/devicetree/bindings/video/rockchip*
DRM DRIVERS FOR STI
M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
M: Vincent Abriou <vincent.abriou@st.com>
L: dri-devel@lists.freedesktop.org
T: git http://git.linaro.org/people/benjamin.gaignard/kernel.git
S: Maintained
F: drivers/gpu/drm/sti
F: Documentation/devicetree/bindings/gpu/st,stih4xx.txt
DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org


@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 2
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc8
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*
@ -597,6 +597,11 @@ endif # $(dot-config)
# Defaults to vmlinux, but the arch makefile usually adds further targets
all: vmlinux
# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
# values of the respective KBUILD_* variables
ARCH_CPPFLAGS :=
ARCH_AFLAGS :=
ARCH_CFLAGS :=
include arch/$(SRCARCH)/Makefile
KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
@ -848,10 +853,10 @@ export mod_strip_cmd
mod_compress_cmd = true
ifdef CONFIG_MODULE_COMPRESS
ifdef CONFIG_MODULE_COMPRESS_GZIP
mod_compress_cmd = gzip -n
mod_compress_cmd = gzip -n -f
endif # CONFIG_MODULE_COMPRESS_GZIP
ifdef CONFIG_MODULE_COMPRESS_XZ
mod_compress_cmd = xz
mod_compress_cmd = xz -f
endif # CONFIG_MODULE_COMPRESS_XZ
endif # CONFIG_MODULE_COMPRESS
export mod_compress_cmd


@ -313,11 +313,11 @@ config ARC_PAGE_SIZE_8K
config ARC_PAGE_SIZE_16K
bool "16KB"
depends on ARC_MMU_V3
depends on ARC_MMU_V3 || ARC_MMU_V4
config ARC_PAGE_SIZE_4K
bool "4KB"
depends on ARC_MMU_V3
depends on ARC_MMU_V3 || ARC_MMU_V4
endchoice
@ -365,6 +365,11 @@ config ARC_HAS_LLSC
default y
depends on !ARC_CANT_LLSC
config ARC_STAR_9000923308
bool "Workaround for llock/scond livelock"
default y
depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
config ARC_HAS_SWAPE
bool "Insn: SWAPE (endian-swap)"
default y
@ -379,6 +384,10 @@ config ARC_HAS_LL64
dest operands with 2 possible source operands.
default y
config ARC_HAS_DIV_REM
bool "Insn: div, divu, rem, remu"
default y
config ARC_HAS_RTC
bool "Local 64-bit r/o cycle counter"
default n


@ -36,8 +36,16 @@ cflags-$(atleast_gcc44) += -fsection-anchors
cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
ifdef CONFIG_ISA_ARCV2
ifndef CONFIG_ARC_HAS_LL64
cflags-$(CONFIG_ISA_ARCV2) += -mno-ll64
cflags-y += -mno-ll64
endif
ifndef CONFIG_ARC_HAS_DIV_REM
cflags-y += -mno-div-rem
endif
endif
cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables


@ -89,11 +89,10 @@
#define ECR_C_BIT_DTLB_LD_MISS 8
#define ECR_C_BIT_DTLB_ST_MISS 9
/* Auxiliary registers */
#define AUX_IDENTITY 4
#define AUX_INTR_VEC_BASE 0x25
#define AUX_NON_VOL 0x5e
/*
* Floating Pt Registers
@ -240,9 +239,9 @@ struct bcr_extn_xymem {
struct bcr_perip {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int start:8, pad2:8, sz:8, pad:8;
unsigned int start:8, pad2:8, sz:8, ver:8;
#else
unsigned int pad:8, sz:8, pad2:8, start:8;
unsigned int ver:8, sz:8, pad2:8, start:8;
#endif
};


@ -23,33 +23,60 @@
#define atomic_set(v, i) (((v)->counter) = (i))
#ifdef CONFIG_ISA_ARCV2
#define PREFETCHW " prefetchw [%1] \n"
#else
#define PREFETCHW
#ifdef CONFIG_ARC_STAR_9000923308
#define SCOND_FAIL_RETRY_VAR_DEF \
unsigned int delay = 1, tmp; \
#define SCOND_FAIL_RETRY_ASM \
" bz 4f \n" \
" ; --- scond fail delay --- \n" \
" mov %[tmp], %[delay] \n" /* tmp = delay */ \
"2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
" sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
" rol %[delay], %[delay] \n" /* delay *= 2 */ \
" b 1b \n" /* start over */ \
"4: ; --- success --- \n" \
#define SCOND_FAIL_RETRY_VARS \
,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \
#else /* !CONFIG_ARC_STAR_9000923308 */
#define SCOND_FAIL_RETRY_VAR_DEF
#define SCOND_FAIL_RETRY_ASM \
" bnz 1b \n" \
#define SCOND_FAIL_RETRY_VARS
#endif
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
unsigned int temp; \
unsigned int val; \
SCOND_FAIL_RETRY_VAR_DEF \
\
__asm__ __volatile__( \
"1: \n" \
PREFETCHW \
" llock %0, [%1] \n" \
" " #asm_op " %0, %0, %2 \n" \
" scond %0, [%1] \n" \
" bnz 1b \n" \
: "=&r"(temp) /* Early clobber, to prevent reg reuse */ \
: "r"(&v->counter), "ir"(i) \
"1: llock %[val], [%[ctr]] \n" \
" " #asm_op " %[val], %[val], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
" \n" \
SCOND_FAIL_RETRY_ASM \
\
: [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
SCOND_FAIL_RETRY_VARS \
: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
[i] "ir" (i) \
: "cc"); \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned int temp; \
unsigned int val; \
SCOND_FAIL_RETRY_VAR_DEF \
\
/* \
* Explicit full memory barrier needed before/after as \
@ -58,19 +85,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
smp_mb(); \
\
__asm__ __volatile__( \
"1: \n" \
PREFETCHW \
" llock %0, [%1] \n" \
" " #asm_op " %0, %0, %2 \n" \
" scond %0, [%1] \n" \
" bnz 1b \n" \
: "=&r"(temp) \
: "r"(&v->counter), "ir"(i) \
"1: llock %[val], [%[ctr]] \n" \
" " #asm_op " %[val], %[val], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
" \n" \
SCOND_FAIL_RETRY_ASM \
\
: [val] "=&r" (val) \
SCOND_FAIL_RETRY_VARS \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
: "cc"); \
\
smp_mb(); \
\
return temp; \
return val; \
}
#else /* !CONFIG_ARC_HAS_LLSC */
@ -150,6 +179,9 @@ ATOMIC_OP(and, &=, and)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS
/**
* __atomic_add_unless - add unless the number is a given value


@ -20,20 +20,20 @@
struct pt_regs {
/* Real registers */
long bta; /* bta_l1, bta_l2, erbta */
unsigned long bta; /* bta_l1, bta_l2, erbta */
long lp_start, lp_end, lp_count;
unsigned long lp_start, lp_end, lp_count;
long status32; /* status32_l1, status32_l2, erstatus */
long ret; /* ilink1, ilink2 or eret */
long blink;
long fp;
long r26; /* gp */
unsigned long status32; /* status32_l1, status32_l2, erstatus */
unsigned long ret; /* ilink1, ilink2 or eret */
unsigned long blink;
unsigned long fp;
unsigned long r26; /* gp */
long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
long sp; /* user/kernel sp depending on where we came from */
long orig_r0;
unsigned long sp; /* User/Kernel depending on where we came from */
unsigned long orig_r0;
/*
* To distinguish bet excp, syscall, irq
@ -55,13 +55,13 @@ struct pt_regs {
unsigned long event;
};
long user_r25;
unsigned long user_r25;
};
#else
struct pt_regs {
long orig_r0;
unsigned long orig_r0;
union {
struct {
@ -76,26 +76,26 @@ struct pt_regs {
unsigned long event;
};
long bta; /* bta_l1, bta_l2, erbta */
unsigned long bta; /* bta_l1, bta_l2, erbta */
long user_r25;
unsigned long user_r25;
long r26; /* gp */
long fp;
long sp; /* user/kernel sp depending on where we came from */
unsigned long r26; /* gp */
unsigned long fp;
unsigned long sp; /* user/kernel sp depending on where we came from */
long r12;
unsigned long r12;
/*------- Below list auto saved by h/w -----------*/
long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
long blink;
long lp_end, lp_start, lp_count;
unsigned long blink;
unsigned long lp_end, lp_start, lp_count;
long ei, ldi, jli;
unsigned long ei, ldi, jli;
long ret;
long status32;
unsigned long ret;
unsigned long status32;
};
#endif
@ -103,10 +103,10 @@ struct pt_regs {
/* Callee saved registers - need to be saved only when you are scheduled out */
struct callee_regs {
long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
};
#define instruction_pointer(regs) (unsigned long)((regs)->ret)
#define instruction_pointer(regs) ((regs)->ret)
#define profile_pc(regs) instruction_pointer(regs)
/* return 1 if user mode or 0 if kernel mode */
@ -142,7 +142,7 @@ struct callee_regs {
static inline long regs_return_value(struct pt_regs *regs)
{
return regs->r0;
return (long)regs->r0;
}
#endif /* !__ASSEMBLY__ */


@ -18,9 +18,518 @@
#define arch_spin_unlock_wait(x) \
do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
#ifdef CONFIG_ARC_HAS_LLSC
/*
* A normal LLOCK/SCOND based system, w/o need for livelock workaround
*/
#ifndef CONFIG_ARC_STAR_9000923308
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
unsigned int val;
smp_mb();
__asm__ __volatile__(
"1: llock %[val], [%[slock]] \n"
" breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */
" scond %[LOCKED], [%[slock]] \n" /* acquire */
" bnz 1b \n"
" \n"
: [val] "=&r" (val)
: [slock] "r" (&(lock->slock)),
[LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
: "memory", "cc");
smp_mb();
}
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int val, got_it = 0;
smp_mb();
__asm__ __volatile__(
"1: llock %[val], [%[slock]] \n"
" breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
" scond %[LOCKED], [%[slock]] \n" /* acquire */
" bnz 1b \n"
" mov %[got_it], 1 \n"
"4: \n"
" \n"
: [val] "=&r" (val),
[got_it] "+&r" (got_it)
: [slock] "r" (&(lock->slock)),
[LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
: "memory", "cc");
smp_mb();
return got_it;
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
smp_mb();
lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
smp_mb();
}
/*
* Read-write spinlocks, allowing multiple readers but only one writer.
* Unfair locking as Writers could be starved indefinitely by Reader(s)
*/
static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned int val;
smp_mb();
/*
* zero means writer holds the lock exclusively, deny Reader.
* Otherwise grant lock to first/subseq reader
*
* if (rw->counter > 0) {
* rw->counter--;
* ret = 1;
* }
*/
__asm__ __volatile__(
"1: llock %[val], [%[rwlock]] \n"
" brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */
" sub %[val], %[val], 1 \n" /* reader lock */
" scond %[val], [%[rwlock]] \n"
" bnz 1b \n"
" \n"
: [val] "=&r" (val)
: [rwlock] "r" (&(rw->counter)),
[WR_LOCKED] "ir" (0)
: "memory", "cc");
smp_mb();
}
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned int val, got_it = 0;
smp_mb();
__asm__ __volatile__(
"1: llock %[val], [%[rwlock]] \n"
" brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
" sub %[val], %[val], 1 \n" /* counter-- */
" scond %[val], [%[rwlock]] \n"
" bnz 1b \n" /* retry if collided with someone */
" mov %[got_it], 1 \n"
" \n"
"4: ; --- done --- \n"
: [val] "=&r" (val),
[got_it] "+&r" (got_it)
: [rwlock] "r" (&(rw->counter)),
[WR_LOCKED] "ir" (0)
: "memory", "cc");
smp_mb();
return got_it;
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned int val;
smp_mb();
/*
* If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
* deny writer. Otherwise if unlocked grant to writer
* Hence the claim that Linux rwlocks are unfair to writers.
* (can be starved for an indefinite time by readers).
*
* if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
* rw->counter = 0;
* ret = 1;
* }
*/
__asm__ __volatile__(
"1: llock %[val], [%[rwlock]] \n"
" brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */
" mov %[val], %[WR_LOCKED] \n"
" scond %[val], [%[rwlock]] \n"
" bnz 1b \n"
" \n"
: [val] "=&r" (val)
: [rwlock] "r" (&(rw->counter)),
[UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
[WR_LOCKED] "ir" (0)
: "memory", "cc");
smp_mb();
}
/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int val, got_it = 0;
smp_mb();
__asm__ __volatile__(
"1: llock %[val], [%[rwlock]] \n"
" brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
" mov %[val], %[WR_LOCKED] \n"
" scond %[val], [%[rwlock]] \n"
" bnz 1b \n" /* retry if collided with someone */
" mov %[got_it], 1 \n"
" \n"
"4: ; --- done --- \n"
: [val] "=&r" (val),
[got_it] "+&r" (got_it)
: [rwlock] "r" (&(rw->counter)),
[UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
[WR_LOCKED] "ir" (0)
: "memory", "cc");
smp_mb();
return got_it;
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned int val;
smp_mb();
/*
* rw->counter++;
*/
__asm__ __volatile__(
"1: llock %[val], [%[rwlock]] \n"
" add %[val], %[val], 1 \n"
" scond %[val], [%[rwlock]] \n"
" bnz 1b \n"
" \n"
: [val] "=&r" (val)
: [rwlock] "r" (&(rw->counter))
: "memory", "cc");
smp_mb();
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
smp_mb();
rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
smp_mb();
}
#else /* CONFIG_ARC_STAR_9000923308 */
/*
* HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
* coherency transactions in the SCU. The exclusive line state keeps rotating
* among contending cores, leading to a never ending cycle. So break the cycle
* by deferring the retry of failed exclusive access (SCOND). The actual delay
* needed is a function of the number of contending cores as well as the unrelated
* coherency traffic from other cores. To keep the code simple, start off with
* small delay of 1 which would suffice most cases and in case of contention
* double the delay. Eventually the delay is sufficient such that the coherency
* pipeline is drained, thus a subsequent exclusive access would succeed.
*/
#define SCOND_FAIL_RETRY_VAR_DEF \
unsigned int delay, tmp; \
#define SCOND_FAIL_RETRY_ASM \
" ; --- scond fail delay --- \n" \
" mov %[tmp], %[delay] \n" /* tmp = delay */ \
"2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
" sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
" rol %[delay], %[delay] \n" /* delay *= 2 */ \
" b 1b \n" /* start over */ \
" \n" \
"4: ; --- done --- \n" \
#define SCOND_FAIL_RETRY_VARS \
,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int val;
SCOND_FAIL_RETRY_VAR_DEF;
smp_mb();
__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[slock]] \n"
" breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */
" scond %[LOCKED], [%[slock]] \n" /* acquire */
" bz 4f \n" /* done */
" \n"
SCOND_FAIL_RETRY_ASM
: [val] "=&r" (val)
SCOND_FAIL_RETRY_VARS
: [slock] "r" (&(lock->slock)),
[LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
: "memory", "cc");
smp_mb();
}
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int val, got_it = 0;
SCOND_FAIL_RETRY_VAR_DEF;
smp_mb();
__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[slock]] \n"
" breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
" scond %[LOCKED], [%[slock]] \n" /* acquire */
" bz.d 4f \n"
" mov.z %[got_it], 1 \n" /* got it */
" \n"
SCOND_FAIL_RETRY_ASM
: [val] "=&r" (val),
[got_it] "+&r" (got_it)
SCOND_FAIL_RETRY_VARS
: [slock] "r" (&(lock->slock)),
[LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
: "memory", "cc");
smp_mb();
return got_it;
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
smp_mb();
lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
smp_mb();
}
/*
* Read-write spinlocks, allowing multiple readers but only one writer.
* Unfair locking as Writers could be starved indefinitely by Reader(s)
*/
static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned int val;
SCOND_FAIL_RETRY_VAR_DEF;
smp_mb();
/*
* zero means writer holds the lock exclusively, deny Reader.
* Otherwise grant lock to first/subseq reader
*
* if (rw->counter > 0) {
* rw->counter--;
* ret = 1;
* }
*/
__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[rwlock]] \n"
" brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */
" sub %[val], %[val], 1 \n" /* reader lock */
" scond %[val], [%[rwlock]] \n"
" bz 4f \n" /* done */
" \n"
SCOND_FAIL_RETRY_ASM
: [val] "=&r" (val)
SCOND_FAIL_RETRY_VARS
: [rwlock] "r" (&(rw->counter)),
[WR_LOCKED] "ir" (0)
: "memory", "cc");
smp_mb();
}
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned int val, got_it = 0;
SCOND_FAIL_RETRY_VAR_DEF;
smp_mb();
__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[rwlock]] \n"
" brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
" sub %[val], %[val], 1 \n" /* counter-- */
" scond %[val], [%[rwlock]] \n"
" bz.d 4f \n"
" mov.z %[got_it], 1 \n" /* got it */
" \n"
SCOND_FAIL_RETRY_ASM
: [val] "=&r" (val),
[got_it] "+&r" (got_it)
SCOND_FAIL_RETRY_VARS
: [rwlock] "r" (&(rw->counter)),
[WR_LOCKED] "ir" (0)
: "memory", "cc");
smp_mb();
return got_it;
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned int val;
SCOND_FAIL_RETRY_VAR_DEF;
smp_mb();
/*
* If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
* deny writer. Otherwise if unlocked grant to writer
* Hence the claim that Linux rwlocks are unfair to writers.
* (can be starved for an indefinite time by readers).
*
* if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
* rw->counter = 0;
* ret = 1;
* }
*/
__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[rwlock]] \n"
" brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */
" mov %[val], %[WR_LOCKED] \n"
" scond %[val], [%[rwlock]] \n"
" bz 4f \n"
" \n"
SCOND_FAIL_RETRY_ASM
: [val] "=&r" (val)
SCOND_FAIL_RETRY_VARS
: [rwlock] "r" (&(rw->counter)),
[UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
[WR_LOCKED] "ir" (0)
: "memory", "cc");
smp_mb();
}
/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int val, got_it = 0;
SCOND_FAIL_RETRY_VAR_DEF;
smp_mb();
__asm__ __volatile__(
"0: mov %[delay], 1 \n"
"1: llock %[val], [%[rwlock]] \n"
" brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
" mov %[val], %[WR_LOCKED] \n"
" scond %[val], [%[rwlock]] \n"
" bz.d 4f \n"
" mov.z %[got_it], 1 \n" /* got it */
" \n"
SCOND_FAIL_RETRY_ASM
: [val] "=&r" (val),
[got_it] "+&r" (got_it)
SCOND_FAIL_RETRY_VARS
: [rwlock] "r" (&(rw->counter)),
[UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
[WR_LOCKED] "ir" (0)
: "memory", "cc");
smp_mb();
return got_it;
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned int val;
smp_mb();
/*
* rw->counter++;
*/
__asm__ __volatile__(
"1: llock %[val], [%[rwlock]] \n"
" add %[val], %[val], 1 \n"
" scond %[val], [%[rwlock]] \n"
" bnz 1b \n"
" \n"
: [val] "=&r" (val)
: [rwlock] "r" (&(rw->counter))
: "memory", "cc");
smp_mb();
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
unsigned int val;
smp_mb();
/*
* rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
*/
__asm__ __volatile__(
"1: llock %[val], [%[rwlock]] \n"
" scond %[UNLOCKED], [%[rwlock]]\n"
" bnz 1b \n"
" \n"
: [val] "=&r" (val)
: [rwlock] "r" (&(rw->counter)),
[UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__)
: "memory", "cc");
smp_mb();
}
#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS
#endif /* CONFIG_ARC_STAR_9000923308 */
#else /* !CONFIG_ARC_HAS_LLSC */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
/*
* This smp_mb() is technically superfluous, we only need the one
@ -33,7 +542,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
__asm__ __volatile__(
"1: ex %0, [%1] \n"
" breq %0, %2, 1b \n"
: "+&r" (tmp)
: "+&r" (val)
: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
: "memory");
@ -48,26 +557,27 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
smp_mb();
}
/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
smp_mb();
__asm__ __volatile__(
"1: ex %0, [%1] \n"
: "+r" (tmp)
: "+r" (val)
: "r"(&(lock->slock))
: "memory");
smp_mb();
return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;
/*
* RELEASE barrier: given the instructions avail on ARCv2, full barrier
@ -77,7 +587,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
__asm__ __volatile__(
" ex %0, [%1] \n"
: "+r" (tmp)
: "+r" (val)
: "r"(&(lock->slock))
: "memory");
@ -90,19 +600,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
/*
* Read-write spinlocks, allowing multiple readers but only one writer.
* Unfair locking as Writers could be starved indefinitely by Reader(s)
*
* The spinlock itself is contained in @counter and access to it is
* serialized with @lock_mutex.
*
* Unfair locking as Writers could be starved indefinitely by Reader(s)
*/
/* Would read_trylock() succeed? */
#define arch_read_can_lock(x) ((x)->counter > 0)
/* Would write_trylock() succeed? */
#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
@ -173,6 +676,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
arch_spin_unlock(&(rw->lock_mutex));
}
#endif
#define arch_read_can_lock(x) ((x)->counter > 0)
#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)


@ -26,7 +26,9 @@ typedef struct {
*/
typedef struct {
volatile unsigned int counter;
#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t lock_mutex;
#endif
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000


@ -32,20 +32,20 @@
*/
struct user_regs_struct {
long pad;
unsigned long pad;
struct {
long bta, lp_start, lp_end, lp_count;
long status32, ret, blink, fp, gp;
long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
long sp;
unsigned long bta, lp_start, lp_end, lp_count;
unsigned long status32, ret, blink, fp, gp;
unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
unsigned long sp;
} scratch;
long pad2;
unsigned long pad2;
struct {
long r25, r24, r23, r22, r21, r20;
long r19, r18, r17, r16, r15, r14, r13;
unsigned long r25, r24, r23, r22, r21, r20;
unsigned long r19, r18, r17, r16, r15, r14, r13;
} callee;
long efa; /* break pt addr, for break points in delay slots */
long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */
unsigned long efa; /* break pt addr, for break points in delay slots */
unsigned long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */
};
#endif /* !__ASSEMBLY__ */


@ -47,6 +47,7 @@ static void read_arc_build_cfg_regs(void)
struct bcr_perip uncached_space;
struct bcr_generic bcr;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
unsigned long perip_space;
FIX_PTR(cpu);
READ_BCR(AUX_IDENTITY, cpu->core);
@ -56,7 +57,12 @@ static void read_arc_build_cfg_regs(void)
cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
BUG_ON((uncached_space.start << 24) != ARC_UNCACHED_ADDR_SPACE);
if (uncached_space.ver < 3)
perip_space = uncached_space.start << 24;
else
perip_space = read_aux_reg(AUX_NON_VOL) & 0xF0000000;
BUG_ON(perip_space != ARC_UNCACHED_ADDR_SPACE);
READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
@ -330,6 +336,10 @@ static void arc_chk_core_config(void)
pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
else if (!cpu->extn.fpu_dp && fpu_enabled)
panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
!IS_ENABLED(CONFIG_ARC_STAR_9000923308))
panic("llock/scond livelock workaround missing\n");
}
/*


@ -203,34 +203,24 @@ static int arc_clkevent_set_next_event(unsigned long delta,
return 0;
}
static void arc_clkevent_set_mode(enum clock_event_mode mode,
struct clock_event_device *dev)
static int arc_clkevent_set_periodic(struct clock_event_device *dev)
{
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
/*
* At X Hz, 1 sec = 1000ms -> X cycles;
* 10ms -> X / 100 cycles
*/
arc_timer_event_setup(arc_get_core_freq() / HZ);
break;
case CLOCK_EVT_MODE_ONESHOT:
break;
default:
break;
}
return;
/*
* At X Hz, 1 sec = 1000ms -> X cycles;
* 10ms -> X / 100 cycles
*/
arc_timer_event_setup(arc_get_core_freq() / HZ);
return 0;
}
static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
.name = "ARC Timer0",
.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
.mode = CLOCK_EVT_MODE_UNUSED,
.rating = 300,
.irq = TIMER0_IRQ, /* hardwired, no need for resources */
.set_next_event = arc_clkevent_set_next_event,
.set_mode = arc_clkevent_set_mode,
.name = "ARC Timer0",
.features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERIODIC,
.rating = 300,
.irq = TIMER0_IRQ, /* hardwired, no need for resources */
.set_next_event = arc_clkevent_set_next_event,
.set_state_periodic = arc_clkevent_set_periodic,
};
static irqreturn_t timer_irq_handler(int irq, void *dev_id)
@ -240,7 +230,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
* irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
*/
struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
int irq_reenable = evt->mode == CLOCK_EVT_MODE_PERIODIC;
int irq_reenable = clockevent_state_periodic(evt);
/*
* Any write to CTRL reg ACks the interrupt, we rewrite the


@ -206,7 +206,7 @@ unalignedOffby3:
ld.ab r6, [r1, 4]
prefetch [r1, 28] ;Prefetch the next read location
ld.ab r8, [r1,4]
prefetch [r3, 32] ;Prefetch the next write location
prefetchw [r3, 32] ;Prefetch the next write location
SHIFT_1 (r7, r6, 8)
or r7, r7, r5


@ -10,12 +10,6 @@
#undef PREALLOC_NOT_AVAIL
#ifdef PREALLOC_NOT_AVAIL
#define PREWRITE(A,B) prefetchw [(A),(B)]
#else
#define PREWRITE(A,B) prealloc [(A),(B)]
#endif
ENTRY(memset)
prefetchw [r0] ; Prefetch the write location
mov.f 0, r2
@ -51,9 +45,15 @@ ENTRY(memset)
;;; Convert len to Dwords, unfold x8
lsr.f lp_count, lp_count, 6
lpnz @.Lset64bytes
;; LOOP START
PREWRITE(r3, 64) ;Prefetch the next write location
#ifdef PREALLOC_NOT_AVAIL
prefetchw [r3, 64] ;Prefetch the next write location
#else
prealloc [r3, 64]
#endif
#ifdef CONFIG_ARC_HAS_LL64
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
@ -62,16 +62,45 @@ ENTRY(memset)
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
#else
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
#endif
.Lset64bytes:
lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
lpnz .Lset32bytes
;; LOOP START
prefetchw [r3, 32] ;Prefetch the next write location
#ifdef CONFIG_ARC_HAS_LL64
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
std.ab r4, [r3, 8]
#else
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
st.ab r4, [r3, 4]
#endif
.Lset32bytes:
and.f lp_count, r2, 0x1F ;Last remaining 31 bytes


@ -389,6 +389,21 @@ axs103_set_freq(unsigned int id, unsigned int fd, unsigned int od)
static void __init axs103_early_init(void)
{
/*
* AXS103 configurations for SMP/QUAD configurations share device tree
* which defaults to 90 MHz. However recent failures of Quad config
* revealed P&R timing violations so clamp it down to safe 50 MHz
* Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack
*
* This hack is really hacky as of now. Fix it properly by getting the
* number of cores as return value of platform's early SMP callback
*/
#ifdef CONFIG_ARC_MCIP
unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
if (num_cores > 2)
arc_set_core_freq(50 * 1000000);
#endif
switch (arc_get_core_freq()/1000000) {
case 33:
axs103_set_freq(1, 1, 1);


@ -312,6 +312,9 @@ INSTALL_TARGETS = zinstall uinstall install
PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
bootpImage uImage: zImage
zImage: Image
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@


@ -116,7 +116,7 @@
ranges = <0 0x2000 0x2000>;
scm_conf: scm_conf@0 {
compatible = "syscon";
compatible = "syscon", "simple-bus";
reg = <0x0 0x1400>;
#address-cells = <1>;
#size-cells = <1>;
@ -1140,6 +1140,7 @@
ctrl-module = <&omap_control_sata>;
clocks = <&sys_clkin1>, <&sata_ref_clk>;
clock-names = "sysclk", "refclk";
syscon-pllreset = <&scm_conf 0x3fc>;
#phy-cells = <0>;
};


@ -181,10 +181,10 @@
interrupt-names = "msi";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
<0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6QDL_CLK_PCIE_AXI>,
<&clks IMX6QDL_CLK_LVDS1_GATE>,
<&clks IMX6QDL_CLK_PCIE_REF_125M>;


@ -131,10 +131,17 @@
<GIC_SPI 376 IRQ_TYPE_EDGE_RISING>;
};
};
mdio: mdio@24200f00 {
compatible = "ti,keystone_mdio", "ti,davinci_mdio";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x24200f00 0x100>;
status = "disabled";
clocks = <&clkcpgmac>;
clock-names = "fck";
bus_freq = <2500000>;
};
/include/ "k2e-netcp.dtsi"
};
};
&mdio {
reg = <0x24200f00 0x100>;
};


@ -98,6 +98,17 @@
#gpio-cells = <2>;
gpio,syscon-dev = <&devctrl 0x25c>;
};
mdio: mdio@02090300 {
compatible = "ti,keystone_mdio", "ti,davinci_mdio";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x02090300 0x100>;
status = "disabled";
clocks = <&clkcpgmac>;
clock-names = "fck";
bus_freq = <2500000>;
};
/include/ "k2hk-netcp.dtsi"
};
};


@ -29,7 +29,6 @@
};
soc {
/include/ "k2l-clocks.dtsi"
uart2: serial@02348400 {
@ -79,6 +78,17 @@
#gpio-cells = <2>;
gpio,syscon-dev = <&devctrl 0x24c>;
};
mdio: mdio@26200f00 {
compatible = "ti,keystone_mdio", "ti,davinci_mdio";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x26200f00 0x100>;
status = "disabled";
clocks = <&clkcpgmac>;
clock-names = "fck";
bus_freq = <2500000>;
};
/include/ "k2l-netcp.dtsi"
};
};
@ -96,7 +106,3 @@
/* Pin muxed. Enabled and configured by Bootloader */
status = "disabled";
};
&mdio {
reg = <0x26200f00 0x100>;
};


@ -267,17 +267,6 @@
1 0 0x21000A00 0x00000100>;
};
mdio: mdio@02090300 {
compatible = "ti,keystone_mdio", "ti,davinci_mdio";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x02090300 0x100>;
status = "disabled";
clocks = <&clkpa>;
clock-names = "fck";
bus_freq = <2500000>;
};
kirq0: keystone_irq@26202a0 {
compatible = "ti,keystone-irq";
interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;


@ -51,7 +51,8 @@
};
scm_conf: scm_conf@270 {
compatible = "syscon";
compatible = "syscon",
"simple-bus";
reg = <0x270 0x240>;
#address-cells = <1>;
#size-cells = <1>;


@ -191,7 +191,8 @@
};
omap4_padconf_global: omap4_padconf_global@5a0 {
compatible = "syscon";
compatible = "syscon",
"simple-bus";
reg = <0x5a0 0x170>;
#address-cells = <1>;
#size-cells = <1>;


@ -180,7 +180,8 @@
};
omap5_padconf_global: omap5_padconf_global@5a0 {
compatible = "syscon";
compatible = "syscon",
"simple-bus";
reg = <0x5a0 0xec>;
#address-cells = <1>;
#size-cells = <1>;


@ -15,6 +15,33 @@
#include "skeleton.dtsi"
/ {
cpus {
#address-cells = <1>;
#size-cells = <0>;
enable-method = "ste,dbx500-smp";
cpu-map {
cluster0 {
core0 {
cpu = <&CPU0>;
};
core1 {
cpu = <&CPU1>;
};
};
};
CPU0: cpu@300 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0x300>;
};
CPU1: cpu@301 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0x301>;
};
};
soc {
#address-cells = <1>;
#size-cells = <1>;
@ -22,32 +49,6 @@
interrupt-parent = <&intc>;
ranges;
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu-map {
cluster0 {
core0 {
cpu = <&CPU0>;
};
core1 {
cpu = <&CPU1>;
};
};
};
CPU0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0>;
};
CPU1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <1>;
};
};
ptm@801ae000 {
compatible = "arm,coresight-etm3x", "arm,primecell";
reg = <0x801ae000 0x1000>;


@ -61,6 +61,7 @@ work_pending:
movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
ldmia sp, {r0 - r6} @ have to reload r0 - r6
b local_restart @ ... and off we go
ENDPROC(ret_fast_syscall)
/*
* "slow" syscall return path. "why" tells us if this was a real syscall.


@ -399,6 +399,9 @@ ENTRY(secondary_startup)
sub lr, r4, r5 @ mmu has been enabled
add r3, r7, lr
ldrd r4, [r3, #0] @ get secondary_data.pgdir
ARM_BE8(eor r4, r4, r5) @ Swap r5 and r4 in BE:
ARM_BE8(eor r5, r4, r5) @ it can be done in 3 steps
ARM_BE8(eor r4, r4, r5) @ without using a temp reg.
ldr r8, [r3, #8] @ get secondary_data.swapper_pg_dir
badr lr, __enable_mmu @ return address
mov r13, r12 @ __secondary_switched address


@ -296,7 +296,6 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
*/
void update_vsyscall(struct timekeeper *tk)
{
struct timespec xtime_coarse;
struct timespec64 *wtm = &tk->wall_to_monotonic;
if (!cntvct_ok) {
@ -308,10 +307,10 @@ void update_vsyscall(struct timekeeper *tk)
vdso_write_begin(vdso_data);
xtime_coarse = __current_kernel_time();
vdso_data->tk_is_cntvct = tk_is_cntvct(tk);
vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
vdso_data->xtime_coarse_sec = tk->xtime_sec;
vdso_data->xtime_coarse_nsec = (u32)(tk->tkr_mono.xtime_nsec >>
tk->tkr_mono.shift);
vdso_data->wtm_clock_sec = wtm->tv_sec;
vdso_data->wtm_clock_nsec = wtm->tv_nsec;


@ -96,7 +96,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
}
/* the mmap semaphore is taken only if not in an atomic context */
atomic = in_atomic();
atomic = faulthandler_disabled();
if (!atomic)
down_read(&current->mm->mmap_sem);


@ -146,9 +146,8 @@ static __init int exynos4_pm_init_power_domain(void)
pd->base = of_iomap(np, 0);
if (!pd->base) {
pr_warn("%s: failed to map memory\n", __func__);
kfree(pd->pd.name);
kfree_const(pd->pd.name);
kfree(pd);
of_node_put(np);
continue;
}


@ -392,6 +392,7 @@ static struct irq_chip wakeupgen_chip = {
.irq_mask = wakeupgen_mask,
.irq_unmask = wakeupgen_unmask,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_type = irq_chip_set_type_parent,
.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,


@ -14,7 +14,7 @@ VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
VDSO_LDFLAGS += -nostdlib -shared
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd)
VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
obj-$(CONFIG_VDSO) += vdso.o
extra-$(CONFIG_VDSO) += vdso.lds


@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
* Other callers might not initialize the si_lsb field,
* so check explicitly for the right codes here.
*/
if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
if (from->si_signo == SIGBUS &&
(from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
break;
@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
memset(to, 0, sizeof *to);
if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE))


@ -199,16 +199,15 @@ up_fail:
*/
void update_vsyscall(struct timekeeper *tk)
{
struct timespec xtime_coarse;
u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");
++vdso_data->tb_seq_count;
smp_wmb();
xtime_coarse = __current_kernel_time();
vdso_data->use_syscall = use_syscall;
vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
vdso_data->xtime_coarse_sec = tk->xtime_sec;
vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
tk->tkr_mono.shift;
vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;


@ -151,7 +151,6 @@ config BMIPS_GENERIC
select BCM7120_L2_IRQ
select BRCMSTB_L2_IRQ
select IRQ_MIPS_CPU
select RAW_IRQ_ACCESSORS
select DMA_NONCOHERENT
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN


@ -190,6 +190,7 @@ int get_c0_perfcount_int(void)
{
return ATH79_MISC_IRQ(5);
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{


@ -42,7 +42,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
if (action & SMP_CALL_FUNCTION)
smp_call_function_interrupt();
generic_smp_call_function_interrupt();
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();


@ -1,10 +0,0 @@
#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
#include <asm/bmips.h>
#define plat_post_dma_flush bmips_post_dma_flush
#include <asm/mach-generic/dma-coherence.h>
#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */


@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
#ifdef CONFIG_SMP
/*
* For SMP, multiple CPUs can race, so we need to do
* this atomically.
*/
#ifdef CONFIG_64BIT
#define LL_INSN "lld"
#define SC_INSN "scd"
#else /* CONFIG_32BIT */
#define LL_INSN "ll"
#define SC_INSN "sc"
#endif
unsigned long page_global = _PAGE_GLOBAL;
unsigned long tmp;
__asm__ __volatile__ (
" .set push\n"
" .set noreorder\n"
"1: " LL_INSN " %[tmp], %[buddy]\n"
" bnez %[tmp], 2f\n"
" or %[tmp], %[tmp], %[global]\n"
" " SC_INSN " %[tmp], %[buddy]\n"
" beqz %[tmp], 1b\n"
" nop\n"
"2:\n"
" .set pop"
: [buddy] "+m" (buddy->pte),
[tmp] "=&r" (tmp)
: [global] "r" (page_global));
#else /* !CONFIG_SMP */
if (pte_none(*buddy))
pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
}
#endif
}


@ -83,8 +83,6 @@ static inline void __cpu_die(unsigned int cpu)
extern void play_dead(void);
#endif
extern asmlinkage void smp_call_function_interrupt(void);
static inline void arch_send_call_function_single_ipi(int cpu)
{
extern struct plat_smp_ops *mp_ops; /* private */


@ -152,6 +152,31 @@
.set noreorder
bltz k0, 8f
move k1, sp
#ifdef CONFIG_EVA
/*
* Flush interAptiv's Return Prediction Stack (RPS) by writing
* EntryHi. Toggling Config7.RPS is slower and less portable.
*
* The RPS isn't automatically flushed when exceptions are
* taken, which can result in kernel mode speculative accesses
* to user addresses if the RPS mispredicts. That's harmless
* when user and kernel share the same address space, but with
* EVA the same user segments may be unmapped to kernel mode,
* even containing sensitive MMIO regions or invalid memory.
*
* This can happen when the kernel sets the return address to
* ret_from_* and jr's to the exception handler, which looks
* more like a tail call than a function call. If nested calls
* don't evict the last user address in the RPS, it will
* mispredict the return and fetch from a user controlled
* address into the icache.
*
* More recent EVA-capable cores with MAAR to restrict
* speculative accesses aren't affected.
*/
MFC0 k0, CP0_ENTRYHI
MTC0 k0, CP0_ENTRYHI
#endif
.set reorder
/* Called from user mode, new stack. */
get_saved_sp


@ -407,7 +407,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set noat
SAVE_ALL
FEXPORT(handle_\exception\ext)
__BUILD_clear_\clear
__build_clear_\clear
.set at
__BUILD_\verbose \exception
move a0, sp


@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr)
{
unsigned int real_len;
cpumask_t mask;
cpumask_t allowed, mask;
int retval;
struct task_struct *p;
@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
if (retval)
goto out_unlock;
cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
cpumask_and(&mask, &allowed, cpu_active_mask);
out_unlock:
read_unlock(&tasklist_lock);


@ -38,7 +38,7 @@ char *mips_get_machine_name(void)
return mips_machine_name;
}
#ifdef CONFIG_OF
#ifdef CONFIG_USE_OF
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
return add_memory_region(base, size, BOOT_MEM_RAM);


@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)
process_entry:
PTR_L s2, (s0)
PTR_ADD s0, s0, SZREG
PTR_ADDIU s0, s0, SZREG
/*
* In case of a kdump/crash kernel, the indirection page is not
@ -61,9 +61,9 @@ copy_word:
/* copy page word by word */
REG_L s5, (s2)
REG_S s5, (s4)
PTR_ADD s4, s4, SZREG
PTR_ADD s2, s2, SZREG
LONG_SUB s6, s6, 1
PTR_ADDIU s4, s4, SZREG
PTR_ADDIU s2, s2, SZREG
LONG_ADDIU s6, s6, -1
beq s6, zero, process_entry
b copy_word
b process_entry


@ -80,7 +80,7 @@ syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
daddiu a1, v0, __NR_64_Linux
move a1, v0
jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall


@ -72,7 +72,7 @@ n32_syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
daddiu a1, v0, __NR_N32_Linux
move a1, v0
jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall


@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
memset(to, 0, sizeof *to);
if (copy_from_user(to, from, 3*sizeof(int)) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE32))


@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
if (action == 0)
scheduler_ipi();
else
smp_call_function_interrupt();
generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
if (action & SMP_CALL_FUNCTION)
smp_call_function_interrupt();
generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}


@ -192,16 +192,6 @@ asmlinkage void start_secondary(void)
cpu_startup_entry(CPUHP_ONLINE);
}
/*
* Call into both interrupt handlers, as we share the IPI for them
*/
void __irq_entry smp_call_function_interrupt(void)
{
irq_enter();
generic_smp_call_function_interrupt();
irq_exit();
}
static void stop_this_cpu(void *dummy)
{
/*


@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
void show_stack(struct task_struct *task, unsigned long *sp)
{
struct pt_regs regs;
mm_segment_t old_fs = get_fs();
if (sp) {
regs.regs[29] = (unsigned long)sp;
regs.regs[31] = 0;
@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
prepare_frametrace(&regs);
}
}
/*
* show_stack() deals exclusively with kernel mode, so be sure to access
* the stack in the kernel (not user) address space.
*/
set_fs(KERNEL_DS);
show_stacktrace(task, &regs);
set_fs(old_fs);
}
static void show_code(unsigned int __user *pc)
@ -1519,6 +1526,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
const int field = 2 * sizeof(unsigned long);
int multi_match = regs->cp0_status & ST0_TS;
enum ctx_state prev_state;
mm_segment_t old_fs = get_fs();
prev_state = exception_enter();
show_regs(regs);
@ -1540,8 +1548,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
dump_tlb_all();
}
if (!user_mode(regs))
set_fs(KERNEL_DS);
show_code((unsigned int __user *) regs->cp0_epc);
set_fs(old_fs);
/*
* Some chips may have other causes of machine check (e.g. SB1
* graduation timer)

View File

@@ -438,7 +438,7 @@ do { \
: "memory"); \
} while(0)
#define StoreDW(addr, value, res) \
#define _StoreDW(addr, value, res) \
do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \

View File

@@ -293,7 +293,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
smp_call_function_interrupt();
generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
@@ -466,6 +466,7 @@ int get_c0_perfcount_int(void)
{
return ltq_perfcount_irq;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{

View File

@@ -266,8 +266,11 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
if (action & SMP_CALL_FUNCTION)
smp_call_function_interrupt();
if (action & SMP_CALL_FUNCTION) {
irq_enter();
generic_smp_call_function_interrupt();
irq_exit();
}
if (action & SMP_ASK_C0COUNT) {
BUG_ON(cpu != 0);

View File

@@ -160,18 +160,18 @@ static inline void setup_protection_map(void)
protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
} else {

View File

@@ -133,7 +133,8 @@ good_area:
#endif
goto bad_area;
}
if (!(vma->vm_flags & VM_READ)) {
if (!(vma->vm_flags & VM_READ) &&
exception_epc(regs) != address) {
#if 0
pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
raw_smp_processor_id(),

View File

@@ -222,7 +222,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
smp_call_function_interrupt();
generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}

View File

@@ -154,6 +154,7 @@ int get_c0_perfcount_int(void)
return mips_cpu_perf_irq;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{
@@ -171,14 +172,17 @@ unsigned int get_c0_compare_int(void)
static void __init init_rtc(void)
{
/* stop the clock whilst setting it up */
CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
unsigned char freq, ctrl;
/* 32KHz time base */
CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
/* Set 32KHz time base if not already set */
freq = CMOS_READ(RTC_FREQ_SELECT);
if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
/* start the clock */
CMOS_WRITE(RTC_24H, RTC_CONTROL);
/* Ensure SET bit is clear so RTC can run */
ctrl = CMOS_READ(RTC_CONTROL);
if (ctrl & RTC_SET)
CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
}
void __init plat_time_init(void)

View File

@@ -77,6 +77,7 @@ int get_c0_perfcount_int(void)
return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
return -1;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{

View File

@@ -86,7 +86,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
{
clear_c0_eimr(irq);
ack_c0_eirr(irq);
smp_call_function_interrupt();
generic_smp_call_function_interrupt();
set_c0_eimr(irq);
}

View File

@@ -114,7 +114,7 @@ static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id)
static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id)
{
smp_call_function_interrupt();
generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}

View File

@@ -26,6 +26,7 @@ int get_c0_perfcount_int(void)
{
return gic_get_c0_perfcount_int();
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
int get_c0_fdc_int(void)
{

View File

@@ -44,7 +44,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
smp_call_function_interrupt();
generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}

View File

@@ -89,6 +89,7 @@ int get_c0_perfcount_int(void)
{
return rt_perfcount_irq;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
unsigned int get_c0_compare_int(void)
{

View File

@@ -107,10 +107,14 @@ static void ip27_do_irq_mask0(void)
scheduler_ipi();
} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
smp_call_function_interrupt();
irq_enter();
generic_smp_call_function_interrupt();
irq_exit();
} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
smp_call_function_interrupt();
irq_enter();
generic_smp_call_function_interrupt();
irq_exit();
} else
#endif
{

View File

@@ -29,8 +29,6 @@
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_int.h>
extern void smp_call_function_interrupt(void);
/*
* These are routines for dealing with the bcm1480 smp capabilities
* independent of board/firmware
@@ -184,6 +182,9 @@ void bcm1480_mailbox_interrupt(void)
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
if (action & SMP_CALL_FUNCTION)
smp_call_function_interrupt();
if (action & SMP_CALL_FUNCTION) {
irq_enter();
generic_smp_call_function_interrupt();
irq_exit();
}
}

View File

@@ -172,6 +172,9 @@ void sb1250_mailbox_interrupt(void)
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
if (action & SMP_CALL_FUNCTION)
smp_call_function_interrupt();
if (action & SMP_CALL_FUNCTION) {
irq_enter();
generic_smp_call_function_interrupt();
irq_exit();
}
}

View File

@@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
{
memset(to, 0, sizeof *to);
if (copy_from_user(to, from, 3*sizeof(int)) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE32))

View File

@@ -1742,10 +1742,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
if (!vcpu->requests)
return 0;
retry:
kvm_s390_vcpu_request_handled(vcpu);
if (!vcpu->requests)
return 0;
/*
* We use MMU_RELOAD just to re-arm the ipte notifier for the
* guest prefix page. gmap_ipte_notify will wait on the ptl lock.

View File

@@ -28,16 +28,10 @@
* Must preserve %o5 between VISEntryHalf and VISExitHalf */
#define VISEntryHalf \
rd %fprs, %o5; \
andcc %o5, FPRS_FEF, %g0; \
be,pt %icc, 297f; \
sethi %hi(298f), %g7; \
sethi %hi(VISenterhalf), %g1; \
jmpl %g1 + %lo(VISenterhalf), %g0; \
or %g7, %lo(298f), %g7; \
clr %o5; \
297: wr %o5, FPRS_FEF, %fprs; \
298:
VISEntry
#define VISExitHalf \
VISExit
#define VISEntryHalfFast(fail_label) \
rd %fprs, %o5; \
@@ -47,7 +41,7 @@
ba,a,pt %xcc, fail_label; \
297: wr %o5, FPRS_FEF, %fprs;
#define VISExitHalf \
#define VISExitHalfFast \
wr %o5, 0, %fprs;
#ifndef __ASSEMBLY__

View File

@@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
add %o0, 0x40, %o0
bne,pt %icc, 1b
LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
#ifdef NON_USER_COPY
VISExitHalfFast
#else
VISExitHalf
#endif
brz,pn %o2, .Lexit
cmp %o2, 19
ble,pn %icc, .Lsmall_unaligned

View File

@@ -44,9 +44,8 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
stx %g3, [%g6 + TI_GSR]
2: add %g6, %g1, %g3
cmp %o5, FPRS_DU
be,pn %icc, 6f
sll %g1, 3, %g1
mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5
sll %g1, 3, %g1
stb %o5, [%g3 + TI_FPSAVED]
rd %gsr, %g2
add %g6, %g1, %g3
@@ -80,65 +79,3 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
.align 32
80: jmpl %g7 + %g0, %g0
nop
6: ldub [%g3 + TI_FPSAVED], %o5
or %o5, FPRS_DU, %o5
add %g6, TI_FPREGS+0x80, %g2
stb %o5, [%g3 + TI_FPSAVED]
sll %g1, 5, %g1
add %g6, TI_FPREGS+0xc0, %g3
wr %g0, FPRS_FEF, %fprs
membar #Sync
stda %f32, [%g2 + %g1] ASI_BLK_P
stda %f48, [%g3 + %g1] ASI_BLK_P
membar #Sync
ba,pt %xcc, 80f
nop
.align 32
80: jmpl %g7 + %g0, %g0
nop
.align 32
VISenterhalf:
ldub [%g6 + TI_FPDEPTH], %g1
brnz,a,pn %g1, 1f
cmp %g1, 1
stb %g0, [%g6 + TI_FPSAVED]
stx %fsr, [%g6 + TI_XFSR]
clr %o5
jmpl %g7 + %g0, %g0
wr %g0, FPRS_FEF, %fprs
1: bne,pn %icc, 2f
srl %g1, 1, %g1
ba,pt %xcc, vis1
sub %g7, 8, %g7
2: addcc %g6, %g1, %g3
sll %g1, 3, %g1
andn %o5, FPRS_DU, %g2
stb %g2, [%g3 + TI_FPSAVED]
rd %gsr, %g2
add %g6, %g1, %g3
stx %g2, [%g3 + TI_GSR]
add %g6, %g1, %g2
stx %fsr, [%g2 + TI_XFSR]
sll %g1, 5, %g1
3: andcc %o5, FPRS_DL, %g0
be,pn %icc, 4f
add %g6, TI_FPREGS, %g2
add %g6, TI_FPREGS+0x40, %g3
membar #Sync
stda %f0, [%g2 + %g1] ASI_BLK_P
stda %f16, [%g3 + %g1] ASI_BLK_P
membar #Sync
ba,pt %xcc, 4f
nop
.align 32
4: and %o5, FPRS_DU, %o5
jmpl %g7 + %g0, %g0
wr %o5, FPRS_FEF, %fprs

View File

@@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page);
void VISenter(void);
EXPORT_SYMBOL(VISenter);
/* CRYPTO code needs this */
void VISenterhalf(void);
EXPORT_SYMBOL(VISenterhalf);
extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
unsigned long *);

View File

@@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
return -EFAULT;
memset(to, 0, sizeof(*to));
err = __get_user(to->si_signo, &from->si_signo);
err |= __get_user(to->si_errno, &from->si_errno);
err |= __get_user(to->si_code, &from->si_code);

View File

@@ -140,6 +140,7 @@ sysexit_from_sys_call:
*/
andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
movl RIP(%rsp), %ecx /* User %eip */
movq RAX(%rsp), %rax
RESTORE_RSI_RDI
xorl %edx, %edx /* Do not leak kernel information */
xorq %r8, %r8
@@ -219,7 +220,6 @@ sysexit_from_sys_call:
1: setbe %al /* 1 if error, 0 if not */
movzbl %al, %edi /* zero-extend that into %edi */
call __audit_syscall_exit
movq RAX(%rsp), %rax /* reload syscall return value */
movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
@@ -368,6 +368,7 @@ sysretl_from_sys_call:
RESTORE_RSI_RDI_RDX
movl RIP(%rsp), %ecx
movl EFLAGS(%rsp), %r11d
movq RAX(%rsp), %rax
xorq %r10, %r10
xorq %r9, %r9
xorq %r8, %r8

View File

@@ -57,9 +57,9 @@ struct sigcontext {
unsigned long ip;
unsigned long flags;
unsigned short cs;
unsigned short __pad2; /* Was called gs, but was always zero. */
unsigned short __pad1; /* Was called fs, but was always zero. */
unsigned short ss;
unsigned short gs;
unsigned short fs;
unsigned short __pad0;
unsigned long err;
unsigned long trapno;
unsigned long oldmask;

View File

@@ -79,12 +79,12 @@ do { \
#else /* CONFIG_X86_32 */
/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
#define __EXTRA_CLOBBER \
, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
"r12", "r13", "r14", "r15", "flags"
"r12", "r13", "r14", "r15"
#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary \
@@ -100,11 +100,7 @@ do { \
#define __switch_canary_iparam
#endif /* CC_STACKPROTECTOR */
/*
* There is no need to save or restore flags, because flags are always
* clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
* has no effect.
*/
/* Save restore flags to clear handle leaking NT */
#define switch_to(prev, next, last) \
asm volatile(SAVE_CONTEXT \
"movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \

View File

@@ -177,24 +177,9 @@ struct sigcontext {
__u64 rip;
__u64 eflags; /* RFLAGS */
__u16 cs;
/*
* Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
* Linux saved and restored fs and gs in these slots. This
* was counterproductive, as fsbase and gsbase were never
* saved, so arch_prctl was presumably unreliable.
*
* If these slots are ever needed for any other purpose, there
* is some risk that very old 64-bit binaries could get
* confused. I doubt that many such binaries still work,
* though, since the same patch in 2.5.64 also removed the
* 64-bit set_thread_area syscall, so it appears that there is
* no TLS API that works in both pre- and post-2.5.64 kernels.
*/
__u16 __pad2; /* Was gs. */
__u16 __pad1; /* Was fs. */
__u16 ss;
__u16 gs;
__u16 fs;
__u16 __pad0;
__u64 err;
__u64 trapno;
__u64 oldmask;

View File

@@ -322,7 +322,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
irq_data->chip = &lapic_controller;
irq_data->chip_data = data;
irq_data->hwirq = virq + i;
err = assign_irq_vector_policy(virq, irq_data->node, data,
err = assign_irq_vector_policy(virq + i, irq_data->node, data,
info);
if (err)
goto error;

View File

@@ -2534,7 +2534,7 @@ static int intel_pmu_cpu_prepare(int cpu)
if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
cpuc->shared_regs = allocate_shared_regs(cpu);
if (!cpuc->shared_regs)
return NOTIFY_BAD;
goto err;
}
if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
@@ -2542,18 +2542,27 @@ static int intel_pmu_cpu_prepare(int cpu)
cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
if (!cpuc->constraint_list)
return NOTIFY_BAD;
goto err_shared_regs;
cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
if (!cpuc->excl_cntrs) {
kfree(cpuc->constraint_list);
kfree(cpuc->shared_regs);
return NOTIFY_BAD;
}
if (!cpuc->excl_cntrs)
goto err_constraint_list;
cpuc->excl_thread_id = 0;
}
return NOTIFY_OK;
err_constraint_list:
kfree(cpuc->constraint_list);
cpuc->constraint_list = NULL;
err_shared_regs:
kfree(cpuc->shared_regs);
cpuc->shared_regs = NULL;
err:
return NOTIFY_BAD;
}
static void intel_pmu_cpu_starting(int cpu)

View File

@@ -1255,7 +1255,7 @@ static inline void cqm_pick_event_reader(int cpu)
cpumask_set_cpu(cpu, &cqm_cpumask);
}
static void intel_cqm_cpu_prepare(unsigned int cpu)
static void intel_cqm_cpu_starting(unsigned int cpu)
{
struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1296,13 +1296,11 @@ static int intel_cqm_cpu_notifier(struct notifier_block *nb,
unsigned int cpu = (unsigned long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
intel_cqm_cpu_prepare(cpu);
break;
case CPU_DOWN_PREPARE:
intel_cqm_cpu_exit(cpu);
break;
case CPU_STARTING:
intel_cqm_cpu_starting(cpu);
cqm_pick_event_reader(cpu);
break;
}
@@ -1373,7 +1371,7 @@ static int __init intel_cqm_init(void)
goto out;
for_each_online_cpu(i) {
intel_cqm_cpu_prepare(i);
intel_cqm_cpu_starting(i);
cqm_pick_event_reader(i);
}

View File

@@ -270,7 +270,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
if (src_fpu->fpstate_active)
if (src_fpu->fpstate_active && cpu_has_fpu)
fpu_copy(dst_fpu, src_fpu);
return 0;

View File

@@ -40,7 +40,12 @@ static void fpu__init_cpu_generic(void)
write_cr0(cr0);
/* Flush out any pending x87 state: */
asm volatile ("fninit");
#ifdef CONFIG_MATH_EMULATION
if (!cpu_has_fpu)
fpstate_init_soft(&current->thread.fpu.state.soft);
else
#endif
asm volatile ("fninit");
}
/*

View File

@@ -408,6 +408,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
static void mwait_idle(void)
{
if (!current_set_polling_and_test()) {
trace_cpu_idle_rcuidle(1, smp_processor_id());
if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
smp_mb(); /* quirk */
clflush((void *)&current_thread_info()->flags);
@@ -419,6 +420,7 @@ static void mwait_idle(void)
__sti_mwait(0, 0);
else
local_irq_enable();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
} else {
local_irq_enable();
}

Some files were not shown because too many files have changed in this diff.