Linux 4.10-rc8

-----BEGIN PGP SIGNATURE-----
 
 iQEcBAABAgAGBQJYoM2fAAoJEHm+PkMAQRiGr9MH/izEAMri7rJ0QMc3ejt+WmD0
 8pkZw3+MVn71z6cIEgpzk4QkEWJd5rfhkETCeCp7qQ9V6cDW1FDE9+0OmPjiphDt
 nnzKs7t7skEBwH5Mq5xygmIfkv+Z0QGHZ20gfQWY3F56Uxo+ARF88OBHBLKhqx3v
 98C7YbMFLKBslKClA78NUEIdx0UfBaRqerlERx0Lfl9aoOrbBS6WI3iuREiylpih
 9o7HTrwaGKkU4Kd6NdgJP2EyWPsd1LGalxBBjeDSpm5uokX6ALTdNXDZqcQscHjE
 RmTqJTGRdhSThXOpNnvUJvk9L442yuNRrVme/IqLpxMdHPyjaXR3FGSIDb2SfjY=
 =VMy8
 -----END PGP SIGNATURE-----

Merge tag 'v4.10-rc8' into perf/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2017-02-14 07:29:14 +01:00
commit 210f400d68
266 changed files with 1939 additions and 1129 deletions

View file

@@ -33,11 +33,6 @@ Arguments
 Description
 ===========
-.. note::
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
 Closes the cec device. Resources associated with the file descriptor are
 freed. The device configuration remain unchanged.

View file

@@ -39,11 +39,6 @@ Arguments
 Description
 ===========
-.. note::
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
 The :c:func:`ioctl()` function manipulates cec device parameters. The
 argument ``fd`` must be an open file descriptor.

View file

@@ -46,11 +46,6 @@ Arguments
 Description
 ===========
-.. note::
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
 To open a cec device applications call :c:func:`open()` with the
 desired device name. The function has no side effects; the device
 configuration remain unchanged.

View file

@@ -39,11 +39,6 @@ Arguments
 Description
 ===========
-.. note::
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
 With the :c:func:`poll()` function applications can wait for CEC
 events.

View file

@@ -3,11 +3,6 @@
 Introduction
 ============
-.. note::
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
 HDMI connectors provide a single pin for use by the Consumer Electronics
 Control protocol. This protocol allows different devices connected by an
 HDMI cable to communicate. The protocol for CEC version 1.4 is defined
@@ -31,3 +26,15 @@ control just the CEC pin.
 Drivers that support CEC will create a CEC device node (/dev/cecX) to
 give userspace access to the CEC adapter. The
 :ref:`CEC_ADAP_G_CAPS` ioctl will tell userspace what it is allowed to do.
+
+In order to check the support and test it, it is suggested to download
+the `v4l-utils <https://git.linuxtv.org/v4l-utils.git/>`_ package. It
+provides three tools to handle CEC:
+
+- cec-ctl: the Swiss army knife of CEC. Allows you to configure, transmit
+  and monitor CEC messages.
+
+- cec-compliance: does a CEC compliance test of a remote CEC device to
+  determine how compliant the CEC implementation is.
+
+- cec-follower: emulates a CEC follower.
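
As a quick orientation for the documentation touched above, here is a minimal userspace sketch (not part of this commit) that opens a CEC device and queries its capabilities with CEC_ADAP_G_CAPS; the device path /dev/cec0 is an assumption and error handling is kept to the bare minimum. It is essentially the first thing a tool like cec-ctl has to do before configuring an adapter.

/* Minimal sketch: query CEC adapter capabilities (assumes /dev/cec0 exists). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/cec.h>

int main(void)
{
    struct cec_caps caps;
    int fd = open("/dev/cec0", O_RDWR);

    if (fd < 0) {
        perror("open /dev/cec0");
        return 1;
    }
    memset(&caps, 0, sizeof(caps));
    if (ioctl(fd, CEC_ADAP_G_CAPS, &caps) < 0) {
        perror("CEC_ADAP_G_CAPS");
        close(fd);
        return 1;
    }
    printf("driver: %s, adapter: %s, capabilities: 0x%08x\n",
           caps.driver, caps.name, caps.capabilities);
    close(fd);
    return 0;
}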

View file

@@ -29,11 +29,6 @@ Arguments
 Description
 ===========
-.. note::
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
 All cec devices must support :ref:`ioctl CEC_ADAP_G_CAPS <CEC_ADAP_G_CAPS>`. To query
 device information, applications call the ioctl with a pointer to a
 struct :c:type:`cec_caps`. The driver fills the structure and

View file

@@ -35,11 +35,6 @@ Arguments
 Description
 ===========
-.. note::
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
 To query the current CEC logical addresses, applications call
 :ref:`ioctl CEC_ADAP_G_LOG_ADDRS <CEC_ADAP_G_LOG_ADDRS>` with a pointer to a
 struct :c:type:`cec_log_addrs` where the driver stores the logical addresses.

View file

@@ -35,11 +35,6 @@ Arguments
 Description
 ===========
-.. note::
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
 To query the current physical address applications call
 :ref:`ioctl CEC_ADAP_G_PHYS_ADDR <CEC_ADAP_G_PHYS_ADDR>` with a pointer to a __u16 where the
 driver stores the physical address.
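
For reference (not part of this diff), reading the physical address documented above is a single ioctl on an already open cec file descriptor; `fd` here is assumed to come from the open() sketch shown earlier.

/* Sketch: query the current physical address (fd from an earlier open()). */
__u16 phys_addr = 0;

if (ioctl(fd, CEC_ADAP_G_PHYS_ADDR, &phys_addr) == 0)
    printf("physical address: %x.%x.%x.%x\n",
           (phys_addr >> 12) & 0xf, (phys_addr >> 8) & 0xf,
           (phys_addr >> 4) & 0xf, phys_addr & 0xf);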

View file

@@ -30,11 +30,6 @@ Arguments
 Description
 ===========
-.. note::
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
 CEC devices can send asynchronous events. These can be retrieved by
 calling :c:func:`CEC_DQEVENT`. If the file descriptor is in
 non-blocking mode and no event is pending, then it will return -1 and
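
A sketch of the event path described above (not part of this diff): with the file descriptor in non-blocking mode, pending events can be drained until the ioctl fails; `fd` is again assumed to be an open cec device.

/* Sketch: drain pending CEC events from a non-blocking fd. */
struct cec_event ev;

while (ioctl(fd, CEC_DQEVENT, &ev) == 0) {
    if (ev.event == CEC_EVENT_STATE_CHANGE)
        printf("new physical address: 0x%04x\n", ev.state_change.phys_addr);
    else if (ev.event == CEC_EVENT_LOST_MSGS)
        printf("lost %u message(s)\n", ev.lost_msgs.lost_msgs);
}
/* In non-blocking mode the loop ends with -1/EAGAIN once the queue is empty. */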

View file

@@ -31,11 +31,6 @@ Arguments
 Description
 ===========
-.. note::
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
 By default any filehandle can use :ref:`CEC_TRANSMIT`, but in order to prevent
 applications from stepping on each others toes it must be possible to
 obtain exclusive access to the CEC adapter. This ioctl sets the

View file

@@ -34,11 +34,6 @@ Arguments
 Description
 ===========
-.. note::
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
 To receive a CEC message the application has to fill in the
 ``timeout`` field of struct :c:type:`cec_msg` and pass it to
 :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`.
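
To illustrate the receive path documented above (again, not part of this commit), a minimal sketch that waits up to one second for an incoming message; `fd` is assumed to be an open cec device with a configured logical address.

/* Sketch: wait up to 1000 ms for an incoming CEC message. */
struct cec_msg msg;

memset(&msg, 0, sizeof(msg));
msg.timeout = 1000;  /* milliseconds */
if (ioctl(fd, CEC_RECEIVE, &msg) == 0)
    printf("received %u byte(s), initiator logical address %u\n",
           msg.len, msg.msg[0] >> 4);  /* high nibble of byte 0 = initiator */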

View file

@@ -1091,7 +1091,7 @@ F: arch/arm/boot/dts/aspeed-*
 F: drivers/*/*aspeed*
 ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@microchip.com>
 M: Alexandre Belloni <alexandre.belloni@free-electrons.com>
 M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1773,7 +1773,7 @@ F: drivers/soc/renesas/
 F: include/linux/soc/renesas/
 ARM/SOCFPGA ARCHITECTURE
-M: Dinh Nguyen <dinguyen@opensource.altera.com>
+M: Dinh Nguyen <dinguyen@kernel.org>
 S: Maintained
 F: arch/arm/mach-socfpga/
 F: arch/arm/boot/dts/socfpga*
@@ -1783,7 +1783,7 @@ W: http://www.rocketboards.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
 ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT
-M: Dinh Nguyen <dinguyen@opensource.altera.com>
+M: Dinh Nguyen <dinguyen@kernel.org>
 S: Maintained
 F: drivers/clk/socfpga/
@@ -2175,56 +2175,56 @@ F: include/linux/atm*
 F: include/uapi/linux/atm*
 ATMEL AT91 / AT32 MCI DRIVER
-M: Ludovic Desroches <ludovic.desroches@atmel.com>
+M: Ludovic Desroches <ludovic.desroches@microchip.com>
 S: Maintained
 F: drivers/mmc/host/atmel-mci.c
 ATMEL AT91 SAMA5D2-Compatible Shutdown Controller
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@microchip.com>
 S: Supported
 F: drivers/power/reset/at91-sama5d2_shdwc.c
 ATMEL SAMA5D2 ADC DRIVER
-M: Ludovic Desroches <ludovic.desroches@atmel.com>
+M: Ludovic Desroches <ludovic.desroches@microchip.com>
 L: linux-iio@vger.kernel.org
 S: Supported
 F: drivers/iio/adc/at91-sama5d2_adc.c
 ATMEL Audio ALSA driver
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@microchip.com>
 L: alsa-devel@alsa-project.org (moderated for non-subscribers)
 S: Supported
 F: sound/soc/atmel
 ATMEL XDMA DRIVER
-M: Ludovic Desroches <ludovic.desroches@atmel.com>
+M: Ludovic Desroches <ludovic.desroches@microchip.com>
 L: linux-arm-kernel@lists.infradead.org
 L: dmaengine@vger.kernel.org
 S: Supported
 F: drivers/dma/at_xdmac.c
 ATMEL I2C DRIVER
-M: Ludovic Desroches <ludovic.desroches@atmel.com>
+M: Ludovic Desroches <ludovic.desroches@microchip.com>
 L: linux-i2c@vger.kernel.org
 S: Supported
 F: drivers/i2c/busses/i2c-at91.c
 ATMEL ISI DRIVER
-M: Ludovic Desroches <ludovic.desroches@atmel.com>
+M: Ludovic Desroches <ludovic.desroches@microchip.com>
 L: linux-media@vger.kernel.org
 S: Supported
 F: drivers/media/platform/soc_camera/atmel-isi.c
 F: include/media/atmel-isi.h
 ATMEL LCDFB DRIVER
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@microchip.com>
 L: linux-fbdev@vger.kernel.org
 S: Maintained
 F: drivers/video/fbdev/atmel_lcdfb.c
 F: include/video/atmel_lcdc.h
 ATMEL MACB ETHERNET DRIVER
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@microchip.com>
 S: Supported
 F: drivers/net/ethernet/cadence/
@@ -2236,32 +2236,32 @@ S: Supported
 F: drivers/mtd/nand/atmel_nand*
 ATMEL SDMMC DRIVER
-M: Ludovic Desroches <ludovic.desroches@atmel.com>
+M: Ludovic Desroches <ludovic.desroches@microchip.com>
 L: linux-mmc@vger.kernel.org
 S: Supported
 F: drivers/mmc/host/sdhci-of-at91.c
 ATMEL SPI DRIVER
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@microchip.com>
 S: Supported
 F: drivers/spi/spi-atmel.*
 ATMEL SSC DRIVER
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@microchip.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Supported
 F: drivers/misc/atmel-ssc.c
 F: include/linux/atmel-ssc.h
 ATMEL Timer Counter (TC) AND CLOCKSOURCE DRIVERS
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@microchip.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Supported
 F: drivers/misc/atmel_tclib.c
 F: drivers/clocksource/tcb_clksrc.c
 ATMEL USBA UDC DRIVER
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Nicolas Ferre <nicolas.ferre@microchip.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Supported
 F: drivers/usb/gadget/udc/atmel_usba_udc.*
@@ -9736,7 +9736,7 @@ S: Maintained
 F: drivers/pinctrl/pinctrl-at91.*
 PIN CONTROLLER - ATMEL AT91 PIO4
-M: Ludovic Desroches <ludovic.desroches@atmel.com>
+M: Ludovic Desroches <ludovic.desroches@microchip.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L: linux-gpio@vger.kernel.org
 S: Supported
@@ -13065,7 +13065,7 @@ F: drivers/input/serio/userio.c
 F: include/uapi/linux/userio.h
 VIRTIO CONSOLE DRIVER
-M: Amit Shah <amit.shah@redhat.com>
+M: Amit Shah <amit@kernel.org>
 L: virtualization@lists.linux-foundation.org
 S: Maintained
 F: drivers/char/virtio_console.c

View file

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc8
 NAME = Fearless Coyote
 # *DOCUMENTATION*
@@ -799,7 +799,7 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
 KBUILD_ARFLAGS := $(call ar-option,D)
 # check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
 KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
 KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif

View file

@@ -243,7 +243,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 /* clear any remanants of delay slot */
 if (delay_mode(regs)) {
-regs->ret = regs->bta ~1U;
+regs->ret = regs->bta & ~1U;
 regs->status32 &= ~STATUS_DE_MASK;
 } else {
 regs->ret += state.instr_len;

View file

@@ -617,7 +617,7 @@ dtb-$(CONFIG_ARCH_ORION5X) += \
 orion5x-lacie-ethernet-disk-mini-v2.dtb \
 orion5x-linkstation-lsgl.dtb \
 orion5x-linkstation-lswtgl.dtb \
-orion5x-lschl.dtb \
+orion5x-linkstation-lschl.dtb \
 orion5x-lswsgl.dtb \
 orion5x-maxtor-shared-storage-2.dtb \
 orion5x-netgear-wnr854t.dtb \

View file

@@ -18,6 +18,14 @@
 / {
 #address-cells = <1>;
 #size-cells = <1>;
+/*
+ * The decompressor and also some bootloaders rely on a
+ * pre-existing /chosen node to be available to insert the
+ * command line and merge other ATAGS info.
+ * Also for U-Boot there must be a pre-existing /memory node.
+ */
+chosen {};
+memory { device_type = "memory"; reg = <0 0>; };
 aliases {
 gpio0 = &gpio1;

View file

@@ -16,6 +16,14 @@ / {
 #size-cells = <1>;
 interrupt-parent = <&icoll>;
+/*
+ * The decompressor and also some bootloaders rely on a
+ * pre-existing /chosen node to be available to insert the
+ * command line and merge other ATAGS info.
+ * Also for U-Boot there must be a pre-existing /memory node.
+ */
+chosen {};
+memory { device_type = "memory"; reg = <0 0>; };
 aliases {
 gpio0 = &gpio0;

View file

@@ -14,6 +14,14 @@
 / {
 #address-cells = <1>;
 #size-cells = <1>;
+/*
+ * The decompressor and also some bootloaders rely on a
+ * pre-existing /chosen node to be available to insert the
+ * command line and merge other ATAGS info.
+ * Also for U-Boot there must be a pre-existing /memory node.
+ */
+chosen {};
+memory { device_type = "memory"; reg = <0 0>; };
 aliases {
 ethernet0 = &fec;

View file

@@ -19,6 +19,14 @@
 / {
 #address-cells = <1>;
 #size-cells = <1>;
+/*
+ * The decompressor and also some bootloaders rely on a
+ * pre-existing /chosen node to be available to insert the
+ * command line and merge other ATAGS info.
+ * Also for U-Boot there must be a pre-existing /memory node.
+ */
+chosen {};
+memory { device_type = "memory"; reg = <0 0>; };
 aliases {
 ethernet0 = &fec;

View file

@@ -17,6 +17,14 @@ / {
 #size-cells = <1>;
 interrupt-parent = <&icoll>;
+/*
+ * The decompressor and also some bootloaders rely on a
+ * pre-existing /chosen node to be available to insert the
+ * command line and merge other ATAGS info.
+ * Also for U-Boot there must be a pre-existing /memory node.
+ */
+chosen {};
+memory { device_type = "memory"; reg = <0 0>; };
 aliases {
 ethernet0 = &mac0;

View file

@@ -12,6 +12,14 @@
 / {
 #address-cells = <1>;
 #size-cells = <1>;
+/*
+ * The decompressor and also some bootloaders rely on a
+ * pre-existing /chosen node to be available to insert the
+ * command line and merge other ATAGS info.
+ * Also for U-Boot there must be a pre-existing /memory node.
+ */
+chosen {};
+memory { device_type = "memory"; reg = <0 0>; };
 aliases {
 serial0 = &uart1;

View file

@@ -13,6 +13,14 @@
 / {
 #address-cells = <1>;
 #size-cells = <1>;
+/*
+ * The decompressor and also some bootloaders rely on a
+ * pre-existing /chosen node to be available to insert the
+ * command line and merge other ATAGS info.
+ * Also for U-Boot there must be a pre-existing /memory node.
+ */
+chosen {};
+memory { device_type = "memory"; reg = <0 0>; };
 aliases {
 ethernet0 = &fec;

View file

@@ -17,6 +17,14 @@
 / {
 #address-cells = <1>;
 #size-cells = <1>;
+/*
+ * The decompressor and also some bootloaders rely on a
+ * pre-existing /chosen node to be available to insert the
+ * command line and merge other ATAGS info.
+ * Also for U-Boot there must be a pre-existing /memory node.
+ */
+chosen {};
+memory { device_type = "memory"; reg = <0 0>; };
 aliases {
 ethernet0 = &fec;

View file

@@ -19,6 +19,14 @@
 / {
 #address-cells = <1>;
 #size-cells = <1>;
+/*
+ * The decompressor and also some bootloaders rely on a
+ * pre-existing /chosen node to be available to insert the
+ * command line and merge other ATAGS info.
+ * Also for U-Boot there must be a pre-existing /memory node.
+ */
+chosen {};
+memory { device_type = "memory"; reg = <0 0>; };
 aliases {
 ethernet0 = &fec;

View file

@@ -19,6 +19,14 @@
 / {
 #address-cells = <1>;
 #size-cells = <1>;
+/*
+ * The decompressor and also some bootloaders rely on a
+ * pre-existing /chosen node to be available to insert the
+ * command line and merge other ATAGS info.
+ * Also for U-Boot there must be a pre-existing /memory node.
+ */
+chosen {};
+memory { device_type = "memory"; reg = <0 0>; };
 aliases {
 ethernet0 = &fec;

View file

@@ -137,7 +137,7 @@ &gpio3 {
 &gpio4 {
 gpio-ranges = <&iomuxc 5 136 1>, <&iomuxc 6 145 1>, <&iomuxc 7 150 1>,
 <&iomuxc 8 146 1>, <&iomuxc 9 151 1>, <&iomuxc 10 147 1>,
-<&iomuxc 11 151 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
+<&iomuxc 11 152 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
 <&iomuxc 14 149 1>, <&iomuxc 15 154 1>, <&iomuxc 16 39 7>,
 <&iomuxc 23 56 1>, <&iomuxc 24 61 7>, <&iomuxc 31 46 1>;
 };

View file

@@ -16,6 +16,14 @@
 / {
 #address-cells = <1>;
 #size-cells = <1>;
+/*
+ * The decompressor and also some bootloaders rely on a
+ * pre-existing /chosen node to be available to insert the
+ * command line and merge other ATAGS info.
+ * Also for U-Boot there must be a pre-existing /memory node.
+ */
+chosen {};
+memory { device_type = "memory"; reg = <0 0>; };
 aliases {
 ethernet0 = &fec;

View file

@@ -14,6 +14,14 @@
 / {
 #address-cells = <1>;
 #size-cells = <1>;
+/*
+ * The decompressor and also some bootloaders rely on a
+ * pre-existing /chosen node to be available to insert the
+ * command line and merge other ATAGS info.
+ * Also for U-Boot there must be a pre-existing /memory node.
+ */
+chosen {};
+memory { device_type = "memory"; reg = <0 0>; };
 aliases {
 ethernet0 = &fec;

View file

@@ -15,6 +15,14 @@
 / {
 #address-cells = <1>;
 #size-cells = <1>;
+/*
+ * The decompressor and also some bootloaders rely on a
+ * pre-existing /chosen node to be available to insert the
+ * command line and merge other ATAGS info.
+ * Also for U-Boot there must be a pre-existing /memory node.
+ */
+chosen {};
+memory { device_type = "memory"; reg = <0 0>; };
 aliases {
 can0 = &flexcan1;

View file

@@ -15,6 +15,14 @@
 / {
 #address-cells = <1>;
 #size-cells = <1>;
+/*
+ * The decompressor and also some bootloaders rely on a
+ * pre-existing /chosen node to be available to insert the
+ * command line and merge other ATAGS info.
+ * Also for U-Boot there must be a pre-existing /memory node.
+ */
+chosen {};
+memory { device_type = "memory"; reg = <0 0>; };
 aliases {
 ethernet0 = &fec1;

View file

@@ -50,6 +50,14 @@
 / {
 #address-cells = <1>;
 #size-cells = <1>;
+/*
+ * The decompressor and also some bootloaders rely on a
+ * pre-existing /chosen node to be available to insert the
+ * command line and merge other ATAGS info.
+ * Also for U-Boot there must be a pre-existing /memory node.
+ */
+chosen {};
+memory { device_type = "memory"; reg = <0 0>; };
 aliases {
 gpio0 = &gpio1;

View file

@@ -2,7 +2,7 @@
 * Device Tree file for Buffalo Linkstation LS-CHLv3
 *
 * Copyright (C) 2016 Ash Hughes <ashley.hughes@blueyonder.co.uk>
- * Copyright (C) 2015, 2016
+ * Copyright (C) 2015-2017
 *   Roger Shimizu <rogershimizu@gmail.com>
 *
 * This file is dual-licensed: you can use it either under the terms
@@ -52,7 +52,7 @@
 #include <dt-bindings/gpio/gpio.h>
 / {
-model = "Buffalo Linkstation Live v3 (LS-CHL)";
+model = "Buffalo Linkstation LiveV3 (LS-CHL)";
 compatible = "buffalo,lschl", "marvell,orion5x-88f5182", "marvell,orion5x";
 memory { /* 128 MB */

View file

@@ -680,6 +680,7 @@ dwc3: dwc3@9900000 {
 phy-names = "usb2-phy", "usb3-phy";
 phys = <&usb2_picophy0>,
 <&phy_port2 PHY_TYPE_USB3>;
+snps,dis_u3_susphy_quirk;
 };
 };

View file

@@ -64,8 +64,8 @@ CONFIG_NETFILTER=y
 CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m

View file

@@ -56,8 +56,8 @@ CONFIG_NETFILTER=y
 CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m

View file

@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target,
 const void *kbuf, const void __user *ubuf)
 {
 int ret;
-struct pt_regs newregs;
+struct pt_regs newregs = *task_pt_regs(target);
 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 &newregs,

View file

@@ -60,7 +60,6 @@
 #define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)
-static enum cpuhp_state cpuhp_mmdc_state;
 static int ddr_type;
 struct fsl_mmdc_devtype_data {
@@ -82,6 +81,7 @@ static const struct of_device_id imx_mmdc_dt_ids[] = {
 #ifdef CONFIG_PERF_EVENTS
+static enum cpuhp_state cpuhp_mmdc_state;
 static DEFINE_IDA(mmdc_ida);
 PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")

View file

@@ -610,9 +610,9 @@ static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
 void __init early_abt_enable(void)
 {
-fsr_info[22].fn = early_abort_handler;
+fsr_info[FSR_FS_AEA].fn = early_abort_handler;
 local_abt_enable();
-fsr_info[22].fn = do_bad;
+fsr_info[FSR_FS_AEA].fn = do_bad;
 }
 #ifndef CONFIG_ARM_LPAE

View file

@@ -11,11 +11,15 @@
 #define FSR_FS5_0 (0x3f)
 #ifdef CONFIG_ARM_LPAE
+#define FSR_FS_AEA 17
+
 static inline int fsr_fs(unsigned int fsr)
 {
 return fsr & FSR_FS5_0;
 }
 #else
+#define FSR_FS_AEA 22
+
 static inline int fsr_fs(unsigned int fsr)
 {
 return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;

View file

@@ -55,6 +55,24 @@ / {
 #address-cells = <2>;
 #size-cells = <2>;
+reserved-memory {
+#address-cells = <2>;
+#size-cells = <2>;
+ranges;
+
+/* 16 MiB reserved for Hardware ROM Firmware */
+hwrom_reserved: hwrom@0 {
+reg = <0x0 0x0 0x0 0x1000000>;
+no-map;
+};
+
+/* 2 MiB reserved for ARM Trusted Firmware (BL31) */
+secmon_reserved: secmon@10000000 {
+reg = <0x0 0x10000000 0x0 0x200000>;
+no-map;
+};
+};
 cpus {
 #address-cells = <0x2>;
 #size-cells = <0x0>;

View file

@@ -151,6 +151,18 @@ &ethmac {
 status = "okay";
 pinctrl-0 = <&eth_rgmii_pins>;
 pinctrl-names = "default";
+phy-handle = <&eth_phy0>;
+
+mdio {
+compatible = "snps,dwmac-mdio";
+#address-cells = <1>;
+#size-cells = <0>;
+
+eth_phy0: ethernet-phy@0 {
+reg = <0>;
+eee-broken-1000t;
+};
+};
 };
 &ir {

View file

@@ -164,7 +164,6 @@ config PPC
 select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
 select HAVE_ARCH_HARDENED_USERCOPY
 select HAVE_KERNEL_GZIP
-select HAVE_CC_STACKPROTECTOR
 config GENERIC_CSUM
 def_bool CPU_LITTLE_ENDIAN
@@ -484,6 +483,7 @@ config RELOCATABLE
 bool "Build a relocatable kernel"
 depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
 select NONSTATIC_KERNEL
+select MODULE_REL_CRCS if MODVERSIONS
 help
 This builds a kernel image that is capable of running at the
 location the kernel is loaded at. For ppc32, there is no any

View file

@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
 {
 int i;
+#ifndef __clang__ /* clang can't cope with this */
 BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
 if (!static_key_initialized) {

View file

@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
 {
 int i;
+#ifndef __clang__ /* clang can't cope with this */
 BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
 if (!static_key_initialized) {

View file

@@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec
 }
 #endif
-#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
-#define ARCH_RELOCATES_KCRCTAB
-#define reloc_start PHYSICAL_START
-#endif
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MODULE_H */

View file

@@ -649,9 +649,10 @@
 #define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
 #define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
 #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
-#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */
+#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 and 9 */
 #define SRR1_WAKESYSERR 0x00300000 /* System error */
 #define SRR1_WAKEEE 0x00200000 /* External interrupt */
+#define SRR1_WAKEHVI 0x00240000 /* Hypervisor Virtualization Interrupt (P9) */
 #define SRR1_WAKEMT 0x00280000 /* mtctrl */
 #define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
 #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */

View file

@@ -1,40 +0,0 @@
-/*
- * GCC stack protector support.
- *
- * Stack protector works by putting predefined pattern at the start of
- * the stack frame and verifying that it hasn't been overwritten when
- * returning from the function. The pattern is called stack canary
- * and gcc expects it to be defined by a global variable called
- * "__stack_chk_guard" on PPC. This unfortunately means that on SMP
- * we cannot have a different canary value per task.
- */
-#ifndef _ASM_STACKPROTECTOR_H
-#define _ASM_STACKPROTECTOR_H
-#include <linux/random.h>
-#include <linux/version.h>
-#include <asm/reg.h>
-extern unsigned long __stack_chk_guard;
-/*
- * Initialize the stackprotector canary value.
- *
- * NOTE: this must only be called from functions that never return,
- * and it must always be inlined.
- */
-static __always_inline void boot_init_stack_canary(void)
-{
-unsigned long canary;
-/* Try to get a semi random initial value. */
-get_random_bytes(&canary, sizeof(canary));
-canary ^= mftb();
-canary ^= LINUX_VERSION_CODE;
-current->stack_canary = canary;
-__stack_chk_guard = current->stack_canary;
-}
-#endif /* _ASM_STACKPROTECTOR_H */

View file

@@ -44,6 +44,7 @@ static inline int icp_hv_init(void) { return -ENODEV; }
 #ifdef CONFIG_PPC_POWERNV
 extern int icp_opal_init(void);
+extern void icp_opal_flush_interrupt(void);
 #else
 static inline int icp_opal_init(void) { return -ENODEV; }
 #endif

View file

@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
-# -fstack-protector triggers protection checks in this code,
-# but it is being used too early to link to meaningful stack_chk logic.
-CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)

View file

@@ -91,9 +91,6 @@ int main(void)
 DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
 #endif
-#ifdef CONFIG_CC_STACKPROTECTOR
-DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
-#endif
 DEFINE(KSP, offsetof(struct thread_struct, ksp));
 DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE

View file

@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
 static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
 {
 struct eeh_pe *pe = (struct eeh_pe *)data;
-bool *clear_sw_state = flag;
+bool clear_sw_state = *(bool *)flag;
 int i, rc = 1;
 for (i = 0; rc && i < 3; i++)

View file

@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION
 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
 END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 #endif /* CONFIG_SPE */
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-lwz r0,TSK_STACK_CANARY(r2)
-lis r4,__stack_chk_guard@ha
-stw r0,__stack_chk_guard@l(r4)
-#endif
 lwz r0,_CCR(r1)
 mtcrf 0xFF,r0
 /* r3-r12 are destroyed -- Cort */

View file

@@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers,
 for (end = (void *)vers + size; vers < end; vers++)
 if (vers->name[0] == '.') {
 memmove(vers->name, vers->name+1, strlen(vers->name));
-#ifdef ARCH_RELOCATES_KCRCTAB
-/* The TOC symbol has no CRC computed. To avoid CRC
- * check failing, we must force it to the expected
- * value (see CRC check in module.c).
- */
-if (!strcmp(vers->name, "TOC."))
-vers->crc = -(unsigned long)reloc_start;
-#endif
 }
 }

View file

@@ -64,12 +64,6 @@
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
-#ifdef CONFIG_CC_STACKPROTECTOR
-#include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
-EXPORT_SYMBOL(__stack_chk_guard);
-#endif
 /* Transactional Memory debug */
 #ifdef TM_DEBUG_SW
 #define TM_DEBUG(x...) printk(KERN_INFO x)

View file

@@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void)
 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
+if (!PHANDLE_VALID(cpu_pkg))
+return;
 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
 prom.cpu = be32_to_cpu(rval);

View file

@@ -253,8 +253,11 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 if (unlikely(debugger_fault_handler(regs)))
 goto bail;
-/* On a kernel SLB miss we can only check for a valid exception entry */
-if (!user_mode(regs) && (address >= TASK_SIZE)) {
+/*
+ * The kernel should never take an execute fault nor should it
+ * take a page fault to a kernel address.
+ */
+if (!user_mode(regs) && (is_exec || (address >= TASK_SIZE))) {
 rc = SIGSEGV;
 goto bail;
 }
@@ -390,20 +393,6 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 #endif /* CONFIG_8xx */
 if (is_exec) {
-/*
- * An execution fault + no execute ?
- *
- * On CPUs that don't have CPU_FTR_COHERENT_ICACHE we
- * deliberately create NX mappings, and use the fault to do the
- * cache flush. This is usually handled in hash_page_do_lazy_icache()
- * but we could end up here if that races with a concurrent PTE
- * update. In that case we need to fall through here to the VMA
- * check below.
- */
-if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
-(regs->msr & SRR1_ISI_N_OR_G))
-goto bad_area;
 /*
 * Allow execution from readable areas if the MMU does not
 * provide separate controls over reading and executing.

View file

@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 if (!pmdp)
 return -ENOMEM;
 if (map_page_size == PMD_SIZE) {
-ptep = (pte_t *)pudp;
+ptep = pmdp_ptep(pmdp);
 goto set_the_pte;
 }
 ptep = pte_alloc_kernel(pmdp, ea);
@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
 }
 pmdp = pmd_offset(pudp, ea);
 if (map_page_size == PMD_SIZE) {
-ptep = (pte_t *)pudp;
+ptep = pmdp_ptep(pmdp);
 goto set_the_pte;
 }
 if (!pmd_present(*pmdp)) {

View file

@@ -50,9 +50,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
 __tlbiel_pid(pid, set, ric);
 }
-if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
-return;
+asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
 }
 static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
@@ -85,8 +83,6 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
 asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
 asm volatile("ptesync": : :"memory");
-if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
 }
 static inline void _tlbie_va(unsigned long va, unsigned long pid,

View file

@@ -155,8 +155,10 @@ static void pnv_smp_cpu_kill_self(void)
 wmask = SRR1_WAKEMASK_P8;
 idle_states = pnv_get_supported_cpuidle_states();
 /* We don't want to take decrementer interrupts while we are offline,
- * so clear LPCR:PECE1. We keep PECE2 enabled.
+ * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
+ * enabled as to let IPIs in.
 */
 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
@@ -206,8 +208,12 @@ static void pnv_smp_cpu_kill_self(void)
 * contains 0.
 */
 if (((srr1 & wmask) == SRR1_WAKEEE) ||
+((srr1 & wmask) == SRR1_WAKEHVI) ||
 (local_paca->irq_happened & PACA_IRQ_EE)) {
-icp_native_flush_interrupt();
+if (cpu_has_feature(CPU_FTR_ARCH_300))
+icp_opal_flush_interrupt();
+else
+icp_native_flush_interrupt();
 } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
 unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
 asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
@@ -221,6 +227,8 @@ static void pnv_smp_cpu_kill_self(void)
 if (srr1 && !generic_check_cpu_restart(cpu))
 DBG("CPU%d Unexpected exit while offline !\n", cpu);
 }
+/* Re-enable decrementer interrupts */
 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
 DBG("CPU%d coming online...\n", cpu);
 }

View file

@@ -120,18 +120,49 @@ static void icp_opal_cause_ipi(int cpu, unsigned long data)
 {
 int hw_cpu = get_hard_smp_processor_id(cpu);
+kvmppc_set_host_ipi(cpu, 1);
 opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
 }
 static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
 {
-int hw_cpu = hard_smp_processor_id();
-opal_int_set_mfrr(hw_cpu, 0xff);
+int cpu = smp_processor_id();
+kvmppc_set_host_ipi(cpu, 0);
+opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
 return smp_ipi_demux();
 }
+/*
+ * Called when an interrupt is received on an off-line CPU to
+ * clear the interrupt, so that the CPU can go back to nap mode.
+ */
+void icp_opal_flush_interrupt(void)
+{
+unsigned int xirr;
+unsigned int vec;
+
+do {
+xirr = icp_opal_get_xirr();
+vec = xirr & 0x00ffffff;
+if (vec == XICS_IRQ_SPURIOUS)
+break;
+if (vec == XICS_IPI) {
+/* Clear pending IPI */
+int cpu = smp_processor_id();
+kvmppc_set_host_ipi(cpu, 0);
+opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
+} else {
+pr_err("XICS: hw interrupt 0x%x to offline cpu, "
+"disabling\n", vec);
+xics_mask_unknown_vec(vec);
+}
+
+/* EOI the interrupt */
+} while (opal_int_eoi(xirr) > 0);
+}
+
 #endif /* CONFIG_SMP */
 static const struct icp_ops icp_opal_ops = {

View file

@@ -1085,9 +1085,9 @@ static void aesni_free_simds(void)
 aesni_simd_skciphers[i]; i++)
 simd_skcipher_free(aesni_simd_skciphers[i]);
-for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) &&
-aesni_simd_skciphers2[i].simd; i++)
+for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
+if (aesni_simd_skciphers2[i].simd)
 simd_skcipher_free(aesni_simd_skciphers2[i].simd);
 }
 static int __init aesni_init(void)
@@ -1168,7 +1168,7 @@ static int __init aesni_init(void)
 simd = simd_skcipher_create_compat(algname, drvname, basename);
 err = PTR_ERR(simd);
 if (IS_ERR(simd))
-goto unregister_simds;
+continue;
 aesni_simd_skciphers2[i].simd = simd;
 }

View file

@@ -104,6 +104,7 @@ struct cpuinfo_x86 {
 __u8 x86_phys_bits;
 /* CPUID returned core id bits: */
 __u8 x86_coreid_bits;
+__u8 cu_id;
 /* Max extended CPUID function supported: */
 __u32 extended_cpuid_level;
 /* Maximum supported CPUID level, -1=no CPUID: */

View file

@@ -1875,7 +1875,6 @@ static struct irq_chip ioapic_chip __read_mostly = {
 .irq_ack = irq_chip_ack_parent,
 .irq_eoi = ioapic_ack_level,
 .irq_set_affinity = ioapic_set_affinity,
-.irq_retrigger = irq_chip_retrigger_hierarchy,
 .flags = IRQCHIP_SKIP_SET_WAKE,
 };
@@ -1887,7 +1886,6 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
 .irq_ack = irq_chip_ack_parent,
 .irq_eoi = ioapic_ir_ack_level,
 .irq_set_affinity = ioapic_set_affinity,
-.irq_retrigger = irq_chip_retrigger_hierarchy,
 .flags = IRQCHIP_SKIP_SET_WAKE,
 };
@@ -2117,6 +2115,7 @@ static inline void __init check_timer(void)
 if (idx != -1 && irq_trigger(idx))
 unmask_ioapic_irq(irq_get_chip_data(0));
 }
+irq_domain_deactivate_irq(irq_data);
 irq_domain_activate_irq(irq_data);
 if (timer_irq_works()) {
 if (disable_timer_pin_1 > 0)
@@ -2138,6 +2137,7 @@ static inline void __init check_timer(void)
 * legacy devices should be connected to IO APIC #0
 */
 replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
+irq_domain_deactivate_irq(irq_data);
 irq_domain_activate_irq(irq_data);
 legacy_pic->unmask(0);
 if (timer_irq_works()) {

View file

@@ -309,8 +309,22 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 /* get information required for multi-node processors */
 if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+u32 eax, ebx, ecx, edx;
-node_id = cpuid_ecx(0x8000001e) & 7;
+cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+
+node_id = ecx & 0xff;
+smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
+
+if (c->x86 == 0x15)
+c->cu_id = ebx & 0xff;
+
+if (c->x86 >= 0x17) {
+c->cpu_core_id = ebx & 0xff;
+
+if (smp_num_siblings > 1)
+c->x86_max_cores /= smp_num_siblings;
+}
 /*
 * We may have multiple LLCs if L3 caches exist, so check if we

View file

@@ -1015,6 +1015,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 c->x86_model_id[0] = '\0'; /* Unset */
 c->x86_max_cores = 1;
 c->x86_coreid_bits = 0;
+c->cu_id = 0xff;
 #ifdef CONFIG_X86_64
 c->x86_clflush_size = 64;
 c->x86_phys_bits = 36;

View file

@@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
 } else {
 struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
 irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
 disable_irq(hdev->irq);
 irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));

View file

@@ -433,9 +433,15 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 if (c->phys_proc_id == o->phys_proc_id &&
-per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
-c->cpu_core_id == o->cpu_core_id)
+per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
+if (c->cpu_core_id == o->cpu_core_id)
 return topology_sane(c, o, "smt");
+
+if ((c->cu_id != 0xff) &&
+(o->cu_id != 0xff) &&
+(c->cu_id == o->cu_id))
+return topology_sane(c, o, "smt");
+}
 } else if (c->phys_proc_id == o->phys_proc_id &&
 c->cpu_core_id == o->cpu_core_id) {

View file

@@ -1356,6 +1356,9 @@ void __init tsc_init(void)
 (unsigned long)cpu_khz / 1000,
 (unsigned long)cpu_khz % 1000);
+/* Sanitize TSC ADJUST before cyc2ns gets initialized */
+tsc_store_and_check_tsc_adjust(true);
 /*
 * Secondary CPUs do not run through tsc_init(), so set up
 * all the scale factors for all CPUs, assuming the same
@@ -1386,8 +1389,6 @@ void __init tsc_init(void)
 if (unsynchronized_tsc())
 mark_tsc_unstable("TSCs unsynchronized");
-else
-tsc_store_and_check_tsc_adjust(true);
 check_system_tsc_reliable();

View file

@@ -286,13 +286,6 @@ void check_tsc_sync_source(int cpu)
 if (unsynchronized_tsc())
 return;
-if (tsc_clocksource_reliable) {
-if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
-pr_info(
-"Skipped synchronization checks as TSC is reliable.\n");
-return;
-}
 /*
 * Set the maximum number of test runs to
 *  1 if the CPU does not provide the TSC_ADJUST MSR
@@ -380,14 +373,19 @@ void check_tsc_sync_target(void)
 int cpus = 2;
 /* Also aborts if there is no TSC. */
-if (unsynchronized_tsc() || tsc_clocksource_reliable)
+if (unsynchronized_tsc())
 return;
 /*
 * Store, verify and sanitize the TSC adjust register. If
 * successful skip the test.
+ *
+ * The test is also skipped when the TSC is marked reliable. This
+ * is true for SoCs which have no fallback clocksource. On these
+ * SoCs the TSC is frequency synchronized, but still the TSC ADJUST
+ * register might have been wreckaged by the BIOS..
 */
-if (tsc_store_and_check_tsc_adjust(false)) {
+if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) {
 atomic_inc(&skip_test);
 return;
 }

View file

@@ -3182,6 +3182,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
 memcpy(dest, xsave, XSAVE_HDR_OFFSET);
 /* Set XSTATE_BV */
+xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
 *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
 /*

View file

@@ -15,6 +15,7 @@
 #include <linux/debugfs.h>
 #include <linux/mm.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <asm/pgtable.h>
@@ -406,6 +407,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
 } else
 note_page(m, &st, __pgprot(0), 1);
+cond_resched();
 start++;
 }

View file

@@ -306,11 +306,6 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 if (ret == 0 || (ret && ret != -EOPNOTSUPP))
 goto out;
-ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
-ZERO_PAGE(0), biop);
-if (ret == 0 || (ret && ret != -EOPNOTSUPP))
-goto out;
 ret = 0;
 while (nr_sects != 0) {
 bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
@@ -369,6 +364,10 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 return 0;
 }
+if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
+ZERO_PAGE(0)))
+return 0;
 blk_start_plug(&plug);
 ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
 &bio, discard);

View file

@@ -661,9 +661,9 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 unlock:
 list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
 af_alg_free_sg(&rsgl->sgl);
+list_del(&rsgl->list);
 if (rsgl != &ctx->first_rsgl)
 sock_kfree_s(sk, rsgl, sizeof(*rsgl));
-list_del(&rsgl->list);
 }
 INIT_LIST_HEAD(&ctx->list);
 aead_wmem_wakeup(sk);

View file

@@ -2704,6 +2704,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
 struct device *dev = acpi_desc->dev;
 struct acpi_nfit_flush_work flush;
+int rc;
 /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
 device_lock(dev);
@@ -2716,7 +2717,10 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
 INIT_WORK_ONSTACK(&flush.work, flush_probe);
 COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
 queue_work(nfit_wq, &flush.work);
-return wait_for_completion_interruptible(&flush.cmp);
+
+rc = wait_for_completion_interruptible(&flush.cmp);
+cancel_work_sync(&flush.work);
+return rc;
 }
 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,

View file

@@ -558,9 +558,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
 struct firmware_buf *buf = fw_priv->buf;
 __fw_load_abort(buf);
-/* avoid user action after loading abort */
-fw_priv->buf = NULL;
 }
 static LIST_HEAD(pending_fw_head);
@@ -713,7 +710,7 @@ static ssize_t firmware_loading_store(struct device *dev,
 mutex_lock(&fw_lock);
 fw_buf = fw_priv->buf;
-if (!fw_buf)
+if (fw_state_is_aborted(&fw_buf->fw_st))
 goto out;
 switch (loading) {

View file

@@ -389,33 +389,33 @@ static ssize_t show_valid_zones(struct device *dev,
 {
 struct memory_block *mem = to_memory_block(dev);
 unsigned long start_pfn, end_pfn;
+unsigned long valid_start, valid_end, valid_pages;
 unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
-struct page *first_page;
 struct zone *zone;
 int zone_shift = 0;
 start_pfn = section_nr_to_pfn(mem->start_section_nr);
 end_pfn = start_pfn + nr_pages;
-first_page = pfn_to_page(start_pfn);
 /* The block contains more than one zone can not be offlined. */
-if (!test_pages_in_a_zone(start_pfn, end_pfn))
+if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
 return sprintf(buf, "none\n");
-zone = page_zone(first_page);
+zone = page_zone(pfn_to_page(valid_start));
+valid_pages = valid_end - valid_start;
 /* MMOP_ONLINE_KEEP */
 sprintf(buf, "%s", zone->name);
 /* MMOP_ONLINE_KERNEL */
-zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
+zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
 if (zone_shift) {
 strcat(buf, " ");
 strcat(buf, (zone + zone_shift)->name);
 }
 /* MMOP_ONLINE_MOVABLE */
-zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
+zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
 if (zone_shift) {
 strcat(buf, " ");
 strcat(buf, (zone + zone_shift)->name);


@@ -966,13 +966,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
 	}
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	spin_lock_irqsave(&dev->power.lock, flags);
 	retval = rpm_idle(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -998,13 +998,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
 	}
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	spin_lock_irqsave(&dev->power.lock, flags);
 	retval = rpm_suspend(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -1029,7 +1029,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
+			dev->power.runtime_status != RPM_ACTIVE);
 
 	if (rpmflags & RPM_GET_PUT)
 		atomic_inc(&dev->power.usage_count);


@@ -92,7 +92,6 @@ static void add_early_randomness(struct hwrng *rng)
 	mutex_unlock(&reading_mutex);
 	if (bytes_read > 0)
 		add_device_randomness(rng_buffer, bytes_read);
-	memset(rng_buffer, 0, size);
 }
 
 static inline void cleanup_rng(struct kref *kref)
@@ -288,7 +287,6 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
 		}
 	}
 out:
-	memset(rng_buffer, 0, rng_buffer_size());
 	return ret ? : err;
 
 out_unlock_reading:
@@ -427,7 +425,6 @@ static int hwrng_fillfn(void *unused)
 		/* Outside lock, sure, but y'know: randomness. */
 		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
 					   rc * current_quality * 8 >> 10);
-		memset(rng_fillbuf, 0, rng_buffer_size());
 	}
 	hwrng_fill = NULL;
 	return 0;


@@ -784,8 +784,19 @@ static int brcm_avs_target_index(struct cpufreq_policy *policy,
 static int brcm_avs_suspend(struct cpufreq_policy *policy)
 {
 	struct private_data *priv = policy->driver_data;
+	int ret;
 
-	return brcm_avs_get_pmap(priv, &priv->pmap);
+	ret = brcm_avs_get_pmap(priv, &priv->pmap);
+	if (ret)
+		return ret;
+
+	/*
+	 * We can't use the P-state returned by brcm_avs_get_pmap(), since
+	 * that's the initial P-state from when the P-map was downloaded to the
+	 * AVS co-processor, not necessarily the P-state we are running at now.
+	 * So, we get the current P-state explicitly.
+	 */
+	return brcm_avs_get_pstate(priv, &priv->pmap.state);
 }
 
 static int brcm_avs_resume(struct cpufreq_policy *policy)
@@ -954,9 +965,9 @@ static ssize_t show_brcm_avs_pmap(struct cpufreq_policy *policy, char *buf)
 	brcm_avs_parse_p1(pmap.p1, &mdiv_p0, &pdiv, &ndiv);
 	brcm_avs_parse_p2(pmap.p2, &mdiv_p1, &mdiv_p2, &mdiv_p3, &mdiv_p4);
 
-	return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u\n",
+	return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u %u %u\n",
 		pmap.p1, pmap.p2, ndiv, pdiv, mdiv_p0, mdiv_p1, mdiv_p2,
-		mdiv_p3, mdiv_p4);
+		mdiv_p3, mdiv_p4, pmap.mode, pmap.state);
 }
 
 static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf)


@@ -1235,6 +1235,25 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 	cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
 }
 
+#define MSR_IA32_POWER_CTL_BIT_EE	19
+
+/* Disable energy efficiency optimization */
+static void intel_pstate_disable_ee(int cpu)
+{
+	u64 power_ctl;
+	int ret;
+
+	ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
+	if (ret)
+		return;
+
+	if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
+		pr_info("Disabling energy efficiency optimization\n");
+		power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
+		wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
+	}
+}
+
 static int atom_get_min_pstate(void)
 {
 	u64 value;
@@ -1845,6 +1864,11 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
 	{}
 };
 
+static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
+	ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_params),
+	{}
+};
+
 static int intel_pstate_init_cpu(unsigned int cpunum)
 {
 	struct cpudata *cpu;
@@ -1875,6 +1899,12 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 	cpu->cpu = cpunum;
 
 	if (hwp_active) {
+		const struct x86_cpu_id *id;
+
+		id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
+		if (id)
+			intel_pstate_disable_ee(cpunum);
+
 		intel_pstate_hwp_enable(cpu);
 		pid_params.sample_rate_ms = 50;
 		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;


@@ -959,7 +959,7 @@ static irqreturn_t ccp5_irq_handler(int irq, void *data)
 static void ccp5_config(struct ccp_device *ccp)
 {
 	/* Public side */
-	iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
+	iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
 }
 
 static void ccp5other_config(struct ccp_device *ccp)


@@ -238,6 +238,7 @@ struct ccp_dma_chan {
 	struct ccp_device *ccp;
 
 	spinlock_t lock;
+	struct list_head created;
 	struct list_head pending;
 	struct list_head active;
 	struct list_head complete;


@@ -63,6 +63,7 @@ static void ccp_free_chan_resources(struct dma_chan *dma_chan)
 	ccp_free_desc_resources(chan->ccp, &chan->complete);
 	ccp_free_desc_resources(chan->ccp, &chan->active);
 	ccp_free_desc_resources(chan->ccp, &chan->pending);
+	ccp_free_desc_resources(chan->ccp, &chan->created);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
 }
@@ -273,6 +274,7 @@ static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
 	spin_lock_irqsave(&chan->lock, flags);
 	cookie = dma_cookie_assign(tx_desc);
+	list_del(&desc->entry);
 	list_add_tail(&desc->entry, &chan->pending);
 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -426,7 +428,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
 
 	spin_lock_irqsave(&chan->lock, sflags);
-	list_add_tail(&desc->entry, &chan->pending);
+	list_add_tail(&desc->entry, &chan->created);
 	spin_unlock_irqrestore(&chan->lock, sflags);
@@ -610,6 +612,7 @@ static int ccp_terminate_all(struct dma_chan *dma_chan)
 	/*TODO: Purge the complete list? */
 	ccp_free_desc_resources(chan->ccp, &chan->active);
 	ccp_free_desc_resources(chan->ccp, &chan->pending);
+	ccp_free_desc_resources(chan->ccp, &chan->created);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -679,6 +682,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
 		chan->ccp = ccp;
 
 		spin_lock_init(&chan->lock);
+		INIT_LIST_HEAD(&chan->created);
 		INIT_LIST_HEAD(&chan->pending);
 		INIT_LIST_HEAD(&chan->active);
 		INIT_LIST_HEAD(&chan->complete);


@@ -158,7 +158,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 	case CRYPTO_ALG_TYPE_AEAD:
 		ctx_req.req.aead_req = (struct aead_request *)req;
 		ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
-		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
+		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
 			     ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
 		if (ctx_req.ctx.reqctx->skb) {
 			kfree_skb(ctx_req.ctx.reqctx->skb);
@@ -1362,8 +1362,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct phys_sge_parm sg_param;
-	struct scatterlist *src, *dst;
-	struct scatterlist src_sg[2], dst_sg[2];
+	struct scatterlist *src;
 	unsigned int frags = 0, transhdr_len;
 	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
 	unsigned int kctx_len = 0;
@@ -1383,19 +1382,21 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
 		goto err;
-	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-	dst = src;
+	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+	reqctx->dst = src;
 	if (req->src != req->dst) {
 		err = chcr_copy_assoc(req, aeadctx);
 		if (err)
 			return ERR_PTR(err);
-		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+					       req->assoclen);
 	}
 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
 		null = 1;
 		assoclen = 0;
 	}
-	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
 					     (op_type ? -authsize : authsize));
 	if (reqctx->dst_nents <= 0) {
 		pr_err("AUTHENC:Invalid Destination sg entries\n");
@@ -1460,7 +1461,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
 	sg_param.qid = qid;
 	sg_param.align = 0;
-	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
 				 &sg_param))
 		goto dstmap_fail;
@@ -1711,8 +1712,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct phys_sge_parm sg_param;
-	struct scatterlist *src, *dst;
-	struct scatterlist src_sg[2], dst_sg[2];
+	struct scatterlist *src;
 	unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
 	unsigned int dst_size = 0, kctx_len;
 	unsigned int sub_type;
@@ -1728,17 +1728,19 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
 		goto err;
 	sub_type = get_aead_subtype(tfm);
-	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-	dst = src;
+	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+	reqctx->dst = src;
 	if (req->src != req->dst) {
 		err = chcr_copy_assoc(req, aeadctx);
 		if (err) {
 			pr_err("AAD copy to destination buffer fails\n");
 			return ERR_PTR(err);
 		}
-		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+					       req->assoclen);
 	}
-	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
 					     (op_type ? -authsize : authsize));
 	if (reqctx->dst_nents <= 0) {
 		pr_err("CCM:Invalid Destination sg entries\n");
@@ -1777,7 +1779,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
 	sg_param.qid = qid;
 	sg_param.align = 0;
-	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
 				 &sg_param))
 		goto dstmap_fail;
@@ -1809,8 +1811,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct phys_sge_parm sg_param;
-	struct scatterlist *src, *dst;
-	struct scatterlist src_sg[2], dst_sg[2];
+	struct scatterlist *src;
 	unsigned int frags = 0, transhdr_len;
 	unsigned int ivsize = AES_BLOCK_SIZE;
 	unsigned int dst_size = 0, kctx_len;
@@ -1832,13 +1833,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
 		goto err;
-	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-	dst = src;
+	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+	reqctx->dst = src;
 	if (req->src != req->dst) {
 		err = chcr_copy_assoc(req, aeadctx);
 		if (err)
 			return ERR_PTR(err);
-		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+					       req->assoclen);
 	}
 
 	if (!req->cryptlen)
@@ -1848,7 +1850,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 		crypt_len = AES_BLOCK_SIZE;
 	else
 		crypt_len = req->cryptlen;
-	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
 					     (op_type ? -authsize : authsize));
 	if (reqctx->dst_nents <= 0) {
 		pr_err("GCM:Invalid Destination sg entries\n");
@@ -1923,7 +1925,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
 	sg_param.qid = qid;
 	sg_param.align = 0;
-	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
 				 &sg_param))
 		goto dstmap_fail;
@@ -1937,7 +1939,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 		write_sg_to_skb(skb, &frags, src, req->cryptlen);
 	} else {
 		aes_gcm_empty_pld_pad(req->dst, authsize - 1);
-		write_sg_to_skb(skb, &frags, dst, crypt_len);
+		write_sg_to_skb(skb, &frags, reqctx->dst, crypt_len);
+
 	}
 
 	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
@@ -2189,8 +2192,8 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 	unsigned int ck_size;
 	int ret = 0, key_ctx_size = 0;
 
-	if (get_aead_subtype(aead) ==
-	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
+	    keylen > 3) {
 		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
 		memcpy(aeadctx->salt, key + keylen, 4);
 	}


@@ -52,6 +52,7 @@ static struct cxgb4_uld_info chcr_uld_info = {
 int assign_chcr_device(struct chcr_dev **dev)
 {
 	struct uld_ctx *u_ctx;
+	int ret = -ENXIO;
 
 	/*
 	 * Which device to use if multiple devices are available TODO
@@ -59,15 +60,14 @@ int assign_chcr_device(struct chcr_dev **dev)
 	 * must go to the same device to maintain the ordering.
 	 */
 	mutex_lock(&dev_mutex); /* TODO ? */
-	u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry);
-	if (!u_ctx) {
-		mutex_unlock(&dev_mutex);
-		return -ENXIO;
-	}
-
-	*dev = u_ctx->dev;
+	list_for_each_entry(u_ctx, &uld_ctx_list, entry)
+		if (u_ctx && u_ctx->dev) {
+			*dev = u_ctx->dev;
+			ret = 0;
+			break;
+		}
 	mutex_unlock(&dev_mutex);
-	return 0;
+	return ret;
 }
 
 static int chcr_dev_add(struct uld_ctx *u_ctx)
@@ -202,10 +202,8 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
 static int __init chcr_crypto_init(void)
 {
-	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) {
+	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
 		pr_err("ULD register fail: No chcr crypto support in cxgb4");
-		return -1;
-	}
 
 	return 0;
 }


@@ -158,6 +158,9 @@ struct ablk_ctx {
 };
 
 struct chcr_aead_reqctx {
 	struct sk_buff *skb;
+	struct scatterlist *dst;
+	struct scatterlist srcffwd[2];
+	struct scatterlist dstffwd[2];
 	short int dst_nents;
 	u16 verify;
 	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];


@@ -233,7 +233,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			&hw_data->accel_capabilities_mask);
 
 	/* Find and map all the device's BARS */
-	i = 0;
+	i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
 	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
 			 ADF_PCI_MAX_BARS * 2) {


@@ -69,6 +69,7 @@
 #define ADF_ERRSOU5 (0x3A000 + 0xD8)
 #define ADF_DEVICE_FUSECTL_OFFSET 0x40
 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
+#define ADF_DEVICE_FUSECTL_MASK 0x80000000
 #define ADF_PCI_MAX_BARS 3
 #define ADF_DEVICE_NAME_LENGTH 32
 #define ADF_ETR_MAX_RINGS_PER_BANK 16


@@ -456,7 +456,7 @@ static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
 	unsigned int csr_val;
 	int times = 30;
 
-	if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID)
+	if (handle->pci_dev->device != ADF_DH895XCC_PCI_DEVICE_ID)
 		return 0;
 
 	csr_val = ADF_CSR_RD(csr_addr, 0);
@@ -716,7 +716,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
 		(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
 				 LOCAL_TO_XFER_REG_OFFSET);
 	handle->pci_dev = pci_info->pci_dev;
-	if (handle->pci_dev->device != ADF_C3XXX_PCI_DEVICE_ID) {
+	if (handle->pci_dev->device == ADF_DH895XCC_PCI_DEVICE_ID) {
 		sram_bar =
 			&pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
 		handle->hal_sram_addr_v = sram_bar->virt_addr;


@@ -254,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 	}
 	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
+	if (adev->mode_info.num_crtc)
+		amdgpu_display_set_vga_render_state(adev, false);
+
 	gmc_v6_0_mc_stop(adev, &save);
 
 	if (gmc_v6_0_wait_for_idle((void *)adev)) {
@@ -283,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 	}
 	gmc_v6_0_mc_resume(adev, &save);
-	amdgpu_display_set_vga_render_state(adev, false);
 }
 
 static int gmc_v6_0_mc_init(struct amdgpu_device *adev)


@@ -2032,13 +2032,16 @@ static void complete_crtc_signaling(struct drm_device *dev,
 	}
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct drm_pending_vblank_event *event = crtc_state->event;
 		/*
-		 * TEST_ONLY and PAGE_FLIP_EVENT are mutually
-		 * exclusive, if they weren't, this code should be
-		 * called on success for TEST_ONLY too.
+		 * Free the allocated event. drm_atomic_helper_setup_commit
+		 * can allocate an event too, so only free it if it's ours
+		 * to prevent a double free in drm_atomic_state_clear.
 		 */
-		if (crtc_state->event)
-			drm_event_cancel_free(dev, &crtc_state->event->base);
+		if (event && (event->base.fence || event->base.file_priv)) {
+			drm_event_cancel_free(dev, &event->base);
+			crtc_state->event = NULL;
+		}
 	}
 
 	if (!fence_state)


@@ -1666,9 +1666,6 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 		funcs = plane->helper_private;
 
-		if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
-			continue;
-
 		if (funcs->prepare_fb) {
 			ret = funcs->prepare_fb(plane, plane_state);
 			if (ret)
@@ -1685,9 +1682,6 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 		if (j >= i)
 			continue;
 
-		if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
-			continue;
-
 		funcs = plane->helper_private;
 
 		if (funcs->cleanup_fb)
@@ -1954,9 +1948,6 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
 	for_each_plane_in_state(old_state, plane, plane_state, i) {
 		const struct drm_plane_helper_funcs *funcs;
 
-		if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc))
-			continue;
-
 		funcs = plane->helper_private;
 
 		if (funcs->cleanup_fb)


@@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev,
 	INIT_LIST_HEAD(&connector->probed_modes);
 	INIT_LIST_HEAD(&connector->modes);
+	mutex_init(&connector->mutex);
 	connector->edid_blob_ptr = NULL;
 	connector->status = connector_status_unknown;
@@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
 		connector->funcs->atomic_destroy_state(connector,
 						       connector->state);
 
+	mutex_destroy(&connector->mutex);
+
 	memset(connector, 0, sizeof(*connector));
 }
 EXPORT_SYMBOL(drm_connector_cleanup);
@@ -374,14 +377,18 @@ EXPORT_SYMBOL(drm_connector_cleanup);
  */
 int drm_connector_register(struct drm_connector *connector)
 {
-	int ret;
+	int ret = 0;
 
-	if (connector->registered)
+	if (!connector->dev->registered)
 		return 0;
 
+	mutex_lock(&connector->mutex);
+	if (connector->registered)
+		goto unlock;
+
 	ret = drm_sysfs_connector_add(connector);
 	if (ret)
-		return ret;
+		goto unlock;
 
 	ret = drm_debugfs_connector_add(connector);
 	if (ret) {
@@ -397,12 +404,14 @@ int drm_connector_register(struct drm_connector *connector)
 	drm_mode_object_register(connector->dev, &connector->base);
 
 	connector->registered = true;
-	return 0;
+	goto unlock;
 
 err_debugfs:
 	drm_debugfs_connector_remove(connector);
 err_sysfs:
 	drm_sysfs_connector_remove(connector);
+unlock:
+	mutex_unlock(&connector->mutex);
 	return ret;
 }
 EXPORT_SYMBOL(drm_connector_register);
@@ -415,8 +424,11 @@ EXPORT_SYMBOL(drm_connector_register);
  */
 void drm_connector_unregister(struct drm_connector *connector)
 {
-	if (!connector->registered)
+	mutex_lock(&connector->mutex);
+	if (!connector->registered) {
+		mutex_unlock(&connector->mutex);
 		return;
+	}
 
 	if (connector->funcs->early_unregister)
 		connector->funcs->early_unregister(connector);
@@ -425,6 +437,7 @@ void drm_connector_unregister(struct drm_connector *connector)
 	drm_debugfs_connector_remove(connector);
 
 	connector->registered = false;
+	mutex_unlock(&connector->mutex);
 }
 EXPORT_SYMBOL(drm_connector_unregister);


@@ -745,6 +745,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 	if (ret)
 		goto err_minors;
 
+	dev->registered = true;
+
 	if (dev->driver->load) {
 		ret = dev->driver->load(dev, flags);
 		if (ret)
@@ -785,6 +787,8 @@ void drm_dev_unregister(struct drm_device *dev)
 	drm_lastclose(dev);
 
+	dev->registered = false;
+
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_modeset_unregister_all(dev);


@@ -213,7 +213,8 @@ static void intel_detect_pch(struct drm_device *dev)
 		} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
 			dev_priv->pch_type = PCH_KBP;
 			DRM_DEBUG_KMS("Found KabyPoint PCH\n");
-			WARN_ON(!IS_KABYLAKE(dev_priv));
+			WARN_ON(!IS_SKYLAKE(dev_priv) &&
+				!IS_KABYLAKE(dev_priv));
 		} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
 			   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
 			   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
@@ -2427,6 +2428,7 @@ static int intel_runtime_resume(struct device *kdev)
 	 * we can do is to hope that things will still work (and disable RPM).
 	 */
 	i915_gem_init_swizzling(dev_priv);
+	i915_gem_restore_fences(dev_priv);
 
 	intel_runtime_pm_enable_interrupts(dev_priv);


@@ -1012,6 +1012,8 @@ struct intel_fbc {
 	struct work_struct underrun_work;
 
 	struct intel_fbc_state_cache {
+		struct i915_vma *vma;
+
 		struct {
 			unsigned int mode_flags;
 			uint32_t hsw_bdw_pixel_rate;
@@ -1025,15 +1027,14 @@ struct intel_fbc {
 		} plane;
 
 		struct {
-			u64 ilk_ggtt_offset;
 			uint32_t pixel_format;
 			unsigned int stride;
-			int fence_reg;
-			unsigned int tiling_mode;
 		} fb;
 	} state_cache;
 
 	struct intel_fbc_reg_params {
+		struct i915_vma *vma;
+
 		struct {
 			enum pipe pipe;
 			enum plane plane;
@@ -1041,10 +1042,8 @@ struct intel_fbc {
 		} crtc;
 
 		struct {
-			u64 ggtt_offset;
 			uint32_t pixel_format;
 			unsigned int stride;
-			int fence_reg;
 		} fb;
 
 		int cfb_size;
@@ -3168,13 +3167,6 @@ i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
 	return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
 }
 
-static inline unsigned long
-i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
-			    const struct i915_ggtt_view *view)
-{
-	return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
-}
-
 /* i915_gem_fence_reg.c */
 int __must_check i915_vma_get_fence(struct i915_vma *vma);
 int __must_check i915_vma_put_fence(struct i915_vma *vma);


@@ -2010,8 +2010,16 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
 
-		if (WARN_ON(reg->pin_count))
-			continue;
-
+		/* Ideally we want to assert that the fence register is not
+		 * live at this point (i.e. that no piece of code will be
+		 * trying to write through fence + GTT, as that both violates
+		 * our tracking of activity and associated locking/barriers,
+		 * but also is illegal given that the hw is powered down).
+		 *
+		 * Previously we used reg->pin_count as a "liveness" indicator.
+		 * That is not sufficient, and we need a more fine-grained
+		 * tool if we want to have a sanity check here.
+		 */
 		if (!reg->vma)
 			continue;
@@ -3478,7 +3486,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
 
 	/* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
-	if (obj->cache_dirty) {
+	if (obj->cache_dirty || obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
 		i915_gem_clflush_object(obj, true);
 		intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
 	}


@@ -1181,14 +1181,14 @@ validate_exec_list(struct drm_device *dev,
 			if (exec[i].offset !=
 			    gen8_canonical_addr(exec[i].offset & PAGE_MASK))
 				return -EINVAL;
-
-			/* From drm_mm perspective address space is continuous,
-			 * so from this point we're always using non-canonical
-			 * form internally.
-			 */
-			exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
 		}
 
+		/* From drm_mm perspective address space is continuous,
+		 * so from this point we're always using non-canonical
+		 * form internally.
+		 */
+		exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
+
 		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
 			return -EINVAL;


@@ -66,8 +66,16 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 	max_order = MAX_ORDER;
 #ifdef CONFIG_SWIOTLB
-	if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
-		max_order = min(max_order, ilog2(IO_TLB_SEGPAGES));
+	if (swiotlb_nr_tbl()) {
+		unsigned int max_segment;
+
+		max_segment = swiotlb_max_segment();
+		if (max_segment) {
+			max_segment = max_t(unsigned int, max_segment,
+					    PAGE_SIZE) >> PAGE_SHIFT;
+			max_order = min(max_order, ilog2(max_segment));
+		}
+	}
 #endif
 
 	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
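
The hunk above converts a byte limit into whole pages (never less than one page) and clamps the allocation order to fit inside it. A standalone sketch of the same arithmetic in plain C follows; the 256 KiB sample value is made up for illustration and is not what swiotlb_max_segment() necessarily returns.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1u << PAGE_SHIFT)
#define MAX_ORDER	11

static unsigned int ilog2(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int max_segment = 256 * 1024;	/* hypothetical byte limit */
	unsigned int max_order = MAX_ORDER;

	if (max_segment) {
		/* At least one page, then express the limit in pages. */
		if (max_segment < PAGE_SIZE)
			max_segment = PAGE_SIZE;
		max_segment >>= PAGE_SHIFT;
		if (ilog2(max_segment) < max_order)
			max_order = ilog2(max_segment);
	}

	/* 256 KiB / 4 KiB = 64 pages -> order 6, i.e. 2^6 pages per allocation. */
	printf("max_order = %u\n", max_order);
	return 0;
}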


@@ -85,6 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
 	__drm_atomic_helper_plane_duplicate_state(plane, state);
 
+	intel_state->vma = NULL;
+
 	return state;
 }
@@ -100,6 +102,24 @@ void
 intel_plane_destroy_state(struct drm_plane *plane,
 			  struct drm_plane_state *state)
 {
+	struct i915_vma *vma;
+
+	vma = fetch_and_zero(&to_intel_plane_state(state)->vma);
+
+	/*
+	 * FIXME: Normally intel_cleanup_plane_fb handles destruction of vma.
+	 * We currently don't clear all planes during driver unload, so we have
+	 * to be able to unpin vma here for now.
+	 *
+	 * Normally this can only happen during unload when kmscon is disabled
+	 * and userspace doesn't attempt to set a framebuffer at all.
+	 */
+	if (vma) {
+		mutex_lock(&plane->dev->struct_mutex);
+		intel_unpin_fb_vma(vma);
+		mutex_unlock(&plane->dev->struct_mutex);
+	}
+
 	drm_atomic_helper_plane_destroy_state(plane, state);
 }
