diff --git a/Documentation/ABI/obsolete/sysfs-gpio b/Documentation/ABI/obsolete/sysfs-gpio index 32513dc2eec9..40d41ea1a3f5 100644 --- a/Documentation/ABI/obsolete/sysfs-gpio +++ b/Documentation/ABI/obsolete/sysfs-gpio @@ -11,7 +11,7 @@ Description: Kernel code may export it for complete or partial access. GPIOs are identified as they are inside the kernel, using integers in - the range 0..INT_MAX. See Documentation/gpio/gpio.txt for more information. + the range 0..INT_MAX. See Documentation/gpio for more information. /sys/class/gpio /export ... asks the kernel to export a GPIO to userspace diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index bd4975e132d3..9c5e7732d249 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -238,9 +238,6 @@ Description: Discover and change clock speed of CPUs See files in Documentation/cpu-freq/ for more information. - In particular, read Documentation/cpu-freq/user-guide.txt - to learn how to control the knobs. - What: /sys/devices/system/cpu/cpu#/cpufreq/freqdomain_cpus Date: June 2013 diff --git a/Documentation/ABI/testing/sysfs-platform-ideapad-laptop b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop index 597a2f3d1efc..1b31be3f996a 100644 --- a/Documentation/ABI/testing/sysfs-platform-ideapad-laptop +++ b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop @@ -25,3 +25,16 @@ Description: Control touchpad mode. * 1 -> Switched On * 0 -> Switched Off + +What: /sys/bus/pci/devices///VPC2004:00/fn_lock +Date: May 2018 +KernelVersion: 4.18 +Contact: "Oleg Keri " +Description: + Control fn-lock mode. + * 1 -> Switched On + * 0 -> Switched Off + + For example: + # echo "0" > \ + /sys/bus/pci/devices/0000:00:1f.0/PNP0C09:00/VPC2004:00/fn_lock diff --git a/Documentation/acpi/method-customizing.txt b/Documentation/acpi/method-customizing.txt index a3f598e141f2..7235da975f23 100644 --- a/Documentation/acpi/method-customizing.txt +++ b/Documentation/acpi/method-customizing.txt @@ -16,7 +16,8 @@ control method rather than override the entire DSDT, because kernel rebuild/reboot is not needed and test result can be got in minutes. Note: Only ACPI METHOD can be overridden, any other object types like - "Device", "OperationRegion", are not recognized. + "Device", "OperationRegion", are not recognized. Methods + declared inside scope operators are also not supported. Note: The same ACPI control method can be overridden for many times, and it's always the latest one that used by Linux/kernel. Note: To get the ACPI debug object output (Store (AAAA, Debug)), @@ -32,8 +33,6 @@ Note: To get the ACPI debug object output (Store (AAAA, Debug)), DefinitionBlock ("", "SSDT", 1, "", "", 0x20080715) { - External (ACON) - Method (\_SB_.AC._PSR, 0, NotSerialized) { Store ("In AC _PSR", Debug) @@ -42,9 +41,10 @@ Note: To get the ACPI debug object output (Store (AAAA, Debug)), } Note that the full pathname of the method in ACPI namespace should be used. - And remember to use "External" to declare external objects. e) assemble the file to generate the AML code of the method. - e.g. "iasl psr.asl" (psr.aml is generated as a result) + e.g. "iasl -vw 6084 psr.asl" (psr.aml is generated as a result) + If parameter "-vw 6084" is not supported by your iASL compiler, + please try a newer version. 
f) mount debugfs by "mount -t debugfs none /sys/kernel/debug" g) override the old method via the debugfs by running "cat /tmp/psr.aml > /sys/kernel/debug/acpi/custom_method" diff --git a/Documentation/admin-guide/LSM/apparmor.rst b/Documentation/admin-guide/LSM/apparmor.rst index 3e9734bd0e05..6cf81bbd7ce8 100644 --- a/Documentation/admin-guide/LSM/apparmor.rst +++ b/Documentation/admin-guide/LSM/apparmor.rst @@ -44,8 +44,8 @@ Links Mailing List - apparmor@lists.ubuntu.com -Wiki - http://apparmor.wiki.kernel.org/ +Wiki - http://wiki.apparmor.net -User space tools - https://launchpad.net/apparmor +User space tools - https://gitlab.com/apparmor -Kernel module - git://git.kernel.org/pub/scm/linux/kernel/git/jj/apparmor-dev.git +Kernel module - git://git.kernel.org/pub/scm/linux/kernel/git/jj/linux-apparmor diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 638342d0a095..efc7aa7a0670 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -256,7 +256,7 @@ (may crash computer or cause data corruption) ALSA [HW,ALSA] - See Documentation/sound/alsa/alsa-parameters.txt + See Documentation/sound/alsa-configuration.rst alignment= [KNL,ARM] Allow the default userspace alignment fault handler @@ -2926,9 +2926,6 @@ This will also cause panics on machine check exceptions. Useful together with panic=30 to trigger a reboot. - OSS [HW,OSS] - See Documentation/sound/oss/oss-parameters.txt - page_owner= [KNL] Boot-time page_owner enabling option. Storage of the information about who allocated each page is disabled in default. With this switch, @@ -4335,7 +4332,7 @@ [FTRACE] Set and start specified trace events in order to facilitate early boot debugging. The event-list is a comma separated list of trace events to enable. See - also Documentation/trace/events.txt + also Documentation/trace/events.rst trace_options=[option-list] [FTRACE] Enable or disable tracer options at boot. @@ -4350,7 +4347,7 @@ trace_options=stacktrace - See also Documentation/trace/ftrace.txt "trace options" + See also Documentation/trace/ftrace.rst "trace options" section. tp_printk[FTRACE] diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt index 86927029a52d..207eca58efaa 100644 --- a/Documentation/block/biodoc.txt +++ b/Documentation/block/biodoc.txt @@ -752,18 +752,6 @@ completion of the request to the block layer. This means ending tag operations before calling end_that_request_last()! For an example of a user of these helpers, see the IDE tagged command queueing support. -Certain hardware conditions may dictate a need to invalidate the block tag -queue. For instance, on IDE any tagged request error needs to clear both -the hardware and software block queue and enable the driver to sanely restart -all the outstanding requests. There's a third helper to do that: - - blk_queue_invalidate_tags(struct request_queue *q) - - Clear the internal block tag queue and re-add all the pending requests - to the request queue. The driver will receive them again on the - next request_fn run, just like it did the first time it encountered - them. - 3.2.5.2 Tag info Some block functions exist to query current tag status or to go from a @@ -805,8 +793,7 @@ Internally, block manages tags in the blk_queue_tag structure: Most of the above is simple and straight forward, however busy_list may need a bit of explaining. 
Normally we don't care too much about request ordering, but in the event of any barrier requests in the tag queue we need to ensure -that requests are restarted in the order they were queue. This may happen -if the driver needs to use blk_queue_invalidate_tags(). +that requests are restarted in the order they were queued. 3.3 I/O Submission diff --git a/Documentation/crypto/crypto_engine.rst b/Documentation/crypto/crypto_engine.rst index 8272ac92a14f..1d56221dfe35 100644 --- a/Documentation/crypto/crypto_engine.rst +++ b/Documentation/crypto/crypto_engine.rst @@ -8,11 +8,13 @@ The crypto engine API (CE), is a crypto queue manager. Requirement ----------- -You have to put at start of your tfm_ctx the struct crypto_engine_ctx -struct your_tfm_ctx { +You have to put at start of your tfm_ctx the struct crypto_engine_ctx:: + + struct your_tfm_ctx { struct crypto_engine_ctx enginectx; ... -}; + }; + Why: Since CE manage only crypto_async_request, it cannot know the underlying request_type and so have access only on the TFM. So using container_of for accessing __ctx is impossible. diff --git a/Documentation/device-mapper/writecache.txt b/Documentation/device-mapper/writecache.txt new file mode 100644 index 000000000000..4424fa2c67d7 --- /dev/null +++ b/Documentation/device-mapper/writecache.txt @@ -0,0 +1,68 @@ +The writecache target caches writes on persistent memory or on SSD. It +doesn't cache reads because reads are supposed to be cached in page cache +in normal RAM. + +When the device is constructed, the first sector should be zeroed or the +first sector should contain a valid superblock from a previous invocation. + +Constructor parameters: +1. type of the cache device - "p" or "s" + p - persistent memory + s - SSD +2. the underlying device that will be cached +3. the cache device +4. block size (4096 is recommended; the maximum block size is the page + size) +5. the number of optional parameters (the parameters with an argument + count as two) + high_watermark n (default: 50) + start writeback when the number of used blocks reaches this + watermark + low_watermark x (default: 45) + stop writeback when the number of used blocks drops below + this watermark + writeback_jobs n (default: unlimited) + limit the number of blocks that are in flight during + writeback. Setting this value reduces writeback + throughput, but it may improve latency of read requests + autocommit_blocks n (default: 64 for pmem, 65536 for ssd) + when the application writes this number of blocks without + issuing the FLUSH request, the blocks are automatically + committed + autocommit_time ms (default: 1000) + autocommit time in milliseconds. The data is automatically + committed if this time passes and no FLUSH request is + received + fua (by default on) + applicable only to persistent memory - use the FUA flag + when writing data from persistent memory back to the + underlying device + nofua + applicable only to persistent memory - don't use the FUA + flag when writing back data and send the FLUSH request + afterwards + - some underlying devices perform better with fua, some + with nofua. The user should test it + +Status: +1. error indicator - 0 if there was no error, otherwise error number +2. the number of blocks +3. the number of free blocks +4. the number of blocks under writeback + +Messages: + flush + flush the cache device. The message returns successfully + if the cache device was flushed without an error + flush_on_suspend + flush the cache device on next suspend.
Use this message + when you are going to remove the cache device. The proper + sequence for removing the cache device is: + 1. send the "flush_on_suspend" message + 2. load an inactive table with a linear target that maps + to the underlying device + 3. suspend the device + 4. ask for status and verify that there are no errors + 5. resume the device, so that it will use the linear + target + 6. the cache device is now inactive and it can be deleted diff --git a/Documentation/devicetree/bindings/clock/st/st,clkgen.txt b/Documentation/devicetree/bindings/clock/st/st,clkgen.txt index 7364953d0d0b..45ac19bfa0a9 100644 --- a/Documentation/devicetree/bindings/clock/st/st,clkgen.txt +++ b/Documentation/devicetree/bindings/clock/st/st,clkgen.txt @@ -31,10 +31,10 @@ This binding uses the common clock binding[1]. Each subnode should use the binding described in [2]..[7] [1] Documentation/devicetree/bindings/clock/clock-bindings.txt -[3] Documentation/devicetree/bindings/clock/st,clkgen-mux.txt -[4] Documentation/devicetree/bindings/clock/st,clkgen-pll.txt -[7] Documentation/devicetree/bindings/clock/st,quadfs.txt -[8] Documentation/devicetree/bindings/clock/st,flexgen.txt +[3] Documentation/devicetree/bindings/clock/st/st,clkgen-mux.txt +[4] Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt +[7] Documentation/devicetree/bindings/clock/st/st,quadfs.txt +[8] Documentation/devicetree/bindings/clock/st/st,flexgen.txt Required properties: diff --git a/Documentation/devicetree/bindings/clock/ti/gate.txt b/Documentation/devicetree/bindings/clock/ti/gate.txt index 03f8fdee62a7..56d603c1f716 100644 --- a/Documentation/devicetree/bindings/clock/ti/gate.txt +++ b/Documentation/devicetree/bindings/clock/ti/gate.txt @@ -10,7 +10,7 @@ will be controlled instead and the corresponding hw-ops for that is used. [1] Documentation/devicetree/bindings/clock/clock-bindings.txt -[2] Documentation/devicetree/bindings/clock/gate-clock.txt +[2] Documentation/devicetree/bindings/clock/gpio-gate-clock.txt [3] Documentation/devicetree/bindings/clock/ti/clockdomain.txt Required properties: diff --git a/Documentation/devicetree/bindings/clock/ti/interface.txt b/Documentation/devicetree/bindings/clock/ti/interface.txt index 3111a409fea6..3f4704040140 100644 --- a/Documentation/devicetree/bindings/clock/ti/interface.txt +++ b/Documentation/devicetree/bindings/clock/ti/interface.txt @@ -9,7 +9,7 @@ companion clock finding (match corresponding functional gate clock) and hardware autoidle enable / disable. [1] Documentation/devicetree/bindings/clock/clock-bindings.txt -[2] Documentation/devicetree/bindings/clock/gate-clock.txt +[2] Documentation/devicetree/bindings/clock/gpio-gate-clock.txt Required properties: - compatible : shall be one of: diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt index d36f07e0a2bb..0551c78619de 100644 --- a/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt +++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt @@ -8,7 +8,7 @@ Required properties: "intermediate" - A parent of "cpu" clock which is used as "intermediate" clock source (usually MAINPLL) when the original CPU PLL is under transition and not stable yet. - Please refer to Documentation/devicetree/bindings/clk/clock-bindings.txt for + Please refer to Documentation/devicetree/bindings/clock/clock-bindings.txt for generic clock consumer properties. 
- operating-points-v2: Please refer to Documentation/devicetree/bindings/opp/opp.txt for detail. diff --git a/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt b/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt index d6d2833482c9..fc2bcbe26b1e 100644 --- a/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt +++ b/Documentation/devicetree/bindings/devfreq/rk3399_dmc.txt @@ -12,7 +12,7 @@ Required properties: - clocks: Phandles for clock specified in "clock-names" property - clock-names : The name of clock used by the DFI, must be "pclk_ddr_mon"; -- operating-points-v2: Refer to Documentation/devicetree/bindings/power/opp.txt +- operating-points-v2: Refer to Documentation/devicetree/bindings/opp/opp.txt for details. - center-supply: DMC supply node. - status: Marks the node enabled/disabled. diff --git a/Documentation/devicetree/bindings/display/bridge/tda998x.txt b/Documentation/devicetree/bindings/display/bridge/tda998x.txt index 1a4eaca40d94..f5a02f61dd36 100644 --- a/Documentation/devicetree/bindings/display/bridge/tda998x.txt +++ b/Documentation/devicetree/bindings/display/bridge/tda998x.txt @@ -30,7 +30,7 @@ Optional properties: - nxp,calib-gpios: calibration GPIO, which must correspond with the gpio used for the TDA998x interrupt pin. -[1] Documentation/sound/alsa/soc/DAI.txt +[1] Documentation/sound/soc/dai.rst [2] include/dt-bindings/display/tda998x.h Example: diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt index 039219df05c5..18a2cde2e5f3 100644 --- a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt +++ b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt @@ -34,7 +34,7 @@ Optional properties: - mali-supply : Phandle to regulator for the Mali device. Refer to Documentation/devicetree/bindings/regulator/regulator.txt for details. -- operating-points-v2 : Refer to Documentation/devicetree/bindings/power/opp.txt +- operating-points-v2 : Refer to Documentation/devicetree/bindings/opp/opp.txt for details. diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt index c1f65d1dac1d..63cd91176a68 100644 --- a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt +++ b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt @@ -44,7 +44,7 @@ Optional properties: - memory-region: Memory region to allocate from, as defined in - Documentation/devicetree/bindi/reserved-memory/reserved-memory.txt + Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt - mali-supply: Phandle to regulator for the Mali device, as defined in diff --git a/Documentation/devicetree/bindings/i2c/i2c-davinci.txt b/Documentation/devicetree/bindings/i2c/i2c-davinci.txt index 64e6e656c345..b745f3706120 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-davinci.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-davinci.txt @@ -24,7 +24,7 @@ Recommended properties : - clock-frequency : desired I2C bus clock frequency in Hz. - ti,has-pfunc: boolean; if defined, it indicates that SoC supports PFUNC registers. PFUNC registers allow to switch I2C pins to function as - GPIOs, so they can by toggled manually. + GPIOs, so they can be toggled manually. 
Example (enbw_cmc board): i2c@1c22000 { diff --git a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt index 4a7811ecd954..7ce8fae55537 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt @@ -15,6 +15,7 @@ Required properties: "renesas,i2c-r8a7796" if the device is a part of a R8A7796 SoC. "renesas,i2c-r8a77965" if the device is a part of a R8A77965 SoC. "renesas,i2c-r8a77970" if the device is a part of a R8A77970 SoC. + "renesas,i2c-r8a77980" if the device is a part of a R8A77980 SoC. "renesas,i2c-r8a77995" if the device is a part of a R8A77995 SoC. "renesas,rcar-gen1-i2c" for a generic R-Car Gen1 compatible device. "renesas,rcar-gen2-i2c" for a generic R-Car Gen2 or RZ/G1 compatible diff --git a/Documentation/devicetree/bindings/i2c/i2c-s3c2410.txt b/Documentation/devicetree/bindings/i2c/i2c-s3c2410.txt index 89b3250f049b..66ae46d3bc2f 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-s3c2410.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-s3c2410.txt @@ -8,9 +8,7 @@ Required properties: (b) "samsung, s3c2440-i2c", for i2c compatible with s3c2440 i2c. (c) "samsung, s3c2440-hdmiphy-i2c", for s3c2440-like i2c used inside HDMIPHY block found on several samsung SoCs - (d) "samsung, exynos5440-i2c", for s3c2440-like i2c used - on EXYNOS5440 which does not need GPIO configuration. - (e) "samsung, exynos5-sata-phy-i2c", for s3c2440-like i2c used as + (d) "samsung, exynos5-sata-phy-i2c", for s3c2440-like i2c used as a host to SATA PHY controller on an internal bus. - reg: physical base address of the controller and length of memory mapped region. diff --git a/Documentation/devicetree/bindings/input/rmi4/rmi_2d_sensor.txt b/Documentation/devicetree/bindings/input/rmi4/rmi_2d_sensor.txt index f2c30c8b725d..9afffbdf6e28 100644 --- a/Documentation/devicetree/bindings/input/rmi4/rmi_2d_sensor.txt +++ b/Documentation/devicetree/bindings/input/rmi4/rmi_2d_sensor.txt @@ -12,7 +12,7 @@ Additional documentation for F11 can be found at: http://www.synaptics.com/sites/default/files/511-000136-01-Rev-E-RMI4-Interfacing-Guide.pdf Optional Touch Properties: -Description in Documentation/devicetree/bindings/input/touch +Description in Documentation/devicetree/bindings/input/touchscreen - touchscreen-inverted-x - touchscreen-inverted-y - touchscreen-swapped-x-y diff --git a/Documentation/devicetree/bindings/input/rotary-encoder.txt b/Documentation/devicetree/bindings/input/rotary-encoder.txt index f99fe5cdeaec..a644408b33b8 100644 --- a/Documentation/devicetree/bindings/input/rotary-encoder.txt +++ b/Documentation/devicetree/bindings/input/rotary-encoder.txt @@ -28,7 +28,7 @@ Deprecated properties: This property is deprecated. Instead, a 'steps-per-period ' value should be used, such as "rotary-encoder,steps-per-period = <2>". -See Documentation/input/rotary-encoder.txt for more information. +See Documentation/input/devices/rotary-encoder.rst for more information. 
Example: diff --git a/Documentation/devicetree/bindings/media/stih407-c8sectpfe.txt b/Documentation/devicetree/bindings/media/stih407-c8sectpfe.txt index c7888d6f6408..880d4d70c9fd 100644 --- a/Documentation/devicetree/bindings/media/stih407-c8sectpfe.txt +++ b/Documentation/devicetree/bindings/media/stih407-c8sectpfe.txt @@ -28,7 +28,7 @@ See: Documentation/devicetree/bindings/clock/clock-bindings.txt - pinctrl-names : a pinctrl state named tsin%d-serial or tsin%d-parallel (where %d is tsin-num) must be defined for each tsin child node. - pinctrl-0 : phandle referencing pin configuration for this tsin configuration -See: Documentation/devicetree/bindings/pinctrl/pinctrl-binding.txt +See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt Required properties (tsin (child) node): diff --git a/Documentation/devicetree/bindings/mfd/as3722.txt b/Documentation/devicetree/bindings/mfd/as3722.txt index 0b2a6099aa20..5297b2210704 100644 --- a/Documentation/devicetree/bindings/mfd/as3722.txt +++ b/Documentation/devicetree/bindings/mfd/as3722.txt @@ -46,7 +46,7 @@ is required: Following properties are require if pin control setting is required at boot. - pinctrl-names: A pinctrl state named "default" be defined, using the - bindings in pinctrl/pinctrl-binding.txt. + bindings in pinctrl/pinctrl-bindings.txt. - pinctrl[0...n]: Properties to contain the phandle that refer to different nodes of pin control settings. These nodes represents the pin control setting of state 0 to state n. Each of these diff --git a/Documentation/devicetree/bindings/mfd/mt6397.txt b/Documentation/devicetree/bindings/mfd/mt6397.txt index d1df77f4d655..0ebd08af777d 100644 --- a/Documentation/devicetree/bindings/mfd/mt6397.txt +++ b/Documentation/devicetree/bindings/mfd/mt6397.txt @@ -12,7 +12,7 @@ MT6397/MT6323 is a multifunction device with the following sub modules: It is interfaced to host controller using SPI interface by a proprietary hardware called PMIC wrapper or pwrap. MT6397/MT6323 MFD is a child device of pwrap. See the following for pwarp node definitions: -Documentation/devicetree/bindings/soc/pwrap.txt +Documentation/devicetree/bindings/soc/mediatek/pwrap.txt This document describes the binding for MFD device and its sub module. diff --git a/Documentation/devicetree/bindings/mfd/stm32-timers.txt b/Documentation/devicetree/bindings/mfd/stm32-timers.txt index 1db6e0057a63..0e900b52e895 100644 --- a/Documentation/devicetree/bindings/mfd/stm32-timers.txt +++ b/Documentation/devicetree/bindings/mfd/stm32-timers.txt @@ -19,6 +19,11 @@ Required parameters: Optional parameters: - resets: Phandle to the parent reset controller. See ../reset/st,stm32-rcc.txt +- dmas: List of phandles to dma channels that can be used for + this timer instance. There may be up to 7 dma channels. +- dma-names: List of dma names. Must match 'dmas' property. Valid + names are: "ch1", "ch2", "ch3", "ch4", "up", "trig", + "com". Optional subnodes: - pwm: See ../pwm/pwm-stm32.txt @@ -44,3 +49,18 @@ Example: reg = <0>; }; }; + +Example with all dmas: + timer@40010000 { + ... + dmas = <&dmamux1 11 0x400 0x0>, + <&dmamux1 12 0x400 0x0>, + <&dmamux1 13 0x400 0x0>, + <&dmamux1 14 0x400 0x0>, + <&dmamux1 15 0x400 0x0>, + <&dmamux1 16 0x400 0x0>, + <&dmamux1 17 0x400 0x0>; + dma-names = "ch1", "ch2", "ch3", "ch4", "up", "trig", "com"; + ... + child nodes...
+ }; diff --git a/Documentation/devicetree/bindings/mfd/sun6i-prcm.txt b/Documentation/devicetree/bindings/mfd/sun6i-prcm.txt index dd2c06540485..daa091c2e67b 100644 --- a/Documentation/devicetree/bindings/mfd/sun6i-prcm.txt +++ b/Documentation/devicetree/bindings/mfd/sun6i-prcm.txt @@ -8,8 +8,8 @@ Required properties: - reg: The PRCM registers range The prcm node may contain several subdevices definitions: - - see Documentation/devicetree/clk/sunxi.txt for clock devices - - see Documentation/devicetree/reset/allwinner,sunxi-clock-reset.txt for reset + - see Documentation/devicetree/bindings/clock/sunxi.txt for clock devices + - see Documentation/devicetree/bindings/reset/allwinner,sunxi-clock-reset.txt for reset controller devices diff --git a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt index a58c173b7ab9..0419a63f73a0 100644 --- a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt +++ b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt @@ -62,7 +62,7 @@ Required properties for a slot (Deprecated - Recommend to use one slot per host) rest of the gpios (depending on the bus-width property) are the data lines in no particular order. The format of the gpio specifier depends on the gpio controller. -(Deprecated - Refer to Documentation/devicetree/binding/pinctrl/samsung-pinctrl.txt) +(Deprecated - Refer to Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt) Example: diff --git a/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt b/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt index 3149297b3933..f064528effed 100644 --- a/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt +++ b/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt @@ -12,7 +12,7 @@ Required properties: See: Documentation/devicetree/bindings/clock/clock-bindings.txt - pinctrl-names: A pinctrl state names "default" must be defined. - pinctrl-0: Phandle referencing pin configuration of the SDHCI controller. - See: Documentation/devicetree/bindings/pinctrl/pinctrl-binding.txt + See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt Example: diff --git a/Documentation/devicetree/bindings/mmc/sdhci-st.txt b/Documentation/devicetree/bindings/mmc/sdhci-st.txt index 6b3d40ca395e..ccf82b4ee838 100644 --- a/Documentation/devicetree/bindings/mmc/sdhci-st.txt +++ b/Documentation/devicetree/bindings/mmc/sdhci-st.txt @@ -20,7 +20,7 @@ Required properties: - pinctrl-names: A pinctrl state names "default" must be defined. - pinctrl-0: Phandle referencing pin configuration of the sd/emmc controller. - See: Documentation/devicetree/bindings/pinctrl/pinctrl-binding.txt + See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt - reg: This must provide the host controller base address and it can also contain the FlashSS Top register for TX/RX delay used by the driver diff --git a/Documentation/devicetree/bindings/net/dsa/ksz.txt b/Documentation/devicetree/bindings/net/dsa/ksz.txt index fd23904ac68e..a700943218ca 100644 --- a/Documentation/devicetree/bindings/net/dsa/ksz.txt +++ b/Documentation/devicetree/bindings/net/dsa/ksz.txt @@ -6,7 +6,7 @@ Required properties: - compatible: For external switch chips, compatible string must be exactly one of: "microchip,ksz9477" -See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional +See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional required and optional properties. 
Examples: diff --git a/Documentation/devicetree/bindings/net/dsa/mt7530.txt b/Documentation/devicetree/bindings/net/dsa/mt7530.txt index a9bc27b93ee3..aa3527f71fdc 100644 --- a/Documentation/devicetree/bindings/net/dsa/mt7530.txt +++ b/Documentation/devicetree/bindings/net/dsa/mt7530.txt @@ -31,7 +31,7 @@ Required properties for the child nodes within ports container: - phy-mode: String, must be either "trgmii" or "rgmii" for port labeled "cpu". -See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional +See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional required, optional properties and how the integrated switch subnodes must be specified. diff --git a/Documentation/devicetree/bindings/nvmem/zii,rave-sp-eeprom.txt b/Documentation/devicetree/bindings/nvmem/zii,rave-sp-eeprom.txt index d5e22fc67d66..0df79d9e07ec 100644 --- a/Documentation/devicetree/bindings/nvmem/zii,rave-sp-eeprom.txt +++ b/Documentation/devicetree/bindings/nvmem/zii,rave-sp-eeprom.txt @@ -18,7 +18,7 @@ Optional properties: Data cells: Data cells are child nodes of eerpom node, bindings for which are -documented in Documentation/bindings/nvmem/nvmem.txt +documented in Documentation/devicetree/bindings/nvmem/nvmem.txt Example: diff --git a/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt b/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt index 7bf9df047a1e..0dcb87d6554f 100644 --- a/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt +++ b/Documentation/devicetree/bindings/pci/hisilicon-pcie.txt @@ -3,7 +3,7 @@ HiSilicon Hip05 and Hip06 PCIe host bridge DT description HiSilicon PCIe host controller is based on the Synopsys DesignWare PCI core. It shares common functions with the PCIe DesignWare core driver and inherits common properties defined in -Documentation/devicetree/bindings/pci/designware-pci.txt. +Documentation/devicetree/bindings/pci/designware-pcie.txt. Additional properties are described here: diff --git a/Documentation/devicetree/bindings/pci/kirin-pcie.txt b/Documentation/devicetree/bindings/pci/kirin-pcie.txt index 6e217c63123d..6bbe43818ad5 100644 --- a/Documentation/devicetree/bindings/pci/kirin-pcie.txt +++ b/Documentation/devicetree/bindings/pci/kirin-pcie.txt @@ -3,7 +3,7 @@ HiSilicon Kirin SoCs PCIe host DT description Kirin PCIe host controller is based on the Synopsys DesignWare PCI core. It shares common functions with the PCIe DesignWare core driver and inherits common properties defined in -Documentation/devicetree/bindings/pci/designware-pci.txt. +Documentation/devicetree/bindings/pci/designware-pcie.txt. Additional properties are described here: diff --git a/Documentation/devicetree/bindings/pci/pci-keystone.txt b/Documentation/devicetree/bindings/pci/pci-keystone.txt index 7e05487544ed..3d4a209b0fd0 100644 --- a/Documentation/devicetree/bindings/pci/pci-keystone.txt +++ b/Documentation/devicetree/bindings/pci/pci-keystone.txt @@ -3,9 +3,9 @@ TI Keystone PCIe interface Keystone PCI host Controller is based on the Synopsys DesignWare PCI hardware version 3.65. It shares common functions with the PCIe DesignWare core driver and inherits common properties defined in -Documentation/devicetree/bindings/pci/designware-pci.txt +Documentation/devicetree/bindings/pci/designware-pcie.txt -Please refer to Documentation/devicetree/bindings/pci/designware-pci.txt +Please refer to Documentation/devicetree/bindings/pci/designware-pcie.txt for the details of DesignWare DT bindings. 
Additional properties are described here as well as properties that are not applicable. diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-max77620.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-max77620.txt index ad4fce3552bb..511fc234558b 100644 --- a/Documentation/devicetree/bindings/pinctrl/pinctrl-max77620.txt +++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-max77620.txt @@ -11,9 +11,9 @@ Optional Pinmux properties: -------------------------- Following properties are required if default setting of pins are required at boot. -- pinctrl-names: A pinctrl state named per <pinctrl-binding.txt>. +- pinctrl-names: A pinctrl state named per <pinctrl-bindings.txt>. - pinctrl[0...n]: Properties to contain the phandle for pinctrl states per - <pinctrl-binding.txt>. + <pinctrl-bindings.txt>. The pin configurations are defined as child of the pinctrl states node. Each sub-node have following properties: diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt index a677145ae6d1..625a22e2f211 100644 --- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt +++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt @@ -101,9 +101,9 @@ Optional Pinmux properties: -------------------------- Following properties are required if default setting of pins are required at boot. -- pinctrl-names: A pinctrl state named per <pinctrl-binding.txt>. +- pinctrl-names: A pinctrl state named per <pinctrl-bindings.txt>. - pinctrl[0...n]: Properties to contain the phandle for pinctrl states per - <pinctrl-binding.txt>. + <pinctrl-bindings.txt>. The pin configurations are defined as child of the pinctrl states node. Each sub-node have following properties: diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-rk805.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-rk805.txt index eee3dc260934..cbcbd31e3ce8 100644 --- a/Documentation/devicetree/bindings/pinctrl/pinctrl-rk805.txt +++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-rk805.txt @@ -10,9 +10,9 @@ Optional Pinmux properties: -------------------------- Following properties are required if default setting of pins are required at boot. -- pinctrl-names: A pinctrl state named per <pinctrl-binding.txt>. +- pinctrl-names: A pinctrl state named per <pinctrl-bindings.txt>. - pinctrl[0...n]: Properties to contain the phandle for pinctrl states per - <pinctrl-binding.txt>. + <pinctrl-bindings.txt>. The pin configurations are defined as child of the pinctrl states node. Each sub-node have following properties: diff --git a/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt b/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt index b31d6bbeee16..726ec2875223 100644 --- a/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt +++ b/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt @@ -14,7 +14,7 @@ Required properties: datasheet - interrupts: Should contain one interrupt specifier for the GPC interrupt - clocks: Must contain an entry for each entry in clock-names. - See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details. + See Documentation/devicetree/bindings/clock/clock-bindings.txt for details. - clock-names: Must include the following entries: - ipg diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt index 4733f76cbe48..9b387f861aed 100644 --- a/Documentation/devicetree/bindings/power/power_domain.txt +++ b/Documentation/devicetree/bindings/power/power_domain.txt @@ -111,8 +111,8 @@ Example 3: ==PM domain consumers== Required properties: - power-domains : A phandle and PM domain specifier as defined by bindings of - the power controller specified by phandle.
+ - power-domains : A list of PM domain specifiers, as defined by bindings of + the power controller that is the PM domain provider. Example: @@ -122,9 +122,18 @@ Example: power-domains = <&power 0>; }; -The node above defines a typical PM domain consumer device, which is located -inside a PM domain with index 0 of a power controller represented by a node -with the label "power". + leaky-device@12351000 { + compatible = "foo,i-leak-current"; + reg = <0x12351000 0x1000>; + power-domains = <&power 0>, <&power 1>; + }; + +The first example above defines a typical PM domain consumer device, which is +located inside a PM domain with index 0 of a power controller represented by a +node with the label "power". +In the second example the consumer device is partitioned across two PM domains, +the first with index 0 and the second with index 1, of a power controller that +is represented by a node with the label "power". Optional properties: - required-opps: This contains phandle to an OPP node in another device's OPP diff --git a/Documentation/devicetree/bindings/power/supply/ab8500/btemp.txt b/Documentation/devicetree/bindings/power/supply/ab8500/btemp.txt index 0ba1bcc7f33a..f181e46d8e07 100644 --- a/Documentation/devicetree/bindings/power/supply/ab8500/btemp.txt +++ b/Documentation/devicetree/bindings/power/supply/ab8500/btemp.txt @@ -13,4 +13,4 @@ Required Properties: }; For information on battery specific node, Ref: -Documentation/devicetree/bindings/power_supply/ab8500/fg.txt +Documentation/devicetree/bindings/power/supply/ab8500/fg.txt diff --git a/Documentation/devicetree/bindings/power/supply/ab8500/chargalg.txt b/Documentation/devicetree/bindings/power/supply/ab8500/chargalg.txt index ef5328371122..56636f927203 100644 --- a/Documentation/devicetree/bindings/power/supply/ab8500/chargalg.txt +++ b/Documentation/devicetree/bindings/power/supply/ab8500/chargalg.txt @@ -13,4 +13,4 @@ ab8500_chargalg { }; For information on battery specific node, Ref: -Documentation/devicetree/bindings/power_supply/ab8500/fg.txt +Documentation/devicetree/bindings/power/supply/ab8500/fg.txt diff --git a/Documentation/devicetree/bindings/power/supply/ab8500/charger.txt b/Documentation/devicetree/bindings/power/supply/ab8500/charger.txt index 6bdbb08ea9e0..24ada03e07b4 100644 --- a/Documentation/devicetree/bindings/power/supply/ab8500/charger.txt +++ b/Documentation/devicetree/bindings/power/supply/ab8500/charger.txt @@ -22,4 +22,4 @@ Required Properties: }; For information on battery specific node, Ref: -Documentation/devicetree/bindings/power_supply/ab8500/fg.txt +Documentation/devicetree/bindings/power/supply/ab8500/fg.txt diff --git a/Documentation/devicetree/bindings/power/wakeup-source.txt b/Documentation/devicetree/bindings/power/wakeup-source.txt index 5d254ab13ebf..cfd74659fbed 100644 --- a/Documentation/devicetree/bindings/power/wakeup-source.txt +++ b/Documentation/devicetree/bindings/power/wakeup-source.txt @@ -22,7 +22,7 @@ List of legacy properties and respective binding document 3. "has-tpo" Documentation/devicetree/bindings/rtc/rtc-opal.txt 4. "linux,wakeup" Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt Documentation/devicetree/bindings/mfd/tc3589x.txt - Documentation/devicetree/bindings/input/ads7846.txt + Documentation/devicetree/bindings/input/touchscreen/ads7846.txt 5. "linux,keypad-wakeup" Documentation/devicetree/bindings/input/qcom,pm8xxx-keypad.txt 6. "linux,input-wakeup" Documentation/devicetree/bindings/input/samsung-keypad.txt 7.
"nvidia,wakeup-source" Documentation/devicetree/bindings/input/nvidia,tegra20-kbc.txt diff --git a/Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt b/Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt index 7a34345d0ca3..c8dd440e9747 100644 --- a/Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt +++ b/Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt @@ -8,7 +8,7 @@ Required properties: See: Documentation/devicetree/bindings/clock/clock-bindings.txt - pinctrl-names: A pinctrl state names "default" must be defined. - pinctrl-0: Phandle referencing pin configuration of the UART peripheral. - See: Documentation/devicetree/bindings/pinctrl/pinctrl-binding.txt + See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt Optional properties: - cts-gpios: CTS pin for UART diff --git a/Documentation/devicetree/bindings/sound/st,stm32-i2s.txt b/Documentation/devicetree/bindings/sound/st,stm32-i2s.txt index 4bda52042402..58c341300552 100644 --- a/Documentation/devicetree/bindings/sound/st,stm32-i2s.txt +++ b/Documentation/devicetree/bindings/sound/st,stm32-i2s.txt @@ -18,7 +18,7 @@ Required properties: See Documentation/devicetree/bindings/dma/stm32-dma.txt. - dma-names: Identifier for each DMA request line. Must be "tx" and "rx". - pinctrl-names: should contain only value "default" - - pinctrl-0: see Documentation/devicetree/bindings/pinctrl/pinctrl-stm32.txt + - pinctrl-0: see Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt Optional properties: - resets: Reference to a reset controller asserting the reset controller diff --git a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt index f301cdf0b7e6..3a3fc506e43a 100644 --- a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt +++ b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt @@ -37,7 +37,7 @@ SAI subnodes required properties: "tx": if sai sub-block is configured as playback DAI "rx": if sai sub-block is configured as capture DAI - pinctrl-names: should contain only value "default" - - pinctrl-0: see Documentation/devicetree/bindings/pinctrl/pinctrl-stm32.txt + - pinctrl-0: see Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt SAI subnodes Optional properties: - st,sync: specify synchronization mode. 
diff --git a/Documentation/devicetree/bindings/spi/spi-st-ssc.txt b/Documentation/devicetree/bindings/spi/spi-st-ssc.txt index fe54959ec957..1bdc4709e474 100644 --- a/Documentation/devicetree/bindings/spi/spi-st-ssc.txt +++ b/Documentation/devicetree/bindings/spi/spi-st-ssc.txt @@ -9,7 +9,7 @@ Required properties: - clocks : Must contain an entry for each name in clock-names See ../clk/* - pinctrl-names : Uses "default", can use "sleep" if provided - See ../pinctrl/pinctrl-binding.txt + See ../pinctrl/pinctrl-bindings.txt Optional properties: - cs-gpios : List of GPIO chip selects diff --git a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt index b957acff57aa..ad648d93d961 100644 --- a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt @@ -12,7 +12,6 @@ "samsung,exynos5420-tmu-ext-triminfo" for TMU channels 2, 3 and 4 Exynos5420 (Must pass triminfo base and triminfo clock) "samsung,exynos5433-tmu" - "samsung,exynos5440-tmu" "samsung,exynos7-tmu" - interrupt-parent : The phandle for the interrupt controller - reg : Address range of the thermal registers. For soc's which has multiple @@ -68,18 +67,7 @@ Example 1): #thermal-sensor-cells = <0>; }; -Example 2): - - tmuctrl_0: tmuctrl@160118 { - compatible = "samsung,exynos5440-tmu"; - reg = <0x160118 0x230>, <0x160368 0x10>; - interrupts = <0 58 0>; - clocks = <&clock 21>; - clock-names = "tmu_apbif"; - #thermal-sensor-cells = <0>; - }; - -Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register") +Example 2): (In case of Exynos5420 "with misplaced TRIMINFO register") tmu_cpu2: tmu@10068000 { compatible = "samsung,exynos5420-tmu-ext-triminfo"; reg = <0x10068000 0x100>, <0x1006c000 0x4>; diff --git a/Documentation/devicetree/bindings/thermal/imx-thermal.txt b/Documentation/devicetree/bindings/thermal/imx-thermal.txt index 379eb763073e..823e4176eef8 100644 --- a/Documentation/devicetree/bindings/thermal/imx-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/imx-thermal.txt @@ -1,8 +1,13 @@ * Temperature Monitor (TEMPMON) on Freescale i.MX SoCs Required properties: -- compatible : "fsl,imx6q-tempmon" for i.MX6Q, "fsl,imx6sx-tempmon" for i.MX6SX. - i.MX6SX has two more IRQs than i.MX6Q, one is IRQ_LOW and the other is IRQ_PANIC, +- compatible : must be one of following: + - "fsl,imx6q-tempmon" for i.MX6Q, + - "fsl,imx6sx-tempmon" for i.MX6SX, + - "fsl,imx7d-tempmon" for i.MX7S/D. +- interrupts : the interrupt output of the controller: + i.MX6Q has one IRQ which will be triggered when temperature is higher than high threshold, + i.MX6SX and i.MX7S/D have two more IRQs than i.MX6Q, one is IRQ_LOW and the other is IRQ_PANIC, when temperature is below than low threshold, IRQ_LOW will be triggered, when temperature is higher than panic threshold, system will auto reboot by SRC module. 
- fsl,tempmon : phandle pointer to system controller that contains TEMPMON diff --git a/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt b/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt index 0d73ea5e9c0c..41d6a443ad66 100644 --- a/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt @@ -12,6 +12,7 @@ Required properties: - "mediatek,mt8173-thermal" : For MT8173 family of SoCs - "mediatek,mt2701-thermal" : For MT2701 family of SoCs - "mediatek,mt2712-thermal" : For MT2712 family of SoCs + - "mediatek,mt7622-thermal" : For MT7622 SoC - reg: Address range of the thermal controller - interrupts: IRQ for the thermal controller - clocks, clock-names: Clocks needed for the thermal controller. required diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.txt b/Documentation/devicetree/bindings/thermal/qcom-tsens.txt index 292ed89d900b..06195e8f35e2 100644 --- a/Documentation/devicetree/bindings/thermal/qcom-tsens.txt +++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.txt @@ -8,6 +8,7 @@ Required properties: - reg: Address range of the thermal registers - #thermal-sensor-cells : Should be 1. See ./thermal.txt for a description. +- #qcom,sensors: Number of sensors in tsens block - Refer to Documentation/devicetree/bindings/nvmem/nvmem.txt to know how to specify nvmem cells diff --git a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt index 39e7d4e61a63..cfa154bb0fa7 100644 --- a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt @@ -9,6 +9,7 @@ Required properties: Examples with soctypes are: - "renesas,r8a7795-thermal" (R-Car H3) - "renesas,r8a7796-thermal" (R-Car M3-W) + - "renesas,r8a77965-thermal" (R-Car M3-N) - reg : Address ranges of the thermal registers. Each sensor needs one address range. Sorting must be done in increasing order according to datasheet, i.e. @@ -18,7 +19,7 @@ Required properties: Optional properties: -- interrupts : interrupts routed to the TSC (3 for H3 and M3-W) +- interrupts : interrupts routed to the TSC (3 for H3, M3-W and M3-N) - power-domain : Must contain a reference to the power domain. This property is mandatory if the thermal sensor instance is part of a controllable power domain. diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt index 349e635f2d87..67c563f1b4c4 100644 --- a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt @@ -3,7 +3,8 @@ Required properties: - compatible : "renesas,thermal-", "renesas,rcar-gen2-thermal" (with thermal-zone) or - "renesas,rcar-thermal" (without thermal-zone) as fallback. + "renesas,rcar-thermal" (without thermal-zone) as + fallback except R-Car D3. Examples with soctypes are: - "renesas,thermal-r8a73a4" (R-Mobile APE6) - "renesas,thermal-r8a7743" (RZ/G1M) @@ -12,13 +13,15 @@ Required properties: - "renesas,thermal-r8a7791" (R-Car M2-W) - "renesas,thermal-r8a7792" (R-Car V2H) - "renesas,thermal-r8a7793" (R-Car M2-N) + - "renesas,thermal-r8a77995" (R-Car D3) - reg : Address range of the thermal registers. The 1st reg will be recognized as common register if it has "interrupts". 
Option properties: -- interrupts : use interrupt +- interrupts : If present, should contain 3 interrupts for + R-Car D3 or 1 interrupt otherwise. Example (non interrupt support): diff --git a/Documentation/devicetree/bindings/thermal/uniphier-thermal.txt b/Documentation/devicetree/bindings/thermal/uniphier-thermal.txt index 686c0b42ed3f..ceb92a95727a 100644 --- a/Documentation/devicetree/bindings/thermal/uniphier-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/uniphier-thermal.txt @@ -8,6 +8,7 @@ Required properties: - compatible : - "socionext,uniphier-pxs2-thermal" : For UniPhier PXs2 SoC - "socionext,uniphier-ld20-thermal" : For UniPhier LD20 SoC + - "socionext,uniphier-pxs3-thermal" : For UniPhier PXs3 SoC - interrupts : IRQ for the temperature alarm - #thermal-sensor-cells : Should be 0. See ./thermal.txt for details. diff --git a/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt b/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt index 50a31536e975..252a05c5d976 100644 --- a/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt +++ b/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt @@ -16,7 +16,7 @@ A child node must exist to represent the core DWC3 IP block. The name of the node is not important. The content of the node is defined in dwc3.txt. Phy documentation is provided in the following places: -Documentation/devicetree/bindings/phy/rockchip,dwc3-usb-phy.txt +Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt Example device nodes: diff --git a/Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt b/Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt index cb44918f01a8..ce1cb72d5345 100644 --- a/Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt @@ -3,10 +3,15 @@ Ingenic Watchdog Timer (WDT) Controller for JZ4740 & JZ4780 Required properties: compatible: "ingenic,jz4740-watchdog" or "ingenic,jz4780-watchdog" reg: Register address and length for watchdog registers +clocks: phandle to the RTC clock +clock-names: should be "rtc" Example: watchdog: jz4740-watchdog@10002000 { compatible = "ingenic,jz4740-watchdog"; - reg = <0x10002000 0x100>; + reg = <0x10002000 0x10>; + + clocks = <&cgu JZ4740_CLK_RTC>; + clock-names = "rtc"; }; diff --git a/Documentation/driver-api/gpio/consumer.rst b/Documentation/driver-api/gpio/consumer.rst index c71a50d85b50..aa03f389d41d 100644 --- a/Documentation/driver-api/gpio/consumer.rst +++ b/Documentation/driver-api/gpio/consumer.rst @@ -57,7 +57,7 @@ device that displays digits), an additional index argument can be specified:: enum gpiod_flags flags) For a more detailed description of the con_id parameter in the DeviceTree case -see Documentation/gpio/board.txt +see Documentation/driver-api/gpio/board.rst The flags parameter is used to optionally specify a direction and initial value for the GPIO.
Values can be: diff --git a/Documentation/features/debug/stackprotector/arch-support.txt b/Documentation/features/debug/stackprotector/arch-support.txt index 74b89a9c8b3a..954ac1c95553 100644 --- a/Documentation/features/debug/stackprotector/arch-support.txt +++ b/Documentation/features/debug/stackprotector/arch-support.txt @@ -1,6 +1,6 @@ # # Feature name: stackprotector -# Kconfig: HAVE_CC_STACKPROTECTOR +# Kconfig: HAVE_STACKPROTECTOR # description: arch supports compiler driven stack overflow protection # ----------------------- diff --git a/Documentation/filesystems/ceph.txt b/Documentation/filesystems/ceph.txt index d7f011ddc150..8bf62240e10d 100644 --- a/Documentation/filesystems/ceph.txt +++ b/Documentation/filesystems/ceph.txt @@ -105,15 +105,13 @@ Mount Options address its connection to the monitor originates from. wsize=X - Specify the maximum write size in bytes. By default there is no - maximum. Ceph will normally size writes based on the file stripe - size. + Specify the maximum write size in bytes. Default: 16 MB. rsize=X - Specify the maximum read size in bytes. Default: 64 MB. + Specify the maximum read size in bytes. Default: 16 MB. rasize=X - Specify the maximum readahead. Default: 8 MB. + Specify the maximum readahead size in bytes. Default: 8 MB. mount_timeout=X Specify the timeout value for mount (in seconds), in the case diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx index cfd31d94c872..72d16f08e431 100644 --- a/Documentation/hwmon/ina2xx +++ b/Documentation/hwmon/ina2xx @@ -53,7 +53,7 @@ bus supply voltage. The shunt value in micro-ohms can be set via platform data or device tree at compile-time or via the shunt_resistor attribute in sysfs at run-time. Please -refer to the Documentation/devicetree/bindings/i2c/ina2xx.txt for bindings +refer to the Documentation/devicetree/bindings/hwmon/ina2xx.txt for bindings if the device tree is used. Additionally ina226 supports update_interval attribute as described in diff --git a/Documentation/i2c/busses/i2c-mlxcpld b/Documentation/i2c/busses/i2c-mlxcpld index 4e46c440b38d..925904aa9b57 100644 --- a/Documentation/i2c/busses/i2c-mlxcpld +++ b/Documentation/i2c/busses/i2c-mlxcpld @@ -20,6 +20,10 @@ The next transaction types are supported: - Write Byte/Block. Registers: +CPBLTY 0x0 - capability reg. + Bits [6:5] - transaction length. b01 - 72B is supported, + 36B otherwise. + Bit 7 - SMBus block read support. CTRL 0x1 - control reg. Resets all the registers. HALF_CYC 0x4 - cycle reg. diff --git a/Documentation/i2c/busses/i2c-ocores b/Documentation/i2c/busses/i2c-ocores index 9e1dfe7553ad..4e713f4cdb2f 100644 --- a/Documentation/i2c/busses/i2c-ocores +++ b/Documentation/i2c/busses/i2c-ocores @@ -18,7 +18,7 @@ Usage i2c-ocores uses the platform bus, so you need to provide a struct platform_device with the base address and interrupt number. The dev.platform_data of the device should also point to a struct -ocores_i2c_platform_data (see linux/i2c-ocores.h) describing the +ocores_i2c_platform_data (see linux/platform_data/i2c-ocores.h) describing the distance between registers and the input clock speed. There is also a possibility to attach a list of i2c_board_info which the i2c-ocores driver will add to the bus upon creation.
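To make the i2c-ocores platform-data contract above concrete, here is a minimal board-file sketch. It is illustrative only: MYI2C_BASEADDR, MYI2C_IRQ and the attached EEPROM are hypothetical, and the ocores_i2c_platform_data field names (reg_shift, reg_io_width, clock_khz, devices, num_devices) are assumed to match linux/platform_data/i2c-ocores.h in your tree; check the header before relying on them.

	#include <linux/kernel.h>
	#include <linux/i2c.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>
	#include <linux/platform_data/i2c-ocores.h>

	/* MYI2C_BASEADDR and MYI2C_IRQ are hypothetical board constants. */
	static struct resource myi2c_resources[] = {
		DEFINE_RES_MEM(MYI2C_BASEADDR, 0x20),	/* register window */
		DEFINE_RES_IRQ(MYI2C_IRQ),		/* controller interrupt */
	};

	/* Optional: devices to instantiate on the new bus upon creation. */
	static struct i2c_board_info myi2c_devices[] = {
		{ I2C_BOARD_INFO("24c02", 0x50) },	/* example EEPROM */
	};

	static struct ocores_i2c_platform_data myi2c_data = {
		.reg_shift	= 2,		/* registers on 32-bit boundaries */
		.reg_io_width	= 1,		/* 8-bit register accesses */
		.clock_khz	= 50000,	/* 50 MHz input clock */
		.devices	= myi2c_devices,
		.num_devices	= ARRAY_SIZE(myi2c_devices),
	};

	static struct platform_device myi2c_device = {
		.name		= "ocores-i2c",
		.id		= 0,
		.dev = {
			.platform_data	= &myi2c_data,
		},
		.resource	= myi2c_resources,
		.num_resources	= ARRAY_SIZE(myi2c_resources),
	};

	/* In board init code: platform_device_register(&myi2c_device); */

Once registered, the "ocores-i2c" platform driver binds to the device, creates the new I2C bus, and adds the listed i2c_board_info entries to it.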
diff --git a/Documentation/i2c/muxes/i2c-mux-gpio b/Documentation/i2c/muxes/i2c-mux-gpio index 7a8d7d261632..893ecdfe6e43 100644 --- a/Documentation/i2c/muxes/i2c-mux-gpio +++ b/Documentation/i2c/muxes/i2c-mux-gpio @@ -30,12 +30,12 @@ i2c-mux-gpio uses the platform bus, so you need to provide a struct platform_device with the platform_data pointing to a struct i2c_mux_gpio_platform_data with the I2C adapter number of the master bus, the number of bus segments to create and the GPIO pins used -to control it. See include/linux/i2c-mux-gpio.h for details. +to control it. See include/linux/platform_data/i2c-mux-gpio.h for details. E.G. something like this for a MUX providing 4 bus segments controlled through 3 GPIO pins: -#include <linux/i2c-mux-gpio.h> +#include <linux/platform_data/i2c-mux-gpio.h> #include <linux/platform_device.h> static const unsigned myboard_gpiomux_gpios[] = { diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt index 0e966e8f9ec7..3534a84d206c 100644 --- a/Documentation/kbuild/kconfig-language.txt +++ b/Documentation/kbuild/kconfig-language.txt @@ -473,6 +473,24 @@ config option to 'y' no matter the dependencies. The dependencies are moved to the symbol GENERIC_IOMAP and we avoid the situation where select forces a symbol equals to 'y'. +Adding features that need compiler support +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +There are several features that need compiler support. The recommended way +to describe the dependency on the compiler feature is to use "depends on" +followed by a test macro. + +config STACKPROTECTOR + bool "Stack Protector buffer overflow detection" + depends on $(cc-option,-fstack-protector) + ... + +If you need to expose a compiler capability to makefiles and/or C source files, +CC_HAS_ is the recommended prefix for the config option. + +config CC_HAS_STACKPROTECTOR_NONE + def_bool $(cc-option,-fno-stack-protector) + Build as module only ~~~~~~~~~~~~~~~~~~~~ To restrict a component build to module-only, qualify its config symbol diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt index 22208bf2386d..cb3b0de83fc6 100644 --- a/Documentation/kprobes.txt +++ b/Documentation/kprobes.txt @@ -724,8 +724,8 @@ migrate your tool to one of the following options: See following documents: - - Documentation/trace/kprobetrace.txt - - Documentation/trace/events.txt + - Documentation/trace/kprobetrace.rst + - Documentation/trace/events.rst - tools/perf/Documentation/perf-probe.txt diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt index 00b6dfed573c..6cced88de6da 100644 --- a/Documentation/laptops/thinkpad-acpi.txt +++ b/Documentation/laptops/thinkpad-acpi.txt @@ -540,8 +540,10 @@ Events that are propagated by the driver to userspace: 0x6021 ALARM: a sensor is too hot 0x6022 ALARM: a sensor is extremely hot 0x6030 System thermal table changed +0x6032 Thermal Control command set completion (DYTC, Windows) 0x6040 Nvidia Optimus/AC adapter related (TO BE VERIFIED) 0x60C0 X1 Yoga 2016, Tablet mode status changed +0x60F0 Thermal Transformation changed (GMTS, Windows) Battery nearly empty alarms are a last resort attempt to get the operating system to hibernate or shutdown cleanly (0x2313), or shutdown diff --git a/Documentation/maintainer/pull-requests.rst b/Documentation/maintainer/pull-requests.rst index a19db3458b56..22b271de0304 100644 --- a/Documentation/maintainer/pull-requests.rst +++ b/Documentation/maintainer/pull-requests.rst @@ -41,7 +41,7 @@ named ``char-misc-next``, you would be using the following command:: that will create a signed
tag called ``char-misc-4.15-rc1`` based on the last commit in the ``char-misc-next`` branch, and sign it with your gpg key -(see :ref:`Documentation/maintainer/configure_git.rst `). +(see :ref:`Documentation/maintainer/configure-git.rst `). Linus will only accept pull requests based on a signed tag. Other maintainers may differ. diff --git a/Documentation/networking/can.rst b/Documentation/networking/can.rst index d23c51abf8c6..2fd0b51a8c52 100644 --- a/Documentation/networking/can.rst +++ b/Documentation/networking/can.rst @@ -164,7 +164,7 @@ The Linux network devices (by default) just can handle the transmission and reception of media dependent frames. Due to the arbitration on the CAN bus the transmission of a low prio CAN-ID may be delayed by the reception of a high prio CAN frame. To -reflect the correct [*]_ traffic on the node the loopback of the sent +reflect the correct [#f1]_ traffic on the node the loopback of the sent data has to be performed right after a successful transmission. If the CAN network interface is not capable of performing the loopback for some reason the SocketCAN core can do this task as a fallback solution. @@ -175,7 +175,7 @@ networking behaviour for CAN applications. Due to some requests from the RT-SocketCAN group the loopback optionally may be disabled for each separate socket. See sockopts from the CAN RAW sockets in :ref:`socketcan-raw-sockets`. -.. [*] you really like to have this when you're running analyser +.. [#f1] you really like to have this when you're running analyser tools like 'candump' or 'cansniffer' on the (same) node. diff --git a/Documentation/riscv/pmu.txt b/Documentation/riscv/pmu.txt new file mode 100644 index 000000000000..b29f03a6d82f --- /dev/null +++ b/Documentation/riscv/pmu.txt @@ -0,0 +1,249 @@ +Supporting PMUs on RISC-V platforms +========================================== +Alan Kao , Mar 2018 + +Introduction +------------ + +As of this writing, perf_event-related features mentioned in The RISC-V ISA +Privileged Version 1.10 are as follows: +(please check the manual for more details) + +* [m|s]counteren +* mcycle[h], cycle[h] +* minstret[h], instret[h] +* mhpeventx, mhpcounterx[h] + +With only such a function set, porting perf would require a lot of work, due to +the lack of the following general architectural performance monitoring features: + +* Enabling/Disabling counters + Counters are just free-running all the time in our case. +* Interrupt caused by counter overflow + No such feature in the spec. +* Interrupt indicator + It is not possible to have many interrupt ports for all counters, so an + interrupt indicator is required for software to tell which counter has + just overflowed. +* Writing to counters + There will be an SBI to support this since the kernel cannot modify the + counters [1]. Alternatively, some vendors are considering implementing a + hardware extension for M-S-U model machines to write counters directly. + +This document aims to provide developers with a quick guide on supporting their +PMUs in the kernel. The following sections briefly explain perf's mechanism +and todos. + +You may check previous discussions here [1][2]. Also, it might be helpful +to check the appendix for related kernel structures. + + +1. Initialization +----------------- + +*riscv_pmu* is a global pointer of type *struct riscv_pmu*, which contains +various methods according to perf's internal convention and PMU-specific +parameters. One should declare such an instance to represent the PMU.
+
+
+2. Event Initialization
+-----------------------
+
+When a user launches a perf command to monitor some events, it is first
+interpreted by the userspace perf tool into multiple *perf_event_open*
+system calls, and then each of them calls into the body of the *event_init*
+member function that was assigned in the previous step. In
+*riscv_base_pmu*'s case, it is *riscv_event_init*.
+
+The main purpose of this function is to translate the event provided by the
+user into a bitmap, so that HW-related control registers or counters can
+directly be manipulated. The translation is based on the mappings and
+methods provided in *riscv_pmu*.
+
+Note that some features can be done in this stage as well:
+
+(1) interrupt setting, which is stated in the next section;
+(2) privilege level setting (user space only, kernel space only, both);
+(3) destructor setting. Normally it is sufficient to apply *riscv_destroy_event*;
+(4) tweaks for non-sampling events, which will be utilized by functions such as
+*perf_adjust_period*, usually something like the following:
+
+if (!is_sampling_event(event)) {
+        hwc->sample_period = x86_pmu.max_period;
+        hwc->last_period = hwc->sample_period;
+        local64_set(&hwc->period_left, hwc->sample_period);
+}
+
+In the case of *riscv_base_pmu*, only (3) is provided for now.
+
+
+3. Interrupt
+------------
+
+3.1. Interrupt Initialization
+
+This often occurs at the beginning of the *event_init* method. In common
+practice, this should be a code segment like
+
+int x86_reserve_hardware(void)
+{
+        int err = 0;
+
+        if (!atomic_inc_not_zero(&pmc_refcount)) {
+                mutex_lock(&pmc_reserve_mutex);
+                if (atomic_read(&pmc_refcount) == 0) {
+                        if (!reserve_pmc_hardware())
+                                err = -EBUSY;
+                        else
+                                reserve_ds_buffers();
+                }
+                if (!err)
+                        atomic_inc(&pmc_refcount);
+                mutex_unlock(&pmc_reserve_mutex);
+        }
+
+        return err;
+}
+
+The magic is in *reserve_pmc_hardware*, which usually does atomic operations
+to make the implemented IRQ accessible from some global function pointer.
+*release_pmc_hardware* serves the opposite purpose, and it is used in the
+event destructors mentioned in the previous section.
+
+(Note: judging from the implementations in all the architectures, the
+*reserve/release* pair are always IRQ settings, so the name *pmc_hardware*
+is somewhat misleading. It does NOT deal with the binding between an event
+and a physical counter, which will be introduced in the next section.)
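For this port, the reserve/release pair could mirror the same refcounting
logic. The sketch below is illustrative only: *riscv_pmu_wire_up_irq()* and
*riscv_pmu_tear_down_irq()* are hypothetical helpers that would attach and
detach the overflow handler once such an interrupt exists on RISC-V.

#include <linux/atomic.h>
#include <linux/mutex.h>

static atomic_t pmu_irq_refcount = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);

/* Hypothetical helpers: attach/detach the overflow IRQ handler. */
int riscv_pmu_wire_up_irq(void);
void riscv_pmu_tear_down_irq(void);

static int riscv_reserve_pmu_hardware(void)
{
        int err = 0;

        if (!atomic_inc_not_zero(&pmu_irq_refcount)) {
                mutex_lock(&pmu_reserve_mutex);
                /* First user: actually claim the interrupt. */
                if (atomic_read(&pmu_irq_refcount) == 0)
                        err = riscv_pmu_wire_up_irq();
                if (!err)
                        atomic_inc(&pmu_irq_refcount);
                mutex_unlock(&pmu_reserve_mutex);
        }

        return err;
}

static void riscv_release_pmu_hardware(void)
{
        /* Last user gone: release the interrupt again. */
        if (atomic_dec_and_mutex_lock(&pmu_irq_refcount, &pmu_reserve_mutex)) {
                riscv_pmu_tear_down_irq();
                mutex_unlock(&pmu_reserve_mutex);
        }
}

As on x86, the mutex only serializes the first reservation and the last
release; every other caller takes the fast path through *atomic_inc_not_zero*.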
+3.2. IRQ Structure
+
+Basically, an IRQ handler runs the following pseudo code:
+
+for each hardware counter that triggered this overflow
+
+    get the event of this counter
+
+    // the following two steps are defined as *read()*,
+    // check the section Reading/Writing Counters for details.
+    count the delta value since the previous interrupt
+    update the event->count (number of event occurrences) by adding delta,
+    and event->hw.period_left by subtracting delta
+
+    if the event overflows
+        sample data
+        set the counter appropriately for the next overflow
+
+        if the event overflows again too frequently, throttle this event
+        fi
+    fi
+
+end for
+
+However, as of this writing, none of the RISC-V implementations have
+designed an interrupt for perf, so the details are to be completed in the
+future.
+
+4. Reading/Writing Counters
+---------------------------
+
+They seem symmetric but perf treats them quite differently. For reading,
+there is a *read* interface in *struct pmu*, but it serves more than just
+reading. According to the context, the *read* function not only reads the
+content of the counter (event->count), but also updates the period left
+until the next interrupt (event->hw.period_left).
+
+The core of perf, however, does not need to write counters directly. Writing
+counters is hidden behind two abstractions: 1) *pmu->start* literally starts
+counting, so one has to set the counter to a suitable value for the next
+interrupt; 2) inside the IRQ handler, the counter should be set to the same
+reasonable value.
+
+Reading is not a problem in RISC-V but writing would need some effort, since
+counters are not allowed to be written by S-mode.
+
+
+5. add()/del()/start()/stop()
+-----------------------------
+
+Basic idea: add()/del() adds/deletes events to/from a PMU, and
+start()/stop() starts/stops the counter of some event in the PMU. All of
+them take the same arguments: *struct perf_event *event* and *int flag*.
+
+If you consider perf as a state machine, these functions serve as the
+transitions between its states. Three states (event->hw.state) are defined:
+
+* PERF_HES_STOPPED: the counter is stopped
+* PERF_HES_UPTODATE: the event->count is up-to-date
+* PERF_HES_ARCH: arch-dependent usage ... we don't need this for now
+
+A normal flow of these state transitions is as follows:
+
+* A user launches a perf event, resulting in a call to *event_init*.
+* When being context-switched in, *add* is called by the perf core, with the
+  flag PERF_EF_START, which means that the event should be started after it
+  is added. At this stage, a general event is bound to a physical counter,
+  if any. The state changes to PERF_HES_STOPPED and PERF_HES_UPTODATE,
+  because it is now stopped, and the (software) event count does not need
+  updating.
+** *start* is then called, and the counter is enabled.
+   With the flag PERF_EF_RELOAD, it writes an appropriate value to the
+   counter (check the previous section for details). Nothing is written if
+   the flag does not contain PERF_EF_RELOAD. The state is now reset to none,
+   because the event is neither stopped nor up-to-date (counting has already
+   started).
+* When being context-switched out, *del* is called. It then checks out all
+  the events in the PMU and calls *stop* to update their counts.
+** *stop* is called by *del* and by the perf core with the flag
+   PERF_EF_UPDATE, and it often shares the same subroutine as *read*, with
+   the same logic. The state changes to PERF_HES_STOPPED and
+   PERF_HES_UPTODATE, again.
+
+** Life cycle of these two pairs: *add* and *del* are called repeatedly as
+   tasks switch in and out; *start* and *stop* are also called when the perf
+   core needs a quick stop-and-start, for instance, when the interrupt
+   period is being adjusted.
+
+The current implementation is sufficient for now and can easily be extended
+to support more features in the future.
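To illustrate the *read* semantics described in section 4 (updating both
event->count and event->hw.period_left), a minimal sketch for a free-running
counter might look like the following. The helper *read_counter()*, which
returns the current raw value of the counter selected by *idx*, is an
assumption for illustration:

#include <linux/perf_event.h>

static void riscv_pmu_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev_raw_count, new_raw_count;

        do {
                prev_raw_count = local64_read(&hwc->prev_count);
                new_raw_count = read_counter(hwc->idx); /* assumed helper */
        } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                 new_raw_count) != prev_raw_count);

        /*
         * Account the observed delta to the event count and to the period
         * left until the next (future) overflow interrupt.
         */
        local64_add(new_raw_count - prev_raw_count, &event->count);
        local64_sub(new_raw_count - prev_raw_count, &hwc->period_left);
}

Because the counters are free-running, the cmpxchg loop is what keeps
concurrent readers from accounting the same delta twice; a *stop* with
PERF_EF_UPDATE could simply reuse this routine, as noted in section 5.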
+A. Related Structures
+---------------------
+
+* struct pmu: include/linux/perf_event.h
+* struct riscv_pmu: arch/riscv/include/asm/perf_event.h
+
+  Both structures are designed to be read-only.
+
+  *struct pmu* defines some function pointer interfaces, and most of them
+take *struct perf_event* as a main argument, dealing with perf events
+according to perf's internal state machine (check kernel/events/core.c for
+details).
+
+  *struct riscv_pmu* defines PMU-specific parameters. The naming follows
+the convention of all other architectures.
+
+* struct perf_event: include/linux/perf_event.h
+* struct hw_perf_event
+
+  The generic structures that represent a perf event and its
+hardware-related details.
+
+* struct riscv_hw_events: arch/riscv/include/asm/perf_event.h
+
+  The structure that holds the status of events; it has two fixed members:
+the number of events and the array of the events.
+
+References
+----------
+
+[1] https://github.com/riscv/riscv-linux/pull/124
+[2] https://groups.google.com/a/groups.riscv.org/forum/#!topic/sw-dev/f19TmCNP6yA
diff --git a/Documentation/security/self-protection.rst b/Documentation/security/self-protection.rst
index 0f53826c78b9..e1ca698e0006 100644
--- a/Documentation/security/self-protection.rst
+++ b/Documentation/security/self-protection.rst
@@ -156,7 +156,7 @@ The classic stack buffer overflow involves writing past the expected end
 of a variable stored on the stack, ultimately writing a controlled value
 to the stack frame's stored return address. The most widely used defense
 is the presence of a stack canary between the stack variables and the
-return address (``CONFIG_CC_STACKPROTECTOR``), which is verified just before
+return address (``CONFIG_STACKPROTECTOR``), which is verified just before
 the function returns. Other defenses include things like shadow stacks.

 Stack depth overflow
diff --git a/Documentation/sphinx/rstFlatTable.py b/Documentation/sphinx/rstFlatTable.py
index 25feb0d35e7a..2019a55f6b18 100755
--- a/Documentation/sphinx/rstFlatTable.py
+++ b/Documentation/sphinx/rstFlatTable.py
@@ -53,8 +53,6 @@ from docutils.utils import SystemMessagePropagation
 # common globals
 # ==============================================================================

-# The version numbering follows numbering of the specification
-# (Documentation/books/kernel-doc-HOWTO).
 __version__ = '1.0'

 PY3 = sys.version_info[0] == 3
diff --git a/Documentation/trace/coresight.txt b/Documentation/trace/coresight.txt
index 1d74ad0202b6..efbc832146e7 100644
--- a/Documentation/trace/coresight.txt
+++ b/Documentation/trace/coresight.txt
@@ -426,5 +426,5 @@ root@genericarmv8:~#
 Details on how to use the generic STM API can be found here [2].

 [1]. Documentation/ABI/testing/sysfs-bus-coresight-devices-stm
-[2]. Documentation/trace/stm.txt
+[2]. Documentation/trace/stm.rst
 [3]. https://github.com/Linaro/perf-opencsd
diff --git a/Documentation/trace/events.rst b/Documentation/trace/events.rst
index 1afae55dc55c..696dc69b8158 100644
--- a/Documentation/trace/events.rst
+++ b/Documentation/trace/events.rst
@@ -8,7 +8,7 @@ Event Tracing
 1. Introduction
 ===============

-Tracepoints (see Documentation/trace/tracepoints.txt) can be used
+Tracepoints (see Documentation/trace/tracepoints.rst) can be used
 without creating custom kernel modules to register probe functions
 using the event tracing infrastructure.
diff --git a/Documentation/trace/ftrace-uses.rst b/Documentation/trace/ftrace-uses.rst index 00283b6dd101..1fbc69894eed 100644 --- a/Documentation/trace/ftrace-uses.rst +++ b/Documentation/trace/ftrace-uses.rst @@ -199,7 +199,7 @@ If @buf is NULL and reset is set, all functions will be enabled for tracing. The @buf can also be a glob expression to enable all functions that match a specific pattern. -See Filter Commands in :file:`Documentation/trace/ftrace.txt`. +See Filter Commands in :file:`Documentation/trace/ftrace.rst`. To just trace the schedule function: diff --git a/Documentation/trace/histogram.txt b/Documentation/trace/histogram.txt index b13771cb12c1..e73bcf9cb5f3 100644 --- a/Documentation/trace/histogram.txt +++ b/Documentation/trace/histogram.txt @@ -7,7 +7,7 @@ Histogram triggers are special event triggers that can be used to aggregate trace event data into histograms. For information on - trace events and event triggers, see Documentation/trace/events.txt. + trace events and event triggers, see Documentation/trace/events.rst. 2. Histogram Trigger Command diff --git a/Documentation/trace/intel_th.rst b/Documentation/trace/intel_th.rst index 990f13265178..19e2d633f3c7 100644 --- a/Documentation/trace/intel_th.rst +++ b/Documentation/trace/intel_th.rst @@ -38,7 +38,7 @@ description is at Documentation/ABI/testing/sysfs-bus-intel_th-devices-gth. STH registers an stm class device, through which it provides interface to userspace and kernelspace software trace sources. See -Documentation/trace/stm.txt for more information on that. +Documentation/trace/stm.rst for more information on that. MSU can be configured to collect trace data into a system memory buffer, which can later on be read from its device nodes via read() or diff --git a/Documentation/trace/tracepoint-analysis.rst b/Documentation/trace/tracepoint-analysis.rst index a4d3ff2e5efb..716326b9f152 100644 --- a/Documentation/trace/tracepoint-analysis.rst +++ b/Documentation/trace/tracepoint-analysis.rst @@ -6,7 +6,7 @@ Notes on Analysing Behaviour Using Events and Tracepoints 1. Introduction =============== -Tracepoints (see Documentation/trace/tracepoints.txt) can be used without +Tracepoints (see Documentation/trace/tracepoints.rst) can be used without creating custom kernel modules to register probe functions using the event tracing infrastructure. @@ -55,7 +55,7 @@ simple case of:: 3.1 System-Wide Event Enabling ------------------------------ -See Documentation/trace/events.txt for a proper description on how events +See Documentation/trace/events.rst for a proper description on how events can be enabled system-wide. A short example of enabling all events related to page allocation would look something like:: @@ -112,7 +112,7 @@ at that point. 3.4 Local Event Enabling ------------------------ -Documentation/trace/ftrace.txt describes how to enable events on a per-thread +Documentation/trace/ftrace.rst describes how to enable events on a per-thread basis using set_ftrace_pid. 3.5 Local Event Enablement with PCL @@ -137,7 +137,7 @@ basis using PCL such as follows. 4. Event Filtering ================== -Documentation/trace/ftrace.txt covers in-depth how to filter events in +Documentation/trace/ftrace.rst covers in-depth how to filter events in ftrace. Obviously using grep and awk of trace_pipe is an option as well as any script reading trace_pipe. 
diff --git a/Documentation/translations/ja_JP/howto.rst b/Documentation/translations/ja_JP/howto.rst index 8d7ed0cbbf5f..f3116381c26b 100644 --- a/Documentation/translations/ja_JP/howto.rst +++ b/Documentation/translations/ja_JP/howto.rst @@ -1,5 +1,5 @@ NOTE: -This is a version of Documentation/HOWTO translated into Japanese. +This is a version of Documentation/process/howto.rst translated into Japanese. This document is maintained by Tsugikazu Shibata If you find any difference between this document and the original file or a problem with the translation, please contact the maintainer of this file. @@ -109,7 +109,7 @@ linux-api@vger.kernel.org に送ることを勧めます。 ています。 カーネルに関して初めての人はここからスタートすると良い でしょう。 - :ref:`Documentation/Process/changes.rst ` + :ref:`Documentation/process/changes.rst ` このファイルはカーネルをうまく生成(訳注 build )し、走らせるのに最 小限のレベルで必要な数々のソフトウェアパッケージの一覧を示してい ます。 diff --git a/Documentation/translations/ko_KR/howto.rst b/Documentation/translations/ko_KR/howto.rst index 624654bdcd8a..a8197e072599 100644 --- a/Documentation/translations/ko_KR/howto.rst +++ b/Documentation/translations/ko_KR/howto.rst @@ -160,7 +160,7 @@ mtk.manpages@gmail.com의 메인테이너에게 보낼 것을 권장한다. 독특한 행동에 관하여 흔히 있는 오해들과 혼란들을 해소하고 있기 때문이다. - :ref:`Documentation/process/stable_kernel_rules.rst ` + :ref:`Documentation/process/stable-kernel-rules.rst ` 이 문서는 안정적인 커널 배포가 이루어지는 규칙을 설명하고 있으며 여러분들이 이러한 배포들 중 하나에 변경을 하길 원한다면 무엇을 해야 하는지를 설명한다. diff --git a/Documentation/translations/zh_CN/SubmittingDrivers b/Documentation/translations/zh_CN/SubmittingDrivers index 929385e4b194..15e73562f710 100644 --- a/Documentation/translations/zh_CN/SubmittingDrivers +++ b/Documentation/translations/zh_CN/SubmittingDrivers @@ -107,7 +107,7 @@ Linux 2.6: 程序测试的指导,请参阅 Documentation/power/drivers-testing.txt。有关驱动程序电 源管理问题相对全面的概述,请参阅 - Documentation/power/admin-guide/devices.rst。 + Documentation/driver-api/pm/devices.rst。 管理: 如果一个驱动程序的作者还在进行有效的维护,那么通常除了那 些明显正确且不需要任何检查的补丁以外,其他所有的补丁都会 diff --git a/Documentation/translations/zh_CN/gpio.txt b/Documentation/translations/zh_CN/gpio.txt index 4f8bf30a41dc..4cb1ba8b8fed 100644 --- a/Documentation/translations/zh_CN/gpio.txt +++ b/Documentation/translations/zh_CN/gpio.txt @@ -1,4 +1,4 @@ -Chinese translated version of Documentation/gpio.txt +Chinese translated version of Documentation/gpio If you have any comment or update to the content, please contact the original document maintainer directly. However, if you have a problem @@ -10,7 +10,7 @@ Maintainer: Grant Likely Linus Walleij Chinese maintainer: Fu Wei --------------------------------------------------------------------- -Documentation/gpio.txt 的中文翻译 +Documentation/gpio 的中文翻译 如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文 交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻 diff --git a/Documentation/translations/zh_CN/io_ordering.txt b/Documentation/translations/zh_CN/io_ordering.txt index e592daf4e014..1f8127bdd415 100644 --- a/Documentation/translations/zh_CN/io_ordering.txt +++ b/Documentation/translations/zh_CN/io_ordering.txt @@ -1,4 +1,4 @@ -Chinese translated version of Documentation/io_orderings.txt +Chinese translated version of Documentation/io_ordering.txt If you have any comment or update to the content, please contact the original document maintainer directly. 
However, if you have a problem diff --git a/Documentation/translations/zh_CN/magic-number.txt b/Documentation/translations/zh_CN/magic-number.txt index e9db693c0a23..7159cec04090 100644 --- a/Documentation/translations/zh_CN/magic-number.txt +++ b/Documentation/translations/zh_CN/magic-number.txt @@ -1,4 +1,4 @@ -Chinese translated version of Documentation/magic-number.txt +Chinese translated version of Documentation/process/magic-number.rst If you have any comment or update to the content, please post to LKML directly. However, if you have problem communicating in English you can also ask the @@ -7,7 +7,7 @@ translation is outdated or there is problem with translation. Chinese maintainer: Jia Wei Wei --------------------------------------------------------------------- -Documentation/magic-number.txt的中文翻译 +Documentation/process/magic-number.rst的中文翻译 如果想评论或更新本文的内容,请直接发信到LKML。如果你使用英文交流有困难的话,也可 以向中文版维护者求助。如果本翻译更新不及时或者翻译存在问题,请联系中文版维护者。 diff --git a/Documentation/translations/zh_CN/video4linux/omap3isp.txt b/Documentation/translations/zh_CN/video4linux/omap3isp.txt index 67ffbf352ae0..e9f29375aa95 100644 --- a/Documentation/translations/zh_CN/video4linux/omap3isp.txt +++ b/Documentation/translations/zh_CN/video4linux/omap3isp.txt @@ -1,4 +1,4 @@ -Chinese translated version of Documentation/video4linux/omap3isp.txt +Chinese translated version of Documentation/media/v4l-drivers/omap3isp.rst If you have any comment or update to the content, please contact the original document maintainer directly. However, if you have a problem @@ -11,7 +11,7 @@ Maintainer: Laurent Pinchart David Cohen Chinese maintainer: Fu Wei --------------------------------------------------------------------- -Documentation/video4linux/omap3isp.txt 的中文翻译 +Documentation/media/v4l-drivers/omap3isp.rst 的中文翻译 如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文 交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻 diff --git a/Documentation/translations/zh_CN/video4linux/v4l2-framework.txt b/Documentation/translations/zh_CN/video4linux/v4l2-framework.txt index c77c0f060864..66c7c568bd86 100644 --- a/Documentation/translations/zh_CN/video4linux/v4l2-framework.txt +++ b/Documentation/translations/zh_CN/video4linux/v4l2-framework.txt @@ -1,4 +1,4 @@ -Chinese translated version of Documentation/video4linux/v4l2-framework.txt +Chinese translated version of Documentation/media/media_kapi.rst If you have any comment or update to the content, please contact the original document maintainer directly. However, if you have a problem @@ -9,7 +9,7 @@ or if there is a problem with the translation. 
Maintainer: Mauro Carvalho Chehab Chinese maintainer: Fu Wei --------------------------------------------------------------------- -Documentation/video4linux/v4l2-framework.txt 的中文翻译 +Documentation/media/media_kapi.rst 的中文翻译 如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文 交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻 @@ -777,7 +777,7 @@ v4l2 核心 API 提供了一个处理视频缓冲的标准方法(称为“videob 线性 DMA(videobuf-dma-contig)以及大多用于 USB 设备的用 vmalloc 分配的缓冲(videobuf-vmalloc)。 -请参阅 Documentation/video4linux/videobuf,以获得更多关于 videobuf +请参阅 Documentation/media/kapi/v4l2-videobuf.rst,以获得更多关于 videobuf 层的使用信息。 v4l2_fh 结构体 diff --git a/Documentation/vfio-mediated-device.txt b/Documentation/vfio-mediated-device.txt index 1b3950346532..c3f69bcaf96e 100644 --- a/Documentation/vfio-mediated-device.txt +++ b/Documentation/vfio-mediated-device.txt @@ -145,6 +145,11 @@ The functions in the mdev_parent_ops structure are as follows: * create: allocate basic resources in a driver for a mediated device * remove: free resources in a driver when a mediated device is destroyed +(Note that mdev-core provides no implicit serialization of create/remove +callbacks per mdev parent device, per mdev type, or any other categorization. +Vendor drivers are expected to be fully asynchronous in this respect or +provide their own internal resource protection.) + The callbacks in the mdev_parent_ops structure are as follows: * open: open callback of mediated device diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 758bf403a169..495b7742ab58 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -1269,12 +1269,18 @@ struct kvm_cpuid_entry2 { __u32 padding[3]; }; -This ioctl returns x86 cpuid features which are supported by both the hardware -and kvm. Userspace can use the information returned by this ioctl to -construct cpuid information (for KVM_SET_CPUID2) that is consistent with -hardware, kernel, and userspace capabilities, and with user requirements (for -example, the user may wish to constrain cpuid to emulate older hardware, -or for feature consistency across a cluster). +This ioctl returns x86 cpuid features which are supported by both the +hardware and kvm in its default configuration. Userspace can use the +information returned by this ioctl to construct cpuid information (for +KVM_SET_CPUID2) that is consistent with hardware, kernel, and +userspace capabilities, and with user requirements (for example, the +user may wish to constrain cpuid to emulate older hardware, or for +feature consistency across a cluster). + +Note that certain capabilities, such as KVM_CAP_X86_DISABLE_EXITS, may +expose cpuid features (e.g. MONITOR) which are not supported by kvm in +its default configuration. If userspace enables such capabilities, it +is responsible for modifying the results of this ioctl appropriately. Userspace invokes KVM_GET_SUPPORTED_CPUID by passing a kvm_cpuid2 structure with the 'nent' field indicating the number of entries in the variable-size @@ -4603,3 +4609,12 @@ Architectures: s390 This capability indicates that kvm will implement the interfaces to handle reset, migration and nested KVM for branch prediction blocking. The stfle facility 82 should not be provided to the guest without this capability. + +8.14 KVM_CAP_HYPERV_TLBFLUSH + +Architectures: x86 + +This capability indicates that KVM supports paravirtualized Hyper-V TLB Flush +hypercalls: +HvFlushVirtualAddressSpace, HvFlushVirtualAddressSpaceEx, +HvFlushVirtualAddressList, HvFlushVirtualAddressListEx. 
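Returning to KVM_GET_SUPPORTED_CPUID above: for illustration, a minimal
userspace sketch of the query might look like the following. Error handling
is abbreviated, and the starting entry count of 128 is an arbitrary
assumption; a robust caller grows the buffer and retries when the ioctl
fails with E2BIG.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        int nent = 128; /* arbitrary starting size; grow on E2BIG */
        struct kvm_cpuid2 *cpuid;
        int kvm = open("/dev/kvm", O_RDWR);

        if (kvm < 0)
                return 1;

        cpuid = calloc(1, sizeof(*cpuid) +
                          nent * sizeof(struct kvm_cpuid_entry2));
        if (!cpuid)
                return 1;
        cpuid->nent = nent;

        /* System ioctl: issued on the /dev/kvm fd, not on a VM fd. */
        if (ioctl(kvm, KVM_GET_SUPPORTED_CPUID, cpuid) < 0) {
                perror("KVM_GET_SUPPORTED_CPUID");
                return 1;
        }

        /* On success, nent reflects the number of entries actually filled. */
        for (unsigned int i = 0; i < cpuid->nent; i++)
                printf("leaf 0x%08x index %u\n",
                       cpuid->entries[i].function, cpuid->entries[i].index);

        /* The array could now be adjusted and handed to KVM_SET_CPUID2. */
        free(cpuid);
        return 0;
}

If a capability such as KVM_CAP_X86_DISABLE_EXITS is enabled, the adjustment
step before KVM_SET_CPUID2 is where userspace would add the extra feature
bits (e.g. MONITOR), per the note above.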
diff --git a/Documentation/virtual/kvm/devices/arm-vgic-v3.txt b/Documentation/virtual/kvm/devices/arm-vgic-v3.txt
index 9293b45abdb9..2408ab720ef7 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic-v3.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic-v3.txt
@@ -27,16 +27,42 @@ Groups:
 VCPU and all of the redistributor pages are contiguous.
 Only valid for KVM_DEV_TYPE_ARM_VGIC_V3.
 This address needs to be 64K aligned.
+
+    KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION (rw, 64-bit)
+      The attribute data pointed to by kvm_device_attr.addr is a __u64 value:
+      bits:   | 63 .... 52 | 51 .... 16 | 15 - 12 | 11 - 0
+      values: |   count    |    base    |  flags  |  index
+      - index encodes the unique redistributor region index
+      - flags: reserved for future use, currently 0
+      - base field encodes bits [51:16] of the guest physical base address
+        of the first redistributor in the region.
+      - count encodes the number of redistributors in the region. Must be
+        greater than 0.
+      There are two 64K pages for each redistributor in the region and
+      redistributors are laid out contiguously within the region. Regions
+      are filled with redistributors in index order. The sum of all the
+      region count fields must be greater than or equal to the number of
+      VCPUs. Redistributor regions must be registered in increasing index
+      order, starting from index 0.
+      The characteristics of a specific redistributor region can be read
+      by presetting the index field in the attr data.
+      Only valid for KVM_DEV_TYPE_ARM_VGIC_V3.
+
+  It is invalid to mix calls with KVM_VGIC_V3_ADDR_TYPE_REDIST and
+  KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION attributes.
+
 Errors:
 -E2BIG: Address outside of addressable IPA range
-
-EINVAL: Incorrectly aligned address
+-EINVAL: Incorrectly aligned address, bad redistributor region
+         count/index, mixed redistributor region attribute usage
 -EEXIST: Address already configured
+-ENOENT: Attempt to read the characteristics of a nonexistent
+         redistributor region
 -ENXIO: The group or attribute is unknown/unsupported for this device
         or hardware support is missing.
 -EFAULT: Invalid user pointer for attr->addr.

 KVM_DEV_ARM_VGIC_GRP_DIST_REGS
 KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
 Attributes:
diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
index f50d45b1e967..e507a9e0421e 100644
--- a/Documentation/virtual/kvm/mmu.txt
+++ b/Documentation/virtual/kvm/mmu.txt
@@ -49,8 +49,8 @@ The mmu supports first-generation mmu hardware, which allows an atomic
 switch of the current paging mode and cr3 during guest entry, as well as
 two-dimensional paging (AMD's NPT and Intel's EPT). The emulated hardware
 it exposes is the traditional 2/3/4 level x86 mmu, with support for global
-pages, pae, pse, pse36, cr0.wp, and 1GB pages. Work is in progress to support
-exposing NPT capable hardware on NPT capable hosts.
+pages, pae, pse, pse36, cr0.wp, and 1GB pages. The emulated hardware is also
+able to expose NPT-capable hardware on NPT-capable hosts.
Translation =========== @@ -465,5 +465,5 @@ Further reading =============== - NPT presentation from KVM Forum 2008 - http://www.linux-kvm.org/wiki/images/c/c8/KvmForum2008%24kdf2008_21.pdf + http://www.linux-kvm.org/images/c/c8/KvmForum2008%24kdf2008_21.pdf diff --git a/Documentation/virtual/kvm/nested-vmx.txt b/Documentation/virtual/kvm/nested-vmx.txt index 8ed937de1163..97eb1353e962 100644 --- a/Documentation/virtual/kvm/nested-vmx.txt +++ b/Documentation/virtual/kvm/nested-vmx.txt @@ -31,17 +31,6 @@ L0, the guest hypervisor, which we call L1, and its nested guest, which we call L2. -Known limitations ------------------ - -The current code supports running Linux guests under KVM guests. -Only 64-bit guest hypervisors are supported. - -Additional patches for running Windows under guest KVM, and Linux under -guest VMware server, and support for nested EPT, are currently running in -the lab, and will be sent as follow-on patchsets. - - Running nested VMX ------------------ diff --git a/MAINTAINERS b/MAINTAINERS index 12b27679ae0c..9d5eeff51b5f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1732,7 +1732,8 @@ F: arch/arm/mach-npcm/ F: arch/arm/boot/dts/nuvoton-npcm* F: include/dt-bindings/clock/nuvoton,npcm7xx-clks.h F: drivers/*/*npcm* -F: Documentation/*/*npcm* +F: Documentation/devicetree/bindings/*/*npcm* +F: Documentation/devicetree/bindings/*/*/*npcm* ARM/NUVOTON W90X900 ARM ARCHITECTURE M: Wan ZongShun @@ -3079,7 +3080,7 @@ M: Clemens Ladisch L: alsa-devel@alsa-project.org (moderated for non-subscribers) T: git git://git.alsa-project.org/alsa-kernel.git S: Maintained -F: Documentation/sound/alsa/Bt87x.txt +F: Documentation/sound/cards/bt87x.rst F: sound/pci/bt87x.c BT8XXGPIO DRIVER @@ -3375,7 +3376,7 @@ M: David Howells M: David Woodhouse L: keyrings@vger.kernel.org S: Maintained -F: Documentation/module-signing.txt +F: Documentation/admin-guide/module-signing.rst F: certs/ F: scripts/sign-file.c F: scripts/extract-cert.c @@ -4513,7 +4514,7 @@ DRM DRIVER FOR ILITEK ILI9225 PANELS M: David Lechner S: Maintained F: drivers/gpu/drm/tinydrm/ili9225.c -F: Documentation/devicetree/bindings/display/ili9225.txt +F: Documentation/devicetree/bindings/display/ilitek,ili9225.txt DRM DRIVER FOR INTEL I810 VIDEO CARDS S: Orphan / Obsolete @@ -4599,13 +4600,13 @@ DRM DRIVER FOR SITRONIX ST7586 PANELS M: David Lechner S: Maintained F: drivers/gpu/drm/tinydrm/st7586.c -F: Documentation/devicetree/bindings/display/st7586.txt +F: Documentation/devicetree/bindings/display/sitronix,st7586.txt DRM DRIVER FOR SITRONIX ST7735R PANELS M: David Lechner S: Maintained F: drivers/gpu/drm/tinydrm/st7735r.c -F: Documentation/devicetree/bindings/display/st7735r.txt +F: Documentation/devicetree/bindings/display/sitronix,st7735r.txt DRM DRIVER FOR TDFX VIDEO CARDS S: Orphan / Obsolete @@ -4638,7 +4639,6 @@ F: drivers/gpu/drm/ F: drivers/gpu/vga/ F: Documentation/devicetree/bindings/display/ F: Documentation/devicetree/bindings/gpu/ -F: Documentation/devicetree/bindings/video/ F: Documentation/gpu/ F: include/drm/ F: include/uapi/drm/ @@ -4683,7 +4683,7 @@ M: Boris Brezillon L: dri-devel@lists.freedesktop.org S: Supported F: drivers/gpu/drm/atmel-hlcdc/ -F: Documentation/devicetree/bindings/drm/atmel/ +F: Documentation/devicetree/bindings/display/atmel/ T: git git://anongit.freedesktop.org/drm/drm-misc DRM DRIVERS FOR BRIDGE CHIPS @@ -4714,7 +4714,7 @@ S: Supported F: drivers/gpu/drm/fsl-dcu/ F: Documentation/devicetree/bindings/display/fsl,dcu.txt F: Documentation/devicetree/bindings/display/fsl,tcon.txt -F: 
Documentation/devicetree/bindings/display/panel/nec,nl4827hc19_05b.txt +F: Documentation/devicetree/bindings/display/panel/nec,nl4827hc19-05b.txt DRM DRIVERS FOR FREESCALE IMX M: Philipp Zabel @@ -4824,7 +4824,7 @@ M: Eric Anholt S: Supported F: drivers/gpu/drm/v3d/ F: include/uapi/drm/v3d_drm.h -F: Documentation/devicetree/bindings/display/brcm,bcm-v3d.txt +F: Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.txt T: git git://anongit.freedesktop.org/drm/drm-misc DRM DRIVERS FOR VC4 @@ -5735,7 +5735,7 @@ M: Madalin Bucur L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/freescale/fman -F: Documentation/devicetree/bindings/powerpc/fsl/fman.txt +F: Documentation/devicetree/bindings/net/fsl-fman.txt FREESCALE QORIQ PTP CLOCK DRIVER M: Yangbo Lu @@ -5953,14 +5953,14 @@ GENERIC GPIO I2C DRIVER M: Haavard Skinnemoen S: Supported F: drivers/i2c/busses/i2c-gpio.c -F: include/linux/i2c-gpio.h +F: include/linux/platform_data/i2c-gpio.h GENERIC GPIO I2C MULTIPLEXER DRIVER M: Peter Korsgaard L: linux-i2c@vger.kernel.org S: Supported F: drivers/i2c/muxes/i2c-mux-gpio.c -F: include/linux/i2c-mux-gpio.h +F: include/linux/platform_data/i2c-mux-gpio.h F: Documentation/i2c/muxes/i2c-mux-gpio GENERIC HDLC (WAN) DRIVERS @@ -6501,7 +6501,7 @@ L: linux-mm@kvack.org S: Maintained F: mm/hmm* F: include/linux/hmm* -F: Documentation/vm/hmm.txt +F: Documentation/vm/hmm.rst HOST AP DRIVER M: Jouni Malinen @@ -6620,7 +6620,7 @@ F: arch/x86/hyperv F: drivers/hid/hid-hyperv.c F: drivers/hv/ F: drivers/input/serio/hyperv-keyboard.c -F: drivers/pci/host/pci-hyperv.c +F: drivers/pci/controller/pci-hyperv.c F: drivers/net/hyperv/ F: drivers/scsi/storvsc_drv.c F: drivers/uio/uio_hv_generic.c @@ -6966,7 +6966,7 @@ IIO MULTIPLEXER M: Peter Rosin L: linux-iio@vger.kernel.org S: Maintained -F: Documentation/devicetree/bindings/iio/multiplexer/iio-mux.txt +F: Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt F: drivers/iio/multiplexer/iio-mux.c IIO SUBSYSTEM AND DRIVERS @@ -7401,7 +7401,7 @@ F: drivers/platform/x86/intel-wmi-thunderbolt.c INTEL(R) TRACE HUB M: Alexander Shishkin S: Supported -F: Documentation/trace/intel_th.txt +F: Documentation/trace/intel_th.rst F: drivers/hwtracing/intel_th/ INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT) @@ -7425,7 +7425,7 @@ M: Linus Walleij L: linux-iio@vger.kernel.org S: Maintained F: drivers/iio/gyro/mpu3050* -F: Documentation/devicetree/bindings/iio/gyroscope/inv,mpu3050.txt +F: Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt IOC3 ETHERNET DRIVER M: Ralf Baechle @@ -8700,7 +8700,7 @@ M: Guenter Roeck L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/max6697 -F: Documentation/devicetree/bindings/i2c/max6697.txt +F: Documentation/devicetree/bindings/hwmon/max6697.txt F: drivers/hwmon/max6697.c F: include/linux/platform_data/max6697.h @@ -9080,7 +9080,7 @@ M: Martin Donnelly M: Martyn Welch S: Maintained F: drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c -F: Documentation/devicetree/bindings/video/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt +F: Documentation/devicetree/bindings/display/bridge/megachips-stdpxxxx-ge-b850v3-fw.txt MEGARAID SCSI/SAS DRIVERS M: Kashyap Desai @@ -9417,10 +9417,12 @@ F: drivers/usb/image/microtek.* MIPS M: Ralf Baechle +M: Paul Burton M: James Hogan L: linux-mips@linux-mips.org W: http://www.linux-mips.org/ T: git git://git.linux-mips.org/pub/scm/ralf/linux.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git Q: 
http://patchwork.linux-mips.org/project/linux-mips/list/ S: Supported F: Documentation/devicetree/bindings/mips/ @@ -9522,7 +9524,7 @@ M: Subrahmanya Lingappa L: linux-pci@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt -F: drivers/pci/host/pcie-mobiveil.c +F: drivers/pci/controller/pcie-mobiveil.c MODULE SUPPORT M: Jessica Yu @@ -9663,7 +9665,7 @@ F: include/uapi/linux/mmc/ MULTIPLEXER SUBSYSTEM M: Peter Rosin S: Maintained -F: Documentation/ABI/testing/mux/sysfs-class-mux* +F: Documentation/ABI/testing/sysfs-class-mux* F: Documentation/devicetree/bindings/mux/ F: include/linux/dt-bindings/mux/ F: include/linux/mux/ @@ -9694,7 +9696,7 @@ MXSFB DRM DRIVER M: Marek Vasut S: Supported F: drivers/gpu/drm/mxsfb/ -F: Documentation/devicetree/bindings/display/mxsfb-drm.txt +F: Documentation/devicetree/bindings/display/mxsfb.txt MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE) M: Chris Lee @@ -10242,7 +10244,7 @@ F: arch/powerpc/include/asm/pnv-ocxl.h F: drivers/misc/ocxl/ F: include/misc/ocxl* F: include/uapi/misc/ocxl.h -F: Documentation/accelerators/ocxl.txt +F: Documentation/accelerators/ocxl.rst OMAP AUDIO SUPPORT M: Peter Ujfalusi @@ -10271,18 +10273,16 @@ F: arch/arm/boot/dts/*am5* F: arch/arm/boot/dts/*dra7* OMAP DISPLAY SUBSYSTEM and FRAMEBUFFER SUPPORT (DSS2) -M: Tomi Valkeinen L: linux-omap@vger.kernel.org L: linux-fbdev@vger.kernel.org -S: Maintained +S: Orphan F: drivers/video/fbdev/omap2/ F: Documentation/arm/OMAP/DSS OMAP FRAMEBUFFER SUPPORT -M: Tomi Valkeinen L: linux-fbdev@vger.kernel.org L: linux-omap@vger.kernel.org -S: Maintained +S: Orphan F: drivers/video/fbdev/omap/ OMAP GENERAL PURPOSE MEMORY CONTROLLER SUPPORT @@ -10390,7 +10390,7 @@ F: arch/arm/mach-omap1/ F: arch/arm/plat-omap/ F: arch/arm/configs/omap1_defconfig F: drivers/i2c/busses/i2c-omap.c -F: include/linux/i2c-omap.h +F: include/linux/platform_data/i2c-omap.h OMAP2+ SUPPORT M: Tony Lindgren @@ -10422,7 +10422,7 @@ F: drivers/regulator/tps65218-regulator.c F: drivers/regulator/tps65910-regulator.c F: drivers/regulator/twl-regulator.c F: drivers/regulator/twl6030-regulator.c -F: include/linux/i2c-omap.h +F: include/linux/platform_data/i2c-omap.h ONION OMEGA2+ BOARD M: Harvey Hunt @@ -10726,7 +10726,7 @@ PARALLEL LCD/KEYPAD PANEL DRIVER M: Willy Tarreau M: Ksenija Stanojevic S: Odd Fixes -F: Documentation/misc-devices/lcd-panel-cgram.txt +F: Documentation/auxdisplay/lcd-panel-cgram.txt F: drivers/misc/panel.c PARALLEL PORT SUBSYSTEM @@ -10827,7 +10827,7 @@ L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/pci/aardvark-pci.txt -F: drivers/pci/host/pci-aardvark.c +F: drivers/pci/controller/pci-aardvark.c PCI DRIVER FOR ALTERA PCIE IP M: Ley Foon Tan @@ -10835,7 +10835,7 @@ L: rfi@lists.rocketboards.org (moderated for non-subscribers) L: linux-pci@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/pci/altera-pcie.txt -F: drivers/pci/host/pcie-altera.c +F: drivers/pci/controller/pcie-altera.c PCI DRIVER FOR APPLIEDMICRO XGENE M: Tanmay Inamdar @@ -10843,7 +10843,7 @@ L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/pci/xgene-pci.txt -F: drivers/pci/host/pci-xgene.c +F: drivers/pci/controller/pci-xgene.c PCI DRIVER FOR ARM VERSATILE PLATFORM M: Rob Herring @@ -10851,7 +10851,7 @@ L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org S: Maintained F: 
Documentation/devicetree/bindings/pci/versatile.txt -F: drivers/pci/host/pci-versatile.c +F: drivers/pci/controller/pci-versatile.c PCI DRIVER FOR ARMADA 8K M: Thomas Petazzoni @@ -10859,14 +10859,14 @@ L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/pci/pci-armada8k.txt -F: drivers/pci/dwc/pcie-armada8k.c +F: drivers/pci/controller/dwc/pcie-armada8k.c PCI DRIVER FOR CADENCE PCIE IP M: Alan Douglas L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/cdns,*.txt -F: drivers/pci/cadence/pcie-cadence* +F: drivers/pci/controller/pcie-cadence* PCI DRIVER FOR FREESCALE LAYERSCAPE M: Minghuan Lian @@ -10876,7 +10876,7 @@ L: linuxppc-dev@lists.ozlabs.org L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org S: Maintained -F: drivers/pci/dwc/*layerscape* +F: drivers/pci/controller/dwc/*layerscape* PCI DRIVER FOR GENERIC OF HOSTS M: Will Deacon @@ -10884,8 +10884,8 @@ L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/pci/host-generic-pci.txt -F: drivers/pci/host/pci-host-common.c -F: drivers/pci/host/pci-host-generic.c +F: drivers/pci/controller/pci-host-common.c +F: drivers/pci/controller/pci-host-generic.c PCI DRIVER FOR IMX6 M: Richard Zhu @@ -10894,14 +10894,14 @@ L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt -F: drivers/pci/dwc/*imx6* +F: drivers/pci/controller/dwc/*imx6* PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD) M: Keith Busch M: Jonathan Derrick L: linux-pci@vger.kernel.org S: Supported -F: drivers/pci/host/vmd.c +F: drivers/pci/controller/vmd.c PCI DRIVER FOR MICROSEMI SWITCHTEC M: Kurt Schwemmer @@ -10921,7 +10921,7 @@ M: Jason Cooper L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -F: drivers/pci/host/*mvebu* +F: drivers/pci/controller/*mvebu* PCI DRIVER FOR NVIDIA TEGRA M: Thierry Reding @@ -10929,14 +10929,14 @@ L: linux-tegra@vger.kernel.org L: linux-pci@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt -F: drivers/pci/host/pci-tegra.c +F: drivers/pci/controller/pci-tegra.c PCI DRIVER FOR RENESAS R-CAR M: Simon Horman L: linux-pci@vger.kernel.org L: linux-renesas-soc@vger.kernel.org S: Maintained -F: drivers/pci/host/*rcar* +F: drivers/pci/controller/*rcar* PCI DRIVER FOR SAMSUNG EXYNOS M: Jingoo Han @@ -10944,7 +10944,7 @@ L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) S: Maintained -F: drivers/pci/dwc/pci-exynos.c +F: drivers/pci/controller/dwc/pci-exynos.c PCI DRIVER FOR SYNOPSYS DESIGNWARE M: Jingoo Han @@ -10952,7 +10952,7 @@ M: Joao Pinto L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/designware-pcie.txt -F: drivers/pci/dwc/*designware* +F: drivers/pci/controller/dwc/*designware* PCI DRIVER FOR TI DRA7XX M: Kishon Vijay Abraham I @@ -10960,14 +10960,14 @@ L: linux-omap@vger.kernel.org L: linux-pci@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/pci/ti-pci.txt -F: drivers/pci/dwc/pci-dra7xx.c +F: drivers/pci/controller/dwc/pci-dra7xx.c PCI DRIVER FOR TI KEYSTONE M: Murali Karicheri L: linux-pci@vger.kernel.org L: 
linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -F: drivers/pci/dwc/*keystone* +F: drivers/pci/controller/dwc/*keystone* PCI ENDPOINT SUBSYSTEM M: Kishon Vijay Abraham I @@ -11000,7 +11000,7 @@ L: rfi@lists.rocketboards.org (moderated for non-subscribers) L: linux-pci@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/pci/altera-pcie-msi.txt -F: drivers/pci/host/pcie-altera-msi.c +F: drivers/pci/controller/pcie-altera-msi.c PCI MSI DRIVER FOR APPLIEDMICRO XGENE M: Duc Dang @@ -11008,7 +11008,7 @@ L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/pci/xgene-pci-msi.txt -F: drivers/pci/host/pci-xgene-msi.c +F: drivers/pci/controller/pci-xgene-msi.c PCI SUBSYSTEM M: Bjorn Helgaas @@ -11034,9 +11034,7 @@ L: linux-pci@vger.kernel.org Q: http://patchwork.ozlabs.org/project/linux-pci/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git/ S: Supported -F: drivers/pci/cadence/ -F: drivers/pci/host/ -F: drivers/pci/dwc/ +F: drivers/pci/controller/ PCIE DRIVER FOR AXIS ARTPEC M: Jesper Nilsson @@ -11044,7 +11042,7 @@ L: linux-arm-kernel@axis.com L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/axis,artpec* -F: drivers/pci/dwc/*artpec* +F: drivers/pci/controller/dwc/*artpec* PCIE DRIVER FOR CAVIUM THUNDERX M: David Daney @@ -11052,22 +11050,22 @@ L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported F: Documentation/devicetree/bindings/pci/pci-thunder-* -F: drivers/pci/host/pci-thunder-* +F: drivers/pci/controller/pci-thunder-* PCIE DRIVER FOR HISILICON M: Zhou Wang L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/hisilicon-pcie.txt -F: drivers/pci/dwc/pcie-hisi.c +F: drivers/pci/controller/dwc/pcie-hisi.c PCIE DRIVER FOR HISILICON KIRIN M: Xiaowei Song M: Binghui Wang L: linux-pci@vger.kernel.org S: Maintained -F: Documentation/devicetree/bindings/pci/pcie-kirin.txt -F: drivers/pci/dwc/pcie-kirin.c +F: Documentation/devicetree/bindings/pci/kirin-pcie.txt +F: drivers/pci/controller/dwc/pcie-kirin.c PCIE DRIVER FOR HISILICON STB M: Jianguo Sun @@ -11075,7 +11073,7 @@ M: Shawn Guo L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt -F: drivers/pci/dwc/pcie-histb.c +F: drivers/pci/controller/dwc/pcie-histb.c PCIE DRIVER FOR MEDIATEK M: Ryder Lee @@ -11083,14 +11081,14 @@ L: linux-pci@vger.kernel.org L: linux-mediatek@lists.infradead.org S: Supported F: Documentation/devicetree/bindings/pci/mediatek* -F: drivers/pci/host/*mediatek* +F: drivers/pci/controller/*mediatek* PCIE DRIVER FOR QUALCOMM MSM M: Stanimir Varbanov L: linux-pci@vger.kernel.org L: linux-arm-msm@vger.kernel.org S: Maintained -F: drivers/pci/dwc/*qcom* +F: drivers/pci/controller/dwc/*qcom* PCIE DRIVER FOR ROCKCHIP M: Shawn Lin @@ -11098,20 +11096,20 @@ L: linux-pci@vger.kernel.org L: linux-rockchip@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/pci/rockchip-pcie* -F: drivers/pci/host/pcie-rockchip* +F: drivers/pci/controller/pcie-rockchip* PCI DRIVER FOR V3 SEMICONDUCTOR V360EPC M: Linus Walleij L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt -F: drivers/pci/host/pci-v3-semi.c +F: drivers/pci/controller/pci-v3-semi.c PCIE DRIVER FOR ST SPEAR13XX M: Pratyush Anand L: linux-pci@vger.kernel.org S: Maintained -F: drivers/pci/dwc/*spear* +F: 
drivers/pci/controller/dwc/*spear* PCMCIA SUBSYSTEM M: Dominik Brodowski @@ -12179,7 +12177,7 @@ F: drivers/mtd/nand/raw/r852.h RISC-V ARCHITECTURE M: Palmer Dabbelt -M: Albert Ou +M: Albert Ou L: linux-riscv@lists.infradead.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git S: Supported @@ -12457,7 +12455,7 @@ L: linux-crypto@vger.kernel.org L: linux-samsung-soc@vger.kernel.org S: Maintained F: drivers/crypto/exynos-rng.c -F: Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt +F: Documentation/devicetree/bindings/rng/samsung,exynos4-rng.txt SAMSUNG EXYNOS TRUE RANDOM NUMBER GENERATOR (TRNG) DRIVER M: Łukasz Stelmach @@ -12939,6 +12937,14 @@ F: drivers/media/usb/siano/ F: drivers/media/usb/siano/ F: drivers/media/mmc/siano/ +SIFIVE DRIVERS +M: Palmer Dabbelt +L: linux-riscv@lists.infradead.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git +S: Supported +K: sifive +N: sifive + SILEAD TOUCHSCREEN DRIVER M: Hans de Goede L: linux-input@vger.kernel.org @@ -13291,7 +13297,7 @@ M: Vinod Koul L: alsa-devel@alsa-project.org (moderated for non-subscribers) T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git S: Supported -F: Documentation/sound/alsa/compress_offload.txt +F: Documentation/sound/designs/compress-offload.rst F: include/sound/compress_driver.h F: include/uapi/sound/compress_* F: sound/core/compress_offload.c @@ -13312,7 +13318,7 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers) W: http://alsa-project.org/main/index.php/ASoC S: Supported F: Documentation/devicetree/bindings/sound/ -F: Documentation/sound/alsa/soc/ +F: Documentation/sound/soc/ F: sound/soc/ F: include/sound/soc* @@ -13571,7 +13577,7 @@ F: drivers/*/stm32-*timer* F: drivers/pwm/pwm-stm32* F: include/linux/*/stm32-*tim* F: Documentation/ABI/testing/*timer-stm32 -F: Documentation/devicetree/bindings/*/stm32-*timer +F: Documentation/devicetree/bindings/*/stm32-*timer* F: Documentation/devicetree/bindings/pwm/pwm-stm32* STMMAC ETHERNET DRIVER @@ -13794,7 +13800,7 @@ SYSTEM TRACE MODULE CLASS M: Alexander Shishkin S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/ash/stm.git -F: Documentation/trace/stm.txt +F: Documentation/trace/stm.rst F: drivers/hwtracing/stm/ F: include/linux/stm.h F: include/uapi/linux/stm.h @@ -14471,7 +14477,7 @@ M: Steven Rostedt M: Ingo Molnar T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core S: Maintained -F: Documentation/trace/ftrace.txt +F: Documentation/trace/ftrace.rst F: arch/*/*/*/ftrace.h F: arch/*/kernel/ftrace.c F: include/*/ftrace.h @@ -14940,7 +14946,7 @@ M: Heikki Krogerus L: linux-usb@vger.kernel.org S: Maintained F: Documentation/ABI/testing/sysfs-class-typec -F: Documentation/usb/typec.rst +F: Documentation/driver-api/usb/typec.rst F: drivers/usb/typec/ F: include/linux/usb/typec.h @@ -15007,8 +15013,7 @@ F: drivers/media/usb/zr364xx/ USER-MODE LINUX (UML) M: Jeff Dike M: Richard Weinberger -L: user-mode-linux-devel@lists.sourceforge.net -L: user-mode-linux-user@lists.sourceforge.net +L: linux-um@lists.infradead.org W: http://user-mode-linux.sourceforge.net T: git git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml.git S: Maintained @@ -15770,7 +15775,7 @@ YEALINK PHONE DRIVER M: Henk Vergonet L: usbb2k-api-dev@nongnu.org S: Maintained -F: Documentation/input/yealink.rst +F: Documentation/input/devices/yealink.rst F: drivers/input/misc/yealink.* Z8530 DRIVER FOR AX.25 diff --git a/Makefile b/Makefile index 019a5a020606..ca2af1ab91eb 
100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 -PATCHLEVEL = 17 +PATCHLEVEL = 18 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -rc1 NAME = Merciless Moray # *DOCUMENTATION* @@ -442,8 +442,6 @@ export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL export KBUILD_ARFLAGS -export CC_VERSION_TEXT := $(shell $(CC) --version | head -n 1) - # When compiling out-of-tree modules, put MODVERDIR in the module # tree rather than in the kernel tree. The kernel tree might # even be read-only. @@ -514,6 +512,12 @@ ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/cc-can-link.sh $(CC)), y) export CC_CAN_LINK endif +# The expansion should be delayed until arch/$(SRCARCH)/Makefile is included. +# Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile. +# CC_VERSION_TEXT is referenced from Kconfig (so it needs export), +# and from include/config/auto.conf.cmd to detect the compiler upgrade. +CC_VERSION_TEXT = $(shell $(CC) --version | head -n 1) + ifeq ($(config-targets),1) # =========================================================================== # *config targets only - make sure prerequisites are updated, and descend @@ -523,7 +527,7 @@ ifeq ($(config-targets),1) # KBUILD_DEFCONFIG may point out an alternative default configuration # used for 'make defconfig' include arch/$(SRCARCH)/Makefile -export KBUILD_DEFCONFIG KBUILD_KCONFIG +export KBUILD_DEFCONFIG KBUILD_KCONFIG CC_VERSION_TEXT config: scripts_basic outputmakefile FORCE $(Q)$(MAKE) $(build)=scripts/kconfig $@ @@ -585,12 +589,32 @@ virt-y := virt/ endif # KBUILD_EXTMOD ifeq ($(dot-config),1) -# Read in config -include include/config/auto.conf +endif +# The all: target is the default when no target is given on the +# command line. +# This allow a user to issue only 'make' to build a kernel including modules +# Defaults to vmlinux, but the arch makefile usually adds further targets +all: vmlinux + +CFLAGS_GCOV := -fprofile-arcs -ftest-coverage \ + $(call cc-option,-fno-tree-loop-im) \ + $(call cc-disable-warning,maybe-uninitialized,) +export CFLAGS_GCOV + +# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default +# values of the respective KBUILD_* variables +ARCH_CPPFLAGS := +ARCH_AFLAGS := +ARCH_CFLAGS := +include arch/$(SRCARCH)/Makefile + +ifeq ($(dot-config),1) ifeq ($(KBUILD_EXTMOD),) -# Read in dependencies to all Kconfig* files, make sure to run -# oldconfig if changes are detected. +# Read in dependencies to all Kconfig* files, make sure to run syncconfig if +# changes are detected. This should be included after arch/$(SRCARCH)/Makefile +# because some architectures define CROSS_COMPILE there. -include include/config/auto.conf.cmd # To avoid any implicit rule to kick in, define an empty command @@ -622,24 +646,6 @@ else include/config/auto.conf: ; endif # $(dot-config) -# The all: target is the default when no target is given on the -# command line. 
-# This allow a user to issue only 'make' to build a kernel including modules -# Defaults to vmlinux, but the arch makefile usually adds further targets -all: vmlinux - -CFLAGS_GCOV := -fprofile-arcs -ftest-coverage \ - $(call cc-option,-fno-tree-loop-im) \ - $(call cc-disable-warning,maybe-uninitialized,) -export CFLAGS_GCOV CFLAGS_KCOV - -# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default -# values of the respective KBUILD_* variables -ARCH_CPPFLAGS := -ARCH_AFLAGS := -ARCH_CFLAGS := -include arch/$(SRCARCH)/Makefile - KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,) KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) @@ -680,55 +686,11 @@ ifneq ($(CONFIG_FRAME_WARN),0) KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN}) endif -# This selects the stack protector compiler flag. Testing it is delayed -# until after .config has been reprocessed, in the prepare-compiler-check -# target. -ifdef CONFIG_CC_STACKPROTECTOR_AUTO - stackp-flag := $(call cc-option,-fstack-protector-strong,$(call cc-option,-fstack-protector)) - stackp-name := AUTO -else -ifdef CONFIG_CC_STACKPROTECTOR_REGULAR - stackp-flag := -fstack-protector - stackp-name := REGULAR -else -ifdef CONFIG_CC_STACKPROTECTOR_STRONG - stackp-flag := -fstack-protector-strong - stackp-name := STRONG -else - # If either there is no stack protector for this architecture or - # CONFIG_CC_STACKPROTECTOR_NONE is selected, we're done, and $(stackp-name) - # is empty, skipping all remaining stack protector tests. - # - # Force off for distro compilers that enable stack protector by default. - KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector) -endif -endif -endif -# Find arch-specific stack protector compiler sanity-checking script. -ifdef stackp-name -ifneq ($(stackp-flag),) - stackp-path := $(srctree)/scripts/gcc-$(SRCARCH)_$(BITS)-has-stack-protector.sh - stackp-check := $(wildcard $(stackp-path)) - # If the wildcard test matches a test script, run it to check functionality. - ifdef stackp-check - ifneq ($(shell $(CONFIG_SHELL) $(stackp-check) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y) - stackp-broken := y - endif - endif - ifndef stackp-broken - # If the stack protector is functional, enable code that depends on it. - KBUILD_CPPFLAGS += -DCONFIG_CC_STACKPROTECTOR - # Either we've already detected the flag (for AUTO) or we'll fail the - # build in the prepare-compiler-check rule (for specific flag). - KBUILD_CFLAGS += $(stackp-flag) - else - # We have to make sure stack protector is unconditionally disabled if - # the compiler is broken (in case we're going to continue the build in - # AUTO mode). - KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector) - endif -endif -endif +stackp-flags-$(CONFIG_CC_HAS_STACKPROTECTOR_NONE) := -fno-stack-protector +stackp-flags-$(CONFIG_STACKPROTECTOR) := -fstack-protector +stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong + +KBUILD_CFLAGS += $(stackp-flags-y) ifeq ($(cc-name),clang) KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,) @@ -1112,7 +1074,7 @@ endif # prepare2 creates a makefile if using a separate output directory. # From this point forward, .config has been reprocessed, so any rules # that need to depend on updated CONFIG_* values can be checked here. 
-prepare2: prepare3 prepare-compiler-check outputmakefile asm-generic +prepare2: prepare3 outputmakefile asm-generic prepare1: prepare2 $(version_h) $(autoksyms_h) include/generated/utsrelease.h \ include/config/auto.conf @@ -1138,43 +1100,6 @@ uapi-asm-generic: PHONY += prepare-objtool prepare-objtool: $(objtool_target) -# Check for CONFIG flags that require compiler support. Abort the build -# after .config has been processed, but before the kernel build starts. -# -# For security-sensitive CONFIG options, we don't want to fallback and/or -# silently change which compiler flags will be used, since that leads to -# producing kernels with different security feature characteristics -# depending on the compiler used. (For example, "But I selected -# CC_STACKPROTECTOR_STRONG! Why did it build with _REGULAR?!") -PHONY += prepare-compiler-check -prepare-compiler-check: FORCE -# Make sure compiler supports requested stack protector flag. -ifdef stackp-name - # Warn about CONFIG_CC_STACKPROTECTOR_AUTO having found no option. - ifeq ($(stackp-flag),) - @echo CONFIG_CC_STACKPROTECTOR_$(stackp-name): \ - Compiler does not support any known stack-protector >&2 - else - # Fail if specifically requested stack protector is missing. - ifeq ($(call cc-option, $(stackp-flag)),) - @echo Cannot use CONFIG_CC_STACKPROTECTOR_$(stackp-name): \ - $(stackp-flag) not supported by compiler >&2 && exit 1 - endif - endif -endif -# Make sure compiler does not have buggy stack-protector support. If a -# specific stack-protector was requested, fail the build, otherwise warn. -ifdef stackp-broken - ifeq ($(stackp-name),AUTO) - @echo CONFIG_CC_STACKPROTECTOR_$(stackp-name): \ - $(stackp-flag) available but compiler is broken: disabling >&2 - else - @echo Cannot use CONFIG_CC_STACKPROTECTOR_$(stackp-name): \ - $(stackp-flag) available but compiler is broken >&2 && exit 1 - endif -endif - @: - # Generate some files # --------------------------------------------------------------------------- diff --git a/arch/Kconfig b/arch/Kconfig index 86ae4c4edd6f..1aa59063f1fd 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -403,7 +403,16 @@ config SECCOMP_FILTER in terms of Berkeley Packet Filter programs which implement task-defined system call filtering polices. - See Documentation/prctl/seccomp_filter.txt for details. + See Documentation/userspace-api/seccomp_filter.rst for details. + +preferred-plugin-hostcc := $(if-success,[ $(gcc-version) -ge 40800 ],$(HOSTCXX),$(HOSTCC)) + +config PLUGIN_HOSTCC + string + default "$(shell,$(srctree)/scripts/gcc-plugin.sh "$(preferred-plugin-hostcc)" "$(HOSTCXX)" "$(CC)")" + help + Host compiler used to build GCC plugins. This can be $(HOSTCXX), + $(HOSTCC), or a null string if GCC plugin is unsupported. config HAVE_GCC_PLUGINS bool @@ -414,7 +423,7 @@ config HAVE_GCC_PLUGINS menuconfig GCC_PLUGINS bool "GCC plugins" depends on HAVE_GCC_PLUGINS - depends on !COMPILE_TEST + depends on PLUGIN_HOSTCC != "" help GCC plugins are loadable modules that provide extra features to the compiler. They are useful for runtime instrumentation and static analysis. 
@@ -424,7 +433,7 @@ menuconfig GCC_PLUGINS config GCC_PLUGIN_CYC_COMPLEXITY bool "Compute the cyclomatic complexity of a function" if EXPERT depends on GCC_PLUGINS - depends on !COMPILE_TEST + depends on !COMPILE_TEST # too noisy help The complexity M of a function's control flow graph is defined as: M = E - N + 2P @@ -484,6 +493,7 @@ config GCC_PLUGIN_STRUCTLEAK config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL bool "Force initialize all struct type variables passed by reference" depends on GCC_PLUGIN_STRUCTLEAK + depends on !COMPILE_TEST help Zero initialize any struct type local variable that may be passed by reference without having been initialized. @@ -491,7 +501,7 @@ config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL config GCC_PLUGIN_STRUCTLEAK_VERBOSE bool "Report forcefully initialized variables" depends on GCC_PLUGIN_STRUCTLEAK - depends on !COMPILE_TEST + depends on !COMPILE_TEST # too noisy help This option will cause a warning to be printed each time the structleak plugin finds a variable it thinks needs to be @@ -531,7 +541,7 @@ config GCC_PLUGIN_RANDSTRUCT config GCC_PLUGIN_RANDSTRUCT_PERFORMANCE bool "Use cacheline-aware structure randomization" depends on GCC_PLUGIN_RANDSTRUCT - depends on !COMPILE_TEST + depends on !COMPILE_TEST # do not reduce test coverage help If you say Y here, the RANDSTRUCT randomization will make a best effort at restricting randomization to cacheline-sized @@ -539,17 +549,20 @@ config GCC_PLUGIN_RANDSTRUCT_PERFORMANCE in structures. This reduces the performance hit of RANDSTRUCT at the cost of weakened randomization. -config HAVE_CC_STACKPROTECTOR +config HAVE_STACKPROTECTOR bool help An arch should select this symbol if: - - its compiler supports the -fstack-protector option - it has implemented a stack canary (e.g. __stack_chk_guard) -choice - prompt "Stack Protector buffer overflow detection" - depends on HAVE_CC_STACKPROTECTOR - default CC_STACKPROTECTOR_AUTO +config CC_HAS_STACKPROTECTOR_NONE + def_bool $(cc-option,-fno-stack-protector) + +config STACKPROTECTOR + bool "Stack Protector buffer overflow detection" + depends on HAVE_STACKPROTECTOR + depends on $(cc-option,-fstack-protector) + default y help This option turns on the "stack-protector" GCC feature. This feature puts, at the beginning of functions, a canary value on @@ -559,14 +572,6 @@ choice overwrite the canary, which gets detected and the attack is then neutralized via a kernel panic. -config CC_STACKPROTECTOR_NONE - bool "None" - help - Disable "stack-protector" GCC feature. - -config CC_STACKPROTECTOR_REGULAR - bool "Regular" - help Functions will have the stack-protector canary logic added if they have an 8-byte or larger character array on the stack. @@ -577,8 +582,11 @@ config CC_STACKPROTECTOR_REGULAR about 3% of all kernel functions, which increases kernel code size by about 0.3%. -config CC_STACKPROTECTOR_STRONG - bool "Strong" +config STACKPROTECTOR_STRONG + bool "Strong Stack Protector" + depends on STACKPROTECTOR + depends on $(cc-option,-fstack-protector-strong) + default y help Functions will have the stack-protector canary logic added in any of the following conditions: @@ -596,14 +604,6 @@ config CC_STACKPROTECTOR_STRONG about 20% of all kernel functions, which increases the kernel code size by about 2%. -config CC_STACKPROTECTOR_AUTO - bool "Automatic" - help - If the compiler supports it, the best available stack-protector - option will be chosen. 
- -endchoice - config HAVE_ARCH_WITHIN_STACK_FRAMES bool help diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 2a78bdef9a24..54eeb8d00bc6 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -8,9 +8,10 @@ config ARM select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FORTIFY_SOURCE + select ARCH_HAS_KCOV select ARCH_HAS_PTE_SPECIAL if ARM_LPAE - select ARCH_HAS_SET_MEMORY select ARCH_HAS_PHYS_TO_DMA + select ARCH_HAS_SET_MEMORY select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL select ARCH_HAS_STRICT_MODULE_RWX if MMU select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST @@ -57,7 +58,6 @@ config ARM select HAVE_ARCH_TRACEHOOK select HAVE_ARM_SMCCC if CPU_V7 select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32 - select HAVE_CC_STACKPROTECTOR select HAVE_CONTEXT_TRACKING select HAVE_C_RECORDMCOUNT select HAVE_DEBUG_KMEMLEAK @@ -92,6 +92,7 @@ config ARM select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE) select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RSEQ + select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS select HAVE_UID16 select HAVE_VIRT_CPU_ACCOUNTING_GEN @@ -1301,7 +1302,7 @@ config SMP will run faster if you say N here. See also , - and the SMP-HOWTO available at + and the SMP-HOWTO available at . If you don't know what to do here, say N. diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index a3c5fbcad4ab..1f5a5ffe7fcf 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -25,6 +25,9 @@ endif GCOV_PROFILE := n +# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. +KCOV_INSTRUMENT := n + # # Architecture dependencies # diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 869080bedb89..ec1a5fd0d294 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -35,7 +35,7 @@ * Start addresses are inclusive and end addresses are exclusive; * start addresses should be rounded down, end addresses up. * - * See Documentation/cachetlb.txt for more information. + * See Documentation/core-api/cachetlb.rst for more information. * Please note that the implementation of these, and the required * effects are cache-type (VIVT/VIPT/PIPT) specific. 
* diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 2d75e77bf7bb..1f1fe4109b02 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -281,6 +281,7 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot); struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); +static inline bool kvm_arch_check_sve_has_vhe(void) { return true; } static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} @@ -304,8 +305,13 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); -/* All host FP/SIMD state is restored on guest exit, so nothing to save: */ -static inline void kvm_fpsimd_flush_cpu_state(void) {} +/* + * VFP/NEON switching is all done by the hyp switch code, so no need to + * coordinate with host context handling for this state: + */ +static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {} static inline void kvm_arm_vhe_guest_enter(void) {} static inline void kvm_arm_vhe_guest_exit(void) {} @@ -340,4 +346,8 @@ static inline int kvm_arm_have_ssbd(void) static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {} static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {} +#define __KVM_HAVE_ARCH_VM_ALLOC +struct kvm *kvm_arch_alloc_vm(void); +void kvm_arch_free_vm(struct kvm *kvm); + #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h index caae4843cb70..16e006f708ca 100644 --- a/arch/arm/include/uapi/asm/kvm.h +++ b/arch/arm/include/uapi/asm/kvm.h @@ -91,6 +91,7 @@ struct kvm_regs { #define KVM_VGIC_V3_ADDR_TYPE_DIST 2 #define KVM_VGIC_V3_ADDR_TYPE_REDIST 3 #define KVM_VGIC_ITS_ADDR_TYPE 4 +#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5 #define KVM_VGIC_V3_DIST_SIZE SZ_64K #define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K) diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 27c5381518d8..974d8d7d1bcd 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -61,7 +61,7 @@ int main(void) { DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); -#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_STACKPROTECTOR DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary)); #endif BLANK(); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 1752033b0070..179a9f6bd1e3 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -791,7 +791,7 @@ ENTRY(__switch_to) ldr r6, [r2, #TI_CPU_DOMAIN] #endif switch_tls r1, r4, r5, r3, r7 -#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) +#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) ldr r7, [r2, #TI_TASK] ldr r8, =__stack_chk_guard .if (TSK_STACK_CANARY > IMM12_MASK) @@ -807,7 +807,7 @@ ENTRY(__switch_to) ldr r0, =thread_notify_head mov r1, #THREAD_NOTIFY_SWITCH bl atomic_notifier_call_chain -#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) +#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) str r7, [r8] #endif THUMB( mov ip, r4 ) diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 1523cb18b109..225d1c58d2de 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -39,7 
+39,7 @@ #include #include -#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_STACKPROTECTOR #include <linux/stackprotector.h> unsigned long __stack_chk_guard __read_mostly; EXPORT_SYMBOL(__stack_chk_guard); diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index b9786f491873..1df21a61e379 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -286,7 +286,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd, return -EINVAL; if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents)) return -EFAULT; - kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL); + kbuf = kmalloc_array(maxevents, sizeof(*kbuf), GFP_KERNEL); if (!kbuf) return -ENOMEM; fs = get_fs(); @@ -324,7 +324,7 @@ asmlinkage long sys_oabi_semtimedop(int semid, return -EINVAL; if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops)) return -EFAULT; - sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL); + sops = kmalloc_array(nsops, sizeof(*sops), GFP_KERNEL); if (!sops) return -ENOMEM; err = 0; diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 7fc0638f263a..d2b5ec9c4b92 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -23,3 +23,11 @@ obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o obj-$(CONFIG_KVM_ARM_HOST) += switch.o CFLAGS_switch.o += $(CFLAGS_ARMV7VE) obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o + +# KVM code is run at a different exception level with a different map, so +# compiler instrumentation that inserts callbacks or checks into the code may +# cause crashes. Just disable it. +GCOV_PROFILE := n +KASAN_SANITIZE := n +UBSAN_SANITIZE := n +KCOV_INSTRUMENT := n diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c index e7b350f18f5f..16d71bac0061 100644 --- a/arch/arm/mach-footbridge/dc21285.c +++ b/arch/arm/mach-footbridge/dc21285.c @@ -252,7 +252,7 @@ int __init dc21285_setup(int nr, struct pci_sys_data *sys) if (nr || !footbridge_cfn_mode()) return 0; - res = kzalloc(sizeof(struct resource) * 2, GFP_KERNEL); + res = kcalloc(2, sizeof(struct resource), GFP_KERNEL); if (!res) { printk("out of memory for root bus resources"); return 0; diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c index bcf3df59f71b..6835b17113e5 100644 --- a/arch/arm/mach-ixp4xx/common-pci.c +++ b/arch/arm/mach-ixp4xx/common-pci.c @@ -421,7 +421,7 @@ int ixp4xx_setup(int nr, struct pci_sys_data *sys) if (nr >= 1) return 0; - res = kzalloc(sizeof(*res) * 2, GFP_KERNEL); + res = kcalloc(2, sizeof(*res), GFP_KERNEL); if (res == NULL) { /* * If we're out of memory this early, something is wrong, diff --git a/arch/arm/mach-ks8695/board-acs5k.c b/arch/arm/mach-ks8695/board-acs5k.c index 937eb1d47e7b..ef835d82cdb9 100644 --- a/arch/arm/mach-ks8695/board-acs5k.c +++ b/arch/arm/mach-ks8695/board-acs5k.c @@ -19,7 +19,7 @@ #include #include #include -#include <linux/i2c-gpio.h> +#include <linux/platform_data/i2c-gpio.h> #include #include diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c index 67d46690a56e..da8f3fc3180f 100644 --- a/arch/arm/mach-omap1/board-htcherald.c +++ b/arch/arm/mach-omap1/board-htcherald.c @@ -31,7 +31,7 @@ #include #include #include -#include <linux/i2c-gpio.h> +#include <linux/platform_data/i2c-gpio.h> #include #include #include diff --git a/arch/arm/mach-omap1/common.h b/arch/arm/mach-omap1/common.h index d83ff257eaa8..c6537d2c2859 100644 --- a/arch/arm/mach-omap1/common.h +++ b/arch/arm/mach-omap1/common.h @@ -27,7 +27,7 @@ #define __ARCH_ARM_MACH_OMAP1_COMMON_H #include -#include <linux/i2c-omap.h> +#include <linux/platform_data/i2c-omap.h> #include #include diff --git a/arch/arm/mach-omap1/i2c.c 
b/arch/arm/mach-omap1/i2c.c index 5bdf3c4190f9..9250f263ac51 100644 --- a/arch/arm/mach-omap1/i2c.c +++ b/arch/arm/mach-omap1/i2c.c @@ -20,7 +20,7 @@ */ #include -#include <linux/i2c-omap.h> +#include <linux/platform_data/i2c-omap.h> #include #include "soc.h" diff --git a/arch/arm/mach-omap1/mcbsp.c b/arch/arm/mach-omap1/mcbsp.c index 8ed67f8d1762..27e22e702f96 100644 --- a/arch/arm/mach-omap1/mcbsp.c +++ b/arch/arm/mach-omap1/mcbsp.c @@ -389,7 +389,7 @@ static void omap_mcbsp_register_board_cfg(struct resource *res, int res_count, { int i; - omap_mcbsp_devices = kzalloc(size * sizeof(struct platform_device *), + omap_mcbsp_devices = kcalloc(size, sizeof(struct platform_device *), GFP_KERNEL); if (!omap_mcbsp_devices) { printk(KERN_ERR "Could not register McBSP devices\n"); diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index dff3750e432f..129455e822e4 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -30,7 +30,7 @@ #include #include #include -#include <linux/i2c-omap.h> +#include <linux/platform_data/i2c-omap.h> #include #include diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c index 0103548b0b15..af545193f673 100644 --- a/arch/arm/mach-omap2/hsmmc.c +++ b/arch/arm/mach-omap2/hsmmc.c @@ -34,7 +34,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c, { char *hc_name; - hc_name = kzalloc(sizeof(char) * (HSMMC_NAME_LEN + 1), GFP_KERNEL); + hc_name = kzalloc(HSMMC_NAME_LEN + 1, GFP_KERNEL); if (!hc_name) { kfree(hc_name); return -ENOMEM; diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index ac219b9e6a4c..41c7b905980a 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -162,7 +162,7 @@ static int omap_device_build_from_dt(struct platform_device *pdev) !omap_hwmod_parse_module_range(NULL, node, &res)) return -ENODEV; - hwmods = kzalloc(sizeof(struct omap_hwmod *) * oh_cnt, GFP_KERNEL); + hwmods = kcalloc(oh_cnt, sizeof(struct omap_hwmod *), GFP_KERNEL); if (!hwmods) { ret = -ENOMEM; goto odbfd_exit; @@ -413,7 +413,7 @@ omap_device_copy_resources(struct omap_hwmod *oh, goto error; } - res = kzalloc(sizeof(*res) * 2, GFP_KERNEL); + res = kcalloc(2, sizeof(*res), GFP_KERNEL); if (!res) return -ENOMEM; diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c index fe66cf247874..d684fac8f592 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c @@ -13,7 +13,7 @@ * XXX these should be marked initdata for multi-OMAP kernels */ -#include <linux/i2c-omap.h> +#include <linux/platform_data/i2c-omap.h> #include #include "omap_hwmod.h" diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c index 74eefd30518c..abef9f6f9bf5 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c @@ -13,7 +13,7 @@ * XXX these should be marked initdata for multi-OMAP kernels */ -#include <linux/i2c-omap.h> +#include <linux/platform_data/i2c-omap.h> #include #include diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c index 53e1ac3724f2..c9483bc06228 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c @@ -14,7 +14,7 @@ * GNU General Public License for more details.
*/ -#include <linux/i2c-omap.h> +#include <linux/platform_data/i2c-omap.h> #include "omap_hwmod.h" #include "omap_hwmod_common_data.h" diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index d93f9ea4119e..23e6a41a18eb 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -15,7 +15,7 @@ * XXX these should be marked initdata for multi-OMAP kernels */ -#include <linux/i2c-omap.h> +#include <linux/platform_data/i2c-omap.h> #include #include diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 234ee0eec815..a95dbac57a81 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -23,7 +23,7 @@ #include #include #include -#include <linux/i2c-omap.h> +#include <linux/platform_data/i2c-omap.h> #include diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c index 887a30fa775b..115473d441cd 100644 --- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c @@ -20,7 +20,7 @@ #include #include #include -#include <linux/i2c-omap.h> +#include <linux/platform_data/i2c-omap.h> #include diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index a27c2fed298c..e6c7061a8e73 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -20,7 +20,7 @@ #include #include #include -#include <linux/i2c-omap.h> +#include <linux/platform_data/i2c-omap.h> #include diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c index 021b5a8b9c0a..058a37e6d11c 100644 --- a/arch/arm/mach-omap2/prm_common.c +++ b/arch/arm/mach-omap2/prm_common.c @@ -285,10 +285,11 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup) prcm_irq_setup = irq_setup; - prcm_irq_chips = kzalloc(sizeof(void *) * nr_regs, GFP_KERNEL); - prcm_irq_setup->saved_mask = kzalloc(sizeof(u32) * nr_regs, GFP_KERNEL); - prcm_irq_setup->priority_mask = kzalloc(sizeof(u32) * nr_regs, - GFP_KERNEL); + prcm_irq_chips = kcalloc(nr_regs, sizeof(void *), GFP_KERNEL); + prcm_irq_setup->saved_mask = kcalloc(nr_regs, sizeof(u32), + GFP_KERNEL); + prcm_irq_setup->priority_mask = kcalloc(nr_regs, sizeof(u32), + GFP_KERNEL); if (!prcm_irq_chips || !prcm_irq_setup->saved_mask || !prcm_irq_setup->priority_mask) diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c index 0adb1bd6208e..4d475f6f4a77 100644 --- a/arch/arm/mach-pxa/palmz72.c +++ b/arch/arm/mach-pxa/palmz72.c @@ -30,7 +30,7 @@ #include #include #include -#include <linux/i2c-gpio.h> +#include <linux/platform_data/i2c-gpio.h> #include #include diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c index 207dcc2e94e7..ab2f89266bbd 100644 --- a/arch/arm/mach-pxa/viper.c +++ b/arch/arm/mach-pxa/viper.c @@ -35,7 +35,7 @@ #include #include #include -#include <linux/i2c-gpio.h> +#include <linux/platform_data/i2c-gpio.h> #include #include #include diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c index f45aed2519ba..406487e76a5c 100644 --- a/arch/arm/mach-sa1100/simpad.c +++ b/arch/arm/mach-sa1100/simpad.c @@ -37,7 +37,7 @@ #include #include #include -#include <linux/i2c-gpio.h> +#include <linux/platform_data/i2c-gpio.h> #include "generic.h" diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c index 21c064267af5..0f5381d13494 100644 --- a/arch/arm/mach-vexpress/spc.c +++ b/arch/arm/mach-vexpress/spc.c @@ -403,7 +403,7 @@ static int ve_spc_populate_opps(uint32_t cluster) uint32_t data = 0, off, ret, idx; struct ve_spc_opp *opps; - opps = kzalloc(sizeof(*opps) * MAX_OPPS, GFP_KERNEL); + opps = kcalloc(MAX_OPPS, sizeof(*opps), GFP_KERNEL); if (!opps) return -ENOMEM; diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 
af27f1c22d93..be0fa7e39c26 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -2162,8 +2162,8 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size) goto err; mapping->bitmap_size = bitmap_size; - mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *), - GFP_KERNEL); + mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *), + GFP_KERNEL); if (!mapping->bitmaps) goto err2; diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index 61e281cb29fb..a1606d950251 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c @@ -20,7 +20,7 @@ #include "mm.h" #ifdef CONFIG_ARM_LPAE -#define __pgd_alloc() kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL) +#define __pgd_alloc() kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL) #define __pgd_free(pgd) kfree(pgd) #else #define __pgd_alloc() (pgd_t *)__get_free_pages(GFP_KERNEL, 2) diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c index 9ed0129bed3c..14db14152909 100644 --- a/arch/arm/probes/kprobes/test-core.c +++ b/arch/arm/probes/kprobes/test-core.c @@ -766,8 +766,9 @@ static int coverage_start_fn(const struct decode_header *h, void *args) static int coverage_start(const union decode_item *table) { - coverage.base = kmalloc(MAX_COVERAGE_ENTRIES * - sizeof(struct coverage_entry), GFP_KERNEL); + coverage.base = kmalloc_array(MAX_COVERAGE_ENTRIES, + sizeof(struct coverage_entry), + GFP_KERNEL); coverage.num_entries = 0; coverage.nesting = 0; return table_iter(table, coverage_start_fn, &coverage); diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile index bb4118213fee..f4efff9d3afb 100644 --- a/arch/arm/vdso/Makefile +++ b/arch/arm/vdso/Makefile @@ -30,6 +30,9 @@ CFLAGS_vgettimeofday.o = -O2 # Disable gcov profiling for VDSO code GCOV_PROFILE := n +# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. +KCOV_INSTRUMENT := n + # Force dependency $(obj)/vdso.o : $(obj)/vdso.so diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 9795b59aa28a..42c090cf0292 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -46,6 +46,7 @@ config ARM64 select ARCH_USE_QUEUED_RWLOCKS select ARCH_SUPPORTS_MEMORY_FAILURE select ARCH_SUPPORTS_ATOMIC_RMW + select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 || CC_IS_CLANG select ARCH_SUPPORTS_NUMA_BALANCING select ARCH_WANT_COMPAT_IPC_PARSE_VERSION select ARCH_WANT_FRAME_POINTERS @@ -102,7 +103,6 @@ config ARM64 select HAVE_ARM_SMCCC select HAVE_EBPF_JIT select HAVE_C_RECORDMCOUNT - select HAVE_CC_STACKPROTECTOR select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL select HAVE_CONTEXT_TRACKING @@ -127,6 +127,7 @@ config ARM64 select HAVE_PERF_USER_STACK_DUMP select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RCU_TABLE_FREE + select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS select HAVE_KPROBES select HAVE_KRETPROBES @@ -1128,6 +1129,7 @@ endmenu config ARM64_SVE bool "ARM Scalable Vector Extension support" default y + depends on !KVM || ARM64_VHE help The Scalable Vector Extension (SVE) is an extension to the AArch64 execution state which complements and extends the SIMD functionality @@ -1153,6 +1155,12 @@ config ARM64_SVE booting the kernel. If unsure and you are not observing these symptoms, you should assume that it is safe to say Y. + CPUs that support SVE are architecturally required to support the + Virtualization Host Extensions (VHE), so the kernel makes no + provision for supporting SVE alongside KVM without VHE enabled. 
+ Thus, you will need to enable CONFIG_ARM64_VHE if you want to support + KVM in the same kernel image. + config ARM64_MODULE_PLTS bool select HAVE_MOD_ARCH_SPECIFIC diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 3c353b4715dc..45272266dafb 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -56,12 +56,6 @@ KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) KBUILD_AFLAGS += $(call cc-option,-mabi=lp64) -ifeq ($(cc-name),clang) -KBUILD_CFLAGS += -DCONFIG_ARCH_SUPPORTS_INT128 -else -KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128) -endif - ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) KBUILD_CPPFLAGS += -mbig-endian CHECKFLAGS += -D__AARCH64EB__ diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 0094c6653b06..d264a7274811 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -36,7 +36,7 @@ * Start addresses are inclusive and end addresses are exclusive; start * addresses should be rounded down, end addresses up. * - * See Documentation/cachetlb.txt for more information. Please note that + * See Documentation/core-api/cachetlb.rst for more information. Please note that * the implementation assumes non-aliasing VIPT D-cache and (aliasing) * VIPT I-cache. * diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 55bc1f073bfb..1717ba1db35d 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -11,9 +11,7 @@ #include #include -#include #include -#include #include /* @@ -510,33 +508,6 @@ static inline bool system_supports_sve(void) cpus_have_const_cap(ARM64_SVE); } -/* - * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE - * vector length. - * - * Use only if SVE is present. - * This function clobbers the SVE vector length. - */ -static inline u64 read_zcr_features(void) -{ - u64 zcr; - unsigned int vq_max; - - /* - * Set the maximum possible VL, and write zeroes to all other - * bits to see if they stick. 
- */ - sve_kernel_enable(NULL); - write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1); - - zcr = read_sysreg_s(SYS_ZCR_EL1); - zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */ - vq_max = sve_vq_from_vl(sve_get_vl()); - zcr |= vq_max - 1; /* set LEN field to maximum effective value */ - - return zcr; -} - #define ARM64_SSBD_UNKNOWN -1 #define ARM64_SSBD_FORCE_DISABLE 0 #define ARM64_SSBD_KERNEL 1 diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index aa7162ae93e3..fa92747a49c8 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -18,6 +18,8 @@ #include #include +#include +#include #ifndef __ASSEMBLY__ @@ -41,6 +43,8 @@ struct task_struct; extern void fpsimd_save_state(struct user_fpsimd_state *state); extern void fpsimd_load_state(struct user_fpsimd_state *state); +extern void fpsimd_save(void); + extern void fpsimd_thread_switch(struct task_struct *next); extern void fpsimd_flush_thread(void); @@ -49,12 +53,27 @@ extern void fpsimd_preserve_current_state(void); extern void fpsimd_restore_current_state(void); extern void fpsimd_update_current_state(struct user_fpsimd_state const *state); +extern void fpsimd_bind_task_to_cpu(void); +extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state); + extern void fpsimd_flush_task_state(struct task_struct *target); +extern void fpsimd_flush_cpu_state(void); extern void sve_flush_cpu_state(void); /* Maximum VL that SVE VL-agnostic software can transparently support */ #define SVE_VL_ARCH_MAX 0x100 +/* Offset of FFR in the SVE register dump */ +static inline size_t sve_ffr_offset(int vl) +{ + return SVE_SIG_FFR_OFFSET(sve_vq_from_vl(vl)) - SVE_SIG_REGS_OFFSET; +} + +static inline void *sve_pffr(struct thread_struct *thread) +{ + return (char *)thread->sve_state + sve_ffr_offset(thread->sve_vl); +} + extern void sve_save_state(void *state, u32 *pfpsr); extern void sve_load_state(void const *state, u32 const *pfpsr, unsigned long vq_minus_1); @@ -63,6 +82,8 @@ extern unsigned int sve_get_vl(void); struct arm64_cpu_capabilities; extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused); +extern u64 read_zcr_features(void); + extern int __ro_after_init sve_max_vl; #ifdef CONFIG_ARM64_SVE diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 951b2076a5e2..102b5a5c47b6 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -33,19 +33,19 @@ /* The hyp-stub will return this for any kvm_call_hyp() call */ #define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR -#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0 -#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT) +#ifndef __ASSEMBLY__ + +#include /* Translate a kernel address of @sym into its equivalent linear mapping */ #define kvm_ksym_ref(sym) \ ({ \ void *val = &sym; \ if (!is_kernel_in_hyp_mode()) \ - val = phys_to_virt((u64)&sym - kimage_voffset); \ + val = lm_alias(&sym); \ val; \ }) -#ifndef __ASSEMBLY__ struct kvm; struct kvm_vcpu; diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 95d8a0e15b5f..fda9a8ca48be 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -30,6 +30,7 @@ #include #include #include +#include #define __KVM_HAVE_ARCH_INTC_INITIALIZED @@ -219,8 +220,8 @@ struct kvm_vcpu_arch { /* State of various workarounds, see kvm_asm.h for bit assignment */ u64 workaround_flags; - /* Guest debug state */ - u64 debug_flags; + /* Miscellaneous vcpu state flags */ + 
u64 flags; /* * We maintain more than a single set of debug registers to support * @@ -241,6 +242,10 @@ struct kvm_vcpu_arch { /* Pointer to host CPU context */ kvm_cpu_context_t *host_cpu_context; + + struct thread_info *host_thread_info; /* hyp VA */ + struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */ + struct { /* {Break,watch}point registers */ struct kvm_guest_debug_arch regs; @@ -296,6 +301,12 @@ struct kvm_vcpu_arch { bool sysregs_loaded_on_cpu; }; +/* vcpu_arch flags field values: */ +#define KVM_ARM64_DEBUG_DIRTY (1 << 0) +#define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */ +#define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */ +#define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */ + #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) /* @@ -397,6 +408,19 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2); } +static inline bool kvm_arch_check_sve_has_vhe(void) +{ + /* + * The Arm architecture specifies that implementation of SVE + * requires VHE also to be implemented. The KVM code for arm64 + * relies on this when SVE is present: + */ + if (system_supports_sve()) + return has_vhe(); + else + return true; +} + static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} @@ -423,15 +447,18 @@ static inline void __cpu_init_stage2(void) "PARange is %d bits, unsupported configuration!", parange); } -/* - * All host FP/SIMD state is restored on guest exit, so nothing needs - * doing here except in the SVE case: -*/ -static inline void kvm_fpsimd_flush_cpu_state(void) +/* Guest/host FPSIMD coordination helpers */ +int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu); + +#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */ +static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) { - if (system_supports_sve()) - sve_flush_cpu_state(); + return kvm_arch_vcpu_run_map_fp(vcpu); } +#endif static inline void kvm_arm_vhe_guest_enter(void) { @@ -481,4 +508,8 @@ static inline int kvm_arm_have_ssbd(void) void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu); void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu); +#define __KVM_HAVE_ARCH_VM_ALLOC +struct kvm *kvm_arch_alloc_vm(void); +void kvm_arch_free_vm(struct kvm *kvm); + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 65ab83e8926e..a73ae1e49200 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -158,7 +158,9 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset, /* Sync TPIDR_EL0 back to thread_struct for current */ void tls_preserve_current_state(void); -#define INIT_THREAD { } +#define INIT_THREAD { \ + .fpsimd_cpu = NR_CPUS, \ +} static inline void start_thread_common(struct pt_regs *regs, unsigned long pc) { @@ -249,6 +251,17 @@ void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused); extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */ extern void __init minsigstksz_setup(void); +/* + * Not at the top of the file due to a direct #include cycle between + * <asm/fpsimd.h> and <asm/processor.h>.
 Deferring this #include + * ensures that contents of processor.h are visible to fpsimd.h even if + * processor.h is included first. + * + * These prctl helpers are the only things in this file that require + * fpsimd.h. The core code expects them to be in this header. + */ +#include <asm/fpsimd.h> + /* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */ #define SVE_SET_VL(arg) sve_set_current_vl(arg) #define SVE_GET_VL() sve_get_current_vl() diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index cbcf11b5e637..cb2c10a8f0a8 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -45,12 +45,6 @@ struct thread_info { int preempt_count; /* 0 => preemptable, <0 => bug */ }; -#define INIT_THREAD_INFO(tsk) \ -{ \ - .preempt_count = INIT_PREEMPT_COUNT, \ - .addr_limit = KERNEL_DS, \ -} - #define thread_saved_pc(tsk) \ ((unsigned long)(tsk->thread.cpu_context.pc)) #define thread_saved_sp(tsk) \ @@ -118,5 +112,12 @@ void arch_release_task_struct(struct task_struct *tsk); _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ _TIF_NOHZ) +#define INIT_THREAD_INFO(tsk) \ +{ \ + .flags = _TIF_FOREIGN_FPSTATE, \ + .preempt_count = INIT_PREEMPT_COUNT, \ + .addr_limit = KERNEL_DS, \ +} + #endif /* __KERNEL__ */ #endif /* __ASM_THREAD_INFO_H */ diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 04b3256f8e6d..4e76630dd655 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -91,6 +91,7 @@ struct kvm_regs { #define KVM_VGIC_V3_ADDR_TYPE_DIST 2 #define KVM_VGIC_V3_ADDR_TYPE_REDIST 3 #define KVM_VGIC_ITS_ADDR_TYPE 4 +#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5 #define KVM_VGIC_V3_DIST_SIZE SZ_64K #define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K) diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index 97d45d5151d4..d4707abb2f16 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -234,8 +234,8 @@ static void __init register_insn_emulation_sysctl(void) struct insn_emulation *insn; struct ctl_table *insns_sysctl, *sysctl; - insns_sysctl = kzalloc(sizeof(*sysctl) * (nr_insn_emulated + 1), - GFP_KERNEL); + insns_sysctl = kcalloc(nr_insn_emulated + 1, sizeof(*sysctl), - GFP_KERNEL); raw_spin_lock_irqsave(&insn_emulation_lock, flags); list_for_each_entry(insn, &insn_emulation, node) { diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 3b527ae46e49..84c68b14f1b2 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -36,12 +36,14 @@ #include #include #include +#include #include #include #include #include #include +#include #include #include #include @@ -117,7 +119,6 @@ */ struct fpsimd_last_state_struct { struct user_fpsimd_state *st; - bool sve_in_use; }; static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state); @@ -158,19 +159,6 @@ static void sve_free(struct task_struct *task) __sve_free(task); } - -/* Offset of FFR in the SVE register dump */ -static size_t sve_ffr_offset(int vl) -{ - return SVE_SIG_FFR_OFFSET(sve_vq_from_vl(vl)) - SVE_SIG_REGS_OFFSET; -} - -static void *sve_pffr(struct task_struct *task) -{ - return (char *)task->thread.sve_state + - sve_ffr_offset(task->thread.sve_vl); -} - static void change_cpacr(u64 val, u64 mask) { u64 cpacr = read_sysreg(CPACR_EL1); @@ -251,31 +239,24 @@ static void task_fpsimd_load(void) WARN_ON(!in_softirq() && !irqs_disabled()); if (system_supports_sve() && test_thread_flag(TIF_SVE)) - 
sve_load_state(sve_pffr(current), + sve_load_state(sve_pffr(¤t->thread), ¤t->thread.uw.fpsimd_state.fpsr, sve_vq_from_vl(current->thread.sve_vl) - 1); else fpsimd_load_state(¤t->thread.uw.fpsimd_state); - - if (system_supports_sve()) { - /* Toggle SVE trapping for userspace if needed */ - if (test_thread_flag(TIF_SVE)) - sve_user_enable(); - else - sve_user_disable(); - - /* Serialised by exception return to user */ - } } /* - * Ensure current's FPSIMD/SVE storage in thread_struct is up to date - * with respect to the CPU registers. + * Ensure FPSIMD/SVE storage in memory for the loaded context is up to + * date with respect to the CPU registers. * * Softirqs (and preemption) must be disabled. */ -static void task_fpsimd_save(void) +void fpsimd_save(void) { + struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st); + /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */ + WARN_ON(!in_softirq() && !irqs_disabled()); if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { @@ -290,10 +271,9 @@ static void task_fpsimd_save(void) return; } - sve_save_state(sve_pffr(current), - ¤t->thread.uw.fpsimd_state.fpsr); + sve_save_state(sve_pffr(¤t->thread), &st->fpsr); } else - fpsimd_save_state(¤t->thread.uw.fpsimd_state); + fpsimd_save_state(st); } } @@ -588,7 +568,7 @@ int sve_set_vector_length(struct task_struct *task, if (task == current) { local_bh_disable(); - task_fpsimd_save(); + fpsimd_save(); set_thread_flag(TIF_FOREIGN_FPSTATE); } @@ -608,10 +588,8 @@ int sve_set_vector_length(struct task_struct *task, task->thread.sve_vl = vl; out: - if (flags & PR_SVE_VL_INHERIT) - set_tsk_thread_flag(task, TIF_SVE_VL_INHERIT); - else - clear_tsk_thread_flag(task, TIF_SVE_VL_INHERIT); + update_tsk_thread_flag(task, TIF_SVE_VL_INHERIT, + flags & PR_SVE_VL_INHERIT); return 0; } @@ -755,6 +733,33 @@ void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) isb(); } +/* + * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE + * vector length. + * + * Use only if SVE is present. + * This function clobbers the SVE vector length. + */ +u64 read_zcr_features(void) +{ + u64 zcr; + unsigned int vq_max; + + /* + * Set the maximum possible VL, and write zeroes to all other + * bits to see if they stick. + */ + sve_kernel_enable(NULL); + write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1); + + zcr = read_sysreg_s(SYS_ZCR_EL1); + zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */ + vq_max = sve_vq_from_vl(sve_get_vl()); + zcr |= vq_max - 1; /* set LEN field to maximum effective value */ + + return zcr; +} + void __init sve_setup(void) { u64 zcr; @@ -829,7 +834,7 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs) local_bh_disable(); - task_fpsimd_save(); + fpsimd_save(); fpsimd_to_sve(current); /* Force ret_to_user to reload the registers: */ @@ -882,31 +887,25 @@ asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs) void fpsimd_thread_switch(struct task_struct *next) { + bool wrong_task, wrong_cpu; + if (!system_supports_fpsimd()) return; - /* - * Save the current FPSIMD state to memory, but only if whatever is in - * the registers is in fact the most recent userland FPSIMD state of - * 'current'. - */ - if (current->mm) - task_fpsimd_save(); - if (next->mm) { - /* - * If we are switching to a task whose most recent userland - * FPSIMD state is already in the registers of *this* cpu, - * we can skip loading the state from memory. 
Otherwise, set - the TIF_FOREIGN_FPSTATE flag so the state will be loaded - upon the next return to userland. - */ - if (__this_cpu_read(fpsimd_last_state.st) == - &next->thread.uw.fpsimd_state - && next->thread.fpsimd_cpu == smp_processor_id()) - clear_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE); - else - set_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE); - } + /* Save unsaved fpsimd state, if any: */ + fpsimd_save(); + + /* + * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's + * state. For kernel threads, FPSIMD registers are never loaded + * and wrong_task and wrong_cpu will always be true. + */ + wrong_task = __this_cpu_read(fpsimd_last_state.st) != + &next->thread.uw.fpsimd_state; + wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id(); + + update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE, + wrong_task || wrong_cpu); } void fpsimd_flush_thread(void) @@ -972,7 +971,7 @@ void fpsimd_preserve_current_state(void) return; local_bh_disable(); - task_fpsimd_save(); + fpsimd_save(); local_bh_enable(); } @@ -992,14 +991,33 @@ void fpsimd_signal_preserve_current_state(void) * Associate current's FPSIMD context with this cpu * Preemption must be disabled when calling this function. */ -static void fpsimd_bind_to_cpu(void) +void fpsimd_bind_task_to_cpu(void) { struct fpsimd_last_state_struct *last = this_cpu_ptr(&fpsimd_last_state); last->st = &current->thread.uw.fpsimd_state; - last->sve_in_use = test_thread_flag(TIF_SVE); current->thread.fpsimd_cpu = smp_processor_id(); + + if (system_supports_sve()) { + /* Toggle SVE trapping for userspace if needed */ + if (test_thread_flag(TIF_SVE)) + sve_user_enable(); + else + sve_user_disable(); + + /* Serialised by exception return to user */ + } +} + +void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st) +{ + struct fpsimd_last_state_struct *last = + this_cpu_ptr(&fpsimd_last_state); + + WARN_ON(!in_softirq() && !irqs_disabled()); + + last->st = st; } /* @@ -1016,7 +1034,7 @@ void fpsimd_restore_current_state(void) if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { task_fpsimd_load(); - fpsimd_bind_to_cpu(); + fpsimd_bind_task_to_cpu(); } local_bh_enable(); @@ -1039,9 +1057,9 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state) fpsimd_to_sve(current); task_fpsimd_load(); + fpsimd_bind_task_to_cpu(); - if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) - fpsimd_bind_to_cpu(); + clear_thread_flag(TIF_FOREIGN_FPSTATE); local_bh_enable(); } @@ -1054,29 +1072,12 @@ void fpsimd_flush_task_state(struct task_struct *t) t->thread.fpsimd_cpu = NR_CPUS; } -static inline void fpsimd_flush_cpu_state(void) +void fpsimd_flush_cpu_state(void) { __this_cpu_write(fpsimd_last_state.st, NULL); + set_thread_flag(TIF_FOREIGN_FPSTATE); } -/* - * Invalidate any task SVE state currently held in this CPU's regs. - * - * This is used to prevent the kernel from trying to reuse SVE register data - * that is destroyed by KVM guest enter/exit. This function should go away when - * KVM SVE support is implemented. Don't use it for anything else.
- */ -#ifdef CONFIG_ARM64_SVE -void sve_flush_cpu_state(void) -{ - struct fpsimd_last_state_struct const *last = - this_cpu_ptr(&fpsimd_last_state); - - if (last->st && last->sve_in_use) - fpsimd_flush_cpu_state(); -} -#endif /* CONFIG_ARM64_SVE */ - #ifdef CONFIG_KERNEL_MODE_NEON DEFINE_PER_CPU(bool, kernel_neon_busy); @@ -1110,11 +1111,8 @@ void kernel_neon_begin(void) __this_cpu_write(kernel_neon_busy, true); - /* Save unsaved task fpsimd state, if any: */ - if (current->mm) { - task_fpsimd_save(); - set_thread_flag(TIF_FOREIGN_FPSTATE); - } + /* Save unsaved fpsimd state, if any: */ + fpsimd_save(); /* Invalidate any task state remaining in the fpsimd regs: */ fpsimd_flush_cpu_state(); @@ -1236,13 +1234,10 @@ static int fpsimd_cpu_pm_notifier(struct notifier_block *self, { switch (cmd) { case CPU_PM_ENTER: - if (current->mm) - task_fpsimd_save(); + fpsimd_save(); fpsimd_flush_cpu_state(); break; case CPU_PM_EXIT: - if (current->mm) - set_thread_flag(TIF_FOREIGN_FPSTATE); break; case CPU_PM_ENTER_FAILED: default: diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index f08a2ed9db0d..e10bc363f533 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -59,7 +59,7 @@ #include #include -#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_STACKPROTECTOR #include <linux/stackprotector.h> unsigned long __stack_chk_guard __read_mostly; EXPORT_SYMBOL(__stack_chk_guard); diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index bd732644c2f6..5c338ce5a7fa 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -44,6 +44,7 @@ #include #include #include +#include <asm/fpsimd.h> #include #include #include diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index a2e3a5af1113..47b23bf617c7 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -39,6 +39,7 @@ config KVM select HAVE_KVM_IRQ_ROUTING select IRQ_BYPASS_MANAGER select HAVE_KVM_IRQ_BYPASS + select HAVE_KVM_VCPU_RUN_PID_CHANGE ---help--- Support hosting virtualized guest machines. We don't support KVM with 16K page tables yet, due to the multiple diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 93afff91cb7c..0f2a135ba15b 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -19,7 +19,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o va_layout.o kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o -kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o +kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o fpsimd.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index a1f4ebdfe6d3..00d422336a45 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -103,7 +103,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) * * Additionally, KVM only traps guest accesses to the debug registers if * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY - * flag on vcpu->arch.debug_flags). Since the guest must not interfere + * flag on vcpu->arch.flags). Since the guest must not interfere * with the hardware state when debugging the guest, we must ensure that * trapping is enabled whenever we are debugging the guest using the * debug registers.
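The debug_flags to flags renaming in the hunks below does not alter the lazy scheme this comment describes, which is worth spelling out: traps stay armed only while the guest is not using the debug registers, and the first trapped access marks the state dirty and switches to eager save/restore. A simplified sketch of the pattern (illustrative C, not the kernel's code; kvm_vcpu_sketch is an invented stand-in type):

#define KVM_ARM64_DEBUG_DIRTY	(1 << 0)

struct kvm_vcpu_sketch { unsigned long flags; };

/* Trap handler path: the guest touched a debug register. */
static void debug_reg_trapped(struct kvm_vcpu_sketch *vcpu)
{
	/* From now on, save/restore the full debug state eagerly. */
	vcpu->flags |= KVM_ARM64_DEBUG_DIRTY;
}

/* Entry path: decide whether debug-register traps must be armed. */
static int need_debug_trap(const struct kvm_vcpu_sketch *vcpu)
{
	return !(vcpu->flags & KVM_ARM64_DEBUG_DIRTY);
}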
@@ -111,7 +111,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) { - bool trap_debug = !(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY); + bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY); unsigned long mdscr; trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug); @@ -184,7 +184,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1); vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state; - vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; + vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY; trap_debug = true; trace_kvm_arm_set_regset("BKPTS", get_num_brps(), @@ -206,7 +206,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) /* If KDE or MDE are set, perform a full save/restore cycle. */ if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE)) - vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; + vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY; trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2); trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1)); diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c new file mode 100644 index 000000000000..dc6ecfa5a2d2 --- /dev/null +++ b/arch/arm64/kvm/fpsimd.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/arm64/kvm/fpsimd.c: Guest/host FPSIMD context coordination helpers + * + * Copyright 2018 Arm Limited + * Author: Dave Martin + */ +#include <linux/bottom_half.h> +#include <linux/sched.h> +#include <linux/thread_info.h> +#include <linux/kvm_host.h> +#include <asm/kvm_asm.h> +#include <asm/kvm_host.h> +#include <asm/kvm_mmu.h> + /* + * Called on entry to KVM_RUN unless this vcpu previously ran at least + * once and the most recent prior KVM_RUN for this vcpu was called from + * the same task as current (highly likely). + * + * This is guaranteed to execute before kvm_arch_vcpu_load_fp(vcpu), + * such that on entering hyp the relevant parts of current are already + * mapped. + */ +int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu) +{ + int ret; + + struct thread_info *ti = &current->thread_info; + struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state; + + /* + * Make sure the host task thread flags and fpsimd state are + * visible to hyp: + */ + ret = create_hyp_mappings(ti, ti + 1, PAGE_HYP); + if (ret) + goto error; + + ret = create_hyp_mappings(fpsimd, fpsimd + 1, PAGE_HYP); + if (ret) + goto error; + + vcpu->arch.host_thread_info = kern_hyp_va(ti); + vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd); +error: + return ret; +} + +/* + * Prepare vcpu for saving the host's FPSIMD state and loading the guest's. + * The actual loading is done by the FPSIMD access trap taken to hyp. + * + * Here, we just set the correct metadata to indicate that the FPSIMD + * state in the cpu regs (if any) belongs to current on the host. + * + * TIF_SVE is backed up here, since it may get clobbered with guest state. + * This flag is restored by kvm_arch_vcpu_put_fp(vcpu). + */ +void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) +{ + BUG_ON(!current->mm); + + vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_HOST_SVE_IN_USE); + vcpu->arch.flags |= KVM_ARM64_FP_HOST; + if (test_thread_flag(TIF_SVE)) + vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE; +} + +/* + * If the guest FPSIMD state was loaded, update the host's context + * tracking data to mark the CPU FPSIMD regs as dirty and belonging to vcpu + * so that they will be written back if the kernel clobbers them due to + * kernel-mode NEON before re-entry into the guest.
+ */ +void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) +{ + WARN_ON_ONCE(!irqs_disabled()); + + if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { + fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs); + clear_thread_flag(TIF_FOREIGN_FPSTATE); + clear_thread_flag(TIF_SVE); + } +} + +/* + * Write back the vcpu FPSIMD regs if they are dirty, and invalidate the + * cpu FPSIMD regs so that they can't be spuriously reused if this vcpu + * disappears and another task or vcpu appears that recycles the same + * struct fpsimd_state. + */ +void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) +{ + local_bh_disable(); + + update_thread_flag(TIF_SVE, + vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE); + + if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { + /* Clean guest FP state to memory and invalidate cpu view */ + fpsimd_save(); + fpsimd_flush_cpu_state(); + } else if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { + /* Ensure user trap controls are correctly restored */ + fpsimd_bind_task_to_cpu(); + } + + local_bh_enable(); +} diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c index 3e717f66f011..50009766e5e5 100644 --- a/arch/arm64/kvm/hyp/debug-sr.c +++ b/arch/arm64/kvm/hyp/debug-sr.c @@ -163,7 +163,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu) if (!has_vhe()) __debug_save_spe_nvhe(&vcpu->arch.host_debug_state.pmscr_el1); - if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)) + if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)) return; host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); @@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu) if (!has_vhe()) __debug_restore_spe_nvhe(vcpu->arch.host_debug_state.pmscr_el1); - if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)) + if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)) return; host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); @@ -196,7 +196,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu) __debug_save_state(vcpu, guest_dbg, guest_ctxt); __debug_restore_state(vcpu, host_dbg, host_ctxt); - vcpu->arch.debug_flags &= ~KVM_ARM64_DEBUG_DIRTY; + vcpu->arch.flags &= ~KVM_ARM64_DEBUG_DIRTY; } u32 __hyp_text __kvm_get_mdcr_el2(void) diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index e41a161d313a..fad1e164fe48 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -166,46 +166,3 @@ abort_guest_exit_end: orr x0, x0, x5 1: ret ENDPROC(__guest_exit) - -ENTRY(__fpsimd_guest_restore) - // x0: esr - // x1: vcpu - // x2-x29,lr: vcpu regs - // vcpu x0-x1 on the stack - stp x2, x3, [sp, #-16]! - stp x4, lr, [sp, #-16]! 
- -alternative_if_not ARM64_HAS_VIRT_HOST_EXTN - mrs x2, cptr_el2 - bic x2, x2, #CPTR_EL2_TFP - msr cptr_el2, x2 -alternative_else - mrs x2, cpacr_el1 - orr x2, x2, #CPACR_EL1_FPEN - msr cpacr_el1, x2 -alternative_endif - isb - - mov x3, x1 - - ldr x0, [x3, #VCPU_HOST_CONTEXT] - kern_hyp_va x0 - add x0, x0, #CPU_GP_REG_OFFSET(CPU_FP_REGS) - bl __fpsimd_save_state - - add x2, x3, #VCPU_CONTEXT - add x0, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS) - bl __fpsimd_restore_state - - // Skip restoring fpexc32 for AArch64 guests - mrs x1, hcr_el2 - tbnz x1, #HCR_RW_SHIFT, 1f - ldr x4, [x3, #VCPU_FPEXC32_EL2] - msr fpexc32_el2, x4 -1: - ldp x4, lr, [sp], #16 - ldp x2, x3, [sp], #16 - ldp x0, x1, [sp], #16 - - eret -ENDPROC(__fpsimd_guest_restore) diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 05d836979032..24b4fbafe3e4 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -149,25 +149,6 @@ wa_epilogue: el1_trap: get_vcpu_ptr x1, x0 - - mrs x0, esr_el2 - lsr x0, x0, #ESR_ELx_EC_SHIFT - /* - * x0: ESR_EC - * x1: vcpu pointer - */ - - /* - * We trap the first access to the FP/SIMD to save the host context - * and restore the guest context lazily. - * If FP/SIMD is not implemented, handle the trap and inject an - * undefined instruction exception to the guest. - */ -alternative_if_not ARM64_HAS_NO_FPSIMD - cmp x0, #ESR_ELx_EC_FP_ASIMD - b.eq __fpsimd_guest_restore -alternative_else_nop_endif - mov x0, #ARM_EXCEPTION_TRAP b __guest_exit diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index c50cedc447f1..d496ef579859 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -22,21 +22,25 @@ #include +#include #include #include +#include #include #include #include #include +#include +#include -static bool __hyp_text __fpsimd_enabled_nvhe(void) +/* Check whether the FP regs were dirtied while in the host-side run loop: */ +static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu) { - return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP); -} + if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE) + vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | + KVM_ARM64_FP_HOST); -static bool fpsimd_enabled_vhe(void) -{ - return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN); + return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED); } /* Save the 32-bit only FPSIMD system register state */ @@ -93,7 +97,10 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu) val = read_sysreg(cpacr_el1); val |= CPACR_EL1_TTA; - val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN); + val &= ~CPACR_EL1_ZEN; + if (!update_fp_enabled(vcpu)) + val &= ~CPACR_EL1_FPEN; + write_sysreg(val, cpacr_el1); write_sysreg(kvm_get_hyp_vector(), vbar_el1); @@ -106,7 +113,10 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) __activate_traps_common(vcpu); val = CPTR_EL2_DEFAULT; - val |= CPTR_EL2_TTA | CPTR_EL2_TFP | CPTR_EL2_TZ; + val |= CPTR_EL2_TTA | CPTR_EL2_TZ; + if (!update_fp_enabled(vcpu)) + val |= CPTR_EL2_TFP; + write_sysreg(val, cptr_el2); } @@ -319,6 +329,50 @@ static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu) } } +static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu) +{ + struct user_fpsimd_state *host_fpsimd = vcpu->arch.host_fpsimd_state; + + if (has_vhe()) + write_sysreg(read_sysreg(cpacr_el1) | CPACR_EL1_FPEN, + cpacr_el1); + else + write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP, + cptr_el2); + + isb(); + + if (vcpu->arch.flags & KVM_ARM64_FP_HOST) { + /* + * In the SVE case, VHE is assumed: it is 
enforced by + * Kconfig and kvm_arch_init(). + */ + if (system_supports_sve() && + (vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE)) { + struct thread_struct *thread = container_of( + host_fpsimd, + struct thread_struct, uw.fpsimd_state); + + sve_save_state(sve_pffr(thread), &host_fpsimd->fpsr); + } else { + __fpsimd_save_state(host_fpsimd); + } + + vcpu->arch.flags &= ~KVM_ARM64_FP_HOST; + } + + __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs); + + /* Skip restoring fpexc32 for AArch64 guests */ + if (!(read_sysreg(hcr_el2) & HCR_RW)) + write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2], + fpexc32_el2); + + vcpu->arch.flags |= KVM_ARM64_FP_ENABLED; + + return true; +} + /* * Return true when we were able to fixup the guest exit and should return to * the guest, false when we should restore the host state and return to the @@ -335,11 +389,23 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) * same PC once the SError has been injected, and replay the * trapping instruction. */ - if (*exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu)) + if (*exit_code != ARM_EXCEPTION_TRAP) + goto exit; + + /* + * We trap the first access to the FP/SIMD to save the host context + * and restore the guest context lazily. + * If FP/SIMD is not implemented, handle the trap and inject an + * undefined instruction exception to the guest. + */ + if (system_supports_fpsimd() && + kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_FP_ASIMD) + return __hyp_switch_fpsimd(vcpu); + + if (!__populate_fault_info(vcpu)) return true; - if (static_branch_unlikely(&vgic_v2_cpuif_trap) && - *exit_code == ARM_EXCEPTION_TRAP) { + if (static_branch_unlikely(&vgic_v2_cpuif_trap)) { bool valid; valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW && @@ -351,12 +417,8 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) if (valid) { int ret = __vgic_v2_perform_cpuif_access(vcpu); - if (ret == 1) { - if (__skip_instr(vcpu)) - return true; - else - *exit_code = ARM_EXCEPTION_TRAP; - } + if (ret == 1 && __skip_instr(vcpu)) + return true; if (ret == -1) { /* Promote an illegal access to an @@ -369,23 +431,21 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS; *exit_code = ARM_EXCEPTION_EL1_SERROR; } + + goto exit; } } if (static_branch_unlikely(&vgic_v3_cpuif_trap) && - *exit_code == ARM_EXCEPTION_TRAP && (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 || kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) { int ret = __vgic_v3_perform_cpuif_access(vcpu); - if (ret == 1) { - if (__skip_instr(vcpu)) - return true; - else - *exit_code = ARM_EXCEPTION_TRAP; - } + if (ret == 1 && __skip_instr(vcpu)) + return true; } +exit: /* Return to the host kernel and handle the exit */ return false; } @@ -428,7 +488,6 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *host_ctxt; struct kvm_cpu_context *guest_ctxt; - bool fp_enabled; u64 exit_code; host_ctxt = vcpu->arch.host_cpu_context; @@ -454,19 +513,14 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) __set_host_arch_workaround_state(vcpu); - fp_enabled = fpsimd_enabled_vhe(); - sysreg_save_guest_state_vhe(guest_ctxt); __deactivate_traps(vcpu); sysreg_restore_host_state_vhe(host_ctxt); - if (fp_enabled) { - __fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs); - __fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs); + if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) __fpsimd_save_fpexc32(vcpu); - } __debug_switch_to_host(vcpu); @@ -478,7 
+532,6 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *host_ctxt; struct kvm_cpu_context *guest_ctxt; - bool fp_enabled; u64 exit_code; vcpu = kern_hyp_va(vcpu); @@ -514,8 +567,6 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) __set_host_arch_workaround_state(vcpu); - fp_enabled = __fpsimd_enabled_nvhe(); - __sysreg_save_state_nvhe(guest_ctxt); __sysreg32_save_state(vcpu); __timer_disable_traps(vcpu); @@ -526,11 +577,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) __sysreg_restore_state_nvhe(host_ctxt); - if (fp_enabled) { - __fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs); - __fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs); + if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) __fpsimd_save_fpexc32(vcpu); - } /* * This must come after restoring the host sysregs, since a non-VHE diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index b3894df6bf1a..35bc16832efe 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -196,7 +196,7 @@ void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) sysreg[DACR32_EL2] = read_sysreg(dacr32_el2); sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2); - if (has_vhe() || vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY) + if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY) sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2); } @@ -218,7 +218,7 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu) write_sysreg(sysreg[DACR32_EL2], dacr32_el2); write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2); - if (has_vhe() || vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY) + if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY) write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2); } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 6e3b969391fd..a4363735d3f8 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include @@ -338,7 +337,7 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu, { if (p->is_write) { vcpu_write_sys_reg(vcpu, p->regval, r->reg); - vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; + vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY; } else { p->regval = vcpu_read_sys_reg(vcpu, r->reg); } @@ -369,7 +368,7 @@ static void reg_to_dbg(struct kvm_vcpu *vcpu, } *dbg_reg = val; - vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; + vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY; } static void dbg_to_reg(struct kvm_vcpu *vcpu, @@ -1441,7 +1440,7 @@ static bool trap_debug32(struct kvm_vcpu *vcpu, { if (p->is_write) { vcpu_cp14(vcpu, r->reg) = p->regval; - vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; + vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY; } else { p->regval = vcpu_cp14(vcpu, r->reg); } @@ -1473,7 +1472,7 @@ static bool trap_xvr(struct kvm_vcpu *vcpu, val |= p->regval << 32; *dbg_reg = val; - vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; + vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY; } else { p->regval = *dbg_reg >> 32; } diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index 301417ae2ba8..c127f94da8e2 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -263,7 +263,7 @@ static int asids_init(void) */ WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus()); atomic64_set(&asid_generation, ASID_FIRST_VERSION); - asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map), + asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map), GFP_KERNEL); if (!asid_map) panic("Failed to allocate 
bitmap for %lu ASIDs\n", diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 1b18b4722420..325cfb3b858a 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -310,7 +310,7 @@ static void __init arm64_memory_present(void) } #endif -static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX; +static phys_addr_t memory_limit = PHYS_ADDR_MAX; /* * Limit the memory size that was specified via FDT. @@ -401,7 +401,7 @@ void __init arm64_memblock_init(void) * high up in memory, add back the kernel region that must be accessible * via the linear mapping. */ - if (memory_limit != (phys_addr_t)ULLONG_MAX) { + if (memory_limit != PHYS_ADDR_MAX) { memblock_mem_limit_remove_map(memory_limit); memblock_add(__pa_symbol(_text), (u64)(_end - _text)); } @@ -666,7 +666,7 @@ __setup("keepinitrd", keepinitrd_setup); */ static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p) { - if (memory_limit != (phys_addr_t)ULLONG_MAX) { + if (memory_limit != PHYS_ADDR_MAX) { pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20); } else { pr_emerg("Memory Limit: none\n"); diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h index aef02f7ca8aa..65125d0b02dd 100644 --- a/arch/hexagon/include/asm/pgtable.h +++ b/arch/hexagon/include/asm/pgtable.h @@ -30,7 +30,6 @@ /* A handy thing to have if one has the RAM. Declared in head.S */ extern unsigned long empty_zero_page; -extern unsigned long zero_page_mask; /* * The PTE model described here is that of the Hexagon Virtual Machine, diff --git a/arch/hexagon/kernel/setup.c b/arch/hexagon/kernel/setup.c index 6981949f5df3..dc8c7e75b5d1 100644 --- a/arch/hexagon/kernel/setup.c +++ b/arch/hexagon/kernel/setup.c @@ -66,7 +66,7 @@ void __init setup_arch(char **cmdline_p) */ __vmsetvec(_K_VM_event_vector); - printk(KERN_INFO "PHYS_OFFSET=0x%08x\n", PHYS_OFFSET); + printk(KERN_INFO "PHYS_OFFSET=0x%08lx\n", PHYS_OFFSET); /* * Simulator has a few differences from the hardware. diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c index 192584d5ac2f..1495d45e472d 100644 --- a/arch/hexagon/mm/init.c +++ b/arch/hexagon/mm/init.c @@ -39,9 +39,6 @@ unsigned long __phys_offset; /* physical kernel offset >> 12 */ /* Set as variable to limit PMD copies */ int max_kernel_seg = 0x303; -/* think this should be (page_size-1) the way it's used...*/ -unsigned long zero_page_mask; - /* indicate pfn's of high memory */ unsigned long highstart_pfn, highend_pfn; diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index 94f8bf777afa..dfe40cbdf3b3 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c @@ -350,7 +350,8 @@ init_record_index_pools(void) /* - 3 - */ slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1; slidx_pool.buffer = - kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL); + kmalloc_array(slidx_pool.max_idx, sizeof(slidx_list_t), + GFP_KERNEL); return slidx_pool.buffer ? 
0 : -ENOMEM; } diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index d76529cbff20..9b820f7a6a98 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c @@ -85,7 +85,7 @@ static int __init topology_init(void) } #endif - sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL); + sysfs_cpus = kcalloc(NR_CPUS, sizeof(struct ia64_cpu), GFP_KERNEL); if (!sysfs_cpus) panic("kzalloc in topology_init failed - NR_CPUS too big?"); @@ -319,8 +319,8 @@ static int cpu_cache_sysfs_init(unsigned int cpu) return -1; } - this_cache=kzalloc(sizeof(struct cache_info)*unique_caches, - GFP_KERNEL); + this_cache=kcalloc(unique_caches, sizeof(struct cache_info), + GFP_KERNEL); if (this_cache == NULL) return -ENOMEM; diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index 46ecc5d948aa..acf10eb9da15 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c @@ -430,8 +430,9 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size) int cpu = smp_processor_id(); if (!ia64_idtrs[cpu]) { - ia64_idtrs[cpu] = kmalloc(2 * IA64_TR_ALLOC_MAX * - sizeof (struct ia64_tr_entry), GFP_KERNEL); + ia64_idtrs[cpu] = kmalloc_array(2 * IA64_TR_ALLOC_MAX, + sizeof(struct ia64_tr_entry), + GFP_KERNEL); if (!ia64_idtrs[cpu]) return -ENOMEM; } diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c index 8479e9a7ce16..102aabad6d20 100644 --- a/arch/ia64/sn/kernel/io_common.c +++ b/arch/ia64/sn/kernel/io_common.c @@ -132,7 +132,7 @@ static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device, printk_once(KERN_WARNING "PROM version < 4.50 -- implementing old PROM flush WAR\n"); - war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL); + war_list = kcalloc(DEV_PER_WIDGET, sizeof(*war_list), GFP_KERNEL); BUG_ON(!war_list); SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST, diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index 85d095154902..d9b576df4f82 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c @@ -474,7 +474,8 @@ void __init sn_irq_lh_init(void) { int i; - sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL); + sn_irq_lh = kmalloc_array(NR_IRQS, sizeof(struct list_head *), + GFP_KERNEL); if (!sn_irq_lh) panic("SN PCI INIT: Failed to allocate memory for PCI init\n"); diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c index 8dbbef4a4f47..7195df1da121 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c @@ -184,7 +184,7 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont /* Setup the PMU ATE map */ soft->pbi_int_ate_resource.lowest_free_index = 0; soft->pbi_int_ate_resource.ate = - kzalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL); + kcalloc(soft->pbi_int_ate_size, sizeof(u64), GFP_KERNEL); if (!soft->pbi_int_ate_resource.ate) { kfree(soft); diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h index ffea82a16d2c..b091de77b15b 100644 --- a/arch/microblaze/include/asm/cacheflush.h +++ b/arch/microblaze/include/asm/cacheflush.h @@ -19,7 +19,7 @@ #include #include -/* Look at Documentation/cachetlb.txt */ +/* Look at Documentation/core-api/cachetlb.rst */ /* * Cache handling functions. 
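The kzalloc(n * size) -> kcalloc(n, size) and kmalloc(n * size) -> kmalloc_array(n, size) conversions in the ia64 hunks above all make the same trade: an open-coded multiplication that can silently wrap becomes a checked one that fails the allocation instead. The following is a minimal userspace C sketch of that guard, assuming only standard calloc() as a stand-in for the kernel allocator; calloc_array() is a hypothetical illustration, not the kernel's kcalloc() implementation.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical stand-in for kcalloc(): refuse element counts whose
 * total byte size would wrap around SIZE_MAX, instead of silently
 * returning a buffer smaller than the caller believes it is.
 */
static void *calloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;		/* n * size would overflow */
	return calloc(n, size);		/* zeroed, like kcalloc() */
}

int main(void)
{
	int *ids = calloc_array(16, sizeof(*ids));

	if (!ids)
		return 1;
	printf("ids[15] = %d (zero-initialized)\n", ids[15]);
	free(ids);

	/* A pathological count is rejected rather than wrapped. */
	if (!calloc_array(SIZE_MAX, sizeof(*ids)))
		printf("oversized request rejected\n");
	return 0;
}

kmalloc_array() applies the same overflow check without the zeroing, which is why the treewide conversion is strictly safer than the n * size it replaces.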
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 7074b2215f36..3f9deec70b92 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -22,6 +22,11 @@ config MIPS select GENERIC_CPU_AUTOPROBE select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW + select GENERIC_LIB_ASHLDI3 + select GENERIC_LIB_ASHRDI3 + select GENERIC_LIB_CMPDI2 + select GENERIC_LIB_LSHRDI3 + select GENERIC_LIB_UCMPDI2 select GENERIC_PCI_IOMAP select GENERIC_SCHED_CLOCK if !CAVIUM_OCTEON_SOC select GENERIC_SMP_IDLE_THREAD @@ -36,7 +41,6 @@ config MIPS select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT select HAVE_CBPF_JIT if (!64BIT && !CPU_MICROMIPS) select HAVE_EBPF_JIT if (64BIT && !CPU_MICROMIPS) - select HAVE_CC_STACKPROTECTOR select HAVE_CONTEXT_TRACKING select HAVE_COPY_THREAD_TLS select HAVE_C_RECORDMCOUNT @@ -61,6 +65,7 @@ config MIPS select HAVE_OPROFILE select HAVE_PERF_EVENTS select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP select IRQ_FORCED_THREADING diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c index 4e79dbd54a33..fa75d75b5ba9 100644 --- a/arch/mips/alchemy/board-gpr.c +++ b/arch/mips/alchemy/board-gpr.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c index 6b6f6851df92..d129475fd40d 100644 --- a/arch/mips/alchemy/common/clock.c +++ b/arch/mips/alchemy/common/clock.c @@ -985,7 +985,7 @@ static int __init alchemy_clk_setup_imux(int ctype) return -ENODEV; } - a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL); + a = kcalloc(6, sizeof(*a), GFP_KERNEL); if (!a) return -ENOMEM; diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c index fc482d900ddd..4ca2c28878e0 100644 --- a/arch/mips/alchemy/common/dbdma.c +++ b/arch/mips/alchemy/common/dbdma.c @@ -411,8 +411,8 @@ u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries) * and if we try that first we are likely to not waste larger * slabs of memory. 
*/ - desc_base = (u32)kmalloc(entries * sizeof(au1x_ddma_desc_t), - GFP_KERNEL|GFP_DMA); + desc_base = (u32)kmalloc_array(entries, sizeof(au1x_ddma_desc_t), + GFP_KERNEL|GFP_DMA); if (desc_base == 0) return 0; @@ -1050,7 +1050,7 @@ static int __init dbdma_setup(unsigned int irq, dbdev_tab_t *idtable) { int ret; - dbdev_tab = kzalloc(sizeof(dbdev_tab_t) * DBDEV_TAB_SIZE, GFP_KERNEL); + dbdev_tab = kcalloc(DBDEV_TAB_SIZE, sizeof(dbdev_tab_t), GFP_KERNEL); if (!dbdev_tab) return -ENOMEM; diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c index d77a64f4c78b..1454d9f6ab2d 100644 --- a/arch/mips/alchemy/common/platform.c +++ b/arch/mips/alchemy/common/platform.c @@ -115,7 +115,7 @@ static void __init alchemy_setup_uarts(int ctype) uartclk = clk_get_rate(clk); clk_put(clk); - ports = kzalloc(s * (c + 1), GFP_KERNEL); + ports = kcalloc(s, (c + 1), GFP_KERNEL); if (!ports) { printk(KERN_INFO "Alchemy: no memory for UART data\n"); return; @@ -198,7 +198,7 @@ static unsigned long alchemy_ehci_data[][2] __initdata = { static int __init _new_usbres(struct resource **r, struct platform_device **d) { - *r = kzalloc(sizeof(struct resource) * 2, GFP_KERNEL); + *r = kcalloc(2, sizeof(struct resource), GFP_KERNEL); if (!*r) return -ENOMEM; *d = kzalloc(sizeof(struct platform_device), GFP_KERNEL); diff --git a/arch/mips/alchemy/devboards/platform.c b/arch/mips/alchemy/devboards/platform.c index 4640edab207c..203854ddd1bb 100644 --- a/arch/mips/alchemy/devboards/platform.c +++ b/arch/mips/alchemy/devboards/platform.c @@ -103,7 +103,7 @@ int __init db1x_register_pcmcia_socket(phys_addr_t pcmcia_attr_start, if (stschg_irq) cnt++; - sr = kzalloc(sizeof(struct resource) * cnt, GFP_KERNEL); + sr = kcalloc(cnt, sizeof(struct resource), GFP_KERNEL); if (!sr) return -ENOMEM; @@ -178,7 +178,7 @@ int __init db1x_register_norflash(unsigned long size, int width, return -EINVAL; ret = -ENOMEM; - parts = kzalloc(sizeof(struct mtd_partition) * 5, GFP_KERNEL); + parts = kcalloc(5, sizeof(struct mtd_partition), GFP_KERNEL); if (!parts) goto out; diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c index edfaef0d73a4..a80910d2738c 100644 --- a/arch/mips/bcm47xx/board.c +++ b/arch/mips/bcm47xx/board.c @@ -172,6 +172,8 @@ struct bcm47xx_board_type_list1 bcm47xx_board_list_board_id[] __initconst = { {{BCM47XX_BOARD_NETGEAR_WNDR4000, "Netgear WNDR4000"}, "U12H181T00_NETGEAR"}, {{BCM47XX_BOARD_NETGEAR_WNDR4500V1, "Netgear WNDR4500 V1"}, "U12H189T00_NETGEAR"}, {{BCM47XX_BOARD_NETGEAR_WNDR4500V2, "Netgear WNDR4500 V2"}, "U12H224T00_NETGEAR"}, + {{BCM47XX_BOARD_NETGEAR_WNR1000_V3, "Netgear WNR1000 V3"}, "U12H139T00_NETGEAR"}, + {{BCM47XX_BOARD_NETGEAR_WNR1000_V3, "Netgear WNR1000 V3"}, "U12H139T50_NETGEAR"}, {{BCM47XX_BOARD_NETGEAR_WNR2000, "Netgear WNR2000"}, "U12H114T00_NETGEAR"}, {{BCM47XX_BOARD_NETGEAR_WNR3500L, "Netgear WNR3500L"}, "U12H136T99_NETGEAR"}, {{BCM47XX_BOARD_NETGEAR_WNR3500U, "Netgear WNR3500U"}, "U12H136T00_NETGEAR"}, diff --git a/arch/mips/bcm47xx/buttons.c b/arch/mips/bcm47xx/buttons.c index 88d400d256c4..977990a609ba 100644 --- a/arch/mips/bcm47xx/buttons.c +++ b/arch/mips/bcm47xx/buttons.c @@ -411,6 +411,12 @@ bcm47xx_buttons_netgear_wndr4500v1[] __initconst = { BCM47XX_GPIO_KEY(6, KEY_RESTART), }; +static const struct gpio_keys_button +bcm47xx_buttons_netgear_wnr1000_v3[] __initconst = { + BCM47XX_GPIO_KEY(2, KEY_WPS_BUTTON), + BCM47XX_GPIO_KEY(3, KEY_RESTART), +}; + static const struct gpio_keys_button bcm47xx_buttons_netgear_wnr3500lv1[] __initconst = { 
BCM47XX_GPIO_KEY(4, KEY_RESTART), @@ -670,6 +676,9 @@ int __init bcm47xx_buttons_register(void) case BCM47XX_BOARD_NETGEAR_WNDR4500V1: err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wndr4500v1); break; + case BCM47XX_BOARD_NETGEAR_WNR1000_V3: + err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wnr1000_v3); + break; case BCM47XX_BOARD_NETGEAR_WNR3500L: err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wnr3500lv1); break; diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c index 34a7b3fbdfd9..d85fcdac8bf0 100644 --- a/arch/mips/bcm47xx/leds.c +++ b/arch/mips/bcm47xx/leds.c @@ -497,6 +497,12 @@ bcm47xx_leds_netgear_wndr4500v1[] __initconst = { BCM47XX_GPIO_LED(14, "green", "usb2", 1, LEDS_GPIO_DEFSTATE_OFF), }; +static const struct gpio_led +bcm47xx_leds_netgear_wnr1000_v3[] __initconst = { + BCM47XX_GPIO_LED(0, "blue", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(1, "green", "wps", 0, LEDS_GPIO_DEFSTATE_OFF), +}; + static const struct gpio_led bcm47xx_leds_netgear_wnr3500lv1[] __initconst = { BCM47XX_GPIO_LED(0, "blue", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF), @@ -532,7 +538,7 @@ bcm47xx_leds_simpletech_simpleshare[] __initconst = { * Init **************************************************/ -static struct gpio_led_platform_data bcm47xx_leds_pdata; +static struct gpio_led_platform_data bcm47xx_leds_pdata __initdata; #define bcm47xx_set_pdata(dev_leds) do { \ bcm47xx_leds_pdata.leds = dev_leds; \ @@ -758,6 +764,9 @@ void __init bcm47xx_leds_register(void) case BCM47XX_BOARD_NETGEAR_WNDR4500V1: bcm47xx_set_pdata(bcm47xx_leds_netgear_wndr4500v1); break; + case BCM47XX_BOARD_NETGEAR_WNR1000_V3: + bcm47xx_set_pdata(bcm47xx_leds_netgear_wnr1000_v3); + break; case BCM47XX_BOARD_NETGEAR_WNR3500L: bcm47xx_set_pdata(bcm47xx_leds_netgear_wnr3500lv1); break; diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c index 04790f4e1805..6dec30842b2f 100644 --- a/arch/mips/bmips/dma.c +++ b/arch/mips/bmips/dma.c @@ -94,7 +94,7 @@ static int __init bmips_init_dma_ranges(void) goto out_bad; /* add a dummy (zero) entry at the end as a sentinel */ - bmips_dma_ranges = kzalloc(sizeof(struct bmips_dma_range) * (len + 1), + bmips_dma_ranges = kcalloc(len + 1, sizeof(struct bmips_dma_range), GFP_KERNEL); if (!bmips_dma_ranges) goto out_bad; diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index adce180f3ee4..abe77add8789 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -46,10 +46,13 @@ $(obj)/uart-ath79.c: $(srctree)/arch/mips/ath79/early_printk.c vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o $(obj)/bswapsi.o -extra-y += ashldi3.c bswapsi.c -$(obj)/ashldi3.o $(obj)/bswapsi.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib -$(obj)/ashldi3.c $(obj)/bswapsi.c: $(obj)/%.c: $(srctree)/arch/mips/lib/%.c - $(call cmd,shipped) +extra-y += ashldi3.c +$(obj)/ashldi3.c: $(obj)/%.c: $(srctree)/lib/%.c FORCE + $(call if_changed,shipped) + +extra-y += bswapsi.c +$(obj)/bswapsi.c: $(obj)/%.c: $(srctree)/arch/mips/lib/%.c FORCE + $(call if_changed,shipped) targets := $(notdir $(vmlinuzobjs-y)) diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile index d8787c9a499e..d85f446cc0ce 100644 --- a/arch/mips/boot/dts/brcm/Makefile +++ b/arch/mips/boot/dts/brcm/Makefile @@ -34,4 +34,4 @@ dtb-$(CONFIG_DT_NONE) += \ bcm97425svmb.dtb \ bcm97435svmb.dtb -obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y)) +obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y)) diff --git a/arch/mips/boot/dts/cavium-octeon/Makefile 
b/arch/mips/boot/dts/cavium-octeon/Makefile index 24a8efcd7b03..17aef35f311b 100644 --- a/arch/mips/boot/dts/cavium-octeon/Makefile +++ b/arch/mips/boot/dts/cavium-octeon/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 dtb-$(CONFIG_CAVIUM_OCTEON_SOC) += octeon_3xxx.dtb octeon_68xx.dtb -obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y)) +obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y)) diff --git a/arch/mips/boot/dts/ingenic/Makefile b/arch/mips/boot/dts/ingenic/Makefile index 5b1361a89e02..9cc48441eb71 100644 --- a/arch/mips/boot/dts/ingenic/Makefile +++ b/arch/mips/boot/dts/ingenic/Makefile @@ -3,4 +3,4 @@ dtb-$(CONFIG_JZ4740_QI_LB60) += qi_lb60.dtb dtb-$(CONFIG_JZ4770_GCW0) += gcw0.dtb dtb-$(CONFIG_JZ4780_CI20) += ci20.dtb -obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y)) +obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y)) diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi index cd5185bb90ae..26c6b561d6f7 100644 --- a/arch/mips/boot/dts/ingenic/jz4740.dtsi +++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi @@ -45,6 +45,14 @@ cgu: jz4740-cgu@10000000 { #clock-cells = <1>; }; + watchdog: watchdog@10002000 { + compatible = "ingenic,jz4740-watchdog"; + reg = <0x10002000 0x10>; + + clocks = <&cgu JZ4740_CLK_RTC>; + clock-names = "rtc"; + }; + rtc_dev: rtc@10003000 { compatible = "ingenic,jz4740-rtc"; reg = <0x10003000 0x40>; diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi index b72e53bb7292..aa4e8f75ff5d 100644 --- a/arch/mips/boot/dts/ingenic/jz4780.dtsi +++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi @@ -222,7 +222,10 @@ uart4: serial@10034000 { watchdog: watchdog@10002000 { compatible = "ingenic,jz4780-watchdog"; - reg = <0x10002000 0x100>; + reg = <0x10002000 0x10>; + + clocks = <&cgu JZ4780_CLK_RTCLK>; + clock-names = "rtc"; }; nemc: nemc@13410000 { diff --git a/arch/mips/boot/dts/lantiq/Makefile b/arch/mips/boot/dts/lantiq/Makefile index 51ab9c1dff42..f5dfc06242b9 100644 --- a/arch/mips/boot/dts/lantiq/Makefile +++ b/arch/mips/boot/dts/lantiq/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 dtb-$(CONFIG_DT_EASY50712) += easy50712.dtb -obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y)) +obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y)) diff --git a/arch/mips/boot/dts/mscc/Makefile b/arch/mips/boot/dts/mscc/Makefile index c51164537c02..3c6aed9f5439 100644 --- a/arch/mips/boot/dts/mscc/Makefile +++ b/arch/mips/boot/dts/mscc/Makefile @@ -1,3 +1,3 @@ dtb-$(CONFIG_LEGACY_BOARD_OCELOT) += ocelot_pcb123.dtb -obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y)) +obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y)) diff --git a/arch/mips/boot/dts/mscc/ocelot.dtsi b/arch/mips/boot/dts/mscc/ocelot.dtsi index dd239cab2f9d..4f33dbc67348 100644 --- a/arch/mips/boot/dts/mscc/ocelot.dtsi +++ b/arch/mips/boot/dts/mscc/ocelot.dtsi @@ -91,6 +91,72 @@ uart2: serial@100800 { status = "disabled"; }; + switch@1010000 { + compatible = "mscc,vsc7514-switch"; + reg = <0x1010000 0x10000>, + <0x1030000 0x10000>, + <0x1080000 0x100>, + <0x10d0000 0x10000>, + <0x11e0000 0x100>, + <0x11f0000 0x100>, + <0x1200000 0x100>, + <0x1210000 0x100>, + <0x1220000 0x100>, + <0x1230000 0x100>, + <0x1240000 0x100>, + <0x1250000 0x100>, + <0x1260000 0x100>, + <0x1270000 0x100>, + <0x1280000 0x100>, + <0x1800000 0x80000>, + <0x1880000 0x10000>; + reg-names = "sys", "rew", "qs", "hsio", "port0", + "port1", "port2", "port3", "port4", "port5", + "port6", "port7", "port8", "port9", "port10", + "qsys", "ana"; + interrupts = <21 22>; + 
interrupt-names = "xtr", "inj"; + + ethernet-ports { + #address-cells = <1>; + #size-cells = <0>; + + port0: port@0 { + reg = <0>; + }; + port1: port@1 { + reg = <1>; + }; + port2: port@2 { + reg = <2>; + }; + port3: port@3 { + reg = <3>; + }; + port4: port@4 { + reg = <4>; + }; + port5: port@5 { + reg = <5>; + }; + port6: port@6 { + reg = <6>; + }; + port7: port@7 { + reg = <7>; + }; + port8: port@8 { + reg = <8>; + }; + port9: port@9 { + reg = <9>; + }; + port10: port@10 { + reg = <10>; + }; + }; + }; + reset@1070008 { compatible = "mscc,ocelot-chip-reset"; reg = <0x1070008 0x4>; @@ -113,5 +179,27 @@ uart2_pins: uart2-pins { function = "uart2"; }; }; + + mdio0: mdio@107009c { + #address-cells = <1>; + #size-cells = <0>; + compatible = "mscc,ocelot-miim"; + reg = <0x107009c 0x36>, <0x10700f0 0x8>; + interrupts = <14>; + status = "disabled"; + + phy0: ethernet-phy@0 { + reg = <0>; + }; + phy1: ethernet-phy@1 { + reg = <1>; + }; + phy2: ethernet-phy@2 { + reg = <2>; + }; + phy3: ethernet-phy@3 { + reg = <3>; + }; + }; }; }; diff --git a/arch/mips/boot/dts/mscc/ocelot_pcb123.dts b/arch/mips/boot/dts/mscc/ocelot_pcb123.dts index 29d6414f8886..4ccd65379059 100644 --- a/arch/mips/boot/dts/mscc/ocelot_pcb123.dts +++ b/arch/mips/boot/dts/mscc/ocelot_pcb123.dts @@ -25,3 +25,23 @@ &uart0 { &uart2 { status = "okay"; }; + +&mdio0 { + status = "okay"; +}; + +&port0 { + phy-handle = <&phy0>; +}; + +&port1 { + phy-handle = <&phy1>; +}; + +&port2 { + phy-handle = <&phy2>; +}; + +&port3 { + phy-handle = <&phy3>; +}; diff --git a/arch/mips/boot/dts/mti/Makefile b/arch/mips/boot/dts/mti/Makefile index 3508720cb6d9..b5f7426998b1 100644 --- a/arch/mips/boot/dts/mti/Makefile +++ b/arch/mips/boot/dts/mti/Makefile @@ -2,4 +2,4 @@ dtb-$(CONFIG_MIPS_MALTA) += malta.dtb dtb-$(CONFIG_LEGACY_BOARD_SEAD3) += sead3.dtb -obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y)) +obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y)) diff --git a/arch/mips/boot/dts/netlogic/Makefile b/arch/mips/boot/dts/netlogic/Makefile index d630b27950f0..45af4224494f 100644 --- a/arch/mips/boot/dts/netlogic/Makefile +++ b/arch/mips/boot/dts/netlogic/Makefile @@ -5,4 +5,4 @@ dtb-$(CONFIG_DT_XLP_FVP) += xlp_fvp.dtb dtb-$(CONFIG_DT_XLP_GVP) += xlp_gvp.dtb dtb-$(CONFIG_DT_XLP_RVP) += xlp_rvp.dtb -obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y)) +obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y)) diff --git a/arch/mips/boot/dts/pic32/Makefile b/arch/mips/boot/dts/pic32/Makefile index ba9bcef8fde9..fb57f36324db 100644 --- a/arch/mips/boot/dts/pic32/Makefile +++ b/arch/mips/boot/dts/pic32/Makefile @@ -4,4 +4,4 @@ dtb-$(CONFIG_DTB_PIC32_MZDA_SK) += pic32mzda_sk.dtb dtb-$(CONFIG_DTB_PIC32_NONE) += \ pic32mzda_sk.dtb -obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y)) +obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y)) diff --git a/arch/mips/boot/dts/ralink/Makefile b/arch/mips/boot/dts/ralink/Makefile index 94bee5b38b53..6c26dfa0a903 100644 --- a/arch/mips/boot/dts/ralink/Makefile +++ b/arch/mips/boot/dts/ralink/Makefile @@ -6,4 +6,4 @@ dtb-$(CONFIG_DTB_MT7620A_EVAL) += mt7620a_eval.dtb dtb-$(CONFIG_DTB_OMEGA2P) += omega2p.dtb dtb-$(CONFIG_DTB_VOCORE2) += vocore2.dtb -obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y)) +obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y)) diff --git a/arch/mips/configs/qi_lb60_defconfig b/arch/mips/configs/qi_lb60_defconfig index 3b02ff9a7c64..d8b7211a7b0f 100644 --- a/arch/mips/configs/qi_lb60_defconfig +++ b/arch/mips/configs/qi_lb60_defconfig @@ -72,6 +72,8 @@ CONFIG_POWER_SUPPLY=y CONFIG_BATTERY_JZ4740=y 
CONFIG_CHARGER_GPIO=y # CONFIG_HWMON is not set +CONFIG_WATCHDOG=y +CONFIG_JZ4740_WDT=y CONFIG_MFD_JZ4740_ADC=y CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y diff --git a/arch/mips/dec/time.c b/arch/mips/dec/time.c index a2a150e4fbc2..c38686f89a18 100644 --- a/arch/mips/dec/time.c +++ b/arch/mips/dec/time.c @@ -19,7 +19,7 @@ #include #include -void read_persistent_clock(struct timespec *ts) +void read_persistent_clock64(struct timespec64 *ts) { unsigned int year, mon, day, hour, min, sec, real_year; unsigned long flags; @@ -54,19 +54,20 @@ void read_persistent_clock(struct timespec *ts) year += real_year - 72 + 2000; - ts->tv_sec = mktime(year, mon, day, hour, min, sec); + ts->tv_sec = mktime64(year, mon, day, hour, min, sec); ts->tv_nsec = 0; } /* - * In order to set the CMOS clock precisely, rtc_mips_set_mmss has to + * In order to set the CMOS clock precisely, update_persistent_clock64 has to * be called 500 ms after the second nowtime has started, because when * nowtime is written into the registers of the CMOS clock, it will * jump to the next second precisely 500 ms later. Check the Dallas * DS1287 data sheet for details. */ -int rtc_mips_set_mmss(unsigned long nowtime) +int update_persistent_clock64(struct timespec64 now) { + time64_t nowtime = now.tv_sec; int retval = 0; int real_seconds, real_minutes, cmos_minutes; unsigned char save_control, save_freq_select; @@ -91,8 +92,7 @@ int rtc_mips_set_mmss(unsigned long nowtime) * messing with unknown time zones but requires your * RTC not to be off by more than 15 minutes */ - real_seconds = nowtime % 60; - real_minutes = nowtime / 60; + real_minutes = div_s64_rem(nowtime, 60, &real_seconds); if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1) real_minutes += 30; /* correct for half hour time zone */ real_minutes %= 60; diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 5f74590e0bea..9cdb4e4ce258 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -535,6 +535,13 @@ # define cpu_has_shared_ftlb_entries 0 #endif +#ifdef CONFIG_MIPS_MT_SMP +# define cpu_has_mipsmt_pertccounters \ + (cpu_data[0].options & MIPS_CPU_MT_PER_TC_PERF_COUNTERS) +#else +# define cpu_has_mipsmt_pertccounters 0 +#endif /* CONFIG_MIPS_MT_SMP */ + /* * Guest capabilities */ diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index d39324c4adf1..5b9d02ef4f60 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -418,6 +418,8 @@ enum cpu_type_enum { MBIT_ULL(54) /* CPU shares FTLB RAM with another */ #define MIPS_CPU_SHARED_FTLB_ENTRIES \ MBIT_ULL(55) /* CPU shares FTLB entries with another */ +#define MIPS_CPU_MT_PER_TC_PERF_COUNTERS \ + MBIT_ULL(56) /* CPU has perf counters implemented per TC (MIPSMT ASE) */ /* * CPU ASE encodings diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h index cbf9da7f2f94..0ef8893e07f8 100644 --- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h +++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h @@ -110,6 +110,7 @@ enum bcm47xx_board { BCM47XX_BOARD_NETGEAR_WNDR4000, BCM47XX_BOARD_NETGEAR_WNDR4500V1, BCM47XX_BOARD_NETGEAR_WNDR4500V2, + BCM47XX_BOARD_NETGEAR_WNR1000_V3, BCM47XX_BOARD_NETGEAR_WNR2000, BCM47XX_BOARD_NETGEAR_WNR3500L, BCM47XX_BOARD_NETGEAR_WNR3500U, diff --git a/arch/mips/include/asm/mach-jz4740/platform.h b/arch/mips/include/asm/mach-jz4740/platform.h index 3645974b7f65..c0c932ac72a7 100644 --- 
a/arch/mips/include/asm/mach-jz4740/platform.h +++ b/arch/mips/include/asm/mach-jz4740/platform.h @@ -29,7 +29,6 @@ extern struct platform_device jz4740_i2s_device; extern struct platform_device jz4740_pcm_device; extern struct platform_device jz4740_codec_device; extern struct platform_device jz4740_adc_device; -extern struct platform_device jz4740_wdt_device; extern struct platform_device jz4740_pwm_device; extern struct platform_device jz4740_dma_device; diff --git a/arch/mips/include/asm/mc146818-time.h b/arch/mips/include/asm/mc146818-time.h index 9e1ad26abdc0..cbf5cec345f1 100644 --- a/arch/mips/include/asm/mc146818-time.h +++ b/arch/mips/include/asm/mc146818-time.h @@ -86,7 +86,7 @@ static inline int mc146818_set_rtc_mmss(unsigned long nowtime) return retval; } -static inline unsigned long mc146818_get_cmos_time(void) +static inline time64_t mc146818_get_cmos_time(void) { unsigned int year, mon, day, hour, min, sec; unsigned long flags; @@ -113,7 +113,7 @@ static inline unsigned long mc146818_get_cmos_time(void) spin_unlock_irqrestore(&rtc_lock, flags); year = mc146818_decode_year(year); - return mktime(year, mon, day, hour, min, sec); + return mktime64(year, mon, day, hour, min, sec); } #endif /* __ASM_MC146818_TIME_H */ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index f65859784a4c..ae461d91cd1f 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -685,6 +685,11 @@ #define MIPS_CONF7_IAR (_ULCAST_(1) << 10) #define MIPS_CONF7_AR (_ULCAST_(1) << 16) +/* Config7 Bits specific to MIPS Technologies. */ + +/* Performance counters implemented Per TC */ +#define MTI_CONF7_PTC (_ULCAST_(1) << 19) + /* WatchLo* register definitions */ #define MIPS_WATCHLO_IRW (_ULCAST_(0x7) << 0) diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h index 17d4cd20f18c..b85ec64ee7e9 100644 --- a/arch/mips/include/asm/time.h +++ b/arch/mips/include/asm/time.h @@ -21,15 +21,6 @@ extern spinlock_t rtc_lock; -/* - * RTC ops. By default, they point to weak no-op RTC functions. - * rtc_mips_set_time - reverse the above translation and set time to RTC. - * rtc_mips_set_mmss - similar to rtc_set_time, but only min and sec need - * to be set. Used by RTC sync-up. - */ -extern int rtc_mips_set_time(unsigned long); -extern int rtc_mips_set_mmss(unsigned long); - /* * board specific routines required by time_init(). 
*/ diff --git a/arch/mips/jz4740/platform.c b/arch/mips/jz4740/platform.c index 5b7cdd67a9d9..cbc5f8e87230 100644 --- a/arch/mips/jz4740/platform.c +++ b/arch/mips/jz4740/platform.c @@ -233,22 +233,6 @@ struct platform_device jz4740_adc_device = { .resource = jz4740_adc_resources, }; -/* Watchdog */ -static struct resource jz4740_wdt_resources[] = { - { - .start = JZ4740_WDT_BASE_ADDR, - .end = JZ4740_WDT_BASE_ADDR + 0x10 - 1, - .flags = IORESOURCE_MEM, - }, -}; - -struct platform_device jz4740_wdt_device = { - .name = "jz4740-wdt", - .id = -1, - .num_resources = ARRAY_SIZE(jz4740_wdt_resources), - .resource = jz4740_wdt_resources, -}; - /* PWM */ struct platform_device jz4740_pwm_device = { .name = "jz4740-pwm", diff --git a/arch/mips/jz4740/reset.c b/arch/mips/jz4740/reset.c index 67780c4b6573..5bf0cf44b55f 100644 --- a/arch/mips/jz4740/reset.c +++ b/arch/mips/jz4740/reset.c @@ -12,18 +12,9 @@ * */ -#include -#include -#include -#include - #include -#include -#include - #include "reset.h" -#include "clock.h" static void jz4740_halt(void) { @@ -36,29 +27,7 @@ static void jz4740_halt(void) } } -#define JZ_REG_WDT_DATA 0x00 -#define JZ_REG_WDT_COUNTER_ENABLE 0x04 -#define JZ_REG_WDT_COUNTER 0x08 -#define JZ_REG_WDT_CTRL 0x0c - -static void jz4740_restart(char *command) -{ - void __iomem *wdt_base = ioremap(JZ4740_WDT_BASE_ADDR, 0x0f); - - jz4740_timer_enable_watchdog(); - - writeb(0, wdt_base + JZ_REG_WDT_COUNTER_ENABLE); - - writew(0, wdt_base + JZ_REG_WDT_COUNTER); - writew(0, wdt_base + JZ_REG_WDT_DATA); - writew(BIT(2), wdt_base + JZ_REG_WDT_CTRL); - - writeb(1, wdt_base + JZ_REG_WDT_COUNTER_ENABLE); - jz4740_halt(); -} - void jz4740_reset_init(void) { - _machine_restart = jz4740_restart; _machine_halt = jz4740_halt; } diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index c1cd41456d42..cbe4742d2fff 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -83,7 +83,7 @@ void output_task_defines(void) OFFSET(TASK_FLAGS, task_struct, flags); OFFSET(TASK_MM, task_struct, mm); OFFSET(TASK_PID, task_struct, pid); -#if defined(CONFIG_CC_STACKPROTECTOR) +#if defined(CONFIG_STACKPROTECTOR) OFFSET(TASK_STACK_CANARY, task_struct, stack_canary); #endif DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct)); diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 6b07b739f914..b2509c19cfb5 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -414,6 +414,14 @@ static int __init ftlb_disable(char *s) __setup("noftlb", ftlb_disable); +/* + * Check if the CPU has per tc perf counters + */ +static inline void cpu_set_mt_per_tc_perf(struct cpuinfo_mips *c) +{ + if (read_c0_config7() & MTI_CONF7_PTC) + c->options |= MIPS_CPU_MT_PER_TC_PERF_COUNTERS; +} static inline void check_errata(void) { @@ -1572,6 +1580,7 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_34K; c->writecombine = _CACHE_UNCACHED; __cpu_name[cpu] = "MIPS 34Kc"; + cpu_set_mt_per_tc_perf(c); break; case PRID_IMP_74K: c->cputype = CPU_74K; @@ -1592,6 +1601,7 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_1004K; c->writecombine = _CACHE_UNCACHED; __cpu_name[cpu] = "MIPS 1004Kc"; + cpu_set_mt_per_tc_perf(c); break; case PRID_IMP_1074K: c->cputype = CPU_1074K; @@ -1601,10 +1611,12 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) case PRID_IMP_INTERAPTIV_UP: c->cputype = CPU_INTERAPTIV; __cpu_name[cpu] = "MIPS 
interAptiv"; + cpu_set_mt_per_tc_perf(c); break; case PRID_IMP_INTERAPTIV_MP: c->cputype = CPU_INTERAPTIV; __cpu_name[cpu] = "MIPS interAptiv (multi)"; + cpu_set_mt_per_tc_perf(c); break; case PRID_IMP_PROAPTIV_UP: c->cputype = CPU_PROAPTIV; diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index e42113fe2762..896080b445c2 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -61,7 +61,7 @@ #endif 3: -#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) +#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) PTR_LA t8, __stack_chk_guard LONG_L t9, TASK_STACK_CANARY(a1) LONG_S t9, 0(t8) diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index ee73550f0b9a..413863508f6f 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -129,20 +129,14 @@ static struct mips_pmu mipspmu; #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS -static int cpu_has_mipsmt_pertccounters; - static DEFINE_RWLOCK(pmuint_rwlock); #if defined(CONFIG_CPU_BMIPS5000) #define vpe_id() (cpu_has_mipsmt_pertccounters ? \ 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK)) #else -/* - * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because - * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs. - */ #define vpe_id() (cpu_has_mipsmt_pertccounters ? \ - 0 : smp_processor_id()) + 0 : cpu_vpe_id(¤t_cpu_data)) #endif /* Copied from op_model_mipsxx.c */ @@ -329,7 +323,11 @@ static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc, static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) { + struct perf_event *event = container_of(evt, struct perf_event, hw); struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); +#ifdef CONFIG_MIPS_MT_SMP + unsigned int range = evt->event_base >> 24; +#endif /* CONFIG_MIPS_MT_SMP */ WARN_ON(idx < 0 || idx >= mipspmu.num_counters); @@ -337,11 +335,37 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) (evt->config_base & M_PERFCTL_CONFIG_MASK) | /* Make sure interrupt enabled. */ MIPS_PERFCTRL_IE; - if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) + +#ifdef CONFIG_CPU_BMIPS5000 + { /* enable the counter for the calling thread */ cpuc->saved_ctrl[idx] |= (1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC; + } +#else +#ifdef CONFIG_MIPS_MT_SMP + if (range > V) { + /* The counter is processor wide. Set it up to count all TCs. */ + pr_debug("Enabling perf counter for all TCs\n"); + cpuc->saved_ctrl[idx] |= M_TC_EN_ALL; + } else +#endif /* CONFIG_MIPS_MT_SMP */ + { + unsigned int cpu, ctrl; + /* + * Set up the counter for a particular CPU when event->cpu is + * a valid CPU number. Otherwise set up the counter for the CPU + * scheduling this thread. + */ + cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id(); + + ctrl = M_PERFCTL_VPEID(cpu_vpe_id(&cpu_data[cpu])); + ctrl |= M_TC_EN_VPE; + cpuc->saved_ctrl[idx] |= ctrl; + pr_debug("Enabling perf counter for CPU%d\n", cpu); + } +#endif /* CONFIG_CPU_BMIPS5000 */ /* * We do not actually let the counter run. Leave it until start(). */ @@ -655,13 +679,14 @@ static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev) * event_id. 
*/ #ifdef CONFIG_MIPS_MT_SMP - return ((unsigned int)pev->range << 24) | - (pev->cntr_mask & 0xffff00) | - (pev->event_id & 0xff); -#else - return (pev->cntr_mask & 0xffff00) | - (pev->event_id & 0xff); -#endif + if (num_possible_cpus() > 1) + return ((unsigned int)pev->range << 24) | + (pev->cntr_mask & 0xffff00) | + (pev->event_id & 0xff); + else +#endif /* CONFIG_MIPS_MT_SMP */ + return ((pev->cntr_mask & 0xffff00) | + (pev->event_id & 0xff)); } static const struct mips_perf_event *mipspmu_map_general_event(int idx) @@ -1265,37 +1290,6 @@ static const struct mips_perf_event xlp_cache_map }, }; -#ifdef CONFIG_MIPS_MT_SMP -static void check_and_calc_range(struct perf_event *event, - const struct mips_perf_event *pev) -{ - struct hw_perf_event *hwc = &event->hw; - - if (event->cpu >= 0) { - if (pev->range > V) { - /* - * The user selected an event that is processor - * wide, while expecting it to be VPE wide. - */ - hwc->config_base |= M_TC_EN_ALL; - } else { - /* - * FIXME: cpu_data[event->cpu].vpe_id reports 0 - * for both CPUs. - */ - hwc->config_base |= M_PERFCTL_VPEID(event->cpu); - hwc->config_base |= M_TC_EN_VPE; - } - } else - hwc->config_base |= M_TC_EN_ALL; -} -#else -static void check_and_calc_range(struct perf_event *event, - const struct mips_perf_event *pev) -{ -} -#endif - static int __hw_perf_event_init(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; @@ -1331,10 +1325,6 @@ static int __hw_perf_event_init(struct perf_event *event) */ hwc->config_base = MIPS_PERFCTRL_IE; - /* Calculate range bits and validate it. */ - if (num_possible_cpus() > 1) - check_and_calc_range(event, pev); - hwc->event_base = mipspmu_perf_event_encode(pev); if (PERF_TYPE_RAW == event->attr.type) mutex_unlock(&raw_event_mutex); @@ -1723,7 +1713,6 @@ init_hw_perf_events(void) } #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS - cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19); if (!cpu_has_mipsmt_pertccounters) counters = counters_total_to_per_cpu(counters); #endif diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 3775a8d694fb..8d85046adcc8 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -180,7 +180,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp, return 0; } -#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_STACKPROTECTOR #include unsigned long __stack_chk_guard __read_mostly; EXPORT_SYMBOL(__stack_chk_guard); diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 0c0c23c9c9f5..9f6c3f2aa2e2 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -811,7 +811,7 @@ long arch_ptrace(struct task_struct *child, long request, /* * The odd registers are actually the high * order bits of the values stored in the even - * registers - unless we're using r2k_switch.S. + * registers. */ tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE], addr & 1); @@ -906,7 +906,7 @@ long arch_ptrace(struct task_struct *child, long request, /* * The odd registers are actually the high * order bits of the values stored in the even - * registers - unless we're using r2k_switch.S. + * registers. 
*/ set_fpr32(&fregs[(addr & ~1) - FPR_BASE], addr & 1, data); diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c index f30c381d3e1c..7edc629304c8 100644 --- a/arch/mips/kernel/ptrace32.c +++ b/arch/mips/kernel/ptrace32.c @@ -103,7 +103,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, /* * The odd registers are actually the high * order bits of the values stored in the even - * registers - unless we're using r2k_switch.S. + * registers. */ tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE], addr & 1); @@ -216,7 +216,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, /* * The odd registers are actually the high * order bits of the values stored in the even - * registers - unless we're using r2k_switch.S. + * registers. */ set_fpr32(&fregs[(addr & ~1) - FPR_BASE], addr & 1, data); diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S index 665897139f30..71b1aafae1bb 100644 --- a/arch/mips/kernel/r2300_switch.S +++ b/arch/mips/kernel/r2300_switch.S @@ -36,7 +36,7 @@ LEAF(resume) cpu_save_nonscratch a0 sw ra, THREAD_REG31(a0) -#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) +#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) PTR_LA t8, __stack_chk_guard LONG_L t9, TASK_STACK_CANARY(a1) LONG_S t9, 0(t8) diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index 17cf9341c1cf..58232ae6cfae 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S @@ -31,7 +31,7 @@ cpu_save_nonscratch a0 LONG_S ra, THREAD_REG31(a0) -#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) +#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) PTR_LA t8, __stack_chk_guard LONG_L t9, TASK_STACK_CANARY(a1) LONG_S t9, 0(t8) diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 563188ac6fa2..2c96c0c68116 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -93,7 +93,7 @@ void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type) * If the region reaches the top of the physical address space, adjust * the size slightly so that (start + size) doesn't overflow */ - if (start + size - 1 == (phys_addr_t)ULLONG_MAX) + if (start + size - 1 == PHYS_ADDR_MAX) --size; /* Sanity check */ @@ -376,7 +376,7 @@ static void __init bootmem_init(void) unsigned long reserved_end; unsigned long mapstart = ~0UL; unsigned long bootmap_size; - phys_addr_t ramstart = (phys_addr_t)ULLONG_MAX; + phys_addr_t ramstart = PHYS_ADDR_MAX; bool bootmap_valid = false; int i; diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index a6ebc8135112..bfe02ded25d1 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -34,21 +34,6 @@ DEFINE_SPINLOCK(rtc_lock); EXPORT_SYMBOL(rtc_lock); -int __weak rtc_mips_set_time(unsigned long sec) -{ - return -ENODEV; -} - -int __weak rtc_mips_set_mmss(unsigned long nowtime) -{ - return rtc_mips_set_time(nowtime); -} - -int update_persistent_clock(struct timespec now) -{ - return rtc_mips_set_mmss(now.tv_sec); -} - static int null_perf_irq(void) { return 0; diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 544ea21bfef9..0bef238d2c0c 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -872,7 +872,7 @@ static ssize_t vpe_write(struct file *file, const char __user *buffer, return -ENODEV; if ((count + v->len) > v->plen) { - pr_warn("VPE loader: elf size too big. Perhaps strip uneeded symbols\n"); + pr_warn("VPE loader: elf size too big. 
Perhaps strip unneeded symbols\n"); return -ENOMEM; } diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 0f725e9cee8f..7cd76f93a438 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -1076,7 +1076,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) return -ENOIOCTLCMD; } -int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } diff --git a/arch/mips/lasat/ds1603.c b/arch/mips/lasat/ds1603.c index 8bd5cf820eed..e6ce39fefa78 100644 --- a/arch/mips/lasat/ds1603.c +++ b/arch/mips/lasat/ds1603.c @@ -136,7 +136,7 @@ static void rtc_end_op(void) lasat_ndelay(1000); } -void read_persistent_clock(struct timespec *ts) +void read_persistent_clock64(struct timespec64 *ts) { unsigned long word; unsigned long flags; @@ -152,14 +152,19 @@ void read_persistent_clock(struct timespec *ts) ts->tv_nsec = 0; } -int rtc_mips_set_mmss(unsigned long time) +int update_persistent_clock64(struct timespec64 now) { + time64_t time = now.tv_sec; unsigned long flags; spin_lock_irqsave(&rtc_lock, flags); rtc_init_op(); rtc_write_byte(SET_TIME_CMD); - rtc_write_word(time); + /* + * Due to a hardware limitation, we cast to 'unsigned long' type, + * so it will overflow in year 2106 on 32-bit machines. + */ + rtc_write_word((unsigned long)time); rtc_end_op(); spin_unlock_irqrestore(&rtc_lock, flags); diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c index 6f7422400f32..ead07c243c6a 100644 --- a/arch/mips/lasat/sysctl.c +++ b/arch/mips/lasat/sysctl.c @@ -73,8 +73,16 @@ int proc_dolasatrtc(struct ctl_table *table, int write, if (r) return r; - if (write) - rtc_mips_set_mmss(rtctmp); + if (write) { + /* + * Due to an RTC hardware limitation, we cannot actually + * use the full 64-bit range here.
+ */ + ts.tv_sec = rtctmp; + ts.tv_nsec = 0; + + update_persistent_clock64(ts); + } return 0; } diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index e84e12655fa8..6537e022ef62 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile @@ -16,5 +16,4 @@ obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o # libgcc-style stuff needed in the kernel -obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o multi3.o \ - ucmpdi2.o +obj-y += bswapsi.o bswapdi.o multi3.o diff --git a/arch/mips/lib/ashldi3.c b/arch/mips/lib/ashldi3.c deleted file mode 100644 index 24cd6903e797..000000000000 --- a/arch/mips/lib/ashldi3.c +++ /dev/null @@ -1,30 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include - -#include "libgcc.h" - -long long notrace __ashldi3(long long u, word_type b) -{ - DWunion uu, w; - word_type bm; - - if (b == 0) - return u; - - uu.ll = u; - bm = 32 - b; - - if (bm <= 0) { - w.s.low = 0; - w.s.high = (unsigned int) uu.s.low << -bm; - } else { - const unsigned int carries = (unsigned int) uu.s.low >> bm; - - w.s.low = (unsigned int) uu.s.low << b; - w.s.high = ((unsigned int) uu.s.high << b) | carries; - } - - return w.ll; -} - -EXPORT_SYMBOL(__ashldi3); diff --git a/arch/mips/lib/ashrdi3.c b/arch/mips/lib/ashrdi3.c deleted file mode 100644 index 23f5295af51e..000000000000 --- a/arch/mips/lib/ashrdi3.c +++ /dev/null @@ -1,32 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include - -#include "libgcc.h" - -long long notrace __ashrdi3(long long u, word_type b) -{ - DWunion uu, w; - word_type bm; - - if (b == 0) - return u; - - uu.ll = u; - bm = 32 - b; - - if (bm <= 0) { - /* w.s.high = 1..1 or 0..0 */ - w.s.high = - uu.s.high >> 31; - w.s.low = uu.s.high >> -bm; - } else { - const unsigned int carries = (unsigned int) uu.s.high << bm; - - w.s.high = uu.s.high >> b; - w.s.low = ((unsigned int) uu.s.low >> b) | carries; - } - - return w.ll; -} - -EXPORT_SYMBOL(__ashrdi3); diff --git a/arch/mips/lib/cmpdi2.c b/arch/mips/lib/cmpdi2.c deleted file mode 100644 index 93cfc785927d..000000000000 --- a/arch/mips/lib/cmpdi2.c +++ /dev/null @@ -1,28 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include - -#include "libgcc.h" - -word_type notrace __cmpdi2(long long a, long long b) -{ - const DWunion au = { - .ll = a - }; - const DWunion bu = { - .ll = b - }; - - if (au.s.high < bu.s.high) - return 0; - else if (au.s.high > bu.s.high) - return 2; - - if ((unsigned int) au.s.low < (unsigned int) bu.s.low) - return 0; - else if ((unsigned int) au.s.low > (unsigned int) bu.s.low) - return 2; - - return 1; -} - -EXPORT_SYMBOL(__cmpdi2); diff --git a/arch/mips/lib/lshrdi3.c b/arch/mips/lib/lshrdi3.c deleted file mode 100644 index 914b971aca3b..000000000000 --- a/arch/mips/lib/lshrdi3.c +++ /dev/null @@ -1,30 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include - -#include "libgcc.h" - -long long notrace __lshrdi3(long long u, word_type b) -{ - DWunion uu, w; - word_type bm; - - if (b == 0) - return u; - - uu.ll = u; - bm = 32 - b; - - if (bm <= 0) { - w.s.high = 0; - w.s.low = (unsigned int) uu.s.high >> -bm; - } else { - const unsigned int carries = (unsigned int) uu.s.high << bm; - - w.s.high = (unsigned int) uu.s.high >> b; - w.s.low = ((unsigned int) uu.s.low >> b) | carries; - } - - return w.ll; -} - -EXPORT_SYMBOL(__lshrdi3); diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S index f7327979a8f8..1cc306520a55 100644 --- a/arch/mips/lib/memset.S +++ b/arch/mips/lib/memset.S @@ -95,7 +95,7 @@ sltiu t0, a2, STORSIZE /* very small 
region? */ bnez t0, .Lsmall_memset\@ - andi t0, a0, STORMASK /* aligned? */ + andi t0, a0, STORMASK /* aligned? */ #ifdef CONFIG_CPU_MICROMIPS move t8, a1 /* used by 'swp' instruction */ @@ -103,12 +103,12 @@ #endif #ifndef CONFIG_CPU_DADDI_WORKAROUNDS beqz t0, 1f - PTR_SUBU t0, STORSIZE /* alignment in bytes */ + PTR_SUBU t0, STORSIZE /* alignment in bytes */ #else .set noat li AT, STORSIZE beqz t0, 1f - PTR_SUBU t0, AT /* alignment in bytes */ + PTR_SUBU t0, AT /* alignment in bytes */ .set at #endif @@ -149,7 +149,7 @@ 1: ori t1, a2, 0x3f /* # of full blocks */ xori t1, 0x3f beqz t1, .Lmemset_partial\@ /* no block to fill */ - andi t0, a2, 0x40-STORSIZE + andi t0, a2, 0x40-STORSIZE PTR_ADDU t1, a0 /* end address */ .set reorder @@ -174,7 +174,7 @@ .set at #endif jr t1 - PTR_ADDU a0, t0 /* dest ptr */ + PTR_ADDU a0, t0 /* dest ptr */ .set push .set noreorder @@ -186,7 +186,7 @@ beqz a2, 1f #ifndef CONFIG_CPU_MIPSR6 - PTR_ADDU a0, a2 /* What's left */ + PTR_ADDU a0, a2 /* What's left */ R10KCBARRIER(0(ra)) #ifdef __MIPSEB__ EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@) @@ -194,7 +194,7 @@ EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@) #endif #else - PTR_SUBU t0, $0, a2 + PTR_SUBU t0, $0, a2 PTR_ADDIU t0, 1 STORE_BYTE(0) STORE_BYTE(1) @@ -210,11 +210,11 @@ 0: #endif 1: jr ra - move a2, zero + move a2, zero .Lsmall_memset\@: beqz a2, 2f - PTR_ADDU t1, a0, a2 + PTR_ADDU t1, a0, a2 1: PTR_ADDIU a0, 1 /* fill bytewise */ R10KCBARRIER(0(ra)) @@ -222,7 +222,7 @@ EX(sb, a1, -1(a0), .Lsmall_fixup\@) 2: jr ra /* done */ - move a2, zero + move a2, zero .if __memset == 1 END(memset) .set __memset, 0 @@ -238,7 +238,7 @@ .Lfirst_fixup\@: jr ra - nop + nop .Lfwd_fixup\@: PTR_L t0, TI_TASK($28) @@ -246,7 +246,7 @@ LONG_L t0, THREAD_BUADDR(t0) LONG_ADDU a2, t1 jr ra - LONG_SUBU a2, t0 + LONG_SUBU a2, t0 .Lpartial_fixup\@: PTR_L t0, TI_TASK($28) @@ -254,7 +254,7 @@ LONG_L t0, THREAD_BUADDR(t0) LONG_ADDU a2, a0 jr ra - LONG_SUBU a2, t0 + LONG_SUBU a2, t0 .Llast_fixup\@: jr ra @@ -278,7 +278,7 @@ LEAF(memset) EXPORT_SYMBOL(memset) beqz a1, 1f - move v0, a0 /* result */ + move v0, a0 /* result */ andi a1, 0xff /* spread fillword */ LONG_SLL t1, a1, 8 diff --git a/arch/mips/lib/ucmpdi2.c b/arch/mips/lib/ucmpdi2.c deleted file mode 100644 index c31c78ca4175..000000000000 --- a/arch/mips/lib/ucmpdi2.c +++ /dev/null @@ -1,22 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include - -#include "libgcc.h" - -word_type notrace __ucmpdi2(unsigned long long a, unsigned long long b) -{ - const DWunion au = {.ll = a}; - const DWunion bu = {.ll = b}; - - if ((unsigned int) au.s.high < (unsigned int) bu.s.high) - return 0; - else if ((unsigned int) au.s.high > (unsigned int) bu.s.high) - return 2; - if ((unsigned int) au.s.low < (unsigned int) bu.s.low) - return 0; - else if ((unsigned int) au.s.low > (unsigned int) bu.s.low) - return 2; - return 1; -} - -EXPORT_SYMBOL(__ucmpdi2); diff --git a/arch/mips/loongson64/common/time.c b/arch/mips/loongson64/common/time.c index e1a5382ad47e..0ba53c55ff33 100644 --- a/arch/mips/loongson64/common/time.c +++ b/arch/mips/loongson64/common/time.c @@ -29,7 +29,7 @@ void __init plat_time_init(void) #endif } -void read_persistent_clock(struct timespec *ts) +void read_persistent_clock64(struct timespec64 *ts) { ts->tv_sec = mc146818_get_cmos_time(); ts->tv_nsec = 0; diff --git a/arch/mips/mm/sc-debugfs.c b/arch/mips/mm/sc-debugfs.c index 2e2132d3f5c7..2a116084216f 100644 --- a/arch/mips/mm/sc-debugfs.c +++ b/arch/mips/mm/sc-debugfs.c @@ -31,17 +31,10 @@ static ssize_t sc_prefetch_write(struct file 
*file, const char __user *user_buf, size_t count, loff_t *ppos) { - char buf[32]; - ssize_t buf_size; bool enabled; int err; - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - buf[buf_size] = '\0'; - err = strtobool(buf, &enabled); + err = kstrtobool_from_user(user_buf, count, &enabled); if (err) return err; diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index 66c866740ff2..d22b7edc3886 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -134,7 +134,7 @@ static void __init estimate_frequencies(void) } } -void read_persistent_clock(struct timespec *ts) +void read_persistent_clock64(struct timespec64 *ts) { ts->tv_sec = mc146818_get_cmos_time(); ts->tv_nsec = 0; diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c index c3e4c18ef8d4..7c04b17f4a48 100644 --- a/arch/mips/oprofile/op_model_mipsxx.c +++ b/arch/mips/oprofile/op_model_mipsxx.c @@ -36,7 +36,6 @@ static int perfcount_irq; #endif #ifdef CONFIG_MIPS_MT_SMP -static int cpu_has_mipsmt_pertccounters; #define WHAT (MIPS_PERFCTRL_MT_EN_VPE | \ M_PERFCTL_VPEID(cpu_vpe_id(&current_cpu_data))) #define vpe_id() (cpu_has_mipsmt_pertccounters ? \ @@ -326,7 +325,6 @@ static int __init mipsxx_init(void) } #ifdef CONFIG_MIPS_MT_SMP - cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19); if (!cpu_has_mipsmt_pertccounters) counters = counters_total_to_per_cpu(counters); #endif diff --git a/arch/mips/sibyte/swarm/rtc_m41t81.c b/arch/mips/sibyte/swarm/rtc_m41t81.c index e62466445f08..4ac8ccdf56bb 100644 --- a/arch/mips/sibyte/swarm/rtc_m41t81.c +++ b/arch/mips/sibyte/swarm/rtc_m41t81.c @@ -141,13 +141,13 @@ static int m41t81_write(uint8_t addr, int b) return 0; } -int m41t81_set_time(unsigned long t) +int m41t81_set_time(time64_t t) { struct rtc_time tm; unsigned long flags; /* Note we don't care about the century */ - rtc_time_to_tm(t, &tm); + rtc_time64_to_tm(t, &tm); /* * Note the write order matters as it ensures the correctness.
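The read_persistent_clock64()/update_persistent_clock64() conversions running through these hunks widen the RTC plumbing from unsigned long to time64_t, so 32-bit machines keep a usable clock past 2038 (the lasat comment above notes the remaining unsigned long cast still wraps in 2106). As a rough sketch of the arithmetic an mktime64()-style helper performs, here is a self-contained userspace version of the civil-date-to-epoch conversion; it illustrates the idea and is not the kernel's mktime64() implementation.

#include <stdint.h>
#include <stdio.h>

/*
 * Convert a civil (Gregorian) date to seconds since 1970-01-01 in
 * 64 bits, so the result does not wrap in 2038 (signed 32-bit) or
 * 2106 (unsigned 32-bit). Days-from-civil algorithm: shift the year
 * to start in March so the leap day falls at the end of the year.
 */
static int64_t civil_to_epoch(int year, int mon, int day,
			      int hour, int min, int sec)
{
	int64_t y = year - (mon <= 2);
	int64_t era = (y >= 0 ? y : y - 399) / 400;
	int64_t yoe = y - era * 400;			/* [0, 399] */
	int64_t doy = (153 * (mon + (mon > 2 ? -3 : 9)) + 2) / 5 + day - 1;
	int64_t doe = yoe * 365 + yoe / 4 - yoe / 100 + doy;
	int64_t days = era * 146097 + doe - 719468;	/* days since epoch */

	return days * 86400 + hour * 3600 + min * 60 + sec;
}

int main(void)
{
	/* One second past the unsigned 32-bit limit of 2106-02-07. */
	printf("%lld\n", (long long)civil_to_epoch(2106, 2, 7, 6, 28, 16));
	return 0;
}

The program prints 4294967296, one past what a 32-bit unsigned counter can hold: exactly the wrap the ds1603 comment warns the cast to unsigned long will reintroduce.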
@@ -188,7 +188,7 @@ int m41t81_set_time(unsigned long t) return 0; } -unsigned long m41t81_get_time(void) +time64_t m41t81_get_time(void) { unsigned int year, mon, day, hour, min, sec; unsigned long flags; @@ -218,7 +218,7 @@ unsigned long m41t81_get_time(void) year += 2000; - return mktime(year, mon, day, hour, min, sec); + return mktime64(year, mon, day, hour, min, sec); } int m41t81_probe(void) diff --git a/arch/mips/sibyte/swarm/rtc_xicor1241.c b/arch/mips/sibyte/swarm/rtc_xicor1241.c index 50a82c495427..2dcaaa7e3bfa 100644 --- a/arch/mips/sibyte/swarm/rtc_xicor1241.c +++ b/arch/mips/sibyte/swarm/rtc_xicor1241.c @@ -109,13 +109,13 @@ static int xicor_write(uint8_t addr, int b) } } -int xicor_set_time(unsigned long t) +int xicor_set_time(time64_t t) { struct rtc_time tm; int tmp; unsigned long flags; - rtc_time_to_tm(t, &tm); + rtc_time64_to_tm(t, &tm); tm.tm_year += 1900; spin_lock_irqsave(&rtc_lock, flags); @@ -168,7 +168,7 @@ int xicor_set_time(unsigned long t) return 0; } -unsigned long xicor_get_time(void) +time64_t xicor_get_time(void) { unsigned int year, mon, day, hour, min, sec, y2k; unsigned long flags; @@ -201,7 +201,7 @@ unsigned long xicor_get_time(void) year += (y2k * 100); - return mktime(year, mon, day, hour, min, sec); + return mktime64(year, mon, day, hour, min, sec); } int xicor_probe(void) diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c index 494fb0a475ac..152ca71cc2d7 100644 --- a/arch/mips/sibyte/swarm/setup.c +++ b/arch/mips/sibyte/swarm/setup.c @@ -57,12 +57,12 @@ extern void sb1250_setup(void); #endif extern int xicor_probe(void); -extern int xicor_set_time(unsigned long); -extern unsigned long xicor_get_time(void); +extern int xicor_set_time(time64_t); +extern time64_t xicor_get_time(void); extern int m41t81_probe(void); -extern int m41t81_set_time(unsigned long); -extern unsigned long m41t81_get_time(void); +extern int m41t81_set_time(time64_t); +extern time64_t m41t81_get_time(void); const char *get_system_type(void) { @@ -87,9 +87,9 @@ enum swarm_rtc_type { enum swarm_rtc_type swarm_rtc_type; -void read_persistent_clock(struct timespec *ts) +void read_persistent_clock64(struct timespec64 *ts) { - unsigned long sec; + time64_t sec; switch (swarm_rtc_type) { case RTC_XICOR: @@ -102,15 +102,17 @@ void read_persistent_clock(struct timespec *ts) case RTC_NONE: default: - sec = mktime(2000, 1, 1, 0, 0, 0); + sec = mktime64(2000, 1, 1, 0, 0, 0); break; } ts->tv_sec = sec; ts->tv_nsec = 0; } -int rtc_mips_set_time(unsigned long sec) +int update_persistent_clock64(struct timespec64 now) { + time64_t sec = now.tv_sec; + switch (swarm_rtc_type) { case RTC_XICOR: return xicor_set_time(sec); diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c index 0eb7d1e8821b..dbace1f3e1a9 100644 --- a/arch/mips/sni/time.c +++ b/arch/mips/sni/time.c @@ -171,9 +171,3 @@ void __init plat_time_init(void) } setup_pit_timer(); } - -void read_persistent_clock(struct timespec *ts) -{ - ts->tv_sec = -1; - ts->tv_nsec = 0; -} diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c index fd26fadc8617..ef29a9c2ffd6 100644 --- a/arch/mips/txx9/rbtx4939/setup.c +++ b/arch/mips/txx9/rbtx4939/setup.c @@ -219,7 +219,7 @@ static int __init rbtx4939_led_probe(struct platform_device *pdev) "nand-disk", }; - leds_data = kzalloc(sizeof(*leds_data) * RBTX4939_MAX_7SEGLEDS, + leds_data = kcalloc(RBTX4939_MAX_7SEGLEDS, sizeof(*leds_data), GFP_KERNEL); if (!leds_data) return -ENOMEM; diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 
4d8f64d48597..c480770fabcd 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -275,7 +275,7 @@ config SMP machines, but will use only one CPU of a multiprocessor machine. On a uniprocessor machine, the kernel will run faster if you say N. - See also and the SMP-HOWTO + See also and the SMP-HOWTO available at . If you don't know what to do here, say N. diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index eaba5920234d..9f2b75fe2c2d 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -190,7 +190,7 @@ config PPC select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACER - select HAVE_GCC_PLUGINS + select HAVE_GCC_PLUGINS if GCC_VERSION >= 50200 # plugin support on gcc <= 5.1 is buggy on PPC select HAVE_GENERIC_GUP select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx) select HAVE_IDE @@ -461,23 +461,9 @@ config LD_HEAD_STUB_CATCH If unsure, say "N". -config DISABLE_MPROFILE_KERNEL - bool "Disable use of mprofile-kernel for kernel tracing" - depends on PPC64 && CPU_LITTLE_ENDIAN - default y - help - Selecting this options disables use of the mprofile-kernel ABI for - kernel tracing. That will cause options such as live patching - (CONFIG_LIVEPATCH) which depend on CONFIG_DYNAMIC_FTRACE_WITH_REGS to - be disabled also. - - If you have a toolchain which supports mprofile-kernel, then you can - disable this. Otherwise leave it enabled. If you're not sure, say - "Y". - config MPROFILE_KERNEL depends on PPC64 && CPU_LITTLE_ENDIAN - def_bool !DISABLE_MPROFILE_KERNEL + def_bool $(success,$(srctree)/arch/powerpc/tools/gcc-check-mprofile-kernel.sh $(CC) -I$(srctree)/include -D__KERNEL__) config HOTPLUG_CPU bool "Support for enabling/disabling CPUs" diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 9b52e42e581b..bd06a3ccda31 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -161,18 +161,7 @@ CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64 endif ifdef CONFIG_MPROFILE_KERNEL - ifeq ($(shell $(srctree)/arch/powerpc/tools/gcc-check-mprofile-kernel.sh $(CC) -I$(srctree)/include -D__KERNEL__),OK) - CC_FLAGS_FTRACE := -pg -mprofile-kernel - KBUILD_CPPFLAGS += -DCC_USING_MPROFILE_KERNEL - else - # If the user asked for mprofile-kernel but the toolchain doesn't - # support it, emit a warning and deliberately break the build later - # with mprofile-kernel-not-supported. We would prefer to make this an - # error right here, but then the user would never be able to run - # oldconfig to change their configuration. 
- $(warning Compiler does not support mprofile-kernel, set CONFIG_DISABLE_MPROFILE_KERNEL) - CC_FLAGS_FTRACE := -mprofile-kernel-not-supported - endif + CC_FLAGS_FTRACE := -pg -mprofile-kernel endif CFLAGS-$(CONFIG_CELL_CPU) += $(call cc-option,-mcpu=cell) diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index aa9e785c59c2..7841b8a60657 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -134,7 +134,13 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip); void pnv_power9_force_smt4_catch(void); void pnv_power9_force_smt4_release(void); +/* Transaction memory related */ void tm_enable(void); void tm_disable(void); void tm_abort(uint8_t cause); + +struct kvm_vcpu; +void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr); +void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr); + #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */ diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index e7377b73cfec..1f345a0b6ba2 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -104,6 +104,7 @@ struct kvmppc_vcore { ulong vtb; /* virtual timebase */ ulong conferring_threads; unsigned int halt_poll_ns; + atomic_t online_count; }; struct kvmppc_vcpu_book3s { @@ -209,6 +210,7 @@ extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec); extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags); +extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac); extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper, u32 val); extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); @@ -256,6 +258,21 @@ extern int kvmppc_hcall_impl_pr(unsigned long cmd); extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd); extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu); extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu); + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu); +void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu); +void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu); +void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu); +#else +static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {} +static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {} +static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {} +static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {} +#endif + +void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); + extern int kvm_irq_bypass; static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) @@ -274,12 +291,12 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) { - vcpu->arch.gpr[num] = val; + vcpu->arch.regs.gpr[num] = val; } static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) { - return vcpu->arch.gpr[num]; + return vcpu->arch.regs.gpr[num]; } static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) @@ -294,42 +311,42 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val) { - vcpu->arch.xer = val; + vcpu->arch.regs.xer = val; } static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu) { - return vcpu->arch.xer; + 
return vcpu->arch.regs.xer; } static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) { - vcpu->arch.ctr = val; + vcpu->arch.regs.ctr = val; } static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) { - return vcpu->arch.ctr; + return vcpu->arch.regs.ctr; } static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) { - vcpu->arch.lr = val; + vcpu->arch.regs.link = val; } static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) { - return vcpu->arch.lr; + return vcpu->arch.regs.link; } static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) { - vcpu->arch.pc = val; + vcpu->arch.regs.nip = val; } static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) { - return vcpu->arch.pc; + return vcpu->arch.regs.nip; } static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu); diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index c424e44f4c00..dc435a5af7d6 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -483,15 +483,15 @@ static inline u64 sanitize_msr(u64 msr) static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu) { vcpu->arch.cr = vcpu->arch.cr_tm; - vcpu->arch.xer = vcpu->arch.xer_tm; - vcpu->arch.lr = vcpu->arch.lr_tm; - vcpu->arch.ctr = vcpu->arch.ctr_tm; + vcpu->arch.regs.xer = vcpu->arch.xer_tm; + vcpu->arch.regs.link = vcpu->arch.lr_tm; + vcpu->arch.regs.ctr = vcpu->arch.ctr_tm; vcpu->arch.amr = vcpu->arch.amr_tm; vcpu->arch.ppr = vcpu->arch.ppr_tm; vcpu->arch.dscr = vcpu->arch.dscr_tm; vcpu->arch.tar = vcpu->arch.tar_tm; - memcpy(vcpu->arch.gpr, vcpu->arch.gpr_tm, - sizeof(vcpu->arch.gpr)); + memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm, + sizeof(vcpu->arch.regs.gpr)); vcpu->arch.fp = vcpu->arch.fp_tm; vcpu->arch.vr = vcpu->arch.vr_tm; vcpu->arch.vrsave = vcpu->arch.vrsave_tm; @@ -500,15 +500,15 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu) static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu) { vcpu->arch.cr_tm = vcpu->arch.cr; - vcpu->arch.xer_tm = vcpu->arch.xer; - vcpu->arch.lr_tm = vcpu->arch.lr; - vcpu->arch.ctr_tm = vcpu->arch.ctr; + vcpu->arch.xer_tm = vcpu->arch.regs.xer; + vcpu->arch.lr_tm = vcpu->arch.regs.link; + vcpu->arch.ctr_tm = vcpu->arch.regs.ctr; vcpu->arch.amr_tm = vcpu->arch.amr; vcpu->arch.ppr_tm = vcpu->arch.ppr; vcpu->arch.dscr_tm = vcpu->arch.dscr; vcpu->arch.tar_tm = vcpu->arch.tar; - memcpy(vcpu->arch.gpr_tm, vcpu->arch.gpr, - sizeof(vcpu->arch.gpr)); + memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr, + sizeof(vcpu->arch.regs.gpr)); vcpu->arch.fp_tm = vcpu->arch.fp; vcpu->arch.vr_tm = vcpu->arch.vr; vcpu->arch.vrsave_tm = vcpu->arch.vrsave; diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h index bc6e29e4dfd4..d513e3ed1c65 100644 --- a/arch/powerpc/include/asm/kvm_booke.h +++ b/arch/powerpc/include/asm/kvm_booke.h @@ -36,12 +36,12 @@ static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) { - vcpu->arch.gpr[num] = val; + vcpu->arch.regs.gpr[num] = val; } static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) { - return vcpu->arch.gpr[num]; + return vcpu->arch.regs.gpr[num]; } static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) @@ -56,12 +56,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val) { - vcpu->arch.xer = val; + vcpu->arch.regs.xer = val; } static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu) { - return vcpu->arch.xer; + return 
vcpu->arch.regs.xer; } static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) @@ -72,32 +72,32 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) { - vcpu->arch.ctr = val; + vcpu->arch.regs.ctr = val; } static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) { - return vcpu->arch.ctr; + return vcpu->arch.regs.ctr; } static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) { - vcpu->arch.lr = val; + vcpu->arch.regs.link = val; } static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) { - return vcpu->arch.lr; + return vcpu->arch.regs.link; } static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) { - vcpu->arch.pc = val; + vcpu->arch.regs.nip = val; } static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) { - return vcpu->arch.pc; + return vcpu->arch.regs.nip; } static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 17498e9a26e4..fa4efa7e88f7 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -269,7 +269,6 @@ struct kvm_arch { unsigned long host_lpcr; unsigned long sdr1; unsigned long host_sdr1; - int tlbie_lock; unsigned long lpcr; unsigned long vrma_slb_v; int mmu_ready; @@ -454,6 +453,12 @@ struct mmio_hpte_cache { #define KVMPPC_VSX_COPY_WORD 1 #define KVMPPC_VSX_COPY_DWORD 2 #define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP 3 +#define KVMPPC_VSX_COPY_WORD_LOAD_DUMP 4 + +#define KVMPPC_VMX_COPY_BYTE 8 +#define KVMPPC_VMX_COPY_HWORD 9 +#define KVMPPC_VMX_COPY_WORD 10 +#define KVMPPC_VMX_COPY_DWORD 11 struct openpic; @@ -486,7 +491,7 @@ struct kvm_vcpu_arch { struct kvmppc_book3s_shadow_vcpu *shadow_vcpu; #endif - ulong gpr[32]; + struct pt_regs regs; struct thread_fp_state fp; @@ -521,14 +526,10 @@ struct kvm_vcpu_arch { u32 qpr[32]; #endif - ulong pc; - ulong ctr; - ulong lr; #ifdef CONFIG_PPC_BOOK3S ulong tar; #endif - ulong xer; u32 cr; #ifdef CONFIG_PPC_BOOK3S @@ -626,7 +627,6 @@ struct kvm_vcpu_arch { struct thread_vr_state vr_tm; u32 vrsave_tm; /* also USPRG0 */ - #endif #ifdef CONFIG_KVM_EXIT_TIMING @@ -681,16 +681,17 @@ struct kvm_vcpu_arch { * Number of simulations for vsx. * If we use 2*8bytes to simulate 1*16bytes, * then the number should be 2 and - * mmio_vsx_copy_type=KVMPPC_VSX_COPY_DWORD. + * mmio_copy_type=KVMPPC_VSX_COPY_DWORD. * If we use 4*4bytes to simulate 1*16bytes, * the number should be 4 and * mmio_vsx_copy_type=KVMPPC_VSX_COPY_WORD. 
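
Since every consumer goes through the inline accessors above, collapsing the separate pc/ctr/lr/xer fields into the embedded struct pt_regs only touches the accessor bodies (and the asm-offsets entries later in this series); callers compile unchanged. A minimal sketch of that insulation, using a hypothetical helper that is not part of the patch:

    /*
     * Emulation code that advances the guest PC never names the
     * underlying field, so it is indifferent to whether the storage
     * is arch.pc or arch.regs.nip.
     */
    static void kvmppc_skip_emulated_insn(struct kvm_vcpu *vcpu)
    {
            kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
    }
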
*/ u8 mmio_vsx_copy_nums; u8 mmio_vsx_offset; - u8 mmio_vsx_copy_type; u8 mmio_vsx_tx_sx_enabled; u8 mmio_vmx_copy_nums; + u8 mmio_vmx_offset; + u8 mmio_copy_type; u8 osi_needed; u8 osi_enabled; u8 papr_enabled; @@ -772,6 +773,8 @@ struct kvm_vcpu_arch { u64 busy_preempt; u32 emul_inst; + + u32 online; #endif #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index abe7032cdb54..e991821dd7fa 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -52,7 +52,7 @@ enum emulation_result { EMULATE_EXIT_USER, /* emulation requires exit to user-space */ }; -enum instruction_type { +enum instruction_fetch_type { INST_GENERIC, INST_SC, /* system call */ }; @@ -81,10 +81,10 @@ extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, int is_default_endian, int mmio_sign_extend); -extern int kvmppc_handle_load128_by2x64(struct kvm_run *run, - struct kvm_vcpu *vcpu, unsigned int rt, int is_default_endian); -extern int kvmppc_handle_store128_by2x64(struct kvm_run *run, - struct kvm_vcpu *vcpu, unsigned int rs, int is_default_endian); +extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int rt, unsigned int bytes, int is_default_endian); +extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int rs, unsigned int bytes, int is_default_endian); extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, u64 val, unsigned int bytes, int is_default_endian); @@ -93,7 +93,7 @@ extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, int is_default_endian); extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, - enum instruction_type type, u32 *inst); + enum instruction_fetch_type type, u32 *inst); extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data); @@ -265,6 +265,8 @@ union kvmppc_one_reg { vector128 vval; u64 vsxval[2]; u32 vsx32val[4]; + u16 vsx16val[8]; + u8 vsx8val[16]; struct { u64 addr; u64 length; @@ -324,13 +326,14 @@ struct kvmppc_ops { int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info); int (*set_smt_mode)(struct kvm *kvm, unsigned long mode, unsigned long flags); + void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr); }; extern struct kvmppc_ops *kvmppc_hv_ops; extern struct kvmppc_ops *kvmppc_pr_ops; static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu, - enum instruction_type type, u32 *inst) + enum instruction_fetch_type type, u32 *inst) { int ret = EMULATE_DONE; u32 fetched_inst; diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index d8374f984f39..d61b0818e267 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h @@ -14,7 +14,7 @@ #include -#ifdef CC_USING_MPROFILE_KERNEL +#ifdef CONFIG_MPROFILE_KERNEL #define MODULE_ARCH_VERMAGIC_FTRACE "mprofile-kernel " #else #define MODULE_ARCH_VERMAGIC_FTRACE "" diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 75c5b2cd9d66..562568414cf4 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -385,6 +385,7 @@ #define SPRN_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */ #define SPRN_PSSCR_PR 0x337 /* PSSCR ISA 3.0, privileged mode access */ #define SPRN_PMCR 0x374 /* Power Management Control 
Register */ +#define SPRN_RWMR 0x375 /* Region-Weighting Mode Register */ /* HFSCR and FSCR bit numbers are the same */ #define FSCR_SCV_LG 12 /* Enable System Call Vectored */ diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 833ed9a16adf..1b32b56a03d3 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -633,6 +633,7 @@ struct kvm_ppc_cpu_char { #define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd) #define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe) +#define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf) /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 9fc9e0977009..0a0544335950 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -426,20 +426,20 @@ int main(void) OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack); OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid); OFFSET(VCPU_GUEST_PID, kvm_vcpu, arch.pid); - OFFSET(VCPU_GPRS, kvm_vcpu, arch.gpr); + OFFSET(VCPU_GPRS, kvm_vcpu, arch.regs.gpr); OFFSET(VCPU_VRSAVE, kvm_vcpu, arch.vrsave); OFFSET(VCPU_FPRS, kvm_vcpu, arch.fp.fpr); #ifdef CONFIG_ALTIVEC OFFSET(VCPU_VRS, kvm_vcpu, arch.vr.vr); #endif - OFFSET(VCPU_XER, kvm_vcpu, arch.xer); - OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr); - OFFSET(VCPU_LR, kvm_vcpu, arch.lr); + OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer); + OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr); + OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link); #ifdef CONFIG_PPC_BOOK3S OFFSET(VCPU_TAR, kvm_vcpu, arch.tar); #endif OFFSET(VCPU_CR, kvm_vcpu, arch.cr); - OFFSET(VCPU_PC, kvm_vcpu, arch.pc); + OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr); OFFSET(VCPU_SRR0, kvm_vcpu, arch.shregs.srr0); @@ -696,10 +696,10 @@ int main(void) #else /* CONFIG_PPC_BOOK3S */ OFFSET(VCPU_CR, kvm_vcpu, arch.cr); - OFFSET(VCPU_XER, kvm_vcpu, arch.xer); - OFFSET(VCPU_LR, kvm_vcpu, arch.lr); - OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr); - OFFSET(VCPU_PC, kvm_vcpu, arch.pc); + OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer); + OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link); + OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr); + OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip); OFFSET(VCPU_SPRG9, kvm_vcpu, arch.sprg9); OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst); OFFSET(VCPU_FAULT_DEAR, kvm_vcpu, arch.fault_dear); diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 1b7419579820..b8d61e019d06 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -466,7 +466,7 @@ static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs, return (unsigned long)&stubs[i]; } -#ifdef CC_USING_MPROFILE_KERNEL +#ifdef CONFIG_MPROFILE_KERNEL static bool is_mprofile_mcount_callsite(const char *name, u32 *instruction) { if (strcmp("_mcount", name)) @@ -753,7 +753,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, #ifdef CONFIG_DYNAMIC_FTRACE -#ifdef CC_USING_MPROFILE_KERNEL +#ifdef CONFIG_MPROFILE_KERNEL #define PACATOC offsetof(struct paca_struct, kernel_toc) diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c index f915db93cd42..44d66c33d59d 100644 --- a/arch/powerpc/kernel/rtasd.c +++ b/arch/powerpc/kernel/rtasd.c @@ -559,7 +559,8 @@ static int __init rtas_event_scan_init(void) rtas_error_log_max = rtas_get_error_log_max(); rtas_error_log_buffer_max = rtas_error_log_max + 
sizeof(int); - rtas_log_buf = vmalloc(rtas_error_log_buffer_max*LOG_NUMBER); + rtas_log_buf = vmalloc(array_size(LOG_NUMBER, + rtas_error_log_buffer_max)); if (!rtas_log_buf) { printk(KERN_ERR "rtasd: no memory\n"); return -ENOMEM; diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index c076a32093fd..4bfbb54dee51 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -144,7 +144,7 @@ __ftrace_make_nop(struct module *mod, return -EINVAL; } -#ifdef CC_USING_MPROFILE_KERNEL +#ifdef CONFIG_MPROFILE_KERNEL /* When using -mkernel_profile there is no load to jump over */ pop = PPC_INST_NOP; @@ -188,7 +188,7 @@ __ftrace_make_nop(struct module *mod, pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op); return -EINVAL; } -#endif /* CC_USING_MPROFILE_KERNEL */ +#endif /* CONFIG_MPROFILE_KERNEL */ if (patch_instruction((unsigned int *)ip, pop)) { pr_err("Patching NOP failed.\n"); @@ -324,7 +324,7 @@ int ftrace_make_nop(struct module *mod, * They should effectively be a NOP, and follow formal constraints, * depending on the ABI. Return false if they don't. */ -#ifndef CC_USING_MPROFILE_KERNEL +#ifndef CONFIG_MPROFILE_KERNEL static int expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1) { diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index b44ec104a5a1..d2205b97628c 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -791,7 +791,7 @@ static int __init vdso_init(void) #ifdef CONFIG_VDSO32 /* Make sure pages are in the correct state */ - vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 2), + vdso32_pagelist = kcalloc(vdso32_pages + 2, sizeof(struct page *), GFP_KERNEL); BUG_ON(vdso32_pagelist == NULL); for (i = 0; i < vdso32_pages; i++) { @@ -805,7 +805,7 @@ static int __init vdso_init(void) #endif #ifdef CONFIG_PPC64 - vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 2), + vdso64_pagelist = kcalloc(vdso64_pages + 2, sizeof(struct page *), GFP_KERNEL); BUG_ON(vdso64_pagelist == NULL); for (i = 0; i < vdso64_pages; i++) { diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index 4b19da8c87ae..f872c04bb5b1 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -63,6 +63,9 @@ kvm-pr-y := \ book3s_64_mmu.o \ book3s_32_mmu.o +kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \ + tm.o + ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \ book3s_rmhandlers.o diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 97d4a112648f..edaf4720d156 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -134,7 +134,7 @@ void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) { kvmppc_unfixup_split_real(vcpu); kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu)); - kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags); + kvmppc_set_srr1(vcpu, (kvmppc_get_msr(vcpu) & ~0x783f0000ul) | flags); kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec); vcpu->arch.mmu.reset_msr(vcpu); } @@ -256,18 +256,15 @@ void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar, { kvmppc_set_dar(vcpu, dar); kvmppc_set_dsisr(vcpu, flags); - kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); + kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0); } -EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage); /* used by kvm_hv */ +EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage); void 
kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags) { - u64 msr = kvmppc_get_msr(vcpu); - msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT); - msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT); - kvmppc_set_msr_fast(vcpu, msr); - kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE); + kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags); } +EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage); static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) @@ -450,8 +447,8 @@ int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid, return r; } -int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, - u32 *inst) +int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, + enum instruction_fetch_type type, u32 *inst) { ulong pc = kvmppc_get_pc(vcpu); int r; @@ -509,8 +506,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; - vcpu_load(vcpu); - regs->pc = kvmppc_get_pc(vcpu); regs->cr = kvmppc_get_cr(vcpu); regs->ctr = kvmppc_get_ctr(vcpu); @@ -532,7 +527,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) regs->gpr[i] = kvmppc_get_gpr(vcpu, i); - vcpu_put(vcpu); return 0; } @@ -540,8 +534,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; - vcpu_load(vcpu); - kvmppc_set_pc(vcpu, regs->pc); kvmppc_set_cr(vcpu, regs->cr); kvmppc_set_ctr(vcpu, regs->ctr); @@ -562,7 +554,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) kvmppc_set_gpr(vcpu, i, regs->gpr[i]); - vcpu_put(vcpu); return 0; } diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h index 4ad5e287b8bc..14ef03501d21 100644 --- a/arch/powerpc/kvm/book3s.h +++ b/arch/powerpc/kvm/book3s.h @@ -31,4 +31,10 @@ extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, extern int kvmppc_book3s_init_pr(void); extern void kvmppc_book3s_exit_pr(void); +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +extern void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val); +#else +static inline void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) {} +#endif + #endif diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c index 1992676c7a94..45c8ea4a0487 100644 --- a/arch/powerpc/kvm/book3s_32_mmu.c +++ b/arch/powerpc/kvm/book3s_32_mmu.c @@ -52,7 +52,7 @@ static inline bool check_debug_ip(struct kvm_vcpu *vcpu) { #ifdef DEBUG_MMU_PTE_IP - return vcpu->arch.pc == DEBUG_MMU_PTE_IP; + return vcpu->arch.regs.nip == DEBUG_MMU_PTE_IP; #else return true; #endif diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index a93d719edc90..cf9d686e8162 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -38,7 +38,16 @@ static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu) { - kvmppc_set_msr(vcpu, vcpu->arch.intr_msr); + unsigned long msr = vcpu->arch.intr_msr; + unsigned long cur_msr = kvmppc_get_msr(vcpu); + + /* If transactional, change to suspend mode on IRQ delivery */ + if (MSR_TM_TRANSACTIONAL(cur_msr)) + msr |= MSR_TS_S; + else + msr |= cur_msr & MSR_TS_MASK; + + kvmppc_set_msr(vcpu, msr); } static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe( diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index a670fa5fbe50..7f3a8cf5d66f 100644 --- 
a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -108,7 +108,7 @@ int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order) npte = 1ul << (order - 4); /* Allocate reverse map array */ - rev = vmalloc(sizeof(struct revmap_entry) * npte); + rev = vmalloc(array_size(npte, sizeof(struct revmap_entry))); if (!rev) { if (cma) kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT)); @@ -272,6 +272,9 @@ int kvmppc_mmu_hv_init(void) if (!cpu_has_feature(CPU_FTR_HVMODE)) return -EINVAL; + if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE)) + return -EINVAL; + /* POWER7 has 10-bit LPIDs (12-bit in POWER8) */ host_lpid = mfspr(SPRN_LPID); rsvd_lpid = LPID_RSVD; diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 481da8f93fa4..176f911ee983 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -139,44 +139,24 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, return 0; } -#ifdef CONFIG_PPC_64K_PAGES -#define MMU_BASE_PSIZE MMU_PAGE_64K -#else -#define MMU_BASE_PSIZE MMU_PAGE_4K -#endif - static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, unsigned int pshift) { - int psize = MMU_BASE_PSIZE; + unsigned long psize = PAGE_SIZE; - if (pshift >= PUD_SHIFT) - psize = MMU_PAGE_1G; - else if (pshift >= PMD_SHIFT) - psize = MMU_PAGE_2M; - addr &= ~0xfffUL; - addr |= mmu_psize_defs[psize].ap << 5; - asm volatile("ptesync": : :"memory"); - asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1) - : : "r" (addr), "r" (kvm->arch.lpid) : "memory"); - if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) - asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1) - : : "r" (addr), "r" (kvm->arch.lpid) : "memory"); - asm volatile("eieio ; tlbsync ; ptesync": : :"memory"); + if (pshift) + psize = 1UL << pshift; + + addr &= ~(psize - 1); + radix__flush_tlb_lpid_page(kvm->arch.lpid, addr, psize); } -static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr) +static void kvmppc_radix_flush_pwc(struct kvm *kvm) { - unsigned long rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */ - - asm volatile("ptesync": : :"memory"); - /* RIC=1 PRS=0 R=1 IS=2 */ - asm volatile(PPC_TLBIE_5(%0, %1, 1, 0, 1) - : : "r" (rb), "r" (kvm->arch.lpid) : "memory"); - asm volatile("eieio ; tlbsync ; ptesync": : :"memory"); + radix__flush_pwc_lpid(kvm->arch.lpid); } -unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep, +static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep, unsigned long clr, unsigned long set, unsigned long addr, unsigned int shift) { @@ -228,6 +208,167 @@ static void kvmppc_pmd_free(pmd_t *pmdp) kmem_cache_free(kvm_pmd_cache, pmdp); } +static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, + unsigned long gpa, unsigned int shift) + +{ + unsigned long page_size = 1ul << shift; + unsigned long old; + + old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift); + kvmppc_radix_tlbie_page(kvm, gpa, shift); + if (old & _PAGE_DIRTY) { + unsigned long gfn = gpa >> PAGE_SHIFT; + struct kvm_memory_slot *memslot; + + memslot = gfn_to_memslot(kvm, gfn); + if (memslot && memslot->dirty_bitmap) + kvmppc_update_dirty_map(memslot, gfn, page_size); + } +} + +/* + * kvmppc_free_p?d are used to free existing page tables, and recursively + * descend and clear and free children. + * Callers are responsible for flushing the PWC. + * + * When page tables are being unmapped/freed as part of page fault path + * (full == false), ptes are not expected. 
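
The vmalloc(array_size(...)) conversion above, like the kcalloc() and vzalloc(array_size(...)) conversions elsewhere in this series, replaces an open-coded n * size multiplication with the overflow-checking helper from <linux/overflow.h>. array_size() saturates to SIZE_MAX when the product overflows, so the allocation fails cleanly instead of returning a buffer shorter than the caller believes it to be. The idiom in minimal form:

    #include <linux/overflow.h>
    #include <linux/vmalloc.h>

    /* SIZE_MAX on overflow, so vmalloc() returns NULL rather than
     * handing back an undersized reverse-map array. */
    rev = vmalloc(array_size(npte, sizeof(struct revmap_entry)));
    if (!rev)
            return -ENOMEM;
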
There is code to unmap them + * and emit a warning if encountered, but there may already be data + * corruption due to the unexpected mappings. + */ +static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full) +{ + if (full) { + memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE); + } else { + pte_t *p = pte; + unsigned long it; + + for (it = 0; it < PTRS_PER_PTE; ++it, ++p) { + if (pte_val(*p) == 0) + continue; + WARN_ON_ONCE(1); + kvmppc_unmap_pte(kvm, p, + pte_pfn(*p) << PAGE_SHIFT, + PAGE_SHIFT); + } + } + + kvmppc_pte_free(pte); +} + +static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full) +{ + unsigned long im; + pmd_t *p = pmd; + + for (im = 0; im < PTRS_PER_PMD; ++im, ++p) { + if (!pmd_present(*p)) + continue; + if (pmd_is_leaf(*p)) { + if (full) { + pmd_clear(p); + } else { + WARN_ON_ONCE(1); + kvmppc_unmap_pte(kvm, (pte_t *)p, + pte_pfn(*(pte_t *)p) << PAGE_SHIFT, + PMD_SHIFT); + } + } else { + pte_t *pte; + + pte = pte_offset_map(p, 0); + kvmppc_unmap_free_pte(kvm, pte, full); + pmd_clear(p); + } + } + kvmppc_pmd_free(pmd); +} + +static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud) +{ + unsigned long iu; + pud_t *p = pud; + + for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) { + if (!pud_present(*p)) + continue; + if (pud_huge(*p)) { + pud_clear(p); + } else { + pmd_t *pmd; + + pmd = pmd_offset(p, 0); + kvmppc_unmap_free_pmd(kvm, pmd, true); + pud_clear(p); + } + } + pud_free(kvm->mm, pud); +} + +void kvmppc_free_radix(struct kvm *kvm) +{ + unsigned long ig; + pgd_t *pgd; + + if (!kvm->arch.pgtable) + return; + pgd = kvm->arch.pgtable; + for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) { + pud_t *pud; + + if (!pgd_present(*pgd)) + continue; + pud = pud_offset(pgd, 0); + kvmppc_unmap_free_pud(kvm, pud); + pgd_clear(pgd); + } + pgd_free(kvm->mm, kvm->arch.pgtable); + kvm->arch.pgtable = NULL; +} + +static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd, + unsigned long gpa) +{ + pte_t *pte = pte_offset_kernel(pmd, 0); + + /* + * Clearing the pmd entry then flushing the PWC ensures that the pte + * page can no longer be cached by the MMU, so it can be freed without + * flushing the PWC again. + */ + pmd_clear(pmd); + kvmppc_radix_flush_pwc(kvm); + + kvmppc_unmap_free_pte(kvm, pte, false); +} + +static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud, + unsigned long gpa) +{ + pmd_t *pmd = pmd_offset(pud, 0); + + /* + * Clearing the pud entry then flushing the PWC ensures that the pmd + * page and any child pte pages can no longer be cached by the MMU, + * so they can be freed without flushing the PWC again. + */ + pud_clear(pud); + kvmppc_radix_flush_pwc(kvm); + + kvmppc_unmap_free_pmd(kvm, pmd, false); +} + +/* + * There are a number of bits which may differ between different faults to + * the same partition scope entry: the RC bits, in the course of cleaning and + * aging, and the write bit, since either the access could have been + * upgraded or a read fault could happen concurrently with a write fault + * that sets those bits first.
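
The ordering inside the two *_entry_table() helpers above is the point of the restructuring: the upper-level entry is cleared and the page-walk cache flushed before the orphaned table is walked and freed, so the MMU can never walk through memory that has already been returned. Condensed from kvmppc_unmap_free_pmd_entry_table():

    pmd_clear(pmd);                          /* 1. unlink the pte page  */
    kvmppc_radix_flush_pwc(kvm);             /* 2. drop any cached walk */
    kvmppc_unmap_free_pte(kvm, pte, false);  /* 3. now safe to free it  */
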
+ */ +#define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)) + static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, unsigned int level, unsigned long mmu_seq) { @@ -235,7 +376,6 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, pud_t *pud, *new_pud = NULL; pmd_t *pmd, *new_pmd = NULL; pte_t *ptep, *new_ptep = NULL; - unsigned long old; int ret; /* Traverse the guest's 2nd-level tree, allocate new levels needed */ @@ -273,42 +413,39 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, if (pud_huge(*pud)) { unsigned long hgpa = gpa & PUD_MASK; + /* Check if we raced and someone else has set the same thing */ + if (level == 2) { + if (pud_raw(*pud) == pte_raw(pte)) { + ret = 0; + goto out_unlock; + } + /* Valid 1GB page here already, add our extra bits */ + WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) & + PTE_BITS_MUST_MATCH); + kvmppc_radix_update_pte(kvm, (pte_t *)pud, + 0, pte_val(pte), hgpa, PUD_SHIFT); + ret = 0; + goto out_unlock; + } /* * If we raced with another CPU which has just put * a 1GB pte in after we saw a pmd page, try again. */ - if (level <= 1 && !new_pmd) { + if (!new_pmd) { ret = -EAGAIN; goto out_unlock; } - /* Check if we raced and someone else has set the same thing */ - if (level == 2 && pud_raw(*pud) == pte_raw(pte)) { - ret = 0; - goto out_unlock; - } /* Valid 1GB page here already, remove it */ - old = kvmppc_radix_update_pte(kvm, (pte_t *)pud, - ~0UL, 0, hgpa, PUD_SHIFT); - kvmppc_radix_tlbie_page(kvm, hgpa, PUD_SHIFT); - if (old & _PAGE_DIRTY) { - unsigned long gfn = hgpa >> PAGE_SHIFT; - struct kvm_memory_slot *memslot; - memslot = gfn_to_memslot(kvm, gfn); - if (memslot && memslot->dirty_bitmap) - kvmppc_update_dirty_map(memslot, - gfn, PUD_SIZE); - } + kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT); } if (level == 2) { if (!pud_none(*pud)) { /* * There's a page table page here, but we wanted to * install a large page, so remove and free the page - * table page. new_pmd will be NULL since level == 2. + * table page. */ - new_pmd = pmd_offset(pud, 0); - pud_clear(pud); - kvmppc_radix_flush_pwc(kvm, gpa); + kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa); } kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte); ret = 0; @@ -324,42 +461,40 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, if (pmd_is_leaf(*pmd)) { unsigned long lgpa = gpa & PMD_MASK; + /* Check if we raced and someone else has set the same thing */ + if (level == 1) { + if (pmd_raw(*pmd) == pte_raw(pte)) { + ret = 0; + goto out_unlock; + } + /* Valid 2MB page here already, add our extra bits */ + WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) & + PTE_BITS_MUST_MATCH); + kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd), + 0, pte_val(pte), lgpa, PMD_SHIFT); + ret = 0; + goto out_unlock; + } + /* * If we raced with another CPU which has just put * a 2MB pte in after we saw a pte page, try again. 
*/ - if (level == 0 && !new_ptep) { + if (!new_ptep) { ret = -EAGAIN; goto out_unlock; } - /* Check if we raced and someone else has set the same thing */ - if (level == 1 && pmd_raw(*pmd) == pte_raw(pte)) { - ret = 0; - goto out_unlock; - } /* Valid 2MB page here already, remove it */ - old = kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd), - ~0UL, 0, lgpa, PMD_SHIFT); - kvmppc_radix_tlbie_page(kvm, lgpa, PMD_SHIFT); - if (old & _PAGE_DIRTY) { - unsigned long gfn = lgpa >> PAGE_SHIFT; - struct kvm_memory_slot *memslot; - memslot = gfn_to_memslot(kvm, gfn); - if (memslot && memslot->dirty_bitmap) - kvmppc_update_dirty_map(memslot, - gfn, PMD_SIZE); - } + kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT); } if (level == 1) { if (!pmd_none(*pmd)) { /* * There's a page table page here, but we wanted to * install a large page, so remove and free the page - * table page. new_ptep will be NULL since level == 1. + * table page. */ - new_ptep = pte_offset_kernel(pmd, 0); - pmd_clear(pmd); - kvmppc_radix_flush_pwc(kvm, gpa); + kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa); } kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte); ret = 0; @@ -378,12 +513,12 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, ret = 0; goto out_unlock; } - /* PTE was previously valid, so invalidate it */ - old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, - 0, gpa, 0); - kvmppc_radix_tlbie_page(kvm, gpa, 0); - if (old & _PAGE_DIRTY) - mark_page_dirty(kvm, gpa >> PAGE_SHIFT); + /* Valid page here already, add our extra bits */ + WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) & + PTE_BITS_MUST_MATCH); + kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0); + ret = 0; + goto out_unlock; } kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte); ret = 0; @@ -565,9 +700,13 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned long mask = (1ul << shift) - PAGE_SIZE; pte = __pte(pte_val(pte) | (hva & mask)); } - if (!(writing || upgrade_write)) - pte = __pte(pte_val(pte) & ~ _PAGE_WRITE); - pte = __pte(pte_val(pte) | _PAGE_EXEC); + pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED); + if (writing || upgrade_write) { + if (pte_val(pte) & _PAGE_WRITE) + pte = __pte(pte_val(pte) | _PAGE_DIRTY); + } else { + pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY)); + } } /* Allocate space in the tree and write the PTE */ @@ -734,51 +873,6 @@ int kvmppc_init_vm_radix(struct kvm *kvm) return 0; } -void kvmppc_free_radix(struct kvm *kvm) -{ - unsigned long ig, iu, im; - pte_t *pte; - pmd_t *pmd; - pud_t *pud; - pgd_t *pgd; - - if (!kvm->arch.pgtable) - return; - pgd = kvm->arch.pgtable; - for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) { - if (!pgd_present(*pgd)) - continue; - pud = pud_offset(pgd, 0); - for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++pud) { - if (!pud_present(*pud)) - continue; - if (pud_huge(*pud)) { - pud_clear(pud); - continue; - } - pmd = pmd_offset(pud, 0); - for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) { - if (pmd_is_leaf(*pmd)) { - pmd_clear(pmd); - continue; - } - if (!pmd_present(*pmd)) - continue; - pte = pte_offset_map(pmd, 0); - memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE); - kvmppc_pte_free(pte); - pmd_clear(pmd); - } - kvmppc_pmd_free(pmd_offset(pud, 0)); - pud_clear(pud); - } - pud_free(kvm->mm, pud_offset(pgd, 0)); - pgd_clear(pgd); - } - pgd_free(kvm->mm, kvm->arch.pgtable); - kvm->arch.pgtable = NULL; -} - static void pte_ctor(void *addr) { memset(addr, 0, RADIX_PTE_TABLE_SIZE); diff --git a/arch/powerpc/kvm/book3s_64_vio.c 
b/arch/powerpc/kvm/book3s_64_vio.c index 4dffa611376d..d066e37551ec 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -176,14 +176,12 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, if (!tbltmp) continue; - /* - * Make sure hardware table parameters are exactly the same; - * this is used in the TCE handlers where boundary checks - * use only the first attached table. - */ - if ((tbltmp->it_page_shift == stt->page_shift) && - (tbltmp->it_offset == stt->offset) && - (tbltmp->it_size == stt->size)) { + /* Make sure hardware table parameters are compatible */ + if ((tbltmp->it_page_shift <= stt->page_shift) && + (tbltmp->it_offset << tbltmp->it_page_shift == + stt->offset << stt->page_shift) && + (tbltmp->it_size << tbltmp->it_page_shift == + stt->size << stt->page_shift)) { /* * Reference the table to avoid races with * add/remove DMA windows. @@ -237,7 +235,7 @@ static void release_spapr_tce_table(struct rcu_head *head) kfree(stt); } -static int kvm_spapr_tce_fault(struct vm_fault *vmf) +static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf) { struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data; struct page *page; @@ -302,7 +300,8 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, int ret = -ENOMEM; int i; - if (!args->size) + if (!args->size || args->page_shift < 12 || args->page_shift > 34 || + (args->offset + args->size > (ULLONG_MAX >> args->page_shift))) return -EINVAL; size = _ALIGN_UP(args->size, PAGE_SIZE >> 3); @@ -396,7 +395,7 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm, return H_SUCCESS; } -static long kvmppc_tce_iommu_unmap(struct kvm *kvm, +static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm, struct iommu_table *tbl, unsigned long entry) { enum dma_data_direction dir = DMA_NONE; @@ -416,7 +415,24 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm, return ret; } -long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, +static long kvmppc_tce_iommu_unmap(struct kvm *kvm, + struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, + unsigned long entry) +{ + unsigned long i, ret = H_SUCCESS; + unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); + unsigned long io_entry = entry * subpages; + + for (i = 0; i < subpages; ++i) { + ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i); + if (ret != H_SUCCESS) + break; + } + + return ret; +} + +long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, unsigned long entry, unsigned long ua, enum dma_data_direction dir) { @@ -453,6 +469,27 @@ long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, return 0; } +static long kvmppc_tce_iommu_map(struct kvm *kvm, + struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, + unsigned long entry, unsigned long ua, + enum dma_data_direction dir) +{ + unsigned long i, pgoff, ret = H_SUCCESS; + unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); + unsigned long io_entry = entry * subpages; + + for (i = 0, pgoff = 0; i < subpages; + ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) { + + ret = kvmppc_tce_iommu_do_map(kvm, tbl, + io_entry + i, ua + pgoff, dir); + if (ret != H_SUCCESS) + break; + } + + return ret; +} + long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, unsigned long ioba, unsigned long tce) { @@ -491,10 +528,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { if (dir == DMA_NONE) - ret = 
kvmppc_tce_iommu_unmap(vcpu->kvm, + ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt, stit->tbl, entry); else - ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl, + ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl, entry, ua, dir); if (ret == H_SUCCESS) @@ -570,7 +607,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, return H_PARAMETER; list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { - ret = kvmppc_tce_iommu_map(vcpu->kvm, + ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl, entry + i, ua, iommu_tce_direction(tce)); @@ -615,10 +652,10 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu, return H_PARAMETER; list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { - unsigned long entry = ioba >> stit->tbl->it_page_shift; + unsigned long entry = ioba >> stt->page_shift; for (i = 0; i < npages; ++i) { - ret = kvmppc_tce_iommu_unmap(vcpu->kvm, + ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt, stit->tbl, entry + i); if (ret == H_SUCCESS) diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 6651f736a0b1..925fc316a104 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -221,7 +221,7 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm, return H_SUCCESS; } -static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm, +static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm, struct iommu_table *tbl, unsigned long entry) { enum dma_data_direction dir = DMA_NONE; @@ -245,7 +245,24 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm, return ret; } -static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, +static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm, + struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, + unsigned long entry) +{ + unsigned long i, ret = H_SUCCESS; + unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); + unsigned long io_entry = entry * subpages; + + for (i = 0; i < subpages; ++i) { + ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i); + if (ret != H_SUCCESS) + break; + } + + return ret; +} + +static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, unsigned long entry, unsigned long ua, enum dma_data_direction dir) { @@ -290,6 +307,27 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, return 0; } +static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, + struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl, + unsigned long entry, unsigned long ua, + enum dma_data_direction dir) +{ + unsigned long i, pgoff, ret = H_SUCCESS; + unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift); + unsigned long io_entry = entry * subpages; + + for (i = 0, pgoff = 0; i < subpages; + ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) { + + ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl, + io_entry + i, ua + pgoff, dir); + if (ret != H_SUCCESS) + break; + } + + return ret; +} + long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, unsigned long ioba, unsigned long tce) { @@ -327,10 +365,10 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { if (dir == DMA_NONE) - ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, + ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt, stit->tbl, entry); else - ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, + ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt, stit->tbl, entry, ua, dir); if (ret == H_SUCCESS) @@ -477,7 +515,7 @@ long kvmppc_rm_h_put_tce_indirect(struct 
kvm_vcpu *vcpu, return H_PARAMETER; list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { - ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, + ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt, stit->tbl, entry + i, ua, iommu_tce_direction(tce)); @@ -526,10 +564,10 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, return H_PARAMETER; list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { - unsigned long entry = ioba >> stit->tbl->it_page_shift; + unsigned long entry = ioba >> stt->page_shift; for (i = 0; i < npages; ++i) { - ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, + ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt, stit->tbl, entry + i); if (ret == H_SUCCESS) @@ -571,7 +609,7 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, page = stt->pages[idx / TCES_PER_PAGE]; tbl = (u64 *)page_address(page); - vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE]; + vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE]; return H_SUCCESS; } diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 68d68983948e..36b11c5a0dbb 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -23,7 +23,9 @@ #include #include #include +#include #include "book3s.h" +#include #define OP_19_XOP_RFID 18 #define OP_19_XOP_RFI 50 @@ -47,6 +49,12 @@ #define OP_31_XOP_EIOIO 854 #define OP_31_XOP_SLBMFEE 915 +#define OP_31_XOP_TBEGIN 654 +#define OP_31_XOP_TABORT 910 + +#define OP_31_XOP_TRECLAIM 942 +#define OP_31_XOP_TRCHKPT 1006 + /* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */ #define OP_31_XOP_DCBZ 1010 @@ -87,6 +95,157 @@ static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level) return true; } +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu) +{ + memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0], + sizeof(vcpu->arch.gpr_tm)); + memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp, + sizeof(struct thread_fp_state)); + memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr, + sizeof(struct thread_vr_state)); + vcpu->arch.ppr_tm = vcpu->arch.ppr; + vcpu->arch.dscr_tm = vcpu->arch.dscr; + vcpu->arch.amr_tm = vcpu->arch.amr; + vcpu->arch.ctr_tm = vcpu->arch.regs.ctr; + vcpu->arch.tar_tm = vcpu->arch.tar; + vcpu->arch.lr_tm = vcpu->arch.regs.link; + vcpu->arch.cr_tm = vcpu->arch.cr; + vcpu->arch.xer_tm = vcpu->arch.regs.xer; + vcpu->arch.vrsave_tm = vcpu->arch.vrsave; +} + +static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu) +{ + memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0], + sizeof(vcpu->arch.regs.gpr)); + memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm, + sizeof(struct thread_fp_state)); + memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm, + sizeof(struct thread_vr_state)); + vcpu->arch.ppr = vcpu->arch.ppr_tm; + vcpu->arch.dscr = vcpu->arch.dscr_tm; + vcpu->arch.amr = vcpu->arch.amr_tm; + vcpu->arch.regs.ctr = vcpu->arch.ctr_tm; + vcpu->arch.tar = vcpu->arch.tar_tm; + vcpu->arch.regs.link = vcpu->arch.lr_tm; + vcpu->arch.cr = vcpu->arch.cr_tm; + vcpu->arch.regs.xer = vcpu->arch.xer_tm; + vcpu->arch.vrsave = vcpu->arch.vrsave_tm; +} + +static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val) +{ + unsigned long guest_msr = kvmppc_get_msr(vcpu); + int fc_val = ra_val ? 
ra_val : 1; + uint64_t texasr; + + /* CR0 = 0 | MSR[TS] | 0 */ + vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) | + (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1)) + << CR0_SHIFT); + + preempt_disable(); + tm_enable(); + texasr = mfspr(SPRN_TEXASR); + kvmppc_save_tm_pr(vcpu); + kvmppc_copyfrom_vcpu_tm(vcpu); + + /* Failure recording depends on the Failure Summary bit */ + if (!(texasr & TEXASR_FS)) { + texasr &= ~TEXASR_FC; + texasr |= ((u64)fc_val << TEXASR_FC_LG) | TEXASR_FS; + + texasr &= ~(TEXASR_PR | TEXASR_HV); + if (kvmppc_get_msr(vcpu) & MSR_PR) + texasr |= TEXASR_PR; + + if (kvmppc_get_msr(vcpu) & MSR_HV) + texasr |= TEXASR_HV; + + vcpu->arch.texasr = texasr; + vcpu->arch.tfiar = kvmppc_get_pc(vcpu); + mtspr(SPRN_TEXASR, texasr); + mtspr(SPRN_TFIAR, vcpu->arch.tfiar); + } + tm_disable(); + /* + * treclaim needs to exit to non-transactional state. + */ + guest_msr &= ~(MSR_TS_MASK); + kvmppc_set_msr(vcpu, guest_msr); + preempt_enable(); + + if (vcpu->arch.shadow_fscr & FSCR_TAR) + mtspr(SPRN_TAR, vcpu->arch.tar); +} + +static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu) +{ + unsigned long guest_msr = kvmppc_get_msr(vcpu); + + preempt_disable(); + /* + * We need to flush FP/VEC/VSX to the vcpu save area before + * copying. + */ + kvmppc_giveup_ext(vcpu, MSR_VSX); + kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); + kvmppc_copyto_vcpu_tm(vcpu); + kvmppc_save_tm_sprs(vcpu); + + /* + * As a result of trecheckpoint, set TS to suspended. + */ + guest_msr &= ~(MSR_TS_MASK); + guest_msr |= MSR_TS_S; + kvmppc_set_msr(vcpu, guest_msr); + kvmppc_restore_tm_pr(vcpu); + preempt_enable(); +} + +/* Emulate tabort., which is used in guest privileged state */ +void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) +{ + /* Currently we only emulate tabort., not the other tabort + * variants, since there is no kernel usage of them at + * present. + */ + unsigned long guest_msr = kvmppc_get_msr(vcpu); + uint64_t org_texasr; + + preempt_disable(); + tm_enable(); + org_texasr = mfspr(SPRN_TEXASR); + tm_abort(ra_val); + + /* CR0 = 0 | MSR[TS] | 0 */ + vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) | + (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1)) + << CR0_SHIFT); + + vcpu->arch.texasr = mfspr(SPRN_TEXASR); + /* Failure recording depends on the Failure Summary bit; + * tabort. is treated as a nop in non-transactional + * state. + */ + if (!(org_texasr & TEXASR_FS) && + MSR_TM_ACTIVE(guest_msr)) { + vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV); + if (guest_msr & MSR_PR) + vcpu->arch.texasr |= TEXASR_PR; + + if (guest_msr & MSR_HV) + vcpu->arch.texasr |= TEXASR_HV; + + vcpu->arch.tfiar = kvmppc_get_pc(vcpu); + } + tm_disable(); + preempt_enable(); +} + +#endif + int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { @@ -117,11 +276,28 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, case 19: switch (get_xop(inst)) { case OP_19_XOP_RFID: - case OP_19_XOP_RFI: + case OP_19_XOP_RFI: { + unsigned long srr1 = kvmppc_get_srr1(vcpu); +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + unsigned long cur_msr = kvmppc_get_msr(vcpu); + + /* + * Apply the ISA rules for TM state transitions: in TM + * disabled/suspended state, a transition to the TM + * inactive (00) state must be suppressed.
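
Stepping back to the CR0 computation shared by the treclaim and tabort. paths above: it packs the two MSR[TS] bits into the middle of the 4-bit CR0 field, giving 0 | MSR[TS] | 0. A worked example, assuming the usual definitions MSR_TS_S_LG = 33 and CR0_SHIFT = 28 (values not restated in this patch):

    /* Guest suspended: MSR[TS] = 0b01, i.e. MSR_TS_S = 1UL << 33. */
    unsigned long guest_msr = MSR_TS_S;

    /* (1UL << 33) >> 32 == 0b10; << 28 puts 0b0010 in the top nibble,
     * so CR0 reads back as 0 | 01 | 0. */
    unsigned long cr0_bits = ((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
                             << CR0_SHIFT;
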
+ */ + if (((cur_msr & MSR_TM) == 0) && + ((srr1 & MSR_TM) == 0) && + MSR_TM_SUSPENDED(cur_msr) && + !MSR_TM_ACTIVE(srr1)) + srr1 |= MSR_TS_S; +#endif kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu)); - kvmppc_set_msr(vcpu, kvmppc_get_srr1(vcpu)); + kvmppc_set_msr(vcpu, srr1); *advance = 0; break; + } default: emulated = EMULATE_FAIL; @@ -304,6 +480,140 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, break; } +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + case OP_31_XOP_TBEGIN: + { + if (!cpu_has_feature(CPU_FTR_TM)) + break; + + if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { + kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); + emulated = EMULATE_AGAIN; + break; + } + + if (!(kvmppc_get_msr(vcpu) & MSR_PR)) { + preempt_disable(); + vcpu->arch.cr = (CR0_TBEGIN_FAILURE | + (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT))); + + vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT | + (((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)) + << TEXASR_FC_LG)); + + if ((inst >> 21) & 0x1) + vcpu->arch.texasr |= TEXASR_ROT; + + if (kvmppc_get_msr(vcpu) & MSR_HV) + vcpu->arch.texasr |= TEXASR_HV; + + vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4; + vcpu->arch.tfiar = kvmppc_get_pc(vcpu); + + kvmppc_restore_tm_sprs(vcpu); + preempt_enable(); + } else + emulated = EMULATE_FAIL; + break; + } + case OP_31_XOP_TABORT: + { + ulong guest_msr = kvmppc_get_msr(vcpu); + unsigned long ra_val = 0; + + if (!cpu_has_feature(CPU_FTR_TM)) + break; + + if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { + kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); + emulated = EMULATE_AGAIN; + break; + } + + /* only emulate for privilege guest, since problem state + * guest can run with TM enabled and we don't expect to + * trap at here for that case. + */ + WARN_ON(guest_msr & MSR_PR); + + if (ra) + ra_val = kvmppc_get_gpr(vcpu, ra); + + kvmppc_emulate_tabort(vcpu, ra_val); + break; + } + case OP_31_XOP_TRECLAIM: + { + ulong guest_msr = kvmppc_get_msr(vcpu); + unsigned long ra_val = 0; + + if (!cpu_has_feature(CPU_FTR_TM)) + break; + + if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { + kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); + emulated = EMULATE_AGAIN; + break; + } + + /* generate interrupts based on priorities */ + if (guest_msr & MSR_PR) { + /* Privileged Instruction type Program Interrupt */ + kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV); + emulated = EMULATE_AGAIN; + break; + } + + if (!MSR_TM_ACTIVE(guest_msr)) { + /* TM bad thing interrupt */ + kvmppc_core_queue_program(vcpu, SRR1_PROGTM); + emulated = EMULATE_AGAIN; + break; + } + + if (ra) + ra_val = kvmppc_get_gpr(vcpu, ra); + kvmppc_emulate_treclaim(vcpu, ra_val); + break; + } + case OP_31_XOP_TRCHKPT: + { + ulong guest_msr = kvmppc_get_msr(vcpu); + unsigned long texasr; + + if (!cpu_has_feature(CPU_FTR_TM)) + break; + + if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { + kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); + emulated = EMULATE_AGAIN; + break; + } + + /* generate interrupt based on priorities */ + if (guest_msr & MSR_PR) { + /* Privileged Instruction type Program Intr */ + kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV); + emulated = EMULATE_AGAIN; + break; + } + + tm_enable(); + texasr = mfspr(SPRN_TEXASR); + tm_disable(); + + if (MSR_TM_ACTIVE(guest_msr) || + !(texasr & (TEXASR_FS))) { + /* TM bad thing interrupt */ + kvmppc_core_queue_program(vcpu, SRR1_PROGTM); + emulated = EMULATE_AGAIN; + break; + } + + kvmppc_emulate_trchkpt(vcpu); + break; + } +#endif default: emulated = EMULATE_FAIL; } @@ -465,13 +775,38 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong 
spr_val) break; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM case SPRN_TFHAR: - vcpu->arch.tfhar = spr_val; - break; case SPRN_TEXASR: - vcpu->arch.texasr = spr_val; - break; case SPRN_TFIAR: - vcpu->arch.tfiar = spr_val; + if (!cpu_has_feature(CPU_FTR_TM)) + break; + + if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { + kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); + emulated = EMULATE_AGAIN; + break; + } + + if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) && + !((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) && + (sprn == SPRN_TFHAR))) { + /* it is illegal to mtspr() TM regs in + * other than non-transactional state, with + * the exception of TFHAR in suspend state. + */ + kvmppc_core_queue_program(vcpu, SRR1_PROGTM); + emulated = EMULATE_AGAIN; + break; + } + + tm_enable(); + if (sprn == SPRN_TFHAR) + mtspr(SPRN_TFHAR, spr_val); + else if (sprn == SPRN_TEXASR) + mtspr(SPRN_TEXASR, spr_val); + else + mtspr(SPRN_TFIAR, spr_val); + tm_disable(); + break; #endif #endif @@ -618,13 +953,25 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val break; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM case SPRN_TFHAR: - *spr_val = vcpu->arch.tfhar; - break; case SPRN_TEXASR: - *spr_val = vcpu->arch.texasr; - break; case SPRN_TFIAR: - *spr_val = vcpu->arch.tfiar; + if (!cpu_has_feature(CPU_FTR_TM)) + break; + + if (!(kvmppc_get_msr(vcpu) & MSR_TM)) { + kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG); + emulated = EMULATE_AGAIN; + break; + } + + tm_enable(); + if (sprn == SPRN_TFHAR) + *spr_val = mfspr(SPRN_TFHAR); + else if (sprn == SPRN_TEXASR) + *spr_val = mfspr(SPRN_TEXASR); + else if (sprn == SPRN_TFIAR) + *spr_val = mfspr(SPRN_TFIAR); + tm_disable(); break; #endif #endif diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index cb6d2313b19f..de686b340f4a 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -123,6 +123,32 @@ static bool no_mixing_hpt_and_radix; static void kvmppc_end_cede(struct kvm_vcpu *vcpu); static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); +/* + * RWMR values for POWER8. These control the rate at which PURR + * and SPURR count and should be set according to the number of + * online threads in the vcore being run. 
+ */ +#define RWMR_RPA_P8_1THREAD 0x164520C62609AECA +#define RWMR_RPA_P8_2THREAD 0x7FFF2908450D8DA9 +#define RWMR_RPA_P8_3THREAD 0x164520C62609AECA +#define RWMR_RPA_P8_4THREAD 0x199A421245058DA9 +#define RWMR_RPA_P8_5THREAD 0x164520C62609AECA +#define RWMR_RPA_P8_6THREAD 0x164520C62609AECA +#define RWMR_RPA_P8_7THREAD 0x164520C62609AECA +#define RWMR_RPA_P8_8THREAD 0x164520C62609AECA + +static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = { + RWMR_RPA_P8_1THREAD, + RWMR_RPA_P8_1THREAD, + RWMR_RPA_P8_2THREAD, + RWMR_RPA_P8_3THREAD, + RWMR_RPA_P8_4THREAD, + RWMR_RPA_P8_5THREAD, + RWMR_RPA_P8_6THREAD, + RWMR_RPA_P8_7THREAD, + RWMR_RPA_P8_8THREAD, +}; + static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc, int *ip) { @@ -371,13 +397,13 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu) pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id); pr_err("pc = %.16lx msr = %.16llx trap = %x\n", - vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap); + vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap); for (r = 0; r < 16; ++r) pr_err("r%2d = %.16lx r%d = %.16lx\n", r, kvmppc_get_gpr(vcpu, r), r+16, kvmppc_get_gpr(vcpu, r+16)); pr_err("ctr = %.16lx lr = %.16lx\n", - vcpu->arch.ctr, vcpu->arch.lr); + vcpu->arch.regs.ctr, vcpu->arch.regs.link); pr_err("srr0 = %.16llx srr1 = %.16llx\n", vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); pr_err("sprg0 = %.16llx sprg1 = %.16llx\n", @@ -385,7 +411,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu) pr_err("sprg2 = %.16llx sprg3 = %.16llx\n", vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n", - vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr); + vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); pr_err("fault dar = %.16lx dsisr = %.8x\n", vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); @@ -1526,6 +1552,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, *val = get_reg_val(id, vcpu->arch.dec_expires + vcpu->arch.vcore->tb_offset); break; + case KVM_REG_PPC_ONLINE: + *val = get_reg_val(id, vcpu->arch.online); + break; default: r = -EINVAL; break; @@ -1757,6 +1786,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, vcpu->arch.dec_expires = set_reg_val(id, *val) - vcpu->arch.vcore->tb_offset; break; + case KVM_REG_PPC_ONLINE: + i = set_reg_val(id, *val); + if (i && !vcpu->arch.online) + atomic_inc(&vcpu->arch.vcore->online_count); + else if (!i && vcpu->arch.online) + atomic_dec(&vcpu->arch.vcore->online_count); + vcpu->arch.online = i; + break; default: r = -EINVAL; break; @@ -2850,6 +2887,25 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) } } + /* + * On POWER8, set RWMR register. + * Since it only affects PURR and SPURR, it doesn't affect + * the host, so we don't save/restore the host value. + */ + if (is_power8) { + unsigned long rwmr_val = RWMR_RPA_P8_8THREAD; + int n_online = atomic_read(&vc->online_count); + + /* + * Use the 8-thread value if we're doing split-core + * or if the vcore's online count looks bogus. 
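
The KVM_REG_PPC_ONLINE plumbing above relies on userspace reporting vCPU hotplug through the ONE_REG interface; the atomic_inc()/atomic_dec() pair is what turns those writes into the vcore's online_count consumed by the RWMR code. A hedged sketch of the userspace side, assuming an already-created vCPU fd and eliding error handling:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int set_vcpu_online(int vcpu_fd, __u32 online)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_PPC_ONLINE,
                    .addr = (__u64)(unsigned long)&online,
            };

            /* online = 1 when the guest CPU comes up, 0 when it goes down */
            return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }
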
+ */ + if (split == 1 && threads_per_subcore == MAX_SMT_THREADS && + n_online >= 1 && n_online <= MAX_SMT_THREADS) + rwmr_val = p8_rwmr_values[n_online]; + mtspr(SPRN_RWMR, rwmr_val); + } + /* Start all the threads */ active = 0; for (sub = 0; sub < core_info.n_subcores; ++sub) { @@ -2902,6 +2958,32 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) for (sub = 0; sub < core_info.n_subcores; ++sub) spin_unlock(&core_info.vc[sub]->lock); + if (kvm_is_radix(vc->kvm)) { + int tmp = pcpu; + + /* + * Do we need to flush the process scoped TLB for the LPAR? + * + * On POWER9, individual threads can come in here, but the + * TLB is shared between the 4 threads in a core, hence + * invalidating on one thread invalidates for all. + * Thus we make all 4 threads use the same bit here. + * + * Hash must be flushed in realmode in order to use tlbiel. + */ + mtspr(SPRN_LPID, vc->kvm->arch.lpid); + isync(); + + if (cpu_has_feature(CPU_FTR_ARCH_300)) + tmp &= ~0x3UL; + + if (cpumask_test_cpu(tmp, &vc->kvm->arch.need_tlb_flush)) { + radix__local_flush_tlb_lpid_guest(vc->kvm->arch.lpid); + /* Clear the bit after the TLB flush */ + cpumask_clear_cpu(tmp, &vc->kvm->arch.need_tlb_flush); + } + } + /* * Interrupts will be enabled once we get into the guest, * so tell lockdep that we're about to enable interrupts. @@ -3356,6 +3438,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) } #endif + /* + * Force online to 1 for the sake of old userspace which doesn't + * set it. + */ + if (!vcpu->arch.online) { + atomic_inc(&vcpu->arch.vcore->online_count); + vcpu->arch.online = 1; + } + kvmppc_core_prepare_to_enter(vcpu); /* No need to go into the guest when all we'll do is come back out */ @@ -3548,7 +3639,7 @@ static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free, static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot, unsigned long npages) { - slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); + slot->arch.rmap = vzalloc(array_size(npages, sizeof(*slot->arch.rmap))); if (!slot->arch.rmap) return -ENOMEM; @@ -3955,8 +4046,7 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) */ snprintf(buf, sizeof(buf), "vm%d", current->pid); kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir); - if (!IS_ERR_OR_NULL(kvm->arch.debugfs_dir)) - kvmppc_mmu_debugfs_init(kvm); + kvmppc_mmu_debugfs_init(kvm); return 0; } diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index de18299f92b7..d4a3f4da409b 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -18,6 +18,7 @@ #include #include +#include #include #include #include @@ -211,9 +212,9 @@ long kvmppc_h_random(struct kvm_vcpu *vcpu) /* Only need to do the expensive mfmsr() on radix */ if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR)) - r = powernv_get_random_long(&vcpu->arch.gpr[4]); + r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]); else - r = powernv_get_random_real_mode(&vcpu->arch.gpr[4]); + r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]); if (r) return H_SUCCESS; @@ -562,7 +563,7 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu) { if (!kvmppc_xics_enabled(vcpu)) return H_TOO_HARD; - vcpu->arch.gpr[5] = get_tb(); + vcpu->arch.regs.gpr[5] = get_tb(); if (xive_enabled()) { if (is_rm()) return xive_rm_h_xirr(vcpu); @@ -633,7 +634,19 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) void kvmppc_bad_interrupt(struct pt_regs *regs) { - die("Bad interrupt in KVM entry/exit 
code", regs, SIGABRT); + /* + * 100 could happen at any time, 200 can happen due to invalid real + * address access for example (or any time due to a hardware problem). + */ + if (TRAP(regs) == 0x100) { + get_paca()->in_nmi++; + system_reset_exception(regs); + get_paca()->in_nmi--; + } else if (TRAP(regs) == 0x200) { + machine_check_exception(regs); + } else { + die("Bad interrupt in KVM entry/exit code", regs, SIGABRT); + } panic("Bad KVM trap"); } diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index 0e8493033288..82f2ff9410b6 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S @@ -137,7 +137,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) /* * We return here in virtual mode after the guest exits * with something that we can't handle in real mode. - * Interrupts are enabled again at this point. + * Interrupts are still hard-disabled. */ /* diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 78e6a392330f..1f22d9e977d4 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -418,7 +418,8 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, long pte_index, unsigned long pteh, unsigned long ptel) { return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel, - vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]); + vcpu->arch.pgdir, true, + &vcpu->arch.regs.gpr[4]); } #ifdef __BIG_ENDIAN__ @@ -434,24 +435,6 @@ static inline int is_mmio_hpte(unsigned long v, unsigned long r) (HPTE_R_KEY_HI | HPTE_R_KEY_LO)); } -static inline int try_lock_tlbie(unsigned int *lock) -{ - unsigned int tmp, old; - unsigned int token = LOCK_TOKEN; - - asm volatile("1:lwarx %1,0,%2\n" - " cmpwi cr0,%1,0\n" - " bne 2f\n" - " stwcx. %3,0,%2\n" - " bne- 1b\n" - " isync\n" - "2:" - : "=&r" (tmp), "=&r" (old) - : "r" (lock), "r" (token) - : "cc", "memory"); - return old == 0; -} - static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, long npages, int global, bool need_sync) { @@ -463,8 +446,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, * the RS field, this is backwards-compatible with P7 and P8. 
*/ if (global) { - while (!try_lock_tlbie(&kvm->arch.tlbie_lock)) - cpu_relax(); if (need_sync) asm volatile("ptesync" : : : "memory"); for (i = 0; i < npages; ++i) { @@ -483,7 +464,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, } asm volatile("eieio; tlbsync; ptesync" : : : "memory"); - kvm->arch.tlbie_lock = 0; } else { if (need_sync) asm volatile("ptesync" : : : "memory"); @@ -561,13 +541,13 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long pte_index, unsigned long avpn) { return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn, - &vcpu->arch.gpr[4]); + &vcpu->arch.regs.gpr[4]); } long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; - unsigned long *args = &vcpu->arch.gpr[4]; + unsigned long *args = &vcpu->arch.regs.gpr[4]; __be64 *hp, *hptes[4]; unsigned long tlbrb[4]; long int i, j, k, n, found, indexes[4]; @@ -787,8 +767,8 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags, r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C)); r &= ~HPTE_GR_RESERVED; } - vcpu->arch.gpr[4 + i * 2] = v; - vcpu->arch.gpr[5 + i * 2] = r; + vcpu->arch.regs.gpr[4 + i * 2] = v; + vcpu->arch.regs.gpr[5 + i * 2] = r; } return H_SUCCESS; } @@ -834,7 +814,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags, } } } - vcpu->arch.gpr[4] = gr; + vcpu->arch.regs.gpr[4] = gr; ret = H_SUCCESS; out: unlock_hpte(hpte, v & ~HPTE_V_HVLOCK); @@ -881,7 +861,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags, kvmppc_set_dirty_from_hpte(kvm, v, gr); } } - vcpu->arch.gpr[4] = gr; + vcpu->arch.regs.gpr[4] = gr; ret = H_SUCCESS; out: unlock_hpte(hpte, v & ~HPTE_V_HVLOCK); diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c index 2a862618f072..758d1d23215e 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c @@ -517,7 +517,7 @@ unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu) } while (!icp_rm_try_update(icp, old_state, new_state)); /* Return the result in GPR4 */ - vcpu->arch.gpr[4] = xirr; + vcpu->arch.regs.gpr[4] = xirr; return check_too_hard(xics, icp); } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index b97d261d3b89..153988d878e8 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -39,8 +39,6 @@ BEGIN_FTR_SECTION; \ extsw reg, reg; \ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) -#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) - /* Values in HSTATE_NAPPING(r13) */ #define NAPPING_CEDE 1 #define NAPPING_NOVCPU 2 @@ -639,6 +637,10 @@ kvmppc_hv_entry: /* Primary thread switches to guest partition. */ cmpwi r6,0 bne 10f + + /* Radix has already switched LPID and flushed core TLB */ + bne cr7, 22f + lwz r7,KVM_LPID(r9) BEGIN_FTR_SECTION ld r6,KVM_SDR1(r9) @@ -650,7 +652,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) mtspr SPRN_LPID,r7 isync - /* See if we need to flush the TLB */ + /* See if we need to flush the TLB. 
Hash has to be done in RM */ lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */ BEGIN_FTR_SECTION /* @@ -677,15 +679,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) li r7,0x800 /* IS field = 0b10 */ ptesync li r0,0 /* RS for P9 version of tlbiel */ - bne cr7, 29f 28: tlbiel r7 /* On P9, rs=0, RIC=0, PRS=0, R=0 */ addi r7,r7,0x1000 bdnz 28b - b 30f -29: PPC_TLBIEL(7,0,2,1,1) /* for radix, RIC=2, PRS=1, R=1 */ - addi r7,r7,0x1000 - bdnz 29b -30: ptesync + ptesync 23: ldarx r7,0,r6 /* clear the bit after TLB flushed */ andc r7,r7,r8 stdcx. r7,0,r6 @@ -799,7 +796,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) /* * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR */ - bl kvmppc_restore_tm + mr r3, r4 + ld r4, VCPU_MSR(r3) + bl kvmppc_restore_tm_hv + ld r4, HSTATE_KVM_VCPU(r13) 91: #endif @@ -1783,7 +1783,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) /* * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR */ - bl kvmppc_save_tm + mr r3, r9 + ld r4, VCPU_MSR(r3) + bl kvmppc_save_tm_hv + ld r9, HSTATE_KVM_VCPU(r13) 91: #endif @@ -2686,8 +2689,9 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) /* * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR */ - ld r9, HSTATE_KVM_VCPU(r13) - bl kvmppc_save_tm + ld r3, HSTATE_KVM_VCPU(r13) + ld r4, VCPU_MSR(r3) + bl kvmppc_save_tm_hv 91: #endif @@ -2805,7 +2809,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) /* * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR */ - bl kvmppc_restore_tm + mr r3, r4 + ld r4, VCPU_MSR(r3) + bl kvmppc_restore_tm_hv + ld r4, HSTATE_KVM_VCPU(r13) 91: #endif @@ -3126,11 +3133,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* * Save transactional state and TM-related registers. - * Called with r9 pointing to the vcpu struct. + * Called with r3 pointing to the vcpu struct and r4 containing + * the guest MSR value. * This can modify all checkpointed registers, but - * restores r1, r2 and r9 (vcpu pointer) before exit. + * restores r1 and r2 before exit. */ -kvmppc_save_tm: +kvmppc_save_tm_hv: + /* See if we need to handle fake suspend mode */ +BEGIN_FTR_SECTION + b __kvmppc_save_tm +END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) + + lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */ + cmpwi r0, 0 + beq __kvmppc_save_tm + + /* The following code handles the fake_suspend = 1 case */ mflr r0 std r0, PPC_LR_STKOFF(r1) stdu r1, -PPC_MIN_STKFRM(r1) @@ -3141,59 +3159,37 @@ kvmppc_save_tm: rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG mtmsrd r8 - ld r5, VCPU_MSR(r9) - rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 - beq 1f /* TM not active in guest. */ - - std r1, HSTATE_HOST_R1(r13) - li r3, TM_CAUSE_KVM_RESCHED - -BEGIN_FTR_SECTION - lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */ - cmpwi r0, 0 - beq 3f rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */ beq 4f -BEGIN_FTR_SECTION_NESTED(96) +BEGIN_FTR_SECTION bl pnv_power9_force_smt4_catch -END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96) +END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) nop - b 6f -3: - /* Emulation of the treclaim instruction needs TEXASR before treclaim */ - mfspr r6, SPRN_TEXASR - std r6, VCPU_ORIG_TEXASR(r9) -6: -END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) - /* Clear the MSR RI since r1, r13 are all going to be foobar. */ + std r1, HSTATE_HOST_R1(r13) + + /* Clear the MSR RI since r1, r13 may be foobar. 
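+ * (TRECLAIM overwrites the working registers with the checkpointed
+ * state, so r1 and r13 cannot be trusted until they are reloaded; a
+ * recoverable interrupt taken here could not be handled safely.)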
*/ li r5, 0 mtmsrd r5, 1 - /* All GPRs are volatile at this point. */ + /* We have to treclaim here because that's the only way to do S->N */ + li r3, TM_CAUSE_KVM_RESCHED TRECLAIM(R3) - /* Temporarily store r13 and r9 so we have some regs to play with */ - SET_SCRATCH0(r13) - GET_PACA(r13) - std r9, PACATMSCRATCH(r13) - - /* If doing TM emulation on POWER9 DD2.2, check for fake suspend mode */ -BEGIN_FTR_SECTION - lbz r9, HSTATE_FAKE_SUSPEND(r13) - cmpwi r9, 0 - beq 2f /* * We were in fake suspend, so we are not going to save the * register state as the guest checkpointed state (since * we already have it), therefore we can now use any volatile GPR. */ - /* Reload stack pointer and TOC. */ + /* Reload PACA pointer, stack pointer and TOC. */ + GET_PACA(r13) ld r1, HSTATE_HOST_R1(r13) ld r2, PACATOC(r13) + /* Set MSR RI now we have r1 and r13 back. */ li r5, MSR_RI mtmsrd r5, 1 + HMT_MEDIUM ld r6, HSTATE_DSCR(r13) mtspr SPRN_DSCR, r6 @@ -3208,85 +3204,9 @@ END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96) li r0, PSSCR_FAKE_SUSPEND andc r3, r3, r0 mtspr SPRN_PSSCR, r3 - ld r9, HSTATE_KVM_VCPU(r13) + /* Don't save TEXASR, use value from last exit in real suspend state */ - b 11f -2: -END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) - ld r9, HSTATE_KVM_VCPU(r13) - - /* Get a few more GPRs free. */ - std r29, VCPU_GPRS_TM(29)(r9) - std r30, VCPU_GPRS_TM(30)(r9) - std r31, VCPU_GPRS_TM(31)(r9) - - /* Save away PPR and DSCR soon so don't run with user values. */ - mfspr r31, SPRN_PPR - HMT_MEDIUM - mfspr r30, SPRN_DSCR - ld r29, HSTATE_DSCR(r13) - mtspr SPRN_DSCR, r29 - - /* Save all but r9, r13 & r29-r31 */ - reg = 0 - .rept 29 - .if (reg != 9) && (reg != 13) - std reg, VCPU_GPRS_TM(reg)(r9) - .endif - reg = reg + 1 - .endr - /* ... now save r13 */ - GET_SCRATCH0(r4) - std r4, VCPU_GPRS_TM(13)(r9) - /* ... and save r9 */ - ld r4, PACATMSCRATCH(r13) - std r4, VCPU_GPRS_TM(9)(r9) - - /* Reload stack pointer and TOC. */ - ld r1, HSTATE_HOST_R1(r13) - ld r2, PACATOC(r13) - - /* Set MSR RI now we have r1 and r13 back. */ - li r5, MSR_RI - mtmsrd r5, 1 - - /* Save away checkpinted SPRs. */ - std r31, VCPU_PPR_TM(r9) - std r30, VCPU_DSCR_TM(r9) - mflr r5 - mfcr r6 - mfctr r7 - mfspr r8, SPRN_AMR - mfspr r10, SPRN_TAR - mfxer r11 - std r5, VCPU_LR_TM(r9) - stw r6, VCPU_CR_TM(r9) - std r7, VCPU_CTR_TM(r9) - std r8, VCPU_AMR_TM(r9) - std r10, VCPU_TAR_TM(r9) - std r11, VCPU_XER_TM(r9) - - /* Restore r12 as trap number. */ - lwz r12, VCPU_TRAP(r9) - - /* Save FP/VSX. */ - addi r3, r9, VCPU_FPRS_TM - bl store_fp_state - addi r3, r9, VCPU_VRS_TM - bl store_vr_state - mfspr r6, SPRN_VRSAVE - stw r6, VCPU_VRSAVE_TM(r9) -1: - /* - * We need to save these SPRs after the treclaim so that the software - * error code is recorded correctly in the TEXASR. Also the user may - * change these outside of a transaction, so they must always be - * context switched. - */ - mfspr r7, SPRN_TEXASR - std r7, VCPU_TEXASR(r9) -11: mfspr r5, SPRN_TFHAR mfspr r6, SPRN_TFIAR std r5, VCPU_TFHAR(r9) @@ -3299,149 +3219,63 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) /* * Restore transactional state and TM-related registers. - * Called with r4 pointing to the vcpu struct. + * Called with r3 pointing to the vcpu struct + * and r4 containing the guest MSR value. * This potentially modifies all checkpointed registers. - * It restores r1, r2, r4 from the PACA. + * It restores r1 and r2 from the PACA. 
*/ -kvmppc_restore_tm: - mflr r0 - std r0, PPC_LR_STKOFF(r1) - - /* Turn on TM/FP/VSX/VMX so we can restore them. */ - mfmsr r5 - li r6, MSR_TM >> 32 - sldi r6, r6, 32 - or r5, r5, r6 - ori r5, r5, MSR_FP - oris r5, r5, (MSR_VEC | MSR_VSX)@h - mtmsrd r5 - - /* - * The user may change these outside of a transaction, so they must - * always be context switched. - */ - ld r5, VCPU_TFHAR(r4) - ld r6, VCPU_TFIAR(r4) - ld r7, VCPU_TEXASR(r4) - mtspr SPRN_TFHAR, r5 - mtspr SPRN_TFIAR, r6 - mtspr SPRN_TEXASR, r7 - - li r0, 0 - stb r0, HSTATE_FAKE_SUSPEND(r13) - ld r5, VCPU_MSR(r4) - rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 - beqlr /* TM not active in guest */ - std r1, HSTATE_HOST_R1(r13) - - /* Make sure the failure summary is set, otherwise we'll program check - * when we trechkpt. It's possible that this might have been not set - * on a kvmppc_set_one_reg() call but we shouldn't let this crash the - * host. - */ - oris r7, r7, (TEXASR_FS)@h - mtspr SPRN_TEXASR, r7 - +kvmppc_restore_tm_hv: /* * If we are doing TM emulation for the guest on a POWER9 DD2, * then we don't actually do a trechkpt -- we either set up * fake-suspend mode, or emulate a TM rollback. */ BEGIN_FTR_SECTION - b .Ldo_tm_fake_load -END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) + b __kvmppc_restore_tm +END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) + mflr r0 + std r0, PPC_LR_STKOFF(r1) + + li r0, 0 + stb r0, HSTATE_FAKE_SUSPEND(r13) + + /* Turn on TM so we can restore TM SPRs */ + mfmsr r5 + li r0, 1 + rldimi r5, r0, MSR_TM_LG, 63-MSR_TM_LG + mtmsrd r5 /* - * We need to load up the checkpointed state for the guest. - * We need to do this early as it will blow away any GPRs, VSRs and - * some SPRs. + * The user may change these outside of a transaction, so they must + * always be context switched. */ + ld r5, VCPU_TFHAR(r3) + ld r6, VCPU_TFIAR(r3) + ld r7, VCPU_TEXASR(r3) + mtspr SPRN_TFHAR, r5 + mtspr SPRN_TFIAR, r6 + mtspr SPRN_TEXASR, r7 - mr r31, r4 - addi r3, r31, VCPU_FPRS_TM - bl load_fp_state - addi r3, r31, VCPU_VRS_TM - bl load_vr_state - mr r4, r31 - lwz r7, VCPU_VRSAVE_TM(r4) - mtspr SPRN_VRSAVE, r7 + rldicl. r5, r4, 64 - MSR_TS_S_LG, 62 + beqlr /* TM not active in guest */ - ld r5, VCPU_LR_TM(r4) - lwz r6, VCPU_CR_TM(r4) - ld r7, VCPU_CTR_TM(r4) - ld r8, VCPU_AMR_TM(r4) - ld r9, VCPU_TAR_TM(r4) - ld r10, VCPU_XER_TM(r4) - mtlr r5 - mtcr r6 - mtctr r7 - mtspr SPRN_AMR, r8 - mtspr SPRN_TAR, r9 - mtxer r10 + /* Make sure the failure summary is set */ + oris r7, r7, (TEXASR_FS)@h + mtspr SPRN_TEXASR, r7 - /* - * Load up PPR and DSCR values but don't put them in the actual SPRs - * till the last moment to avoid running with userspace PPR and DSCR for - * too long. - */ - ld r29, VCPU_DSCR_TM(r4) - ld r30, VCPU_PPR_TM(r4) - - std r2, PACATMSCRATCH(r13) /* Save TOC */ - - /* Clear the MSR RI since r1, r13 are all going to be foobar. */ - li r5, 0 - mtmsrd r5, 1 - - /* Load GPRs r0-r28 */ - reg = 0 - .rept 29 - ld reg, VCPU_GPRS_TM(reg)(r31) - reg = reg + 1 - .endr - - mtspr SPRN_DSCR, r29 - mtspr SPRN_PPR, r30 - - /* Load final GPRs */ - ld 29, VCPU_GPRS_TM(29)(r31) - ld 30, VCPU_GPRS_TM(30)(r31) - ld 31, VCPU_GPRS_TM(31)(r31) - - /* TM checkpointed state is now setup. All GPRs are now volatile. */ - TRECHKPT - - /* Now let's get back the state we need. */ - HMT_MEDIUM - GET_PACA(r13) - ld r29, HSTATE_DSCR(r13) - mtspr SPRN_DSCR, r29 - ld r4, HSTATE_KVM_VCPU(r13) - ld r1, HSTATE_HOST_R1(r13) - ld r2, PACATMSCRATCH(r13) - - /* Set the MSR RI since we have our registers back. 
*/ - li r5, MSR_RI - mtmsrd r5, 1 -9: - ld r0, PPC_LR_STKOFF(r1) - mtlr r0 - blr - -.Ldo_tm_fake_load: cmpwi r5, 1 /* check for suspended state */ bgt 10f stb r5, HSTATE_FAKE_SUSPEND(r13) - b 9b /* and return */ + b 9f /* and return */ 10: stdu r1, -PPC_MIN_STKFRM(r1) /* guest is in transactional state, so simulate rollback */ - mr r3, r4 bl kvmhv_emulate_tm_rollback nop - ld r4, HSTATE_KVM_VCPU(r13) /* our vcpu pointer has been trashed */ addi r1, r1, PPC_MIN_STKFRM - b 9b -#endif +9: ld r0, PPC_LR_STKOFF(r1) + mtlr r0 + blr +#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ /* * We come here if we get any exception or interrupt while we are @@ -3572,6 +3406,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) bcl 20, 31, .+4 5: mflr r3 addi r3, r3, 9f - 5b + li r4, -1 + rldimi r3, r4, 62, 0 /* ensure 0xc000000000000000 bits are set */ ld r4, PACAKMSR(r13) mtspr SPRN_SRR0, r3 mtspr SPRN_SRR1, r4 diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c index bf710ad3a6d7..008285058f9b 100644 --- a/arch/powerpc/kvm/book3s_hv_tm.c +++ b/arch/powerpc/kvm/book3s_hv_tm.c @@ -19,7 +19,7 @@ static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause) u64 texasr, tfiar; u64 msr = vcpu->arch.shregs.msr; - tfiar = vcpu->arch.pc & ~0x3ull; + tfiar = vcpu->arch.regs.nip & ~0x3ull; texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT; if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) texasr |= TEXASR_SUSP; @@ -57,8 +57,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) (newmsr & MSR_TM))); newmsr = sanitize_msr(newmsr); vcpu->arch.shregs.msr = newmsr; - vcpu->arch.cfar = vcpu->arch.pc - 4; - vcpu->arch.pc = vcpu->arch.shregs.srr0; + vcpu->arch.cfar = vcpu->arch.regs.nip - 4; + vcpu->arch.regs.nip = vcpu->arch.shregs.srr0; return RESUME_GUEST; case PPC_INST_RFEBB: @@ -90,8 +90,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) vcpu->arch.bescr = bescr; msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; vcpu->arch.shregs.msr = msr; - vcpu->arch.cfar = vcpu->arch.pc - 4; - vcpu->arch.pc = vcpu->arch.ebbrr; + vcpu->arch.cfar = vcpu->arch.regs.nip - 4; + vcpu->arch.regs.nip = vcpu->arch.ebbrr; return RESUME_GUEST; case PPC_INST_MTMSRD: diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c index d98ccfd2b88c..b2c7c6fca4f9 100644 --- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c @@ -35,8 +35,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) return 0; newmsr = sanitize_msr(newmsr); vcpu->arch.shregs.msr = newmsr; - vcpu->arch.cfar = vcpu->arch.pc - 4; - vcpu->arch.pc = vcpu->arch.shregs.srr0; + vcpu->arch.cfar = vcpu->arch.regs.nip - 4; + vcpu->arch.regs.nip = vcpu->arch.shregs.srr0; return 1; case PPC_INST_RFEBB: @@ -58,8 +58,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) mtspr(SPRN_BESCR, bescr); msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; vcpu->arch.shregs.msr = msr; - vcpu->arch.cfar = vcpu->arch.pc - 4; - vcpu->arch.pc = mfspr(SPRN_EBBRR); + vcpu->arch.cfar = vcpu->arch.regs.nip - 4; + vcpu->arch.regs.nip = mfspr(SPRN_EBBRR); return 1; case PPC_INST_MTMSRD: @@ -103,7 +103,7 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu) { vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */ - vcpu->arch.pc = vcpu->arch.tfhar; + vcpu->arch.regs.nip = vcpu->arch.tfhar; copy_from_checkpoint(vcpu); vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000; } diff --git a/arch/powerpc/kvm/book3s_pr.c 
b/arch/powerpc/kvm/book3s_pr.c index d3f304d06adf..c3b8006f0eac 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -42,6 +42,8 @@ #include #include #include +#include +#include #include "book3s.h" @@ -53,7 +55,9 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, ulong msr); -static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); +#ifdef CONFIG_PPC_BOOK3S_64 +static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac); +#endif /* Some compatibility defines */ #ifdef CONFIG_PPC_BOOK3S_32 @@ -114,6 +118,8 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) if (kvmppc_is_split_real(vcpu)) kvmppc_fixup_split_real(vcpu); + + kvmppc_restore_tm_pr(vcpu); } static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) @@ -133,6 +139,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); + kvmppc_save_tm_pr(vcpu); /* Enable AIL if supported */ if (cpu_has_feature(CPU_FTR_HVMODE) && @@ -147,25 +154,25 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu) { struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - svcpu->gpr[0] = vcpu->arch.gpr[0]; - svcpu->gpr[1] = vcpu->arch.gpr[1]; - svcpu->gpr[2] = vcpu->arch.gpr[2]; - svcpu->gpr[3] = vcpu->arch.gpr[3]; - svcpu->gpr[4] = vcpu->arch.gpr[4]; - svcpu->gpr[5] = vcpu->arch.gpr[5]; - svcpu->gpr[6] = vcpu->arch.gpr[6]; - svcpu->gpr[7] = vcpu->arch.gpr[7]; - svcpu->gpr[8] = vcpu->arch.gpr[8]; - svcpu->gpr[9] = vcpu->arch.gpr[9]; - svcpu->gpr[10] = vcpu->arch.gpr[10]; - svcpu->gpr[11] = vcpu->arch.gpr[11]; - svcpu->gpr[12] = vcpu->arch.gpr[12]; - svcpu->gpr[13] = vcpu->arch.gpr[13]; + svcpu->gpr[0] = vcpu->arch.regs.gpr[0]; + svcpu->gpr[1] = vcpu->arch.regs.gpr[1]; + svcpu->gpr[2] = vcpu->arch.regs.gpr[2]; + svcpu->gpr[3] = vcpu->arch.regs.gpr[3]; + svcpu->gpr[4] = vcpu->arch.regs.gpr[4]; + svcpu->gpr[5] = vcpu->arch.regs.gpr[5]; + svcpu->gpr[6] = vcpu->arch.regs.gpr[6]; + svcpu->gpr[7] = vcpu->arch.regs.gpr[7]; + svcpu->gpr[8] = vcpu->arch.regs.gpr[8]; + svcpu->gpr[9] = vcpu->arch.regs.gpr[9]; + svcpu->gpr[10] = vcpu->arch.regs.gpr[10]; + svcpu->gpr[11] = vcpu->arch.regs.gpr[11]; + svcpu->gpr[12] = vcpu->arch.regs.gpr[12]; + svcpu->gpr[13] = vcpu->arch.regs.gpr[13]; svcpu->cr = vcpu->arch.cr; - svcpu->xer = vcpu->arch.xer; - svcpu->ctr = vcpu->arch.ctr; - svcpu->lr = vcpu->arch.lr; - svcpu->pc = vcpu->arch.pc; + svcpu->xer = vcpu->arch.regs.xer; + svcpu->ctr = vcpu->arch.regs.ctr; + svcpu->lr = vcpu->arch.regs.link; + svcpu->pc = vcpu->arch.regs.nip; #ifdef CONFIG_PPC_BOOK3S_64 svcpu->shadow_fscr = vcpu->arch.shadow_fscr; #endif @@ -182,10 +189,45 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu) svcpu_put(svcpu); } +static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) +{ + ulong guest_msr = kvmppc_get_msr(vcpu); + ulong smsr = guest_msr; + + /* Guest MSR values */ +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE | + MSR_TM | MSR_TS_MASK; +#else + smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE; +#endif + /* Process MSR values */ + smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE; + /* External providers the guest reserved */ + smsr |= (guest_msr & vcpu->arch.guest_owned_ext); + /* 64-bit Process MSR values */ +#ifdef CONFIG_PPC_BOOK3S_64 + smsr |= MSR_ISF | MSR_HV; +#endif +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* + * in guest privileged state, we want to fail all TM transactions. 
+ * So disable the MSR TM bit so that all tbegin. instructions will + * be trapped into the host. + */ + if (!(guest_msr & MSR_PR)) + smsr &= ~MSR_TM; +#endif + vcpu->arch.shadow_msr = smsr; +} + /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu) { struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + ulong old_msr; +#endif /* * Maybe we were already preempted and synced the svcpu from @@ -194,25 +236,25 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu) if (!svcpu->in_use) goto out; - vcpu->arch.gpr[0] = svcpu->gpr[0]; - vcpu->arch.gpr[1] = svcpu->gpr[1]; - vcpu->arch.gpr[2] = svcpu->gpr[2]; - vcpu->arch.gpr[3] = svcpu->gpr[3]; - vcpu->arch.gpr[4] = svcpu->gpr[4]; - vcpu->arch.gpr[5] = svcpu->gpr[5]; - vcpu->arch.gpr[6] = svcpu->gpr[6]; - vcpu->arch.gpr[7] = svcpu->gpr[7]; - vcpu->arch.gpr[8] = svcpu->gpr[8]; - vcpu->arch.gpr[9] = svcpu->gpr[9]; - vcpu->arch.gpr[10] = svcpu->gpr[10]; - vcpu->arch.gpr[11] = svcpu->gpr[11]; - vcpu->arch.gpr[12] = svcpu->gpr[12]; - vcpu->arch.gpr[13] = svcpu->gpr[13]; + vcpu->arch.regs.gpr[0] = svcpu->gpr[0]; + vcpu->arch.regs.gpr[1] = svcpu->gpr[1]; + vcpu->arch.regs.gpr[2] = svcpu->gpr[2]; + vcpu->arch.regs.gpr[3] = svcpu->gpr[3]; + vcpu->arch.regs.gpr[4] = svcpu->gpr[4]; + vcpu->arch.regs.gpr[5] = svcpu->gpr[5]; + vcpu->arch.regs.gpr[6] = svcpu->gpr[6]; + vcpu->arch.regs.gpr[7] = svcpu->gpr[7]; + vcpu->arch.regs.gpr[8] = svcpu->gpr[8]; + vcpu->arch.regs.gpr[9] = svcpu->gpr[9]; + vcpu->arch.regs.gpr[10] = svcpu->gpr[10]; + vcpu->arch.regs.gpr[11] = svcpu->gpr[11]; + vcpu->arch.regs.gpr[12] = svcpu->gpr[12]; + vcpu->arch.regs.gpr[13] = svcpu->gpr[13]; vcpu->arch.cr = svcpu->cr; - vcpu->arch.xer = svcpu->xer; - vcpu->arch.ctr = svcpu->ctr; - vcpu->arch.lr = svcpu->lr; - vcpu->arch.pc = svcpu->pc; + vcpu->arch.regs.xer = svcpu->xer; + vcpu->arch.regs.ctr = svcpu->ctr; + vcpu->arch.regs.link = svcpu->lr; + vcpu->arch.regs.nip = svcpu->pc; vcpu->arch.shadow_srr1 = svcpu->shadow_srr1; vcpu->arch.fault_dar = svcpu->fault_dar; vcpu->arch.fault_dsisr = svcpu->fault_dsisr; @@ -228,12 +270,116 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu) to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb; if (cpu_has_feature(CPU_FTR_ARCH_207S)) vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic; + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* + * Unlike other MSR bits, the MSR[TS] bits can be changed in the guest + * without notifying the host: they are modified by unprivileged + * instructions such as "tbegin"/"tend"/"tresume"/"tsuspend" in a PR + * KVM guest. + * + * It is necessary to sync here so that a correct shadow_msr can be + * calculated. + * + * A privileged guest's tbegin always fails at present, so we only + * need to take care of the problem-state guest.
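+ * (The check below compares the MSR[TS] bits captured in shadow_srr1
+ * with the current guest MSR, and resynchronizes the guest MSR and
+ * shadow MSR when they differ.)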
+ */ + old_msr = kvmppc_get_msr(vcpu); + if (unlikely((old_msr & MSR_PR) && + (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) != + (old_msr & (MSR_TS_MASK)))) { + old_msr &= ~(MSR_TS_MASK); + old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)); + kvmppc_set_msr_fast(vcpu, old_msr); + kvmppc_recalc_shadow_msr(vcpu); + } +#endif + svcpu->in_use = false; out: svcpu_put(svcpu); } +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) +{ + tm_enable(); + vcpu->arch.tfhar = mfspr(SPRN_TFHAR); + vcpu->arch.texasr = mfspr(SPRN_TEXASR); + vcpu->arch.tfiar = mfspr(SPRN_TFIAR); + tm_disable(); +} + +void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) +{ + tm_enable(); + mtspr(SPRN_TFHAR, vcpu->arch.tfhar); + mtspr(SPRN_TEXASR, vcpu->arch.texasr); + mtspr(SPRN_TFIAR, vcpu->arch.tfiar); + tm_disable(); +} + +/* Load up the math (FP/VEC/VSX) facilities that are enabled at + * kvmppc_get_msr() but not yet enabled in hardware. + */ +static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu) +{ + ulong exit_nr; + ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) & + (MSR_FP | MSR_VEC | MSR_VSX); + + if (!ext_diff) + return; + + if (ext_diff == MSR_FP) + exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL; + else if (ext_diff == MSR_VEC) + exit_nr = BOOK3S_INTERRUPT_ALTIVEC; + else + exit_nr = BOOK3S_INTERRUPT_VSX; + + kvmppc_handle_ext(vcpu, exit_nr, ext_diff); +} + +void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) +{ + if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) { + kvmppc_save_tm_sprs(vcpu); + return; + } + + kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); + kvmppc_giveup_ext(vcpu, MSR_VSX); + + preempt_disable(); + _kvmppc_save_tm_pr(vcpu, mfmsr()); + preempt_enable(); +} + +void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) +{ + if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) { + kvmppc_restore_tm_sprs(vcpu); + if (kvmppc_get_msr(vcpu) & MSR_TM) { + kvmppc_handle_lost_math_exts(vcpu); + if (vcpu->arch.fscr & FSCR_TAR) + kvmppc_handle_fac(vcpu, FSCR_TAR_LG); + } + return; + } + + preempt_disable(); + _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu)); + preempt_enable(); + + if (kvmppc_get_msr(vcpu) & MSR_TM) { + kvmppc_handle_lost_math_exts(vcpu); + if (vcpu->arch.fscr & FSCR_TAR) + kvmppc_handle_fac(vcpu, FSCR_TAR_LG); + } +} +#endif + static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) { int r = 1; /* Indicate we want to get back into the guest */ @@ -306,32 +452,29 @@ static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte) /*****************************************/ -static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) -{ - ulong guest_msr = kvmppc_get_msr(vcpu); - ulong smsr = guest_msr; - - /* Guest MSR values */ - smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE; - /* Process MSR values */ - smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE; - /* External providers the guest reserved */ - smsr |= (guest_msr & vcpu->arch.guest_owned_ext); - /* 64-bit Process MSR values */ -#ifdef CONFIG_PPC_BOOK3S_64 - smsr |= MSR_ISF | MSR_HV; -#endif - vcpu->arch.shadow_msr = smsr; -} - static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) { - ulong old_msr = kvmppc_get_msr(vcpu); + ulong old_msr; + + /* For PAPR guest, make sure MSR reflects guest mode */ + if (vcpu->arch.papr_enabled) + msr = (msr & ~MSR_HV) | MSR_ME; #ifdef EXIT_DEBUG printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr); #endif +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* We should never let the guest MSR reach TS=10 (transactional) with + * PR=0, since we always fail transactions in guest privileged + * state.
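+ * (Such an attempt is instead converted into an emulated tabort with a
+ * persistent failure cause, as the kvmppc_emulate_tabort() call below
+ * shows.)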
*/ + if (!(msr & MSR_PR) && MSR_TM_TRANSACTIONAL(msr)) + kvmppc_emulate_tabort(vcpu, + TM_CAUSE_KVM_FAC_UNAV | TM_CAUSE_PERSISTENT); +#endif + + old_msr = kvmppc_get_msr(vcpu); msr &= to_book3s(vcpu)->msr_mask; kvmppc_set_msr_fast(vcpu, msr); kvmppc_recalc_shadow_msr(vcpu); @@ -387,6 +530,11 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) /* Preload FPU if it's enabled */ if (kvmppc_get_msr(vcpu) & MSR_FP) kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (kvmppc_get_msr(vcpu) & MSR_TM) + kvmppc_handle_lost_math_exts(vcpu); +#endif } void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) @@ -584,24 +732,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, pte.may_execute = !data; } - if (page_found == -ENOENT) { - /* Page not found in guest PTE entries */ - u64 ssrr1 = vcpu->arch.shadow_srr1; - u64 msr = kvmppc_get_msr(vcpu); - kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); - kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr); - kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL)); - kvmppc_book3s_queue_irqprio(vcpu, vec); - } else if (page_found == -EPERM) { - /* Storage protection */ - u32 dsisr = vcpu->arch.fault_dsisr; - u64 ssrr1 = vcpu->arch.shadow_srr1; - u64 msr = kvmppc_get_msr(vcpu); - kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); - dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT; - kvmppc_set_dsisr(vcpu, dsisr); - kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL)); - kvmppc_book3s_queue_irqprio(vcpu, vec); + if (page_found == -ENOENT || page_found == -EPERM) { + /* Page not found in guest PTE entries, or protection fault */ + u64 flags; + + if (page_found == -EPERM) + flags = DSISR_PROTFAULT; + else + flags = DSISR_NOHPTE; + if (data) { + flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE; + kvmppc_core_queue_data_storage(vcpu, eaddr, flags); + } else { + kvmppc_core_queue_inst_storage(vcpu, flags); + } } else if (page_found == -EINVAL) { /* Page not found in guest SLB */ kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); @@ -683,7 +827,7 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) } /* Give up facility (TAR / EBB / DSCR) */ -static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac) +void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac) { #ifdef CONFIG_PPC_BOOK3S_64 if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) { @@ -802,7 +946,7 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) #ifdef CONFIG_PPC_BOOK3S_64 -static void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac) +void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac) { /* Inject the Interrupt Cause field and trigger a guest interrupt */ vcpu->arch.fscr &= ~(0xffULL << 56); @@ -864,6 +1008,18 @@ static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac) break; } +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* Since MSR_TM is disabled in guest privileged state, an mfspr of a + * TM SPR there can trigger a TM facility-unavailable interrupt. In + * that case the emulation is handled by kvmppc_emulate_fac(), which + * finally invokes kvmppc_emulate_mfspr(). But note that the mfspr's + * RT may name non-volatile registers, so those NV registers need to + * be restored to reflect the update.
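+ * (Returning RESUME_GUEST_NV below asks the exit path to reload the
+ * guest's non-volatile register state on the way back in.)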
+ */ + if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR)) + return RESUME_GUEST_NV; +#endif + return RESUME_GUEST; } @@ -872,7 +1028,12 @@ void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr) if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) { /* TAR got dropped, drop it in shadow too */ kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); + } else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) { + vcpu->arch.fscr = fscr; + kvmppc_handle_fac(vcpu, FSCR_TAR_LG); + return; } + vcpu->arch.fscr = fscr; } #endif @@ -1017,10 +1178,8 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); r = RESUME_GUEST; } else { - u64 msr = kvmppc_get_msr(vcpu); - msr |= shadow_srr1 & 0x58000000; - kvmppc_set_msr_fast(vcpu, msr); - kvmppc_book3s_queue_irqprio(vcpu, exit_nr); + kvmppc_core_queue_inst_storage(vcpu, + shadow_srr1 & 0x58000000); r = RESUME_GUEST; } break; @@ -1059,9 +1218,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); srcu_read_unlock(&vcpu->kvm->srcu, idx); } else { - kvmppc_set_dar(vcpu, dar); - kvmppc_set_dsisr(vcpu, fault_dsisr); - kvmppc_book3s_queue_irqprio(vcpu, exit_nr); + kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr); r = RESUME_GUEST; } break; @@ -1092,10 +1249,13 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, case BOOK3S_INTERRUPT_EXTERNAL: case BOOK3S_INTERRUPT_EXTERNAL_LEVEL: case BOOK3S_INTERRUPT_EXTERNAL_HV: + case BOOK3S_INTERRUPT_H_VIRT: vcpu->stat.ext_intr_exits++; r = RESUME_GUEST; break; + case BOOK3S_INTERRUPT_HMI: case BOOK3S_INTERRUPT_PERFMON: + case BOOK3S_INTERRUPT_SYSTEM_RESET: r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_PROGRAM: @@ -1225,8 +1385,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, } #ifdef CONFIG_PPC_BOOK3S_64 case BOOK3S_INTERRUPT_FAC_UNAVAIL: - kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); - r = RESUME_GUEST; + r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); break; #endif case BOOK3S_INTERRUPT_MACHINE_CHECK: @@ -1379,6 +1538,73 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, else *val = get_reg_val(id, 0); break; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + case KVM_REG_PPC_TFHAR: + *val = get_reg_val(id, vcpu->arch.tfhar); + break; + case KVM_REG_PPC_TFIAR: + *val = get_reg_val(id, vcpu->arch.tfiar); + break; + case KVM_REG_PPC_TEXASR: + *val = get_reg_val(id, vcpu->arch.texasr); + break; + case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: + *val = get_reg_val(id, + vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]); + break; + case KVM_REG_PPC_TM_VSR0 ... 
KVM_REG_PPC_TM_VSR63: + { + int i, j; + + i = id - KVM_REG_PPC_TM_VSR0; + if (i < 32) + for (j = 0; j < TS_FPRWIDTH; j++) + val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; + else { + if (cpu_has_feature(CPU_FTR_ALTIVEC)) + val->vval = vcpu->arch.vr_tm.vr[i-32]; + else + r = -ENXIO; + } + break; + } + case KVM_REG_PPC_TM_CR: + *val = get_reg_val(id, vcpu->arch.cr_tm); + break; + case KVM_REG_PPC_TM_XER: + *val = get_reg_val(id, vcpu->arch.xer_tm); + break; + case KVM_REG_PPC_TM_LR: + *val = get_reg_val(id, vcpu->arch.lr_tm); + break; + case KVM_REG_PPC_TM_CTR: + *val = get_reg_val(id, vcpu->arch.ctr_tm); + break; + case KVM_REG_PPC_TM_FPSCR: + *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); + break; + case KVM_REG_PPC_TM_AMR: + *val = get_reg_val(id, vcpu->arch.amr_tm); + break; + case KVM_REG_PPC_TM_PPR: + *val = get_reg_val(id, vcpu->arch.ppr_tm); + break; + case KVM_REG_PPC_TM_VRSAVE: + *val = get_reg_val(id, vcpu->arch.vrsave_tm); + break; + case KVM_REG_PPC_TM_VSCR: + if (cpu_has_feature(CPU_FTR_ALTIVEC)) + *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); + else + r = -ENXIO; + break; + case KVM_REG_PPC_TM_DSCR: + *val = get_reg_val(id, vcpu->arch.dscr_tm); + break; + case KVM_REG_PPC_TM_TAR: + *val = get_reg_val(id, vcpu->arch.tar_tm); + break; +#endif default: r = -EINVAL; break; @@ -1412,6 +1638,72 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_LPCR_64: kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); break; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + case KVM_REG_PPC_TFHAR: + vcpu->arch.tfhar = set_reg_val(id, *val); + break; + case KVM_REG_PPC_TFIAR: + vcpu->arch.tfiar = set_reg_val(id, *val); + break; + case KVM_REG_PPC_TEXASR: + vcpu->arch.texasr = set_reg_val(id, *val); + break; + case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: + vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] = + set_reg_val(id, *val); + break; + case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: + { + int i, j; + + i = id - KVM_REG_PPC_TM_VSR0; + if (i < 32) + for (j = 0; j < TS_FPRWIDTH; j++) + vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; + else + if (cpu_has_feature(CPU_FTR_ALTIVEC)) + vcpu->arch.vr_tm.vr[i-32] = val->vval; + else + r = -ENXIO; + break; + } + case KVM_REG_PPC_TM_CR: + vcpu->arch.cr_tm = set_reg_val(id, *val); + break; + case KVM_REG_PPC_TM_XER: + vcpu->arch.xer_tm = set_reg_val(id, *val); + break; + case KVM_REG_PPC_TM_LR: + vcpu->arch.lr_tm = set_reg_val(id, *val); + break; + case KVM_REG_PPC_TM_CTR: + vcpu->arch.ctr_tm = set_reg_val(id, *val); + break; + case KVM_REG_PPC_TM_FPSCR: + vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); + break; + case KVM_REG_PPC_TM_AMR: + vcpu->arch.amr_tm = set_reg_val(id, *val); + break; + case KVM_REG_PPC_TM_PPR: + vcpu->arch.ppr_tm = set_reg_val(id, *val); + break; + case KVM_REG_PPC_TM_VRSAVE: + vcpu->arch.vrsave_tm = set_reg_val(id, *val); + break; + case KVM_REG_PPC_TM_VSCR: + if (cpu_has_feature(CPU_FTR_ALTIVEC)) + vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); + else + r = -ENXIO; + break; + case KVM_REG_PPC_TM_DSCR: + vcpu->arch.dscr_tm = set_reg_val(id, *val); + break; + case KVM_REG_PPC_TM_TAR: + vcpu->arch.tar_tm = set_reg_val(id, *val); + break; +#endif default: r = -EINVAL; break; @@ -1687,6 +1979,17 @@ static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, return 0; } + +static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) +{ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + return -ENODEV; + /* Require flags and process table base and size to all be zero. 
*/ + if (cfg->flags || cfg->process_table) + return -EINVAL; + return 0; +} + #else static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, struct kvm_ppc_smmu_info *info) @@ -1735,9 +2038,12 @@ static void kvmppc_core_destroy_vm_pr(struct kvm *kvm) static int kvmppc_core_check_processor_compat_pr(void) { /* - * Disable KVM for Power9 untill the required bits merged. + * PR KVM can work on POWER9 inside a guest partition + * running in HPT mode. It can't work if we are using + * radix translation (because radix provides no way for + * a process to have unique translations in quadrant 3). */ - if (cpu_has_feature(CPU_FTR_ARCH_300)) + if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled()) return -EIO; return 0; } @@ -1781,7 +2087,9 @@ static struct kvmppc_ops kvm_ops_pr = { .arch_vm_ioctl = kvm_arch_vm_ioctl_pr, #ifdef CONFIG_PPC_BOOK3S_64 .hcall_implemented = kvmppc_hcall_impl_pr, + .configure_mmu = kvm_configure_mmu_pr, #endif + .giveup_ext = kvmppc_giveup_ext, }; diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index 93a180ceefad..98ccc7ec5d48 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S @@ -383,6 +383,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) */ PPC_LL r6, HSTATE_HOST_MSR(r13) +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* + * We don't want to change MSR[TS] bits via rfi here. + * The actual TM handling logic will be in host with + * recovered DR/IR bits after HSTATE_VMHANDLER. + * And MSR_TM can be enabled in HOST_MSR so rfid may + * not suppress this change and can lead to exception. + * Manually set MSR to prevent TS state change here. + */ + mfmsr r7 + rldicl r7, r7, 64 - MSR_TS_S_LG, 62 + rldimi r6, r7, MSR_TS_S_LG, 63 - MSR_TS_T_LG +#endif PPC_LL r8, HSTATE_VMHANDLER(r13) #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c index 99c3620b40d9..6e41ba7ec8f4 100644 --- a/arch/powerpc/kvm/book3s_xive_template.c +++ b/arch/powerpc/kvm/book3s_xive_template.c @@ -334,7 +334,7 @@ X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu) */ /* Return interrupt and old CPPR in GPR4 */ - vcpu->arch.gpr[4] = hirq | (old_cppr << 24); + vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24); return H_SUCCESS; } @@ -369,7 +369,7 @@ X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll); /* Return interrupt and old CPPR in GPR4 */ - vcpu->arch.gpr[4] = hirq | (xc->cppr << 24); + vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24); return H_SUCCESS; } diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 876d4f294fdd..a9ca016da670 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -77,8 +77,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) { int i; - printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr); - printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr); + printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip, + vcpu->arch.shared->msr); + printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link, + vcpu->arch.regs.ctr); printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0, vcpu->arch.shared->srr1); @@ -491,24 +493,25 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, if (allowed) { switch (int_class) { case INT_CLASS_NONCRIT: - set_guest_srr(vcpu, vcpu->arch.pc, + set_guest_srr(vcpu, vcpu->arch.regs.nip, vcpu->arch.shared->msr); break; case INT_CLASS_CRIT: - set_guest_csrr(vcpu, 
vcpu->arch.pc, + set_guest_csrr(vcpu, vcpu->arch.regs.nip, vcpu->arch.shared->msr); break; case INT_CLASS_DBG: - set_guest_dsrr(vcpu, vcpu->arch.pc, + set_guest_dsrr(vcpu, vcpu->arch.regs.nip, vcpu->arch.shared->msr); break; case INT_CLASS_MC: - set_guest_mcsrr(vcpu, vcpu->arch.pc, + set_guest_mcsrr(vcpu, vcpu->arch.regs.nip, vcpu->arch.shared->msr); break; } - vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; + vcpu->arch.regs.nip = vcpu->arch.ivpr | + vcpu->arch.ivor[priority]; if (update_esr == true) kvmppc_set_esr(vcpu, vcpu->arch.queued_esr); if (update_dear == true) @@ -826,7 +829,7 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) case EMULATE_FAIL: printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", - __func__, vcpu->arch.pc, vcpu->arch.last_inst); + __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst); /* For debugging, encode the failing instruction and * report it to userspace. */ run->hw.hardware_exit_reason = ~0ULL << 32; @@ -875,7 +878,7 @@ static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu) */ vcpu->arch.dbsr = 0; run->debug.arch.status = 0; - run->debug.arch.address = vcpu->arch.pc; + run->debug.arch.address = vcpu->arch.regs.nip; if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) { run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT; @@ -971,7 +974,7 @@ static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu, case EMULATE_FAIL: pr_debug("%s: load instruction from guest address %lx failed\n", - __func__, vcpu->arch.pc); + __func__, vcpu->arch.regs.nip); /* For debugging, encode the failing instruction and * report it to userspace. */ run->hw.hardware_exit_reason = ~0ULL << 32; @@ -1169,7 +1172,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, case BOOKE_INTERRUPT_SPE_FP_DATA: case BOOKE_INTERRUPT_SPE_FP_ROUND: printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n", - __func__, exit_nr, vcpu->arch.pc); + __func__, exit_nr, vcpu->arch.regs.nip); run->hw.hardware_exit_reason = exit_nr; r = RESUME_HOST; break; @@ -1299,7 +1302,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, } case BOOKE_INTERRUPT_ITLB_MISS: { - unsigned long eaddr = vcpu->arch.pc; + unsigned long eaddr = vcpu->arch.regs.nip; gpa_t gpaddr; gfn_t gfn; int gtlb_index; @@ -1391,7 +1394,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) int i; int r; - vcpu->arch.pc = 0; + vcpu->arch.regs.nip = 0; vcpu->arch.shared->pir = vcpu->vcpu_id; kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ kvmppc_set_msr(vcpu, 0); @@ -1440,10 +1443,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) vcpu_load(vcpu); - regs->pc = vcpu->arch.pc; + regs->pc = vcpu->arch.regs.nip; regs->cr = kvmppc_get_cr(vcpu); - regs->ctr = vcpu->arch.ctr; - regs->lr = vcpu->arch.lr; + regs->ctr = vcpu->arch.regs.ctr; + regs->lr = vcpu->arch.regs.link; regs->xer = kvmppc_get_xer(vcpu); regs->msr = vcpu->arch.shared->msr; regs->srr0 = kvmppc_get_srr0(vcpu); @@ -1471,10 +1474,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) vcpu_load(vcpu); - vcpu->arch.pc = regs->pc; + vcpu->arch.regs.nip = regs->pc; kvmppc_set_cr(vcpu, regs->cr); - vcpu->arch.ctr = regs->ctr; - vcpu->arch.lr = regs->lr; + vcpu->arch.regs.ctr = regs->ctr; + vcpu->arch.regs.link = regs->lr; kvmppc_set_xer(vcpu, regs->xer); kvmppc_set_msr(vcpu, regs->msr); kvmppc_set_srr0(vcpu, regs->srr0); diff --git a/arch/powerpc/kvm/booke_emulate.c 
b/arch/powerpc/kvm/booke_emulate.c index a82f64502de1..d23e582f0fee 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c @@ -34,19 +34,19 @@ static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) { - vcpu->arch.pc = vcpu->arch.shared->srr0; + vcpu->arch.regs.nip = vcpu->arch.shared->srr0; kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); } static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu) { - vcpu->arch.pc = vcpu->arch.dsrr0; + vcpu->arch.regs.nip = vcpu->arch.dsrr0; kvmppc_set_msr(vcpu, vcpu->arch.dsrr1); } static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu) { - vcpu->arch.pc = vcpu->arch.csrr0; + vcpu->arch.regs.nip = vcpu->arch.csrr0; kvmppc_set_msr(vcpu, vcpu->arch.csrr1); } diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 990db69a1d0b..3f8189eb56ed 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -53,7 +53,7 @@ static int dbell2prio(ulong param) static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb) { - ulong param = vcpu->arch.gpr[rb]; + ulong param = vcpu->arch.regs.gpr[rb]; int prio = dbell2prio(param); if (prio < 0) @@ -65,7 +65,7 @@ static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb) static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb) { - ulong param = vcpu->arch.gpr[rb]; + ulong param = vcpu->arch.regs.gpr[rb]; int prio = dbell2prio(rb); int pir = param & PPC_DBELL_PIR_MASK; int i; @@ -94,7 +94,7 @@ static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu, switch (get_oc(inst)) { case EHPRIV_OC_DEBUG: run->exit_reason = KVM_EXIT_DEBUG; - run->debug.arch.address = vcpu->arch.pc; + run->debug.arch.address = vcpu->arch.regs.nip; run->debug.arch.status = 0; kvmppc_account_exit(vcpu, DEBUG_EXITS); emulated = EMULATE_EXIT_USER; diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c index ddbf8f0284c0..24296f4cadc6 100644 --- a/arch/powerpc/kvm/e500_mmu.c +++ b/arch/powerpc/kvm/e500_mmu.c @@ -513,7 +513,7 @@ void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu) { unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); - kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as); + kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.regs.nip, as); } void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu) diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index c878b4ffb86f..8f2985e46f6f 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -625,8 +625,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, } #ifdef CONFIG_KVM_BOOKE_HV -int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, - u32 *instr) +int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, + enum instruction_fetch_type type, u32 *instr) { gva_t geaddr; hpa_t addr; @@ -715,8 +715,8 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, return EMULATE_DONE; } #else -int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, - u32 *instr) +int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, + enum instruction_fetch_type type, u32 *instr) { return EMULATE_AGAIN; } diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c index a382e15135e6..afde788be141 100644 --- a/arch/powerpc/kvm/emulate_loadstore.c +++ b/arch/powerpc/kvm/emulate_loadstore.c @@ -31,6 +31,7 @@ #include #include #include +#include #include "timing.h" #include "trace.h" @@ -84,8 +85,9 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) 
struct kvm_run *run = vcpu->run; u32 inst; int ra, rs, rt; - enum emulation_result emulated; + enum emulation_result emulated = EMULATE_FAIL; int advance = 1; + struct instruction_op op; /* this default type might be overwritten by subcategories */ kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); @@ -107,580 +109,276 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst); vcpu->arch.mmio_vsx_copy_nums = 0; vcpu->arch.mmio_vsx_offset = 0; - vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE; + vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE; vcpu->arch.mmio_sp64_extend = 0; vcpu->arch.mmio_sign_extend = 0; vcpu->arch.mmio_vmx_copy_nums = 0; + vcpu->arch.mmio_vmx_offset = 0; + vcpu->arch.mmio_host_swabbed = 0; + + emulated = EMULATE_FAIL; + vcpu->arch.regs.msr = vcpu->arch.shared->msr; + vcpu->arch.regs.ccr = vcpu->arch.cr; + if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) { + int type = op.type & INSTR_TYPE_MASK; + int size = GETSIZE(op.type); + + switch (type) { + case LOAD: { + int instr_byte_swap = op.type & BYTEREV; + + if (op.type & SIGNEXT) + emulated = kvmppc_handle_loads(run, vcpu, + op.reg, size, !instr_byte_swap); + else + emulated = kvmppc_handle_load(run, vcpu, + op.reg, size, !instr_byte_swap); + + if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) + kvmppc_set_gpr(vcpu, op.update_reg, op.ea); - switch (get_op(inst)) { - case 31: - switch (get_xop(inst)) { - case OP_31_XOP_LWZX: - emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; + } +#ifdef CONFIG_PPC_FPU + case LOAD_FP: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + + if (op.type & FPCONV) + vcpu->arch.mmio_sp64_extend = 1; + + if (op.type & SIGNEXT) + emulated = kvmppc_handle_loads(run, vcpu, + KVM_MMIO_REG_FPR|op.reg, size, 1); + else + emulated = kvmppc_handle_load(run, vcpu, + KVM_MMIO_REG_FPR|op.reg, size, 1); + + if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) + kvmppc_set_gpr(vcpu, op.update_reg, op.ea); - case OP_31_XOP_LWZUX: - emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; +#endif +#ifdef CONFIG_ALTIVEC + case LOAD_VMX: + if (kvmppc_check_altivec_disabled(vcpu)) + return EMULATE_DONE; - case OP_31_XOP_LBZX: - emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); + /* Hardware enforces alignment of VMX accesses */ + vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1); + vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1); + + if (size == 16) { /* lvx */ + vcpu->arch.mmio_copy_type = + KVMPPC_VMX_COPY_DWORD; + } else if (size == 4) { /* lvewx */ + vcpu->arch.mmio_copy_type = + KVMPPC_VMX_COPY_WORD; + } else if (size == 2) { /* lvehx */ + vcpu->arch.mmio_copy_type = + KVMPPC_VMX_COPY_HWORD; + } else if (size == 1) { /* lvebx */ + vcpu->arch.mmio_copy_type = + KVMPPC_VMX_COPY_BYTE; + } else + break; + + vcpu->arch.mmio_vmx_offset = + (vcpu->arch.vaddr_accessed & 0xf)/size; + + if (size == 16) { + vcpu->arch.mmio_vmx_copy_nums = 2; + emulated = kvmppc_handle_vmx_load(run, + vcpu, KVM_MMIO_REG_VMX|op.reg, + 8, 1); + } else { + vcpu->arch.mmio_vmx_copy_nums = 1; + emulated = kvmppc_handle_vmx_load(run, vcpu, + KVM_MMIO_REG_VMX|op.reg, + size, 1); + } break; +#endif +#ifdef CONFIG_VSX + case LOAD_VSX: { + int io_size_each; - case OP_31_XOP_LBZUX: - emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); + if (op.vsx_flags & VSX_CHECK_VEC) { + if (kvmppc_check_altivec_disabled(vcpu)) + return EMULATE_DONE; + 
} else { + if (kvmppc_check_vsx_disabled(vcpu)) + return EMULATE_DONE; + } + + if (op.vsx_flags & VSX_FPCONV) + vcpu->arch.mmio_sp64_extend = 1; + + if (op.element_size == 8) { + if (op.vsx_flags & VSX_SPLAT) + vcpu->arch.mmio_copy_type = + KVMPPC_VSX_COPY_DWORD_LOAD_DUMP; + else + vcpu->arch.mmio_copy_type = + KVMPPC_VSX_COPY_DWORD; + } else if (op.element_size == 4) { + if (op.vsx_flags & VSX_SPLAT) + vcpu->arch.mmio_copy_type = + KVMPPC_VSX_COPY_WORD_LOAD_DUMP; + else + vcpu->arch.mmio_copy_type = + KVMPPC_VSX_COPY_WORD; + } else + break; + + if (size < op.element_size) { + /* precision convert case: lxsspx, etc */ + vcpu->arch.mmio_vsx_copy_nums = 1; + io_size_each = size; + } else { /* lxvw4x, lxvd2x, etc */ + vcpu->arch.mmio_vsx_copy_nums = + size/op.element_size; + io_size_each = op.element_size; + } + + emulated = kvmppc_handle_vsx_load(run, vcpu, + KVM_MMIO_REG_VSX | (op.reg & 0x1f), + io_size_each, 1, op.type & SIGNEXT); break; + } +#endif + case STORE: + /* if need byte reverse, op.val has been reversed by + * analyse_instr(). + */ + emulated = kvmppc_handle_store(run, vcpu, op.val, + size, 1); + + if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) + kvmppc_set_gpr(vcpu, op.update_reg, op.ea); + + break; +#ifdef CONFIG_PPC_FPU + case STORE_FP: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + + /* The FP registers need to be flushed so that + * kvmppc_handle_store() can read actual FP vals + * from vcpu->arch. + */ + if (vcpu->kvm->arch.kvm_ops->giveup_ext) + vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, + MSR_FP); + + if (op.type & FPCONV) + vcpu->arch.mmio_sp64_extend = 1; - case OP_31_XOP_STDX: emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), 8, 1); - break; + VCPU_FPR(vcpu, op.reg), size, 1); - case OP_31_XOP_STDUX: - emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), 8, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; + if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) + kvmppc_set_gpr(vcpu, op.update_reg, op.ea); - case OP_31_XOP_STWX: - emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), 4, 1); break; +#endif +#ifdef CONFIG_ALTIVEC + case STORE_VMX: + if (kvmppc_check_altivec_disabled(vcpu)) + return EMULATE_DONE; + + /* Hardware enforces alignment of VMX accesses. 
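+ * (The low-order address bits are therefore ignored by the hardware,
+ * so the masking below aligns the emulated access the same way.)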
*/ + vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1); + vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1); + + if (vcpu->kvm->arch.kvm_ops->giveup_ext) + vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, + MSR_VEC); + if (size == 16) { /* stvx */ + vcpu->arch.mmio_copy_type = + KVMPPC_VMX_COPY_DWORD; + } else if (size == 4) { /* stvewx */ + vcpu->arch.mmio_copy_type = + KVMPPC_VMX_COPY_WORD; + } else if (size == 2) { /* stvehx */ + vcpu->arch.mmio_copy_type = + KVMPPC_VMX_COPY_HWORD; + } else if (size == 1) { /* stvebx */ + vcpu->arch.mmio_copy_type = + KVMPPC_VMX_COPY_BYTE; + } else + break; + + vcpu->arch.mmio_vmx_offset = + (vcpu->arch.vaddr_accessed & 0xf)/size; + + if (size == 16) { + vcpu->arch.mmio_vmx_copy_nums = 2; + emulated = kvmppc_handle_vmx_store(run, + vcpu, op.reg, 8, 1); + } else { + vcpu->arch.mmio_vmx_copy_nums = 1; + emulated = kvmppc_handle_vmx_store(run, + vcpu, op.reg, size, 1); + } - case OP_31_XOP_STWUX: - emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), 4, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; +#endif +#ifdef CONFIG_VSX + case STORE_VSX: { + int io_size_each; - case OP_31_XOP_STBX: - emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), 1, 1); + if (op.vsx_flags & VSX_CHECK_VEC) { + if (kvmppc_check_altivec_disabled(vcpu)) + return EMULATE_DONE; + } else { + if (kvmppc_check_vsx_disabled(vcpu)) + return EMULATE_DONE; + } + + if (vcpu->kvm->arch.kvm_ops->giveup_ext) + vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, + MSR_VSX); + + if (op.vsx_flags & VSX_FPCONV) + vcpu->arch.mmio_sp64_extend = 1; + + if (op.element_size == 8) + vcpu->arch.mmio_copy_type = + KVMPPC_VSX_COPY_DWORD; + else if (op.element_size == 4) + vcpu->arch.mmio_copy_type = + KVMPPC_VSX_COPY_WORD; + else + break; + + if (size < op.element_size) { + /* precise conversion case, like stxsspx */ + vcpu->arch.mmio_vsx_copy_nums = 1; + io_size_each = size; + } else { /* stxvw4x, stxvd2x, etc */ + vcpu->arch.mmio_vsx_copy_nums = + size/op.element_size; + io_size_each = op.element_size; + } + + emulated = kvmppc_handle_vsx_store(run, vcpu, + op.reg & 0x1f, io_size_each, 1); break; - - case OP_31_XOP_STBUX: - emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), 1, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - - case OP_31_XOP_LHAX: - emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); - break; - - case OP_31_XOP_LHAUX: - emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - - case OP_31_XOP_LHZX: - emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); - break; - - case OP_31_XOP_LHZUX: - emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - - case OP_31_XOP_STHX: - emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), 2, 1); - break; - - case OP_31_XOP_STHUX: - emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), 2, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - - case OP_31_XOP_DCBST: - case OP_31_XOP_DCBF: - case OP_31_XOP_DCBI: + } +#endif + case CACHEOP: /* Do nothing. The guest is performing dcbi because * hardware DMA is not snooped by the dcache, but * emulated DMA either goes through the dcache as * normal writes, or the host kernel has handled dcache - * coherence. 
*/ - break; - - case OP_31_XOP_LWBRX: - emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); - break; - - case OP_31_XOP_STWBRX: - emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), 4, 0); - break; - - case OP_31_XOP_LHBRX: - emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); - break; - - case OP_31_XOP_STHBRX: - emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), 2, 0); - break; - - case OP_31_XOP_LDBRX: - emulated = kvmppc_handle_load(run, vcpu, rt, 8, 0); - break; - - case OP_31_XOP_STDBRX: - emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), 8, 0); - break; - - case OP_31_XOP_LDX: - emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); - break; - - case OP_31_XOP_LDUX: - emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - - case OP_31_XOP_LWAX: - emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1); - break; - - case OP_31_XOP_LWAUX: - emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - -#ifdef CONFIG_PPC_FPU - case OP_31_XOP_LFSX: - if (kvmppc_check_fp_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_sp64_extend = 1; - emulated = kvmppc_handle_load(run, vcpu, - KVM_MMIO_REG_FPR|rt, 4, 1); - break; - - case OP_31_XOP_LFSUX: - if (kvmppc_check_fp_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_sp64_extend = 1; - emulated = kvmppc_handle_load(run, vcpu, - KVM_MMIO_REG_FPR|rt, 4, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - - case OP_31_XOP_LFDX: - if (kvmppc_check_fp_disabled(vcpu)) - return EMULATE_DONE; - emulated = kvmppc_handle_load(run, vcpu, - KVM_MMIO_REG_FPR|rt, 8, 1); - break; - - case OP_31_XOP_LFDUX: - if (kvmppc_check_fp_disabled(vcpu)) - return EMULATE_DONE; - emulated = kvmppc_handle_load(run, vcpu, - KVM_MMIO_REG_FPR|rt, 8, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - - case OP_31_XOP_LFIWAX: - if (kvmppc_check_fp_disabled(vcpu)) - return EMULATE_DONE; - emulated = kvmppc_handle_loads(run, vcpu, - KVM_MMIO_REG_FPR|rt, 4, 1); - break; - - case OP_31_XOP_LFIWZX: - if (kvmppc_check_fp_disabled(vcpu)) - return EMULATE_DONE; - emulated = kvmppc_handle_load(run, vcpu, - KVM_MMIO_REG_FPR|rt, 4, 1); - break; - - case OP_31_XOP_STFSX: - if (kvmppc_check_fp_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_sp64_extend = 1; - emulated = kvmppc_handle_store(run, vcpu, - VCPU_FPR(vcpu, rs), 4, 1); - break; - - case OP_31_XOP_STFSUX: - if (kvmppc_check_fp_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_sp64_extend = 1; - emulated = kvmppc_handle_store(run, vcpu, - VCPU_FPR(vcpu, rs), 4, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - - case OP_31_XOP_STFDX: - if (kvmppc_check_fp_disabled(vcpu)) - return EMULATE_DONE; - emulated = kvmppc_handle_store(run, vcpu, - VCPU_FPR(vcpu, rs), 8, 1); - break; - - case OP_31_XOP_STFDUX: - if (kvmppc_check_fp_disabled(vcpu)) - return EMULATE_DONE; - emulated = kvmppc_handle_store(run, vcpu, - VCPU_FPR(vcpu, rs), 8, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - - case OP_31_XOP_STFIWX: - if (kvmppc_check_fp_disabled(vcpu)) - return EMULATE_DONE; - emulated = kvmppc_handle_store(run, vcpu, - VCPU_FPR(vcpu, rs), 4, 1); - break; -#endif - -#ifdef CONFIG_VSX - case OP_31_XOP_LXSDX: - if (kvmppc_check_vsx_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_vsx_copy_nums = 1; - vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; - emulated 
= kvmppc_handle_vsx_load(run, vcpu, - KVM_MMIO_REG_VSX|rt, 8, 1, 0); - break; - - case OP_31_XOP_LXSSPX: - if (kvmppc_check_vsx_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_vsx_copy_nums = 1; - vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; - vcpu->arch.mmio_sp64_extend = 1; - emulated = kvmppc_handle_vsx_load(run, vcpu, - KVM_MMIO_REG_VSX|rt, 4, 1, 0); - break; - - case OP_31_XOP_LXSIWAX: - if (kvmppc_check_vsx_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_vsx_copy_nums = 1; - vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; - emulated = kvmppc_handle_vsx_load(run, vcpu, - KVM_MMIO_REG_VSX|rt, 4, 1, 1); - break; - - case OP_31_XOP_LXSIWZX: - if (kvmppc_check_vsx_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_vsx_copy_nums = 1; - vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; - emulated = kvmppc_handle_vsx_load(run, vcpu, - KVM_MMIO_REG_VSX|rt, 4, 1, 0); - break; - - case OP_31_XOP_LXVD2X: - /* - * In this case, the official load/store process is like this: - * Step1, exit from vm by page fault isr, then kvm save vsr. - * Please see guest_exit_cont->store_fp_state->SAVE_32VSRS - * as reference. - * - * Step2, copy data between memory and VCPU - * Notice: for LXVD2X/STXVD2X/LXVW4X/STXVW4X, we use - * 2copies*8bytes or 4copies*4bytes - * to simulate one copy of 16bytes. - * Also there is an endian issue here, we should notice the - * layout of memory. - * Please see MARCO of LXVD2X_ROT/STXVD2X_ROT as more reference. - * If host is little-endian, kvm will call XXSWAPD for - * LXVD2X_ROT/STXVD2X_ROT. - * So, if host is little-endian, - * the postion of memeory should be swapped. - * - * Step3, return to guest, kvm reset register. - * Please see kvmppc_hv_entry->load_fp_state->REST_32VSRS - * as reference. 
- */ - if (kvmppc_check_vsx_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_vsx_copy_nums = 2; - vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; - emulated = kvmppc_handle_vsx_load(run, vcpu, - KVM_MMIO_REG_VSX|rt, 8, 1, 0); - break; - - case OP_31_XOP_LXVW4X: - if (kvmppc_check_vsx_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_vsx_copy_nums = 4; - vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD; - emulated = kvmppc_handle_vsx_load(run, vcpu, - KVM_MMIO_REG_VSX|rt, 4, 1, 0); - break; - - case OP_31_XOP_LXVDSX: - if (kvmppc_check_vsx_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_vsx_copy_nums = 1; - vcpu->arch.mmio_vsx_copy_type = - KVMPPC_VSX_COPY_DWORD_LOAD_DUMP; - emulated = kvmppc_handle_vsx_load(run, vcpu, - KVM_MMIO_REG_VSX|rt, 8, 1, 0); - break; - - case OP_31_XOP_STXSDX: - if (kvmppc_check_vsx_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_vsx_copy_nums = 1; - vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; - emulated = kvmppc_handle_vsx_store(run, vcpu, - rs, 8, 1); - break; - - case OP_31_XOP_STXSSPX: - if (kvmppc_check_vsx_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_vsx_copy_nums = 1; - vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; - vcpu->arch.mmio_sp64_extend = 1; - emulated = kvmppc_handle_vsx_store(run, vcpu, - rs, 4, 1); - break; - - case OP_31_XOP_STXSIWX: - if (kvmppc_check_vsx_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_vsx_offset = 1; - vcpu->arch.mmio_vsx_copy_nums = 1; - vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD; - emulated = kvmppc_handle_vsx_store(run, vcpu, - rs, 4, 1); - break; - - case OP_31_XOP_STXVD2X: - if (kvmppc_check_vsx_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_vsx_copy_nums = 2; - vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; - emulated = kvmppc_handle_vsx_store(run, vcpu, - rs, 8, 1); - break; - - case OP_31_XOP_STXVW4X: - if (kvmppc_check_vsx_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_vsx_copy_nums = 4; - vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD; - emulated = kvmppc_handle_vsx_store(run, vcpu, - rs, 4, 1); - break; -#endif /* CONFIG_VSX */ - -#ifdef CONFIG_ALTIVEC - case OP_31_XOP_LVX: - if (kvmppc_check_altivec_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.vaddr_accessed &= ~0xFULL; - vcpu->arch.paddr_accessed &= ~0xFULL; - vcpu->arch.mmio_vmx_copy_nums = 2; - emulated = kvmppc_handle_load128_by2x64(run, vcpu, - KVM_MMIO_REG_VMX|rt, 1); - break; - - case OP_31_XOP_STVX: - if (kvmppc_check_altivec_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.vaddr_accessed &= ~0xFULL; - vcpu->arch.paddr_accessed &= ~0xFULL; - vcpu->arch.mmio_vmx_copy_nums = 2; - emulated = kvmppc_handle_store128_by2x64(run, vcpu, - rs, 1); - break; -#endif /* CONFIG_ALTIVEC */ - - default: - emulated = EMULATE_FAIL; - break; - } - break; - - case OP_LWZ: - emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); - break; - -#ifdef CONFIG_PPC_FPU - case OP_STFS: - if (kvmppc_check_fp_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_sp64_extend = 1; - emulated = kvmppc_handle_store(run, vcpu, - VCPU_FPR(vcpu, rs), - 4, 1); - break; - - case OP_STFSU: - if (kvmppc_check_fp_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_sp64_extend = 1; - emulated = kvmppc_handle_store(run, vcpu, - VCPU_FPR(vcpu, rs), - 4, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - - case OP_STFD: - if (kvmppc_check_fp_disabled(vcpu)) - return EMULATE_DONE; - emulated = kvmppc_handle_store(run, vcpu, - VCPU_FPR(vcpu, 
rs), - 8, 1); - break; - - case OP_STFDU: - if (kvmppc_check_fp_disabled(vcpu)) - return EMULATE_DONE; - emulated = kvmppc_handle_store(run, vcpu, - VCPU_FPR(vcpu, rs), - 8, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; -#endif - - case OP_LD: - rt = get_rt(inst); - switch (inst & 3) { - case 0: /* ld */ - emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); - break; - case 1: /* ldu */ - emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - case 2: /* lwa */ - emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1); + * coherence. + */ + emulated = EMULATE_DONE; break; default: - emulated = EMULATE_FAIL; - } - break; - - case OP_LWZU: - emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - - case OP_LBZ: - emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); - break; - - case OP_LBZU: - emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - - case OP_STW: - emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), - 4, 1); - break; - - case OP_STD: - rs = get_rs(inst); - switch (inst & 3) { - case 0: /* std */ - emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), 8, 1); break; - case 1: /* stdu */ - emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), 8, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - default: - emulated = EMULATE_FAIL; } - break; - - case OP_STWU: - emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), 4, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - - case OP_STB: - emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), 1, 1); - break; - - case OP_STBU: - emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), 1, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - - case OP_LHZ: - emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); - break; - - case OP_LHZU: - emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - - case OP_LHA: - emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); - break; - - case OP_LHAU: - emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - - case OP_STH: - emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), 2, 1); - break; - - case OP_STHU: - emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), 2, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - -#ifdef CONFIG_PPC_FPU - case OP_LFS: - if (kvmppc_check_fp_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_sp64_extend = 1; - emulated = kvmppc_handle_load(run, vcpu, - KVM_MMIO_REG_FPR|rt, 4, 1); - break; - - case OP_LFSU: - if (kvmppc_check_fp_disabled(vcpu)) - return EMULATE_DONE; - vcpu->arch.mmio_sp64_extend = 1; - emulated = kvmppc_handle_load(run, vcpu, - KVM_MMIO_REG_FPR|rt, 4, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; - - case OP_LFD: - if (kvmppc_check_fp_disabled(vcpu)) - return EMULATE_DONE; - emulated = kvmppc_handle_load(run, vcpu, - KVM_MMIO_REG_FPR|rt, 8, 1); - break; - - case OP_LFDU: - if (kvmppc_check_fp_disabled(vcpu)) - return EMULATE_DONE; - emulated = kvmppc_handle_load(run, vcpu, - KVM_MMIO_REG_FPR|rt, 8, 1); - kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); - break; -#endif - - default: - emulated = 
EMULATE_FAIL; - break; } if (emulated == EMULATE_FAIL) { diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 4e387647b5af..0e8c20c5eaac 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -648,9 +648,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) #endif #ifdef CONFIG_PPC_TRANSACTIONAL_MEM case KVM_CAP_PPC_HTM: - r = hv_enabled && - (!!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) || - cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)); + r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) || + (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)); break; #endif default: @@ -907,6 +906,26 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu, } } +static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu, + u32 gpr) +{ + union kvmppc_one_reg val; + int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; + + if (vcpu->arch.mmio_vsx_tx_sx_enabled) { + val.vsx32val[0] = gpr; + val.vsx32val[1] = gpr; + val.vsx32val[2] = gpr; + val.vsx32val[3] = gpr; + VCPU_VSX_VR(vcpu, index) = val.vval; + } else { + val.vsx32val[0] = gpr; + val.vsx32val[1] = gpr; + VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0]; + VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0]; + } +} + static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, u32 gpr32) { @@ -933,30 +952,110 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, #endif /* CONFIG_VSX */ #ifdef CONFIG_ALTIVEC -static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu, - u64 gpr) +static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu, + int index, int element_size) { + int offset; + int elts = sizeof(vector128)/element_size; + + if ((index < 0) || (index >= elts)) + return -1; + + if (kvmppc_need_byteswap(vcpu)) + offset = elts - index - 1; + else + offset = index; + + return offset; +} + +static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu, + int index) +{ + return kvmppc_get_vmx_offset_generic(vcpu, index, 8); +} + +static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu, + int index) +{ + return kvmppc_get_vmx_offset_generic(vcpu, index, 4); +} + +static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu, + int index) +{ + return kvmppc_get_vmx_offset_generic(vcpu, index, 2); +} + +static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu, + int index) +{ + return kvmppc_get_vmx_offset_generic(vcpu, index, 1); +} + + +static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu, + u64 gpr) +{ + union kvmppc_one_reg val; + int offset = kvmppc_get_vmx_dword_offset(vcpu, + vcpu->arch.mmio_vmx_offset); int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; - u32 hi, lo; - u32 di; -#ifdef __BIG_ENDIAN - hi = gpr >> 32; - lo = gpr & 0xffffffff; -#else - lo = gpr >> 32; - hi = gpr & 0xffffffff; -#endif - - di = 2 - vcpu->arch.mmio_vmx_copy_nums; /* doubleword index */ - if (di > 1) + if (offset == -1) return; - if (vcpu->arch.mmio_host_swabbed) - di = 1 - di; + val.vval = VCPU_VSX_VR(vcpu, index); + val.vsxval[offset] = gpr; + VCPU_VSX_VR(vcpu, index) = val.vval; +} - VCPU_VSX_VR(vcpu, index).u[di * 2] = hi; - VCPU_VSX_VR(vcpu, index).u[di * 2 + 1] = lo; +static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu, + u32 gpr32) +{ + union kvmppc_one_reg val; + int offset = kvmppc_get_vmx_word_offset(vcpu, + vcpu->arch.mmio_vmx_offset); + int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; + + if (offset == -1) + return; + + val.vval = VCPU_VSX_VR(vcpu, index); + val.vsx32val[offset] = gpr32; + 
VCPU_VSX_VR(vcpu, index) = val.vval; +} + +static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu, + u16 gpr16) +{ + union kvmppc_one_reg val; + int offset = kvmppc_get_vmx_hword_offset(vcpu, + vcpu->arch.mmio_vmx_offset); + int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; + + if (offset == -1) + return; + + val.vval = VCPU_VSX_VR(vcpu, index); + val.vsx16val[offset] = gpr16; + VCPU_VSX_VR(vcpu, index) = val.vval; +} + +static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu, + u8 gpr8) +{ + union kvmppc_one_reg val; + int offset = kvmppc_get_vmx_byte_offset(vcpu, + vcpu->arch.mmio_vmx_offset); + int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; + + if (offset == -1) + return; + + val.vval = VCPU_VSX_VR(vcpu, index); + val.vsx8val[offset] = gpr8; + VCPU_VSX_VR(vcpu, index) = val.vval; } #endif /* CONFIG_ALTIVEC */ @@ -1041,6 +1140,9 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); break; case KVM_MMIO_REG_FPR: + if (vcpu->kvm->arch.kvm_ops->giveup_ext) + vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); + VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; break; #ifdef CONFIG_PPC_BOOK3S @@ -1054,18 +1156,36 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, #endif #ifdef CONFIG_VSX case KVM_MMIO_REG_VSX: - if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD) + if (vcpu->kvm->arch.kvm_ops->giveup_ext) + vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); + + if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) kvmppc_set_vsr_dword(vcpu, gpr); - else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD) + else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) kvmppc_set_vsr_word(vcpu, gpr); - else if (vcpu->arch.mmio_vsx_copy_type == + else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD_LOAD_DUMP) kvmppc_set_vsr_dword_dump(vcpu, gpr); + else if (vcpu->arch.mmio_copy_type == + KVMPPC_VSX_COPY_WORD_LOAD_DUMP) + kvmppc_set_vsr_word_dump(vcpu, gpr); break; #endif #ifdef CONFIG_ALTIVEC case KVM_MMIO_REG_VMX: - kvmppc_set_vmx_dword(vcpu, gpr); + if (vcpu->kvm->arch.kvm_ops->giveup_ext) + vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); + + if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) + kvmppc_set_vmx_dword(vcpu, gpr); + else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) + kvmppc_set_vmx_word(vcpu, gpr); + else if (vcpu->arch.mmio_copy_type == + KVMPPC_VMX_COPY_HWORD) + kvmppc_set_vmx_hword(vcpu, gpr); + else if (vcpu->arch.mmio_copy_type == + KVMPPC_VMX_COPY_BYTE) + kvmppc_set_vmx_byte(vcpu, gpr); break; #endif default: @@ -1228,7 +1348,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) u32 dword_offset, word_offset; union kvmppc_one_reg reg; int vsx_offset = 0; - int copy_type = vcpu->arch.mmio_vsx_copy_type; + int copy_type = vcpu->arch.mmio_copy_type; int result = 0; switch (copy_type) { @@ -1344,14 +1464,16 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu, #endif /* CONFIG_VSX */ #ifdef CONFIG_ALTIVEC -/* handle quadword load access in two halves */ -int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu, - unsigned int rt, int is_default_endian) +int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int rt, unsigned int bytes, int is_default_endian) { enum emulation_result emulated = EMULATE_DONE; + if (vcpu->arch.mmio_vmx_copy_nums > 2) + return EMULATE_FAIL; + while (vcpu->arch.mmio_vmx_copy_nums) { - emulated = __kvmppc_handle_load(run,
vcpu, rt, 8, + emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, + is_default_endian, 0); if (emulated != EMULATE_DONE) @@ -1359,55 +1481,127 @@ int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu, vcpu->arch.paddr_accessed += run->mmio.len; vcpu->arch.mmio_vmx_copy_nums--; + vcpu->arch.mmio_vmx_offset++; } return emulated; } -static inline int kvmppc_get_vmx_data(struct kvm_vcpu *vcpu, int rs, u64 *val) +int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) { - vector128 vrs = VCPU_VSX_VR(vcpu, rs); - u32 di; - u64 w0, w1; + union kvmppc_one_reg reg; + int vmx_offset = 0; + int result = 0; - di = 2 - vcpu->arch.mmio_vmx_copy_nums; /* doubleword index */ - if (di > 1) + vmx_offset = + kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); + + if (vmx_offset == -1) return -1; - if (vcpu->arch.mmio_host_swabbed) - di = 1 - di; + reg.vval = VCPU_VSX_VR(vcpu, index); + *val = reg.vsxval[vmx_offset]; - w0 = vrs.u[di * 2]; - w1 = vrs.u[di * 2 + 1]; - -#ifdef __BIG_ENDIAN - *val = (w0 << 32) | w1; -#else - *val = (w1 << 32) | w0; -#endif - return 0; + return result; } -/* handle quadword store in two halves */ -int kvmppc_handle_store128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu, - unsigned int rs, int is_default_endian) +int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) +{ + union kvmppc_one_reg reg; + int vmx_offset = 0; + int result = 0; + + vmx_offset = + kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); + + if (vmx_offset == -1) + return -1; + + reg.vval = VCPU_VSX_VR(vcpu, index); + *val = reg.vsx32val[vmx_offset]; + + return result; +} + +int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) +{ + union kvmppc_one_reg reg; + int vmx_offset = 0; + int result = 0; + + vmx_offset = + kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); + + if (vmx_offset == -1) + return -1; + + reg.vval = VCPU_VSX_VR(vcpu, index); + *val = reg.vsx16val[vmx_offset]; + + return result; +} + +int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) +{ + union kvmppc_one_reg reg; + int vmx_offset = 0; + int result = 0; + + vmx_offset = + kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); + + if (vmx_offset == -1) + return -1; + + reg.vval = VCPU_VSX_VR(vcpu, index); + *val = reg.vsx8val[vmx_offset]; + + return result; +} + +int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int rs, unsigned int bytes, int is_default_endian) { u64 val = 0; + unsigned int index = rs & KVM_MMIO_REG_MASK; enum emulation_result emulated = EMULATE_DONE; + if (vcpu->arch.mmio_vmx_copy_nums > 2) + return EMULATE_FAIL; + vcpu->arch.io_gpr = rs; while (vcpu->arch.mmio_vmx_copy_nums) { + switch (vcpu->arch.mmio_copy_type) { + case KVMPPC_VMX_COPY_DWORD: + if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1) + return EMULATE_FAIL; - emulated = kvmppc_handle_store(run, vcpu, val, 8, + break; + case KVMPPC_VMX_COPY_WORD: + if (kvmppc_get_vmx_word(vcpu, index, &val) == -1) + return EMULATE_FAIL; + break; + case KVMPPC_VMX_COPY_HWORD: + if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1) + return EMULATE_FAIL; + break; + case KVMPPC_VMX_COPY_BYTE: + if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1) + return EMULATE_FAIL; + break; + default: + return EMULATE_FAIL; + } + + emulated = kvmppc_handle_store(run, vcpu, val, bytes, + is_default_endian); if (emulated != EMULATE_DONE) break; vcpu->arch.paddr_accessed +=
run->mmio.len; vcpu->arch.mmio_vmx_copy_nums--; + vcpu->arch.mmio_vmx_offset++; } return emulated; @@ -1422,11 +1616,11 @@ static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu, vcpu->arch.paddr_accessed += run->mmio.len; if (!vcpu->mmio_is_write) { - emulated = kvmppc_handle_load128_by2x64(run, vcpu, - vcpu->arch.io_gpr, 1); + emulated = kvmppc_handle_vmx_load(run, vcpu, + vcpu->arch.io_gpr, run->mmio.len, 1); } else { - emulated = kvmppc_handle_store128_by2x64(run, vcpu, - vcpu->arch.io_gpr, 1); + emulated = kvmppc_handle_vmx_store(run, vcpu, + vcpu->arch.io_gpr, run->mmio.len, 1); } switch (emulated) { @@ -1570,8 +1764,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) } #endif #ifdef CONFIG_ALTIVEC - if (vcpu->arch.mmio_vmx_copy_nums > 0) + if (vcpu->arch.mmio_vmx_copy_nums > 0) { vcpu->arch.mmio_vmx_copy_nums--; + vcpu->arch.mmio_vmx_offset++; + } if (vcpu->arch.mmio_vmx_copy_nums > 0) { r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run); @@ -1784,16 +1980,16 @@ long kvm_arch_vcpu_ioctl(struct file *filp, void __user *argp = (void __user *)arg; long r; - vcpu_load(vcpu); - switch (ioctl) { case KVM_ENABLE_CAP: { struct kvm_enable_cap cap; r = -EFAULT; + vcpu_load(vcpu); if (copy_from_user(&cap, argp, sizeof(cap))) goto out; r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); + vcpu_put(vcpu); break; } @@ -1815,9 +2011,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_DIRTY_TLB: { struct kvm_dirty_tlb dirty; r = -EFAULT; + vcpu_load(vcpu); if (copy_from_user(&dirty, argp, sizeof(dirty))) goto out; r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); + vcpu_put(vcpu); break; } #endif @@ -1826,11 +2024,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, } out: - vcpu_put(vcpu); return r; } -int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } diff --git a/arch/powerpc/kvm/tm.S b/arch/powerpc/kvm/tm.S new file mode 100644 index 000000000000..90e330f21356 --- /dev/null +++ b/arch/powerpc/kvm/tm.S @@ -0,0 +1,384 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Derived from book3s_hv_rmhandlers.S, which is: + * + * Copyright 2011 Paul Mackerras, IBM Corp. + * + */ + +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) + +/* + * Save transactional state and TM-related registers. + * Called with: + * - r3 pointing to the vcpu struct + * - r4 points to the MSR with current TS bits: + * (For HV KVM, it is VCPU_MSR ; For PR KVM, it is host MSR). + * This can modify all checkpointed registers, but + * restores r1, r2 before exit. + */ +_GLOBAL(__kvmppc_save_tm) + mflr r0 + std r0, PPC_LR_STKOFF(r1) + + /* Turn on TM. */ + mfmsr r8 + li r0, 1 + rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG + ori r8, r8, MSR_FP + oris r8, r8, (MSR_VEC | MSR_VSX)@h + mtmsrd r8 + + rldicl. r4, r4, 64 - MSR_TS_S_LG, 62 + beq 1f /* TM not active in guest. 
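+ * (the rldicl. above isolated the MSR[TS] field; zero means the guest was neither in transactional nor suspended state, so there is no checkpointed state to reclaim)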
*/ + + std r1, HSTATE_SCRATCH2(r13) + std r3, HSTATE_SCRATCH1(r13) + +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE +BEGIN_FTR_SECTION + /* Emulation of the treclaim instruction needs TEXASR before treclaim */ + mfspr r6, SPRN_TEXASR + std r6, VCPU_ORIG_TEXASR(r3) +END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) +#endif + + /* Clear the MSR RI since r1, r13 are all going to be foobar. */ + li r5, 0 + mtmsrd r5, 1 + + li r3, TM_CAUSE_KVM_RESCHED + + /* All GPRs are volatile at this point. */ + TRECLAIM(R3) + + /* Temporarily store r13 and r9 so we have some regs to play with */ + SET_SCRATCH0(r13) + GET_PACA(r13) + std r9, PACATMSCRATCH(r13) + ld r9, HSTATE_SCRATCH1(r13) + + /* Get a few more GPRs free. */ + std r29, VCPU_GPRS_TM(29)(r9) + std r30, VCPU_GPRS_TM(30)(r9) + std r31, VCPU_GPRS_TM(31)(r9) + + /* Save away PPR and DSCR soon so we don't run with user values. */ + mfspr r31, SPRN_PPR + HMT_MEDIUM + mfspr r30, SPRN_DSCR +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + ld r29, HSTATE_DSCR(r13) + mtspr SPRN_DSCR, r29 +#endif + + /* Save all but r9, r13 & r29-r31 */ + reg = 0 + .rept 29 + .if (reg != 9) && (reg != 13) + std reg, VCPU_GPRS_TM(reg)(r9) + .endif + reg = reg + 1 + .endr + /* ... now save r13 */ + GET_SCRATCH0(r4) + std r4, VCPU_GPRS_TM(13)(r9) + /* ... and save r9 */ + ld r4, PACATMSCRATCH(r13) + std r4, VCPU_GPRS_TM(9)(r9) + + /* Reload stack pointer and TOC. */ + ld r1, HSTATE_SCRATCH2(r13) + ld r2, PACATOC(r13) + + /* Set MSR RI now we have r1 and r13 back. */ + li r5, MSR_RI + mtmsrd r5, 1 + + /* Save away checkpointed SPRs. */ + std r31, VCPU_PPR_TM(r9) + std r30, VCPU_DSCR_TM(r9) + mflr r5 + mfcr r6 + mfctr r7 + mfspr r8, SPRN_AMR + mfspr r10, SPRN_TAR + mfxer r11 + std r5, VCPU_LR_TM(r9) + stw r6, VCPU_CR_TM(r9) + std r7, VCPU_CTR_TM(r9) + std r8, VCPU_AMR_TM(r9) + std r10, VCPU_TAR_TM(r9) + std r11, VCPU_XER_TM(r9) + + /* Restore r12 as trap number. */ + lwz r12, VCPU_TRAP(r9) + + /* Save FP/VSX. */ + addi r3, r9, VCPU_FPRS_TM + bl store_fp_state + addi r3, r9, VCPU_VRS_TM + bl store_vr_state + mfspr r6, SPRN_VRSAVE + stw r6, VCPU_VRSAVE_TM(r9) +1: + /* + * We need to save these SPRs after the treclaim so that the software + * error code is recorded correctly in the TEXASR. Also the user may + * change these outside of a transaction, so they must always be + * context switched. + */ + mfspr r7, SPRN_TEXASR + std r7, VCPU_TEXASR(r9) +11: + mfspr r5, SPRN_TFHAR + mfspr r6, SPRN_TFIAR + std r5, VCPU_TFHAR(r9) + std r6, VCPU_TFIAR(r9) + + ld r0, PPC_LR_STKOFF(r1) + mtlr r0 + blr + +/* + * _kvmppc_save_tm_pr() is a wrapper around __kvmppc_save_tm(), so that it can + * be invoked from C function by PR KVM only. + */ +_GLOBAL(_kvmppc_save_tm_pr) + mflr r5 + std r5, PPC_LR_STKOFF(r1) + stdu r1, -SWITCH_FRAME_SIZE(r1) + SAVE_NVGPRS(r1) + + /* save MSR since TM/math bits might be impacted + * by __kvmppc_save_tm().
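+ * The MSR image saved here is restored at the end of this wrapper with the then-current MSR_TS bits merged back in, because treclaim inside __kvmppc_save_tm changes the transaction state.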
+ */ + mfmsr r5 + SAVE_GPR(5, r1) + + /* also save DSCR/CR/TAR so that it can be recovered later */ + mfspr r6, SPRN_DSCR + SAVE_GPR(6, r1) + + mfcr r7 + stw r7, _CCR(r1) + + mfspr r8, SPRN_TAR + SAVE_GPR(8, r1) + + bl __kvmppc_save_tm + + REST_GPR(8, r1) + mtspr SPRN_TAR, r8 + + ld r7, _CCR(r1) + mtcr r7 + + REST_GPR(6, r1) + mtspr SPRN_DSCR, r6 + + /* need preserve current MSR's MSR_TS bits */ + REST_GPR(5, r1) + mfmsr r6 + rldicl r6, r6, 64 - MSR_TS_S_LG, 62 + rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG + mtmsrd r5 + + REST_NVGPRS(r1) + addi r1, r1, SWITCH_FRAME_SIZE + ld r5, PPC_LR_STKOFF(r1) + mtlr r5 + blr + +EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr); + +/* + * Restore transactional state and TM-related registers. + * Called with: + * - r3 pointing to the vcpu struct. + * - r4 is the guest MSR with desired TS bits: + * For HV KVM, it is VCPU_MSR + * For PR KVM, it is provided by caller + * This potentially modifies all checkpointed registers. + * It restores r1, r2 from the PACA. + */ +_GLOBAL(__kvmppc_restore_tm) + mflr r0 + std r0, PPC_LR_STKOFF(r1) + + /* Turn on TM/FP/VSX/VMX so we can restore them. */ + mfmsr r5 + li r6, MSR_TM >> 32 + sldi r6, r6, 32 + or r5, r5, r6 + ori r5, r5, MSR_FP + oris r5, r5, (MSR_VEC | MSR_VSX)@h + mtmsrd r5 + + /* + * The user may change these outside of a transaction, so they must + * always be context switched. + */ + ld r5, VCPU_TFHAR(r3) + ld r6, VCPU_TFIAR(r3) + ld r7, VCPU_TEXASR(r3) + mtspr SPRN_TFHAR, r5 + mtspr SPRN_TFIAR, r6 + mtspr SPRN_TEXASR, r7 + + mr r5, r4 + rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 + beqlr /* TM not active in guest */ + std r1, HSTATE_SCRATCH2(r13) + + /* Make sure the failure summary is set, otherwise we'll program check + * when we trechkpt. It's possible that this might have been not set + * on a kvmppc_set_one_reg() call but we shouldn't let this crash the + * host. + */ + oris r7, r7, (TEXASR_FS)@h + mtspr SPRN_TEXASR, r7 + + /* + * We need to load up the checkpointed state for the guest. + * We need to do this early as it will blow away any GPRs, VSRs and + * some SPRs. + */ + + mr r31, r3 + addi r3, r31, VCPU_FPRS_TM + bl load_fp_state + addi r3, r31, VCPU_VRS_TM + bl load_vr_state + mr r3, r31 + lwz r7, VCPU_VRSAVE_TM(r3) + mtspr SPRN_VRSAVE, r7 + + ld r5, VCPU_LR_TM(r3) + lwz r6, VCPU_CR_TM(r3) + ld r7, VCPU_CTR_TM(r3) + ld r8, VCPU_AMR_TM(r3) + ld r9, VCPU_TAR_TM(r3) + ld r10, VCPU_XER_TM(r3) + mtlr r5 + mtcr r6 + mtctr r7 + mtspr SPRN_AMR, r8 + mtspr SPRN_TAR, r9 + mtxer r10 + + /* + * Load up PPR and DSCR values but don't put them in the actual SPRs + * till the last moment to avoid running with userspace PPR and DSCR for + * too long. + */ + ld r29, VCPU_DSCR_TM(r3) + ld r30, VCPU_PPR_TM(r3) + + std r2, PACATMSCRATCH(r13) /* Save TOC */ + + /* Clear the MSR RI since r1, r13 are all going to be foobar. */ + li r5, 0 + mtmsrd r5, 1 + + /* Load GPRs r0-r28 */ + reg = 0 + .rept 29 + ld reg, VCPU_GPRS_TM(reg)(r31) + reg = reg + 1 + .endr + + mtspr SPRN_DSCR, r29 + mtspr SPRN_PPR, r30 + + /* Load final GPRs */ + ld 29, VCPU_GPRS_TM(29)(r31) + ld 30, VCPU_GPRS_TM(30)(r31) + ld 31, VCPU_GPRS_TM(31)(r31) + + /* TM checkpointed state is now setup. All GPRs are now volatile. */ + TRECHKPT + + /* Now let's get back the state we need. */ + HMT_MEDIUM + GET_PACA(r13) +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + ld r29, HSTATE_DSCR(r13) + mtspr SPRN_DSCR, r29 +#endif + ld r1, HSTATE_SCRATCH2(r13) + ld r2, PACATMSCRATCH(r13) + + /* Set the MSR RI since we have our registers back. 
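+ * (with r1 and r13 holding valid host values again, interrupts taken from this point on are recoverable)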
*/ + li r5, MSR_RI + mtmsrd r5, 1 + ld r0, PPC_LR_STKOFF(r1) + mtlr r0 + blr + +/* + * _kvmppc_restore_tm_pr() is a wrapper around __kvmppc_restore_tm(), so that it + * can be invoked from C function by PR KVM only. + */ +_GLOBAL(_kvmppc_restore_tm_pr) + mflr r5 + std r5, PPC_LR_STKOFF(r1) + stdu r1, -SWITCH_FRAME_SIZE(r1) + SAVE_NVGPRS(r1) + + /* save MSR to avoid TM/math bits change */ + mfmsr r5 + SAVE_GPR(5, r1) + + /* also save DSCR/CR/TAR so that it can be recovered later */ + mfspr r6, SPRN_DSCR + SAVE_GPR(6, r1) + + mfcr r7 + stw r7, _CCR(r1) + + mfspr r8, SPRN_TAR + SAVE_GPR(8, r1) + + bl __kvmppc_restore_tm + + REST_GPR(8, r1) + mtspr SPRN_TAR, r8 + + ld r7, _CCR(r1) + mtcr r7 + + REST_GPR(6, r1) + mtspr SPRN_DSCR, r6 + + /* need preserve current MSR's MSR_TS bits */ + REST_GPR(5, r1) + mfmsr r6 + rldicl r6, r6, 64 - MSR_TS_S_LG, 62 + rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG + mtmsrd r5 + + REST_NVGPRS(r1) + addi r1, r1, SWITCH_FRAME_SIZE + ld r5, PPC_LR_STKOFF(r1) + mtlr r5 + blr + +EXPORT_SYMBOL_GPL(_kvmppc_restore_tm_pr); +#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c index 94058c21a482..6aa774aa5b16 100644 --- a/arch/powerpc/lib/rheap.c +++ b/arch/powerpc/lib/rheap.c @@ -54,7 +54,7 @@ static int grow(rh_info_t * info, int max_blocks) new_blocks = max_blocks - info->max_blocks; - block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_ATOMIC); + block = kmalloc_array(max_blocks, sizeof(rh_block_t), GFP_ATOMIC); if (block == NULL) return -ENOMEM; diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 8cecda4bd66a..5c8530d0c611 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -215,7 +215,7 @@ void __init mem_topology_setup(void) /* Place all memblock_regions in the same node and merge contiguous * memblock_regions */ - memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0); + memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); } void __init initmem_init(void) diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c index 4c615fcb0cf0..abb43646927a 100644 --- a/arch/powerpc/mm/mmu_context_iommu.c +++ b/arch/powerpc/mm/mmu_context_iommu.c @@ -159,7 +159,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, goto unlock_exit; } - mem->hpas = vzalloc(entries * sizeof(mem->hpas[0])); + mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0]))); if (!mem->hpas) { kfree(mem); ret = -ENOMEM; diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 57a5029b4521..0c7e05d89244 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1316,7 +1316,7 @@ int numa_update_cpu_topology(bool cpus_locked) if (!weight) return 0; - updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL); + updates = kcalloc(weight, sizeof(*updates), GFP_KERNEL); if (!updates) return 0; diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index a9636d8cba15..5b061fc81df3 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -566,7 +566,7 @@ void bpf_jit_compile(struct bpf_prog *fp) if (!bpf_jit_enable) return; - addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL); + addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL); if (addrs == NULL) return; diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index f1c95779843b..380cbf9a40d9 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -949,7 +949,7 @@ 
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) goto skip_init_ctx; } - addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL); + addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL); if (addrs == NULL) { fp = org_fp; goto out_addrs; diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c index 5182f2936af2..4e099e556645 100644 --- a/arch/powerpc/oprofile/cell/spu_profiler.c +++ b/arch/powerpc/oprofile/cell/spu_profiler.c @@ -210,8 +210,8 @@ int start_spu_profiling_cycles(unsigned int cycles_reset) timer.function = profile_spus; /* Allocate arrays for collecting SPU PC samples */ - samples = kzalloc(SPUS_PER_NODE * - TRACE_ARRAY_SIZE * sizeof(u32), GFP_KERNEL); + samples = kcalloc(SPUS_PER_NODE * TRACE_ARRAY_SIZE, sizeof(u32), + GFP_KERNEL); if (!samples) return -ENOMEM; diff --git a/arch/powerpc/platforms/4xx/hsta_msi.c b/arch/powerpc/platforms/4xx/hsta_msi.c index 9926ad67af76..1c18f2955f7d 100644 --- a/arch/powerpc/platforms/4xx/hsta_msi.c +++ b/arch/powerpc/platforms/4xx/hsta_msi.c @@ -156,7 +156,8 @@ static int hsta_msi_probe(struct platform_device *pdev) if (ret) goto out; - ppc4xx_hsta_msi.irq_map = kmalloc(sizeof(int) * irq_count, GFP_KERNEL); + ppc4xx_hsta_msi.irq_map = kmalloc_array(irq_count, sizeof(int), + GFP_KERNEL); if (!ppc4xx_hsta_msi.irq_map) { ret = -ENOMEM; goto out1; diff --git a/arch/powerpc/platforms/4xx/msi.c b/arch/powerpc/platforms/4xx/msi.c index 96aaae678928..81b2cbce7df8 100644 --- a/arch/powerpc/platforms/4xx/msi.c +++ b/arch/powerpc/platforms/4xx/msi.c @@ -89,7 +89,7 @@ static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) if (type == PCI_CAP_ID_MSIX) pr_debug("ppc4xx msi: MSI-X untested, trying anyway.\n"); - msi_data->msi_virqs = kmalloc((msi_irqs) * sizeof(int), GFP_KERNEL); + msi_data->msi_virqs = kmalloc_array(msi_irqs, sizeof(int), GFP_KERNEL); if (!msi_data->msi_virqs) return -ENOMEM; diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/4xx/pci.c index 73e6b36bcd51..5aca523551ae 100644 --- a/arch/powerpc/platforms/4xx/pci.c +++ b/arch/powerpc/platforms/4xx/pci.c @@ -1449,7 +1449,7 @@ static int __init ppc4xx_pciex_check_core_init(struct device_node *np) count = ppc4xx_pciex_hwops->core_init(np); if (count > 0) { ppc4xx_pciex_ports = - kzalloc(count * sizeof(struct ppc4xx_pciex_port), + kcalloc(count, sizeof(struct ppc4xx_pciex_port), GFP_KERNEL); if (ppc4xx_pciex_ports) { ppc4xx_pciex_port_count = count; diff --git a/arch/powerpc/platforms/powernv/opal-sysparam.c b/arch/powerpc/platforms/powernv/opal-sysparam.c index 6fd4092798d5..9aa87df114fd 100644 --- a/arch/powerpc/platforms/powernv/opal-sysparam.c +++ b/arch/powerpc/platforms/powernv/opal-sysparam.c @@ -198,21 +198,21 @@ void __init opal_sys_param_init(void) goto out_param_buf; } - id = kzalloc(sizeof(*id) * count, GFP_KERNEL); + id = kcalloc(count, sizeof(*id), GFP_KERNEL); if (!id) { pr_err("SYSPARAM: Failed to allocate memory to read parameter " "id\n"); goto out_param_buf; } - size = kzalloc(sizeof(*size) * count, GFP_KERNEL); + size = kcalloc(count, sizeof(*size), GFP_KERNEL); if (!size) { pr_err("SYSPARAM: Failed to allocate memory to read parameter " "size\n"); goto out_free_id; } - perm = kzalloc(sizeof(*perm) * count, GFP_KERNEL); + perm = kcalloc(count, sizeof(*perm), GFP_KERNEL); if (!perm) { pr_err("SYSPARAM: Failed to allocate memory to read supported " "action on the parameter"); @@ -235,7 +235,7 @@ void __init opal_sys_param_init(void) goto out_free_perm; } - attr = kzalloc(sizeof(*attr) * 
count, GFP_KERNEL); + attr = kcalloc(count, sizeof(*attr), GFP_KERNEL); if (!attr) { pr_err("SYSPARAM: Failed to allocate memory for parameter " "attributes\n"); diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 1d4e0ef658d3..353b43972bbf 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -544,7 +544,7 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic) printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n"); /* Allocate fixups array */ - mpic->fixups = kzalloc(128 * sizeof(*mpic->fixups), GFP_KERNEL); + mpic->fixups = kcalloc(128, sizeof(*mpic->fixups), GFP_KERNEL); BUG_ON(mpic->fixups == NULL); /* Init spinlock */ @@ -1324,7 +1324,7 @@ struct mpic * __init mpic_alloc(struct device_node *node, if (psrc) { /* Allocate a bitmap with one bit per interrupt */ unsigned int mapsize = BITS_TO_LONGS(intvec_top + 1); - mpic->protected = kzalloc(mapsize*sizeof(long), GFP_KERNEL); + mpic->protected = kcalloc(mapsize, sizeof(long), GFP_KERNEL); BUG_ON(mpic->protected == NULL); for (i = 0; i < psize/sizeof(u32); i++) { if (psrc[i] > intvec_top) @@ -1639,8 +1639,9 @@ void __init mpic_init(struct mpic *mpic) #ifdef CONFIG_PM /* allocate memory to save mpic state */ - mpic->save_data = kmalloc(mpic->num_sources * sizeof(*mpic->save_data), - GFP_KERNEL); + mpic->save_data = kmalloc_array(mpic->num_sources, + sizeof(*mpic->save_data), + GFP_KERNEL); BUG_ON(mpic->save_data == NULL); #endif diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index 83bcd72b21cf..311185b9960a 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -489,7 +489,7 @@ static bool xive_parse_provisioning(struct device_node *np) if (rc == 0) return true; - xive_provision_chips = kzalloc(4 * xive_provision_chip_count, + xive_provision_chips = kcalloc(4, xive_provision_chip_count, GFP_KERNEL); if (WARN_ON(!xive_provision_chips)) return false; diff --git a/arch/powerpc/tools/gcc-check-mprofile-kernel.sh b/arch/powerpc/tools/gcc-check-mprofile-kernel.sh index a7dd0e5d9f98..137f3376ac2b 100755 --- a/arch/powerpc/tools/gcc-check-mprofile-kernel.sh +++ b/arch/powerpc/tools/gcc-check-mprofile-kernel.sh @@ -24,5 +24,4 @@ echo -e "#include \nnotrace int func() { return 0; }" | \ 2> /dev/null | grep -q "_mcount" && \ exit 1 -echo "OK" exit 0 diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 17f19e67993b..f12680c9b947 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -32,6 +32,7 @@ config RISCV select HAVE_MEMBLOCK_NODE_MAP select HAVE_DMA_CONTIGUOUS select HAVE_GENERIC_DMA_COHERENT + select HAVE_PERF_EVENTS select IRQ_DOMAIN select NO_BOOTMEM select RISCV_ISA_A if SMP @@ -103,9 +104,9 @@ choice config ARCH_RV32I bool "RV32I" select 32BIT - select GENERIC_ASHLDI3 - select GENERIC_ASHRDI3 - select GENERIC_LSHRDI3 + select GENERIC_LIB_ASHLDI3 + select GENERIC_LIB_ASHRDI3 + select GENERIC_LIB_LSHRDI3 config ARCH_RV64I bool "RV64I" @@ -193,6 +194,19 @@ config RISCV_ISA_C config RISCV_ISA_A def_bool y +menu "supported PMU type" + depends on PERF_EVENTS + +config RISCV_BASE_PMU + bool "Base Performance Monitoring Unit" + def_bool y + help + A base PMU that serves as a reference implementation, with a limited + set of perf features. It can run on any RISC-V machine, so it serves + as the fallback, but this option can also be disabled to reduce kernel size.
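+ + If unsure, say Y.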
+ +endmenu + endmenu menu "Kernel type" diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 76e958a5414a..6d4a5f6c3f4f 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -71,6 +71,9 @@ KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) # architectures. It's faster to have GCC emit only aligned accesses. KBUILD_CFLAGS += $(call cc-option,-mstrict-align) +# arch specific predefines for sparse +CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS) + head-y := arch/riscv/kernel/head.o core-y += arch/riscv/kernel/ arch/riscv/mm/ diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index bca0eee733b0..07326466871b 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -44,6 +44,7 @@ CONFIG_INPUT_MOUSEDEV=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_HVC_RISCV_SBI=y # CONFIG_PTP_1588_CLOCK is not set CONFIG_DRM=y CONFIG_DRM_RADEON=y diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild index 4286a5f83876..576ffdca06ba 100644 --- a/arch/riscv/include/asm/Kbuild +++ b/arch/riscv/include/asm/Kbuild @@ -25,6 +25,7 @@ generic-y += kdebug.h generic-y += kmap_types.h generic-y += kvm_para.h generic-y += local.h +generic-y += local64.h generic-y += mm-arch-hooks.h generic-y += mman.h generic-y += module.h diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h index efd89a88d2d0..8f13074413a7 100644 --- a/arch/riscv/include/asm/cacheflush.h +++ b/arch/riscv/include/asm/cacheflush.h @@ -47,7 +47,7 @@ static inline void flush_dcache_page(struct page *page) #else /* CONFIG_SMP */ -#define flush_icache_all() sbi_remote_fence_i(0) +#define flush_icache_all() sbi_remote_fence_i(NULL) void flush_icache_mm(struct mm_struct *mm, bool local); #endif /* CONFIG_SMP */ diff --git a/arch/riscv/include/asm/perf_event.h b/arch/riscv/include/asm/perf_event.h new file mode 100644 index 000000000000..0e638a0c3feb --- /dev/null +++ b/arch/riscv/include/asm/perf_event.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 SiFive + * Copyright (C) 2018 Andes Technology Corporation + * + */ + +#ifndef _ASM_RISCV_PERF_EVENT_H +#define _ASM_RISCV_PERF_EVENT_H + +#include +#include + +#define RISCV_BASE_COUNTERS 2 + +/* + * The RISCV_MAX_COUNTERS parameter should be specified. + */ + +#ifdef CONFIG_RISCV_BASE_PMU +#define RISCV_MAX_COUNTERS 2 +#endif + +#ifndef RISCV_MAX_COUNTERS +#error "Please provide a valid RISCV_MAX_COUNTERS for the PMU." +#endif + +/* + * These are the indexes of bits in the counteren register *minus* 1, + * except for cycle. It would be more coherent if they mapped directly + * to the counteren bit definitions, but there is a *time* register at + * counteren[1], and the per-cpu structure is a scarce resource here. + * + * According to the spec, an implementation can support counters up to + * mhpmcounter31, but many high-end processors have at most 6 general + * PMCs, so we only provide definitions up to MHPMCOUNTER8 here.
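+ * For example, RISCV_PMU_INSTRET is 1 while its enable bit is counteren[2], because counteren[1] gates the *time* CSR, which is not a PMU counter.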
+ */ +#define RISCV_PMU_CYCLE 0 +#define RISCV_PMU_INSTRET 1 +#define RISCV_PMU_MHPMCOUNTER3 2 +#define RISCV_PMU_MHPMCOUNTER4 3 +#define RISCV_PMU_MHPMCOUNTER5 4 +#define RISCV_PMU_MHPMCOUNTER6 5 +#define RISCV_PMU_MHPMCOUNTER7 6 +#define RISCV_PMU_MHPMCOUNTER8 7 + +#define RISCV_OP_UNSUPP (-EOPNOTSUPP) + +struct cpu_hw_events { + /* # of currently enabled events */ + int n_events; + /* currently enabled events */ + struct perf_event *events[RISCV_MAX_COUNTERS]; + /* vendor-defined PMU data */ + void *platform; +}; + +struct riscv_pmu { + struct pmu *pmu; + + /* generic hw/cache events table */ + const int *hw_events; + const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + /* method used to map hw/cache events */ + int (*map_hw_event)(u64 config); + int (*map_cache_event)(u64 config); + + /* max generic hw events in map */ + int max_events; + /* total number of counters: 2 (base) + x (general) */ + int num_counters; + /* the width of the counter */ + int counter_width; + + /* vendor-defined PMU features */ + void *platform; + + irqreturn_t (*handle_irq)(int irq_num, void *dev); + int irq; +}; + +#endif /* _ASM_RISCV_PERF_EVENT_H */ diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 7b209aec355d..85c2d8bae957 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -49,7 +49,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma, #include -#define flush_tlb_all() sbi_remote_sfence_vma(0, 0, -1) +#define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1) #define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0) #define flush_tlb_range(vma, start, end) \ sbi_remote_sfence_vma(mm_cpumask((vma)->vm_mm)->bits, \ diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 14b0b22fb578..473cfc84e412 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -392,19 +392,21 @@ do { \ }) -extern unsigned long __must_check __copy_user(void __user *to, +extern unsigned long __must_check __asm_copy_to_user(void __user *to, + const void *from, unsigned long n); +extern unsigned long __must_check __asm_copy_from_user(void *to, const void __user *from, unsigned long n); static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) { - return __copy_user(to, from, n); + return __asm_copy_from_user(to, from, n); } static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - return __copy_user(to, from, n); + return __asm_copy_to_user(to, from, n); } extern long strncpy_from_user(char *dest, const char __user *src, long count); diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 8586dd96c2f0..e1274fc03af4 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -39,4 +39,6 @@ obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o +obj-$(CONFIG_PERF_EVENTS) += perf_event.o + clean: diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S index ce9bdc57a2a1..5721624886a1 100644 --- a/arch/riscv/kernel/mcount.S +++ b/arch/riscv/kernel/mcount.S @@ -126,5 +126,5 @@ do_trace: RESTORE_ABI_STATE ret ENDPROC(_mcount) -EXPORT_SYMBOL(_mcount) #endif +EXPORT_SYMBOL(_mcount) diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 5dddba301d0a..1d5e9b934b8c ---
a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -17,6 +17,17 @@ #include #include +static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v) +{ + if (v != (u32)v) { + pr_err("%s: value %016llx out of range for 32-bit field\n", + me->name, v); + return -EINVAL; + } + *location = v; + return 0; +} + static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v) { *(u64 *)location = v; @@ -265,6 +276,7 @@ static int apply_r_riscv_sub32_rela(struct module *me, u32 *location, static int (*reloc_handlers_rela[]) (struct module *me, u32 *location, Elf_Addr v) = { + [R_RISCV_32] = apply_r_riscv_32_rela, [R_RISCV_64] = apply_r_riscv_64_rela, [R_RISCV_BRANCH] = apply_r_riscv_branch_rela, [R_RISCV_JAL] = apply_r_riscv_jal_rela, diff --git a/arch/riscv/kernel/perf_event.c b/arch/riscv/kernel/perf_event.c new file mode 100644 index 000000000000..b0e10c4e9f77 --- /dev/null +++ b/arch/riscv/kernel/perf_event.c @@ -0,0 +1,485 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2008 Thomas Gleixner + * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2009 Jaswinder Singh Rajput + * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2009 Intel Corporation, + * Copyright (C) 2009 Google, Inc., Stephane Eranian + * Copyright 2014 Tilera Corporation. All Rights Reserved. + * Copyright (C) 2018 Andes Technology Corporation + * + * Perf_events support for RISC-V platforms. + * + * Since the spec. (as of now, Priv-Spec 1.10) does not provide enough + * functionality for perf events to fully work, this file provides + * the very basic framework only. + * + * For platform porting, please check Documentation/riscv/pmu.txt. + * + * The Copyright lines include the x86 and tile ones.
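+ * Only the fixed cycle and instret counters are architecturally guaranteed, so the base PMU below maps every other generic perf event to RISCV_OP_UNSUPP.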
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const struct riscv_pmu *riscv_pmu __read_mostly; +static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); + +/* + * Hardware & cache maps and their methods + */ + +static const int riscv_hw_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = RISCV_PMU_CYCLE, + [PERF_COUNT_HW_INSTRUCTIONS] = RISCV_PMU_INSTRET, + [PERF_COUNT_HW_CACHE_REFERENCES] = RISCV_OP_UNSUPP, + [PERF_COUNT_HW_CACHE_MISSES] = RISCV_OP_UNSUPP, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = RISCV_OP_UNSUPP, + [PERF_COUNT_HW_BRANCH_MISSES] = RISCV_OP_UNSUPP, + [PERF_COUNT_HW_BUS_CYCLES] = RISCV_OP_UNSUPP, +}; + +#define C(x) PERF_COUNT_HW_CACHE_##x +static const int riscv_cache_event_map[PERF_COUNT_HW_CACHE_MAX] +[PERF_COUNT_HW_CACHE_OP_MAX] +[PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, + [C(RESULT_MISS)] = RISCV_OP_UNSUPP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, + [C(RESULT_MISS)] = RISCV_OP_UNSUPP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, + [C(RESULT_MISS)] = RISCV_OP_UNSUPP, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, + [C(RESULT_MISS)] = RISCV_OP_UNSUPP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, + [C(RESULT_MISS)] = RISCV_OP_UNSUPP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, + [C(RESULT_MISS)] = RISCV_OP_UNSUPP, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, + [C(RESULT_MISS)] = RISCV_OP_UNSUPP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, + [C(RESULT_MISS)] = RISCV_OP_UNSUPP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, + [C(RESULT_MISS)] = RISCV_OP_UNSUPP, + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, + [C(RESULT_MISS)] = RISCV_OP_UNSUPP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, + [C(RESULT_MISS)] = RISCV_OP_UNSUPP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, + [C(RESULT_MISS)] = RISCV_OP_UNSUPP, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, + [C(RESULT_MISS)] = RISCV_OP_UNSUPP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, + [C(RESULT_MISS)] = RISCV_OP_UNSUPP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, + [C(RESULT_MISS)] = RISCV_OP_UNSUPP, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, + [C(RESULT_MISS)] = RISCV_OP_UNSUPP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, + [C(RESULT_MISS)] = RISCV_OP_UNSUPP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP, + [C(RESULT_MISS)] = RISCV_OP_UNSUPP, + }, + }, +}; + +static int riscv_map_hw_event(u64 config) +{ + if (config >= riscv_pmu->max_events) + return -EINVAL; + + return riscv_pmu->hw_events[config]; +} + +int riscv_map_cache_decode(u64 config, unsigned int *type, + unsigned int *op, unsigned int *result) +{ + return -ENOENT; +} + +static int riscv_map_cache_event(u64 config) +{ + unsigned int type, op, result; + int err = -ENOENT; + int code; + + err = riscv_map_cache_decode(config, &type, &op, &result); + if (!riscv_pmu->cache_events || err) + return err; + + if (type >= PERF_COUNT_HW_CACHE_MAX || + op >= PERF_COUNT_HW_CACHE_OP_MAX || + result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return -EINVAL; + + code = 
(*riscv_pmu->cache_events)[type][op][result]; + if (code == RISCV_OP_UNSUPP) + return -EINVAL; + + return code; +} + +/* + * Low-level functions: reading/writing counters + */ + +static inline u64 read_counter(int idx) +{ + u64 val = 0; + + switch (idx) { + case RISCV_PMU_CYCLE: + val = csr_read(cycle); + break; + case RISCV_PMU_INSTRET: + val = csr_read(instret); + break; + default: + WARN_ON_ONCE(idx < 0 || idx > RISCV_MAX_COUNTERS); + return -EINVAL; + } + + return val; +} + +static inline void write_counter(int idx, u64 value) +{ + /* currently not supported */ + WARN_ON_ONCE(1); +} + +/* + * pmu->read: read and update the counter + * + * Other architectures' implementations often have an xxx_perf_event_update + * routine, which returns the counter value when called from the IRQ handler, + * but returns void when called via the pmu->read method. + */ +static void riscv_pmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 prev_raw_count, new_raw_count; + u64 oldval; + int idx = hwc->idx; + u64 delta; + + do { + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = read_counter(idx); + + oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count); + } while (oldval != prev_raw_count); + + /* + * delta is the value to update the counter we maintain in the kernel. + */ + delta = (new_raw_count - prev_raw_count) & + ((1ULL << riscv_pmu->counter_width) - 1); + local64_add(delta, &event->count); + /* + * Something like local64_sub(delta, &hwc->period_left) here is + * needed if there is an interrupt for perf. + */ +} + +/* + * State transition functions: + * + * stop()/start() & add()/del() + */ + +/* + * pmu->stop: stop the counter + */ +static void riscv_pmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + + if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + riscv_pmu->pmu->read(event); + hwc->state |= PERF_HES_UPTODATE; + } +} + +/* + * pmu->start: start the event. + */ +static void riscv_pmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + return; + + if (flags & PERF_EF_RELOAD) { + WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + + /* + * Set the counter to the period of the next interrupt here, + * if there is one. + */ + } + + hwc->state = 0; + perf_event_update_userpage(event); + + /* + * Since we cannot write to counters, this serves as an initialization + * to the delta-mechanism in pmu->read(); otherwise, the delta would be + * wrong when pmu->read is called for the first time. + */ + local64_set(&hwc->prev_count, read_counter(hwc->idx)); +} + +/* + * pmu->add: add the event to the PMU. + */ +static int riscv_pmu_add(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + + if (cpuc->n_events == riscv_pmu->num_counters) + return -ENOSPC; + + /* + * We don't have general counters, so there is no binding-event-to-counter + * process here. + * + * Indexing using hwc->config generally does not work, since config may + * contain extra information, but here the only info we have in + * hwc->config is the event index.
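A note on the masking in riscv_pmu_read() above: the raw CSR value can wrap, so the increment is computed modulo 2^counter_width. A minimal sketch of the arithmetic, assuming the 63-bit counter width that riscv_base_pmu declares below:

	#include <stdint.h>

	/* (cur - prev) mod 2^width is the true increment, even across a wrap */
	static uint64_t counter_delta(uint64_t prev, uint64_t cur, unsigned int width)
	{
		uint64_t mask = (width >= 64) ? ~0ULL : (1ULL << width) - 1;

		return (cur - prev) & mask;
	}

	/* e.g. width 63, prev = (1ULL << 63) - 1, cur = 1 after wrapping: delta = 2 */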
+ */ + hwc->idx = hwc->config; + cpuc->events[hwc->idx] = event; + cpuc->n_events++; + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (flags & PERF_EF_START) + riscv_pmu->pmu->start(event, PERF_EF_RELOAD); + + return 0; +} + +/* + * pmu->del: delete the event from the PMU. + */ +static void riscv_pmu_del(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + + cpuc->events[hwc->idx] = NULL; + cpuc->n_events--; + riscv_pmu->pmu->stop(event, PERF_EF_UPDATE); + perf_event_update_userpage(event); +} + +/* + * Interrupt: a skeleton for reference. + */ + +static DEFINE_MUTEX(pmc_reserve_mutex); + +irqreturn_t riscv_base_pmu_handle_irq(int irq_num, void *dev) +{ + return IRQ_NONE; +} + +static int reserve_pmc_hardware(void) +{ + int err = 0; + + mutex_lock(&pmc_reserve_mutex); + if (riscv_pmu->irq >= 0 && riscv_pmu->handle_irq) { + err = request_irq(riscv_pmu->irq, riscv_pmu->handle_irq, + IRQF_PERCPU, "riscv-base-perf", NULL); + } + mutex_unlock(&pmc_reserve_mutex); + + return err; +} + +void release_pmc_hardware(void) +{ + mutex_lock(&pmc_reserve_mutex); + if (riscv_pmu->irq >= 0) + free_irq(riscv_pmu->irq, NULL); + mutex_unlock(&pmc_reserve_mutex); +} + +/* + * Event Initialization/Finalization + */ + +static atomic_t riscv_active_events = ATOMIC_INIT(0); + +static void riscv_event_destroy(struct perf_event *event) +{ + if (atomic_dec_return(&riscv_active_events) == 0) + release_pmc_hardware(); +} + +static int riscv_event_init(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + struct hw_perf_event *hwc = &event->hw; + int err; + int code; + + if (atomic_inc_return(&riscv_active_events) == 1) { + err = reserve_pmc_hardware(); + + if (err) { + pr_warn("PMC hardware not available\n"); + atomic_dec(&riscv_active_events); + return -EBUSY; + } + } + + switch (event->attr.type) { + case PERF_TYPE_HARDWARE: + code = riscv_pmu->map_hw_event(attr->config); + break; + case PERF_TYPE_HW_CACHE: + code = riscv_pmu->map_cache_event(attr->config); + break; + case PERF_TYPE_RAW: + return -EOPNOTSUPP; + default: + return -ENOENT; + } + + event->destroy = riscv_event_destroy; + if (code < 0) { + event->destroy(event); + return code; + } + + /* + * idx is set to -1 because the index of a general event should not be + * decided until binding to some counter in pmu->add(). + * + * But since we don't have such support, later in pmu->add(), we just + * use hwc->config as the index instead. + */ + hwc->config = code; + hwc->idx = -1; + + return 0; +} + +/* + * Initialization + */ + +static struct pmu min_pmu = { + .name = "riscv-base", + .event_init = riscv_event_init, + .add = riscv_pmu_add, + .del = riscv_pmu_del, + .start = riscv_pmu_start, + .stop = riscv_pmu_stop, + .read = riscv_pmu_read, +}; + +static const struct riscv_pmu riscv_base_pmu = { + .pmu = &min_pmu, + .max_events = ARRAY_SIZE(riscv_hw_event_map), + .map_hw_event = riscv_map_hw_event, + .hw_events = riscv_hw_event_map, + .map_cache_event = riscv_map_cache_event, + .cache_events = &riscv_cache_event_map, + .counter_width = 63, + .num_counters = RISCV_BASE_COUNTERS + 0, + .handle_irq = &riscv_base_pmu_handle_irq, + + /* This means this PMU has no IRQ.
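Once perf_pmu_register() hooks this up (see init_hw_perf_events below), the two supported events are reachable through the normal perf syscall. A hypothetical userspace test, not part of the patch, counting cycles via the PERF_COUNT_HW_CPU_CYCLES mapping above:

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <string.h>
	#include <stdio.h>

	int main(void)
	{
		struct perf_event_attr attr;
		unsigned long long count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES; /* maps to RISCV_PMU_CYCLE */

		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			return 1;
		/* ... run the workload to be measured ... */
		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("cycles: %llu\n", count);
		close(fd);
		return 0;
	}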
*/ + .irq = -1, +}; + +static const struct of_device_id riscv_pmu_of_ids[] = { + {.compatible = "riscv,base-pmu", .data = &riscv_base_pmu}, + { /* sentinel value */ } +}; + +int __init init_hw_perf_events(void) +{ + struct device_node *node = of_find_node_by_type(NULL, "pmu"); + const struct of_device_id *of_id; + + riscv_pmu = &riscv_base_pmu; + + if (node) { + of_id = of_match_node(riscv_pmu_of_ids, node); + + if (of_id) + riscv_pmu = of_id->data; + } + + perf_pmu_register(riscv_pmu->pmu, "cpu", PERF_TYPE_RAW); + return 0; +} +arch_initcall(init_hw_perf_events); diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c index 551734248748..f247d6d2137c 100644 --- a/arch/riscv/kernel/riscv_ksyms.c +++ b/arch/riscv/kernel/riscv_ksyms.c @@ -13,6 +13,7 @@ * Assembly functions that may be used (directly or indirectly) by modules */ EXPORT_SYMBOL(__clear_user); -EXPORT_SYMBOL(__copy_user); +EXPORT_SYMBOL(__asm_copy_to_user); +EXPORT_SYMBOL(__asm_copy_from_user); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index b99d9dd21fd0..81a1952015a6 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -148,7 +148,7 @@ int is_valid_bugaddr(unsigned long pc) if (pc < PAGE_OFFSET) return 0; - if (probe_kernel_address((bug_insn_t __user *)pc, insn)) + if (probe_kernel_address((bug_insn_t *)pc, insn)) return 0; return (insn == __BUG_INSN); } diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S index 58fb2877c865..399e6f0c2d98 100644 --- a/arch/riscv/lib/uaccess.S +++ b/arch/riscv/lib/uaccess.S @@ -13,7 +13,8 @@ _epc: .previous .endm -ENTRY(__copy_user) +ENTRY(__asm_copy_to_user) +ENTRY(__asm_copy_from_user) /* Enable access to user memory */ li t6, SR_SUM @@ -63,7 +64,8 @@ ENTRY(__copy_user) addi a0, a0, 1 bltu a1, a3, 5b j 3b -ENDPROC(__copy_user) +ENDPROC(__asm_copy_to_user) +ENDPROC(__asm_copy_from_user) ENTRY(__clear_user) @@ -84,7 +86,7 @@ ENTRY(__clear_user) bgeu t0, t1, 2f bltu a0, t0, 4f 1: - fixup REG_S, zero, (a0), 10f + fixup REG_S, zero, (a0), 11f addi a0, a0, SZREG bltu a0, t1, 1b 2: @@ -96,12 +98,12 @@ ENTRY(__clear_user) li a0, 0 ret 4: /* Edge case: unalignment */ - fixup sb, zero, (a0), 10f + fixup sb, zero, (a0), 11f addi a0, a0, 1 bltu a0, t0, 4b j 1b 5: /* Edge case: remainder */ - fixup sb, zero, (a0), 10f + fixup sb, zero, (a0), 11f addi a0, a0, 1 bltu a0, a3, 5b j 3b @@ -109,9 +111,14 @@ ENDPROC(__clear_user) .section .fixup,"ax" .balign 4 + /* Fixup code for __copy_user(10) and __clear_user(11) */ 10: /* Disable access to user memory */ csrs sstatus, t6 - sub a0, a3, a0 + mv a0, a2 + ret +11: + csrs sstatus, t6 + mv a0, a1 ret .previous diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index cb6e8066b1ad..ee6a9c387c87 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -391,7 +391,7 @@ int appldata_register_ops(struct appldata_ops *ops) if (ops->size > APPLDATA_MAX_REC_SIZE) return -EINVAL; - ops->ctl_table = kzalloc(4 * sizeof(struct ctl_table), GFP_KERNEL); + ops->ctl_table = kcalloc(4, sizeof(struct ctl_table), GFP_KERNEL); if (!ops->ctl_table) return -ENOMEM; diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c index be8cc53204b5..a2945b289a29 100644 --- a/arch/s390/hypfs/hypfs_diag.c +++ b/arch/s390/hypfs/hypfs_diag.c @@ -239,7 +239,7 @@ static void *page_align_ptr(void *ptr) static void *diag204_alloc_vbuf(int pages) { /* The buffer has to be page aligned! 
*/ - diag204_buf_vmalloc = vmalloc(PAGE_SIZE * (pages + 1)); + diag204_buf_vmalloc = vmalloc(array_size(PAGE_SIZE, (pages + 1))); if (!diag204_buf_vmalloc) return ERR_PTR(-ENOMEM); diag204_buf = page_align_ptr(diag204_buf_vmalloc); diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c index dce87f1bec94..cebf05150cc1 100644 --- a/arch/s390/hypfs/hypfs_diag0c.c +++ b/arch/s390/hypfs/hypfs_diag0c.c @@ -49,7 +49,8 @@ static void *diag0c_store(unsigned int *count) get_online_cpus(); cpu_count = num_online_cpus(); - cpu_vec = kmalloc(sizeof(*cpu_vec) * num_possible_cpus(), GFP_KERNEL); + cpu_vec = kmalloc_array(num_possible_cpus(), sizeof(*cpu_vec), + GFP_KERNEL); if (!cpu_vec) goto fail_put_online_cpus; /* Note: Diag 0c needs 8 byte alignment and real storage */ diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index 99c93d0346f9..4600453536c2 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ b/arch/s390/include/asm/ctl_reg.h @@ -10,8 +10,20 @@ #include +#define CR0_CLOCK_COMPARATOR_SIGN _BITUL(63 - 10) +#define CR0_EMERGENCY_SIGNAL_SUBMASK _BITUL(63 - 49) +#define CR0_EXTERNAL_CALL_SUBMASK _BITUL(63 - 50) +#define CR0_CLOCK_COMPARATOR_SUBMASK _BITUL(63 - 52) +#define CR0_CPU_TIMER_SUBMASK _BITUL(63 - 53) +#define CR0_SERVICE_SIGNAL_SUBMASK _BITUL(63 - 54) +#define CR0_UNUSED_56 _BITUL(63 - 56) +#define CR0_INTERRUPT_KEY_SUBMASK _BITUL(63 - 57) +#define CR0_MEASUREMENT_ALERT_SUBMASK _BITUL(63 - 58) + #define CR2_GUARDED_STORAGE _BITUL(63 - 59) +#define CR14_UNUSED_32 _BITUL(63 - 32) +#define CR14_UNUSED_33 _BITUL(63 - 33) #define CR14_CHANNEL_REPORT_SUBMASK _BITUL(63 - 35) #define CR14_RECOVERY_SUBMASK _BITUL(63 - 36) #define CR14_DEGRADATION_SUBMASK _BITUL(63 - 37) diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 81cdb6b55118..a2188e309bd6 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -812,6 +812,7 @@ struct kvm_arch{ int use_irqchip; int use_cmma; int use_pfmfi; + int use_skf; int user_cpu_state_ctrl; int user_sigp; int user_stsi; diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index c639c95850e4..f5ff9dbad8ac 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h @@ -21,7 +21,7 @@ typedef struct { /* The mmu context uses extended page tables. */ unsigned int has_pgste:1; /* The mmu context uses storage keys. */ - unsigned int use_skey:1; + unsigned int uses_skeys:1; /* The mmu context uses CMM. 
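The CR0/CR14 defines above follow s390's MSB-0 convention: architectural bit n of a 64-bit control register is host bit 63 - n, which is what _BITUL(63 - n) expresses. A quick standalone check that the named masks match the magic literals they replace in the KVM hunks below:

	#include <stdio.h>

	#define BITULL(x) (1ULL << (x))	/* _BITUL() analogue, 64-bit safe in userspace */

	int main(void)
	{
		/* bit 52, clock comparator submask: the old 0x800ul literal */
		printf("0x%llx\n", BITULL(63 - 52));
		/* bit 10, clock comparator sign: the old 0x0020000000000000ul literal */
		printf("0x%llx\n", BITULL(63 - 10));
		/* bits 49/50/53/54: 0x4000, 0x2000, 0x400, 0x200 */
		printf("0x%llx 0x%llx 0x%llx 0x%llx\n", BITULL(63 - 49),
		       BITULL(63 - 50), BITULL(63 - 53), BITULL(63 - 54));
		return 0;
	}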
*/ unsigned int uses_cmm:1; } mm_context_t; diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 324f6f452982..d16bc79c30bb 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -30,7 +30,7 @@ static inline int init_new_context(struct task_struct *tsk, test_thread_flag(TIF_PGSTE) || (current->mm && current->mm->context.alloc_pgste); mm->context.has_pgste = 0; - mm->context.use_skey = 0; + mm->context.uses_skeys = 0; mm->context.uses_cmm = 0; #endif switch (mm->context.asce_limit) { diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 9809694e1389..5ab636089c60 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -506,10 +506,10 @@ static inline int mm_alloc_pgste(struct mm_struct *mm) * faults should no longer be backed by zero pages */ #define mm_forbids_zeropage mm_has_pgste -static inline int mm_use_skey(struct mm_struct *mm) +static inline int mm_uses_skeys(struct mm_struct *mm) { #ifdef CONFIG_PGSTE - if (mm->context.use_skey) + if (mm->context.uses_skeys) return 1; #endif return 0; diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 80e974adb9e8..d374f9b218b4 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -194,11 +194,13 @@ static debug_entry_t ***debug_areas_alloc(int pages_per_area, int nr_areas) debug_entry_t ***areas; int i, j; - areas = kmalloc(nr_areas * sizeof(debug_entry_t **), GFP_KERNEL); + areas = kmalloc_array(nr_areas, sizeof(debug_entry_t **), GFP_KERNEL); if (!areas) goto fail_malloc_areas; for (i = 0; i < nr_areas; i++) { - areas[i] = kmalloc(pages_per_area * sizeof(debug_entry_t *), GFP_KERNEL); + areas[i] = kmalloc_array(pages_per_area, + sizeof(debug_entry_t *), + GFP_KERNEL); if (!areas[i]) goto fail_malloc_areas2; for (j = 0; j < pages_per_area; j++) { diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 0dc8ac8548ee..d298d3cb46d0 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -123,8 +123,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, /* Allocate one syminfo structure per symbol. 
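A recurring conversion in this series: open-coded `kmalloc(n * size)` / `kzalloc(n * size)` becomes kmalloc_array()/kcalloc(), and open-coded vmalloc() sizes go through array_size(). The point is multiplication-overflow safety: if n * size wraps, the old code silently under-allocates, while the helpers saturate so the allocation fails instead. Roughly what the array helpers do for the caller, sketched using <linux/overflow.h>:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	/* Approximately kcalloc(n, size, gfp), spelled out: */
	static void *alloc_array_checked(size_t n, size_t size, gfp_t gfp)
	{
		size_t bytes;

		if (check_mul_overflow(n, size, &bytes))
			return NULL;	/* n * size wrapped: refuse, don't truncate */
		return kzalloc(bytes, gfp);
	}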
*/ me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym); - me->arch.syminfo = vmalloc(me->arch.nsyms * - sizeof(struct mod_arch_syminfo)); + me->arch.syminfo = vmalloc(array_size(sizeof(struct mod_arch_syminfo), + me->arch.nsyms)); if (!me->arch.syminfo) return -ENOMEM; symbols = (void *) hdr + symtab->sh_offset; diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c index feebb2944882..d63fb3c56b8a 100644 --- a/arch/s390/kernel/perf_cpum_cf_events.c +++ b/arch/s390/kernel/perf_cpum_cf_events.c @@ -527,7 +527,7 @@ static __init struct attribute **merge_attr(struct attribute **a, j++; j++; - new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL); + new = kmalloc_array(j, sizeof(struct attribute *), GFP_KERNEL); if (!new) return NULL; j = 0; diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c index 80b862e9c53c..0859cde36f75 100644 --- a/arch/s390/kernel/sthyi.c +++ b/arch/s390/kernel/sthyi.c @@ -315,7 +315,7 @@ static void fill_diag(struct sthyi_sctns *sctns) if (pages <= 0) return; - diag204_buf = vmalloc(PAGE_SIZE * pages); + diag204_buf = vmalloc(array_size(pages, PAGE_SIZE)); if (!diag204_buf) return; diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index f3a1c7c6824e..09abae40f917 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -285,7 +285,7 @@ static int __init vdso_init(void) + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; /* Make sure pages are in the correct state */ - vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1), + vdso32_pagelist = kcalloc(vdso32_pages + 1, sizeof(struct page *), GFP_KERNEL); BUG_ON(vdso32_pagelist == NULL); for (i = 0; i < vdso32_pages - 1; i++) { @@ -303,7 +303,7 @@ static int __init vdso_init(void) + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; /* Make sure pages are in the correct state */ - vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1), + vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *), GFP_KERNEL); BUG_ON(vdso64_pagelist == NULL); for (i = 0; i < vdso64_pages - 1; i++) { diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 8e2b8647ee12..07d30ffcfa41 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -847,7 +847,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data, nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1; pages = pages_array; if (nr_pages > ARRAY_SIZE(pages_array)) - pages = vmalloc(nr_pages * sizeof(unsigned long)); + pages = vmalloc(array_size(nr_pages, sizeof(unsigned long))); if (!pages) return -ENOMEM; need_ipte_lock = psw_bits(*psw).dat && !asce.r; diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c index b5f3e82006d0..394a5f53805b 100644 --- a/arch/s390/kvm/guestdbg.c +++ b/arch/s390/kvm/guestdbg.c @@ -153,7 +153,7 @@ void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu) if (guestdbg_sstep_enabled(vcpu)) { /* disable timer (clock-comparator) interrupts */ - vcpu->arch.sie_block->gcr[0] &= ~0x800ul; + vcpu->arch.sie_block->gcr[0] &= ~CR0_CLOCK_COMPARATOR_SUBMASK; vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH; vcpu->arch.sie_block->gcr[10] = 0; vcpu->arch.sie_block->gcr[11] = -1UL; diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 37d06e022238..daa09f89ca2d 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -159,7 +159,7 @@ static int psw_interrupts_disabled(struct kvm_vcpu *vcpu) static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) { if (psw_extint_disabled(vcpu) || - 
!(vcpu->arch.sie_block->gcr[0] & 0x800ul)) + !(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK)) return 0; if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu)) /* No timer interrupts when single stepping */ @@ -172,7 +172,7 @@ static int ckc_irq_pending(struct kvm_vcpu *vcpu) const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm); const u64 ckc = vcpu->arch.sie_block->ckc; - if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) { + if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) { if ((s64)ckc >= (s64)now) return 0; } else if (ckc >= now) { @@ -184,7 +184,7 @@ static int ckc_irq_pending(struct kvm_vcpu *vcpu) static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu) { return !psw_extint_disabled(vcpu) && - (vcpu->arch.sie_block->gcr[0] & 0x400ul); + (vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK); } static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu) @@ -285,15 +285,15 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu) active_mask &= ~IRQ_PEND_IO_MASK; else active_mask = disable_iscs(vcpu, active_mask); - if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul)) + if (!(vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK)) __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask); - if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul)) + if (!(vcpu->arch.sie_block->gcr[0] & CR0_EMERGENCY_SIGNAL_SUBMASK)) __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask); - if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul)) + if (!(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK)) __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask); - if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul)) + if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK)) __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask); - if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) + if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask); if (psw_mchk_disabled(vcpu)) active_mask &= ~IRQ_PEND_MCHK_MASK; @@ -1042,7 +1042,7 @@ int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop) /* external call pending and deliverable */ if (kvm_s390_ext_call_pending(vcpu) && !psw_extint_disabled(vcpu) && - (vcpu->arch.sie_block->gcr[0] & 0x2000ul)) + (vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK)) return 1; if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu)) @@ -1062,7 +1062,7 @@ static u64 __calculate_sltime(struct kvm_vcpu *vcpu) u64 cputm, sltime = 0; if (ckc_interrupts_enabled(vcpu)) { - if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) { + if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) { if ((s64)now < (s64)ckc) sltime = tod_to_ns((s64)ckc - (s64)now); } else if (now < ckc) { diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 64c986243018..3b7a5151b6a5 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -791,11 +791,21 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu); -static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) +void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm) { struct kvm_vcpu *vcpu; int i; + kvm_s390_vcpu_block_all(kvm); + + kvm_for_each_vcpu(i, vcpu, kvm) + kvm_s390_vcpu_crypto_setup(vcpu); + + kvm_s390_vcpu_unblock_all(kvm); +} + +static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) +{ if (!test_kvm_facility(kvm, 76)) return -EINVAL; @@ -832,10 +842,7 @@ static int 
kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) return -ENXIO; } - kvm_for_each_vcpu(i, vcpu, kvm) { - kvm_s390_vcpu_crypto_setup(vcpu); - exit_sie(vcpu); - } + kvm_s390_vcpu_crypto_reset_all(kvm); mutex_unlock(&kvm->lock); return 0; } @@ -1033,8 +1040,8 @@ static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) return ret; } -static void kvm_s390_get_tod_clock_ext(struct kvm *kvm, - struct kvm_s390_vm_tod_clock *gtod) +static void kvm_s390_get_tod_clock(struct kvm *kvm, + struct kvm_s390_vm_tod_clock *gtod) { struct kvm_s390_tod_clock_ext htod; @@ -1043,10 +1050,12 @@ static void kvm_s390_get_tod_clock_ext(struct kvm *kvm, get_tod_clock_ext((char *)&htod); gtod->tod = htod.tod + kvm->arch.epoch; - gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx; - - if (gtod->tod < htod.tod) - gtod->epoch_idx += 1; + gtod->epoch_idx = 0; + if (test_kvm_facility(kvm, 139)) { + gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx; + if (gtod->tod < htod.tod) + gtod->epoch_idx += 1; + } preempt_enable(); } @@ -1056,12 +1065,7 @@ static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) struct kvm_s390_vm_tod_clock gtod; memset(&gtod, 0, sizeof(gtod)); - - if (test_kvm_facility(kvm, 139)) - kvm_s390_get_tod_clock_ext(kvm, &gtod); - else - gtod.tod = kvm_s390_get_tod_clock_fast(kvm); - + kvm_s390_get_tod_clock(kvm, &gtod); if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) return -EFAULT; @@ -1493,7 +1497,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) return -EINVAL; /* Is this guest using storage keys? */ - if (!mm_use_skey(current->mm)) + if (!mm_uses_skeys(current->mm)) return KVM_S390_GET_SKEYS_NONE; /* Enforce sane limit on memory allocation */ @@ -1725,7 +1729,7 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm, if (args->count == 0) return 0; - bits = vmalloc(sizeof(*bits) * args->count); + bits = vmalloc(array_size(sizeof(*bits), args->count)); if (!bits) return -ENOMEM; @@ -1982,10 +1986,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) rc = -ENOMEM; - kvm->arch.use_esca = 0; /* start with basic SCA */ if (!sclp.has_64bscao) alloc_flags |= GFP_DMA; rwlock_init(&kvm->arch.sca_lock); + /* start with basic SCA */ kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); if (!kvm->arch.sca) goto out_err; @@ -2036,8 +2040,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm_s390_crypto_init(kvm); mutex_init(&kvm->arch.float_int.ais_lock); - kvm->arch.float_int.simm = 0; - kvm->arch.float_int.nimm = 0; spin_lock_init(&kvm->arch.float_int.lock); for (i = 0; i < FIRQ_LIST_COUNT; i++) INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); @@ -2063,11 +2065,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.gmap->pfault_enabled = 0; } - kvm->arch.css_support = 0; - kvm->arch.use_irqchip = 0; kvm->arch.use_pfmfi = sclp.has_pfmfi; - kvm->arch.epoch = 0; - + kvm->arch.use_skf = sclp.has_skey; spin_lock_init(&kvm->arch.start_stop_lock); kvm_s390_vsie_init(kvm); kvm_s390_gisa_init(kvm); @@ -2433,8 +2432,12 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->ckc = 0UL; vcpu->arch.sie_block->todpr = 0; memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64)); - vcpu->arch.sie_block->gcr[0] = 0xE0UL; - vcpu->arch.sie_block->gcr[14] = 0xC2000000UL; + vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 | + CR0_INTERRUPT_KEY_SUBMASK | + CR0_MEASUREMENT_ALERT_SUBMASK; + vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 | + CR14_UNUSED_33 | + 
CR14_EXTERNAL_DAMAGE_SUBMASK; /* make sure the new fpc will be lazily loaded */ save_fpu_regs(); current->thread.fpu.fpc = 0; @@ -3192,7 +3195,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu) return 0; if (kvm_s390_vcpu_has_irq(vcpu, 0)) return 0; - if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) + if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) return 0; if (!vcpu->arch.gmap->pfault_enabled) return 0; @@ -3990,7 +3993,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, return r; } -int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) { #ifdef CONFIG_KVM_S390_UCONTROL if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 1b5621f4fe5b..981e3ba97461 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -410,4 +410,17 @@ static inline int kvm_s390_use_sca_entries(void) } void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu, struct mcck_volatile_info *mcck_info); + +/** + * kvm_s390_vcpu_crypto_reset_all + * + * Reset the crypto attributes for each vcpu. This can be done while the vcpus + * are running as each vcpu will be removed from SIE before resetting the crypto + * attributes and restored to SIE afterward. + * + * Note: The kvm->lock must be held while calling this function. + * + * @kvm: the KVM guest + */ +void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm); #endif diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index a3bce0e84346..eb0eb60c7be6 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -204,24 +204,28 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu) int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu) { - int rc = 0; + int rc; struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block; trace_kvm_s390_skey_related_inst(vcpu); - if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) && + /* Already enabled?
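In the vsie.c hunks below, `!(gpa & ~0x1fffUL)` becomes `gpa < 2 * PAGE_SIZE`. On s390 with 4K pages the two tests are equivalent; the new form simply states the intent of rejecting addresses in the lowest two pages. A standalone check of the equivalence:

	#include <assert.h>

	int main(void)
	{
		const unsigned long page_size = 4096;	/* s390 PAGE_SIZE */
		unsigned long gpa;

		for (gpa = 0; gpa < 4 * page_size; gpa += 64)
			assert((!(gpa & ~0x1fffUL)) == (gpa < 2 * page_size));
		return 0;
	}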
*/ + if (vcpu->kvm->arch.use_skf && + !(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) && !kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS)) - return rc; + return 0; rc = s390_enable_skey(); VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc); - if (!rc) { - if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS)) - kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS); - else - sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | - ICTL_RRBE); - } - return rc; + if (rc) + return rc; + + if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS)) + kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS); + if (!vcpu->kvm->arch.use_skf) + sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; + else + sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); + return 0; } static int try_handle_skey(struct kvm_vcpu *vcpu) @@ -231,7 +235,7 @@ static int try_handle_skey(struct kvm_vcpu *vcpu) rc = kvm_s390_skey_check_enable(vcpu); if (rc) return rc; - if (sclp.has_skey) { + if (vcpu->kvm->arch.use_skf) { /* with storage-key facility, SIE interprets it for us */ kvm_s390_retry_instr(vcpu); VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 969882b54266..84c89cb9636f 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -557,7 +557,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO)) gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32; if (gpa) { - if (!(gpa & ~0x1fffUL)) + if (gpa < 2 * PAGE_SIZE) rc = set_validity_icpt(scb_s, 0x0038U); else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu)) rc = set_validity_icpt(scb_s, 0x0011U); @@ -578,7 +578,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) gpa = READ_ONCE(scb_o->itdba) & ~0xffUL; if (gpa && (scb_s->ecb & ECB_TE)) { - if (!(gpa & ~0x1fffUL)) { + if (gpa < 2 * PAGE_SIZE) { rc = set_validity_icpt(scb_s, 0x0080U); goto unpin; } @@ -594,7 +594,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL; if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) { - if (!(gpa & ~0x1fffUL)) { + if (gpa < 2 * PAGE_SIZE) { rc = set_validity_icpt(scb_s, 0x1310U); goto unpin; } @@ -613,7 +613,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL; if (gpa && (scb_s->ecb3 & ECB3_RI)) { - if (!(gpa & ~0x1fffUL)) { + if (gpa < 2 * PAGE_SIZE) { rc = set_validity_icpt(scb_s, 0x0043U); goto unpin; } @@ -632,7 +632,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL; sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL; - if (!gpa || !(gpa & ~0x1fffUL)) { + if (!gpa || gpa < 2 * PAGE_SIZE) { rc = set_validity_icpt(scb_s, 0x10b0U); goto unpin; } diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 920d40894535..6ad15d3fab81 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c @@ -103,7 +103,7 @@ static int scode_set; static int dcss_set_subcodes(void) { - char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA); + char *name = kmalloc(8, GFP_KERNEL | GFP_DMA); unsigned long rx, ry; int rc; diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 2c55a2b9d6c6..bc56ec8abcf7 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -2184,14 +2184,14 @@ int s390_enable_skey(void) int rc = 0; down_write(&mm->mmap_sem); - if (mm_use_skey(mm)) + if (mm_uses_skeys(mm)) goto out_up; - 
mm->context.use_skey = 1; + mm->context.uses_skeys = 1; for (vma = mm->mmap; vma; vma = vma->vm_next) { if (ksm_madvise(vma, vma->vm_start, vma->vm_end, MADV_UNMERGEABLE, &vma->vm_flags)) { - mm->context.use_skey = 0; + mm->context.uses_skeys = 0; rc = -ENOMEM; goto out_up; } diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 4f2b65d01a70..301e466e4263 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -158,7 +158,7 @@ static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste, #ifdef CONFIG_PGSTE unsigned long address, bits, skey; - if (!mm_use_skey(mm) || pte_val(pte) & _PAGE_INVALID) + if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID) return pgste; address = pte_val(pte) & PAGE_MASK; skey = (unsigned long) page_get_storage_key(address); @@ -180,7 +180,7 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry, unsigned long address; unsigned long nkey; - if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID) + if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID) return; VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID)); address = pte_val(entry) & PAGE_MASK; diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 4d61a085982b..dd4f3d3e644f 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -77,7 +77,7 @@ config SUPERH32 select PERF_EVENTS select ARCH_HIBERNATION_POSSIBLE if MMU select SPARSE_IRQ - select HAVE_CC_STACKPROTECTOR + select HAVE_STACKPROTECTOR config SUPERH64 def_bool "$(ARCH)" = "sh64" @@ -687,7 +687,7 @@ config SMP People using multiprocessor machines who say Y here should also say Y to "Enhanced Real Time Clock Support", below. - See also and the SMP-HOWTO + See also and the SMP-HOWTO available at . If you don't know what to do here, say N. diff --git a/arch/sh/boards/board-sh7785lcr.c b/arch/sh/boards/board-sh7785lcr.c index d7d232dea33e..3cba60ff7aab 100644 --- a/arch/sh/boards/board-sh7785lcr.c +++ b/arch/sh/boards/board-sh7785lcr.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/sh/drivers/dma/dmabrg.c b/arch/sh/drivers/dma/dmabrg.c index c0dd904483c7..e5a57a109d6c 100644 --- a/arch/sh/drivers/dma/dmabrg.c +++ b/arch/sh/drivers/dma/dmabrg.c @@ -154,7 +154,7 @@ static int __init dmabrg_init(void) unsigned long or; int ret; - dmabrg_handlers = kzalloc(10 * sizeof(struct dmabrg_handler), + dmabrg_handlers = kcalloc(10, sizeof(struct dmabrg_handler), GFP_KERNEL); if (!dmabrg_handlers) return -ENOMEM; diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c index 382e7ecf4c82..3d81a8b80942 100644 --- a/arch/sh/drivers/pci/pcie-sh7786.c +++ b/arch/sh/drivers/pci/pcie-sh7786.c @@ -561,7 +561,7 @@ static int __init sh7786_pcie_init(void) if (unlikely(nr_ports == 0)) return -ENODEV; - sh7786_pcie_ports = kzalloc(nr_ports * sizeof(struct sh7786_pcie_port), + sh7786_pcie_ports = kcalloc(nr_ports, sizeof(struct sh7786_pcie_port), GFP_KERNEL); if (unlikely(!sh7786_pcie_ports)) return -ENOMEM; diff --git a/arch/sh/include/asm/vmlinux.lds.h b/arch/sh/include/asm/vmlinux.lds.h index f312813f39d8..992955685874 100644 --- a/arch/sh/include/asm/vmlinux.lds.h +++ b/arch/sh/include/asm/vmlinux.lds.h @@ -7,9 +7,9 @@ #ifdef CONFIG_DWARF_UNWINDER #define DWARF_EH_FRAME \ .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) { \ - VMLINUX_SYMBOL(__start_eh_frame) = .; \ + __start_eh_frame = .; \ *(.eh_frame) \ - VMLINUX_SYMBOL(__stop_eh_frame) = .; \ + __stop_eh_frame = .; \ } #else #define DWARF_EH_FRAME diff --git a/arch/sh/kernel/process.c 
b/arch/sh/kernel/process.c index 68b1a67533ce..4d1bfc848dd3 100644 --- a/arch/sh/kernel/process.c +++ b/arch/sh/kernel/process.c @@ -12,7 +12,7 @@ struct kmem_cache *task_xstate_cachep = NULL; unsigned int xstate_size; -#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_STACKPROTECTOR unsigned long __stack_chk_guard __read_mostly; EXPORT_SYMBOL(__stack_chk_guard); #endif diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 93522069cb15..27fddb56b3e1 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -177,7 +177,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next) { struct thread_struct *next_t = &next->thread; -#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) +#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) __stack_chk_guard = next->stack_canary; #endif diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 9a2b8877f174..0f535debf802 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -178,7 +178,7 @@ config SMP Y to "Enhanced Real Time Clock Support", below. The "Advanced Power Management" code will be disabled if you say Y here. - See also and the SMP-HOWTO + See also and the SMP-HOWTO available at . If you don't know what to do here, say N. diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index 048ad783ea3f..8babbeb30adf 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c @@ -166,7 +166,8 @@ static int __init check_nmi_watchdog(void) if (!atomic_read(&nmi_active)) return 0; - prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL); + prev_nmi_count = kmalloc_array(nr_cpu_ids, sizeof(unsigned int), + GFP_KERNEL); if (!prev_nmi_count) { err = -ENOMEM; goto error; diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 7e49bbc925a5..63baa8aa9414 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -565,7 +565,8 @@ SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type, } if (!current_thread_info()->utraps) { current_thread_info()->utraps = - kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL); + kcalloc(UT_TRAP_INSTRUCTION_31 + 1, sizeof(long), + GFP_KERNEL); if (!current_thread_info()->utraps) return -ENOMEM; current_thread_info()->utraps[0] = 1; @@ -575,8 +576,9 @@ SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type, unsigned long *p = current_thread_info()->utraps; current_thread_info()->utraps = - kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), - GFP_KERNEL); + kmalloc_array(UT_TRAP_INSTRUCTION_31 + 1, + sizeof(long), + GFP_KERNEL); if (!current_thread_info()->utraps) { current_thread_info()->utraps = p; return -ENOMEM; diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 8aeb1aabe76e..f396048a0d68 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1620,7 +1620,7 @@ static void __init bootmem_init_nonnuma(void) (top_of_ram - total_ram) >> 20); init_node_masks_nonnuma(); - memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0); + memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); allocate_node_data(0); node_set_online(0); } diff --git a/arch/sparc/net/bpf_jit_comp_32.c b/arch/sparc/net/bpf_jit_comp_32.c index 3bd8ca95e521..a5ff88643d5c 100644 --- a/arch/sparc/net/bpf_jit_comp_32.c +++ b/arch/sparc/net/bpf_jit_comp_32.c @@ -335,7 +335,7 @@ void bpf_jit_compile(struct bpf_prog *fp) if (!bpf_jit_enable) return; - addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL); + addrs = kmalloc_array(flen, sizeof(*addrs), GFP_KERNEL); if 
(addrs == NULL) return; diff --git a/arch/um/Kconfig.um b/arch/um/Kconfig.um index 3e7f228b22e1..20da5a8ca949 100644 --- a/arch/um/Kconfig.um +++ b/arch/um/Kconfig.um @@ -80,7 +80,7 @@ config MAGIC_SYSRQ On UML, this is accomplished by sending a "sysrq" command with mconsole, followed by the letter for the requested command. - The keys are documented in . Don't say Y + The keys are documented in . Don't say Y unless you really know what this hack does. config KERNEL_STACK_ORDER diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index dcf5ea28a281..83c470364dfb 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -1127,9 +1127,9 @@ static int __init ubd_init(void) return -1; } - irq_req_buffer = kmalloc( - sizeof(struct io_thread_req *) * UBD_REQ_BUFFER_SIZE, - GFP_KERNEL + irq_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE, + sizeof(struct io_thread_req *), + GFP_KERNEL ); irq_remainder = 0; @@ -1137,9 +1137,9 @@ static int __init ubd_init(void) printk(KERN_ERR "Failed to initialize ubd buffering\n"); return -1; } - io_req_buffer = kmalloc( - sizeof(struct io_thread_req *) * UBD_REQ_BUFFER_SIZE, - GFP_KERNEL + io_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE, + sizeof(struct io_thread_req *), + GFP_KERNEL ); io_remainder = 0; diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c index 02168fe25105..50ee3bb5a63a 100644 --- a/arch/um/drivers/vector_kern.c +++ b/arch/um/drivers/vector_kern.c @@ -188,7 +188,7 @@ static int get_transport_options(struct arglist *def) if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0) return (vec_rx | VECTOR_BPF); if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0) - return (vec_rx | vec_tx); + return (vec_rx | vec_tx | VECTOR_QDISC_BYPASS); return (vec_rx | vec_tx); } @@ -504,15 +504,19 @@ static struct vector_queue *create_queue( result = kmalloc(sizeof(struct vector_queue), GFP_KERNEL); if (result == NULL) - goto out_fail; + return NULL; result->max_depth = max_size; result->dev = vp->dev; result->mmsg_vector = kmalloc( (sizeof(struct mmsghdr) * max_size), GFP_KERNEL); + if (result->mmsg_vector == NULL) + goto out_mmsg_fail; result->skbuff_vector = kmalloc( (sizeof(void *) * max_size), GFP_KERNEL); - if (result->mmsg_vector == NULL || result->skbuff_vector == NULL) - goto out_fail; + if (result->skbuff_vector == NULL) + goto out_skb_fail; + + /* further failures can be handled safely by destroy_queue*/ mmsg_vector = result->mmsg_vector; for (i = 0; i < max_size; i++) { @@ -527,14 +531,14 @@ static struct vector_queue *create_queue( result->max_iov_frags = num_extra_frags; for (i = 0; i < max_size; i++) { if (vp->header_size > 0) - iov = kmalloc( - sizeof(struct iovec) * (3 + num_extra_frags), - GFP_KERNEL + iov = kmalloc_array(3 + num_extra_frags, + sizeof(struct iovec), + GFP_KERNEL ); else - iov = kmalloc( - sizeof(struct iovec) * (2 + num_extra_frags), - GFP_KERNEL + iov = kmalloc_array(2 + num_extra_frags, + sizeof(struct iovec), + GFP_KERNEL ); if (iov == NULL) goto out_fail; @@ -563,6 +567,11 @@ static struct vector_queue *create_queue( result->head = 0; result->tail = 0; return result; +out_skb_fail: + kfree(result->mmsg_vector); +out_mmsg_fail: + kfree(result); + return NULL; out_fail: destroy_queue(result); return NULL; @@ -1232,9 +1241,8 @@ static int vector_net_open(struct net_device *dev) if ((vp->options & VECTOR_QDISC_BYPASS) != 0) { if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd)) - vp->options = vp->options | VECTOR_BPF; + vp->options |= VECTOR_BPF; } - if ((vp->options & 
VECTOR_BPF) != 0) vp->bpf = uml_vector_default_bpf(vp->fds->rx_fd, dev->dev_addr); diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S index b30d73ca29d0..7adb4e6b658a 100644 --- a/arch/um/include/asm/common.lds.S +++ b/arch/um/include/asm/common.lds.S @@ -53,12 +53,6 @@ CON_INITCALL } - .uml.initcall.init : { - __uml_initcall_start = .; - *(.uml.initcall.init) - __uml_initcall_end = .; - } - SECURITY_INIT .exitcall : { diff --git a/arch/um/include/shared/init.h b/arch/um/include/shared/init.h index b3f5865a92c9..c66de434a983 100644 --- a/arch/um/include/shared/init.h +++ b/arch/um/include/shared/init.h @@ -64,14 +64,10 @@ struct uml_param { int (*setup_func)(char *, int *); }; -extern initcall_t __uml_initcall_start, __uml_initcall_end; extern initcall_t __uml_postsetup_start, __uml_postsetup_end; extern const char *__uml_help_start, *__uml_help_end; #endif -#define __uml_initcall(fn) \ - static initcall_t __uml_initcall_##fn __uml_init_call = fn - #define __uml_exitcall(fn) \ static exitcall_t __uml_exitcall_##fn __uml_exit_call = fn @@ -108,7 +104,6 @@ extern struct uml_param __uml_setup_start, __uml_setup_end; */ #define __uml_init_setup __used __section(.uml.setup.init) #define __uml_setup_help __used __section(.uml.help.init) -#define __uml_init_call __used __section(.uml.initcall.init) #define __uml_postsetup_call __used __section(.uml.postsetup.init) #define __uml_exit_call __used __section(.uml.exitcall.exit) diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c index 5f970ece5ac3..f1fee2b91239 100644 --- a/arch/um/os-Linux/main.c +++ b/arch/um/os-Linux/main.c @@ -40,17 +40,6 @@ static void set_stklim(void) } } -static __init void do_uml_initcalls(void) -{ - initcall_t *call; - - call = &__uml_initcall_start; - while (call < &__uml_initcall_end) { - (*call)(); - call++; - } -} - static void last_ditch_exit(int sig) { uml_cleanup(); @@ -151,7 +140,6 @@ int __init main(int argc, char **argv, char **envp) scan_elf_aux(envp); #endif - do_uml_initcalls(); change_sig(SIGPIPE, 0); ret = linux_main(argc, argv); diff --git a/arch/unicore32/include/asm/cacheflush.h b/arch/unicore32/include/asm/cacheflush.h index 1d9132b66039..1c8b9f13a9e1 100644 --- a/arch/unicore32/include/asm/cacheflush.h +++ b/arch/unicore32/include/asm/cacheflush.h @@ -33,7 +33,7 @@ * Start addresses are inclusive and end addresses are exclusive; * start addresses should be rounded down, end addresses up. * - * See Documentation/cachetlb.txt for more information. + * See Documentation/core-api/cachetlb.rst for more information. * Please note that the implementation of these, and the required * effects are cache-type (VIVT/VIPT/PIPT) specific. 
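The create_queue() rework in the vector_kern.c hunk above replaces a catch-all failure path, which could reach destroy_queue() with half-initialized state, with the kernel's usual reverse-order unwind labels: each allocation gets its own label, and an error frees only what already succeeded. The shape of the idiom in a reduced, hypothetical sketch:

	#include <stdlib.h>

	struct queue { void *a; void *b; };

	static struct queue *queue_create(size_t n)
	{
		struct queue *q = malloc(sizeof(*q));

		if (!q)
			return NULL;
		q->a = malloc(n);
		if (!q->a)
			goto out_free_q;
		q->b = malloc(n);
		if (!q->b)
			goto out_free_a;
		return q;

	out_free_a:
		free(q->a);	/* unwind in reverse order of allocation */
	out_free_q:
		free(q);
		return NULL;
	}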
* diff --git a/arch/unicore32/kernel/pm.c b/arch/unicore32/kernel/pm.c index 784bc2db3b28..6f8164d91dc2 100644 --- a/arch/unicore32/kernel/pm.c +++ b/arch/unicore32/kernel/pm.c @@ -109,8 +109,9 @@ static int __init puv3_pm_init(void) return -EINVAL; } - sleep_save = kmalloc(puv3_cpu_pm_fns->save_count - * sizeof(unsigned long), GFP_KERNEL); + sleep_save = kmalloc_array(puv3_cpu_pm_fns->save_count, + sizeof(unsigned long), + GFP_KERNEL); if (!sleep_save) { printk(KERN_ERR "failed to alloc memory for pm save\n"); return -ENOMEM; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 297789aef9fa..f1dbb4ee19d7 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -130,7 +130,6 @@ config X86 select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64 select HAVE_ARCH_VMAP_STACK if X86_64 select HAVE_ARCH_WITHIN_STACK_FRAMES - select HAVE_CC_STACKPROTECTOR select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL select HAVE_CONTEXT_TRACKING if X86_64 @@ -182,6 +181,7 @@ config X86 select HAVE_RCU_TABLE_FREE select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION + select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR select HAVE_STACK_VALIDATION if X86_64 select HAVE_RSEQ select HAVE_SYSCALL_TRACEPOINTS @@ -327,7 +327,7 @@ config X86_64_SMP config X86_32_LAZY_GS def_bool y - depends on X86_32 && CC_STACKPROTECTOR_NONE + depends on X86_32 && !STACKPROTECTOR config ARCH_SUPPORTS_UPROBES def_bool y @@ -346,6 +346,15 @@ config PGTABLE_LEVELS default 2 source "init/Kconfig" + +config CC_HAS_SANE_STACKPROTECTOR + bool + default $(success,$(srctree)/scripts/gcc-x86_64-has-stack-protector.sh $(CC)) if 64BIT + default $(success,$(srctree)/scripts/gcc-x86_32-has-stack-protector.sh $(CC)) + help + We have to make sure stack protector is unconditionally disabled if + the compiler produces broken code. + source "kernel/Kconfig.freezer" menu "Processor type and features" diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index bef8e2b202a8..2582881d19ce 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -239,7 +239,7 @@ ENTRY(__switch_to_asm) movl %esp, TASK_threadsp(%eax) movl TASK_threadsp(%edx), %esp -#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_STACKPROTECTOR movl TASK_stack_canary(%edx), %ebx movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset #endif diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 3166b9674429..73a522d53b53 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -357,7 +357,7 @@ ENTRY(__switch_to_asm) movq %rsp, TASK_threadsp(%rdi) movq TASK_threadsp(%rsi), %rsp -#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_STACKPROTECTOR movq TASK_stack_canary(%rsi), %rbx movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset #endif diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index 7782cdbcd67d..82ed001e8909 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -201,7 +201,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) /* * Handle seccomp. regs->ip must be the original value. - * See seccomp_send_sigsys and Documentation/prctl/seccomp_filter.txt. + * See seccomp_send_sigsys and Documentation/userspace-api/seccomp_filter.rst. * * We could optimize the seccomp disabled case, but performance * here doesn't matter. 
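The CONFIG_CC_STACKPROTECTOR to CONFIG_STACKPROTECTOR renames track the Kconfig-side compiler probing (CC_HAS_SANE_STACKPROTECTOR above, which runs the gcc-x86_*-has-stack-protector.sh scripts at configuration time). What the option ultimately buys: the compiler plants a canary between each instrumented function's locals and its return address and checks it on exit; on x86 the reference copy lives in per-CPU storage, which is why __switch_to_asm refreshes PER_CPU_VAR(stack_canary) / irq_stack_union on context switch. Roughly what the instrumentation amounts to, as an illustration only, not literal generated code:

	extern unsigned long __stack_chk_guard;	/* the kernel's canary value */
	extern void __stack_chk_fail(void);	/* called on mismatch; never returns */

	void instrumented(void)
	{
		unsigned long canary = __stack_chk_guard;	/* prologue: stash canary */
		char buf[64];

		buf[0] = 0;	/* ... body that might overflow buf ... */
		if (canary != __stack_chk_guard)		/* epilogue: verify */
			__stack_chk_fail();
	}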
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index 38b5d41b0c37..3210fee27e7f 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c @@ -387,7 +387,7 @@ static __init int _init_events_attrs(void) while (amd_iommu_v2_event_descs[i].attr.attr.name) i++; - attrs = kzalloc(sizeof(struct attribute **) * (i + 1), GFP_KERNEL); + attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL); if (!attrs) return -ENOMEM; diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 6e461fb1e0d4..5f4829f10129 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1637,7 +1637,7 @@ __init struct attribute **merge_attr(struct attribute **a, struct attribute **b) j++; j++; - new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL); + new = kmalloc_array(j, sizeof(struct attribute *), GFP_KERNEL); if (!new) return NULL; diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 15b07379e72d..27a461414b30 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -865,12 +865,10 @@ static void uncore_types_exit(struct intel_uncore_type **types) static int __init uncore_type_init(struct intel_uncore_type *type, bool setid) { struct intel_uncore_pmu *pmus; - struct attribute_group *attr_group; - struct attribute **attrs; size_t size; int i, j; - pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL); + pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL); if (!pmus) return -ENOMEM; @@ -891,21 +889,24 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid) 0, type->num_counters, 0, 0); if (type->event_descs) { + struct { + struct attribute_group group; + struct attribute *attrs[]; + } *attr_group; for (i = 0; type->event_descs[i].attr.attr.name; i++); - attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) + - sizeof(*attr_group), GFP_KERNEL); + attr_group = kzalloc(struct_size(attr_group, attrs, i + 1), + GFP_KERNEL); if (!attr_group) goto err; - attrs = (struct attribute **)(attr_group + 1); - attr_group->name = "events"; - attr_group->attrs = attrs; + attr_group->group.name = "events"; + attr_group->group.attrs = attr_group->attrs; for (j = 0; j < i; j++) - attrs[j] = &type->event_descs[j].attr.attr; + attr_group->attrs[j] = &type->event_descs[j].attr.attr; - type->events_group = attr_group; + type->events_group = &attr_group->group; } type->pmu_group = &uncore_pmu_attr_group; diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c index 5f053d7d1bd9..de27615c51ea 100644 --- a/arch/x86/hyperv/mmu.c +++ b/arch/x86/hyperv/mmu.c @@ -13,22 +13,6 @@ #define CREATE_TRACE_POINTS #include -/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */ -struct hv_flush_pcpu { - u64 address_space; - u64 flags; - u64 processor_mask; - u64 gva_list[]; -}; - -/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */ -struct hv_flush_pcpu_ex { - u64 address_space; - u64 flags; - struct hv_vpset hv_vp_set; - u64 gva_list[]; -}; - /* Each gva in gva_list encodes up to 4096 pages to flush */ #define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE) @@ -67,8 +51,8 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus, const struct flush_tlb_info *info) { int cpu, vcpu, gva_n, max_gvas; - struct hv_flush_pcpu **flush_pcpu; - struct hv_flush_pcpu *flush; + struct hv_tlb_flush **flush_pcpu; + struct hv_tlb_flush *flush; u64 status = U64_MAX; unsigned long flags; @@ -82,7 +66,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus, 
local_irq_save(flags); - flush_pcpu = (struct hv_flush_pcpu **) + flush_pcpu = (struct hv_tlb_flush **) this_cpu_ptr(hyperv_pcpu_input_arg); flush = *flush_pcpu; @@ -152,8 +136,8 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus, const struct flush_tlb_info *info) { int nr_bank = 0, max_gvas, gva_n; - struct hv_flush_pcpu_ex **flush_pcpu; - struct hv_flush_pcpu_ex *flush; + struct hv_tlb_flush_ex **flush_pcpu; + struct hv_tlb_flush_ex *flush; u64 status = U64_MAX; unsigned long flags; @@ -167,7 +151,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus, local_irq_save(flags); - flush_pcpu = (struct hv_flush_pcpu_ex **) + flush_pcpu = (struct hv_tlb_flush_ex **) this_cpu_ptr(hyperv_pcpu_input_arg); flush = *flush_pcpu; diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h index 3bfa92c2793c..b8c89265baf0 100644 --- a/arch/x86/include/asm/hyperv-tlfs.h +++ b/arch/x86/include/asm/hyperv-tlfs.h @@ -308,6 +308,9 @@ struct ms_hyperv_tsc_page { /* TSC emulation after migration */ #define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106 +/* Nested features (CPUID 0x4000000A) EAX */ +#define HV_X64_NESTED_MSR_BITMAP BIT(19) + struct hv_reenlightenment_control { __u64 vector:8; __u64 reserved1:8; @@ -678,7 +681,11 @@ struct hv_enlightened_vmcs { u32 hv_clean_fields; u32 hv_padding_32; u32 hv_synthetic_controls; - u32 hv_enlightenments_control; + struct { + u32 nested_flush_hypercall:1; + u32 msr_bitmap:1; + u32 reserved:30; + } hv_enlightenments_control; u32 hv_vp_id; u64 hv_vm_id; @@ -734,4 +741,20 @@ struct ipi_arg_ex { struct hv_vpset vp_set; }; +/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */ +struct hv_tlb_flush { + u64 address_space; + u64 flags; + u64 processor_mask; + u64 gva_list[]; +}; + +/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */ +struct hv_tlb_flush_ex { + u64 address_space; + u64 flags; + struct hv_vpset hv_vp_set; + u64 gva_list[]; +}; + #endif diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index b24b1c8b3979..0f82cd91cd3c 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -107,11 +107,12 @@ struct x86_emulate_ops { * @addr: [IN ] Linear address from which to read. * @val: [OUT] Value read from memory, zero-extended to 'u_long'. * @bytes: [IN ] Number of bytes to read from memory. + * @system:[IN ] Whether the access is forced to be at CPL0. */ int (*read_std)(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *val, unsigned int bytes, - struct x86_exception *fault); + struct x86_exception *fault, bool system); /* * read_phys: Read bytes of standard (non-emulated/special) memory. @@ -129,10 +130,11 @@ struct x86_emulate_ops { * @addr: [IN ] Linear address to which to write. * @val: [OUT] Value write to memory, zero-extended to 'u_long'. * @bytes: [IN ] Number of bytes to write to memory. + * @system:[IN ] Whether the access is forced to be at CPL0. */ int (*write_std)(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *val, unsigned int bytes, - struct x86_exception *fault); + struct x86_exception *fault, bool system); /* * fetch: Read bytes of standard (non-emulated/special) memory. * Used for instruction fetch. 
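The uncore_type_init() change above is the same overflow-safety theme with a twist: the events attribute group and its attribute pointer array share one allocation, so the open-coded `sizeof(struct attribute *) * (i + 1) + sizeof(*attr_group)` becomes struct_size() over a flexible array member. The pattern in isolation, with hypothetical names:

	#include <linux/overflow.h>
	#include <linux/slab.h>
	#include <linux/sysfs.h>

	struct attr_block {			/* hypothetical */
		struct attribute_group group;
		struct attribute *attrs[];	/* flexible array member */
	};

	static struct attr_block *attr_block_alloc(int n)
	{
		struct attr_block *b;

		/* sizeof(*b) + (n + 1) * sizeof(b->attrs[0]), saturating on overflow */
		b = kzalloc(struct_size(b, attrs, n + 1), GFP_KERNEL);
		if (b)
			b->group.attrs = b->attrs;	/* group points into the same block */
		return b;
	}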
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index f4b2588865e9..c13cd28d9d1b 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -258,7 +258,8 @@ union kvm_mmu_page_role { unsigned smep_andnot_wp:1; unsigned smap_andnot_wp:1; unsigned ad_disabled:1; - unsigned :7; + unsigned guest_mode:1; + unsigned :6; /* * This is left at the top of the word so that @@ -476,6 +477,7 @@ struct kvm_vcpu_hv { struct kvm_hyperv_exit exit; struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT]; DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); + cpumask_t tlb_lush; }; struct kvm_vcpu_arch { @@ -995,7 +997,7 @@ struct kvm_x86_ops { void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr); void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr); void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); - void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set); + void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu); void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa); void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector); int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu); @@ -1277,6 +1279,7 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); int kvm_mmu_load(struct kvm_vcpu *vcpu); void kvm_mmu_unload(struct kvm_vcpu *vcpu); void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); +void kvm_mmu_free_roots(struct kvm_vcpu *vcpu); gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, struct x86_exception *exception); gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 997192131b7b..3cd14311edfa 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -269,7 +269,7 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset, return 0; /* - * Clear all banks up to the maximum possible bank as hv_flush_pcpu_ex + * Clear all banks up to the maximum possible bank as hv_tlb_flush_ex * structs are not cleared between calls, we risk flushing unneeded * vCPUs otherwise. 
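In the kvm_host.h hunk above, the new guest_mode bit is taken out of the adjacent anonymous padding (`:7` shrinks to `guest_mode:1` plus `:6`), so kvm_mmu_page_role keeps its size and the role can still be compared as a single word. In miniature (an illustrative layout, not the real one):

	#include <stdio.h>

	union role {
		unsigned int word;
		struct {
			unsigned int level:4;
			unsigned int ad_disabled:1;
			unsigned int guest_mode:1;	/* new flag */
			unsigned int unused:26;		/* was 27 before the new flag */
		};
	};

	int main(void)
	{
		printf("%zu\n", sizeof(union role));	/* still 4 bytes */
		return 0;
	}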
*/ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index e28add6b791f..cfd29ee8c3da 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -412,7 +412,7 @@ extern asmlinkage void ignore_sysret(void); void save_fsgs_for_kvm(void); #endif #else /* X86_64 */ -#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_STACKPROTECTOR /* * Make sure stack canary segment base is cached-aligned: * "For Intel Atom processors, avoid non zero segment base address diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index 8f09012b92e7..e293c122d0d5 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -146,7 +146,7 @@ # define __KERNEL_PERCPU 0 #endif -#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_STACKPROTECTOR # define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY*8) #else # define __KERNEL_STACK_CANARY 0 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index 371b3a4af000..8ec97a62c245 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -34,7 +34,7 @@ #ifndef _ASM_STACKPROTECTOR_H #define _ASM_STACKPROTECTOR_H 1 -#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_STACKPROTECTOR #include #include @@ -105,7 +105,7 @@ static inline void load_stack_canary_segment(void) #endif } -#else /* CC_STACKPROTECTOR */ +#else /* STACKPROTECTOR */ #define GDT_STACK_CANARY_INIT @@ -121,5 +121,5 @@ static inline void load_stack_canary_segment(void) #endif } -#endif /* CC_STACKPROTECTOR */ +#endif /* STACKPROTECTOR */ #endif /* _ASM_STACKPROTECTOR_H */ diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 5db8b0b10766..425e6b8b9547 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -207,7 +207,9 @@ enum vmcs_field { EPTP_LIST_ADDRESS = 0x00002024, EPTP_LIST_ADDRESS_HIGH = 0x00002025, VMREAD_BITMAP = 0x00002026, + VMREAD_BITMAP_HIGH = 0x00002027, VMWRITE_BITMAP = 0x00002028, + VMWRITE_BITMAP_HIGH = 0x00002029, XSS_EXIT_BITMAP = 0x0000202C, XSS_EXIT_BITMAP_HIGH = 0x0000202D, TSC_MULTIPLIER = 0x00002032, diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 76417a9aab73..dcb008c320fe 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -32,7 +32,7 @@ void common(void) { BLANK(); OFFSET(TASK_threadsp, task_struct, thread.sp); -#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_STACKPROTECTOR OFFSET(TASK_stack_canary, task_struct, stack_canary); #endif diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index f91ba53e06c8..a4a3be399f4b 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -50,7 +50,7 @@ void foo(void) DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) - offsetofend(struct cpu_entry_area, entry_stack_page.stack)); -#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_STACKPROTECTOR BLANK(); OFFSET(stack_canary_offset, stack_canary, canary); #endif diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index bf51e51d808d..b2dcd161f514 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c @@ -69,7 +69,7 @@ int main(void) OFFSET(TSS_sp1, tss_struct, x86_tss.sp1); BLANK(); -#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_STACKPROTECTOR DEFINE(stack_canary_offset, offsetof(union irq_stack_union, stack_canary)); BLANK(); #endif diff --git a/arch/x86/kernel/cpu/common.c 
b/arch/x86/kernel/cpu/common.c index 910b47ee8078..0df7151cfef4 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1599,7 +1599,7 @@ DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = (unsigned long)&init_thread_union + THREAD_SIZE; EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack); -#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_STACKPROTECTOR DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); #endif diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index cd76380af79f..e4cf6ff1c2e1 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1457,7 +1457,7 @@ static int __mcheck_cpu_mce_banks_init(void) int i; u8 num_banks = mca_cfg.banks; - mce_banks = kzalloc(num_banks * sizeof(struct mce_bank), GFP_KERNEL); + mce_banks = kcalloc(num_banks, sizeof(struct mce_bank), GFP_KERNEL); if (!mce_banks) return -ENOMEM; diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index f591b01930db..dd33c357548f 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -1384,7 +1384,7 @@ int mce_threshold_create_device(unsigned int cpu) if (bp) return 0; - bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks, + bp = kcalloc(mca_cfg.banks, sizeof(struct threshold_bank *), GFP_KERNEL); if (!bp) return -ENOMEM; diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index c610f47373e4..4021d3859499 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c @@ -43,7 +43,7 @@ mtrr_file_add(unsigned long base, unsigned long size, max = num_var_ranges; if (fcount == NULL) { - fcount = kzalloc(max * sizeof *fcount, GFP_KERNEL); + fcount = kcalloc(max, sizeof(*fcount), GFP_KERNEL); if (!fcount) return -ENOMEM; FILE_FCOUNT(file) = fcount; diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index b59e4fb40fd9..abe6df15a8fb 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -375,7 +375,7 @@ ENDPROC(startup_32_smp) */ __INIT setup_once: -#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_STACKPROTECTOR /* * Configure the stack canary. The linker can't handle this by * relocation. 
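The mce, mce_amd and mtrr hunks above all replace kzalloc(n * size, ...) with kcalloc(n, size, ...). The point is overflow safety: if the multiplication wraps, kcalloc fails instead of returning an undersized buffer. A userspace sketch of the same idea (checked_alloc is illustrative, not a kernel API):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	static void *checked_alloc(size_t n, size_t size)
	{
		if (size && n > SIZE_MAX / size)	/* multiplication would wrap */
			return NULL;
		return calloc(n, size);	/* calloc-style interfaces also zero the memory */
	}

	int main(void)
	{
		/* A count large enough that n * size overflows size_t. */
		size_t huge = (SIZE_MAX / sizeof(uint64_t)) + 1;

		if (!checked_alloc(huge, sizeof(uint64_t)))
			printf("overflow detected, allocation refused\n");
		return 0;
	}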
Manually set base address in stack canary diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index b6be34ee88e9..346b24883911 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -610,7 +610,7 @@ static void hpet_msi_capability_lookup(unsigned int start_timer) if (!hpet_domain) return; - hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL); + hpet_devs = kcalloc(num_timers, sizeof(struct hpet_dev), GFP_KERNEL); if (!hpet_devs) return; @@ -966,8 +966,8 @@ int __init hpet_enable(void) #endif cfg = hpet_readl(HPET_CFG); - hpet_boot_cfg = kmalloc((last + 2) * sizeof(*hpet_boot_cfg), - GFP_KERNEL); + hpet_boot_cfg = kmalloc_array(last + 2, sizeof(*hpet_boot_cfg), + GFP_KERNEL); if (hpet_boot_cfg) *hpet_boot_cfg = cfg; else diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c index 8c1cc08f514f..163ae706a0d4 100644 --- a/arch/x86/kernel/ksysfs.c +++ b/arch/x86/kernel/ksysfs.c @@ -283,7 +283,7 @@ static int __init create_setup_data_nodes(struct kobject *parent) if (ret) goto out_setup_data_kobj; - kobjp = kmalloc(sizeof(*kobjp) * nr, GFP_KERNEL); + kobjp = kmalloc_array(nr, sizeof(*kobjp), GFP_KERNEL); if (!kobjp) { ret = -ENOMEM; goto out_setup_data_kobj; diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index f4f30d0c25c4..7e042e3d47fd 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -203,8 +203,9 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, goto out; r = -ENOMEM; if (cpuid->nent) { - cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * - cpuid->nent); + cpuid_entries = + vmalloc(array_size(sizeof(struct kvm_cpuid_entry), + cpuid->nent)); if (!cpuid_entries) goto out; r = -EFAULT; @@ -404,7 +405,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, const u32 kvm_cpuid_7_0_ecx_x86_features = F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) | - F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG); + F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) | + F(CLDEMOTE); /* cpuid 7.0.edx*/ const u32 kvm_cpuid_7_0_edx_x86_features = @@ -784,7 +786,8 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, return -EINVAL; r = -ENOMEM; - cpuid_entries = vzalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent); + cpuid_entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2), + cpuid->nent)); if (!cpuid_entries) goto out; diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index b3705ae52824..4c4f4263420c 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -812,6 +812,19 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) return assign_eip_near(ctxt, ctxt->_eip + rel); } +static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear, + void *data, unsigned size) +{ + return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true); +} + +static int linear_write_system(struct x86_emulate_ctxt *ctxt, + ulong linear, void *data, + unsigned int size) +{ + return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true); +} + static int segmented_read_std(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, void *data, @@ -823,7 +836,7 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt, rc = linearize(ctxt, addr, size, false, &linear); if (rc != X86EMUL_CONTINUE) return rc; - return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception); + return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, 
false); } static int segmented_write_std(struct x86_emulate_ctxt *ctxt, @@ -837,7 +850,7 @@ static int segmented_write_std(struct x86_emulate_ctxt *ctxt, rc = linearize(ctxt, addr, size, true, &linear); if (rc != X86EMUL_CONTINUE) return rc; - return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception); + return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false); } /* @@ -1496,8 +1509,7 @@ static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt, return emulate_gp(ctxt, index << 3 | 0x2); addr = dt.address + index * 8; - return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, - &ctxt->exception); + return linear_read_system(ctxt, addr, desc, sizeof *desc); } static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, @@ -1560,8 +1572,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt, if (rc != X86EMUL_CONTINUE) return rc; - return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc), - &ctxt->exception); + return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc)); } /* allowed just for 8 bytes segments */ @@ -1575,8 +1586,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, if (rc != X86EMUL_CONTINUE) return rc; - return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc, - &ctxt->exception); + return linear_write_system(ctxt, addr, desc, sizeof *desc); } static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, @@ -1737,8 +1747,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, return ret; } } else if (ctxt->mode == X86EMUL_MODE_PROT64) { - ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3, - sizeof(base3), &ctxt->exception); + ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3)); if (ret != X86EMUL_CONTINUE) return ret; if (emul_is_noncanonical_address(get_desc_base(&seg_desc) | @@ -2051,11 +2060,11 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) eip_addr = dt.address + (irq << 2); cs_addr = dt.address + (irq << 2) + 2; - rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception); + rc = linear_read_system(ctxt, cs_addr, &cs, 2); if (rc != X86EMUL_CONTINUE) return rc; - rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception); + rc = linear_read_system(ctxt, eip_addr, &eip, 2); if (rc != X86EMUL_CONTINUE) return rc; @@ -2919,12 +2928,12 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, #ifdef CONFIG_X86_64 base |= ((u64)base3) << 32; #endif - r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL); + r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true); if (r != X86EMUL_CONTINUE) return false; if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) return false; - r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL); + r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true); if (r != X86EMUL_CONTINUE) return false; if ((perm >> bit_idx) & mask) @@ -3053,35 +3062,30 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 tss_selector, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc) { - const struct x86_emulate_ops *ops = ctxt->ops; struct tss_segment_16 tss_seg; int ret; u32 new_tss_base = get_desc_base(new_desc); - ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, - &ctxt->exception); + ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); if (ret != X86EMUL_CONTINUE) return ret; save_state_to_tss16(ctxt, &tss_seg); - ret = ops->write_std(ctxt, 
old_tss_base, &tss_seg, sizeof tss_seg, - &ctxt->exception); + ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); if (ret != X86EMUL_CONTINUE) return ret; - ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, - &ctxt->exception); + ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg); if (ret != X86EMUL_CONTINUE) return ret; if (old_tss_sel != 0xffff) { tss_seg.prev_task_link = old_tss_sel; - ret = ops->write_std(ctxt, new_tss_base, - &tss_seg.prev_task_link, - sizeof tss_seg.prev_task_link, - &ctxt->exception); + ret = linear_write_system(ctxt, new_tss_base, + &tss_seg.prev_task_link, + sizeof tss_seg.prev_task_link); if (ret != X86EMUL_CONTINUE) return ret; } @@ -3197,38 +3201,34 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 tss_selector, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc) { - const struct x86_emulate_ops *ops = ctxt->ops; struct tss_segment_32 tss_seg; int ret; u32 new_tss_base = get_desc_base(new_desc); u32 eip_offset = offsetof(struct tss_segment_32, eip); u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector); - ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, - &ctxt->exception); + ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); if (ret != X86EMUL_CONTINUE) return ret; save_state_to_tss32(ctxt, &tss_seg); /* Only GP registers and segment selectors are saved */ - ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip, - ldt_sel_offset - eip_offset, &ctxt->exception); + ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip, + ldt_sel_offset - eip_offset); if (ret != X86EMUL_CONTINUE) return ret; - ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, - &ctxt->exception); + ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg); if (ret != X86EMUL_CONTINUE) return ret; if (old_tss_sel != 0xffff) { tss_seg.prev_task_link = old_tss_sel; - ret = ops->write_std(ctxt, new_tss_base, - &tss_seg.prev_task_link, - sizeof tss_seg.prev_task_link, - &ctxt->exception); + ret = linear_write_system(ctxt, new_tss_base, + &tss_seg.prev_task_link, + sizeof tss_seg.prev_task_link); if (ret != X86EMUL_CONTINUE) return ret; } @@ -4189,7 +4189,9 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt) maxphyaddr = eax & 0xff; else maxphyaddr = 36; - rsvd = rsvd_bits(maxphyaddr, 62); + rsvd = rsvd_bits(maxphyaddr, 63); + if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE) + rsvd &= ~CR3_PCID_INVD; } if (new_val & rsvd) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 46ff64da44ca..af8caf965baa 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1242,6 +1242,121 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) return kvm_hv_get_msr(vcpu, msr, pdata); } +static __always_inline int get_sparse_bank_no(u64 valid_bank_mask, int bank_no) +{ + int i = 0, j; + + if (!(valid_bank_mask & BIT_ULL(bank_no))) + return -1; + + for (j = 0; j < bank_no; j++) + if (valid_bank_mask & BIT_ULL(j)) + i++; + + return i; +} + +static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, + u16 rep_cnt, bool ex) +{ + struct kvm *kvm = current_vcpu->kvm; + struct kvm_vcpu_hv *hv_current = &current_vcpu->arch.hyperv; + struct hv_tlb_flush_ex flush_ex; + struct hv_tlb_flush flush; + struct kvm_vcpu *vcpu; + unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)] = {0}; + unsigned long valid_bank_mask = 0; + u64 sparse_banks[64]; + int sparse_banks_len, i; + bool
all_cpus; + + if (!ex) { + if (unlikely(kvm_read_guest(kvm, ingpa, &flush, sizeof(flush)))) + return HV_STATUS_INVALID_HYPERCALL_INPUT; + + trace_kvm_hv_flush_tlb(flush.processor_mask, + flush.address_space, flush.flags); + + sparse_banks[0] = flush.processor_mask; + all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS; + } else { + if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex, + sizeof(flush_ex)))) + return HV_STATUS_INVALID_HYPERCALL_INPUT; + + trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask, + flush_ex.hv_vp_set.format, + flush_ex.address_space, + flush_ex.flags); + + valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask; + all_cpus = flush_ex.hv_vp_set.format != + HV_GENERIC_SET_SPARSE_4K; + + sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) * + sizeof(sparse_banks[0]); + + if (!sparse_banks_len && !all_cpus) + goto ret_success; + + if (!all_cpus && + kvm_read_guest(kvm, + ingpa + offsetof(struct hv_tlb_flush_ex, + hv_vp_set.bank_contents), + sparse_banks, + sparse_banks_len)) + return HV_STATUS_INVALID_HYPERCALL_INPUT; + } + + cpumask_clear(&hv_current->tlb_lush); + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv; + int bank = hv->vp_index / 64, sbank = 0; + + if (!all_cpus) { + /* Banks >= 64 can't be represented */ + if (bank >= 64) + continue; + + /* Non-ex hypercalls can only address first 64 vCPUs */ + if (!ex && bank) + continue; + + if (ex) { + /* + * Check if the bank of this vCPU is in the sparse + * set and get the sparse bank number. + */ + sbank = get_sparse_bank_no(valid_bank_mask, + bank); + + if (sbank < 0) + continue; + } + + if (!(sparse_banks[sbank] & BIT_ULL(hv->vp_index % 64))) + continue; + } + + /* + * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we + * can't analyze it here, flush TLB regardless of the specified + * address space. + */ + __set_bit(i, vcpu_bitmap); + } + + kvm_make_vcpus_request_mask(kvm, + KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP, + vcpu_bitmap, &hv_current->tlb_lush); + +ret_success: + /* We always do full TLB flush, set rep_done = rep_cnt.
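For reference, kvm_hv_flush_tlb() above walks a Hyper-V sparse VP set: valid_bank_mask records which 64-vCPU banks are present in the packed bank_contents[] array, and get_sparse_bank_no() maps a bank number to its index in that packed array. A standalone sketch of the lookup (simplified, using a popcount where the kernel loops):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static int sparse_bank_no(uint64_t valid_bank_mask, int bank)
	{
		if (!(valid_bank_mask & (1ULL << bank)))
			return -1;	/* bank not present in the packed array */
		/* Index = number of present banks below this one. */
		return __builtin_popcountll(valid_bank_mask & ((1ULL << bank) - 1));
	}

	static bool vp_in_set(uint64_t valid_bank_mask, const uint64_t *banks,
			      unsigned int vp_index)
	{
		int sbank = sparse_bank_no(valid_bank_mask, vp_index / 64);

		return sbank >= 0 && (banks[sbank] & (1ULL << (vp_index % 64)));
	}

	int main(void)
	{
		/* Banks 0 and 2 present: vp 0..63 -> banks[0], vp 128..191 -> banks[1]. */
		uint64_t banks[] = { 0x1, 0x8 };	/* vp 0 and vp 131 targeted */

		printf("vp0=%d vp131=%d vp64=%d\n",
		       vp_in_set(0x5, banks, 0),
		       vp_in_set(0x5, banks, 131),
		       vp_in_set(0x5, banks, 64));	/* bank 1 absent: 0 */
		return 0;
	}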
*/ + return (u64)HV_STATUS_SUCCESS | + ((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET); +} + bool kvm_hv_hypercall_enabled(struct kvm *kvm) { return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE; @@ -1315,7 +1430,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) { u64 param, ingpa, outgpa, ret = HV_STATUS_SUCCESS; uint16_t code, rep_idx, rep_cnt; - bool fast, longmode; + bool fast, longmode, rep; /* * hypercall generates UD from non zero cpl and real mode @@ -1345,31 +1460,34 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) #endif code = param & 0xffff; - fast = (param >> 16) & 0x1; - rep_cnt = (param >> 32) & 0xfff; - rep_idx = (param >> 48) & 0xfff; + fast = !!(param & HV_HYPERCALL_FAST_BIT); + rep_cnt = (param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff; + rep_idx = (param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff; + rep = !!(rep_cnt || rep_idx); trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); - /* Hypercall continuation is not supported yet */ - if (rep_cnt || rep_idx) { - ret = HV_STATUS_INVALID_HYPERCALL_CODE; - goto out; - } - switch (code) { case HVCALL_NOTIFY_LONG_SPIN_WAIT: + if (unlikely(rep)) { + ret = HV_STATUS_INVALID_HYPERCALL_INPUT; + break; + } kvm_vcpu_on_spin(vcpu, true); break; case HVCALL_SIGNAL_EVENT: + if (unlikely(rep)) { + ret = HV_STATUS_INVALID_HYPERCALL_INPUT; + break; + } ret = kvm_hvcall_signal_event(vcpu, fast, ingpa); if (ret != HV_STATUS_INVALID_PORT_ID) break; /* maybe userspace knows this conn_id: fall through */ case HVCALL_POST_MESSAGE: /* don't bother userspace if it has no way to handle it */ - if (!vcpu_to_synic(vcpu)->active) { - ret = HV_STATUS_INVALID_HYPERCALL_CODE; + if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) { + ret = HV_STATUS_INVALID_HYPERCALL_INPUT; break; } vcpu->run->exit_reason = KVM_EXIT_HYPERV; @@ -1380,12 +1498,39 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) vcpu->arch.complete_userspace_io = kvm_hv_hypercall_complete_userspace; return 0; + case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST: + if (unlikely(fast || !rep_cnt || rep_idx)) { + ret = HV_STATUS_INVALID_HYPERCALL_INPUT; + break; + } + ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false); + break; + case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE: + if (unlikely(fast || rep)) { + ret = HV_STATUS_INVALID_HYPERCALL_INPUT; + break; + } + ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false); + break; + case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX: + if (unlikely(fast || !rep_cnt || rep_idx)) { + ret = HV_STATUS_INVALID_HYPERCALL_INPUT; + break; + } + ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true); + break; + case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX: + if (unlikely(fast || rep)) { + ret = HV_STATUS_INVALID_HYPERCALL_INPUT; + break; + } + ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true); + break; default: ret = HV_STATUS_INVALID_HYPERCALL_CODE; break; } -out: return kvm_hv_hypercall_complete(vcpu, ret); } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 3773c4625114..b5cd8465d44f 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2002,13 +2002,11 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) } } - if ((old_value ^ value) & X2APIC_ENABLE) { - if (value & X2APIC_ENABLE) { - kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id); - kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true); - } else - kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false); - } + if (((old_value ^ value) & X2APIC_ENABLE) && (value & X2APIC_ENABLE)) + kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id); + + if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | 
X2APIC_ENABLE)) + kvm_x86_ops->set_virtual_apic_mode(vcpu); apic->base_address = apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_BASE; diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index edce055e9fd7..ed0ed39abd36 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -16,6 +16,13 @@ #define APIC_BUS_CYCLE_NS 1 #define APIC_BUS_FREQUENCY (1000000000ULL / APIC_BUS_CYCLE_NS) +enum lapic_mode { + LAPIC_MODE_DISABLED = 0, + LAPIC_MODE_INVALID = X2APIC_ENABLE, + LAPIC_MODE_XAPIC = MSR_IA32_APICBASE_ENABLE, + LAPIC_MODE_X2APIC = MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE, +}; + struct kvm_timer { struct hrtimer timer; s64 period; /* unit: ns */ @@ -89,6 +96,7 @@ u64 kvm_get_apic_base(struct kvm_vcpu *vcpu); int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info); int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s); int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s); +enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu); int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu); u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu); @@ -220,4 +228,10 @@ void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu); void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu); bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu); void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu); + +static inline enum lapic_mode kvm_apic_mode(u64 apic_base) +{ + return apic_base & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE); +} + #endif diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index d634f0332c0f..d594690d8b95 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -222,7 +222,6 @@ static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK | static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT; static void mmu_spte_set(u64 *sptep, u64 spte); -static void mmu_free_roots(struct kvm_vcpu *vcpu); void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value) { @@ -3343,51 +3342,48 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, return RET_PF_RETRY; } +static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, + struct list_head *invalid_list) +{ + struct kvm_mmu_page *sp; -static void mmu_free_roots(struct kvm_vcpu *vcpu) + if (!VALID_PAGE(*root_hpa)) + return; + + sp = page_header(*root_hpa & PT64_BASE_ADDR_MASK); + --sp->root_count; + if (!sp->root_count && sp->role.invalid) + kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); + + *root_hpa = INVALID_PAGE; +} + +void kvm_mmu_free_roots(struct kvm_vcpu *vcpu) { int i; - struct kvm_mmu_page *sp; LIST_HEAD(invalid_list); + struct kvm_mmu *mmu = &vcpu->arch.mmu; - if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + if (!VALID_PAGE(mmu->root_hpa)) return; - if (vcpu->arch.mmu.shadow_root_level >= PT64_ROOT_4LEVEL && - (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL || - vcpu->arch.mmu.direct_map)) { - hpa_t root = vcpu->arch.mmu.root_hpa; - - spin_lock(&vcpu->kvm->mmu_lock); - sp = page_header(root); - --sp->root_count; - if (!sp->root_count && sp->role.invalid) { - kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); - kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); - } - spin_unlock(&vcpu->kvm->mmu_lock); - vcpu->arch.mmu.root_hpa = INVALID_PAGE; - return; - } - spin_lock(&vcpu->kvm->mmu_lock); - for (i = 0; i < 4; ++i) { - hpa_t root = vcpu->arch.mmu.pae_root[i]; - if (root) { - root &= PT64_BASE_ADDR_MASK; - sp = page_header(root); - --sp->root_count; - if (!sp->root_count && sp->role.invalid) - 
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, - &invalid_list); - } - vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; + if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && + (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) { + mmu_free_root_page(vcpu->kvm, &mmu->root_hpa, &invalid_list); + } else { + for (i = 0; i < 4; ++i) + if (mmu->pae_root[i] != 0) + mmu_free_root_page(vcpu->kvm, &mmu->pae_root[i], + &invalid_list); + mmu->root_hpa = INVALID_PAGE; } + kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); spin_unlock(&vcpu->kvm->mmu_lock); - vcpu->arch.mmu.root_hpa = INVALID_PAGE; } +EXPORT_SYMBOL_GPL(kvm_mmu_free_roots); static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn) { @@ -3720,7 +3716,6 @@ static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) */ return RET_PF_RETRY; } -EXPORT_SYMBOL_GPL(handle_mmio_page_fault); static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu, u32 error_code, gfn_t gfn) @@ -3812,6 +3807,14 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, struct kvm_memory_slot *slot; bool async; + /* + * Don't expose private memslots to L2. + */ + if (is_guest_mode(vcpu) && !kvm_is_visible_gfn(vcpu->kvm, gfn)) { + *pfn = KVM_PFN_NOSLOT; + return false; + } + slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); async = false; *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable); @@ -3951,7 +3954,7 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu, void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu) { - mmu_free_roots(vcpu); + kvm_mmu_free_roots(vcpu); } static unsigned long get_cr3(struct kvm_vcpu *vcpu) @@ -4473,6 +4476,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) struct kvm_mmu *context = &vcpu->arch.mmu; context->base_role.word = 0; + context->base_role.guest_mode = is_guest_mode(vcpu); context->base_role.smm = is_smm(vcpu); context->base_role.ad_disabled = (shadow_accessed_mask == 0); context->page_fault = tdp_page_fault; @@ -4539,6 +4543,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) = smep && !is_write_protection(vcpu); context->base_role.smap_andnot_wp = smap && !is_write_protection(vcpu); + context->base_role.guest_mode = is_guest_mode(vcpu); context->base_role.smm = is_smm(vcpu); reset_shadow_zero_bits_mask(vcpu, context); } @@ -4564,7 +4569,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, context->root_hpa = INVALID_PAGE; context->direct_map = false; context->base_role.ad_disabled = !accessed_dirty; - + context->base_role.guest_mode = 1; update_permission_bitmask(vcpu, context, true); update_pkru_bitmask(vcpu, context, true); update_last_nonleaf_level(vcpu, context); @@ -4664,7 +4669,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_load); void kvm_mmu_unload(struct kvm_vcpu *vcpu) { - mmu_free_roots(vcpu); + kvm_mmu_free_roots(vcpu); WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); } EXPORT_SYMBOL_GPL(kvm_mmu_unload); @@ -4825,6 +4830,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, mask.smep_andnot_wp = 1; mask.smap_andnot_wp = 1; mask.smm = 1; + mask.guest_mode = 1; mask.ad_disabled = 1; /* diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c index 01c1371f39f8..3052a59a3065 100644 --- a/arch/x86/kvm/page_track.c +++ b/arch/x86/kvm/page_track.c @@ -40,8 +40,9 @@ int kvm_page_track_create_memslot(struct kvm_memory_slot *slot, int i; for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) { - slot->arch.gfn_track[i] = kvzalloc(npages * - sizeof(*slot->arch.gfn_track[i]), GFP_KERNEL); + slot->arch.gfn_track[i] = + kvcalloc(npages, 
sizeof(*slot->arch.gfn_track[i]), + GFP_KERNEL); if (!slot->arch.gfn_track[i]) goto track_free; } diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 950ec50f77c3..f059a73f0fd0 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1001,7 +1001,9 @@ static int svm_cpu_init(int cpu) if (svm_sev_enabled()) { r = -ENOMEM; - sd->sev_vmcbs = kmalloc((max_sev_asid + 1) * sizeof(void *), GFP_KERNEL); + sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1, + sizeof(void *), + GFP_KERNEL); if (!sd->sev_vmcbs) goto err_1; } @@ -1768,7 +1770,10 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, unsigned long npages, npinned, size; unsigned long locked, lock_limit; struct page **pages; - int first, last; + unsigned long first, last; + + if (ulen == 0 || uaddr + ulen < uaddr) + return NULL; /* Calculate number of pages. */ first = (uaddr & PAGE_MASK) >> PAGE_SHIFT; @@ -1855,13 +1860,13 @@ static void __unregister_enc_region_locked(struct kvm *kvm, static struct kvm *svm_vm_alloc(void) { - struct kvm_svm *kvm_svm = kzalloc(sizeof(struct kvm_svm), GFP_KERNEL); + struct kvm_svm *kvm_svm = vzalloc(sizeof(struct kvm_svm)); return &kvm_svm->kvm; } static void svm_vm_free(struct kvm *kvm) { - kfree(to_kvm_svm(kvm)); + vfree(to_kvm_svm(kvm)); } static void sev_vm_destroy(struct kvm *kvm) @@ -5062,7 +5067,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) set_cr_intercept(svm, INTERCEPT_CR8_WRITE); } -static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) +static void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu) { return; } @@ -6949,6 +6954,9 @@ static int svm_register_enc_region(struct kvm *kvm, if (!sev_guest(kvm)) return -ENOTTY; + if (range->addr > ULONG_MAX || range->size > ULONG_MAX) + return -EINVAL; + region = kzalloc(sizeof(*region), GFP_KERNEL); if (!region) return -ENOMEM; @@ -7100,7 +7108,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .enable_nmi_window = enable_nmi_window, .enable_irq_window = enable_irq_window, .update_cr8_intercept = update_cr8_intercept, - .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode, + .set_virtual_apic_mode = svm_set_virtual_apic_mode, .get_enable_apicv = svm_get_enable_apicv, .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl, .load_eoi_exitmap = svm_load_eoi_exitmap, diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 9807c314c478..0f997683404f 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -1367,6 +1367,57 @@ TRACE_EVENT(kvm_hv_timer_state, __entry->vcpu_id, __entry->hv_timer_in_use) ); + +/* + * Tracepoint for kvm_hv_flush_tlb. + */ +TRACE_EVENT(kvm_hv_flush_tlb, + TP_PROTO(u64 processor_mask, u64 address_space, u64 flags), + TP_ARGS(processor_mask, address_space, flags), + + TP_STRUCT__entry( + __field(u64, processor_mask) + __field(u64, address_space) + __field(u64, flags) + ), + + TP_fast_assign( + __entry->processor_mask = processor_mask; + __entry->address_space = address_space; + __entry->flags = flags; + ), + + TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx", + __entry->processor_mask, __entry->address_space, + __entry->flags) +); + +/* + * Tracepoint for kvm_hv_flush_tlb_ex. 
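The sev_pin_memory() hunk above adds a range sanity check: an empty length, or an end address that wraps past the top of the address space, is rejected before the page bounds are computed, and first/last now use unsigned long so large addresses are not truncated. A minimal sketch of the same check:

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	static bool range_ok(unsigned long uaddr, unsigned long ulen,
			     unsigned long *npages)
	{
		unsigned long first, last;

		if (ulen == 0 || uaddr + ulen < uaddr)	/* empty or wraps around */
			return false;

		first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
		last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
		*npages = last - first + 1;
		return true;
	}

	int main(void)
	{
		unsigned long n;

		printf("ok=%d\n", range_ok(0x1000, 0x3000, &n));	/* 3 pages */
		printf("wrap ok=%d\n", range_ok(~0UL - 10, 100, &n));	/* rejected */
		return 0;
	}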
+ */ +TRACE_EVENT(kvm_hv_flush_tlb_ex, + TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags), + TP_ARGS(valid_bank_mask, format, address_space, flags), + + TP_STRUCT__entry( + __field(u64, valid_bank_mask) + __field(u64, format) + __field(u64, address_space) + __field(u64, flags) + ), + + TP_fast_assign( + __entry->valid_bank_mask = valid_bank_mask; + __entry->format = format; + __entry->address_space = address_space; + __entry->flags = flags; + ), + + TP_printk("valid_bank_mask 0x%llx format 0x%llx " + "address_space 0x%llx flags 0x%llx", + __entry->valid_bank_mask, __entry->format, + __entry->address_space, __entry->flags) +); #endif /* _TRACE_KVM_H */ #undef TRACE_INCLUDE_PATH diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 40aa29204baf..559a12b6184d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -242,7 +242,11 @@ struct shared_msr_entry { * underlying hardware which will be used to run L2. * This structure is packed to ensure that its layout is identical across * machines (necessary for live migration). - * If there are changes in this struct, VMCS12_REVISION must be changed. + * + * IMPORTANT: Changing the layout of existing fields in this structure + * will break save/restore compatibility with older kvm releases. When + * adding new fields, either use space in the reserved padding* arrays + * or add the new fields to the end of the structure. */ typedef u64 natural_width; struct __packed vmcs12 { @@ -265,17 +269,14 @@ struct __packed vmcs12 { u64 virtual_apic_page_addr; u64 apic_access_addr; u64 posted_intr_desc_addr; - u64 vm_function_control; u64 ept_pointer; u64 eoi_exit_bitmap0; u64 eoi_exit_bitmap1; u64 eoi_exit_bitmap2; u64 eoi_exit_bitmap3; - u64 eptp_list_address; u64 xss_exit_bitmap; u64 guest_physical_address; u64 vmcs_link_pointer; - u64 pml_address; u64 guest_ia32_debugctl; u64 guest_ia32_pat; u64 guest_ia32_efer; @@ -288,7 +289,12 @@ struct __packed vmcs12 { u64 host_ia32_pat; u64 host_ia32_efer; u64 host_ia32_perf_global_ctrl; - u64 padding64[8]; /* room for future expansion */ + u64 vmread_bitmap; + u64 vmwrite_bitmap; + u64 vm_function_control; + u64 eptp_list_address; + u64 pml_address; + u64 padding64[3]; /* room for future expansion */ /* * To allow migration of L1 (complete with its L2 guests) between * machines of different natural widths (32 or 64 bit), we cannot have @@ -397,7 +403,6 @@ struct __packed vmcs12 { u16 guest_ldtr_selector; u16 guest_tr_selector; u16 guest_intr_status; - u16 guest_pml_index; u16 host_es_selector; u16 host_cs_selector; u16 host_ss_selector; @@ -405,12 +410,172 @@ struct __packed vmcs12 { u16 host_fs_selector; u16 host_gs_selector; u16 host_tr_selector; + u16 guest_pml_index; }; +/* + * For save/restore compatibility, the vmcs12 field offsets must not change. 
+ */ +#define CHECK_OFFSET(field, loc) \ + BUILD_BUG_ON_MSG(offsetof(struct vmcs12, field) != (loc), \ + "Offset of " #field " in struct vmcs12 has changed.") + +static inline void vmx_check_vmcs12_offsets(void) { + CHECK_OFFSET(revision_id, 0); + CHECK_OFFSET(abort, 4); + CHECK_OFFSET(launch_state, 8); + CHECK_OFFSET(io_bitmap_a, 40); + CHECK_OFFSET(io_bitmap_b, 48); + CHECK_OFFSET(msr_bitmap, 56); + CHECK_OFFSET(vm_exit_msr_store_addr, 64); + CHECK_OFFSET(vm_exit_msr_load_addr, 72); + CHECK_OFFSET(vm_entry_msr_load_addr, 80); + CHECK_OFFSET(tsc_offset, 88); + CHECK_OFFSET(virtual_apic_page_addr, 96); + CHECK_OFFSET(apic_access_addr, 104); + CHECK_OFFSET(posted_intr_desc_addr, 112); + CHECK_OFFSET(ept_pointer, 120); + CHECK_OFFSET(eoi_exit_bitmap0, 128); + CHECK_OFFSET(eoi_exit_bitmap1, 136); + CHECK_OFFSET(eoi_exit_bitmap2, 144); + CHECK_OFFSET(eoi_exit_bitmap3, 152); + CHECK_OFFSET(xss_exit_bitmap, 160); + CHECK_OFFSET(guest_physical_address, 168); + CHECK_OFFSET(vmcs_link_pointer, 176); + CHECK_OFFSET(guest_ia32_debugctl, 184); + CHECK_OFFSET(guest_ia32_pat, 192); + CHECK_OFFSET(guest_ia32_efer, 200); + CHECK_OFFSET(guest_ia32_perf_global_ctrl, 208); + CHECK_OFFSET(guest_pdptr0, 216); + CHECK_OFFSET(guest_pdptr1, 224); + CHECK_OFFSET(guest_pdptr2, 232); + CHECK_OFFSET(guest_pdptr3, 240); + CHECK_OFFSET(guest_bndcfgs, 248); + CHECK_OFFSET(host_ia32_pat, 256); + CHECK_OFFSET(host_ia32_efer, 264); + CHECK_OFFSET(host_ia32_perf_global_ctrl, 272); + CHECK_OFFSET(vmread_bitmap, 280); + CHECK_OFFSET(vmwrite_bitmap, 288); + CHECK_OFFSET(vm_function_control, 296); + CHECK_OFFSET(eptp_list_address, 304); + CHECK_OFFSET(pml_address, 312); + CHECK_OFFSET(cr0_guest_host_mask, 344); + CHECK_OFFSET(cr4_guest_host_mask, 352); + CHECK_OFFSET(cr0_read_shadow, 360); + CHECK_OFFSET(cr4_read_shadow, 368); + CHECK_OFFSET(cr3_target_value0, 376); + CHECK_OFFSET(cr3_target_value1, 384); + CHECK_OFFSET(cr3_target_value2, 392); + CHECK_OFFSET(cr3_target_value3, 400); + CHECK_OFFSET(exit_qualification, 408); + CHECK_OFFSET(guest_linear_address, 416); + CHECK_OFFSET(guest_cr0, 424); + CHECK_OFFSET(guest_cr3, 432); + CHECK_OFFSET(guest_cr4, 440); + CHECK_OFFSET(guest_es_base, 448); + CHECK_OFFSET(guest_cs_base, 456); + CHECK_OFFSET(guest_ss_base, 464); + CHECK_OFFSET(guest_ds_base, 472); + CHECK_OFFSET(guest_fs_base, 480); + CHECK_OFFSET(guest_gs_base, 488); + CHECK_OFFSET(guest_ldtr_base, 496); + CHECK_OFFSET(guest_tr_base, 504); + CHECK_OFFSET(guest_gdtr_base, 512); + CHECK_OFFSET(guest_idtr_base, 520); + CHECK_OFFSET(guest_dr7, 528); + CHECK_OFFSET(guest_rsp, 536); + CHECK_OFFSET(guest_rip, 544); + CHECK_OFFSET(guest_rflags, 552); + CHECK_OFFSET(guest_pending_dbg_exceptions, 560); + CHECK_OFFSET(guest_sysenter_esp, 568); + CHECK_OFFSET(guest_sysenter_eip, 576); + CHECK_OFFSET(host_cr0, 584); + CHECK_OFFSET(host_cr3, 592); + CHECK_OFFSET(host_cr4, 600); + CHECK_OFFSET(host_fs_base, 608); + CHECK_OFFSET(host_gs_base, 616); + CHECK_OFFSET(host_tr_base, 624); + CHECK_OFFSET(host_gdtr_base, 632); + CHECK_OFFSET(host_idtr_base, 640); + CHECK_OFFSET(host_ia32_sysenter_esp, 648); + CHECK_OFFSET(host_ia32_sysenter_eip, 656); + CHECK_OFFSET(host_rsp, 664); + CHECK_OFFSET(host_rip, 672); + CHECK_OFFSET(pin_based_vm_exec_control, 744); + CHECK_OFFSET(cpu_based_vm_exec_control, 748); + CHECK_OFFSET(exception_bitmap, 752); + CHECK_OFFSET(page_fault_error_code_mask, 756); + CHECK_OFFSET(page_fault_error_code_match, 760); + CHECK_OFFSET(cr3_target_count, 764); + CHECK_OFFSET(vm_exit_controls, 768); + 
CHECK_OFFSET(vm_exit_msr_store_count, 772); + CHECK_OFFSET(vm_exit_msr_load_count, 776); + CHECK_OFFSET(vm_entry_controls, 780); + CHECK_OFFSET(vm_entry_msr_load_count, 784); + CHECK_OFFSET(vm_entry_intr_info_field, 788); + CHECK_OFFSET(vm_entry_exception_error_code, 792); + CHECK_OFFSET(vm_entry_instruction_len, 796); + CHECK_OFFSET(tpr_threshold, 800); + CHECK_OFFSET(secondary_vm_exec_control, 804); + CHECK_OFFSET(vm_instruction_error, 808); + CHECK_OFFSET(vm_exit_reason, 812); + CHECK_OFFSET(vm_exit_intr_info, 816); + CHECK_OFFSET(vm_exit_intr_error_code, 820); + CHECK_OFFSET(idt_vectoring_info_field, 824); + CHECK_OFFSET(idt_vectoring_error_code, 828); + CHECK_OFFSET(vm_exit_instruction_len, 832); + CHECK_OFFSET(vmx_instruction_info, 836); + CHECK_OFFSET(guest_es_limit, 840); + CHECK_OFFSET(guest_cs_limit, 844); + CHECK_OFFSET(guest_ss_limit, 848); + CHECK_OFFSET(guest_ds_limit, 852); + CHECK_OFFSET(guest_fs_limit, 856); + CHECK_OFFSET(guest_gs_limit, 860); + CHECK_OFFSET(guest_ldtr_limit, 864); + CHECK_OFFSET(guest_tr_limit, 868); + CHECK_OFFSET(guest_gdtr_limit, 872); + CHECK_OFFSET(guest_idtr_limit, 876); + CHECK_OFFSET(guest_es_ar_bytes, 880); + CHECK_OFFSET(guest_cs_ar_bytes, 884); + CHECK_OFFSET(guest_ss_ar_bytes, 888); + CHECK_OFFSET(guest_ds_ar_bytes, 892); + CHECK_OFFSET(guest_fs_ar_bytes, 896); + CHECK_OFFSET(guest_gs_ar_bytes, 900); + CHECK_OFFSET(guest_ldtr_ar_bytes, 904); + CHECK_OFFSET(guest_tr_ar_bytes, 908); + CHECK_OFFSET(guest_interruptibility_info, 912); + CHECK_OFFSET(guest_activity_state, 916); + CHECK_OFFSET(guest_sysenter_cs, 920); + CHECK_OFFSET(host_ia32_sysenter_cs, 924); + CHECK_OFFSET(vmx_preemption_timer_value, 928); + CHECK_OFFSET(virtual_processor_id, 960); + CHECK_OFFSET(posted_intr_nv, 962); + CHECK_OFFSET(guest_es_selector, 964); + CHECK_OFFSET(guest_cs_selector, 966); + CHECK_OFFSET(guest_ss_selector, 968); + CHECK_OFFSET(guest_ds_selector, 970); + CHECK_OFFSET(guest_fs_selector, 972); + CHECK_OFFSET(guest_gs_selector, 974); + CHECK_OFFSET(guest_ldtr_selector, 976); + CHECK_OFFSET(guest_tr_selector, 978); + CHECK_OFFSET(guest_intr_status, 980); + CHECK_OFFSET(host_es_selector, 982); + CHECK_OFFSET(host_cs_selector, 984); + CHECK_OFFSET(host_ss_selector, 986); + CHECK_OFFSET(host_ds_selector, 988); + CHECK_OFFSET(host_fs_selector, 990); + CHECK_OFFSET(host_gs_selector, 992); + CHECK_OFFSET(host_tr_selector, 994); + CHECK_OFFSET(guest_pml_index, 996); +} + /* * VMCS12_REVISION is an arbitrary id that should be changed if the content or * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and * VMPTRLD verifies that the VMCS region that L1 is loading contains this id. + * + * IMPORTANT: Changing this value will break save/restore compatibility with + * older kvm releases. */ #define VMCS12_REVISION 0x11e57ed0 @@ -481,7 +646,8 @@ struct nested_vmx { bool sync_shadow_vmcs; bool dirty_vmcs12; - bool change_vmcs01_virtual_x2apic_mode; + bool change_vmcs01_virtual_apic_mode; + /* L2 must run next, and mustn't decide to exit to L1. 
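The CHECK_OFFSET() table above pins every vmcs12 field at its current byte offset, so an accidental reordering breaks the build instead of silently breaking the save/restore ABI. A userspace sketch of the pattern, with _Static_assert standing in for BUILD_BUG_ON_MSG (the struct and offsets here are illustrative):

	#include <stddef.h>
	#include <stdint.h>

	struct saved_state {
		uint32_t revision_id;	/* must stay at offset 0 */
		uint32_t abort;		/* must stay at offset 4 */
		uint64_t launch_state;	/* must stay at offset 8 */
	};

	#define CHECK_OFFSET(field, loc) \
		_Static_assert(offsetof(struct saved_state, field) == (loc), \
			       "Offset of " #field " has changed.")

	CHECK_OFFSET(revision_id, 0);
	CHECK_OFFSET(abort, 4);
	CHECK_OFFSET(launch_state, 8);

	int main(void)
	{
		return 0;	/* nothing to do at run time; the checks are compile-time */
	}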
*/ bool nested_run_pending; @@ -761,6 +927,7 @@ static const unsigned short vmcs_field_to_offset_table[] = { FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr), FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr), FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr), + FIELD64(PML_ADDRESS, pml_address), FIELD64(TSC_OFFSET, tsc_offset), FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr), FIELD64(APIC_ACCESS_ADDR, apic_access_addr), @@ -772,10 +939,11 @@ static const unsigned short vmcs_field_to_offset_table[] = { FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2), FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3), FIELD64(EPTP_LIST_ADDRESS, eptp_list_address), + FIELD64(VMREAD_BITMAP, vmread_bitmap), + FIELD64(VMWRITE_BITMAP, vmwrite_bitmap), FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap), FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address), FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer), - FIELD64(PML_ADDRESS, pml_address), FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl), FIELD64(GUEST_IA32_PAT, guest_ia32_pat), FIELD64(GUEST_IA32_EFER, guest_ia32_efer), @@ -1089,6 +1257,16 @@ static inline u16 evmcs_read16(unsigned long field) return *(u16 *)((char *)current_evmcs + offset); } +static inline void evmcs_touch_msr_bitmap(void) +{ + if (unlikely(!current_evmcs)) + return; + + if (current_evmcs->hv_enlightenments_control.msr_bitmap) + current_evmcs->hv_clean_fields &= + ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP; +} + static void evmcs_load(u64 phys_addr) { struct hv_vp_assist_page *vp_ap = @@ -1173,6 +1351,7 @@ static inline u32 evmcs_read32(unsigned long field) { return 0; } static inline u16 evmcs_read16(unsigned long field) { return 0; } static inline void evmcs_load(u64 phys_addr) {} static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {} +static inline void evmcs_touch_msr_bitmap(void) {} #endif /* IS_ENABLED(CONFIG_HYPERV) */ static inline bool is_exception_n(u32 intr_info, u8 vector) @@ -1393,6 +1572,11 @@ static inline bool cpu_has_vmx_invept_global(void) return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT; } +static inline bool cpu_has_vmx_invvpid_individual_addr(void) +{ + return vmx_capability.vpid & VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT; +} + static inline bool cpu_has_vmx_invvpid_single(void) { return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT; @@ -1510,6 +1694,17 @@ static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu) return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low); } +/* + * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE + * to modify any valid field of the VMCS, or are the VM-exit + * information fields read-only? 
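nested_cpu_has_vmwrite_any_field(), defined just below, answers that question by testing one bit in the cached low half of the virtual IA32_VMX_MISC MSR. A sketch of the same predicate (the bit position here is chosen for illustration, not the architectural encoding):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Bit position for illustration only. */
	#define MISC_VMWRITE_SHADOW_RO_FIELDS	(1u << 29)

	struct nested_caps {
		uint32_t misc_low;	/* cached low half of IA32_VMX_MISC */
	};

	static bool has_vmwrite_any_field(const struct nested_caps *caps)
	{
		return caps->misc_low & MISC_VMWRITE_SHADOW_RO_FIELDS;
	}

	int main(void)
	{
		struct nested_caps caps = { .misc_low = MISC_VMWRITE_SHADOW_RO_FIELDS };

		printf("vmwrite-any-field: %s\n",
		       has_vmwrite_any_field(&caps) ? "yes" : "no");
		return 0;
	}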
+ */ +static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu) +{ + return to_vmx(vcpu)->nested.msrs.misc_low & + MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS; +} + static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit) { return vmcs12->cpu_based_vm_exec_control & bit; @@ -3127,6 +3322,7 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv) msrs->misc_high); msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA; msrs->misc_low |= + MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS | VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE | VMX_MISC_ACTIVITY_HLT; msrs->misc_high = 0; @@ -3300,6 +3496,15 @@ static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) vmx->nested.msrs.misc_low = data; vmx->nested.msrs.misc_high = data >> 32; + + /* + * If L1 has read-only VM-exit information fields, use the + * less permissive vmx_vmwrite_bitmap to specify write + * permissions for the shadow VMCS. + */ + if (enable_shadow_vmcs && !nested_cpu_has_vmwrite_any_field(&vmx->vcpu)) + vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); + return 0; } @@ -3354,6 +3559,13 @@ static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) { struct vcpu_vmx *vmx = to_vmx(vcpu); + /* + * Don't allow changes to the VMX capability MSRs while the vCPU + * is in VMX operation. + */ + if (vmx->nested.vmxon) + return -EBUSY; + switch (msr_index) { case MSR_IA32_VMX_BASIC: return vmx_restore_vmx_basic(vmx, data); @@ -4216,6 +4428,15 @@ static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) if (!loaded_vmcs->msr_bitmap) goto out_vmcs; memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); + + if (IS_ENABLED(CONFIG_HYPERV) && + static_branch_unlikely(&enable_evmcs) && + (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) { + struct hv_enlightened_vmcs *evmcs = + (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs; + + evmcs->hv_enlightenments_control.msr_bitmap = 1; + } } return 0; @@ -5329,6 +5550,9 @@ static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bit if (!cpu_has_vmx_msr_bitmap()) return; + if (static_branch_unlikely(&enable_evmcs)) + evmcs_touch_msr_bitmap(); + /* * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals * have the write-low and read-high bitmap offsets the wrong way round. @@ -5364,6 +5588,9 @@ static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitm if (!cpu_has_vmx_msr_bitmap()) return; + if (static_branch_unlikely(&enable_evmcs)) + evmcs_touch_msr_bitmap(); + /* * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals * have the write-low and read-high bitmap offsets the wrong way round. @@ -5946,8 +6173,14 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx) int i; if (enable_shadow_vmcs) { + /* + * At vCPU creation, "VMWRITE to any supported field + * in the VMCS" is supported, so use the more + * permissive vmx_vmread_bitmap to specify both read + * and write permissions for the shadow VMCS. 
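The intercept helpers above index into the 4K MSR bitmap described by the comment's PRM reference: separate read and write quarters for the low (0x0-0x1fff) and high (0xc0000000-0xc0001fff) MSR ranges. A sketch of that addressing (quarter offsets as conventionally documented; treat them as illustrative):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static bool msr_bit(const uint8_t *bitmap, uint32_t msr, bool write)
	{
		uint32_t base;

		if (msr <= 0x1fff)			/* "low" MSR range */
			base = write ? 0x800 : 0x000;
		else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
			base = write ? 0xc00 : 0x400;	/* "high" MSR range */
		else
			return true;			/* out of range: always intercept */

		msr &= 0x1fff;
		return bitmap[base + msr / 8] & (1 << (msr % 8));
	}

	int main(void)
	{
		uint8_t bitmap[4096] = { 0 };

		bitmap[0x800] |= 1;	/* intercept writes to MSR 0 */
		printf("write MSR0 intercepted: %d\n", msr_bit(bitmap, 0, true));
		printf("read  MSR0 intercepted: %d\n", msr_bit(bitmap, 0, false));
		return 0;
	}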
+ */ vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); - vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); + vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmread_bitmap)); } if (cpu_has_vmx_msr_bitmap()) vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap)); @@ -7588,8 +7821,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer) vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva)) return 1; - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer, - sizeof(*vmpointer), &e)) { + if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } @@ -7670,6 +7902,12 @@ static int handle_vmon(struct kvm_vcpu *vcpu) return 1; } + /* CPL=0 must be checked manually. */ + if (vmx_get_cpl(vcpu)) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; + } + if (vmx->nested.vmxon) { nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); return kvm_skip_emulated_instruction(vcpu); @@ -7729,6 +7967,11 @@ static int handle_vmon(struct kvm_vcpu *vcpu) */ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) { + if (vmx_get_cpl(vcpu)) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 0; + } + if (!to_vmx(vcpu)->nested.vmxon) { kvm_queue_exception(vcpu, UD_VECTOR); return 0; @@ -7928,23 +8171,42 @@ static inline int vmcs12_write_any(struct kvm_vcpu *vcpu, } +/* + * Copy the writable VMCS shadow fields back to the VMCS12, in case + * they have been modified by the L1 guest. Note that the "read-only" + * VM-exit information fields are actually writable if the vCPU is + * configured to support "VMWRITE to any supported field in the VMCS." + */ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) { - int i; + const u16 *fields[] = { + shadow_read_write_fields, + shadow_read_only_fields + }; + const int max_fields[] = { + max_shadow_read_write_fields, + max_shadow_read_only_fields + }; + int i, q; unsigned long field; u64 field_value; struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; - const u16 *fields = shadow_read_write_fields; - const int num_fields = max_shadow_read_write_fields; preempt_disable(); vmcs_load(shadow_vmcs); - for (i = 0; i < num_fields; i++) { - field = fields[i]; - field_value = __vmcs_readl(field); - vmcs12_write_any(&vmx->vcpu, field, field_value); + for (q = 0; q < ARRAY_SIZE(fields); q++) { + for (i = 0; i < max_fields[q]; i++) { + field = fields[q][i]; + field_value = __vmcs_readl(field); + vmcs12_write_any(&vmx->vcpu, field, field_value); + } + /* + * Skip the VM-exit information fields if they are read-only. + */ + if (!nested_cpu_has_vmwrite_any_field(&vmx->vcpu)) + break; } vmcs_clear(shadow_vmcs); @@ -8029,9 +8291,9 @@ static int handle_vmread(struct kvm_vcpu *vcpu) if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, true, &gva)) return 1; - /* _system ok, as hardware has verified cpl=0 */ - kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva, - &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL); + /* _system ok, nested_vmx_check_permission has verified cpl=0 */ + kvm_write_guest_virt_system(vcpu, gva, &field_value, + (is_long_mode(vcpu) ? 8 : 4), NULL); } nested_vmx_succeed(vcpu); @@ -8069,8 +8331,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, false, &gva)) return 1; - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, - &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) { + if (kvm_read_guest_virt(vcpu, gva, &field_value, + (is_64_bit_mode(vcpu) ? 
8 : 4), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } @@ -8078,7 +8340,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); - if (vmcs_field_readonly(field)) { + /* + * If the vCPU supports "VMWRITE to any supported field in the + * VMCS," then the "read-only" fields are actually read/write. + */ + if (vmcs_field_readonly(field) && + !nested_cpu_has_vmwrite_any_field(vcpu)) { nested_vmx_failValid(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); return kvm_skip_emulated_instruction(vcpu); @@ -8189,10 +8456,10 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu) if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, true, &vmcs_gva)) return 1; - /* ok to use *_system, as hardware has verified cpl=0 */ - if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva, - (void *)&to_vmx(vcpu)->nested.current_vmptr, - sizeof(u64), &e)) { + /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ + if (kvm_write_guest_virt_system(vcpu, vmcs_gva, + (void *)&to_vmx(vcpu)->nested.current_vmptr, + sizeof(u64), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } @@ -8239,8 +8506,7 @@ static int handle_invept(struct kvm_vcpu *vcpu) if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), vmx_instruction_info, false, &gva)) return 1; - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, - sizeof(operand), &e)) { + if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } @@ -8304,8 +8570,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), vmx_instruction_info, false, &gva)) return 1; - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, - sizeof(operand), &e)) { + if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } @@ -8317,12 +8582,19 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) switch (type) { case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: - if (is_noncanonical_address(operand.gla, vcpu)) { + if (!operand.vpid || + is_noncanonical_address(operand.gla, vcpu)) { nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); return kvm_skip_emulated_instruction(vcpu); } - /* fall through */ + if (cpu_has_vmx_invvpid_individual_addr() && + vmx->nested.vpid02) { + __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, + vmx->nested.vpid02, operand.gla); + } else + __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true); + break; case VMX_VPID_EXTENT_SINGLE_CONTEXT: case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: if (!operand.vpid) { @@ -8330,15 +8602,16 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); return kvm_skip_emulated_instruction(vcpu); } + __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true); break; case VMX_VPID_EXTENT_ALL_CONTEXT: + __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true); break; default: WARN_ON_ONCE(1); return kvm_skip_emulated_instruction(vcpu); } - __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true); nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); @@ -8842,11 +9115,13 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) case EXIT_REASON_TPR_BELOW_THRESHOLD: return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); case EXIT_REASON_APIC_ACCESS: - return nested_cpu_has2(vmcs12, - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); case EXIT_REASON_APIC_WRITE: case EXIT_REASON_EOI_INDUCED: - /* apic_write and eoi_induced 
should exit unconditionally. */ + /* + * The controls for "virtualize APIC accesses," "APIC- + * register virtualization," and "virtual-interrupt + * delivery" only come from vmcs12. + */ return true; case EXIT_REASON_EPT_VIOLATION: /* @@ -9253,31 +9528,43 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) vmcs_write32(TPR_THRESHOLD, irr); } -static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) +static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu) { u32 sec_exec_control; + if (!lapic_in_kernel(vcpu)) + return; + /* Postpone execution until vmcs01 is the current VMCS. */ if (is_guest_mode(vcpu)) { - to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true; + to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true; return; } - if (!cpu_has_vmx_virtualize_x2apic_mode()) - return; - if (!cpu_need_tpr_shadow(vcpu)) return; sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); + sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); - if (set) { - sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; - sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; - } else { - sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; - sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; - vmx_flush_tlb(vcpu, true); + switch (kvm_get_apic_mode(vcpu)) { + case LAPIC_MODE_INVALID: + WARN_ONCE(true, "Invalid local APIC state"); + case LAPIC_MODE_DISABLED: + break; + case LAPIC_MODE_XAPIC: + if (flexpriority_enabled) { + sec_exec_control |= + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + vmx_flush_tlb(vcpu, true); + } + break; + case LAPIC_MODE_X2APIC: + if (cpu_has_vmx_virtualize_x2apic_mode()) + sec_exec_control |= + SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; + break; } vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); @@ -9286,24 +9573,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) { - struct vcpu_vmx *vmx = to_vmx(vcpu); - - /* - * Currently we do not handle the nested case where L2 has an - * APIC access page of its own; that page is still pinned. - * Hence, we skip the case where the VCPU is in guest mode _and_ - * L1 prepared an APIC access page for L2. - * - * For the case where L1 and L2 share the same APIC access page - * (flexpriority=Y but SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES clear - * in the vmcs12), this function will only update either the vmcs01 - * or the vmcs02. If the former, the vmcs02 will be updated by - * prepare_vmcs02. If the latter, the vmcs01 will be updated in - * the next L2->L1 exit. 
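The vmx_set_virtual_apic_mode() switch above reduces to a small mode-to-controls mapping: xAPIC selects "virtualize APIC accesses" when flexpriority is available, x2APIC selects "virtualize x2APIC mode", and a disabled APIC selects neither. A condensed sketch with symbolic control bits (not the hardware encodings):

	#include <stdint.h>
	#include <stdio.h>

	enum lapic_mode { DISABLED, XAPIC, X2APIC };

	#define VIRT_APIC_ACCESSES	(1u << 0)	/* symbolic */
	#define VIRT_X2APIC_MODE	(1u << 1)	/* symbolic */

	static uint32_t apic_exec_controls(enum lapic_mode mode, uint32_t ctrl)
	{
		/* Clear both bits, then set at most one of them. */
		ctrl &= ~(VIRT_APIC_ACCESSES | VIRT_X2APIC_MODE);

		switch (mode) {
		case XAPIC:
			ctrl |= VIRT_APIC_ACCESSES;
			break;
		case X2APIC:
			ctrl |= VIRT_X2APIC_MODE;
			break;
		case DISABLED:
			break;
		}
		return ctrl;
	}

	int main(void)
	{
		printf("xapic ctrl=%#x x2apic ctrl=%#x\n",
		       apic_exec_controls(XAPIC, 0), apic_exec_controls(X2APIC, 0));
		return 0;
	}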
- */ - if (!is_guest_mode(vcpu) || - !nested_cpu_has2(get_vmcs12(&vmx->vcpu), - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { + if (!is_guest_mode(vcpu)) { vmcs_write64(APIC_ACCESS_ADDR, hpa); vmx_flush_tlb(vcpu, true); } @@ -9943,13 +10213,13 @@ STACK_FRAME_NON_STANDARD(vmx_vcpu_run); static struct kvm *vmx_vm_alloc(void) { - struct kvm_vmx *kvm_vmx = kzalloc(sizeof(struct kvm_vmx), GFP_KERNEL); + struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx)); return &kvm_vmx->kvm; } static void vmx_vm_free(struct kvm *kvm) { - kfree(to_kvm_vmx(kvm)); + vfree(to_kvm_vmx(kvm)); } static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs) @@ -10387,11 +10657,6 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); } - } else if (!(nested_cpu_has_virt_x2apic_mode(vmcs12)) && - cpu_need_virtualize_apic_accesses(&vmx->vcpu)) { - vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); - kvm_vcpu_reload_apic_access_page(vcpu); } if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { @@ -10871,8 +11136,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne return 0; } -static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, - bool from_vmentry) +static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -11006,13 +11270,13 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, * is assigned to entry_failure_code on failure. */ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, - bool from_vmentry, u32 *entry_failure_code) + u32 *entry_failure_code) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exec_control, vmcs12_exec_ctrl; if (vmx->nested.dirty_vmcs12) { - prepare_vmcs02_full(vcpu, vmcs12, from_vmentry); + prepare_vmcs02_full(vcpu, vmcs12); vmx->nested.dirty_vmcs12 = false; } @@ -11032,7 +11296,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, * HOST_FS_BASE, HOST_GS_BASE. 
*/ - if (from_vmentry && + if (vmx->nested.nested_run_pending && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); @@ -11040,7 +11304,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, kvm_set_dr(vcpu, 7, vcpu->arch.dr7); vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); } - if (from_vmentry) { + if (vmx->nested.nested_run_pending) { vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, vmcs12->vm_entry_intr_info_field); vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, @@ -11172,7 +11436,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, ~VM_ENTRY_IA32E_MODE) | (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE)); - if (from_vmentry && + if (vmx->nested.nested_run_pending && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) { vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); vcpu->arch.pat = vmcs12->guest_ia32_pat; @@ -11197,7 +11461,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) { if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) { vmx->nested.last_vpid = vmcs12->virtual_processor_id; - __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02, true); + __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true); } } else { vmx_flush_tlb(vcpu, true); @@ -11240,7 +11504,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmx_set_cr4(vcpu, vmcs12->guest_cr4); vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12)); - if (from_vmentry && + if (vmx->nested.nested_run_pending && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) vcpu->arch.efer = vmcs12->guest_ia32_efer; else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) @@ -11418,7 +11682,7 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, return 0; } -static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) +static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); @@ -11438,7 +11702,7 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) vcpu->arch.tsc_offset += vmcs12->tsc_offset; r = EXIT_REASON_INVALID_STATE; - if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) + if (prepare_vmcs02(vcpu, vmcs12, &exit_qual)) goto fail; nested_get_vmcs12_pages(vcpu, vmcs12); @@ -11540,20 +11804,22 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) * the nested entry. */ - ret = enter_vmx_non_root_mode(vcpu, true); - if (ret) + vmx->nested.nested_run_pending = 1; + ret = enter_vmx_non_root_mode(vcpu); + if (ret) { + vmx->nested.nested_run_pending = 0; return ret; + } /* * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken * by event injection, halt vcpu. */ if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) && - !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) + !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) { + vmx->nested.nested_run_pending = 0; return kvm_vcpu_halt(vcpu); - - vmx->nested.nested_run_pending = 1; - + } return 1; out: @@ -11925,12 +12191,20 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, load_vmcs12_mmu_host_state(vcpu, vmcs12); - if (enable_vpid) { - /* - * Trivially support vpid by letting L2s share their parent - * L1's vpid. 
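The rewritten comment below encodes a small TLB-tag argument: a flush on emulated VM-exit is needed only when vmcs01 uses VPID but vmcs12 does not, because only then do L1 and L2 translations share the vmx->vpid tag. A simplified sketch of that decision (ignoring vpid02 allocation failure):

	#include <stdbool.h>
	#include <stdio.h>

	static bool need_flush_on_vmexit(bool vmcs01_has_vpid, bool vmcs12_has_vpid)
	{
		if (!vmcs01_has_vpid)
			return false;	/* no VPID: hardware flushes on every transition */
		if (vmcs12_has_vpid)
			return false;	/* L1/L2 entries carry distinct tags */
		return true;		/* both levels tagged with vpid01: must flush */
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       need_flush_on_vmexit(true, false),	/* 1: flush */
		       need_flush_on_vmexit(true, true),	/* 0 */
		       need_flush_on_vmexit(false, false));	/* 0 */
		return 0;
	}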
TODO: move to a more elaborate solution, giving - * each L2 its own vpid and exposing the vpid feature to L1. - */ + /* + * If vmcs01 doesn't use VPID, CPU flushes TLB on every + * VMEntry/VMExit. Thus, no need to flush TLB. + * + * If vmcs12 uses VPID, TLB entries populated by L2 are + * tagged with vmx->nested.vpid02 while L1 entries are tagged + * with vmx->vpid. Thus, no need to flush TLB. + * + * Therefore, flush TLB only in case vmcs01 uses VPID and + * vmcs12 doesn't use VPID, as in this case L1 & L2 TLB entries + * are both tagged with vmx->vpid. + */ + if (enable_vpid && + !(nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02)) { vmx_flush_tlb(vcpu, true); }
@@ -12069,10 +12343,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, if (kvm_has_tsc_control) decache_tsc_multiplier(vmx); - if (vmx->nested.change_vmcs01_virtual_x2apic_mode) { - vmx->nested.change_vmcs01_virtual_x2apic_mode = false; - vmx_set_virtual_x2apic_mode(vcpu, - vcpu->arch.apic_base & X2APIC_ENABLE); + if (vmx->nested.change_vmcs01_virtual_apic_mode) { + vmx->nested.change_vmcs01_virtual_apic_mode = false; + vmx_set_virtual_apic_mode(vcpu); } else if (!nested_cpu_has_ept(vmcs12) && nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
@@ -12236,7 +12509,7 @@ static inline int u64_shl_div_u64(u64 a, unsigned int shift, static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) { struct vcpu_vmx *vmx; - u64 tscl, guest_tscl, delta_tsc; + u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles; if (kvm_mwait_in_guest(vcpu->kvm)) return -EOPNOTSUPP;
@@ -12245,6 +12518,12 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) tscl = rdtsc(); guest_tscl = kvm_read_l1_tsc(vcpu, tscl); delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; + lapic_timer_advance_cycles = nsec_to_cycles(vcpu, lapic_timer_advance_ns); + + if (delta_tsc > lapic_timer_advance_cycles) + delta_tsc -= lapic_timer_advance_cycles; + else + delta_tsc = 0; /* Convert to host delta tsc if tsc scaling is enabled */ if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
@@ -12615,7 +12894,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase) if (vmx->nested.smm.guest_mode) { vcpu->arch.hflags &= ~HF_SMM_MASK; - ret = enter_vmx_non_root_mode(vcpu, false); + ret = enter_vmx_non_root_mode(vcpu); vcpu->arch.hflags |= HF_SMM_MASK; if (ret) return ret;
@@ -12700,7 +12979,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .enable_nmi_window = enable_nmi_window, .enable_irq_window = enable_irq_window, .update_cr8_intercept = update_cr8_intercept, - .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode, + .set_virtual_apic_mode = vmx_set_virtual_apic_mode, .set_apic_access_page_addr = vmx_set_apic_access_page_addr, .get_enable_apicv = vmx_get_enable_apicv, .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
@@ -12812,6 +13091,7 @@ static int __init vmx_init(void) rcu_assign_pointer(crash_vmclear_loaded_vmcss, crash_vmclear_local_loaded_vmcss); #endif + vmx_check_vmcs12_offsets(); return 0; }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 71e7cda6d014..0046aa70205a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -138,6 +138,7 @@ module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); /* lapic timer advance (tscdeadline mode only) in nanoseconds */ unsigned int __read_mostly lapic_timer_advance_ns = 0; module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
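The vmx_set_hv_timer() hunk above now arms the VMX preemption timer early by the lapic_timer_advance_ns budget, converted from nanoseconds to TSC cycles, so the latency of the wakeup path is hidden from the guest. A minimal standalone restatement of just that adjustment (hypothetical helper name; illustration only, not kernel code):

    /*
     * Saturating subtraction of the timer-advance budget from the
     * programmed deadline: clamp at zero so a deadline shorter than
     * the budget cannot wrap the unsigned delta to a huge value
     * (the timer then simply fires immediately).
     */
    static inline u64 advance_deadline(u64 delta_tsc, u64 advance_cycles)
    {
            return delta_tsc > advance_cycles ?
                            delta_tsc - advance_cycles : 0;
    }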
+EXPORT_SYMBOL_GPL(lapic_timer_advance_ns); static bool __read_mostly vector_hashing = true; module_param(vector_hashing, bool, S_IRUGO); @@ -318,23 +319,27 @@ u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_get_apic_base); +enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu) +{ + return kvm_apic_mode(kvm_get_apic_base(vcpu)); +} +EXPORT_SYMBOL_GPL(kvm_get_apic_mode); + int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { - u64 old_state = vcpu->arch.apic_base & - (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE); - u64 new_state = msr_info->data & - (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE); + enum lapic_mode old_mode = kvm_get_apic_mode(vcpu); + enum lapic_mode new_mode = kvm_apic_mode(msr_info->data); u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff | (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE); - if ((msr_info->data & reserved_bits) || new_state == X2APIC_ENABLE) - return 1; - if (!msr_info->host_initiated && - ((new_state == MSR_IA32_APICBASE_ENABLE && - old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) || - (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) && - old_state == 0))) + if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID) return 1; + if (!msr_info->host_initiated) { + if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC) + return 1; + if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC) + return 1; + } kvm_lapic_set_base(vcpu, msr_info->data); return 0; @@ -856,7 +861,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) } if (is_long_mode(vcpu) && - (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 62))) + (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63))) return 1; else if (is_pae(vcpu) && is_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) @@ -1761,7 +1766,7 @@ static int do_monotonic_boot(s64 *t, u64 *tsc_timestamp) return mode; } -static int do_realtime(struct timespec *ts, u64 *tsc_timestamp) +static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp) { struct pvclock_gtod_data *gtod = &pvclock_gtod_data; unsigned long seq; @@ -1794,7 +1799,7 @@ static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp) } /* returns true if host is using TSC based clocksource */ -static bool kvm_get_walltime_and_clockread(struct timespec *ts, +static bool kvm_get_walltime_and_clockread(struct timespec64 *ts, u64 *tsc_timestamp) { /* checked again under seqlock below */ @@ -2868,6 +2873,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_HYPERV_SYNIC2: case KVM_CAP_HYPERV_VP_INDEX: case KVM_CAP_HYPERV_EVENTFD: + case KVM_CAP_HYPERV_TLBFLUSH: case KVM_CAP_PCI_SEGMENT: case KVM_CAP_DEBUGREGS: case KVM_CAP_X86_ROBUST_SINGLESTEP: @@ -2894,7 +2900,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = KVM_CLOCK_TSC_STABLE; break; case KVM_CAP_X86_DISABLE_EXITS: - r |= KVM_X86_DISABLE_EXITS_HTL | KVM_X86_DISABLE_EXITS_PAUSE; + r |= KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE; if(kvm_can_mwait_in_guest()) r |= KVM_X86_DISABLE_EXITS_MWAIT; break; @@ -3962,7 +3968,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, return r; } -int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } @@ -4248,7 +4254,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) && kvm_can_mwait_in_guest()) kvm->arch.mwait_in_guest 
= true; - if (cap->args[0] & KVM_X86_DISABLE_EXITS_HTL) + if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) kvm->arch.hlt_in_guest = true; if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE) kvm->arch.pause_in_guest = true; @@ -4787,11 +4793,10 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, return X86EMUL_CONTINUE; } -int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, +int kvm_read_guest_virt(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { - struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, @@ -4799,12 +4804,17 @@ int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, } EXPORT_SYMBOL_GPL(kvm_read_guest_virt); -static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, - gva_t addr, void *val, unsigned int bytes, - struct x86_exception *exception) +static int emulator_read_std(struct x86_emulate_ctxt *ctxt, + gva_t addr, void *val, unsigned int bytes, + struct x86_exception *exception, bool system) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); - return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception); + u32 access = 0; + + if (!system && kvm_x86_ops->get_cpl(vcpu) == 3) + access |= PFERR_USER_MASK; + + return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); } static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt, @@ -4816,18 +4826,16 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt, return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE; } -int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, - gva_t addr, void *val, - unsigned int bytes, - struct x86_exception *exception) +static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, + struct kvm_vcpu *vcpu, u32 access, + struct x86_exception *exception) { - struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, - PFERR_WRITE_MASK, + access, exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); @@ -4848,6 +4856,27 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, out: return r; } + +static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, + unsigned int bytes, struct x86_exception *exception, + bool system) +{ + struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); + u32 access = PFERR_WRITE_MASK; + + if (!system && kvm_x86_ops->get_cpl(vcpu) == 3) + access |= PFERR_USER_MASK; + + return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, + access, exception); +} + +int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, + unsigned int bytes, struct x86_exception *exception) +{ + return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, + PFERR_WRITE_MASK, exception); +} EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); int handle_ud(struct kvm_vcpu *vcpu) @@ -4858,8 +4887,8 @@ int handle_ud(struct kvm_vcpu *vcpu) struct x86_exception e; if (force_emulation_prefix && - kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, - kvm_get_linear_rip(vcpu), sig, sizeof(sig), &e) == 0 && + kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu), + sig, sizeof(sig), &e) == 0 && memcmp(sig, "\xf\xbkvm", sizeof(sig)) == 0) { kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig)); emul_type = 0; @@ -5600,8 +5629,8 @@ static int 
emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase) static const struct x86_emulate_ops emulate_ops = { .read_gpr = emulator_read_gpr, .write_gpr = emulator_write_gpr, - .read_std = kvm_read_guest_virt_system, - .write_std = kvm_write_guest_virt_system, + .read_std = emulator_read_std, + .write_std = emulator_write_std, .read_phys = kvm_read_guest_phys_system, .fetch = kvm_fetch_guest_virt, .read_emulated = emulator_read_emulated, @@ -6617,7 +6646,7 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, unsigned long clock_type) { struct kvm_clock_pairing clock_pairing; - struct timespec ts; + struct timespec64 ts; u64 cycle; int ret; @@ -8538,7 +8567,7 @@ int kvm_arch_hardware_setup(void) /* * Make sure the user can only configure tsc_khz values that * fit into a signed integer. - * A min value is not calculated needed because it will always + * A min value is not calculated because it will always * be 1 on all machines. */ u64 max = min(0x7fffffffULL, @@ -8871,13 +8900,14 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, slot->base_gfn, level) + 1; slot->arch.rmap[i] = - kvzalloc(lpages * sizeof(*slot->arch.rmap[i]), GFP_KERNEL); + kvcalloc(lpages, sizeof(*slot->arch.rmap[i]), + GFP_KERNEL); if (!slot->arch.rmap[i]) goto out_free; if (i == 0) continue; - linfo = kvzalloc(lpages * sizeof(*linfo), GFP_KERNEL); + linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL); if (!linfo) goto out_free; diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index c9492f764902..331993c49dae 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -247,11 +247,11 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr); u64 get_kvmclock_ns(struct kvm *kvm); -int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, +int kvm_read_guest_virt(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception); -int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, +int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception); diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index fec82b577c18..cee58a972cb2 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -706,7 +706,9 @@ void __init init_mem_mapping(void) */ int devmem_is_allowed(unsigned long pagenr) { - if (page_is_ram(pagenr)) { + if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE, + IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE) + != REGION_DISJOINT) { /* * For disallowed memory regions in the low 1MB range, * request that the page be shown as all zeros. 
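The devmem_is_allowed() hunk above replaces a whole-page page_is_ram() classification with a byte-granular region_intersects() query. A hedged restatement of the new test as a hypothetical standalone predicate (the real code keeps the test inline):

    /*
     * A page counts as RAM if any byte of it overlaps an
     * IORESOURCE_SYSTEM_RAM region -- presumably the point of the
     * change, since this also catches pages that only partially
     * intersect System RAM.
     */
    static bool page_touches_system_ram(unsigned long pagenr)
    {
            return region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
                                     IORESOURCE_SYSTEM_RAM,
                                     IORES_DESC_NONE) != REGION_DISJOINT;
    }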
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index c893c6a3d707..979e0a02cbe1 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -692,7 +692,7 @@ void __init initmem_init(void) high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; #endif - memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0); + memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); sparse_memory_present_with_active_regions(0); #ifdef CONFIG_FLATMEM diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 17383f9677fa..045f492d5f68 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -742,7 +742,7 @@ kernel_physical_mapping_init(unsigned long paddr_start, #ifndef CONFIG_NUMA void __init initmem_init(void) { - memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0); + memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); } #endif diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 8fca446aaef6..2580cd2e98b1 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1107,7 +1107,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) extra_pass = true; goto skip_init_addrs; } - addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL); + addrs = kmalloc_array(prog->len, sizeof(*addrs), GFP_KERNEL); if (!addrs) { prog = orig_prog; goto out_addrs; diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index 0cc04e30adc1..55799873ebe5 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -2345,7 +2345,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) prog = tmp; } - addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL); + addrs = kmalloc_array(prog->len, sizeof(*addrs), GFP_KERNEL); if (!addrs) { prog = orig_prog; goto out; diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 9542a746dc50..9112d1cb397b 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -168,7 +168,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) if (type == PCI_CAP_ID_MSI && nvec > 1) return 1; - v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL); + v = kcalloc(max(1, nvec), sizeof(int), GFP_KERNEL); if (!v) return -ENOMEM; diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index b96d38288c60..ca446da48fd2 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -2142,7 +2142,7 @@ static int __init init_per_cpu(int nuvhubs, int base_part_pnode) if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub()) timeout_us = calculate_destination_timeout(); - vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL); + vp = kmalloc_array(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL); uvhub_descs = (struct uvhub_desc *)vp; memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc)); uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL); diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c index b082d71b08ee..a36b368eea08 100644 --- a/arch/x86/platform/uv/uv_time.c +++ b/arch/x86/platform/uv/uv_time.c @@ -158,7 +158,7 @@ static __init int uv_rtc_allocate_timers(void) { int cpu; - blade_info = kzalloc(uv_possible_blades * sizeof(void *), GFP_KERNEL); + blade_info = kcalloc(uv_possible_blades, sizeof(void *), GFP_KERNEL); if (!blade_info) return -ENOMEM; diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 17df332269b2..d575e8701955 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -17,7 +17,6 @@ config XTENSA select GENERIC_SCHED_CLOCK select 
GENERIC_STRNCPY_FROM_USER if KASAN select HAVE_ARCH_KASAN if MMU - select HAVE_CC_STACKPROTECTOR select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS select HAVE_EXIT_THREAD @@ -28,6 +27,7 @@ config XTENSA select HAVE_MEMBLOCK select HAVE_OPROFILE select HAVE_PERF_EVENTS + select HAVE_STACKPROTECTOR select IRQ_DOMAIN select MODULES_USE_ELF_RELA select NO_BOOTMEM diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h index 397d6a1a4224..a0d50be5a8cb 100644 --- a/arch/xtensa/include/asm/cacheflush.h +++ b/arch/xtensa/include/asm/cacheflush.h @@ -88,7 +88,7 @@ static inline void __invalidate_icache_page_alias(unsigned long virt, * * Pages can get remapped. Because this might change the 'color' of that page, * we have to flush the cache before the PTE is changed. - * (see also Documentation/cachetlb.txt) + * (see also Documentation/core-api/cachetlb.rst) */ #if defined(CONFIG_MMU) && \ @@ -152,7 +152,7 @@ void local_flush_cache_page(struct vm_area_struct *vma, __invalidate_icache_range(start,(end) - (start)); \ } while (0) -/* This is not required, see Documentation/cachetlb.txt */ +/* This is not required, see Documentation/core-api/cachetlb.rst */ #define flush_icache_page(vma,page) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0) diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c index 022cf918ec20..67904f55f188 100644 --- a/arch/xtensa/kernel/asm-offsets.c +++ b/arch/xtensa/kernel/asm-offsets.c @@ -76,7 +76,7 @@ int main(void) DEFINE(TASK_PID, offsetof (struct task_struct, pid)); DEFINE(TASK_THREAD, offsetof (struct task_struct, thread)); DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack)); -#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_STACKPROTECTOR DEFINE(TASK_STACK_CANARY, offsetof(struct task_struct, stack_canary)); #endif DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct)); diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 5caff0744f3c..9cbc380e9572 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -1971,7 +1971,7 @@ ENTRY(_switch_to) s32i a1, a2, THREAD_SP # save stack pointer #endif -#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) +#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) movi a6, __stack_chk_guard l32i a8, a3, TASK_STACK_CANARY s32i a8, a6, 0 diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 8dd0593fb2c4..483dcfb6e681 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -58,7 +58,7 @@ void (*pm_power_off)(void) = NULL; EXPORT_SYMBOL(pm_power_off); -#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_STACKPROTECTOR #include unsigned long __stack_chk_guard __read_mostly; EXPORT_SYMBOL(__stack_chk_guard); diff --git a/block/Kconfig b/block/Kconfig index 28ec55752b68..eb50fd4977c2 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -114,7 +114,7 @@ config BLK_DEV_THROTTLING one needs to mount and use blkio cgroup controller for creating cgroups and specifying per device IO rate policies. - See Documentation/cgroups/blkio-controller.txt for more information. + See Documentation/cgroup-v1/blkio-controller.txt for more information. 
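Many hunks before and after this point convert open-coded allocation-size arithmetic (kzalloc(n * size), kmalloc(n * size)) to the checked helpers kcalloc(), kmalloc_array(), kvmalloc_array(), array_size() and array3_size(). All of them close the same bug class, sketched below with hypothetical values chosen purely for illustration:

    /*
     * On a 32-bit kernel, n = 0x40000001 and size = 0x10 multiply to
     * 0x400000010, which truncates to 0x10: a 16-byte buffer where
     * roughly 16 GiB was requested, so later writes run off the end.
     *
     *         buf = kzalloc(n * size, GFP_KERNEL);    /* unchecked */
     *
     * The checked helpers verify the product first and return NULL on
     * overflow; kcalloc() also zeroes, kmalloc_array() does not.
     */
    static void *alloc_table(size_t n, size_t size)
    {
            return kcalloc(n, size, GFP_KERNEL);
    }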
config BLK_DEV_THROTTLING_LOW bool "Block throttling .low limit interface support (EXPERIMENTAL)" diff --git a/block/bio.c b/block/bio.c index db9a40e9a136..9710e275f230 100644 --- a/block/bio.c +++ b/block/bio.c @@ -2091,7 +2091,8 @@ static int __init init_bio(void) { bio_slab_max = 2; bio_slab_nr = 0; - bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL); + bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab), + GFP_KERNEL); if (!bio_slabs) panic("bio: can't allocate bios\n"); diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 70356a2a11ab..09b2ee6694fb 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -311,35 +311,6 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, } EXPORT_SYMBOL(blk_mq_tagset_busy_iter); -int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data, - int (fn)(void *, struct request *)) -{ - int i, j, ret = 0; - - if (WARN_ON_ONCE(!fn)) - goto out; - - for (i = 0; i < set->nr_hw_queues; i++) { - struct blk_mq_tags *tags = set->tags[i]; - - if (!tags) - continue; - - for (j = 0; j < tags->nr_tags; j++) { - if (!tags->static_rqs[j]) - continue; - - ret = fn(data, tags->static_rqs[j]); - if (ret) - goto out; - } - } - -out: - return ret; -} -EXPORT_SYMBOL_GPL(blk_mq_tagset_iter); - void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, void *priv) { diff --git a/block/blk-mq.c b/block/blk-mq.c index d2de0a719ab8..70c65bb6c013 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -671,6 +671,7 @@ static void __blk_mq_requeue_request(struct request *rq) if (blk_mq_request_started(rq)) { WRITE_ONCE(rq->state, MQ_RQ_IDLE); + rq->rq_flags &= ~RQF_TIMED_OUT; if (q->dma_drain_size && blk_rq_bytes(rq)) rq->nr_phys_segments--; } @@ -770,6 +771,7 @@ EXPORT_SYMBOL(blk_mq_tag_to_rq); static void blk_mq_rq_timed_out(struct request *req, bool reserved) { + req->rq_flags |= RQF_TIMED_OUT; if (req->q->mq_ops->timeout) { enum blk_eh_timer_return ret; @@ -779,6 +781,7 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved) WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER); } + req->rq_flags &= ~RQF_TIMED_OUT; blk_add_timer(req); } @@ -788,6 +791,8 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next) if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT) return false; + if (rq->rq_flags & RQF_TIMED_OUT) + return false; deadline = blk_rq_deadline(rq); if (time_after_eq(jiffies, deadline)) @@ -1903,7 +1908,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, if (!tags) return NULL; - tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *), + tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node); if (!tags->rqs) { @@ -1911,9 +1916,9 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, return NULL; } - tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *), - GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, - node); + tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *), + GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, + node); if (!tags->static_rqs) { kfree(tags->rqs); blk_mq_free_tags(tags); @@ -2349,7 +2354,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q) mutex_lock(&set->tag_list_lock); list_del_rcu(&q->tag_set_list); - INIT_LIST_HEAD(&q->tag_set_list); if (list_is_singular(&set->tag_list)) { /* just transitioned to unshared */ set->flags &= ~BLK_MQ_F_TAG_SHARED; @@ -2357,8 +2361,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q) blk_mq_update_tag_set_depth(set, 
false); } mutex_unlock(&set->tag_list_lock); - synchronize_rcu(); + INIT_LIST_HEAD(&q->tag_set_list); } static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, @@ -2522,7 +2526,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, /* init q->mq_kobj and sw queues' kobjects */ blk_mq_sysfs_init(q); - q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)), + q->queue_hw_ctx = kcalloc_node(nr_cpu_ids, sizeof(*(q->queue_hw_ctx)), GFP_KERNEL, set->numa_node); if (!q->queue_hw_ctx) goto err_percpu; @@ -2741,14 +2745,14 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) if (set->nr_hw_queues > nr_cpu_ids) set->nr_hw_queues = nr_cpu_ids; - set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *), + set->tags = kcalloc_node(nr_cpu_ids, sizeof(struct blk_mq_tags *), GFP_KERNEL, set->numa_node); if (!set->tags) return -ENOMEM; ret = -ENOMEM; - set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids, - GFP_KERNEL, set->numa_node); + set->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*set->mq_map), + GFP_KERNEL, set->numa_node); if (!set->mq_map) goto out_free_tags; diff --git a/block/blk-tag.c b/block/blk-tag.c index 09f19c6c52ce..fbc153aef166 100644 --- a/block/blk-tag.c +++ b/block/blk-tag.c @@ -99,12 +99,12 @@ init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth) __func__, depth); } - tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC); + tag_index = kcalloc(depth, sizeof(struct request *), GFP_ATOMIC); if (!tag_index) goto fail; nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG; - tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC); + tag_map = kcalloc(nr_ulongs, sizeof(unsigned long), GFP_ATOMIC); if (!tag_map) goto fail; @@ -188,7 +188,6 @@ int blk_queue_init_tags(struct request_queue *q, int depth, */ q->queue_tags = tags; queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q); - INIT_LIST_HEAD(&q->tag_busy_list); return 0; } EXPORT_SYMBOL(blk_queue_init_tags); @@ -374,27 +373,6 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq) rq->tag = tag; bqt->tag_index[tag] = rq; blk_start_request(rq); - list_add(&rq->queuelist, &q->tag_busy_list); return 0; } EXPORT_SYMBOL(blk_queue_start_tag); - -/** - * blk_queue_invalidate_tags - invalidate all pending tags - * @q: the request queue for the device - * - * Description: - * Hardware conditions may dictate a need to stop all pending requests. - * In this case, we will safely clear the block side of the tag queue and - * readd all requests to the request queue in the right order. 
- **/ -void blk_queue_invalidate_tags(struct request_queue *q) -{ - struct list_head *tmp, *n; - - lockdep_assert_held(q->queue_lock); - - list_for_each_safe(tmp, n, &q->tag_busy_list) - blk_requeue_request(q, list_entry_rq(tmp)); -} -EXPORT_SYMBOL(blk_queue_invalidate_tags); diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 3d08dc84db16..51000914e23f 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -331,8 +331,8 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, if (rep.nr_zones > INT_MAX / sizeof(struct blk_zone)) return -ERANGE; - zones = kvmalloc(rep.nr_zones * sizeof(struct blk_zone), - GFP_KERNEL | __GFP_ZERO); + zones = kvmalloc_array(rep.nr_zones, sizeof(struct blk_zone), + GFP_KERNEL | __GFP_ZERO); if (!zones) return -ENOMEM; diff --git a/block/bsg.c b/block/bsg.c index 132e657e2d91..66602c489956 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -693,6 +693,8 @@ static struct bsg_device *bsg_add_device(struct inode *inode, struct bsg_device *bd; unsigned char buf[32]; + lockdep_assert_held(&bsg_mutex); + if (!blk_get_queue(rq)) return ERR_PTR(-ENXIO); @@ -707,14 +709,12 @@ static struct bsg_device *bsg_add_device(struct inode *inode, bsg_set_block(bd, file); atomic_set(&bd->ref_count, 1); - mutex_lock(&bsg_mutex); hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode))); strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1); bsg_dbg(bd, "bound to <%s>, max queue %d\n", format_dev_t(buf, inode->i_rdev), bd->max_queue); - mutex_unlock(&bsg_mutex); return bd; } @@ -722,7 +722,7 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q) { struct bsg_device *bd; - mutex_lock(&bsg_mutex); + lockdep_assert_held(&bsg_mutex); hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) { if (bd->queue == q) { @@ -732,7 +732,6 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q) } bd = NULL; found: - mutex_unlock(&bsg_mutex); return bd; } @@ -746,17 +745,18 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file) */ mutex_lock(&bsg_mutex); bcd = idr_find(&bsg_minor_idr, iminor(inode)); - mutex_unlock(&bsg_mutex); - if (!bcd) - return ERR_PTR(-ENODEV); + if (!bcd) { + bd = ERR_PTR(-ENODEV); + goto out_unlock; + } bd = __bsg_get_device(iminor(inode), bcd->queue); - if (bd) - return bd; - - bd = bsg_add_device(inode, bcd->queue, file); + if (!bd) + bd = bsg_add_device(inode, bcd->queue, file); +out_unlock: + mutex_unlock(&bsg_mutex); return bd; } diff --git a/block/partitions/check.c b/block/partitions/check.c index 720145c49066..ffe408fead0c 100644 --- a/block/partitions/check.c +++ b/block/partitions/check.c @@ -122,7 +122,7 @@ static struct parsed_partitions *allocate_partitions(struct gendisk *hd) return NULL; nr = disk_max_parts(hd); - state->parts = vzalloc(nr * sizeof(state->parts[0])); + state->parts = vzalloc(array_size(nr, sizeof(state->parts[0]))); if (!state->parts) { kfree(state); return NULL; diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c index 2a365c756648..0417937dfe99 100644 --- a/block/partitions/ldm.c +++ b/block/partitions/ldm.c @@ -378,7 +378,7 @@ static bool ldm_validate_tocblocks(struct parsed_partitions *state, BUG_ON(!state || !ldb); ph = &ldb->ph; tb[0] = &ldb->toc; - tb[1] = kmalloc(sizeof(*tb[1]) * 3, GFP_KERNEL); + tb[1] = kmalloc_array(3, sizeof(*tb[1]), GFP_KERNEL); if (!tb[1]) { ldm_crit("Out of memory."); goto err; diff --git a/certs/Kconfig b/certs/Kconfig index 5f7663df6e8e..c94e93d8bccf 100644 --- 
a/certs/Kconfig +++ b/certs/Kconfig @@ -13,7 +13,7 @@ config MODULE_SIG_KEY If this option is unchanged from its default "certs/signing_key.pem", then the kernel will automatically generate the private key and - certificate as described in Documentation/module-signing.txt + certificate as described in Documentation/admin-guide/module-signing.rst config SYSTEM_TRUSTED_KEYRING bool "Provide system-wide ring of trusted keys" diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 330cf9f2b767..825524f27438 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -255,8 +255,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, processed - as); if (!areq->tsgl_entries) areq->tsgl_entries = 1; - areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * - areq->tsgl_entries, + areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl), + areq->tsgl_entries), GFP_KERNEL); if (!areq->tsgl) { err = -ENOMEM; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 15cf3c5222e0..4c04eb9888ad 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -100,7 +100,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0); if (!areq->tsgl_entries) areq->tsgl_entries = 1; - areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries, + areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl), + areq->tsgl_entries), GFP_KERNEL); if (!areq->tsgl) { err = -ENOMEM; diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c index 39aecad286fe..26539e9a8bda 100644 --- a/crypto/asymmetric_keys/asymmetric_type.c +++ b/crypto/asymmetric_keys/asymmetric_type.c @@ -1,6 +1,6 @@ /* Asymmetric public-key cryptography key type * - * See Documentation/security/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.txt * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c index 11b7ba170904..28198314bc39 100644 --- a/crypto/asymmetric_keys/signature.c +++ b/crypto/asymmetric_keys/signature.c @@ -1,6 +1,6 @@ /* Signature verification with an asymmetric key * - * See Documentation/security/asymmetric-keys.txt + * See Documentation/crypto/asymmetric-keys.txt * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index d1d99843cce4..11e45352fd0b 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -603,7 +603,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc, goto out_nooutbuf; /* avoid "the frame size is larger than 1024 bytes" compiler warning */ - sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 4 : 2), GFP_KERNEL); + sg = kmalloc(array3_size(sizeof(*sg), 8, (diff_dst ? 
4 : 2)), + GFP_KERNEL); if (!sg) goto out_nosg; sgout = &sg[16]; diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index cb6ac5c65c2e..38a286975c31 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -233,11 +233,13 @@ static const struct lpss_device_desc lpt_sdio_dev_desc = { static const struct lpss_device_desc byt_pwm_dev_desc = { .flags = LPSS_SAVE_CTX, + .prv_offset = 0x800, .setup = byt_pwm_setup, }; static const struct lpss_device_desc bsw_pwm_dev_desc = { .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, + .prv_offset = 0x800, .setup = bsw_pwm_setup, }; diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index 88cd949003f3..eaa60c94205a 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -82,7 +82,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev, if (count < 0) { return NULL; } else if (count > 0) { - resources = kzalloc(count * sizeof(struct resource), + resources = kcalloc(count, sizeof(struct resource), GFP_KERNEL); if (!resources) { dev_err(&adev->dev, "No memory for resources\n"); diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 2f2e737be0f8..f0b52266b3ac 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -832,8 +832,9 @@ int acpi_video_get_levels(struct acpi_device *device, * in order to account for buggy BIOS which don't export the first two * special levels (see below) */ - br->levels = kmalloc((obj->package.count + ACPI_VIDEO_FIRST_LEVEL) * - sizeof(*br->levels), GFP_KERNEL); + br->levels = kmalloc_array(obj->package.count + ACPI_VIDEO_FIRST_LEVEL, + sizeof(*br->levels), + GFP_KERNEL); if (!br->levels) { result = -ENOMEM; goto out_free; diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c index dc94de91033e..992bd7b92540 100644 --- a/drivers/acpi/acpica/dbnames.c +++ b/drivers/acpi/acpica/dbnames.c @@ -322,6 +322,7 @@ acpi_db_walk_and_match_name(acpi_handle obj_handle, acpi_os_printf("Could Not get pathname for object %p\n", obj_handle); } else { + info.count = 0; info.owner_id = ACPI_OWNER_ID_MAX; info.debug_level = ACPI_UINT32_MAX; info.display_type = ACPI_DISPLAY_SUMMARY | ACPI_DISPLAY_SHORT; diff --git a/drivers/acpi/acpica/dbobject.c b/drivers/acpi/acpica/dbobject.c index 58c3253b533a..a1c76bf21122 100644 --- a/drivers/acpi/acpica/dbobject.c +++ b/drivers/acpi/acpica/dbobject.c @@ -35,6 +35,15 @@ void acpi_db_dump_method_info(acpi_status status, struct acpi_walk_state *walk_state) { struct acpi_thread_state *thread; + struct acpi_namespace_node *node; + + node = walk_state->method_node; + + /* There are no locals or arguments for the module-level code case */ + + if (node == acpi_gbl_root_node) { + return; + } /* Ignore control codes, they are not errors */ @@ -384,8 +393,14 @@ void acpi_db_decode_locals(struct acpi_walk_state *walk_state) struct acpi_namespace_node *node; u8 display_locals = FALSE; - obj_desc = walk_state->method_desc; node = walk_state->method_node; + obj_desc = walk_state->method_desc; + + /* There are no locals for the module-level code case */ + + if (node == acpi_gbl_root_node) { + return; + } if (!node) { acpi_os_printf @@ -452,6 +467,12 @@ void acpi_db_decode_arguments(struct acpi_walk_state *walk_state) node = walk_state->method_node; obj_desc = walk_state->method_desc; + /* There are no arguments for the module-level code case */ + + if (node == acpi_gbl_root_node) { + return; + } + if (!node) { acpi_os_printf ("No method node (Executing subtree for buffer or 
opregion)\n"); diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c index 70a2fca60306..9d33f0bb2885 100644 --- a/drivers/acpi/acpica/dsdebug.c +++ b/drivers/acpi/acpica/dsdebug.c @@ -162,9 +162,15 @@ acpi_ds_dump_method_stack(acpi_status status, op->common.next = NULL; #ifdef ACPI_DISASSEMBLER - acpi_os_printf("Failed at "); - acpi_dm_disassemble(next_walk_state, op, - ACPI_UINT32_MAX); + if (walk_state->method_node != + acpi_gbl_root_node) { + + /* More verbose if not module-level code */ + + acpi_os_printf("Failed at "); + acpi_dm_disassemble(next_walk_state, op, + ACPI_UINT32_MAX); + } #endif op->common.next = next; } diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c index f85c6f3271f6..2373a7492151 100644 --- a/drivers/acpi/acpica/exconfig.c +++ b/drivers/acpi/acpica/exconfig.c @@ -489,6 +489,17 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle) */ ACPI_WARNING((AE_INFO, "Received request to unload an ACPI table")); + /* + * May 2018: Unload is no longer supported for the following reasons: + * 1) A correct implementation on some hosts may not be possible. + * 2) Other ACPI implementations do not correctly/fully support it. + * 3) It requires host device driver support which does not exist. + * (To properly support namespace unload out from underneath.) + * 4) This AML operator has never been seen in the field. + */ + ACPI_EXCEPTION((AE_INFO, AE_NOT_IMPLEMENTED, + "AML Unload operator is not supported")); + /* * Validate the handle * Although the handle is partially validated in acpi_ex_reconfiguration() diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c index 4bdbd1d8431b..90ccffcd770b 100644 --- a/drivers/acpi/acpica/nsdump.c +++ b/drivers/acpi/acpica/nsdump.c @@ -170,6 +170,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, } type = this_node->type; + info->count++; /* Check if the owner matches */ @@ -639,6 +640,7 @@ acpi_ns_dump_objects(acpi_object_type type, return; } + info.count = 0; info.debug_level = ACPI_LV_TABLES; info.owner_id = owner_id; info.display_type = display_type; @@ -649,6 +651,7 @@ acpi_ns_dump_objects(acpi_object_type type, acpi_ns_dump_one_object, NULL, (void *)&info, NULL); + acpi_os_printf("\nNamespace node count: %u\n\n", info.count); (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); } diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index 68422afc365f..bc5f05906bd1 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c @@ -515,6 +515,22 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } + if (walk_state->opcode == AML_SCOPE_OP) { + /* + * If the scope op fails to parse, skip the body of the + * scope op because the parse failure indicates that the + * device may not exist. + */ + walk_state->parser_state.aml = + walk_state->aml + 1; + walk_state->parser_state.aml = + acpi_ps_get_next_package_end + (&walk_state->parser_state); + walk_state->aml = + walk_state->parser_state.aml; + ACPI_ERROR((AE_INFO, + "Skipping Scope block")); + } continue; } @@ -557,7 +573,40 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } + if ((walk_state->control_state) && + ((walk_state->control_state->control. + opcode == AML_IF_OP) + || (walk_state->control_state->control. + opcode == AML_WHILE_OP))) { + /* + * If the if/while op fails to parse, we will skip parsing + * the body of the op. 
+ */ + parser_state->aml = + walk_state->control_state->control. + aml_predicate_start + 1; + parser_state->aml = + acpi_ps_get_next_package_end + (parser_state); + walk_state->aml = parser_state->aml; + ACPI_ERROR((AE_INFO, + "Skipping While/If block")); + if (*walk_state->aml == AML_ELSE_OP) { + ACPI_ERROR((AE_INFO, + "Skipping Else block")); + walk_state->parser_state.aml = + walk_state->aml + 1; + walk_state->parser_state.aml = + acpi_ps_get_next_package_end + (parser_state); + walk_state->aml = + parser_state->aml; + } + ACPI_FREE(acpi_ut_pop_generic_state + (&walk_state->control_state)); + } + op = NULL; continue; } }
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c index 7d9d0151ee54..3138e7a00da8 100644 --- a/drivers/acpi/acpica/psobject.c +++ b/drivers/acpi/acpica/psobject.c @@ -12,6 +12,7 @@ #include "acparser.h" #include "amlcode.h" #include "acconvert.h" +#include "acnamesp.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("psobject")
@@ -549,6 +550,21 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state, do { if (*op) { + /* + * These opcodes need to be removed from the namespace because + * their namespace nodes get created even when the opcodes + * themselves cannot be created due to errors. + */ + if (((*op)->common.aml_opcode == AML_REGION_OP) + || ((*op)->common.aml_opcode == + AML_DATA_REGION_OP)) { + acpi_ns_delete_children((*op)->common. + node); + acpi_ns_remove_node((*op)->common.node); + (*op)->common.node = NULL; + acpi_ps_delete_parse_tree(*op); + } + status2 = acpi_ps_complete_this_op(walk_state, *op); if (ACPI_FAILURE(status2)) {
@@ -574,6 +590,20 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state, #endif walk_state->prev_op = NULL; walk_state->prev_arg_types = walk_state->arg_types; + + if (walk_state->parse_flags & ACPI_PARSE_MODULE_LEVEL) { + /* + * Something went wrong while executing code at the + * module level. We need to skip parsing whatever caused the + * error and keep going. One runtime error during the table load + * should not prevent the entire table from being loaded. This is + * because there could be correct AML beyond the parts that caused + * the runtime error. + */ + ACPI_ERROR((AE_INFO, + "Ignore error and continue table load")); + return_ACPI_STATUS(AE_OK); + } return_ACPI_STATUS(status); }
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c index e0a442b8648b..bd6af8c87d48 100644 --- a/drivers/acpi/acpica/pswalk.c +++ b/drivers/acpi/acpica/pswalk.c @@ -25,22 +25,48 @@ ACPI_MODULE_NAME("pswalk") * DESCRIPTION: Delete a portion of or an entire parse tree. * ******************************************************************************/ +#include "amlcode.h" void acpi_ps_delete_parse_tree(union acpi_parse_object *subtree_root) { union acpi_parse_object *op = subtree_root; union acpi_parse_object *next = NULL; union acpi_parse_object *parent = NULL; + u32 level = 0; ACPI_FUNCTION_TRACE_PTR(ps_delete_parse_tree, subtree_root); + ACPI_DEBUG_PRINT((ACPI_DB_PARSE_TREES, " root %p\n", subtree_root)); + /* Visit all nodes in the subtree */ while (op) { - - /* Check if we are not ascending */ - if (op != parent) { + /* This is the descending case */ + + if (ACPI_IS_DEBUG_ENABLED + (ACPI_LV_PARSE_TREES, _COMPONENT)) { + + /* This debug option will print the entire parse tree */ + + acpi_os_printf(" %*.s%s %p", (level * 4), + " ", + acpi_ps_get_opcode_name(op-> + common.
+ aml_opcode), + op); + + if (op->named.aml_opcode == AML_INT_NAMEPATH_OP) { + acpi_os_printf(" %4.4s", + op->common.value.string); + } + if (op->named.aml_opcode == AML_STRING_OP) { + acpi_os_printf(" %s", + op->common.value.string); + } + acpi_os_printf("\n"); + } + /* Look for an argument or child of the current op */ next = acpi_ps_get_arg(op, 0); @@ -49,6 +75,7 @@ void acpi_ps_delete_parse_tree(union acpi_parse_object *subtree_root) /* Still going downward in tree (Op is not completed yet) */ op = next; + level++; continue; } } @@ -69,6 +96,7 @@ void acpi_ps_delete_parse_tree(union acpi_parse_object *subtree_root) if (next) { op = next; } else { + level--; op = parent; } } diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c index 12d4a0f6b8d2..5a64ddaed8a3 100644 --- a/drivers/acpi/acpica/uterror.c +++ b/drivers/acpi/acpica/uterror.c @@ -182,20 +182,20 @@ acpi_ut_prefixed_namespace_error(const char *module_name, switch (lookup_status) { case AE_ALREADY_EXISTS: - acpi_os_printf(ACPI_MSG_BIOS_ERROR); + acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR); message = "Failure creating"; break; case AE_NOT_FOUND: - acpi_os_printf(ACPI_MSG_BIOS_ERROR); - message = "Failure looking up"; + acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR); + message = "Could not resolve"; break; default: - acpi_os_printf(ACPI_MSG_ERROR); - message = "Failure looking up"; + acpi_os_printf("\n" ACPI_MSG_ERROR); + message = "Failure resolving"; break; } diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c index 1b415fa90cf8..64b63c81994b 100644 --- a/drivers/acpi/acpica/utosi.c +++ b/drivers/acpi/acpica/utosi.c @@ -69,6 +69,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = { {"Windows 2015", NULL, 0, ACPI_OSI_WIN_10}, /* Windows 10 - Added 03/2015 */ {"Windows 2016", NULL, 0, ACPI_OSI_WIN_10_RS1}, /* Windows 10 version 1607 - Added 12/2017 */ {"Windows 2017", NULL, 0, ACPI_OSI_WIN_10_RS2}, /* Windows 10 version 1703 - Added 12/2017 */ + {"Windows 2017.2", NULL, 0, ACPI_OSI_WIN_10_RS3}, /* Windows 10 version 1709 - Added 02/2018 */ /* Feature Group Strings */ diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 9bff853e85f3..3c5ea7cb693e 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -524,7 +524,8 @@ static int __erst_record_id_cache_add_one(void) pr_warn(FW_WARN "too many record IDs!\n"); return 0; } - new_entries = kvmalloc(new_size * sizeof(entries[0]), GFP_KERNEL); + new_entries = kvmalloc_array(new_size, sizeof(entries[0]), + GFP_KERNEL); if (!new_entries) return -ENOMEM; memcpy(new_entries, entries, diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index 9cb74115a43d..b1e9f81ebeea 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c @@ -195,7 +195,8 @@ static int __init hest_ghes_dev_register(unsigned int ghes_count) struct ghes_arr ghes_arr; ghes_arr.count = 0; - ghes_arr.ghes_devs = kmalloc(sizeof(void *) * ghes_count, GFP_KERNEL); + ghes_arr.ghes_devs = kmalloc_array(ghes_count, sizeof(void *), + GFP_KERNEL); if (!ghes_arr.ghes_devs) return -ENOMEM; diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 3563103590c6..fe0183d48dcd 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -298,8 +298,8 @@ static int acpi_fan_get_fps(struct acpi_device *device) } fan->fps_count = obj->package.count - 1; /* minus revision field */ - fan->fps = devm_kzalloc(&device->dev, - fan->fps_count * sizeof(struct acpi_fan_fps), + fan->fps = devm_kcalloc(&device->dev, + fan->fps_count, 
sizeof(struct acpi_fan_fps), GFP_KERNEL); if (!fan->fps) { dev_err(&device->dev, "Not enough memory\n"); diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index b87252bf4571..d15814e1727f 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -1082,9 +1082,10 @@ static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc, continue; nfit_mem->nfit_flush = nfit_flush; flush = nfit_flush->flush; - nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev, - flush->hint_count - * sizeof(struct resource), GFP_KERNEL); + nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev, + flush->hint_count, + sizeof(struct resource), + GFP_KERNEL); if (!nfit_mem->flush_wpq) return -ENOMEM; for (i = 0; i < flush->hint_count; i++) { diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index a651ab3490d8..a303fd0e108c 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -343,8 +343,9 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr) pr->performance->state_count = pss->package.count; pr->performance->states = - kmalloc(sizeof(struct acpi_processor_px) * pss->package.count, - GFP_KERNEL); + kmalloc_array(pss->package.count, + sizeof(struct acpi_processor_px), + GFP_KERNEL); if (!pr->performance->states) { result = -ENOMEM; goto end; diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index 7f9aff4b8d62..fbc936cf2025 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -534,8 +534,9 @@ static int acpi_processor_get_throttling_states(struct acpi_processor *pr) pr->throttling.state_count = tss->package.count; pr->throttling.states_tss = - kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count, - GFP_KERNEL); + kmalloc_array(tss->package.count, + sizeof(struct acpi_processor_tx_tss), + GFP_KERNEL); if (!pr->throttling.states_tss) { result = -ENOMEM; goto end; diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 4fc59c3bc673..41324f0b1bee 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -857,12 +857,12 @@ void acpi_irq_stats_init(void) num_gpes = acpi_current_gpe_count; num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA; - all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1), + all_attrs = kcalloc(num_counters + 1, sizeof(struct attribute *), GFP_KERNEL); if (all_attrs == NULL) return; - all_counters = kzalloc(sizeof(struct event_counter) * (num_counters), + all_counters = kcalloc(num_counters, sizeof(struct event_counter), GFP_KERNEL); if (all_counters == NULL) goto fail; @@ -871,7 +871,7 @@ void acpi_irq_stats_init(void) if (ACPI_FAILURE(status)) goto fail; - counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters), + counter_attrs = kcalloc(num_counters, sizeof(struct kobj_attribute), GFP_KERNEL); if (counter_attrs == NULL) goto fail; diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 4f382d51def1..2628806c64a2 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -692,8 +692,8 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, } } #endif - alloc->pages = kzalloc(sizeof(alloc->pages[0]) * - ((vma->vm_end - vma->vm_start) / PAGE_SIZE), + alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE, + sizeof(alloc->pages[0]), GFP_KERNEL); if (alloc->pages == NULL) { ret = -ENOMEM; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 
c41b9eeabe7c..27d15ed7fa3d 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -6987,7 +6987,7 @@ static void __init ata_parse_force_param(void) if (*p == ',') size++; - ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL); + ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL); if (!ata_force_tbl) { printk(KERN_WARNING "ata: failed to extend force table, " "libata.force ignored\n"); diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index 85aa76116a30..2ae1799f4992 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c @@ -340,7 +340,7 @@ static int sata_pmp_init_links (struct ata_port *ap, int nr_ports) int i, err; if (!pmp_link) { - pmp_link = kzalloc(sizeof(pmp_link[0]) * SATA_PMP_MAX_PORTS, + pmp_link = kcalloc(SATA_PMP_MAX_PORTS, sizeof(pmp_link[0]), GFP_NOIO); if (!pmp_link) return -ENOMEM; diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index cddf96f6e431..73ba8e134ca9 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -4114,13 +4114,13 @@ static int mv_platform_probe(struct platform_device *pdev) if (!host || !hpriv) return -ENOMEM; - hpriv->port_clks = devm_kzalloc(&pdev->dev, - sizeof(struct clk *) * n_ports, + hpriv->port_clks = devm_kcalloc(&pdev->dev, + n_ports, sizeof(struct clk *), GFP_KERNEL); if (!hpriv->port_clks) return -ENOMEM; - hpriv->port_phys = devm_kzalloc(&pdev->dev, - sizeof(struct phy *) * n_ports, + hpriv->port_phys = devm_kcalloc(&pdev->dev, + n_ports, sizeof(struct phy *), GFP_KERNEL); if (!hpriv->port_phys) return -ENOMEM; diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index 6ebc4e4820fc..99a38115b0a8 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -2094,7 +2094,8 @@ static int fore200e_alloc_rx_buf(struct fore200e *fore200e) DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn); /* allocate the array of receive buffers */ - buffer = bsq->buffer = kzalloc(nbr * sizeof(struct buffer), GFP_KERNEL); + buffer = bsq->buffer = kcalloc(nbr, sizeof(struct buffer), + GFP_KERNEL); if (buffer == NULL) return -ENOMEM; diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index be076606d30e..ff81a576347e 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -1618,7 +1618,7 @@ static int rx_init(struct atm_dev *dev) skb_queue_head_init(&iadev->rx_dma_q); iadev->rx_free_desc_qhead = NULL; - iadev->rx_open = kzalloc(4 * iadev->num_vc, GFP_KERNEL); + iadev->rx_open = kcalloc(4, iadev->num_vc, GFP_KERNEL); if (!iadev->rx_open) { printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n", dev->number); diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 0df1a1c80b00..17283018269f 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -1291,7 +1291,8 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id) card->using_dma = 1; if (1) { /* All known FPGA versions so far */ card->dma_alignment = 3; - card->dma_bounce = kmalloc(card->nr_ports * BUF_SIZE, GFP_KERNEL); + card->dma_bounce = kmalloc_array(card->nr_ports, + BUF_SIZE, GFP_KERNEL); if (!card->dma_bounce) { dev_warn(&card->dev->dev, "Failed to allocate DMA bounce buffers\n"); err = -ENOMEM; diff --git a/drivers/auxdisplay/cfag12864b.c b/drivers/auxdisplay/cfag12864b.c index 6bd2f65e116a..7eebae7e322c 100644 --- a/drivers/auxdisplay/cfag12864b.c +++ b/drivers/auxdisplay/cfag12864b.c @@ -333,8 +333,8 @@ static int __init cfag12864b_init(void) goto none; } - cfag12864b_cache = kmalloc(sizeof(unsigned 
char) * - CFAG12864B_SIZE, GFP_KERNEL); + cfag12864b_cache = kmalloc(CFAG12864B_SIZE, + GFP_KERNEL); if (cfag12864b_cache == NULL) { printk(KERN_ERR CFAG12864B_NAME ": ERROR: " "can't alloc cache buffer (%i bytes)\n", diff --git a/drivers/base/dd.c b/drivers/base/dd.c index fb4e2df68d95..1435d7281c66 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -580,7 +580,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev) pr_debug("bus: '%s': %s: matched device %s with driver %s\n", drv->bus->name, __func__, dev_name(dev), drv->name); - pm_runtime_resume_suppliers(dev); + pm_runtime_get_suppliers(dev); if (dev->parent) pm_runtime_get_sync(dev->parent); @@ -591,6 +591,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev) if (dev->parent) pm_runtime_put(dev->parent); + pm_runtime_put_suppliers(dev); return ret; } diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c index b676a99c469c..7f732744f0d3 100644 --- a/drivers/base/firmware_loader/fallback.c +++ b/drivers/base/firmware_loader/fallback.c @@ -403,7 +403,7 @@ static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size) fw_priv->page_array_size * 2); struct page **new_pages; - new_pages = vmalloc(new_array_size * sizeof(void *)); + new_pages = vmalloc(array_size(new_array_size, sizeof(void *))); if (!new_pages) { fw_load_abort(fw_sysfs); return -ENOMEM; diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index 7ae62b6355b8..df41b4780b3b 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -116,14 +116,51 @@ int dev_pm_domain_attach(struct device *dev, bool power_on) } EXPORT_SYMBOL_GPL(dev_pm_domain_attach); +/** + * dev_pm_domain_attach_by_id - Associate a device with one of its PM domains. + * @dev: The device used to lookup the PM domain. + * @index: The index of the PM domain. + * + * As @dev may only be attached to a single PM domain, the backend PM domain + * provider creates a virtual device to attach instead. If attachment succeeds, + * the ->detach() callback in the struct dev_pm_domain are assigned by the + * corresponding backend attach function, as to deal with detaching of the + * created virtual device. + * + * This function should typically be invoked by a driver during the probe phase, + * in case its device requires power management through multiple PM domains. The + * driver may benefit from using the received device, to configure device-links + * towards its original device. Depending on the use-case and if needed, the + * links may be dynamically changed by the driver, which allows it to control + * the power to the PM domains independently from each other. + * + * Callers must ensure proper synchronization of this function with power + * management callbacks. + * + * Returns the virtual created device when successfully attached to its PM + * domain, NULL in case @dev don't need a PM domain, else an ERR_PTR(). + * Note that, to detach the returned virtual device, the driver shall call + * dev_pm_domain_detach() on it, typically during the remove phase. + */ +struct device *dev_pm_domain_attach_by_id(struct device *dev, + unsigned int index) +{ + if (dev->pm_domain) + return ERR_PTR(-EEXIST); + + return genpd_dev_pm_attach_by_id(dev, index); +} +EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id); + /** * dev_pm_domain_detach - Detach a device from its PM domain. * @dev: Device to detach. * @power_off: Used to indicate whether we should power off the device. 
* - * This functions will reverse the actions from dev_pm_domain_attach() and thus - * try to detach the @dev from its PM domain. Typically it should be invoked - * from subsystem level code during the remove phase. + * This function will reverse the actions from dev_pm_domain_attach() and + * dev_pm_domain_attach_by_id(), thus it detaches @dev from its PM domain. + * Typically it should be invoked during the remove phase, either from + * subsystem level code or from drivers. * * Callers must ensure proper synchronization of this function with power * management callbacks. diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 6f403d6fccb2..4925af5c4cf0 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2171,6 +2171,15 @@ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np) } EXPORT_SYMBOL_GPL(of_genpd_remove_last); +static void genpd_release_dev(struct device *dev) +{ + kfree(dev); +} + +static struct bus_type genpd_bus_type = { + .name = "genpd", +}; + /** * genpd_dev_pm_detach - Detach a device from its PM domain. * @dev: Device to detach. @@ -2208,6 +2217,10 @@ static void genpd_dev_pm_detach(struct device *dev, bool power_off) /* Check if PM domain can be powered off after removing this device. */ genpd_queue_power_off_work(pd); + + /* Unregister the device if it was created by genpd. */ + if (dev->bus == &genpd_bus_type) + device_unregister(dev); } static void genpd_dev_pm_sync(struct device *dev) @@ -2221,32 +2234,17 @@ static void genpd_dev_pm_sync(struct device *dev) genpd_queue_power_off_work(pd); } -/** - * genpd_dev_pm_attach - Attach a device to its PM domain using DT. - * @dev: Device to attach. - * - * Parse device's OF node to find a PM domain specifier. If such is found, - * attaches the device to retrieved pm_domain ops. - * - * Returns 1 on successfully attached PM domain, 0 when the device don't need a - * PM domain or a negative error code in case of failures. Note that if a - * power-domain exists for the device, but it cannot be found or turned on, - * then return -EPROBE_DEFER to ensure that the device is not probed and to - * re-try again later. - */ -int genpd_dev_pm_attach(struct device *dev) +static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np, + unsigned int index) { struct of_phandle_args pd_args; struct generic_pm_domain *pd; int ret; - if (!dev->of_node) - return 0; - - ret = of_parse_phandle_with_args(dev->of_node, "power-domains", - "#power-domain-cells", 0, &pd_args); + ret = of_parse_phandle_with_args(np, "power-domains", + "#power-domain-cells", index, &pd_args); if (ret < 0) - return 0; + return ret; mutex_lock(&gpd_list_lock); pd = genpd_get_from_provider(&pd_args); @@ -2282,8 +2280,98 @@ int genpd_dev_pm_attach(struct device *dev) return ret ? -EPROBE_DEFER : 1; } + +/** + * genpd_dev_pm_attach - Attach a device to its PM domain using DT. + * @dev: Device to attach. + * + * Parse device's OF node to find a PM domain specifier. If such is found, + * attaches the device to retrieved pm_domain ops. + * + * Returns 1 on successfully attached PM domain, 0 when the device doesn't need a + * PM domain or when multiple power-domains exist for it, else a negative error + * code. Note that if a power-domain exists for the device, but it cannot be + * found or turned on, then -EPROBE_DEFER is returned to ensure that the device is + * not probed now and re-tried later.
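+ * + * For example, a caller is expected to treat a negative value (typically + * -EPROBE_DEFER) as a request to retry probing later, 0 as nothing to attach + * here, and 1 as a successfully attached and powered PM domain.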
+ */ +int genpd_dev_pm_attach(struct device *dev) +{ + if (!dev->of_node) + return 0; + + /* + * Devices with multiple PM domains must be attached separately, as we + * can only attach one PM domain per device. + */ + if (of_count_phandle_with_args(dev->of_node, "power-domains", + "#power-domain-cells") != 1) + return 0; + + return __genpd_dev_pm_attach(dev, dev->of_node, 0); +} EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); +/** + * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains. + * @dev: The device used to look up the PM domain. + * @index: The index of the PM domain. + * + * Parse device's OF node to find a PM domain specifier at the provided @index. + * If such is found, creates a virtual device and attaches it to the retrieved + * pm_domain ops. To deal with detaching of the virtual device, the ->detach() + * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach(). + * + * Returns the created virtual device if successfully attached to its PM domain, + * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of + * failures. If a power-domain exists for the device, but cannot be found or + * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device + * is not probed now and re-tried later. + */ +struct device *genpd_dev_pm_attach_by_id(struct device *dev, + unsigned int index) +{ + struct device *genpd_dev; + int num_domains; + int ret; + + if (!dev->of_node) + return NULL; + + /* Deal only with devices using multiple PM domains. */ + num_domains = of_count_phandle_with_args(dev->of_node, "power-domains", + "#power-domain-cells"); + if (num_domains < 2 || index >= num_domains) + return NULL; + + /* Allocate and register device on the genpd bus. */ + genpd_dev = kzalloc(sizeof(*genpd_dev), GFP_KERNEL); + if (!genpd_dev) + return ERR_PTR(-ENOMEM); + + dev_set_name(genpd_dev, "genpd:%u:%s", index, dev_name(dev)); + genpd_dev->bus = &genpd_bus_type; + genpd_dev->release = genpd_release_dev; + + ret = device_register(genpd_dev); + if (ret) { + kfree(genpd_dev); + return ERR_PTR(ret); + } + + /* Try to attach the device to the PM domain at the specified index. */ + ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index); + if (ret < 1) { + device_unregister(genpd_dev); + return ret ? ERR_PTR(ret) : NULL; + } + + pm_runtime_set_active(genpd_dev); + pm_runtime_enable(genpd_dev); + + return genpd_dev; +} +EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id); + static const struct of_device_id idle_state_match[] = { { .compatible = "domain-idle-state", }, { } @@ -2443,6 +2531,12 @@ unsigned int of_genpd_opp_to_performance_state(struct device *dev, } EXPORT_SYMBOL_GPL(of_genpd_opp_to_performance_state); +static int __init genpd_bus_init(void) +{ + return bus_register(&genpd_bus_type); +} +core_initcall(genpd_bus_init); + #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index c6030f100c08..beb85c31f3fa 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1563,16 +1563,37 @@ void pm_runtime_clean_up_links(struct device *dev) } /** - * pm_runtime_resume_suppliers - Resume supplier devices. + * pm_runtime_get_suppliers - Resume and reference-count supplier devices. * @dev: Consumer device.
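In effect, the dd.c hunk earlier in this patch brackets every probe attempt with this pair, so DL_FLAG_PM_RUNTIME suppliers stay powered for the whole probe window. A simplified sketch of the resulting flow in driver_probe_device() (condensed for illustration, not a verbatim copy):

	pm_runtime_get_suppliers(dev);	/* runtime-resume and pin suppliers */
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	ret = really_probe(dev, drv);	/* suppliers cannot suspend here */
	if (dev->parent)
		pm_runtime_put(dev->parent);
	pm_runtime_put_suppliers(dev);	/* drop the supplier references */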
*/ -void pm_runtime_resume_suppliers(struct device *dev) +void pm_runtime_get_suppliers(struct device *dev) { + struct device_link *link; int idx; idx = device_links_read_lock(); - rpm_get_suppliers(dev); + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) + if (link->flags & DL_FLAG_PM_RUNTIME) + pm_runtime_get_sync(link->supplier); + + device_links_read_unlock(idx); +} + +/** + * pm_runtime_put_suppliers - Drop references to supplier devices. + * @dev: Consumer device. + */ +void pm_runtime_put_suppliers(struct device *dev) +{ + struct device_link *link; + int idx; + + idx = device_links_read_lock(); + + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) + if (link->flags & DL_FLAG_PM_RUNTIME) + pm_runtime_put(link->supplier); device_links_read_unlock(idx); } diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 0f651efc58a1..d713738ce796 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -353,7 +353,7 @@ static ssize_t wakeup_count_show(struct device *dev, spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { - count = dev->power.wakeup->event_count; + count = dev->power.wakeup->wakeup_count; enabled = true; } spin_unlock_irq(&dev->power.lock); diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 6ca77d6047d6..f6518067aa7d 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -5719,8 +5719,8 @@ static bool DAC960_CheckStatusBuffer(DAC960_Controller_T *Controller, Controller->CombinedStatusBufferLength = NewStatusBufferLength; return true; } - NewStatusBuffer = kmalloc(2 * Controller->CombinedStatusBufferLength, - GFP_ATOMIC); + NewStatusBuffer = kmalloc_array(2, Controller->CombinedStatusBufferLength, + GFP_ATOMIC); if (NewStatusBuffer == NULL) { DAC960_Warning("Unable to expand Combined Status Buffer - Truncating\n", diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 7655d6133139..a80809bd3057 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -511,7 +511,8 @@ static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask) { unsigned int *resources_per_cpu, min_index = ~0; - resources_per_cpu = kzalloc(nr_cpu_ids * sizeof(*resources_per_cpu), GFP_KERNEL); + resources_per_cpu = kcalloc(nr_cpu_ids, sizeof(*resources_per_cpu), + GFP_KERNEL); if (resources_per_cpu) { struct drbd_resource *resource; unsigned int cpu, min = ~0; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 21e6d1b3b393..d6b6f434fd4b 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -524,7 +524,8 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, __rq_for_each_bio(bio, rq) segments += bio_segments(bio); - bvec = kmalloc(sizeof(struct bio_vec) * segments, GFP_NOIO); + bvec = kmalloc_array(segments, sizeof(struct bio_vec), + GFP_NOIO); if (!bvec) return -EIO; cmd->bvec = bvec; diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 2bdadd7f1454..7948049f6c43 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -1575,12 +1575,12 @@ static int setup_commands(struct nullb_queue *nq) struct nullb_cmd *cmd; int i, tag_size; - nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL); + nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL); if (!nq->cmds) return -ENOMEM; tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG; - nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL); + nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL); if 
(!nq->tag_map) { kfree(nq->cmds); return -ENOMEM; @@ -1598,8 +1598,9 @@ static int setup_commands(struct nullb_queue *nq) static int setup_queues(struct nullb *nullb) { - nullb->queues = kzalloc(nullb->dev->submit_queues * - sizeof(struct nullb_queue), GFP_KERNEL); + nullb->queues = kcalloc(nullb->dev->submit_queues, + sizeof(struct nullb_queue), + GFP_KERNEL); if (!nullb->queues) return -ENOMEM; diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index 8fa4533a1249..1e3d5de9d838 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c @@ -407,8 +407,9 @@ static int ps3vram_cache_init(struct ps3_system_bus_device *dev) priv->cache.page_count = CACHE_PAGE_COUNT; priv->cache.page_size = CACHE_PAGE_SIZE; - priv->cache.tags = kzalloc(sizeof(struct ps3vram_tag) * - CACHE_PAGE_COUNT, GFP_KERNEL); + priv->cache.tags = kcalloc(CACHE_PAGE_COUNT, + sizeof(struct ps3vram_tag), + GFP_KERNEL); if (!priv->cache.tags) return -ENOMEM; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index af354047ac4b..fa0729c1e776 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -2339,6 +2339,7 @@ static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes) static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes) { unsigned int num_osd_ops = obj_req->osd_req->r_num_ops; + int ret; dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes); rbd_assert(obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_STAT); @@ -2353,6 +2354,11 @@ static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes) if (!obj_req->osd_req) return -ENOMEM; + ret = osd_req_op_cls_init(obj_req->osd_req, 0, CEPH_OSD_OP_CALL, "rbd", + "copyup"); + if (ret) + return ret; + /* * Only send non-zero copyup data to save some I/O and network * bandwidth -- zero copyup data is equivalent to the object not @@ -2362,9 +2368,6 @@ static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes) dout("%s obj_req %p detected zeroes\n", __func__, obj_req); bytes = 0; } - - osd_req_op_cls_init(obj_req->osd_req, 0, CEPH_OSD_OP_CALL, "rbd", - "copyup"); osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0, obj_req->copyup_bvecs, obj_req->copyup_bvec_count, @@ -3397,7 +3400,6 @@ static void cancel_tasks_sync(struct rbd_device *rbd_dev) { dout("%s rbd_dev %p\n", __func__, rbd_dev); - cancel_delayed_work_sync(&rbd_dev->watch_dwork); cancel_work_sync(&rbd_dev->acquired_lock_work); cancel_work_sync(&rbd_dev->released_lock_work); cancel_delayed_work_sync(&rbd_dev->lock_dwork); @@ -3415,6 +3417,7 @@ static void rbd_unregister_watch(struct rbd_device *rbd_dev) rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED; mutex_unlock(&rbd_dev->watch_mutex); + cancel_delayed_work_sync(&rbd_dev->watch_dwork); ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc); } diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index 09537bee387f..b7d71914a32a 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c @@ -873,7 +873,8 @@ static int rsxx_pci_probe(struct pci_dev *dev, dev_info(CARD_TO_DEV(card), "Failed reading the number of DMA targets\n"); - card->ctrl = kzalloc(card->n_targets * sizeof(*card->ctrl), GFP_KERNEL); + card->ctrl = kcalloc(card->n_targets, sizeof(*card->ctrl), + GFP_KERNEL); if (!card->ctrl) { st = -ENOMEM; goto failed_dma_setup; diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c index beaccf197a5a..8fbc1bf6db3d 100644 --- a/drivers/block/rsxx/dma.c +++ b/drivers/block/rsxx/dma.c @@ -1038,7 +1038,7 @@ int rsxx_eeh_save_issued_dmas(struct 
rsxx_cardinfo *card) struct rsxx_dma *dma; struct list_head *issued_dmas; - issued_dmas = kzalloc(sizeof(*issued_dmas) * card->n_targets, + issued_dmas = kcalloc(card->n_targets, sizeof(*issued_dmas), GFP_KERNEL); if (!issued_dmas) return -ENOMEM; diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 66412eededda..a4bc74e72c39 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -139,7 +139,8 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif) { unsigned int r; - blkif->rings = kzalloc(blkif->nr_rings * sizeof(struct xen_blkif_ring), GFP_KERNEL); + blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring), + GFP_KERNEL); if (!blkif->rings) return -ENOMEM; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index ae00a82f350b..b5cedccb5d7d 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1906,7 +1906,9 @@ static int negotiate_mq(struct blkfront_info *info) if (!info->nr_rings) info->nr_rings = 1; - info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL); + info->rinfo = kcalloc(info->nr_rings, + sizeof(struct blkfront_ring_info), + GFP_KERNEL); if (!info->rinfo) { xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure"); return -ENOMEM; @@ -2216,15 +2218,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) } for (i = 0; i < BLK_RING_SIZE(info); i++) { - rinfo->shadow[i].grants_used = kzalloc( - sizeof(rinfo->shadow[i].grants_used[0]) * grants, - GFP_NOIO); - rinfo->shadow[i].sg = kzalloc(sizeof(rinfo->shadow[i].sg[0]) * psegs, GFP_NOIO); - if (info->max_indirect_segments) - rinfo->shadow[i].indirect_grants = kzalloc( - sizeof(rinfo->shadow[i].indirect_grants[0]) * - INDIRECT_GREFS(grants), + rinfo->shadow[i].grants_used = + kcalloc(grants, + sizeof(rinfo->shadow[i].grants_used[0]), GFP_NOIO); + rinfo->shadow[i].sg = kcalloc(psegs, + sizeof(rinfo->shadow[i].sg[0]), + GFP_NOIO); + if (info->max_indirect_segments) + rinfo->shadow[i].indirect_grants = + kcalloc(INDIRECT_GREFS(grants), + sizeof(rinfo->shadow[i].indirect_grants[0]), + GFP_NOIO); if ((rinfo->shadow[i].grants_used == NULL) || (rinfo->shadow[i].sg == NULL) || (info->max_indirect_segments && diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index 8f9130ab5887..d0c5bc4e0703 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c @@ -197,8 +197,9 @@ static int z2_open(struct block_device *bdev, fmode_t mode) vaddr = (unsigned long)z_remap_nocache_nonser(paddr, size); #endif z2ram_map = - kmalloc((size/Z2RAM_CHUNKSIZE)*sizeof(z2ram_map[0]), - GFP_KERNEL); + kmalloc_array(size / Z2RAM_CHUNKSIZE, + sizeof(z2ram_map[0]), + GFP_KERNEL); if ( z2ram_map == NULL ) { printk( KERN_ERR DEVICE_NAME diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index da51293e7c03..7436b2d27fa3 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -898,7 +898,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize) size_t num_pages; num_pages = disksize >> PAGE_SHIFT; - zram->table = vzalloc(num_pages * sizeof(*zram->table)); + zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table))); if (!zram->table) return false; diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c index fb1442b08962..e906ecfe23dd 100644 --- a/drivers/bus/fsl-mc/fsl-mc-allocator.c +++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c @@ -354,8 +354,8 @@ int 
fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus, if (error < 0) return error; - irq_resources = devm_kzalloc(&mc_bus_dev->dev, - sizeof(*irq_resources) * irq_count, + irq_resources = devm_kcalloc(&mc_bus_dev->dev, + irq_count, sizeof(*irq_resources), GFP_KERNEL); if (!irq_resources) { error = -ENOMEM; @@ -455,7 +455,7 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev) return -ENOSPC; } - irqs = devm_kzalloc(&mc_dev->dev, irq_count * sizeof(irqs[0]), + irqs = devm_kcalloc(&mc_dev->dev, irq_count, sizeof(irqs[0]), GFP_KERNEL); if (!irqs) return -ENOMEM; diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 9adc8c3eb0fa..a78b8e7085e9 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -2132,7 +2132,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf, */ nr = nframes; do { - cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL); + cgc.buffer = kmalloc_array(nr, CD_FRAMESIZE_RAW, GFP_KERNEL); if (cgc.buffer) break; diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 410c30c42120..212f447938ae 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -81,7 +81,7 @@ config PRINTER corresponding drivers into the kernel. To compile this driver as a module, choose M here and read - . The module will be called lp. + . The module will be called lp. If you have several parallel ports, you can specify which ports to use with the "lp" kernel command line option. (Try "man bootparam" diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c index b450544dcaf0..6914e4f0ce98 100644 --- a/drivers/char/agp/amd-k7-agp.c +++ b/drivers/char/agp/amd-k7-agp.c @@ -85,7 +85,8 @@ static int amd_create_gatt_pages(int nr_tables) int retval = 0; int i; - tables = kzalloc((nr_tables + 1) * sizeof(struct amd_page_map *),GFP_KERNEL); + tables = kcalloc(nr_tables + 1, sizeof(struct amd_page_map *), + GFP_KERNEL); if (tables == NULL) return -ENOMEM; diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c index 88b4cbee4dac..20bf5f78a362 100644 --- a/drivers/char/agp/ati-agp.c +++ b/drivers/char/agp/ati-agp.c @@ -108,7 +108,8 @@ static int ati_create_gatt_pages(int nr_tables) int retval = 0; int i; - tables = kzalloc((nr_tables + 1) * sizeof(struct ati_page_map *),GFP_KERNEL); + tables = kcalloc(nr_tables + 1, sizeof(struct ati_page_map *), + GFP_KERNEL); if (tables == NULL) return -ENOMEM; diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c index 2053f70ef66b..52ffe1706ce0 100644 --- a/drivers/char/agp/compat_ioctl.c +++ b/drivers/char/agp/compat_ioctl.c @@ -98,11 +98,15 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user if (ureserve.seg_count >= 16384) return -EINVAL; - usegment = kmalloc(sizeof(*usegment) * ureserve.seg_count, GFP_KERNEL); + usegment = kmalloc_array(ureserve.seg_count, + sizeof(*usegment), + GFP_KERNEL); if (!usegment) return -ENOMEM; - ksegment = kmalloc(sizeof(*ksegment) * kreserve.seg_count, GFP_KERNEL); + ksegment = kmalloc_array(kreserve.seg_count, + sizeof(*ksegment), + GFP_KERNEL); if (!ksegment) { kfree(usegment); return -ENOMEM; diff --git a/drivers/char/agp/isoch.c b/drivers/char/agp/isoch.c index fc8e1bc3347d..31c374b1b91b 100644 --- a/drivers/char/agp/isoch.c +++ b/drivers/char/agp/isoch.c @@ -93,7 +93,8 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge, * We'll work with an array of isoch_data's (one for each * device in dev_list) throughout this function. 
*/ - if ((master = kmalloc(ndevs * sizeof(*master), GFP_KERNEL)) == NULL) { + master = kmalloc_array(ndevs, sizeof(*master), GFP_KERNEL); + if (master == NULL) { ret = -ENOMEM; goto get_out; } diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c index 3051c73bc383..e7d5bdc02d93 100644 --- a/drivers/char/agp/sgi-agp.c +++ b/drivers/char/agp/sgi-agp.c @@ -280,9 +280,9 @@ static int agp_sgi_init(void) else return 0; - sgi_tioca_agp_bridges = kmalloc(tioca_gart_found * - sizeof(struct agp_bridge_data *), - GFP_KERNEL); + sgi_tioca_agp_bridges = kmalloc_array(tioca_gart_found, + sizeof(struct agp_bridge_data *), + GFP_KERNEL); if (!sgi_tioca_agp_bridges) return -ENOMEM; diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c index 4dbdd3bc9bb8..7729414100ff 100644 --- a/drivers/char/agp/sworks-agp.c +++ b/drivers/char/agp/sworks-agp.c @@ -96,7 +96,7 @@ static int serverworks_create_gatt_pages(int nr_tables) int retval = 0; int i; - tables = kzalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *), + tables = kcalloc(nr_tables + 1, sizeof(struct serverworks_page_map *), GFP_KERNEL); if (tables == NULL) return -ENOMEM; diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c index 79d8c84693a1..31fcd0430426 100644 --- a/drivers/char/agp/uninorth-agp.c +++ b/drivers/char/agp/uninorth-agp.c @@ -402,7 +402,9 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge) if (table == NULL) return -ENOMEM; - uninorth_priv.pages_arr = kmalloc((1 << page_order) * sizeof(struct page*), GFP_KERNEL); + uninorth_priv.pages_arr = kmalloc_array(1 << page_order, + sizeof(struct page *), + GFP_KERNEL); if (uninorth_priv.pages_arr == NULL) goto enomem; diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 22f634eb09fd..18e4650c233b 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -1757,7 +1757,8 @@ static unsigned short *ssif_address_list(void) list_for_each_entry(info, &ssif_infos, link) count++; - address_list = kzalloc(sizeof(*address_list) * (count + 1), GFP_KERNEL); + address_list = kcalloc(count + 1, sizeof(*address_list), + GFP_KERNEL); if (!address_list) return NULL; diff --git a/drivers/char/raw.c b/drivers/char/raw.c index 293167c6e254..fd6eec8085b4 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c @@ -321,7 +321,8 @@ static int __init raw_init(void) max_raw_minors = MAX_RAW_MINORS; } - raw_devices = vzalloc(sizeof(struct raw_device_data) * max_raw_minors); + raw_devices = vzalloc(array_size(max_raw_minors, + sizeof(struct raw_device_data))); if (!raw_devices) { printk(KERN_ERR "Not enough memory for raw device structures\n"); ret = -ENOMEM; diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index 96c77c8e7f40..d31b09099216 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -980,7 +980,7 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip) goto out; } - chip->cc_attrs_tbl = devm_kzalloc(&chip->dev, 4 * nr_commands, + chip->cc_attrs_tbl = devm_kcalloc(&chip->dev, 4, nr_commands, GFP_KERNEL); rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_CAPABILITY); diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 21085515814f..17084cfcf53e 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -433,8 +433,7 @@ static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size * Allocate buffer and the sg list. 
The sg list array is allocated * directly after the port_buffer struct. */ - buf = kmalloc(sizeof(*buf) + sizeof(struct scatterlist) * pages, - GFP_KERNEL); + buf = kmalloc(struct_size(buf, sg, pages), GFP_KERNEL); if (!buf) goto fail; @@ -1892,13 +1891,14 @@ static int init_vqs(struct ports_device *portdev) nr_ports = portdev->max_nr_ports; nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2; - vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL); - io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL); - io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL); - portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), - GFP_KERNEL); - portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), - GFP_KERNEL); + vqs = kmalloc_array(nr_queues, sizeof(struct virtqueue *), GFP_KERNEL); + io_callbacks = kmalloc_array(nr_queues, sizeof(vq_callback_t *), + GFP_KERNEL); + io_names = kmalloc_array(nr_queues, sizeof(char *), GFP_KERNEL); + portdev->in_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *), + GFP_KERNEL); + portdev->out_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *), + GFP_KERNEL); if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs || !portdev->out_vqs) { err = -ENOMEM; diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 9e0b2f2b48e7..7bef0666ae7e 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -734,7 +734,7 @@ static void bcm2835_pll_debug_init(struct clk_hw *hw, const struct bcm2835_pll_data *data = pll->data; struct debugfs_reg32 *regs; - regs = devm_kzalloc(cprman->dev, 7 * sizeof(*regs), GFP_KERNEL); + regs = devm_kcalloc(cprman->dev, 7, sizeof(*regs), GFP_KERNEL); if (!regs) return; @@ -865,7 +865,7 @@ static void bcm2835_pll_divider_debug_init(struct clk_hw *hw, const struct bcm2835_pll_divider_data *data = divider->data; struct debugfs_reg32 *regs; - regs = devm_kzalloc(cprman->dev, 7 * sizeof(*regs), GFP_KERNEL); + regs = devm_kcalloc(cprman->dev, 7, sizeof(*regs), GFP_KERNEL); if (!regs) return; diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index a24a6afb50b6..9760b526ca31 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -6,7 +6,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * - * Standard functionality for the common clock API. See Documentation/clk.txt + * Standard functionality for the common clock API. See Documentation/driver-api/clk.rst */ #include @@ -2747,7 +2747,7 @@ static int __clk_core_init(struct clk_core *core) goto out; } - /* check that clk_ops are sane. See Documentation/clk.txt */ + /* check that clk_ops are sane. 
See Documentation/driver-api/clk.rst */ if (core->ops->set_rate && !((core->ops->round_rate || core->ops->determine_rate) && core->ops->recalc_rate)) { diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h index 542192376ebf..502bcbb61b04 100644 --- a/drivers/clk/ingenic/cgu.h +++ b/drivers/clk/ingenic/cgu.h @@ -194,7 +194,7 @@ struct ingenic_cgu { /** * struct ingenic_clk - private data for a clock - * @hw: see Documentation/clk.txt + * @hw: see Documentation/driver-api/clk.rst * @cgu: a pointer to the CGU data * @idx: the index of this clock in cgu->clock_info */ diff --git a/drivers/clk/renesas/clk-r8a7740.c b/drivers/clk/renesas/clk-r8a7740.c index d074f8e982d0..a7a30d2eca41 100644 --- a/drivers/clk/renesas/clk-r8a7740.c +++ b/drivers/clk/renesas/clk-r8a7740.c @@ -161,7 +161,7 @@ static void __init r8a7740_cpg_clocks_init(struct device_node *np) } cpg = kzalloc(sizeof(*cpg), GFP_KERNEL); - clks = kzalloc(num_clks * sizeof(*clks), GFP_KERNEL); + clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL); if (cpg == NULL || clks == NULL) { /* We're leaking memory on purpose, there's no point in cleaning * up as the system won't boot anyway. diff --git a/drivers/clk/renesas/clk-r8a7779.c b/drivers/clk/renesas/clk-r8a7779.c index 27fbfafaf2cd..5adcca4656c3 100644 --- a/drivers/clk/renesas/clk-r8a7779.c +++ b/drivers/clk/renesas/clk-r8a7779.c @@ -138,7 +138,7 @@ static void __init r8a7779_cpg_clocks_init(struct device_node *np) } cpg = kzalloc(sizeof(*cpg), GFP_KERNEL); - clks = kzalloc(CPG_NUM_CLOCKS * sizeof(*clks), GFP_KERNEL); + clks = kcalloc(CPG_NUM_CLOCKS, sizeof(*clks), GFP_KERNEL); if (cpg == NULL || clks == NULL) { /* We're leaking memory on purpose, there's no point in cleaning * up as the system won't boot anyway. diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c index ee32a022e6da..bccd62f2cb09 100644 --- a/drivers/clk/renesas/clk-rcar-gen2.c +++ b/drivers/clk/renesas/clk-rcar-gen2.c @@ -417,7 +417,7 @@ static void __init rcar_gen2_cpg_clocks_init(struct device_node *np) } cpg = kzalloc(sizeof(*cpg), GFP_KERNEL); - clks = kzalloc(num_clks * sizeof(*clks), GFP_KERNEL); + clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL); if (cpg == NULL || clks == NULL) { /* We're leaking memory on purpose, there's no point in cleaning * up as the system won't boot anyway. 
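All of the kcalloc()/kmalloc_array() conversions in this series share one rationale: an open-coded n * size multiplication can wrap around SIZE_MAX and silently allocate a buffer that is too small. A minimal sketch of the semantics the helpers provide, assuming only check_mul_overflow() from include/linux/overflow.h (the real kmalloc_array()/kcalloc() live in include/linux/slab.h and differ in detail):

static inline void *example_kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	/* Refuse the allocation instead of wrapping around SIZE_MAX. */
	if (check_mul_overflow(n, size, &bytes))
		return NULL;

	return kmalloc(bytes, flags);
}

kcalloc(n, size, flags) adds __GFP_ZERO on top of this, while the array_size()/array3_size()/struct_size() helpers used with vmalloc()/vzalloc() and flexible-array structs in this section saturate to SIZE_MAX on overflow, so the oversized request fails cleanly rather than quietly undersizing the buffer.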
diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c index 67dd712aa723..ac2f86d626b6 100644 --- a/drivers/clk/renesas/clk-rz.c +++ b/drivers/clk/renesas/clk-rz.c @@ -97,7 +97,7 @@ static void __init rz_cpg_clocks_init(struct device_node *np) return; cpg = kzalloc(sizeof(*cpg), GFP_KERNEL); - clks = kzalloc(num_clks * sizeof(*clks), GFP_KERNEL); + clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL); BUG_ON(!cpg || !clks); cpg->data.clks = clks; diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c index 14819d919df1..a79d81985c4e 100644 --- a/drivers/clk/st/clkgen-fsyn.c +++ b/drivers/clk/st/clkgen-fsyn.c @@ -874,7 +874,7 @@ static void __init st_of_create_quadfs_fsynths( return; clk_data->clk_num = QUADFS_MAX_CHAN; - clk_data->clks = kzalloc(QUADFS_MAX_CHAN * sizeof(struct clk *), + clk_data->clks = kcalloc(QUADFS_MAX_CHAN, sizeof(struct clk *), GFP_KERNEL); if (!clk_data->clks) { diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c index 25bda48a5d35..7a7106dc80bf 100644 --- a/drivers/clk/st/clkgen-pll.c +++ b/drivers/clk/st/clkgen-pll.c @@ -738,7 +738,7 @@ static void __init clkgen_c32_pll_setup(struct device_node *np, return; clk_data->clk_num = num_odfs; - clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *), + clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *), GFP_KERNEL); if (!clk_data->clks) diff --git a/drivers/clk/sunxi/clk-usb.c b/drivers/clk/sunxi/clk-usb.c index fe0c3d169377..917fc27a33dd 100644 --- a/drivers/clk/sunxi/clk-usb.c +++ b/drivers/clk/sunxi/clk-usb.c @@ -122,7 +122,7 @@ static void __init sunxi_usb_clk_setup(struct device_node *node, if (!clk_data) return; - clk_data->clks = kzalloc((qty+1) * sizeof(struct clk *), GFP_KERNEL); + clk_data->clks = kcalloc(qty + 1, sizeof(struct clk *), GFP_KERNEL); if (!clk_data->clks) { kfree(clk_data); return; diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c index 593d76a114f9..ffaf17f71860 100644 --- a/drivers/clk/tegra/clk.c +++ b/drivers/clk/tegra/clk.c @@ -216,14 +216,15 @@ struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks) if (WARN_ON(banks > ARRAY_SIZE(periph_regs))) return NULL; - periph_clk_enb_refcnt = kzalloc(32 * banks * - sizeof(*periph_clk_enb_refcnt), GFP_KERNEL); + periph_clk_enb_refcnt = kcalloc(32 * banks, + sizeof(*periph_clk_enb_refcnt), + GFP_KERNEL); if (!periph_clk_enb_refcnt) return NULL; periph_banks = banks; - clks = kzalloc(num * sizeof(struct clk *), GFP_KERNEL); + clks = kcalloc(num, sizeof(struct clk *), GFP_KERNEL); if (!clks) kfree(periph_clk_enb_refcnt); diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c index d6036c788fab..688e403333b9 100644 --- a/drivers/clk/ti/adpll.c +++ b/drivers/clk/ti/adpll.c @@ -501,8 +501,9 @@ static int ti_adpll_init_dco(struct ti_adpll_data *d) const char *postfix; int width, err; - d->outputs.clks = devm_kzalloc(d->dev, sizeof(struct clk *) * + d->outputs.clks = devm_kcalloc(d->dev, MAX_ADPLL_OUTPUTS, + sizeof(struct clk *), GFP_KERNEL); if (!d->outputs.clks) return -ENOMEM; @@ -915,8 +916,9 @@ static int ti_adpll_probe(struct platform_device *pdev) if (err) return err; - d->clocks = devm_kzalloc(d->dev, sizeof(struct ti_adpll_clock) * + d->clocks = devm_kcalloc(d->dev, TI_ADPLL_NR_CLOCKS, + sizeof(struct ti_adpll_clock), GFP_KERNEL); if (!d->clocks) return -ENOMEM; diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c index 9498e9363b57..61c126a5d26a 100644 --- a/drivers/clk/ti/apll.c +++ b/drivers/clk/ti/apll.c @@ -206,7 +206,7 @@ 
static void __init of_dra7_apll_setup(struct device_node *node) goto cleanup; } - parent_names = kzalloc(sizeof(char *) * init->num_parents, GFP_KERNEL); + parent_names = kcalloc(init->num_parents, sizeof(char *), GFP_KERNEL); if (!parent_names) goto cleanup; diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c index aaa277dd6d99..ccfb4d9a152a 100644 --- a/drivers/clk/ti/divider.c +++ b/drivers/clk/ti/divider.c @@ -366,7 +366,7 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div, num_dividers = i; - tmp = kzalloc(sizeof(*tmp) * (valid_div + 1), GFP_KERNEL); + tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL); if (!tmp) return -ENOMEM; @@ -496,7 +496,7 @@ __init ti_clk_get_div_table(struct device_node *node) return ERR_PTR(-EINVAL); } - table = kzalloc(sizeof(*table) * (valid_div + 1), GFP_KERNEL); + table = kcalloc(valid_div + 1, sizeof(*table), GFP_KERNEL); if (!table) return ERR_PTR(-ENOMEM); diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c index 7d33ca9042cb..dc86d07d0921 100644 --- a/drivers/clk/ti/dpll.c +++ b/drivers/clk/ti/dpll.c @@ -309,7 +309,7 @@ static void __init of_ti_dpll_setup(struct device_node *node, goto cleanup; } - parent_names = kzalloc(sizeof(char *) * init->num_parents, GFP_KERNEL); + parent_names = kcalloc(init->num_parents, sizeof(char *), GFP_KERNEL); if (!parent_names) goto cleanup; diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 70b3cf8e23d0..bbbf37c471a3 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -1000,7 +1000,7 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) /* Allocate and setup the channels. */ cmt->num_channels = hweight8(cmt->hw_channels); - cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels), + cmt->channels = kcalloc(cmt->num_channels, sizeof(*cmt->channels), GFP_KERNEL); if (cmt->channels == NULL) { ret = -ENOMEM; diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index 53aa7e92a7d7..6812e099b6a3 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c @@ -418,7 +418,7 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu, /* Allocate and setup the channels. */ mtu->num_channels = 3; - mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels, + mtu->channels = kcalloc(mtu->num_channels, sizeof(*mtu->channels), GFP_KERNEL); if (mtu->channels == NULL) { ret = -ENOMEM; diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 31d881621e41..c74a6c543ca2 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -569,7 +569,7 @@ static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev) } /* Allocate and setup the channels. 
*/ - tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels, + tmu->channels = kcalloc(tmu->num_channels, sizeof(*tmu->channels), GFP_KERNEL); if (tmu->channels == NULL) { ret = -ENOMEM; diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index c7ce928fbf1f..52f5f1a2040c 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -125,7 +125,7 @@ config ARM_OMAP2PLUS_CPUFREQ default ARCH_OMAP2PLUS config ARM_QCOM_CPUFREQ_KRYO - bool "Qualcomm Kryo based CPUFreq" + tristate "Qualcomm Kryo based CPUFreq" depends on ARM64 depends on QCOM_QFPROM depends on QCOM_SMEM diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 9449657d72f0..b61f4ec43e06 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -465,8 +465,8 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, return result; } -unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy, - unsigned int target_freq) +static unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy, + unsigned int target_freq) { struct acpi_cpufreq_data *data = policy->driver_data; struct acpi_processor_performance *perf; @@ -759,8 +759,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) goto err_unreg; } - freq_table = kzalloc(sizeof(*freq_table) * - (perf->state_count+1), GFP_KERNEL); + freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table), + GFP_KERNEL); if (!freq_table) { result = -ENOMEM; goto err_unreg; diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index 1d7ef5fc1977..cf62a1f64dd7 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c @@ -280,7 +280,7 @@ static int merge_cluster_tables(void) for (i = 0; i < MAX_CLUSTERS; i++) count += get_table_count(freq_table[i]); - table = kzalloc(sizeof(*table) * count, GFP_KERNEL); + table = kcalloc(count, sizeof(*table), GFP_KERNEL); if (!table) return -ENOMEM; diff --git a/drivers/cpufreq/bmips-cpufreq.c b/drivers/cpufreq/bmips-cpufreq.c index 1653151b77df..56a4ebbf00e0 100644 --- a/drivers/cpufreq/bmips-cpufreq.c +++ b/drivers/cpufreq/bmips-cpufreq.c @@ -71,7 +71,7 @@ bmips_cpufreq_get_freq_table(const struct cpufreq_policy *policy) cpu_freq = htp_freq_to_cpu_freq(priv->clk_mult); - table = kmalloc((priv->max_freqs + 1) * sizeof(*table), GFP_KERNEL); + table = kmalloc_array(priv->max_freqs + 1, sizeof(*table), GFP_KERNEL); if (!table) return ERR_PTR(-ENOMEM); diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index b07559b9ed99..e6f9cbe5835f 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -410,7 +410,7 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv) if (ret) return ERR_PTR(ret); - table = devm_kzalloc(dev, (AVS_PSTATE_MAX + 1) * sizeof(*table), + table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1, sizeof(*table), GFP_KERNEL); if (!table) return ERR_PTR(-ENOMEM); diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 3464580ac3ca..a9d3eec32795 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -313,7 +313,8 @@ static int __init cppc_cpufreq_init(void) if (acpi_disabled) return -ENODEV; - all_cpu_data = kzalloc(sizeof(void *) * num_possible_cpus(), GFP_KERNEL); + all_cpu_data = kcalloc(num_possible_cpus(), sizeof(void *), + GFP_KERNEL); if (!all_cpu_data) return -ENOMEM; diff --git a/drivers/cpufreq/cpufreq_governor.c 
b/drivers/cpufreq/cpufreq_governor.c index 871bf9cf55cf..1d50e97d49f1 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -165,7 +165,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy) * calls, so the previous load value can be used then. */ load = j_cdbs->prev_load; - } else if (unlikely(time_elapsed > 2 * sampling_rate && + } else if (unlikely((int)idle_time > 2 * sampling_rate && j_cdbs->prev_load)) { /* * If the CPU had gone completely idle and a task has @@ -185,10 +185,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy) * clear prev_load to guarantee that the load will be * computed again next time. * - * Detecting this situation is easy: the governor's - * utilization update handler would not have run during - * CPU-idle periods. Hence, an unusually large - * 'time_elapsed' (as compared to the sampling rate) + * Detecting this situation is easy: an unusually large + * 'idle_time' (as compared to the sampling rate) * indicates this scenario. */ load = j_cdbs->prev_load; @@ -217,8 +215,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy) j_cdbs->prev_load = load; } - if (time_elapsed > 2 * sampling_rate) { - unsigned int periods = time_elapsed / sampling_rate; + if (unlikely((int)idle_time > 2 * sampling_rate)) { + unsigned int periods = idle_time / sampling_rate; if (periods < idle_periods) idle_periods = periods; diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c index 7974a2fdb760..dd5440d3372d 100644 --- a/drivers/cpufreq/ia64-acpi-cpufreq.c +++ b/drivers/cpufreq/ia64-acpi-cpufreq.c @@ -241,8 +241,8 @@ acpi_cpufreq_cpu_init ( } /* alloc freq_table */ - freq_table = kzalloc(sizeof(*freq_table) * - (data->acpi_data.state_count + 1), + freq_table = kcalloc(data->acpi_data.state_count + 1, + sizeof(*freq_table), GFP_KERNEL); if (!freq_table) { result = -ENOMEM; diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index 83cf631fc9bc..8b3c2a79ad6c 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -266,6 +266,8 @@ static void imx6q_opp_check_speed_grading(struct device *dev) } #define OCOTP_CFG3_6UL_SPEED_696MHZ 0x2 +#define OCOTP_CFG3_6ULL_SPEED_792MHZ 0x2 +#define OCOTP_CFG3_6ULL_SPEED_900MHZ 0x3 static void imx6ul_opp_check_speed_grading(struct device *dev) { @@ -287,16 +289,30 @@ static void imx6ul_opp_check_speed_grading(struct device *dev) * Speed GRADING[1:0] defines the max speed of ARM: * 2b'00: Reserved; * 2b'01: 528000000Hz; - * 2b'10: 696000000Hz; - * 2b'11: Reserved; + * 2b'10: 696000000Hz on i.MX6UL, 792000000Hz on i.MX6ULL; + * 2b'11: 900000000Hz on i.MX6ULL only; * We need to set the max speed of ARM according to fuse map. 
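 * For example, an i.MX6ULL part fused as 2b'10 may run at 792MHz, so the * code below leaves the 792MHz OPP alone and disables only the 900MHz OPP, * while a part fused as 2b'01 is limited to 528MHz and has both disabled.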
*/ val = readl_relaxed(base + OCOTP_CFG3); val >>= OCOTP_CFG3_SPEED_SHIFT; val &= 0x3; - if (val != OCOTP_CFG3_6UL_SPEED_696MHZ) - if (dev_pm_opp_disable(dev, 696000000)) - dev_warn(dev, "failed to disable 696MHz OPP\n"); + + if (of_machine_is_compatible("fsl,imx6ul")) { + if (val != OCOTP_CFG3_6UL_SPEED_696MHZ) + if (dev_pm_opp_disable(dev, 696000000)) + dev_warn(dev, "failed to disable 696MHz OPP\n"); + } + + if (of_machine_is_compatible("fsl,imx6ull")) { + if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ) + if (dev_pm_opp_disable(dev, 792000000)) + dev_warn(dev, "failed to disable 792MHz OPP\n"); + + if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ) + if (dev_pm_opp_disable(dev, 900000000)) + dev_warn(dev, "failed to disable 900MHz OPP\n"); + } + iounmap(base); put_node: of_node_put(np); @@ -356,7 +372,8 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) goto put_reg; } - if (of_machine_is_compatible("fsl,imx6ul")) + if (of_machine_is_compatible("fsl,imx6ul") || + of_machine_is_compatible("fsl,imx6ull")) imx6ul_opp_check_speed_grading(cpu_dev); else imx6q_opp_check_speed_grading(cpu_dev); @@ -377,7 +394,8 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) } /* Make imx6_soc_volt array's size same as arm opp number */ - imx6_soc_volt = devm_kzalloc(cpu_dev, sizeof(*imx6_soc_volt) * num, GFP_KERNEL); + imx6_soc_volt = devm_kcalloc(cpu_dev, num, sizeof(*imx6_soc_volt), + GFP_KERNEL); if (imx6_soc_volt == NULL) { ret = -ENOMEM; goto free_freq_table; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 08960a55eb27..1de5ec8d5ea3 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -221,6 +221,11 @@ struct global_params { * preference/bias * @epp_saved: Saved EPP/EPB during system suspend or CPU offline * operation + * @hwp_req_cached: Cached value of the last HWP Request MSR + * @hwp_cap_cached: Cached value of the last HWP Capabilities MSR + * @last_io_update: Last time when IO wake flag was set + * @sched_flags: Store scheduler flags for possible cross CPU update + * @hwp_boost_min: Last HWP boosted min performance * * This structure stores per CPU instance data for all CPUs. 
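+ * + * The new *_cached fields mirror the last values written to the HWP request + * and capabilities MSRs, letting the boost path below inspect and update + * MSR_HWP_REQUEST from the scheduler callback without an extra MSR read.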
*/ @@ -253,6 +258,11 @@ struct cpudata { s16 epp_policy; s16 epp_default; s16 epp_saved; + u64 hwp_req_cached; + u64 hwp_cap_cached; + u64 last_io_update; + unsigned int sched_flags; + u32 hwp_boost_min; }; static struct cpudata **all_cpu_data; @@ -285,6 +295,7 @@ static struct pstate_funcs pstate_funcs __read_mostly; static int hwp_active __read_mostly; static bool per_cpu_limits __read_mostly; +static bool hwp_boost __read_mostly; static struct cpufreq_driver *intel_pstate_driver __read_mostly; @@ -689,6 +700,7 @@ static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max, u64 cap; rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); + WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap); if (global.no_turbo) *current_max = HWP_GUARANTEED_PERF(cap); else @@ -763,6 +775,7 @@ static void intel_pstate_hwp_set(unsigned int cpu) intel_pstate_set_epb(cpu, epp); } skip_epp: + WRITE_ONCE(cpu_data->hwp_req_cached, value); wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); } @@ -1020,6 +1033,30 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, return count; } +static ssize_t show_hwp_dynamic_boost(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", hwp_boost); +} + +static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = kstrtouint(buf, 10, &input); + if (ret) + return ret; + + mutex_lock(&intel_pstate_driver_lock); + hwp_boost = !!input; + intel_pstate_update_policies(); + mutex_unlock(&intel_pstate_driver_lock); + + return count; +} + show_one(max_perf_pct, max_perf_pct); show_one(min_perf_pct, min_perf_pct); @@ -1029,6 +1066,7 @@ define_one_global_rw(max_perf_pct); define_one_global_rw(min_perf_pct); define_one_global_ro(turbo_pct); define_one_global_ro(num_pstates); +define_one_global_rw(hwp_dynamic_boost); static struct attribute *intel_pstate_attributes[] = { &status.attr, @@ -1069,6 +1107,11 @@ static void __init intel_pstate_sysfs_expose_params(void) rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr); WARN_ON(rc); + if (hwp_active) { + rc = sysfs_create_file(intel_pstate_kobject, + &hwp_dynamic_boost.attr); + WARN_ON(rc); + } } /************************** sysfs end ************************/ @@ -1381,6 +1424,116 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) intel_pstate_set_min_pstate(cpu); } +/* + * Long hold time will keep high perf limits for a long time, + * which negatively impacts perf/watt for some workloads, + * like specpower. 3ms is based on experiments on some + * workloads. + */ +static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC; + +static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu) +{ + u64 hwp_req = READ_ONCE(cpu->hwp_req_cached); + u32 max_limit = (hwp_req & 0xff00) >> 8; + u32 min_limit = (hwp_req & 0xff); + u32 boost_level1; + + /* + * Cases to consider (User changes via sysfs or boot time): + * If, P0 (Turbo max) = P1 (Guaranteed max) = min: + * No boost, return. + * If, P0 (Turbo max) > P1 (Guaranteed max) = min: + * Should result in one level boost only for P0. + * If, P0 (Turbo max) = P1 (Guaranteed max) > min: + * Should result in two level boost: + * (min + p1)/2 and P1. + * If, P0 (Turbo max) > P1 (Guaranteed max) > min: + * Should result in three level boost: + * (min + p1)/2, P1 and P0.
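+ * + * For example, with min = 10, P1 (guaranteed) = 20 and P0 (turbo max) = 25, + * successive boosts raise hwp_boost_min first to 15 ((10 + 20) / 2), then + * to 20 (P1) and finally to 25 (P0).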
+ */ + + /* If max and min are equal or already at max, nothing to boost */ + if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit) + return; + + if (!cpu->hwp_boost_min) + cpu->hwp_boost_min = min_limit; + + /* level at halfway mark between min and guaranteed */ + boost_level1 = (HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) + min_limit) >> 1; + + if (cpu->hwp_boost_min < boost_level1) + cpu->hwp_boost_min = boost_level1; + else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(cpu->hwp_cap_cached)) + cpu->hwp_boost_min = HWP_GUARANTEED_PERF(cpu->hwp_cap_cached); + else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) && + max_limit != HWP_GUARANTEED_PERF(cpu->hwp_cap_cached)) + cpu->hwp_boost_min = max_limit; + else + return; + + hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min; + wrmsrl(MSR_HWP_REQUEST, hwp_req); + cpu->last_update = cpu->sample.time; +} + +static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu) +{ + if (cpu->hwp_boost_min) { + bool expired; + + /* Check if we are idle for hold time to boost down */ + expired = time_after64(cpu->sample.time, cpu->last_update + + hwp_boost_hold_time_ns); + if (expired) { + wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached); + cpu->hwp_boost_min = 0; + } + } + cpu->last_update = cpu->sample.time; +} + +static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu, + u64 time) +{ + cpu->sample.time = time; + + if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) { + bool do_io = false; + + cpu->sched_flags = 0; + /* + * Set iowait_boost flag and update time. Since IO WAIT flag + * is set all the time, we can't just conclude that there is + * some IO bound activity scheduled on this CPU with just + * one occurrence. If we receive at least two in two + * consecutive ticks, then we treat it as a boost candidate. + */ + if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC)) + do_io = true; + + cpu->last_io_update = time; + + if (do_io) + intel_pstate_hwp_boost_up(cpu); + + } else { + intel_pstate_hwp_boost_down(cpu); + } +} + +static inline void intel_pstate_update_util_hwp(struct update_util_data *data, + u64 time, unsigned int flags) +{ + struct cpudata *cpu = container_of(data, struct cpudata, update_util); + + cpu->sched_flags |= flags; + + if (smp_processor_id() == cpu->cpu) + intel_pstate_update_util_hwp_local(cpu, time); +} + static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu) { struct sample *sample = &cpu->sample; @@ -1641,6 +1794,12 @@ static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { {} }; +static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = { + ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs), + ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs), + {} +}; + static int intel_pstate_init_cpu(unsigned int cpunum) { struct cpudata *cpu; @@ -1671,6 +1830,10 @@ static int intel_pstate_init_cpu(unsigned int cpunum) intel_pstate_disable_ee(cpunum); intel_pstate_hwp_enable(cpu); + + id = x86_match_cpu(intel_pstate_hwp_boost_ids); + if (id) + hwp_boost = true; } intel_pstate_get_cpu_pstates(cpu); @@ -1684,7 +1847,7 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num) { struct cpudata *cpu = all_cpu_data[cpu_num]; - if (hwp_active) + if (hwp_active && !hwp_boost) return; if (cpu->update_util_set) @@ -1693,7 +1856,9 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num) /* Prevent intel_pstate_update_util() from using stale data.
*/ cpu->sample.time = 0; cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, - intel_pstate_update_util); + (hwp_active ? + intel_pstate_update_util_hwp : + intel_pstate_update_util)); cpu->update_util_set = true; } @@ -1805,8 +1970,16 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) intel_pstate_set_update_util_hook(policy->cpu); } - if (hwp_active) + if (hwp_active) { + /* + * If hwp_boost was active before and has been turned + * off dynamically, we need to clear the update util + * hook. + */ + if (!hwp_boost) + intel_pstate_clear_update_util_hook(policy->cpu); intel_pstate_hwp_set(policy->cpu); + } mutex_unlock(&intel_pstate_limits_lock); @@ -2339,7 +2512,7 @@ static int __init intel_pstate_init(void) pr_info("Intel P-state driver initializing\n"); - all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus()); + all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus())); if (!all_cpu_data) return -ENOMEM; diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 61a4c5b08219..279bd9e9fa95 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -474,8 +474,8 @@ static int longhaul_get_ranges(void) return -EINVAL; } - longhaul_table = kzalloc((numscales + 1) * sizeof(*longhaul_table), - GFP_KERNEL); + longhaul_table = kcalloc(numscales + 1, sizeof(*longhaul_table), + GFP_KERNEL); if (!longhaul_table) return -ENOMEM; diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c index 7acc7fa4536d..9daa2cc318bb 100644 --- a/drivers/cpufreq/pxa3xx-cpufreq.c +++ b/drivers/cpufreq/pxa3xx-cpufreq.c @@ -93,7 +93,7 @@ static int setup_freqs_table(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table; int i; - table = kzalloc((num + 1) * sizeof(*table), GFP_KERNEL); + table = kcalloc(num + 1, sizeof(*table), GFP_KERNEL); if (table == NULL) return -ENOMEM; diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c index 909bd6e27639..3b291a2b0cb3 100644 --- a/drivers/cpufreq/s3c24xx-cpufreq.c +++ b/drivers/cpufreq/s3c24xx-cpufreq.c @@ -562,7 +562,7 @@ static int s3c_cpufreq_build_freq(void) size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0); size++; - ftab = kzalloc(sizeof(*ftab) * size, GFP_KERNEL); + ftab = kcalloc(size, sizeof(*ftab), GFP_KERNEL); if (!ftab) return -ENOMEM; diff --git a/drivers/cpufreq/sfi-cpufreq.c b/drivers/cpufreq/sfi-cpufreq.c index 9767afe05da2..978770432b13 100644 --- a/drivers/cpufreq/sfi-cpufreq.c +++ b/drivers/cpufreq/sfi-cpufreq.c @@ -95,8 +95,8 @@ static int __init sfi_cpufreq_init(void) if (ret) return ret; - freq_table = kzalloc(sizeof(*freq_table) * - (num_freq_table_entries + 1), GFP_KERNEL); + freq_table = kcalloc(num_freq_table_entries + 1, sizeof(*freq_table), + GFP_KERNEL); if (!freq_table) { ret = -ENOMEM; goto err_free_array; } diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c index 195f27f9c1cb..4074e2615522 100644 --- a/drivers/cpufreq/spear-cpufreq.c +++ b/drivers/cpufreq/spear-cpufreq.c @@ -195,7 +195,7 @@ static int spear_cpufreq_probe(struct platform_device *pdev) cnt = prop->length / sizeof(u32); val = prop->value; - freq_tbl = kzalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL); + freq_tbl = kcalloc(cnt + 1, sizeof(*freq_tbl), GFP_KERNEL); if (!freq_tbl) { ret = -ENOMEM; goto out_put_node; } diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index 6ba709b6f095..3f0e2a14895a 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@
-217,7 +217,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev) if (!match) return -ENODEV; - opp_data = kzalloc(sizeof(*opp_data), GFP_KERNEL); + opp_data = devm_kzalloc(&pdev->dev, sizeof(*opp_data), GFP_KERNEL); if (!opp_data) return -ENOMEM; @@ -226,8 +226,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev) opp_data->cpu_dev = get_cpu_device(0); if (!opp_data->cpu_dev) { pr_err("%s: Failed to get device for CPU0\n", __func__); - ret = ENODEV; - goto free_opp_data; + return -ENODEV; } opp_data->opp_node = dev_pm_opp_of_get_opp_desc_node(opp_data->cpu_dev); @@ -285,8 +284,6 @@ static int ti_cpufreq_probe(struct platform_device *pdev) fail_put_node: of_node_put(opp_data->opp_node); -free_opp_data: - kfree(opp_data); return ret; } diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 9cb234c72549..05981ccd9901 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -141,11 +141,11 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev) int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size) { - ctx->sa_in = kzalloc(size * 4, GFP_ATOMIC); + ctx->sa_in = kcalloc(size, 4, GFP_ATOMIC); if (ctx->sa_in == NULL) return -ENOMEM; - ctx->sa_out = kzalloc(size * 4, GFP_ATOMIC); + ctx->sa_out = kcalloc(size, 4, GFP_ATOMIC); if (ctx->sa_out == NULL) { kfree(ctx->sa_in); ctx->sa_in = NULL; @@ -180,8 +180,8 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev) if (!dev->pdr) return -ENOMEM; - dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD, - GFP_KERNEL); + dev->pdr_uinfo = kcalloc(PPC4XX_NUM_PD, sizeof(struct pd_uinfo), + GFP_KERNEL); if (!dev->pdr_uinfo) { dma_free_coherent(dev->core_dev->device, sizeof(struct ce_pd) * PPC4XX_NUM_PD, diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c index dbead5f45df3..ee0d70ba25d5 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_isr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c @@ -254,7 +254,7 @@ static int nitrox_enable_msix(struct nitrox_device *ndev) * Entry 192: NPS_CORE_INT_ACTIVE */ nr_entries = (ndev->nr_queues * NR_RING_VECTORS) + 1; - entries = kzalloc_node(nr_entries * sizeof(struct msix_entry), + entries = kcalloc_node(nr_entries, sizeof(struct msix_entry), GFP_KERNEL, ndev->node); if (!entries) return -ENOMEM; diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index 51fc6821cbbf..00c7aab8e7d0 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -240,7 +240,7 @@ static int tls_copy_ivs(struct sock *sk, struct sk_buff *skb) } /* generate the IVs */ - ivs = kmalloc(number_of_ivs * CIPHER_BLOCK_SIZE, GFP_ATOMIC); + ivs = kmalloc_array(CIPHER_BLOCK_SIZE, number_of_ivs, GFP_ATOMIC); if (!ivs) return -ENOMEM; get_random_bytes(ivs, number_of_ivs * CIPHER_BLOCK_SIZE); diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index d138d6b8fec5..c77b0e1655a8 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -922,7 +922,7 @@ int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen, crypto_ahash_clear_flags(tfm, ~0); blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); - ipad = kzalloc(2 * blocksize, GFP_KERNEL); + ipad = kcalloc(2, blocksize, GFP_KERNEL); if (!ipad) { ret = -ENOMEM; goto free_request; diff --git a/drivers/crypto/marvell/cesa.c 
b/drivers/crypto/marvell/cesa.c index f81fa4a3e66b..a4aa6813de4b 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -471,7 +471,7 @@ static int mv_cesa_probe(struct platform_device *pdev) sram_size = CESA_SA_MIN_SRAM_SIZE; cesa->sram_size = sram_size; - cesa->engines = devm_kzalloc(dev, caps->nengines * sizeof(*engines), + cesa->engines = devm_kcalloc(dev, caps->nengines, sizeof(*engines), GFP_KERNEL); if (!cesa->engines) return -ENOMEM; diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index e61b08566093..e34d80b6b7e5 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c @@ -1198,7 +1198,7 @@ static int mv_cesa_ahmac_setkey(const char *hash_alg_name, blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); - ipad = kzalloc(2 * blocksize, GFP_KERNEL); + ipad = kcalloc(2, blocksize, GFP_KERNEL); if (!ipad) { ret = -ENOMEM; goto free_req; diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 80e9c842aad4..ab6235b7ff22 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1919,12 +1919,12 @@ static int grab_global_resources(void) goto out_hvapi_release; err = -ENOMEM; - cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS, + cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *), GFP_KERNEL); if (!cpu_to_cwq) goto out_queue_cache_destroy; - cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS, + cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *), GFP_KERNEL); if (!cpu_to_mau) goto out_free_cwq_table; diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c index 06d49017a52b..cd1cdf5305bc 100644 --- a/drivers/crypto/qat/qat_common/adf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_isr.c @@ -238,7 +238,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev) if (!accel_dev->pf.vf_info) msix_num_entries += hw_data->num_banks; - entries = kzalloc_node(msix_num_entries * sizeof(*entries), + entries = kcalloc_node(msix_num_entries, sizeof(*entries), GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); if (!entries) return -ENOMEM; diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 98d22c2096e3..6bd8f6a2a24f 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -1162,8 +1162,9 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle, suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1; if (suof_handle->img_table.num_simgs != 0) { - suof_img_hdr = kzalloc(suof_handle->img_table.num_simgs * - sizeof(img_header), GFP_KERNEL); + suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs, + sizeof(img_header), + GFP_KERNEL); if (!suof_img_hdr) return -ENOMEM; suof_handle->img_table.simg_hdr = suof_img_hdr; diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 981e45692695..cdc96f1bb917 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -970,8 +970,9 @@ static int stm32_hash_export(struct ahash_request *req, void *out) while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY)) cpu_relax(); - rctx->hw_context = kmalloc(sizeof(u32) * (3 + HASH_CSR_REGISTER_NUMBER), - GFP_KERNEL); + rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER, + sizeof(u32), + GFP_KERNEL); preg = rctx->hw_context; diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 7cebf0a6ffbc..cf14f099ce4a 100644 --- 
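/*
 * For buffers that are fully written before use, kmalloc_array() is
 * the non-zeroing counterpart seen in the stm32-hash hunk above: the
 * same overflow-checked multiply as kcalloc(), without the memset.
 * (Note that the chtls conversion earlier passes the element size
 * first; the product is unchanged, but the conventional argument
 * order is (count, size).) The wrapper below is hypothetical.
 */
#include <linux/slab.h>
#include <linux/types.h>

static u32 *alloc_hw_context(size_t extra, size_t nregs)
{
	return kmalloc_array(extra + nregs, sizeof(u32), GFP_KERNEL);
}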
a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -3393,8 +3393,10 @@ static int talitos_probe(struct platform_device *ofdev) } } - priv->chan = devm_kzalloc(dev, sizeof(struct talitos_channel) * - priv->num_channels, GFP_KERNEL); + priv->chan = devm_kcalloc(dev, + priv->num_channels, + sizeof(struct talitos_channel), + GFP_KERNEL); if (!priv->chan) { dev_err(dev, "failed to allocate channel management space\n"); err = -ENOMEM; @@ -3411,9 +3413,10 @@ static int talitos_probe(struct platform_device *ofdev) spin_lock_init(&priv->chan[i].head_lock); spin_lock_init(&priv->chan[i].tail_lock); - priv->chan[i].fifo = devm_kzalloc(dev, - sizeof(struct talitos_request) * - priv->fifo_len, GFP_KERNEL); + priv->chan[i].fifo = devm_kcalloc(dev, + priv->fifo_len, + sizeof(struct talitos_request), + GFP_KERNEL); if (!priv->chan[i].fifo) { dev_err(dev, "failed to allocate request fifo %d\n", i); err = -ENOMEM; diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index ba190cfa7aa1..af6a908dfa7a 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c @@ -371,7 +371,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, /* Why 3? outhdr + iv + inhdr */ sg_total = src_nents + dst_nents + 3; - sgs = kzalloc_node(sg_total * sizeof(*sgs), GFP_ATOMIC, + sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_ATOMIC, dev_to_node(&vcrypto->vdev->dev)); if (!sgs) return -ENOMEM; diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index fe2af6aa88fc..0b5b3abe054e 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -628,14 +628,15 @@ struct devfreq *devfreq_add_device(struct device *dev, goto err_dev; } - devfreq->trans_table = devm_kzalloc(&devfreq->dev, - sizeof(unsigned int) * - devfreq->profile->max_state * - devfreq->profile->max_state, - GFP_KERNEL); - devfreq->time_in_state = devm_kzalloc(&devfreq->dev, - sizeof(unsigned long) * + devfreq->trans_table = + devm_kzalloc(&devfreq->dev, + array3_size(sizeof(unsigned int), + devfreq->profile->max_state, + devfreq->profile->max_state), + GFP_KERNEL); + devfreq->time_in_state = devm_kcalloc(&devfreq->dev, devfreq->profile->max_state, + sizeof(unsigned long), GFP_KERNEL); devfreq->last_stat_updated = jiffies; diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c index d96e3dc71cf8..3cd6a184fe7c 100644 --- a/drivers/devfreq/event/exynos-ppmu.c +++ b/drivers/devfreq/event/exynos-ppmu.c @@ -518,7 +518,7 @@ static int of_get_devfreq_events(struct device_node *np, event_ops = exynos_bus_get_ops(np); count = of_get_child_count(events_np); - desc = devm_kzalloc(dev, sizeof(*desc) * count, GFP_KERNEL); + desc = devm_kcalloc(dev, count, sizeof(*desc), GFP_KERNEL); if (!desc) return -ENOMEM; info->num_events = count; diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c index 7a67b8345092..d91cbbe7a48f 100644 --- a/drivers/dma/bestcomm/bestcomm.c +++ b/drivers/dma/bestcomm/bestcomm.c @@ -87,7 +87,8 @@ bcom_task_alloc(int bd_count, int bd_size, int priv_size) /* Init the BDs, if needed */ if (bd_count) { - tsk->cookie = kmalloc(sizeof(void*) * bd_count, GFP_KERNEL); + tsk->cookie = kmalloc_array(bd_count, sizeof(void *), + GFP_KERNEL); if (!tsk->cookie) goto error; diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index b451354735d3..08ba8473a284 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -38,7 +38,7 @@ * Each device 
has a channels list, which runs unlocked but is never modified * once the device is registered, it's just setup by the driver. * - * See Documentation/dmaengine.txt for more details + * See Documentation/driver-api/dmaengine for more details */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 7792a9186f9c..4fa4c06c9edb 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -322,10 +322,10 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma) unsigned long tmo; unsigned long flags; - src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); + src = kzalloc(IOAT_TEST_SIZE, GFP_KERNEL); if (!src) return -ENOMEM; - dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); + dest = kzalloc(IOAT_TEST_SIZE, GFP_KERNEL); if (!dest) { kfree(src); return -ENOMEM; diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index ed76044ce4b9..bbff52be4f0f 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -910,7 +910,8 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx) /* Called with ichan->chan_mutex held */ static int idmac_desc_alloc(struct idmac_channel *ichan, int n) { - struct idmac_tx_desc *desc = vmalloc(n * sizeof(struct idmac_tx_desc)); + struct idmac_tx_desc *desc = + vmalloc(array_size(n, sizeof(struct idmac_tx_desc))); struct idmac *idmac = to_idmac(ichan->dma_chan.device); if (!desc) diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 26b67455208f..fa31cccbe04f 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -848,8 +848,8 @@ static int k3_dma_probe(struct platform_device *op) return -ENOMEM; /* init phy channel */ - d->phy = devm_kzalloc(&op->dev, - d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL); + d->phy = devm_kcalloc(&op->dev, + d->dma_channels, sizeof(struct k3_dma_phy), GFP_KERNEL); if (d->phy == NULL) return -ENOMEM; @@ -879,8 +879,8 @@ static int k3_dma_probe(struct platform_device *op) d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES; /* init virtual channel */ - d->chans = devm_kzalloc(&op->dev, - d->dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL); + d->chans = devm_kcalloc(&op->dev, + d->dma_requests, sizeof(struct k3_dma_chan), GFP_KERNEL); if (d->chans == NULL) return -ENOMEM; diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c index 94d7bd7d2880..68dd79783b54 100644 --- a/drivers/dma/mic_x100_dma.c +++ b/drivers/dma/mic_x100_dma.c @@ -385,7 +385,8 @@ static int mic_dma_alloc_desc_ring(struct mic_dma_chan *ch) if (dma_mapping_error(dev, ch->desc_ring_micpa)) goto map_error; - ch->tx_array = vzalloc(MIC_DMA_DESC_RX_SIZE * sizeof(*ch->tx_array)); + ch->tx_array = vzalloc(array_size(MIC_DMA_DESC_RX_SIZE, + sizeof(*ch->tx_array))); if (!ch->tx_array) goto tx_error; return 0; diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 1993889003fd..969534c1a6c6 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -777,11 +777,11 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan) struct dmaengine_unmap_data *unmap; int err = 0; - src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL); + src = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!src) return -ENOMEM; - dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL); + dest = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!dest) { kfree(src); return -ENOMEM; diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index 3548caa9e933..c6589ccf1b9a 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c @@ -809,8 
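/*
 * vmalloc()/vzalloc() have no *_array variants, so the idmac and
 * mic_x100 hunks above wrap the multiplication in array_size() from
 * <linux/overflow.h>: on overflow it saturates to SIZE_MAX, and a
 * SIZE_MAX allocation always fails, so the caller sees a clean NULL
 * instead of a short buffer. (array3_size(), used in the devfreq
 * hunk earlier, does the same for three factors.) The element type
 * below is a stand-in.
 */
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

struct example_desc { u64 addr; u32 len; };	/* stand-in element type */

static struct example_desc *alloc_desc_ring(size_t n)
{
	return vmalloc(array_size(n, sizeof(struct example_desc)));
}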
+809,9 @@ static int mv_xor_v2_probe(struct platform_device *pdev) } /* alloc memory for the SW descriptors */ - xor_dev->sw_desq = devm_kzalloc(&pdev->dev, sizeof(*sw_desc) * - MV_XOR_V2_DESC_NUM, GFP_KERNEL); + xor_dev->sw_desq = devm_kcalloc(&pdev->dev, + MV_XOR_V2_DESC_NUM, sizeof(*sw_desc), + GFP_KERNEL); if (!xor_dev->sw_desq) { ret = -ENOMEM; goto free_hw_desq; diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 6237069001c4..defcdde4d358 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -1866,7 +1866,7 @@ static int dmac_alloc_threads(struct pl330_dmac *pl330) int i; /* Allocate 1 Manager and 'chans' Channel threads */ - pl330->channels = kzalloc((1 + chans) * sizeof(*thrd), + pl330->channels = kcalloc(1 + chans, sizeof(*thrd), GFP_KERNEL); if (!pl330->channels) return -ENOMEM; @@ -2990,7 +2990,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pl330->num_peripherals = num_chan; - pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); + pl330->peripherals = kcalloc(num_chan, sizeof(*pch), GFP_KERNEL); if (!pl330->peripherals) { ret = -ENOMEM; goto probe_err2; diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index cd92d696bcf9..7056fe7513b4 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -1223,9 +1223,9 @@ static int s3c24xx_dma_probe(struct platform_device *pdev) if (IS_ERR(s3cdma->base)) return PTR_ERR(s3cdma->base); - s3cdma->phy_chans = devm_kzalloc(&pdev->dev, - sizeof(struct s3c24xx_dma_phy) * - pdata->num_phy_channels, + s3cdma->phy_chans = devm_kcalloc(&pdev->dev, + pdata->num_phy_channels, + sizeof(struct s3c24xx_dma_phy), GFP_KERNEL); if (!s3cdma->phy_chans) return -ENOMEM; diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 12fa48e380cf..6b5626e299b2 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -1045,8 +1045,9 @@ EXPORT_SYMBOL(shdma_cleanup); static int __init shdma_enter(void) { - shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) * - sizeof(long), GFP_KERNEL); + shdma_slave_used = kcalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG), + sizeof(long), + GFP_KERNEL); if (!shdma_slave_used) return -ENOMEM; return 0; diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index f14645817ed8..c74a88b65039 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -471,7 +471,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan) if (ret < 0) return ret; - chan->sw_desc_pool = kzalloc(sizeof(*desc) * ZYNQMP_DMA_NUM_DESCS, + chan->sw_desc_pool = kcalloc(ZYNQMP_DMA_NUM_DESCS, sizeof(*desc), GFP_KERNEL); if (!chan->sw_desc_pool) return -ENOMEM; diff --git a/drivers/dma/zx_dma.c b/drivers/dma/zx_dma.c index 2bb695315300..2571bc7693df 100644 --- a/drivers/dma/zx_dma.c +++ b/drivers/dma/zx_dma.c @@ -798,8 +798,8 @@ static int zx_dma_probe(struct platform_device *op) return -ENOMEM; /* init phy channel */ - d->phy = devm_kzalloc(&op->dev, - d->dma_channels * sizeof(struct zx_dma_phy), GFP_KERNEL); + d->phy = devm_kcalloc(&op->dev, + d->dma_channels, sizeof(struct zx_dma_phy), GFP_KERNEL); if (!d->phy) return -ENOMEM; @@ -834,8 +834,8 @@ static int zx_dma_probe(struct platform_device *op) d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; /* init virtual channel */ - d->chans = devm_kzalloc(&op->dev, - d->dma_requests * sizeof(struct zx_dma_chan), GFP_KERNEL); + d->chans = devm_kcalloc(&op->dev, + d->dma_requests, sizeof(struct zx_dma_chan), 
GFP_KERNEL); if (!d->chans) return -ENOMEM; diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 329cb96f886f..18aeabb1d5ee 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -3451,7 +3451,7 @@ static int __init amd64_edac_init(void) opstate_init(); err = -ENOMEM; - ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL); + ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL); if (!ecc_stngs) goto err_free; diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 4d0ea3563d47..8ed4dd9c571b 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -461,7 +461,7 @@ static struct i7core_dev *alloc_i7core_dev(u8 socket, if (!i7core_dev) return NULL; - i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs, + i7core_dev->pdev = kcalloc(table->n_devs, sizeof(*i7core_dev->pdev), GFP_KERNEL); if (!i7core_dev->pdev) { kfree(i7core_dev); diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index 8bff5fd18185..af83ad58819c 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -1126,8 +1126,9 @@ int extcon_dev_register(struct extcon_dev *edev) char *str; struct extcon_cable *cable; - edev->cables = kzalloc(sizeof(struct extcon_cable) * - edev->max_supported, GFP_KERNEL); + edev->cables = kcalloc(edev->max_supported, + sizeof(struct extcon_cable), + GFP_KERNEL); if (!edev->cables) { ret = -ENOMEM; goto err_sysfs_alloc; @@ -1136,7 +1137,7 @@ int extcon_dev_register(struct extcon_dev *edev) cable = &edev->cables[index]; snprintf(buf, 10, "cable.%d", index); - str = kzalloc(sizeof(char) * (strlen(buf) + 1), + str = kzalloc(strlen(buf) + 1, GFP_KERNEL); if (!str) { for (index--; index >= 0; index--) { @@ -1177,15 +1178,17 @@ int extcon_dev_register(struct extcon_dev *edev) for (index = 0; edev->mutually_exclusive[index]; index++) ; - edev->attrs_muex = kzalloc(sizeof(struct attribute *) * - (index + 1), GFP_KERNEL); + edev->attrs_muex = kcalloc(index + 1, + sizeof(struct attribute *), + GFP_KERNEL); if (!edev->attrs_muex) { ret = -ENOMEM; goto err_muex; } - edev->d_attrs_muex = kzalloc(sizeof(struct device_attribute) * - index, GFP_KERNEL); + edev->d_attrs_muex = kcalloc(index, + sizeof(struct device_attribute), + GFP_KERNEL); if (!edev->d_attrs_muex) { ret = -ENOMEM; kfree(edev->attrs_muex); @@ -1194,7 +1197,7 @@ int extcon_dev_register(struct extcon_dev *edev) for (index = 0; edev->mutually_exclusive[index]; index++) { sprintf(buf, "0x%x", edev->mutually_exclusive[index]); - name = kzalloc(sizeof(char) * (strlen(buf) + 1), + name = kzalloc(strlen(buf) + 1, GFP_KERNEL); if (!name) { for (index--; index >= 0; index--) { @@ -1220,8 +1223,9 @@ int extcon_dev_register(struct extcon_dev *edev) if (edev->max_supported) { edev->extcon_dev_type.groups = - kzalloc(sizeof(struct attribute_group *) * - (edev->max_supported + 2), GFP_KERNEL); + kcalloc(edev->max_supported + 2, + sizeof(struct attribute_group *), + GFP_KERNEL); if (!edev->extcon_dev_type.groups) { ret = -ENOMEM; goto err_alloc_groups; diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c index 38c0aa60b2cb..051327a951b1 100644 --- a/drivers/firewire/core-iso.c +++ b/drivers/firewire/core-iso.c @@ -45,8 +45,8 @@ int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count) buffer->page_count = 0; buffer->page_count_mapped = 0; - buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]), - GFP_KERNEL); + buffer->pages = kmalloc_array(page_count, sizeof(buffer->pages[0]), + 
GFP_KERNEL); if (buffer->pages == NULL) return -ENOMEM; diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 60e75e6d9104..82ba110d9d1a 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -1121,7 +1121,7 @@ static int fwnet_broadcast_start(struct fwnet_device *dev) max_receive = 1U << (dev->card->max_receive + 1); num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive; - ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL); + ptrptr = kmalloc_array(num_packets, sizeof(void *), GFP_KERNEL); if (!ptrptr) { retval = -ENOMEM; goto failed; diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index 6d7a6c0a5e07..c7d06a36b23a 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -890,7 +890,7 @@ static int scpi_alloc_xfer_list(struct device *dev, struct scpi_chan *ch) int i; struct scpi_xfer *xfers; - xfers = devm_kzalloc(dev, MAX_SCPI_XFERS * sizeof(*xfers), GFP_KERNEL); + xfers = devm_kcalloc(dev, MAX_SCPI_XFERS, sizeof(*xfers), GFP_KERNEL); if (!xfers) return -ENOMEM; diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c index 0b631e5b5b84..d25f080fcb0d 100644 --- a/drivers/firmware/broadcom/bcm47xx_nvram.c +++ b/drivers/firmware/broadcom/bcm47xx_nvram.c @@ -36,7 +36,7 @@ struct nvram_header { static char nvram_buf[NVRAM_SPACE]; static size_t nvram_len; -static const u32 nvram_sizes[] = {0x8000, 0xF000, 0x10000}; +static const u32 nvram_sizes[] = {0x6000, 0x8000, 0xF000, 0x10000}; static u32 find_nvram_size(void __iomem *end) { diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c index 2f452f1f7c8a..fb8af5cb7c9b 100644 --- a/drivers/firmware/dell_rbu.c +++ b/drivers/firmware/dell_rbu.c @@ -146,7 +146,7 @@ static int create_packet(void *data, size_t length) packet_array_size = max( (unsigned int)(allocation_floor / rbu_data.packetsize), (unsigned int)1); - invalid_addr_packet_array = kzalloc(packet_array_size * sizeof(void*), + invalid_addr_packet_array = kcalloc(packet_array_size, sizeof(void *), GFP_KERNEL); if (!invalid_addr_packet_array) { diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c index 80d1a885def5..b5214c143fee 100644 --- a/drivers/firmware/efi/arm-init.c +++ b/drivers/firmware/efi/arm-init.c @@ -193,7 +193,7 @@ static __init void reserve_regions(void) * uses its own memory map instead. 
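/*
 * Aside on the extcon hunks a little further back: once the redundant
 * sizeof(char) is gone, the kzalloc(strlen(buf) + 1, ...) plus copy
 * pair is the classic kstrdup() pattern. A sketch, assuming the code
 * following the allocation does copy buf into the new buffer (as
 * extcon does); kstrdup() is the real API.
 */
#include <linux/slab.h>
#include <linux/string.h>

static char *dup_attr_name(const char *buf)
{
	return kstrdup(buf, GFP_KERNEL);  /* allocates strlen(buf) + 1 and copies */
}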
*/ memblock_dump_all(); - memblock_remove(0, (phys_addr_t)ULLONG_MAX); + memblock_remove(0, PHYS_ADDR_MAX); for_each_efi_memory_desc(md) { paddr = md->phys_addr; diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c index 901b9306bf94..4938c29b7c5d 100644 --- a/drivers/firmware/efi/capsule.c +++ b/drivers/firmware/efi/capsule.c @@ -231,7 +231,7 @@ int efi_capsule_update(efi_capsule_header_t *capsule, phys_addr_t *pages) count = DIV_ROUND_UP(imagesize, PAGE_SIZE); sg_count = sg_pages_num(count); - sg_pages = kzalloc(sg_count * sizeof(*sg_pages), GFP_KERNEL); + sg_pages = kcalloc(sg_count, sizeof(*sg_pages), GFP_KERNEL); if (!sg_pages) return -ENOMEM; diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 5a0fa939d70f..cfe87b465819 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -28,10 +28,9 @@ static int efi_pstore_close(struct pstore_info *psi) return 0; } -static inline u64 generic_id(unsigned long timestamp, - unsigned int part, int count) +static inline u64 generic_id(u64 timestamp, unsigned int part, int count) { - return ((u64) timestamp * 100 + part) * 1000 + count; + return (timestamp * 100 + part) * 1000 + count; } static int efi_pstore_read_func(struct efivar_entry *entry, @@ -42,7 +41,8 @@ static int efi_pstore_read_func(struct efivar_entry *entry, int i; int cnt; unsigned int part; - unsigned long time, size; + unsigned long size; + u64 time; if (efi_guidcmp(entry->var.VendorGuid, vendor)) return 0; @@ -50,7 +50,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, for (i = 0; i < DUMP_NAME_LEN; i++) name[i] = entry->var.VariableName[i]; - if (sscanf(name, "dump-type%u-%u-%d-%lu-%c", + if (sscanf(name, "dump-type%u-%u-%d-%llu-%c", &record->type, &part, &cnt, &time, &data_type) == 5) { record->id = generic_id(time, part, cnt); record->part = part; @@ -62,7 +62,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, else record->compressed = false; record->ecc_notice_size = 0; - } else if (sscanf(name, "dump-type%u-%u-%d-%lu", + } else if (sscanf(name, "dump-type%u-%u-%d-%llu", &record->type, &part, &cnt, &time) == 4) { record->id = generic_id(time, part, cnt); record->part = part; @@ -71,7 +71,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, record->time.tv_nsec = 0; record->compressed = false; record->ecc_notice_size = 0; - } else if (sscanf(name, "dump-type%u-%u-%lu", + } else if (sscanf(name, "dump-type%u-%u-%llu", &record->type, &part, &time) == 3) { /* * Check if an old format, @@ -250,9 +250,10 @@ static int efi_pstore_write(struct pstore_record *record) /* Since we copy the entire length of name, make sure it is wiped. */ memset(name, 0, sizeof(name)); - snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lu-%c", + snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lld-%c", record->type, record->part, record->count, - record->time.tv_sec, record->compressed ? 'C' : 'D'); + (long long)record->time.tv_sec, + record->compressed ? 
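/*
 * Sketch of the y2038-safe convention the efi-pstore hunks above
 * adopt: tv_sec may be 32- or 64-bit depending on architecture and
 * config, so it is widened explicitly for printing (%lld with a
 * (long long) cast) and parsed into a u64 with %llu, keeping the EFI
 * variable names stable either way. The helper below is hypothetical.
 */
#include <linux/kernel.h>
#include <linux/time64.h>

static void format_dump_name(char *name, size_t len, unsigned int type,
			     unsigned int part, int count, time64_t sec)
{
	snprintf(name, len, "dump-type%u-%u-%d-%lld",
		 type, part, count, (long long)sec);
}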
'C' : 'D'); for (i = 0; i < DUMP_NAME_LEN; i++) efi_name[i] = name[i]; @@ -327,15 +328,15 @@ static int efi_pstore_erase(struct pstore_record *record) char name[DUMP_NAME_LEN]; int ret; - snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lu", + snprintf(name, sizeof(name), "dump-type%u-%u-%d-%lld", record->type, record->part, record->count, - record->time.tv_sec); + (long long)record->time.tv_sec); ret = efi_pstore_erase_name(name); if (ret != -ENOENT) return ret; - snprintf(name, sizeof(name), "dump-type%u-%u-%lu", - record->type, record->part, record->time.tv_sec); + snprintf(name, sizeof(name), "dump-type%u-%u-%lld", + record->type, record->part, (long long)record->time.tv_sec); ret = efi_pstore_erase_name(name); return ret; diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c index f377609ff141..84a11d0a8023 100644 --- a/drivers/firmware/efi/runtime-map.c +++ b/drivers/firmware/efi/runtime-map.c @@ -166,7 +166,7 @@ int __init efi_runtime_map_init(struct kobject *efi_kobj) if (!efi_enabled(EFI_MEMMAP)) return 0; - map_entries = kzalloc(efi.memmap.nr_map * sizeof(entry), GFP_KERNEL); + map_entries = kcalloc(efi.memmap.nr_map, sizeof(entry), GFP_KERNEL); if (!map_entries) { ret = -ENOMEM; goto out; diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index b74a533ef35b..7fa744793bc5 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -1854,9 +1854,9 @@ static int ti_sci_probe(struct platform_device *pdev) if (!minfo->xfer_block) return -ENOMEM; - minfo->xfer_alloc_table = devm_kzalloc(dev, - BITS_TO_LONGS(desc->max_msgs) - * sizeof(unsigned long), + minfo->xfer_alloc_table = devm_kcalloc(dev, + BITS_TO_LONGS(desc->max_msgs), + sizeof(unsigned long), GFP_KERNEL); if (!minfo->xfer_alloc_table) return -ENOMEM; diff --git a/drivers/fmc/fmc-sdb.c b/drivers/fmc/fmc-sdb.c index ffdc1762b580..d0e65b86dc22 100644 --- a/drivers/fmc/fmc-sdb.c +++ b/drivers/fmc/fmc-sdb.c @@ -48,8 +48,8 @@ static struct sdb_array *__fmc_scan_sdb_tree(struct fmc_device *fmc, arr = kzalloc(sizeof(*arr), GFP_KERNEL); if (!arr) return ERR_PTR(-ENOMEM); - arr->record = kzalloc(sizeof(arr->record[0]) * n, GFP_KERNEL); - arr->subtree = kzalloc(sizeof(arr->subtree[0]) * n, GFP_KERNEL); + arr->record = kcalloc(n, sizeof(arr->record[0]), GFP_KERNEL); + arr->subtree = kcalloc(n, sizeof(arr->subtree[0]), GFP_KERNEL); if (!arr->record || !arr->subtree) { kfree(arr->record); kfree(arr->subtree); diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c index 44c09904daa6..91b90c0cea73 100644 --- a/drivers/gpio/gpio-adnp.c +++ b/drivers/gpio/gpio-adnp.c @@ -427,7 +427,7 @@ static int adnp_irq_setup(struct adnp *adnp) * is chosen to match the register layout of the hardware in that * each segment contains the corresponding bits for all interrupts. */ - adnp->irq_enable = devm_kzalloc(chip->parent, num_regs * 6, + adnp->irq_enable = devm_kcalloc(chip->parent, num_regs, 6, GFP_KERNEL); if (!adnp->irq_enable) return -ENOMEM; diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c index 5e89f1c74a33..1e00f4045f9d 100644 --- a/drivers/gpio/gpio-aspeed.c +++ b/drivers/gpio/gpio-aspeed.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -22,6 +23,15 @@ #include #include +/* + * These two headers aren't meant to be used by GPIO drivers. 
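/*
 * The ti_sci hunk above sizes a bitmap allocation: a bitmap of
 * max_msgs bits occupies BITS_TO_LONGS(max_msgs) longs, e.g. 130
 * messages on a 64-bit machine round up to 3 longs. Keeping the
 * count/size split explicit is what devm_kcalloc() gives over the
 * open-coded multiply. Hypothetical wrapper:
 */
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/slab.h>

static unsigned long *alloc_msg_bitmap(struct device *dev, int max_msgs)
{
	return devm_kcalloc(dev, BITS_TO_LONGS(max_msgs),
			    sizeof(unsigned long), GFP_KERNEL);
}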
We need + them in order to access gpio_chip_hwgpio(), which we need in order to implement + the aspeed-specific API that allows the coprocessor to request + access to some GPIOs and to arbitrate between coprocessor and ARM. + */ +#include <linux/gpio/consumer.h> +#include "gpiolib.h" + struct aspeed_bank_props { unsigned int bank; u32 input; @@ -56,83 +66,130 @@ struct aspeed_gpio { struct clk *clk; u32 *dcache; + u8 *cf_copro_bankmap; }; struct aspeed_gpio_bank { - uint16_t val_regs; + uint16_t val_regs; /* +0: Rd: read input value, Wr: set write latch + * +4: Rd/Wr: Direction (0=in, 1=out) + */ + uint16_t rdata_reg; /* Rd: read write latch, Wr: <none> */ uint16_t irq_regs; uint16_t debounce_regs; uint16_t tolerance_regs; + uint16_t cmdsrc_regs; const char names[4][3]; }; +/* + * Note: The "value" register returns the input value sampled on the + * line even when the GPIO is configured as an output. Since + * that input goes through synchronizers, writing, then reading + * back may not return the written value right away. + * + * The "rdata" register returns the content of the write latch + * and thus can be used to read back what was last written + * reliably. + */ + static const int debounce_timers[4] = { 0x00, 0x50, 0x54, 0x58 }; +static const struct aspeed_gpio_copro_ops *copro_ops; +static void *copro_data; + static const struct aspeed_gpio_bank aspeed_gpio_banks[] = { { .val_regs = 0x0000, + .rdata_reg = 0x00c0, .irq_regs = 0x0008, .debounce_regs = 0x0040, .tolerance_regs = 0x001c, + .cmdsrc_regs = 0x0060, .names = { "A", "B", "C", "D" }, }, { .val_regs = 0x0020, + .rdata_reg = 0x00c4, .irq_regs = 0x0028, .debounce_regs = 0x0048, .tolerance_regs = 0x003c, + .cmdsrc_regs = 0x0068, .names = { "E", "F", "G", "H" }, }, { .val_regs = 0x0070, + .rdata_reg = 0x00c8, .irq_regs = 0x0098, .debounce_regs = 0x00b0, .tolerance_regs = 0x00ac, + .cmdsrc_regs = 0x0090, .names = { "I", "J", "K", "L" }, }, { .val_regs = 0x0078, + .rdata_reg = 0x00cc, .irq_regs = 0x00e8, .debounce_regs = 0x0100, .tolerance_regs = 0x00fc, + .cmdsrc_regs = 0x00e0, .names = { "M", "N", "O", "P" }, }, { .val_regs = 0x0080, + .rdata_reg = 0x00d0, .irq_regs = 0x0118, .debounce_regs = 0x0130, .tolerance_regs = 0x012c, + .cmdsrc_regs = 0x0110, .names = { "Q", "R", "S", "T" }, }, { .val_regs = 0x0088, + .rdata_reg = 0x00d4, .irq_regs = 0x0148, .debounce_regs = 0x0160, .tolerance_regs = 0x015c, + .cmdsrc_regs = 0x0140, .names = { "U", "V", "W", "X" }, }, { .val_regs = 0x01E0, + .rdata_reg = 0x00d8, .irq_regs = 0x0178, .debounce_regs = 0x0190, .tolerance_regs = 0x018c, + .cmdsrc_regs = 0x0170, .names = { "Y", "Z", "AA", "AB" }, }, { .val_regs = 0x01e8, + .rdata_reg = 0x00dc, .irq_regs = 0x01a8, .debounce_regs = 0x01c0, .tolerance_regs = 0x01bc, + .cmdsrc_regs = 0x01a0, .names = { "AC", "", "", "" }, }, }; -#define GPIO_BANK(x) ((x) >> 5) -#define GPIO_OFFSET(x) ((x) & 0x1f) -#define GPIO_BIT(x) BIT(GPIO_OFFSET(x)) +enum aspeed_gpio_reg { + reg_val, + reg_rdata, + reg_dir, + reg_irq_enable, + reg_irq_type0, + reg_irq_type1, + reg_irq_type2, + reg_irq_status, + reg_debounce_sel1, + reg_debounce_sel2, + reg_tolerance, + reg_cmdsrc0, + reg_cmdsrc1, +}; -#define GPIO_DATA 0x00 -#define GPIO_DIR 0x04 +#define GPIO_VAL_VALUE 0x00 +#define GPIO_VAL_DIR 0x04 #define GPIO_IRQ_ENABLE 0x00 #define GPIO_IRQ_TYPE0 0x04 @@ -143,6 +200,53 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = { #define GPIO_DEBOUNCE_SEL1 0x00 #define GPIO_DEBOUNCE_SEL2 0x04 +#define GPIO_CMDSRC_0 0x00 +#define GPIO_CMDSRC_1 0x04 +#define GPIO_CMDSRC_ARM 0 +#define GPIO_CMDSRC_LPC 1
+#define GPIO_CMDSRC_COLDFIRE 2 +#define GPIO_CMDSRC_RESERVED 3 + +/* This will be resolved at compile time */ +static inline void __iomem *bank_reg(struct aspeed_gpio *gpio, + const struct aspeed_gpio_bank *bank, + const enum aspeed_gpio_reg reg) +{ + switch (reg) { + case reg_val: + return gpio->base + bank->val_regs + GPIO_VAL_VALUE; + case reg_rdata: + return gpio->base + bank->rdata_reg; + case reg_dir: + return gpio->base + bank->val_regs + GPIO_VAL_DIR; + case reg_irq_enable: + return gpio->base + bank->irq_regs + GPIO_IRQ_ENABLE; + case reg_irq_type0: + return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE0; + case reg_irq_type1: + return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE1; + case reg_irq_type2: + return gpio->base + bank->irq_regs + GPIO_IRQ_TYPE2; + case reg_irq_status: + return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS; + case reg_debounce_sel1: + return gpio->base + bank->debounce_regs + GPIO_DEBOUNCE_SEL1; + case reg_debounce_sel2: + return gpio->base + bank->debounce_regs + GPIO_DEBOUNCE_SEL2; + case reg_tolerance: + return gpio->base + bank->tolerance_regs; + case reg_cmdsrc0: + return gpio->base + bank->cmdsrc_regs + GPIO_CMDSRC_0; + case reg_cmdsrc1: + return gpio->base + bank->cmdsrc_regs + GPIO_CMDSRC_1; + } + BUG_ON(1); +} + +#define GPIO_BANK(x) ((x) >> 5) +#define GPIO_OFFSET(x) ((x) & 0x1f) +#define GPIO_BIT(x) BIT(GPIO_OFFSET(x)) + #define _GPIO_SET_DEBOUNCE(t, o, i) ((!!((t) & BIT(i))) << GPIO_OFFSET(o)) #define GPIO_SET_DEBOUNCE1(t, o) _GPIO_SET_DEBOUNCE(t, o, 1) #define GPIO_SET_DEBOUNCE2(t, o) _GPIO_SET_DEBOUNCE(t, o, 0) @@ -201,18 +305,80 @@ static inline bool have_output(struct aspeed_gpio *gpio, unsigned int offset) return !props || (props->output & GPIO_BIT(offset)); } -static void __iomem *bank_val_reg(struct aspeed_gpio *gpio, - const struct aspeed_gpio_bank *bank, - unsigned int reg) +static void aspeed_gpio_change_cmd_source(struct aspeed_gpio *gpio, + const struct aspeed_gpio_bank *bank, + int bindex, int cmdsrc) { - return gpio->base + bank->val_regs + reg; + void __iomem *c0 = bank_reg(gpio, bank, reg_cmdsrc0); + void __iomem *c1 = bank_reg(gpio, bank, reg_cmdsrc1); + u32 bit, reg; + + /* + * Each register controls 4 banks, so take the bottom 2 + * bits of the bank index, and use them to select the + * right control bit (0, 8, 16 or 24). 
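/*
 * Usage sketch for the bank_reg() accessor defined above. It reuses
 * this driver's internal types and helper, so it only compiles in
 * that context; it is shown standalone for illustration. Callers
 * name registers symbolically, and because the switch is over a
 * compile-time-constant enum value, the compiler folds each call
 * down to a single base-plus-offset computation.
 */
#include <linux/io.h>

static u32 read_irq_status(struct aspeed_gpio *gpio,
			   const struct aspeed_gpio_bank *bank)
{
	return ioread32(bank_reg(gpio, bank, reg_irq_status));
}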
+ */ + bit = BIT((bindex & 3) << 3); + + /* Source 1 first to avoid illegal 11 combination */ + reg = ioread32(c1); + if (cmdsrc & 2) + reg |= bit; + else + reg &= ~bit; + iowrite32(reg, c1); + + /* Then Source 0 */ + reg = ioread32(c0); + if (cmdsrc & 1) + reg |= bit; + else + reg &= ~bit; + iowrite32(reg, c0); } -static void __iomem *bank_irq_reg(struct aspeed_gpio *gpio, - const struct aspeed_gpio_bank *bank, - unsigned int reg) +static bool aspeed_gpio_copro_request(struct aspeed_gpio *gpio, + unsigned int offset) { - return gpio->base + bank->irq_regs + reg; + const struct aspeed_gpio_bank *bank = to_bank(offset); + + if (!copro_ops || !gpio->cf_copro_bankmap) + return false; + if (!gpio->cf_copro_bankmap[offset >> 3]) + return false; + if (!copro_ops->request_access) + return false; + + /* Pause the coprocessor */ + copro_ops->request_access(copro_data); + + /* Change command source back to ARM */ + aspeed_gpio_change_cmd_source(gpio, bank, offset >> 3, GPIO_CMDSRC_ARM); + + /* Update cache */ + gpio->dcache[GPIO_BANK(offset)] = ioread32(bank_reg(gpio, bank, reg_rdata)); + + return true; +} + +static void aspeed_gpio_copro_release(struct aspeed_gpio *gpio, + unsigned int offset) +{ + const struct aspeed_gpio_bank *bank = to_bank(offset); + + if (!copro_ops || !gpio->cf_copro_bankmap) + return; + if (!gpio->cf_copro_bankmap[offset >> 3]) + return; + if (!copro_ops->release_access) + return; + + /* Change command source back to ColdFire */ + aspeed_gpio_change_cmd_source(gpio, bank, offset >> 3, + GPIO_CMDSRC_COLDFIRE); + + /* Restart the coprocessor */ + copro_ops->release_access(copro_data); } static int aspeed_gpio_get(struct gpio_chip *gc, unsigned int offset) @@ -220,8 +386,7 @@ static int aspeed_gpio_get(struct gpio_chip *gc, unsigned int offset) struct aspeed_gpio *gpio = gpiochip_get_data(gc); const struct aspeed_gpio_bank *bank = to_bank(offset); - return !!(ioread32(bank_val_reg(gpio, bank, GPIO_DATA)) - & GPIO_BIT(offset)); + return !!(ioread32(bank_reg(gpio, bank, reg_val)) & GPIO_BIT(offset)); } static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset, @@ -232,7 +397,7 @@ static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset, void __iomem *addr; u32 reg; - addr = bank_val_reg(gpio, bank, GPIO_DATA); + addr = bank_reg(gpio, bank, reg_val); reg = gpio->dcache[GPIO_BANK(offset)]; if (val) @@ -249,11 +414,15 @@ static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset, { struct aspeed_gpio *gpio = gpiochip_get_data(gc); unsigned long flags; + bool copro; spin_lock_irqsave(&gpio->lock, flags); + copro = aspeed_gpio_copro_request(gpio, offset); __aspeed_gpio_set(gc, offset, val); + if (copro) + aspeed_gpio_copro_release(gpio, offset); spin_unlock_irqrestore(&gpio->lock, flags); } @@ -261,7 +430,9 @@ static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset) { struct aspeed_gpio *gpio = gpiochip_get_data(gc); const struct aspeed_gpio_bank *bank = to_bank(offset); + void __iomem *addr = bank_reg(gpio, bank, reg_dir); unsigned long flags; + bool copro; u32 reg; if (!have_input(gpio, offset)) @@ -269,8 +440,13 @@ static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset) spin_lock_irqsave(&gpio->lock, flags); - reg = ioread32(bank_val_reg(gpio, bank, GPIO_DIR)); - iowrite32(reg & ~GPIO_BIT(offset), bank_val_reg(gpio, bank, GPIO_DIR)); + reg = ioread32(addr); + reg &= ~GPIO_BIT(offset); + + copro = aspeed_gpio_copro_request(gpio, offset); + iowrite32(reg, addr); + if (copro) + aspeed_gpio_copro_release(gpio, 
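/*
 * Worked example of the command-source bit selection above: each
 * 32-bit cmdsrc register covers four banks, one control bit per
 * byte, so (bindex & 3) << 3 yields bit position 0, 8, 16 or 24.
 * The helper function itself is hypothetical.
 */
#include <linux/bitops.h>
#include <linux/types.h>

static u32 cmdsrc_bank_bit(int bindex)
{
	return BIT((bindex & 3) << 3);	/* 0->bit 0, 1->bit 8, 2->bit 16, 3->bit 24 */
}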
offset); spin_unlock_irqrestore(&gpio->lock, flags); @@ -282,7 +458,9 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc, { struct aspeed_gpio *gpio = gpiochip_get_data(gc); const struct aspeed_gpio_bank *bank = to_bank(offset); + void __iomem *addr = bank_reg(gpio, bank, reg_dir); unsigned long flags; + bool copro; u32 reg; if (!have_output(gpio, offset)) @@ -290,10 +468,15 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc, spin_lock_irqsave(&gpio->lock, flags); - __aspeed_gpio_set(gc, offset, val); - reg = ioread32(bank_val_reg(gpio, bank, GPIO_DIR)); - iowrite32(reg | GPIO_BIT(offset), bank_val_reg(gpio, bank, GPIO_DIR)); + reg = ioread32(addr); + reg |= GPIO_BIT(offset); + copro = aspeed_gpio_copro_request(gpio, offset); + __aspeed_gpio_set(gc, offset, val); + iowrite32(reg, addr); + + if (copro) + aspeed_gpio_copro_release(gpio, offset); spin_unlock_irqrestore(&gpio->lock, flags); return 0; @@ -314,7 +497,7 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) spin_lock_irqsave(&gpio->lock, flags); - val = ioread32(bank_val_reg(gpio, bank, GPIO_DIR)) & GPIO_BIT(offset); + val = ioread32(bank_reg(gpio, bank, reg_dir)) & GPIO_BIT(offset); spin_unlock_irqrestore(&gpio->lock, flags); @@ -323,24 +506,23 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) } static inline int irqd_to_aspeed_gpio_data(struct irq_data *d, - struct aspeed_gpio **gpio, - const struct aspeed_gpio_bank **bank, - u32 *bit) + struct aspeed_gpio **gpio, + const struct aspeed_gpio_bank **bank, + u32 *bit, int *offset) { - int offset; struct aspeed_gpio *internal; - offset = irqd_to_hwirq(d); + *offset = irqd_to_hwirq(d); internal = irq_data_get_irq_chip_data(d); /* This might be a bit of a questionable place to check */ - if (!have_irq(internal, offset)) + if (!have_irq(internal, *offset)) return -ENOTSUPP; *gpio = internal; - *bank = to_bank(offset); - *bit = GPIO_BIT(offset); + *bank = to_bank(*offset); + *bit = GPIO_BIT(*offset); return 0; } @@ -351,17 +533,23 @@ static void aspeed_gpio_irq_ack(struct irq_data *d) struct aspeed_gpio *gpio; unsigned long flags; void __iomem *status_addr; + int rc, offset; + bool copro; u32 bit; - int rc; - rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit); + rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset); if (rc) return; - status_addr = bank_irq_reg(gpio, bank, GPIO_IRQ_STATUS); + status_addr = bank_reg(gpio, bank, reg_irq_status); spin_lock_irqsave(&gpio->lock, flags); + copro = aspeed_gpio_copro_request(gpio, offset); + iowrite32(bit, status_addr); + + if (copro) + aspeed_gpio_copro_release(gpio, offset); spin_unlock_irqrestore(&gpio->lock, flags); } @@ -372,15 +560,17 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set) unsigned long flags; u32 reg, bit; void __iomem *addr; - int rc; + int rc, offset; + bool copro; - rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit); + rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset); if (rc) return; - addr = bank_irq_reg(gpio, bank, GPIO_IRQ_ENABLE); + addr = bank_reg(gpio, bank, reg_irq_enable); spin_lock_irqsave(&gpio->lock, flags); + copro = aspeed_gpio_copro_request(gpio, offset); reg = ioread32(addr); if (set) @@ -389,6 +579,8 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set) reg &= ~bit; iowrite32(reg, addr); + if (copro) + aspeed_gpio_copro_release(gpio, offset); spin_unlock_irqrestore(&gpio->lock, flags); } @@ -413,9 +605,10 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type) 
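/*
 * The same arbitration pattern now wraps every register update in
 * this driver; a condensed sketch, reusing the driver's internal
 * helpers (aspeed_gpio_copro_request/release are static above, so
 * this compiles only inside the driver): take the lock, pause the
 * coprocessor and reclaim the command source if the bank is shared,
 * do the read-modify-write, then hand the bank back.
 */
#include <linux/io.h>
#include <linux/spinlock.h>

static void rmw_arbitrated(struct aspeed_gpio *gpio, void __iomem *addr,
			   unsigned int offset, u32 set_mask)
{
	unsigned long flags;
	bool copro;

	spin_lock_irqsave(&gpio->lock, flags);
	copro = aspeed_gpio_copro_request(gpio, offset);

	iowrite32(ioread32(addr) | set_mask, addr);

	if (copro)
		aspeed_gpio_copro_release(gpio, offset);
	spin_unlock_irqrestore(&gpio->lock, flags);
}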
struct aspeed_gpio *gpio; unsigned long flags; void __iomem *addr; - int rc; + int rc, offset; + bool copro; - rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit); + rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset); if (rc) return -EINVAL; @@ -441,22 +634,25 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type) } spin_lock_irqsave(&gpio->lock, flags); + copro = aspeed_gpio_copro_request(gpio, offset); - addr = bank_irq_reg(gpio, bank, GPIO_IRQ_TYPE0); + addr = bank_reg(gpio, bank, reg_irq_type0); reg = ioread32(addr); reg = (reg & ~bit) | type0; iowrite32(reg, addr); - addr = bank_irq_reg(gpio, bank, GPIO_IRQ_TYPE1); + addr = bank_reg(gpio, bank, reg_irq_type1); reg = ioread32(addr); reg = (reg & ~bit) | type1; iowrite32(reg, addr); - addr = bank_irq_reg(gpio, bank, GPIO_IRQ_TYPE2); + addr = bank_reg(gpio, bank, reg_irq_type2); reg = ioread32(addr); reg = (reg & ~bit) | type2; iowrite32(reg, addr); + if (copro) + aspeed_gpio_copro_release(gpio, offset); spin_unlock_irqrestore(&gpio->lock, flags); irq_set_handler_locked(d, handler); @@ -477,7 +673,7 @@ static void aspeed_gpio_irq_handler(struct irq_desc *desc) for (i = 0; i < ARRAY_SIZE(aspeed_gpio_banks); i++) { const struct aspeed_gpio_bank *bank = &aspeed_gpio_banks[i]; - reg = ioread32(bank_irq_reg(data, bank, GPIO_IRQ_STATUS)); + reg = ioread32(bank_reg(data, bank, reg_irq_status)); for_each_set_bit(p, &reg, 32) { girq = irq_find_mapping(gc->irq.domain, i * 32 + p); @@ -549,21 +745,27 @@ static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip, unsigned int offset, bool enable) { struct aspeed_gpio *gpio = gpiochip_get_data(chip); - const struct aspeed_gpio_bank *bank; unsigned long flags; + void __iomem *treg; + bool copro; u32 val; - bank = to_bank(offset); + treg = bank_reg(gpio, to_bank(offset), reg_tolerance); spin_lock_irqsave(&gpio->lock, flags); - val = readl(gpio->base + bank->tolerance_regs); + copro = aspeed_gpio_copro_request(gpio, offset); + + val = readl(treg); if (enable) val |= GPIO_BIT(offset); else val &= ~GPIO_BIT(offset); - writel(val, gpio->base + bank->tolerance_regs); + writel(val, treg); + + if (copro) + aspeed_gpio_copro_release(gpio, offset); spin_unlock_irqrestore(&gpio->lock, flags); return 0; @@ -582,13 +784,6 @@ static void aspeed_gpio_free(struct gpio_chip *chip, unsigned int offset) pinctrl_gpio_free(chip->base + offset); } -static inline void __iomem *bank_debounce_reg(struct aspeed_gpio *gpio, - const struct aspeed_gpio_bank *bank, - unsigned int reg) -{ - return gpio->base + bank->debounce_regs + reg; -} - static int usecs_to_cycles(struct aspeed_gpio *gpio, unsigned long usecs, u32 *cycles) { @@ -666,11 +861,14 @@ static void configure_timer(struct aspeed_gpio *gpio, unsigned int offset, void __iomem *addr; u32 val; - addr = bank_debounce_reg(gpio, bank, GPIO_DEBOUNCE_SEL1); + /* Note: Debounce timer isn't under control of the command + * source registers, so no need to sync with the coprocessor + */ + addr = bank_reg(gpio, bank, reg_debounce_sel1); val = ioread32(addr); iowrite32((val & ~mask) | GPIO_SET_DEBOUNCE1(timer, offset), addr); - addr = bank_debounce_reg(gpio, bank, GPIO_DEBOUNCE_SEL2); + addr = bank_reg(gpio, bank, reg_debounce_sel2); val = ioread32(addr); iowrite32((val & ~mask) | GPIO_SET_DEBOUNCE2(timer, offset), addr); } @@ -812,6 +1010,111 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset, return -ENOTSUPP; } +/** + * aspeed_gpio_copro_set_ops - Sets the callbacks used for handshaking with + * the coprocessor for
shared GPIO banks + * @ops: The callbacks + * @data: Pointer passed back to the callbacks + */ +int aspeed_gpio_copro_set_ops(const struct aspeed_gpio_copro_ops *ops, void *data) +{ + copro_data = data; + copro_ops = ops; + + return 0; +} +EXPORT_SYMBOL_GPL(aspeed_gpio_copro_set_ops); + +/** + * aspeed_gpio_copro_grab_gpio - Mark a GPIO used by the coprocessor. The entire + * bank gets marked and any access from the ARM will + * result in handshaking via callbacks. + * @desc: The GPIO to be marked + * @vreg_offset: If non-NULL, returns the value register offset in the GPIO space + * @dreg_offset: If non-NULL, returns the data latch register offset in the GPIO space + * @bit: If non-NULL, returns the bit number of the GPIO in the registers + */ +int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc, + u16 *vreg_offset, u16 *dreg_offset, u8 *bit) +{ + struct gpio_chip *chip = gpiod_to_chip(desc); + struct aspeed_gpio *gpio = gpiochip_get_data(chip); + int rc = 0, bindex, offset = gpio_chip_hwgpio(desc); + const struct aspeed_gpio_bank *bank = to_bank(offset); + unsigned long flags; + + if (!gpio->cf_copro_bankmap) + gpio->cf_copro_bankmap = kzalloc(gpio->config->nr_gpios >> 3, GFP_KERNEL); + if (!gpio->cf_copro_bankmap) + return -ENOMEM; + if (offset < 0 || offset > gpio->config->nr_gpios) + return -EINVAL; + bindex = offset >> 3; + + spin_lock_irqsave(&gpio->lock, flags); + + /* Sanity check, this shouldn't happen */ + if (gpio->cf_copro_bankmap[bindex] == 0xff) { + rc = -EIO; + goto bail; + } + gpio->cf_copro_bankmap[bindex]++; + + /* Switch command source */ + if (gpio->cf_copro_bankmap[bindex] == 1) + aspeed_gpio_change_cmd_source(gpio, bank, bindex, + GPIO_CMDSRC_COLDFIRE); + + if (vreg_offset) + *vreg_offset = bank->val_regs; + if (dreg_offset) + *dreg_offset = bank->rdata_reg; + if (bit) + *bit = GPIO_OFFSET(offset); + bail: + spin_unlock_irqrestore(&gpio->lock, flags); + return rc; +} +EXPORT_SYMBOL_GPL(aspeed_gpio_copro_grab_gpio); + +/** + * aspeed_gpio_copro_release_gpio - Unmark a GPIO used by the coprocessor. 
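/*
 * Hypothetical client-side sketch of the API exported here, assuming
 * the declarations live in a header such as <linux/gpio/aspeed.h>
 * (the exact path is an assumption): a coprocessor driver registers
 * its pause/resume callbacks once, then grabs the line it wants the
 * ColdFire to own and receives the register offsets and bit number
 * that the firmware can poke directly.
 */
#include <linux/gpio/consumer.h>
#include <linux/types.h>

static int copro_claim_line(struct gpio_desc *desc,
			    const struct aspeed_gpio_copro_ops *ops,
			    void *data)
{
	u16 vreg, dreg;
	u8 bit;
	int rc;

	rc = aspeed_gpio_copro_set_ops(ops, data);
	if (rc)
		return rc;

	rc = aspeed_gpio_copro_grab_gpio(desc, &vreg, &dreg, &bit);
	if (rc)
		aspeed_gpio_copro_set_ops(NULL, NULL);	/* undo on failure */
	return rc;
}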
+ * @desc: The GPIO to be marked + */ +int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc) +{ + struct gpio_chip *chip = gpiod_to_chip(desc); + struct aspeed_gpio *gpio = gpiochip_get_data(chip); + int rc = 0, bindex, offset = gpio_chip_hwgpio(desc); + const struct aspeed_gpio_bank *bank = to_bank(offset); + unsigned long flags; + + if (!gpio->cf_copro_bankmap) + return -ENXIO; + + if (offset < 0 || offset > gpio->config->nr_gpios) + return -EINVAL; + bindex = offset >> 3; + + spin_lock_irqsave(&gpio->lock, flags); + + /* Sanity check, this shouldn't happen */ + if (gpio->cf_copro_bankmap[bindex] == 0) { + rc = -EIO; + goto bail; + } + gpio->cf_copro_bankmap[bindex]--; + + /* Switch command source */ + if (gpio->cf_copro_bankmap[bindex] == 0) + aspeed_gpio_change_cmd_source(gpio, bank, bindex, + GPIO_CMDSRC_ARM); + bail: + spin_unlock_irqrestore(&gpio->lock, flags); + return rc; +} +EXPORT_SYMBOL_GPL(aspeed_gpio_copro_release_gpio); + /* * Any banks not specified in a struct aspeed_bank_props array are assumed to * have the properties: @@ -897,16 +1200,23 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev) /* Allocate a cache of the output registers */ banks = gpio->config->nr_gpios >> 5; - gpio->dcache = devm_kzalloc(&pdev->dev, - sizeof(u32) * banks, GFP_KERNEL); + gpio->dcache = devm_kcalloc(&pdev->dev, + banks, sizeof(u32), GFP_KERNEL); if (!gpio->dcache) return -ENOMEM; - /* Populate it with initial values read from the HW */ + /* + * Populate it with initial values read from the HW and switch + * all command sources to the ARM by default + */ for (i = 0; i < banks; i++) { const struct aspeed_gpio_bank *bank = &aspeed_gpio_banks[i]; - gpio->dcache[i] = ioread32(gpio->base + bank->val_regs + - GPIO_DATA); + void __iomem *addr = bank_reg(gpio, bank, reg_rdata); + gpio->dcache[i] = ioread32(addr); + aspeed_gpio_change_cmd_source(gpio, bank, 0, GPIO_CMDSRC_ARM); + aspeed_gpio_change_cmd_source(gpio, bank, 1, GPIO_CMDSRC_ARM); + aspeed_gpio_change_cmd_source(gpio, bank, 2, GPIO_CMDSRC_ARM); + aspeed_gpio_change_cmd_source(gpio, bank, 3, GPIO_CMDSRC_ARM); } rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio); diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c index eb8369b21e90..00272fa7cc4f 100644 --- a/drivers/gpio/gpio-bcm-kona.c +++ b/drivers/gpio/gpio-bcm-kona.c @@ -601,9 +601,10 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev) GPIO_MAX_BANK_NUM); return -ENXIO; } - kona_gpio->banks = devm_kzalloc(dev, - kona_gpio->num_bank * - sizeof(*kona_gpio->banks), GFP_KERNEL); + kona_gpio->banks = devm_kcalloc(dev, + kona_gpio->num_bank, + sizeof(*kona_gpio->banks), + GFP_KERNEL); if (!kona_gpio->banks) return -ENOMEM; diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index b574ecff7761..035a454eca43 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -198,8 +198,8 @@ static int davinci_gpio_probe(struct platform_device *pdev) ngpio = ARCH_NR_GPIOS; nbank = DIV_ROUND_UP(ngpio, 32); - chips = devm_kzalloc(dev, - nbank * sizeof(struct davinci_gpio_controller), + chips = devm_kcalloc(dev, + nbank, sizeof(struct davinci_gpio_controller), GFP_KERNEL); if (!chips) return -ENOMEM; diff --git a/drivers/gpio/gpio-htc-egpio.c b/drivers/gpio/gpio-htc-egpio.c index 516383934945..ad6e5b518669 100644 --- a/drivers/gpio/gpio-htc-egpio.c +++ b/drivers/gpio/gpio-htc-egpio.c @@ -321,8 +321,8 @@ static int __init egpio_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ei); ei->nchips = 
pdata->num_chips; - ei->chip = devm_kzalloc(&pdev->dev, - sizeof(struct egpio_chip) * ei->nchips, + ei->chip = devm_kcalloc(&pdev->dev, + ei->nchips, sizeof(struct egpio_chip), GFP_KERNEL); if (!ei->chip) { ret = -ENOMEM; diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c index e2bee27eb526..b23d9a36be1f 100644 --- a/drivers/gpio/gpio-ml-ioh.c +++ b/drivers/gpio/gpio-ml-ioh.c @@ -443,7 +443,7 @@ static int ioh_gpio_probe(struct pci_dev *pdev, goto err_iomap; } - chip_save = kzalloc(sizeof(*chip) * 8, GFP_KERNEL); + chip_save = kcalloc(8, sizeof(*chip), GFP_KERNEL); if (chip_save == NULL) { ret = -ENOMEM; goto err_kzalloc; diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c index d16e9d4a129b..1306722faa5a 100644 --- a/drivers/gpio/gpio-thunderx.c +++ b/drivers/gpio/gpio-thunderx.c @@ -504,16 +504,17 @@ static int thunderx_gpio_probe(struct pci_dev *pdev, txgpio->base_msi = (c >> 8) & 0xff; } - txgpio->msix_entries = devm_kzalloc(dev, - sizeof(struct msix_entry) * ngpio, + txgpio->msix_entries = devm_kcalloc(dev, + ngpio, sizeof(struct msix_entry), GFP_KERNEL); if (!txgpio->msix_entries) { err = -ENOMEM; goto out; } - txgpio->line_entries = devm_kzalloc(dev, - sizeof(struct thunderx_line) * ngpio, + txgpio->line_entries = devm_kcalloc(dev, + ngpio, + sizeof(struct thunderx_line), GFP_KERNEL); if (!txgpio->line_entries) { err = -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 428e5eb3444f..f4c474a95875 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -310,20 +310,20 @@ static int acp_hw_init(void *handle) pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false); } - adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS, + adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell), GFP_KERNEL); if (adev->acp.acp_cell == NULL) return -ENOMEM; - adev->acp.acp_res = kzalloc(sizeof(struct resource) * 4, GFP_KERNEL); + adev->acp.acp_res = kcalloc(4, sizeof(struct resource), GFP_KERNEL); if (adev->acp.acp_res == NULL) { kfree(adev->acp.acp_cell); return -ENOMEM; } - i2s_pdata = kzalloc(sizeof(struct i2s_platform_data) * 2, GFP_KERNEL); + i2s_pdata = kcalloc(2, sizeof(struct i2s_platform_data), GFP_KERNEL); if (i2s_pdata == NULL) { kfree(adev->acp.acp_res); kfree(adev->acp.acp_cell); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 8f6f45567bfa..305143fcc1ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -342,15 +342,12 @@ void get_local_mem_info(struct kgd_dev *kgd, mem_info->local_mem_size_public, mem_info->local_mem_size_private); - if (amdgpu_emu_mode == 1) { - mem_info->mem_clk_max = 100; - return; - } - if (amdgpu_sriov_vf(adev)) mem_info->mem_clk_max = adev->clock.default_mclk / 100; - else + else if (adev->powerplay.pp_funcs) mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100; + else + mem_info->mem_clk_max = 100; } uint64_t get_gpu_clock_counter(struct kgd_dev *kgd) @@ -367,13 +364,12 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd) struct amdgpu_device *adev = (struct amdgpu_device *)kgd; /* the sclk is in quantas of 10kHz */ - if (amdgpu_emu_mode == 1) - return 100; - if (amdgpu_sriov_vf(adev)) return adev->clock.default_sclk / 100; - - return amdgpu_dpm_get_sclk(adev, false) / 100; + else if (adev->powerplay.pp_funcs) + return amdgpu_dpm_get_sclk(adev, false) / 100; + else + return 100; 
} void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 0ff36d45a597..ea79908dac4c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -407,7 +407,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, (*dump)[i++][1] = RREG32(addr); \ } while (0) - *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; @@ -504,7 +504,7 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, #undef HQD_N_REGS #define HQD_N_REGS (19+4) - *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 6ef9762b4b00..19dd665e7307 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -395,7 +395,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, (*dump)[i++][1] = RREG32(addr); \ } while (0) - *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; @@ -491,7 +491,7 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, #undef HQD_N_REGS #define HQD_N_REGS (19+4+2+3+7) - *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index f0c0d3953f69..1db60aa5b7f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -504,7 +504,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, (*dump)[i++][1] = RREG32(addr); \ } while (0) - *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; @@ -606,7 +606,7 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, #undef HQD_N_REGS #define HQD_N_REGS (19+6+7+10) - *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 1bcb2b247335..daa06e7c5bb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -569,7 +569,6 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = { { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, - { 0x1002, 0x67DF, 0x1028, 0x0774, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0, 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 9c1d491d742e..82312a7bc6ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -522,6 +522,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, struct amdgpu_bo_list_entry *e; struct list_head duplicates; 
unsigned i, tries = 10; + struct amdgpu_bo *gds; + struct amdgpu_bo *gws; + struct amdgpu_bo *oa; int r; INIT_LIST_HEAD(&p->validated); @@ -652,31 +655,36 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved, p->bytes_moved_vis); + if (p->bo_list) { - struct amdgpu_bo *gds = p->bo_list->gds_obj; - struct amdgpu_bo *gws = p->bo_list->gws_obj; - struct amdgpu_bo *oa = p->bo_list->oa_obj; struct amdgpu_vm *vm = &fpriv->vm; unsigned i; + gds = p->bo_list->gds_obj; + gws = p->bo_list->gws_obj; + oa = p->bo_list->oa_obj; for (i = 0; i < p->bo_list->num_entries; i++) { struct amdgpu_bo *bo = p->bo_list->array[i].robj; p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo); } + } else { + gds = p->adev->gds.gds_gfx_bo; + gws = p->adev->gds.gws_gfx_bo; + oa = p->adev->gds.oa_gfx_bo; + } - if (gds) { - p->job->gds_base = amdgpu_bo_gpu_offset(gds); - p->job->gds_size = amdgpu_bo_size(gds); - } - if (gws) { - p->job->gws_base = amdgpu_bo_gpu_offset(gws); - p->job->gws_size = amdgpu_bo_size(gws); - } - if (oa) { - p->job->oa_base = amdgpu_bo_gpu_offset(oa); - p->job->oa_size = amdgpu_bo_size(oa); - } + if (gds) { + p->job->gds_base = amdgpu_bo_gpu_offset(gds); + p->job->gds_size = amdgpu_bo_size(gds); + } + if (gws) { + p->job->gws_base = amdgpu_bo_gpu_offset(gws); + p->job->gws_size = amdgpu_bo_size(gws); + } + if (oa) { + p->job->oa_base = amdgpu_bo_gpu_offset(oa); + p->job->oa_size = amdgpu_bo_size(oa); } if (!r && p->uf_entry.robj) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 290e279abf0d..3317d1536f4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1730,6 +1730,18 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev) } } } + + if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) { + /* enable gfx powergating */ + amdgpu_device_ip_set_powergating_state(adev, + AMD_IP_BLOCK_TYPE_GFX, + AMD_PG_STATE_GATE); + /* enable gfxoff */ + amdgpu_device_ip_set_powergating_state(adev, + AMD_IP_BLOCK_TYPE_SMC, + AMD_PG_STATE_GATE); + } + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c index def1010ac05e..77ad59ade85c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c @@ -452,7 +452,7 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) ATOM_PPLIB_PhaseSheddingLimits_Record *entry; adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = - kzalloc(psl->ucNumEntries * + kcalloc(psl->ucNumEntries, sizeof(struct amdgpu_phase_shedding_limits_entry), GFP_KERNEL); if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 17d6b9fb6d77..dd11b7313ca0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -369,7 +369,8 @@ int amdgpu_gart_init(struct amdgpu_device *adev) #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS /* Allocate pages table */ - adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages); + adev->gart.pages = vzalloc(array_size(sizeof(void *), + adev->gart.num_cpu_pages)); if (adev->gart.pages == NULL) return -ENOMEM; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 2c8e27370284..5fb156a01774 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -30,6 +30,7 @@ #include #include #include "amdgpu.h" +#include "amdgpu_display.h" void amdgpu_gem_object_free(struct drm_gem_object *gobj) { @@ -235,6 +236,13 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, /* create a gem object to contain this object in */ if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) { + if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) { + /* if gds bo is created from user space, it must be + * passed to bo list + */ + DRM_ERROR("GDS bo cannot be per-vm-bo\n"); + return -EINVAL; + } flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS; if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS) size = size << AMDGPU_GDS_SHIFT; @@ -749,15 +757,16 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, struct amdgpu_device *adev = dev->dev_private; struct drm_gem_object *gobj; uint32_t handle; + u32 domain; int r; args->pitch = amdgpu_align_pitch(adev, args->width, DIV_ROUND_UP(args->bpp, 8), 0); args->size = (u64)args->pitch * args->height; args->size = ALIGN(args->size, PAGE_SIZE); - - r = amdgpu_gem_object_create(adev, args->size, 0, - AMDGPU_GEM_DOMAIN_VRAM, + domain = amdgpu_bo_get_preferred_pin_domain(adev, + amdgpu_display_supported_domains(adev)); + r = amdgpu_gem_object_create(adev, args->size, 0, domain, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, false, NULL, &gobj); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 6a9e46ae7f0a..5e4e1bd90383 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -703,11 +703,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, /* This assumes only APU display buffers are pinned with (VRAM|GTT). 
* See function amdgpu_display_supported_domains() */ - if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) { - domain = AMDGPU_GEM_DOMAIN_VRAM; - if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD) - domain = AMDGPU_GEM_DOMAIN_GTT; - } + domain = amdgpu_bo_get_preferred_pin_domain(adev, domain); if (bo->pin_count) { uint32_t mem_type = bo->tbo.mem.mem_type; @@ -1066,3 +1062,14 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) return bo->tbo.offset; } + +uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev, + uint32_t domain) +{ + if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) { + domain = AMDGPU_GEM_DOMAIN_VRAM; + if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD) + domain = AMDGPU_GEM_DOMAIN_GTT; + } + return domain; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 540e03fa159f..731748033878 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -289,7 +289,8 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, struct reservation_object *resv, struct dma_fence **fence, bool direct); - +uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev, + uint32_t domain); /* * sub allocation diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index d167e8ab76d3..e3878256743a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -53,7 +53,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) n -= adev->irq.ih.ring_size; n /= size; - gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); + gtt_obj = kcalloc(n, sizeof(*gtt_obj), GFP_KERNEL); if (!gtt_obj) { DRM_ERROR("Failed to allocate %d pointers\n", n); r = 1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 8851bcdfc260..127e87b470ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -49,8 +49,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work); int amdgpu_vcn_sw_init(struct amdgpu_device *adev) { - struct amdgpu_ring *ring; - struct drm_sched_rq *rq; unsigned long bo_size; const char *fw_name; const struct common_firmware_header *hdr; @@ -84,6 +82,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) } hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version); family_id = le32_to_cpu(hdr->ucode_version) & 0xff; version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; @@ -102,24 +101,6 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) return r; } - ring = &adev->vcn.ring_dec; - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec, - rq, NULL); - if (r != 0) { - DRM_ERROR("Failed setting up VCN dec run queue.\n"); - return r; - } - - ring = &adev->vcn.ring_enc[0]; - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc, - rq, NULL); - if (r != 0) { - DRM_ERROR("Failed setting up VCN enc run queue.\n"); - return r; - } - return 0; } @@ -129,10 +110,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) kfree(adev->vcn.saved_bo); - drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec); - - drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, 
&adev->vcn.entity_enc); - amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo, &adev->vcn.gpu_addr, (void **)&adev->vcn.cpu_addr); @@ -278,7 +255,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring) } static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, - struct amdgpu_bo *bo, bool direct, + struct amdgpu_bo *bo, struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; @@ -306,19 +283,12 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, } ib->length_dw = 16; - if (direct) { - r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); - job->fence = dma_fence_get(f); - if (r) - goto err_free; + r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); + job->fence = dma_fence_get(f); + if (r) + goto err_free; - amdgpu_job_free(job); - } else { - r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec, - AMDGPU_FENCE_OWNER_UNDEFINED, &f); - if (r) - goto err_free; - } + amdgpu_job_free(job); amdgpu_bo_fence(bo, f, false); amdgpu_bo_unreserve(bo); @@ -370,11 +340,11 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand for (i = 14; i < 1024; ++i) msg[i] = cpu_to_le32(0x0); - return amdgpu_vcn_dec_send_msg(ring, bo, true, fence); + return amdgpu_vcn_dec_send_msg(ring, bo, fence); } static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - bool direct, struct dma_fence **fence) + struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_bo *bo = NULL; @@ -396,7 +366,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han for (i = 6; i < 1024; ++i) msg[i] = cpu_to_le32(0x0); - return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence); + return amdgpu_vcn_dec_send_msg(ring, bo, fence); } int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) @@ -410,7 +380,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) goto error; } - r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence); + r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence); if (r) { DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 181e6afa9847..773010b9ff15 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -67,8 +67,6 @@ struct amdgpu_vcn { struct amdgpu_ring ring_dec; struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS]; struct amdgpu_irq_src irq; - struct drm_sched_entity entity_dec; - struct drm_sched_entity entity_enc; unsigned num_enc_rings; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ccba88cc8c54..b0eb2f537392 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2123,7 +2123,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, before->last = saddr - 1; before->offset = tmp->offset; before->flags = tmp->flags; - list_add(&before->list, &tmp->list); + before->bo_va = tmp->bo_va; + list_add(&before->list, &tmp->bo_va->invalids); } /* Remember mapping split at the end */ @@ -2133,7 +2134,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, after->offset = tmp->offset; after->offset += after->start - tmp->start; after->flags = tmp->flags; - list_add(&after->list, &tmp->list); + after->bo_va = tmp->bo_va; + list_add(&after->list, &tmp->bo_va->invalids); } list_del(&tmp->list); diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index 
69500a8b4e2d..e9934de1b9cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -1221,7 +1221,7 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, ectx.abort = false; ectx.last_jump = 0; if (ws) - ectx.ws = kzalloc(4 * ws, GFP_KERNEL); + ectx.ws = kcalloc(4, ws, GFP_KERNEL); else ectx.ws = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index a266dcf5daed..7fbad2f5f0bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -5679,8 +5679,9 @@ static int ci_parse_power_table(struct amdgpu_device *adev) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); - adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) * - state_array->ucNumEntries, GFP_KERNEL); + adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries, + sizeof(struct amdgpu_ps), + GFP_KERNEL); if (!adev->pm.dpm.ps) return -ENOMEM; power_state_offset = (u8 *)state_array->states; @@ -5927,7 +5928,9 @@ static int ci_dpm_init(struct amdgpu_device *adev) ci_set_private_data_variables_based_on_pptable(adev); adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = - kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL); + kcalloc(4, + sizeof(struct amdgpu_clock_voltage_dependency_entry), + GFP_KERNEL); if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { ci_dpm_fini(adev); return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c index 60608b3df881..d5ebe566809b 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c @@ -64,7 +64,7 @@ static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev) int fb_channel_number; fb_channel_number = adev->df_funcs->get_fb_channel_number(adev); - if (fb_channel_number > ARRAY_SIZE(df_v3_6_channel_number)) + if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number)) fb_channel_number = 0; return df_v3_6_channel_number[fb_channel_number]; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index d7530fdfaad5..a69153435ea7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -111,6 +111,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] = { + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042), @@ -1837,13 +1838,15 @@ static void gfx_v9_1_parse_ind_reg_list(int *register_list_format, int indirect_offset, int list_size, int *unique_indirect_regs, - int *unique_indirect_reg_count, + int unique_indirect_reg_count, int *indirect_start_offsets, - int *indirect_start_offsets_count) + int *indirect_start_offsets_count, + int max_start_offsets_count) { int idx; for (; indirect_offset < list_size; indirect_offset++) { + WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count); indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset; *indirect_start_offsets_count = *indirect_start_offsets_count + 1; @@ -1851,14 +1854,14 @@ static void gfx_v9_1_parse_ind_reg_list(int *register_list_format, indirect_offset += 2; /* look for the matching indice */ - for (idx 
= 0; idx < *unique_indirect_reg_count; idx++) { + for (idx = 0; idx < unique_indirect_reg_count; idx++) { if (unique_indirect_regs[idx] == register_list_format[indirect_offset] || !unique_indirect_regs[idx]) break; } - BUG_ON(idx >= *unique_indirect_reg_count); + BUG_ON(idx >= unique_indirect_reg_count); if (!unique_indirect_regs[idx]) unique_indirect_regs[idx] = register_list_format[indirect_offset]; @@ -1893,9 +1896,10 @@ static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev) adev->gfx.rlc.reg_list_format_direct_reg_list_length, adev->gfx.rlc.reg_list_format_size_bytes >> 2, unique_indirect_regs, - &unique_indirect_reg_count, + unique_indirect_reg_count, indirect_start_offsets, - &indirect_start_offsets_count); + &indirect_start_offsets_count, + ARRAY_SIZE(indirect_start_offsets)); /* enable auto inc in case it is disabled */ tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL)); @@ -3404,11 +3408,6 @@ static int gfx_v9_0_late_init(void *handle) if (r) return r; - r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX, - AMD_PG_STATE_GATE); - if (r) - return r; - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 17f7f074cedc..7a1e77c93bf1 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -2727,8 +2727,9 @@ static int kv_parse_power_table(struct amdgpu_device *adev) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); - adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) * - state_array->ucNumEntries, GFP_KERNEL); + adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries, + sizeof(struct amdgpu_ps), + GFP_KERNEL); if (!adev->pm.dpm.ps) return -ENOMEM; power_state_offset = (u8 *)state_array->states; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index 0c768e388ace..727071fee6f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -47,6 +47,8 @@ MODULE_FIRMWARE("amdgpu/vega20_asd.bin"); #define smnMP1_FIRMWARE_FLAGS 0x3010028 +static uint32_t sos_old_versions[] = {1517616, 1510592, 1448594, 1446554}; + static int psp_v3_1_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type) { @@ -210,12 +212,31 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) return ret; } +static bool psp_v3_1_match_version(struct amdgpu_device *adev, uint32_t ver) +{ + int i; + + if (ver == adev->psp.sos_fw_version) + return true; + + /* + * Double check if the latest four legacy versions. + * If yes, it is still the right version. + */ + for (i = 0; i < sizeof(sos_old_versions) / sizeof(uint32_t); i++) { + if (sos_old_versions[i] == adev->psp.sos_fw_version) + return true; + } + + return false; +} + static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) { int ret; unsigned int psp_gfxdrv_command_reg = 0; struct amdgpu_device *adev = psp->adev; - uint32_t sol_reg; + uint32_t sol_reg, ver; /* Check sOS sign of life register to confirm sys driver and sOS * are already been loaded. 
@@ -248,6 +269,10 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0, true); + ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); + if (!psp_v3_1_match_version(adev, ver)) + DRM_WARN("SOS version doesn't match\n"); + return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index b12d7c9d42a0..5c97a3671726 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -7242,8 +7242,9 @@ static int si_parse_power_table(struct amdgpu_device *adev) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); - adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) * - state_array->ucNumEntries, GFP_KERNEL); + adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries, + sizeof(struct amdgpu_ps), + GFP_KERNEL); if (!adev->pm.dpm.ps) return -ENOMEM; power_state_offset = (u8 *)state_array->states; @@ -7346,7 +7347,9 @@ static int si_dpm_init(struct amdgpu_device *adev) return ret; adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = - kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL); + kcalloc(4, + sizeof(struct amdgpu_clock_voltage_dependency_entry), + GFP_KERNEL); if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { amdgpu_free_extended_power_table(adev); return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 68b4a22a8892..83f2717fcf81 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -685,6 +685,7 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_BIF_MGCG | AMD_CG_SUPPORT_BIF_LS | AMD_CG_SUPPORT_HDP_MGCG | + AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_ROM_MGCG | AMD_CG_SUPPORT_VCE_MGCG | AMD_CG_SUPPORT_UVD_MGCG; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 110b294ebed3..29684c3ea4ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -769,14 +769,14 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev) return 0; } -bool vcn_v1_0_is_idle(void *handle) +static bool vcn_v1_0_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == 0x2); } -int vcn_v1_0_wait_for_idle(void *handle) +static int vcn_v1_0_wait_for_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; int ret = 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f9b9ab90558c..f9add85157e7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include @@ -2095,12 +2096,6 @@ convert_color_depth_from_display_info(const struct drm_connector *connector) { uint32_t bpc = connector->display_info.bpc; - /* Limited color depth to 8bit - * TODO: Still need to handle deep color - */ - if (bpc > 8) - bpc = 8; - switch (bpc) { case 0: /* Temporary Work around, DRM don't parse color depth for @@ -2316,27 +2311,22 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, } } -static int create_fake_sink(struct amdgpu_dm_connector *aconnector) +static struct dc_sink * +create_fake_sink(struct amdgpu_dm_connector *aconnector) { - struct dc_sink *sink = NULL; struct dc_sink_init_data sink_init_data = { 0 }; - + struct 
dc_sink *sink = NULL; sink_init_data.link = aconnector->dc_link; sink_init_data.sink_signal = aconnector->dc_link->connector_signal; sink = dc_sink_create(&sink_init_data); if (!sink) { DRM_ERROR("Failed to create sink!\n"); - return -ENOMEM; + return NULL; } - sink->sink_signal = SIGNAL_TYPE_VIRTUAL; - aconnector->fake_enable = true; - aconnector->dc_sink = sink; - aconnector->dc_link->local_sink = sink; - - return 0; + return sink; } static void set_multisync_trigger_params( @@ -2399,7 +2389,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, struct dc_stream_state *stream = NULL; struct drm_display_mode mode = *drm_mode; bool native_mode_found = false; - + struct dc_sink *sink = NULL; if (aconnector == NULL) { DRM_ERROR("aconnector is NULL!\n"); return stream; @@ -2417,15 +2407,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, return stream; } - if (create_fake_sink(aconnector)) + sink = create_fake_sink(aconnector); + if (!sink) return stream; + } else { + sink = aconnector->dc_sink; } - stream = dc_create_stream_for_sink(aconnector->dc_sink); + stream = dc_create_stream_for_sink(sink); if (stream == NULL) { DRM_ERROR("Failed to create stream for sink!\n"); - return stream; + goto finish; } list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { @@ -2464,12 +2457,15 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, fill_audio_info( &stream->audio_info, drm_connector, - aconnector->dc_sink); + sink); update_stream_signal(stream); if (dm_state && dm_state->freesync_capable) stream->ignore_msa_timing_param = true; +finish: + if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL) + dc_sink_release(sink); return stream; } @@ -2714,6 +2710,9 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) struct dm_connector_state *state = to_dm_connector_state(connector->state); + if (connector->state) + __drm_atomic_helper_connector_destroy_state(connector->state); + kfree(state); state = kzalloc(sizeof(*state), GFP_KERNEL); @@ -2724,8 +2723,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) state->underscan_hborder = 0; state->underscan_vborder = 0; - connector->state = &state->base; - connector->state->connector = connector; + __drm_atomic_helper_connector_reset(connector, &state->base); } } @@ -3083,17 +3081,6 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, } } - /* It's a hack for s3 since in 4.9 kernel filter out cursor buffer - * prepare and cleanup in drm_atomic_helper_prepare_planes - * and drm_atomic_helper_cleanup_planes because fb doens't in s3. - * IN 4.10 kernel this code should be removed and amdgpu_device_suspend - * code touching fram buffers should be avoided for DC. 
- */ - if (plane->type == DRM_PLANE_TYPE_CURSOR) { - struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc); - - acrtc->cursor_bo = obj; - } return 0; } @@ -4281,6 +4268,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) if (dm_old_crtc_state->stream) remove_stream(adev, acrtc, dm_old_crtc_state->stream); + pm_runtime_get_noresume(dev->dev); + acrtc->enabled = true; acrtc->hw_mode = new_crtc_state->mode; crtc->hwmode = new_crtc_state->mode; @@ -4469,6 +4458,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_wait_for_flip_done(dev, state); drm_atomic_helper_cleanup_planes(dev, state); + + /* Finally, drop a runtime PM reference for each newly disabled CRTC, + * so we can put the GPU into runtime suspend if we're not driving any + * displays anymore + */ + pm_runtime_mark_last_busy(dev->dev); + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + if (old_crtc_state->active && !new_crtc_state->active) + pm_runtime_put_autosuspend(dev->dev); + } } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index bd449351803f..ec304b1a5973 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -435,7 +435,7 @@ bool dm_helpers_submit_i2c( return false; } - msgs = kzalloc(num * sizeof(struct i2c_msg), GFP_KERNEL); + msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL); if (!msgs) return false; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 4be21bf54749..a910f01838ab 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -555,6 +555,9 @@ static inline int dm_irq_state(struct amdgpu_device *adev, return 0; } + if (acrtc->otg_inst == -1) + return 0; + irq_source = dal_irq_type + acrtc->otg_inst; st = (state == AMDGPU_IRQ_STATE_ENABLE); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c index 0229c7edb8ad..5a3346124a01 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c @@ -234,6 +234,33 @@ static void pp_to_dc_clock_levels( } } +static void pp_to_dc_clock_levels_with_latency( + const struct pp_clock_levels_with_latency *pp_clks, + struct dm_pp_clock_levels_with_latency *clk_level_info, + enum dm_pp_clock_type dc_clk_type) +{ + uint32_t i; + + if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) { + DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n", + DC_DECODE_PP_CLOCK_TYPE(dc_clk_type), + pp_clks->num_levels, + DM_PP_MAX_CLOCK_LEVELS); + + clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS; + } else + clk_level_info->num_levels = pp_clks->num_levels; + + DRM_DEBUG("DM_PPLIB: values for %s clock\n", + DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); + + for (i = 0; i < clk_level_info->num_levels; i++) { + DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz); + clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz; + clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us; + } +} + bool dm_pp_get_clock_levels_by_type( const struct dc_context *ctx, enum dm_pp_clock_type clk_type, @@ -311,8 +338,22 @@ bool dm_pp_get_clock_levels_by_type_with_latency( enum 
dm_pp_clock_type clk_type, struct dm_pp_clock_levels_with_latency *clk_level_info) { - /* TODO: to be implemented */ - return false; + struct amdgpu_device *adev = ctx->driver_context; + void *pp_handle = adev->powerplay.pp_handle; + struct pp_clock_levels_with_latency pp_clks = { 0 }; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency) + return false; + + if (pp_funcs->get_clock_by_type_with_latency(pp_handle, + dc_to_pp_clock_type(clk_type), + &pp_clks)) + return false; + + pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type); + + return true; } bool dm_pp_get_clock_levels_by_type_with_voltage( diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c index e61dd97d0928..f28989860fd8 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c +++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c @@ -449,6 +449,11 @@ static inline unsigned int clamp_ux_dy( return min_clamp; } +unsigned int dc_fixpt_u3d19(struct fixed31_32 arg) +{ + return ux_dy(arg.value, 3, 19); +} + unsigned int dc_fixpt_u2d19(struct fixed31_32 arg) { return ux_dy(arg.value, 2, 19); diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c index 738a818d58d1..0866874ae8c6 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/logger.c +++ b/drivers/gpu/drm/amd/display/dc/basics/logger.c @@ -364,7 +364,7 @@ void dm_logger_open( entry->type = log_type; entry->logger = logger; - entry->buf = kzalloc(DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char), + entry->buf = kzalloc(DAL_LOGGER_BUFFER_MAX_SIZE, GFP_KERNEL); entry->buf_offset = 0; diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c index 217b8f1f7bf6..d28e9cf0e961 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/vector.c +++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c @@ -40,7 +40,7 @@ bool dal_vector_construct( return false; } - vector->container = kzalloc(struct_size * capacity, GFP_KERNEL); + vector->container = kcalloc(capacity, struct_size, GFP_KERNEL); if (vector->container == NULL) return false; vector->capacity = capacity; @@ -67,7 +67,7 @@ bool dal_vector_presized_costruct( return false; } - vector->container = kzalloc(struct_size * count, GFP_KERNEL); + vector->container = kcalloc(count, struct_size, GFP_KERNEL); if (vector->container == NULL) return false; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 7d609c71394b..7857cb42b3e6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1630,17 +1630,42 @@ static enum dc_status read_hpd_rx_irq_data( struct dc_link *link, union hpd_irq_data *irq_data) { + static enum dc_status retval; + /* The HW reads 16 bytes from 200h on HPD, * but if we get an AUX_DEFER, the HW cannot retry * and this causes the CTS tests 4.3.2.1 - 3.2.4 to * fail, so we now explicitly read 6 bytes which is * the req from the above mentioned test cases. + * + * For DP 1.4 we need to read those from 2002h range. */ - return core_link_read_dpcd( - link, - DP_SINK_COUNT, - irq_data->raw, - sizeof(union hpd_irq_data)); + if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14) + retval = core_link_read_dpcd( + link, + DP_SINK_COUNT, + irq_data->raw, + sizeof(union hpd_irq_data)); + else { + /* Read 2 bytes at this location,... 
*/ + retval = core_link_read_dpcd( + link, + DP_SINK_COUNT_ESI, + irq_data->raw, + 2); + + if (retval != DC_OK) + return retval; + + /* ... then read remaining 4 at the other location */ + retval = core_link_read_dpcd( + link, + DP_LANE0_1_STATUS_ESI, + &irq_data->raw[2], + 4); + } + + return retval; } static bool allow_hpd_rx_irq(const struct dc_link *link) @@ -2278,7 +2303,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, static bool retrieve_link_cap(struct dc_link *link) { - uint8_t dpcd_data[DP_TRAINING_AUX_RD_INTERVAL - DP_DPCD_REV + 1]; + uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1]; union down_stream_port_count down_strm_port_count; union edp_configuration_cap edp_config_cap; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index 599c7ab6befe..88b09dd758ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -1079,13 +1079,15 @@ static void get_ss_info_from_atombios( if (*ss_entries_num == 0) return; - ss_info = kzalloc(sizeof(struct spread_spectrum_info) * (*ss_entries_num), + ss_info = kcalloc(*ss_entries_num, + sizeof(struct spread_spectrum_info), GFP_KERNEL); ss_info_cur = ss_info; if (ss_info == NULL) return; - ss_data = kzalloc(sizeof(struct spread_spectrum_data) * (*ss_entries_num), + ss_data = kcalloc(*ss_entries_num, + sizeof(struct spread_spectrum_data), GFP_KERNEL); if (ss_data == NULL) goto out_free_info; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 0a6d483dc046..c0e813c7ddd4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -72,7 +72,8 @@ static void dce110_update_generic_info_packet( uint32_t max_retries = 50; /*we need turn on clock before programming AFMT block*/ - REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1); + if (REG(AFMT_CNTL)) + REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1); if (REG(AFMT_VBI_PACKET_CONTROL1)) { if (packet_index >= 8) @@ -719,7 +720,8 @@ static void dce110_stream_encoder_update_hdmi_info_packets( const uint32_t *content = (const uint32_t *) &info_frame->avi.sb[0]; /*we need turn on clock before programming AFMT block*/ - REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1); + if (REG(AFMT_CNTL)) + REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1); REG_WRITE(AFMT_AVI_INFO0, content[0]); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c index 9150d2694450..e2994d337044 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c @@ -121,10 +121,10 @@ static void reset_lb_on_vblank(struct dc_context *ctx) frame_count = dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT); - for (retry = 100; retry > 0; retry--) { + for (retry = 10000; retry > 0; retry--) { if (frame_count != dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT)) break; - msleep(1); + udelay(10); } if (!retry) dm_error("Frame count did not increase for 100ms.\n"); @@ -147,14 +147,14 @@ static void wait_for_fbc_state_changed( uint32_t addr = mmFBC_STATUS; uint32_t value; - while (counter < 10) { + while (counter < 1000) { value = dm_read_reg(cp110->base.ctx, addr); if (get_reg_field_value( value, FBC_STATUS, FBC_ENABLE_STATUS) == enabled) break; - msleep(10); + udelay(100); counter++; } diff --git 
a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index a92fb0aa2ff3..c29052b6da5a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1004,9 +1004,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option) /*don't free audio if it is from retrain or internal disable stream*/ if (option == FREE_ACQUIRED_RESOURCE && dc->caps.dynamic_audio == true) { /*we have to dynamic arbitrate the audio endpoints*/ - pipe_ctx->stream_res.audio = NULL; /*we free the resource, need reset is_audio_acquired*/ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false); + pipe_ctx->stream_res.audio = NULL; } /* TODO: notify audio driver for if audio modes list changed diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index 46a35c7f01df..c69fa4bfab0a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -132,8 +132,7 @@ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp) #define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19)) - -bool dpp_get_optimal_number_of_taps( +static bool dpp_get_optimal_number_of_taps( struct dpp *dpp, struct scaler_data *scl_data, const struct scaling_taps *in_taps) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h index 5944a3ba0409..e862cafa6501 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h @@ -1424,12 +1424,8 @@ void dpp1_set_degamma( enum ipp_degamma_mode mode); void dpp1_set_degamma_pwl(struct dpp *dpp_base, - const struct pwl_params *params); + const struct pwl_params *params); -bool dpp_get_optimal_number_of_taps( - struct dpp *dpp, - struct scaler_data *scl_data, - const struct scaling_taps *in_taps); void dpp_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index 4ddd6273d5a5..f862fd148cca 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -565,16 +565,16 @@ static void dpp1_dscl_set_manual_ratio_init( uint32_t init_int = 0; REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0, - SCL_H_SCALE_RATIO, dc_fixpt_u2d19(data->ratios.horz) << 5); + SCL_H_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.horz) << 5); REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0, - SCL_V_SCALE_RATIO, dc_fixpt_u2d19(data->ratios.vert) << 5); + SCL_V_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.vert) << 5); REG_SET(SCL_HORZ_FILTER_SCALE_RATIO_C, 0, - SCL_H_SCALE_RATIO_C, dc_fixpt_u2d19(data->ratios.horz_c) << 5); + SCL_H_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.horz_c) << 5); REG_SET(SCL_VERT_FILTER_SCALE_RATIO_C, 0, - SCL_V_SCALE_RATIO_C, dc_fixpt_u2d19(data->ratios.vert_c) << 5); + SCL_V_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.vert_c) << 5); /* * 0.24 format for fraction, first five bits zeroed diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index d2ab78b35a7a..c28085be39ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -396,11 +396,15 @@ bool hubp1_program_surface_flip_and_addr( if 
(address->grph_stereo.right_addr.quad_part == 0) break; - REG_UPDATE_4(DCSURF_SURFACE_CONTROL, + REG_UPDATE_8(DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_TMZ, address->tmz_surface, PRIMARY_SURFACE_TMZ_C, address->tmz_surface, PRIMARY_META_SURFACE_TMZ, address->tmz_surface, - PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface); + PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface, + SECONDARY_SURFACE_TMZ, address->tmz_surface, + SECONDARY_SURFACE_TMZ_C, address->tmz_surface, + SECONDARY_META_SURFACE_TMZ, address->tmz_surface, + SECONDARY_META_SURFACE_TMZ_C, address->tmz_surface); if (address->grph_stereo.right_meta_addr.quad_part != 0) { @@ -459,9 +463,11 @@ void hubp1_dcc_control(struct hubp *hubp, bool enable, uint32_t dcc_ind_64b_blk = independent_64b_blks ? 1 : 0; struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); - REG_UPDATE_2(DCSURF_SURFACE_CONTROL, + REG_UPDATE_4(DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, dcc_en, - PRIMARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk); + PRIMARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk, + SECONDARY_SURFACE_DCC_EN, dcc_en, + SECONDARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk); } void hubp1_program_surface_config( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index af384034398f..d901d5092969 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -312,6 +312,12 @@ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_META_SURFACE_TMZ_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_TMZ, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_TMZ_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\ @@ -489,6 +495,8 @@ type SECONDARY_META_SURFACE_TMZ_C;\ type PRIMARY_SURFACE_DCC_EN;\ type PRIMARY_SURFACE_DCC_IND_64B_BLK;\ + type SECONDARY_SURFACE_DCC_EN;\ + type SECONDARY_SURFACE_DCC_IND_64B_BLK;\ type DET_BUF_PLANE1_BASE_ADDRESS;\ type CROSSBAR_SRC_CB_B;\ type CROSSBAR_SRC_CR_R;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 653b7b2efe2e..c928ee4cd382 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -319,6 +319,10 @@ void enc1_stream_encoder_dp_set_stream_attribute( REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, DP_COMPONENT_PIXEL_DEPTH_12BPC); break; + case COLOR_DEPTH_161616: + REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, + DP_COMPONENT_PIXEL_DEPTH_16BPC); + break; default: REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, DP_COMPONENT_PIXEL_DEPTH_6BPC); diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c index 80038e0e610f..ab5483c0c502 100644 --- 
a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c @@ -98,7 +98,8 @@ struct gpio_service *dal_gpio_service_create( if (number_of_bits) { uint32_t index_of_uint = 0; - slot = kzalloc(number_of_uints * sizeof(uint32_t), + slot = kcalloc(number_of_uints, + sizeof(uint32_t), GFP_KERNEL); if (!slot) { diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h index bb0d4ebba9f0..a981b3e99ab3 100644 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h @@ -496,6 +496,8 @@ static inline int dc_fixpt_ceil(struct fixed31_32 arg) * fractional */ +unsigned int dc_fixpt_u3d19(struct fixed31_32 arg); + unsigned int dc_fixpt_u2d19(struct fixed31_32 arg); unsigned int dc_fixpt_u0d19(struct fixed31_32 arg); diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 0cd111d59018..eee0dfad6962 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1274,19 +1274,22 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), + rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS, + sizeof(*rgb_user), GFP_KERNEL); if (!rgb_user) goto rgb_user_alloc_fail; - rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS), + rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, + sizeof(*rgb_regamma), GFP_KERNEL); if (!rgb_regamma) goto rgb_regamma_alloc_fail; - axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + 3), + axix_x = kvcalloc(ramp->num_entries + 3, sizeof(*axix_x), GFP_KERNEL); if (!axix_x) goto axix_x_alloc_fail; - coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); + coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff), + GFP_KERNEL); if (!coeff) goto coeff_alloc_fail; @@ -1413,13 +1416,15 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf, output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - rgb_user = kzalloc(sizeof(*rgb_user) * (GAMMA_RGB_256_ENTRIES + _EXTRA_POINTS), - GFP_KERNEL); + rgb_user = kcalloc(GAMMA_RGB_256_ENTRIES + _EXTRA_POINTS, + sizeof(*rgb_user), + GFP_KERNEL); if (!rgb_user) goto rgb_user_alloc_fail; - rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS), - GFP_KERNEL); + rgb_regamma = kcalloc(MAX_HW_POINTS + _EXTRA_POINTS, + sizeof(*rgb_regamma), + GFP_KERNEL); if (!rgb_regamma) goto rgb_regamma_alloc_fail; @@ -1480,19 +1485,21 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, input_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), + rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS, + sizeof(*rgb_user), GFP_KERNEL); if (!rgb_user) goto rgb_user_alloc_fail; - curve = kvzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS), + curve = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*curve), GFP_KERNEL); if (!curve) goto curve_alloc_fail; - axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS), + axix_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axix_x), GFP_KERNEL); if (!axix_x) goto axix_x_alloc_fail; - coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); + coeff = 
kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff), + GFP_KERNEL); if (!coeff) goto coeff_alloc_fail; @@ -1569,8 +1576,8 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, } ret = true; } else if (trans == TRANSFER_FUNCTION_PQ) { - rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * - (MAX_HW_POINTS + _EXTRA_POINTS), + rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, + sizeof(*rgb_regamma), GFP_KERNEL); if (!rgb_regamma) goto rgb_regamma_alloc_fail; @@ -1594,8 +1601,8 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, kvfree(rgb_regamma); } else if (trans == TRANSFER_FUNCTION_SRGB || trans == TRANSFER_FUNCTION_BT709) { - rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * - (MAX_HW_POINTS + _EXTRA_POINTS), + rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, + sizeof(*rgb_regamma), GFP_KERNEL); if (!rgb_regamma) goto rgb_regamma_alloc_fail; @@ -1638,8 +1645,8 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, } ret = true; } else if (trans == TRANSFER_FUNCTION_PQ) { - rgb_degamma = kvzalloc(sizeof(*rgb_degamma) * - (MAX_HW_POINTS + _EXTRA_POINTS), + rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, + sizeof(*rgb_degamma), GFP_KERNEL); if (!rgb_degamma) goto rgb_degamma_alloc_fail; @@ -1658,8 +1665,8 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, kvfree(rgb_degamma); } else if (trans == TRANSFER_FUNCTION_SRGB || trans == TRANSFER_FUNCTION_BT709) { - rgb_degamma = kvzalloc(sizeof(*rgb_degamma) * - (MAX_HW_POINTS + _EXTRA_POINTS), + rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, + sizeof(*rgb_degamma), GFP_KERNEL); if (!rgb_degamma) goto rgb_degamma_alloc_fail; diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 27d4003aa2c7..fa344ceafc17 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -155,7 +155,8 @@ struct mod_freesync *mod_freesync_create(struct dc *dc) if (core_freesync == NULL) goto fail_alloc_context; - core_freesync->map = kzalloc(sizeof(struct freesync_entity) * MOD_FREESYNC_MAX_CONCURRENT_STREAMS, + core_freesync->map = kcalloc(MOD_FREESYNC_MAX_CONCURRENT_STREAMS, + sizeof(struct freesync_entity), GFP_KERNEL); if (core_freesync->map == NULL) diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c index 3f7d47fdc367..710852ad03f3 100644 --- a/drivers/gpu/drm/amd/display/modules/stats/stats.c +++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c @@ -141,19 +141,17 @@ struct mod_stats *mod_stats_create(struct dc *dc) else core_stats->entries = reg_data; } - core_stats->time = kzalloc( - sizeof(struct stats_time_cache) * - core_stats->entries, + core_stats->time = kcalloc(core_stats->entries, + sizeof(struct stats_time_cache), GFP_KERNEL); if (core_stats->time == NULL) goto fail_construct_time; core_stats->event_entries = DAL_STATS_EVENT_ENTRIES_DEFAULT; - core_stats->events = kzalloc( - sizeof(struct stats_event_cache) * - core_stats->event_entries, - GFP_KERNEL); + core_stats->events = kcalloc(core_stats->event_entries, + sizeof(struct stats_event_cache), + GFP_KERNEL); if (core_stats->events == NULL) goto fail_construct_events; diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h index 88f7c69df6b9..06fac509e987 100644 --- 
a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h @@ -36,13 +36,13 @@ /* DF_CS_AON0_DramBaseAddress0 */ #define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0 #define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1 -#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4 -#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8 +#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x2 +#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x9 #define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc #define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L #define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L -#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L -#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L +#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x0000003CL +#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000E00L #define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L #endif diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index c6c1666ac120..092d800b703a 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -2026,17 +2026,15 @@ enum atom_smu11_syspll_id { SMU11_SYSPLL3_1_ID = 6, }; - enum atom_smu11_syspll0_clock_id { - SMU11_SYSPLL0_SOCCLK_ID = 0, // SOCCLK - SMU11_SYSPLL0_MP0CLK_ID = 1, // MP0CLK - SMU11_SYSPLL0_DCLK_ID = 2, // DCLK - SMU11_SYSPLL0_VCLK_ID = 3, // VCLK - SMU11_SYSPLL0_ECLK_ID = 4, // ECLK + SMU11_SYSPLL0_ECLK_ID = 0, // ECLK + SMU11_SYSPLL0_SOCCLK_ID = 1, // SOCCLK + SMU11_SYSPLL0_MP0CLK_ID = 2, // MP0CLK + SMU11_SYSPLL0_DCLK_ID = 3, // DCLK + SMU11_SYSPLL0_VCLK_ID = 4, // VCLK SMU11_SYSPLL0_DCEFCLK_ID = 5, // DCEFCLK }; - enum atom_smu11_syspll1_0_clock_id { SMU11_SYSPLL1_0_UCLKA_ID = 0, // UCLK_a }; diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index b493369e6d0f..d567be49c31b 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -180,7 +180,6 @@ static int pp_late_init(void *handle) { struct amdgpu_device *adev = handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - int ret; if (hwmgr && hwmgr->pm_en) { mutex_lock(&hwmgr->smu_lock); @@ -191,13 +190,6 @@ static int pp_late_init(void *handle) if (adev->pm.smu_prv_buffer_size != 0) pp_reserve_vram_for_smu(adev); - if (hwmgr->hwmgr_func->gfx_off_control && - (hwmgr->feature_mask & PP_GFXOFF_MASK)) { - ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr, true); - if (ret) - pr_err("gfx off enabling failed!\n"); - } - return 0; } @@ -245,7 +237,7 @@ static int pp_set_powergating_state(void *handle, } if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_debug("%s was not implemented.\n", __func__); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c index 0af13c154328..91ffb7bc4ee7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c @@ -50,7 +50,7 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr) return 0; } - hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL); + hwmgr->ps = kcalloc(table_entries, size, GFP_KERNEL); if (hwmgr->ps == NULL) return -ENOMEM; 
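/*
 * [Illustrative aside, not part of the patch] kcalloc(n, size, flags)
 * is the zeroing counterpart of kmalloc_array(): it performs the same
 * overflow check on n * size before allocating, so conversions from
 * kzalloc(size * n) like the one above keep the zeroed-memory
 * semantics and additionally fail cleanly with NULL on overflow.
 * Sketch, assuming a hypothetical table entry type:
 */
#include <linux/slab.h>

struct ps_table_entry {
	u32 sclk;
	u32 mclk;
};

static struct ps_table_entry *alloc_ps_table(unsigned int table_entries)
{
	/* zeroed array of table_entries elements; NULL on overflow or OOM */
	return kcalloc(table_entries, sizeof(struct ps_table_entry),
		       GFP_KERNEL);
}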
@@ -265,19 +265,18 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
 	if (skip)
 		return 0;
-	if (!hwmgr->ps)
-		/*
-		 * for vega12/vega20 which does not support power state manager
-		 * DAL clock limits should also be honoured
-		 */
-		phm_apply_clock_adjust_rules(hwmgr);
-
 	phm_pre_display_configuration_changed(hwmgr);
 	phm_display_configuration_changed(hwmgr);
 	if (hwmgr->ps)
 		power_state_management(hwmgr, new_ps);
+	else
+		/*
+		 * for vega12/vega20 which does not support power state manager
+		 * DAL clock limits should also be honoured
+		 */
+		phm_apply_clock_adjust_rules(hwmgr);
 	phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index c97b0e5ba43b..5325661fedff 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -496,7 +496,9 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI
 	uint32_t ix;
 	parameters.clk_id = id;
+	parameters.syspll_id = 0;
 	parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+	parameters.dfsdid = 0;
 	ix = GetIndexIntoMasterCmdTable(getsmuclockinfo);
@@ -505,7 +507,7 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI
 		return -EINVAL;
 	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&parameters;
-	*frequency = output->atom_smu_outputclkfreq.smu_clock_freq_hz / 10000;
+	*frequency = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index f0d48b183d22..35bd9870ab10 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -870,12 +870,6 @@ static int init_over_drive_limits(
 	hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
 	hwmgr->platform_descriptor.overdriveVDDCStep = 0;
-	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0 \
-		|| hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) {
-		hwmgr->od_enabled = false;
-		pr_debug("OverDrive feature not support by VBIOS\n");
-	}
-
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index ce64dfabd34b..925e17104f90 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -1074,12 +1074,6 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr,
 			powerplay_table,
 			(const ATOM_FIRMWARE_INFO_V2_1 *)fw_info);
-	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0
-		&& hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) {
-		hwmgr->od_enabled = false;
-		pr_debug("OverDrive feature not support by VBIOS\n");
-	}
-
 	return result;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 85f84f4d8be5..d4bc83e81389 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -53,8 +53,37 @@ static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
 static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
-		struct pp_display_clock_request *clock_req);
+		struct pp_display_clock_request *clock_req)
+{
+	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
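/*
 * [Illustrative aside, not part of the patch] The le32_to_cpu() added
 * in the ppatomfwctrl.c hunk above matters on big-endian hosts: the
 * SMU reports the frequency as a little-endian 32-bit field, so the
 * raw value must be converted to CPU byte order before the divide.
 * Minimal sketch:
 */
#include <linux/kernel.h>

static u32 smu_clock_hz_to_10khz(__le32 raw_freq_hz)
{
	/* firmware field is LE; swap to CPU order before arithmetic */
	return le32_to_cpu(raw_freq_hz) / 10000;
}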
+ enum amd_pp_clock_type clk_type = clock_req->clock_type; + uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; + PPSMC_Msg msg; + switch (clk_type) { + case amd_pp_dcf_clock: + if (clk_freq == smu10_data->dcf_actual_hard_min_freq) + return 0; + msg = PPSMC_MSG_SetHardMinDcefclkByFreq; + smu10_data->dcf_actual_hard_min_freq = clk_freq; + break; + case amd_pp_soc_clock: + msg = PPSMC_MSG_SetHardMinSocclkByFreq; + break; + case amd_pp_f_clock: + if (clk_freq == smu10_data->f_actual_hard_min_freq) + return 0; + smu10_data->f_actual_hard_min_freq = clk_freq; + msg = PPSMC_MSG_SetHardMinFclkByFreq; + break; + default: + pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!"); + return -EINVAL; + } + smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq); + + return 0; +} static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps) { @@ -284,7 +313,7 @@ static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr) static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) { - return smu10_disable_gfx_off(hwmgr); + return 0; } static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr) @@ -299,7 +328,7 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr) static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) { - return smu10_enable_gfx_off(hwmgr); + return 0; } static int smu10_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable) @@ -1000,6 +1029,12 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, case amd_pp_soc_clock: pclk_vol_table = pinfo->vdd_dep_on_socclk; break; + case amd_pp_disp_clock: + pclk_vol_table = pinfo->vdd_dep_on_dispclk; + break; + case amd_pp_phy_clock: + pclk_vol_table = pinfo->vdd_dep_on_phyclk; + break; default: return -EINVAL; } @@ -1017,39 +1052,7 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, return 0; } -static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr, - struct pp_display_clock_request *clock_req) -{ - struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); - enum amd_pp_clock_type clk_type = clock_req->clock_type; - uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; - PPSMC_Msg msg; - switch (clk_type) { - case amd_pp_dcf_clock: - if (clk_freq == smu10_data->dcf_actual_hard_min_freq) - return 0; - msg = PPSMC_MSG_SetHardMinDcefclkByFreq; - smu10_data->dcf_actual_hard_min_freq = clk_freq; - break; - case amd_pp_soc_clock: - msg = PPSMC_MSG_SetHardMinSocclkByFreq; - break; - case amd_pp_f_clock: - if (clk_freq == smu10_data->f_actual_hard_min_freq) - return 0; - smu10_data->f_actual_hard_min_freq = clk_freq; - msg = PPSMC_MSG_SetHardMinFclkByFreq; - break; - default: - pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!"); - return -EINVAL; - } - - smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq); - - return 0; -} static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) { @@ -1182,6 +1185,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu, .smus_notify_pwe = smu10_smus_notify_pwe, .gfx_off_control = smu10_gfx_off_control, + .display_clock_voltage_request = smu10_display_clock_voltage_request, }; int smu10_init_function_pointers(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 45e9b8cb169d..f8e866ceda02 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c 
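/*
 * [Illustrative aside, not part of the patch] smu10 now defines
 * smu10_display_clock_voltage_request() ahead of its first user and
 * exposes it through the hwmgr function table entry added above. A
 * hedged usage sketch (hypothetical caller, error handling elided):
 */
static void example_request_min_dcefclk(struct pp_hwmgr *hwmgr)
{
	struct pp_display_clock_request req = {
		.clock_type = amd_pp_dcf_clock,
		.clock_freq_in_khz = 600 * 1000,	/* hypothetical 600 MHz */
	};

	/* mapped to PPSMC_MSG_SetHardMinDcefclkByFreq by the switch above */
	hwmgr->hwmgr_func->display_clock_voltage_request(hwmgr, &req);
}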
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 45e9b8cb169d..f8e866ceda02 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -791,7 +791,8 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
             data->dpm_table.sclk_table.count++;
         }
     }
-
+    if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
+        hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
     /* Initialize Mclk DPM table based on allow Mclk values */
     data->dpm_table.mclk_table.count = 0;
     for (i = 0; i < dep_mclk_table->count; i++) {
@@ -806,6 +807,8 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
         }
     }
 
+    if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
+        hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
     return 0;
 }
 
@@ -3752,14 +3755,17 @@ static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
 static int smu7_generate_dpm_level_enable_mask(
         struct pp_hwmgr *hwmgr, const void *input)
 {
-    int result;
+    int result = 0;
     const struct phm_set_power_state_input *states =
             (const struct phm_set_power_state_input *)input;
     struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
     const struct smu7_power_state *smu7_ps =
             cast_const_phw_smu7_power_state(states->pnew_state);
 
-    result = smu7_trim_dpm_states(hwmgr, smu7_ps);
+    /*skip the trim if od is enabled*/
+    if (!hwmgr->od_enabled)
+        result = smu7_trim_dpm_states(hwmgr, smu7_ps);
+
     if (result)
         return result;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index d156b7bb92ae..05e680d55dbb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -321,8 +321,12 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
         odn_table->min_vddc = dep_table[0]->entries[0].vddc;
 
     i = od_table[2]->count - 1;
-    od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock;
-    od_table[2]->entries[i].vddc = odn_table->max_vddc;
+    od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock > od_table[2]->entries[i].clk ?
+                    hwmgr->platform_descriptor.overdriveLimit.memoryClock :
+                    od_table[2]->entries[i].clk;
+    od_table[2]->entries[i].vddc = odn_table->max_vddc > od_table[2]->entries[i].vddc ?
+                    odn_table->max_vddc :
+                    od_table[2]->entries[i].vddc;
 
     return 0;
 }
@@ -1311,6 +1315,9 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
     vega10_setup_default_single_dpm_table(hwmgr,
             dpm_table,
             dep_gfx_table);
+    if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
+        hwmgr->platform_descriptor.overdriveLimit.engineClock =
+            dpm_table->dpm_levels[dpm_table->count-1].value;
     vega10_init_dpm_state(&(dpm_table->dpm_state));
 
     /* Initialize Mclk DPM table based on allow Mclk values */
@@ -1319,6 +1326,10 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
     vega10_setup_default_single_dpm_table(hwmgr,
             dpm_table,
             dep_mclk_table);
+    if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
+        hwmgr->platform_descriptor.overdriveLimit.memoryClock =
+            dpm_table->dpm_levels[dpm_table->count-1].value;
+
     vega10_init_dpm_state(&(dpm_table->dpm_state));
 
     data->dpm_table.eclk_table.count = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index a9efd8554fbc..dbe4b1f66784 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -1104,7 +1104,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
     for (count = 0; count < num_se; count++) {
         data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
         WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
-        result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+        result = vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
         result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT);
         result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT);
         result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index 0768d259c07c..16b1a9cf6cf0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -267,12 +267,6 @@ static int init_over_drive_limits(
     hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
     hwmgr->platform_descriptor.overdriveVDDCStep = 0;
 
-    if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0 ||
-        hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) {
-        hwmgr->od_enabled = false;
-        pr_debug("OverDrive feature not support by VBIOS\n");
-    }
-
     return 0;
 }
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 40e1e24f2ff0..a5808382bdf0 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1633,7 +1633,8 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
         edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
         edid[0x7e] = valid_extensions;
 
-        new = kmalloc((valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+        new = kmalloc_array(valid_extensions + 1, EDID_LENGTH,
+                    GFP_KERNEL);
         if (!new)
             goto out;
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index dae18e58e79b..c92b00d42ece 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -47,7 +47,7 @@ int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
     if (size <= PAGE_SIZE / sizeof(*ht->table))
         ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
     else
-        ht->table = vzalloc(size*sizeof(*ht->table));
+        ht->table = vzalloc(array_size(size, sizeof(*ht->table)));
     if (!ht->table) {
         DRM_ERROR("Out of memory for hash table\n");
         return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 3c54044214db..d69e4fc1ee77 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -80,7 +80,7 @@ static void *agp_remap(unsigned long offset, unsigned long size,
      * page-table instead (that's probably faster anyhow...).
      */
     /* note: use vmalloc() because num_pages could be large... */
-    page_map = vmalloc(num_pages * sizeof(struct page *));
+    page_map = vmalloc(array_size(num_pages, sizeof(struct page *)));
     if (!page_map)
         return NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 7c3030b7e586..6d29777884f9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1723,8 +1723,8 @@ static int exynos_dsi_probe(struct platform_device *pdev)
         return -EPROBE_DEFER;
     }
 
-    dsi->clks = devm_kzalloc(dev,
-            sizeof(*dsi->clks) * dsi->driver_data->num_clks,
+    dsi->clks = devm_kcalloc(dev,
+            dsi->driver_data->num_clks, sizeof(*dsi->clks),
             GFP_KERNEL);
     if (!dsi->clks)
         return -ENOMEM;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 5ce84025d1cb..6127ef25acd6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1271,7 +1271,8 @@ static int fimc_probe(struct platform_device *pdev)
 
     /* construct formats/limits array */
     num_formats = ARRAY_SIZE(fimc_formats) + ARRAY_SIZE(fimc_tiled_formats);
-    formats = devm_kzalloc(dev, sizeof(*formats) * num_formats, GFP_KERNEL);
+    formats = devm_kcalloc(dev, num_formats, sizeof(*formats),
+                   GFP_KERNEL);
     if (!formats)
         return -ENOMEM;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index e99dd1e4ba65..35ac66730563 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1202,8 +1202,9 @@ static int gsc_probe(struct platform_device *pdev)
     if (!ctx)
         return -ENOMEM;
 
-    formats = devm_kzalloc(dev, sizeof(*formats) *
-                   (ARRAY_SIZE(gsc_formats)), GFP_KERNEL);
+    formats = devm_kcalloc(dev,
+                   ARRAY_SIZE(gsc_formats), sizeof(*formats),
+                   GFP_KERNEL);
     if (!formats)
         return -ENOMEM;
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 09c4bc0b1859..db91932550cf 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1692,7 +1692,7 @@ static int hdmi_clk_init(struct hdmi_context *hdata)
     if (!count)
         return 0;
 
-    clks = devm_kzalloc(dev, sizeof(*clks) * count, GFP_KERNEL);
+    clks = devm_kcalloc(dev, count, sizeof(*clks), GFP_KERNEL);
     if (!clks)
         return -ENOMEM;
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 7171b7475f58..237041a37532 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -239,7 +239,7 @@ static int mid_get_vbt_data_r10(struct drm_psb_private *dev_priv, u32 addr)
     if (read_vbt_r10(addr, &vbt))
         return -1;
 
-    gct = kmalloc(sizeof(*gct) * vbt.panel_count, GFP_KERNEL);
+    gct = kmalloc_array(vbt.panel_count, sizeof(*gct), GFP_KERNEL);
     if (!gct)
         return -ENOMEM;
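All of the allocation hunks in this series follow one treewide pattern: an open-coded count * size multiplication passed to kzalloc()/kmalloc()/vmalloc() is replaced by kcalloc()/kmalloc_array()/array_size(), which detect multiplication overflow instead of silently wrapping and under-allocating. A minimal sketch of the idea (illustrative struct and function names, not code from this series):

    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <linux/overflow.h>

    struct item {
            u64 payload[8];
    };

    /*
     * kzalloc(n * sizeof(struct item), ...) wraps on overflow and can
     * return a buffer smaller than n elements; kcalloc() checks the
     * multiplication and returns NULL instead (and zeroes the memory).
     */
    static struct item *alloc_items(size_t n)
    {
            return kcalloc(n, sizeof(struct item), GFP_KERNEL);
    }

    /*
     * vzalloc() takes a plain size, so the checked multiply is done by
     * array_size(), which evaluates to SIZE_MAX on overflow and makes
     * the allocation fail cleanly.
     */
    static struct item *alloc_items_virt(size_t n)
    {
            return vzalloc(array_size(n, sizeof(struct item)));
    }

kmalloc_array() is the non-zeroing counterpart of kcalloc(), and devm_kcalloc() is the device-managed variant used by the exynos and msm hunks.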
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 78e55aafc8bc..23296547da95 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1585,8 +1585,9 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
     mm->type = INTEL_GVT_MM_GGTT;
 
     nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
-    mm->ggtt_mm.virtual_ggtt = vzalloc(nr_entries *
-                    vgpu->gvt->device_info.gtt_entry_size);
+    mm->ggtt_mm.virtual_ggtt =
+        vzalloc(array_size(nr_entries,
+                   vgpu->gvt->device_info.gtt_entry_size));
     if (!mm->ggtt_mm.virtual_ggtt) {
         vgpu_free_mm(mm);
         return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index e4960aff68bd..b31eb36fc102 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -267,7 +267,7 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
 {
     const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
 
-    vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
+    vgpu->mmio.vreg = vzalloc(array_size(info->mmio_size, 2));
     if (!vgpu->mmio.vreg)
         return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 2e0a02a80fe4..572a18c2bfb5 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -121,7 +121,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
     high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
     num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);
 
-    gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
+    gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
                  GFP_KERNEL);
     if (!gvt->types)
         return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index 2db5da550a1c..0cc6a861bcf8 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -429,7 +429,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
     if (num_downstream == 0)
         return -EINVAL;
 
-    ksv_fifo = kzalloc(num_downstream * DRM_HDCP_KSV_LEN, GFP_KERNEL);
+    ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
     if (!ksv_fifo)
         return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index f76f2597df5c..47bc5b2ddb56 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -137,7 +137,7 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri
     if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
         return 0;
 
-    valid = kzalloc(BITS_TO_LONGS(FW_RANGE) * sizeof(*valid),
+    valid = kcalloc(BITS_TO_LONGS(FW_RANGE), sizeof(*valid),
             GFP_KERNEL);
     if (!valid)
         return -ENOMEM;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index e63dc0fb55f8..c79659ca5706 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -157,8 +157,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
         hdmi->qfprom_mmio = NULL;
     }
 
-    hdmi->hpd_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_regs[0]) *
-            config->hpd_reg_cnt, GFP_KERNEL);
+    hdmi->hpd_regs = devm_kcalloc(&pdev->dev,
+                      config->hpd_reg_cnt,
+                      sizeof(hdmi->hpd_regs[0]),
+                      GFP_KERNEL);
     if (!hdmi->hpd_regs) {
         ret = -ENOMEM;
         goto fail;
@@ -178,8 +180,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
         hdmi->hpd_regs[i] = reg;
     }
 
-    hdmi->pwr_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->pwr_regs[0]) *
-            config->pwr_reg_cnt, GFP_KERNEL);
+    hdmi->pwr_regs = devm_kcalloc(&pdev->dev,
+                      config->pwr_reg_cnt,
+                      sizeof(hdmi->pwr_regs[0]),
+                      GFP_KERNEL);
     if (!hdmi->pwr_regs) {
         ret = -ENOMEM;
         goto fail;
@@ -199,8 +203,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
         hdmi->pwr_regs[i] = reg;
     }
 
-    hdmi->hpd_clks = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_clks[0]) *
-            config->hpd_clk_cnt, GFP_KERNEL);
+    hdmi->hpd_clks = devm_kcalloc(&pdev->dev,
+                      config->hpd_clk_cnt,
+                      sizeof(hdmi->hpd_clks[0]),
+                      GFP_KERNEL);
     if (!hdmi->hpd_clks) {
         ret = -ENOMEM;
         goto fail;
@@ -219,8 +225,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
         hdmi->hpd_clks[i] = clk;
     }
 
-    hdmi->pwr_clks = devm_kzalloc(&pdev->dev, sizeof(hdmi->pwr_clks[0]) *
-            config->pwr_clk_cnt, GFP_KERNEL);
+    hdmi->pwr_clks = devm_kcalloc(&pdev->dev,
+                      config->pwr_clk_cnt,
+                      sizeof(hdmi->pwr_clks[0]),
+                      GFP_KERNEL);
     if (!hdmi->pwr_clks) {
         ret = -ENOMEM;
         goto fail;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 5e631392dc85..4157722d6b4d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -21,12 +21,12 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
     struct device *dev = &phy->pdev->dev;
     int i, ret;
 
-    phy->regs = devm_kzalloc(dev, sizeof(phy->regs[0]) * cfg->num_regs,
+    phy->regs = devm_kcalloc(dev, cfg->num_regs, sizeof(phy->regs[0]),
                  GFP_KERNEL);
     if (!phy->regs)
         return -ENOMEM;
 
-    phy->clks = devm_kzalloc(dev, sizeof(phy->clks[0]) * cfg->num_clks,
+    phy->clks = devm_kcalloc(dev, cfg->num_clks, sizeof(phy->clks[0]),
                  GFP_KERNEL);
     if (!phy->clks)
         return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 090664899247..e721bb2163a0 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -141,7 +141,7 @@ nv84_fence_suspend(struct nouveau_drm *drm)
     struct nv84_fence_priv *priv = drm->fence;
     int i;
 
-    priv->suspend = vmalloc(drm->chan.nr * sizeof(u32));
+    priv->suspend = vmalloc(array_size(sizeof(u32), drm->chan.nr));
     if (priv->suspend) {
         for (i = 0; i < drm->chan.nr; i++)
             priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);
diff --git a/drivers/gpu/drm/nouveau/nvif/fifo.c b/drivers/gpu/drm/nouveau/nvif/fifo.c
index 99d4fd17543c..e84a2e2ff043 100644
--- a/drivers/gpu/drm/nouveau/nvif/fifo.c
+++ b/drivers/gpu/drm/nouveau/nvif/fifo.c
@@ -50,8 +50,8 @@ nvif_fifo_runlists(struct nvif_device *device)
         goto done;
 
     device->runlists = fls64(a->v.runlists.data);
-    device->runlist = kzalloc(sizeof(*device->runlist) *
-                  device->runlists, GFP_KERNEL);
+    device->runlist = kcalloc(device->runlists, sizeof(*device->runlist),
+                  GFP_KERNEL);
     if (!device->runlist) {
         ret = -ENOMEM;
         goto done;
diff --git a/drivers/gpu/drm/nouveau/nvif/mmu.c b/drivers/gpu/drm/nouveau/nvif/mmu.c
index 358ac4f3cf91..ae08a1ca8044 100644
--- a/drivers/gpu/drm/nouveau/nvif/mmu.c
+++ b/drivers/gpu/drm/nouveau/nvif/mmu.c
@@ -65,12 +65,15 @@ nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
         goto done;
     mmu->mem = mems[ret].oclass;
 
-    mmu->heap = kmalloc(sizeof(*mmu->heap) * mmu->heap_nr, GFP_KERNEL);
-    mmu->type = kmalloc(sizeof(*mmu->type) * mmu->type_nr, GFP_KERNEL);
+    mmu->heap = kmalloc_array(mmu->heap_nr, sizeof(*mmu->heap),
+                  GFP_KERNEL);
+    mmu->type = kmalloc_array(mmu->type_nr, sizeof(*mmu->type),
+                  GFP_KERNEL);
     if (ret = -ENOMEM, !mmu->heap || !mmu->type)
         goto done;
 
-    mmu->kind = kmalloc(sizeof(*mmu->kind) * mmu->kind_nr, GFP_KERNEL);
+    mmu->kind = kmalloc_array(mmu->kind_nr, sizeof(*mmu->kind),
+                  GFP_KERNEL);
     if (!mmu->kind && mmu->kind_nr)
         goto done;
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index 40adfe9b334b..ef3f62840e83 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -83,7 +83,7 @@ nvif_object_sclass_get(struct nvif_object *object, struct nvif_sclass **psclass)
         return ret;
     }
 
-    *psclass = kzalloc(sizeof(**psclass) * args->sclass.count, GFP_KERNEL);
+    *psclass = kcalloc(args->sclass.count, sizeof(**psclass), GFP_KERNEL);
     if (*psclass) {
         for (i = 0; i < args->sclass.count; i++) {
             (*psclass)[i].oclass = args->sclass.oclass[i].oclass;
diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c
index 191832be6c65..6b9c5776547f 100644
--- a/drivers/gpu/drm/nouveau/nvif/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvif/vmm.c
@@ -138,7 +138,8 @@ nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, u64 addr, u64 size,
     vmm->limit = args->size;
 
     vmm->page_nr = args->page_nr;
-    vmm->page = kmalloc(sizeof(*vmm->page) * vmm->page_nr, GFP_KERNEL);
+    vmm->page = kmalloc_array(vmm->page_nr, sizeof(*vmm->page),
+                  GFP_KERNEL);
     if (!vmm->page) {
         ret = -ENOMEM;
         goto done;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/event.c b/drivers/gpu/drm/nouveau/nvkm/core/event.c
index 4e8d3fa042df..006618d77aa4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/event.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/event.c
@@ -84,7 +84,8 @@ int
 nvkm_event_init(const struct nvkm_event_func *func, int types_nr, int index_nr,
         struct nvkm_event *event)
 {
-    event->refs = kzalloc(sizeof(*event->refs) * index_nr * types_nr,
+    event->refs = kzalloc(array3_size(index_nr, types_nr,
+                      sizeof(*event->refs)),
                   GFP_KERNEL);
     if (!event->refs)
         return -ENOMEM;
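nvkm_event_init() above is the three-factor case: event->refs is an index_nr by types_nr table, so the checked multiplication is chained through array3_size(). A rough sketch of the equivalence (hypothetical element type and function name, not code from this series):

    #include <linux/overflow.h>
    #include <linux/slab.h>

    /*
     * array3_size(a, b, c) performs a * b * c with the same saturating
     * overflow checks as array_size(), so an oversized table request
     * fails with NULL instead of yielding an undersized buffer.
     */
    static int *alloc_ref_table(int index_nr, int types_nr)
    {
            return kzalloc(array3_size(index_nr, types_nr, sizeof(int)),
                           GFP_KERNEL);
    }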
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index a99046414a18..afccf9721cf0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -910,7 +910,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
     nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);
 
     /* Read PBDMA->runlist(s) mapping from HW. */
-    if (!(map = kzalloc(sizeof(*map) * fifo->pbdma_nr, GFP_KERNEL)))
+    if (!(map = kcalloc(fifo->pbdma_nr, sizeof(*map), GFP_KERNEL)))
         return -ENOMEM;
 
     for (i = 0; i < fifo->pbdma_nr; i++)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
index 73e463ed55c3..dea444d48f94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
@@ -73,7 +73,8 @@ nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
     }
 
     iccsense->nr_entry = cnt;
-    iccsense->rail = kmalloc(sizeof(struct pwr_rail_t) * cnt, GFP_KERNEL);
+    iccsense->rail = kmalloc_array(cnt, sizeof(struct pwr_rail_t),
+                       GFP_KERNEL);
     if (!iccsense->rail)
         return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
index 920b3d347803..bbfde1cb3a17 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
@@ -171,7 +171,7 @@ gt215_link_train(struct gt215_ram *ram)
         return -ENOSYS;
 
     /* XXX: Multiple partitions? */
-    result = kmalloc(64 * sizeof(u32), GFP_KERNEL);
+    result = kmalloc_array(64, sizeof(u32), GFP_KERNEL);
     if (!result)
         return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
index 39808489f21d..92e363dbbc5a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
@@ -191,9 +191,9 @@ nvkm_mem_new_host(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
     nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
 
     size = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
-    if (!(mem->mem = kvmalloc(sizeof(*mem->mem) * size, GFP_KERNEL)))
+    if (!(mem->mem = kvmalloc_array(size, sizeof(*mem->mem), GFP_KERNEL)))
         return -ENOMEM;
-    if (!(mem->dma = kvmalloc(sizeof(*mem->dma) * size, GFP_KERNEL)))
+    if (!(mem->dma = kvmalloc_array(size, sizeof(*mem->dma), GFP_KERNEL)))
         return -ENOMEM;
 
     if (mmu->dma_bits > 32)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 1c12e58f44c2..de269eb482dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -59,7 +59,7 @@ nvkm_vmm_pt_new(const struct nvkm_vmm_desc *desc, bool sparse,
     pgt->sparse = sparse;
 
     if (desc->type == PGD) {
-        pgt->pde = kvzalloc(sizeof(*pgt->pde) * pten, GFP_KERNEL);
+        pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL);
         if (!pgt->pde) {
             kfree(pgt);
             return NULL;
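The nouveau MMU hunks use the kv* flavors: kvmalloc_array() and kvcalloc() apply the same overflow check but fall back from kmalloc() to vmalloc() when the allocation is large, which suits page-table-sized arrays. A minimal usage sketch (hypothetical array shape, not code from this series):

    #include <linux/mm.h>      /* kvcalloc(), kvfree() */

    static u64 *alloc_pde_array(size_t pten)
    {
            /* zeroed, overflow-checked, kmalloc with vmalloc fallback */
            return kvcalloc(pten, sizeof(u64), GFP_KERNEL);
    }

Buffers obtained this way must be released with kvfree(), since the caller cannot know which allocator actually satisfied the request.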
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 401c02e9e6b2..f92fe205550b 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -940,8 +940,8 @@ int tiler_map_show(struct seq_file *s, void *arg)
     h_adj = omap_dmm->container_height / ydiv;
     w_adj = omap_dmm->container_width / xdiv;
 
-    map = kmalloc(h_adj * sizeof(*map), GFP_KERNEL);
-    global_map = kmalloc((w_adj + 1) * h_adj, GFP_KERNEL);
+    map = kmalloc_array(h_adj, sizeof(*map), GFP_KERNEL);
+    global_map = kmalloc_array(w_adj + 1, h_adj, GFP_KERNEL);
 
     if (!map || !global_map)
         goto error;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 0faf042b82e1..17a53d207978 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -244,7 +244,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
      * DSS, GPU, etc. are not cache coherent:
      */
     if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
-        addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
+        addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
         if (!addrs) {
             ret = -ENOMEM;
             goto free_pages;
@@ -268,7 +268,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
             }
         }
     } else {
-        addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
+        addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
         if (!addrs) {
             ret = -ENOMEM;
             goto free_pages;
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 9a6752606079..ca465c0d49fa 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -241,7 +241,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
     DRM_DEBUG_DRIVER("%dx%d %d\n", mode_cmd.width,
              mode_cmd.height, mode_cmd.pitches[0]);
 
-    shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
+    shadow = vmalloc(array_size(mode_cmd.pitches[0], mode_cmd.height));
     /* TODO: what's the usual response to memory allocation errors? */
     BUG_ON(!shadow);
     DRM_DEBUG_DRIVER("surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
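The qxl_fb.c hunk above keeps the pre-existing TODO and BUG_ON() around the shadow buffer; this series only makes the size computation overflow-safe. For what the TODO asks, the usual kernel response to an allocation failure is to unwind and return -ENOMEM rather than crash; a hedged sketch of that alternative (not part of this series):

    shadow = vmalloc(array_size(mode_cmd.pitches[0], mode_cmd.height));
    if (!shadow)
            return -ENOMEM; /* after releasing anything set up earlier */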
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index c5716a0ca3b8..771250aed78d 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -200,8 +200,8 @@ int qxl_device_init(struct qxl_device *qdev,
         (~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);
 
     qdev->mem_slots =
-        kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
-            GFP_KERNEL);
+        kmalloc_array(qdev->n_mem_slots, sizeof(struct qxl_memslot),
+                  GFP_KERNEL);
 
     idr_init(&qdev->release_idr);
     spin_lock_init(&qdev->release_idr_lock);
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 6a2e091aa7b6..e55cbeee7a53 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1176,7 +1176,7 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32
     ectx.abort = false;
     ectx.last_jump = 0;
     if (ws)
-        ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
+        ectx.ws = kcalloc(4, ws, GFP_KERNEL);
     else
         ectx.ws = NULL;
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 95652e643da1..0aef4937c901 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -2581,7 +2581,9 @@ int btc_dpm_init(struct radeon_device *rdev)
         return ret;
 
     rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
-        kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
+        kcalloc(4,
+            sizeof(struct radeon_clock_voltage_dependency_entry),
+            GFP_KERNEL);
     if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
         r600_free_extended_power_table(rdev);
         return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 7e1b04dc5593..b9302c918271 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5568,8 +5568,9 @@ static int ci_parse_power_table(struct radeon_device *rdev)
         (mode_info->atom_context->bios + data_offset +
          le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
 
-    rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
-                  state_array->ucNumEntries, GFP_KERNEL);
+    rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+                  sizeof(struct radeon_ps),
+                  GFP_KERNEL);
     if (!rdev->pm.dpm.ps)
         return -ENOMEM;
     power_state_offset = (u8 *)state_array->states;
@@ -5770,7 +5771,9 @@ int ci_dpm_init(struct radeon_device *rdev)
     ci_set_private_data_variables_based_on_pptable(rdev);
 
     rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
-        kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
+        kcalloc(4,
+            sizeof(struct radeon_clock_voltage_dependency_entry),
+            GFP_KERNEL);
     if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
         ci_dpm_fini(rdev);
         return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index ae1529b0ef6f..f055d6ea3522 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2660,8 +2660,9 @@ static int kv_parse_power_table(struct radeon_device *rdev)
         (mode_info->atom_context->bios + data_offset +
          le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
 
-    rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
-                  state_array->ucNumEntries, GFP_KERNEL);
+    rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+                  sizeof(struct radeon_ps),
+                  GFP_KERNEL);
     if (!rdev->pm.dpm.ps)
         return -ENOMEM;
     power_state_offset = (u8 *)state_array->states;
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 9416e72f86aa..0fd8d6ba9828 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -3998,8 +3998,9 @@ static int ni_parse_power_table(struct radeon_device *rdev)
         return -EINVAL;
     power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
-    rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
-                  power_info->pplib.ucNumStates, GFP_KERNEL);
+    rdev->pm.dpm.ps = kcalloc(power_info->pplib.ucNumStates,
+                  sizeof(struct radeon_ps),
+                  GFP_KERNEL);
     if (!rdev->pm.dpm.ps)
         return -ENOMEM;
 
@@ -4075,7 +4076,9 @@ int ni_dpm_init(struct radeon_device *rdev)
         return ret;
 
     rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
-        kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
+        kcalloc(4,
+            sizeof(struct radeon_clock_voltage_dependency_entry),
+            GFP_KERNEL);
     if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
         r600_free_extended_power_table(rdev);
         return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 31d1b4710844..73d4c5348116 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -991,7 +991,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
             ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
 
             rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
-                kzalloc(psl->ucNumEntries *
+                kcalloc(psl->ucNumEntries,
                     sizeof(struct radeon_phase_shedding_limits_entry),
                     GFP_KERNEL);
             if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 4134759a6823..f422a8d6aec4 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2126,13 +2126,16 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
         num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
     if (num_modes == 0)
         return state_index;
-    rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
+    rdev->pm.power_state = kcalloc(num_modes,
+                       sizeof(struct radeon_power_state),
+                       GFP_KERNEL);
     if (!rdev->pm.power_state)
         return state_index;
     /* last mode is usually default, array is low to high */
     for (i = 0; i < num_modes; i++) {
         rdev->pm.power_state[state_index].clock_info =
-            kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+            kcalloc(1, sizeof(struct radeon_pm_clock_info),
+                GFP_KERNEL);
         if (!rdev->pm.power_state[state_index].clock_info)
             return state_index;
         rdev->pm.power_state[state_index].num_clock_modes = 1;
@@ -2587,8 +2590,9 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
     radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
     if (power_info->pplib.ucNumStates == 0)
         return state_index;
-    rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
-                       power_info->pplib.ucNumStates, GFP_KERNEL);
+    rdev->pm.power_state = kcalloc(power_info->pplib.ucNumStates,
+                       sizeof(struct radeon_power_state),
+                       GFP_KERNEL);
     if (!rdev->pm.power_state)
         return state_index;
     /* first mode is usually default, followed by low to high */
@@ -2603,10 +2607,11 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
              le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
              (power_state->v1.ucNonClockStateIndex *
               power_info->pplib.ucNonClockSize));
-        rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
-                                 ((power_info->pplib.ucStateEntrySize - 1) ?
-                                  (power_info->pplib.ucStateEntrySize - 1) : 1),
-                                 GFP_KERNEL);
+        rdev->pm.power_state[i].clock_info =
+            kcalloc((power_info->pplib.ucStateEntrySize - 1) ?
+                (power_info->pplib.ucStateEntrySize - 1) : 1,
+                sizeof(struct radeon_pm_clock_info),
+                GFP_KERNEL);
         if (!rdev->pm.power_state[i].clock_info)
             return state_index;
         if (power_info->pplib.ucStateEntrySize - 1) {
@@ -2688,8 +2693,9 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
          le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
     if (state_array->ucNumEntries == 0)
         return state_index;
-    rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
-                       state_array->ucNumEntries, GFP_KERNEL);
+    rdev->pm.power_state = kcalloc(state_array->ucNumEntries,
+                       sizeof(struct radeon_power_state),
+                       GFP_KERNEL);
     if (!rdev->pm.power_state)
         return state_index;
     power_state_offset = (u8 *)state_array->states;
@@ -2699,10 +2705,11 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
         non_clock_array_index = power_state->v2.nonClockInfoIndex;
         non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
             &non_clock_info_array->nonClockInfo[non_clock_array_index];
-        rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
-                                 (power_state->v2.ucNumDPMLevels ?
-                                  power_state->v2.ucNumDPMLevels : 1),
-                                 GFP_KERNEL);
+        rdev->pm.power_state[i].clock_info =
+            kcalloc(power_state->v2.ucNumDPMLevels ?
+                power_state->v2.ucNumDPMLevels : 1,
+                sizeof(struct radeon_pm_clock_info),
+                GFP_KERNEL);
         if (!rdev->pm.power_state[i].clock_info)
             return state_index;
         if (power_state->v2.ucNumDPMLevels) {
@@ -2782,7 +2789,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
     rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
     if (rdev->pm.power_state) {
         rdev->pm.power_state[0].clock_info =
-            kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+            kcalloc(1,
+                sizeof(struct radeon_pm_clock_info),
+                GFP_KERNEL);
         if (rdev->pm.power_state[0].clock_info) {
             /* add the default mode */
             rdev->pm.power_state[state_index].type =
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 3178ba0c537c..60a61d33f607 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2642,13 +2642,16 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
     rdev->pm.default_power_state_index = -1;
 
     /* allocate 2 power states */
-    rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
+    rdev->pm.power_state = kcalloc(2, sizeof(struct radeon_power_state),
+                       GFP_KERNEL);
     if (rdev->pm.power_state) {
         /* allocate 1 clock mode per state */
         rdev->pm.power_state[0].clock_info =
-            kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+            kcalloc(1, sizeof(struct radeon_pm_clock_info),
+                GFP_KERNEL);
         rdev->pm.power_state[1].clock_info =
-            kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+            kcalloc(1, sizeof(struct radeon_pm_clock_info),
+                GFP_KERNEL);
         if (!rdev->pm.power_state[0].clock_info ||
             !rdev->pm.power_state[1].clock_info)
             goto pm_failed;
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 0b3ec35515f3..1cef155cc933 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -347,13 +347,14 @@ int radeon_gart_init(struct radeon_device *rdev)
     DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
          rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
     /* Allocate pages table */
-    rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
+    rdev->gart.pages = vzalloc(array_size(sizeof(void *),
+                   rdev->gart.num_cpu_pages));
     if (rdev->gart.pages == NULL) {
         radeon_gart_fini(rdev);
         return -ENOMEM;
     }
-    rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
-                     rdev->gart.num_gpu_pages);
+    rdev->gart.pages_entry = vmalloc(array_size(sizeof(uint64_t),
+                            rdev->gart.num_gpu_pages));
     if (rdev->gart.pages_entry == NULL) {
         radeon_gart_fini(rdev);
         return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index f5e9abfadb56..48f4b273e316 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -59,7 +59,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
     n = rdev->mc.gtt_size - rdev->gart_pin_size;
     n /= size;
 
-    gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
+    gtt_obj = kcalloc(n, sizeof(*gtt_obj), GFP_KERNEL);
     if (!gtt_obj) {
         DRM_ERROR("Failed to allocate %d pointers\n", n);
         r = 1;
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index b5e4e09a8996..694b7b3e9799 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -804,8 +804,9 @@ static int rs780_parse_power_table(struct radeon_device *rdev)
         return -EINVAL;
     power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
-    rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
-                  power_info->pplib.ucNumStates, GFP_KERNEL);
+    rdev->pm.dpm.ps = kcalloc(power_info->pplib.ucNumStates,
+                  sizeof(struct radeon_ps),
+                  GFP_KERNEL);
     if (!rdev->pm.dpm.ps)
         return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index d91aa3944593..6986051fbb89 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -1888,8 +1888,9 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
         return -EINVAL;
     power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
-    rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
-                  power_info->pplib.ucNumStates, GFP_KERNEL);
+    rdev->pm.dpm.ps = kcalloc(power_info->pplib.ucNumStates,
+                  sizeof(struct radeon_ps),
+                  GFP_KERNEL);
     if (!rdev->pm.dpm.ps)
         return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index cb2a7ec4e217..c765ae7ea806 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2282,8 +2282,9 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
         return -EINVAL;
     power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
-    rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
-                  power_info->pplib.ucNumStates, GFP_KERNEL);
+    rdev->pm.dpm.ps = kcalloc(power_info->pplib.ucNumStates,
+                  sizeof(struct radeon_ps),
+                  GFP_KERNEL);
     if (!rdev->pm.dpm.ps)
         return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 90d5b41007bf..fea88078cf8e 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6832,8 +6832,9 @@ static int si_parse_power_table(struct radeon_device *rdev)
         (mode_info->atom_context->bios + data_offset +
          le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
 
-    rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
-                  state_array->ucNumEntries, GFP_KERNEL);
+    rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+                  sizeof(struct radeon_ps),
+                  GFP_KERNEL);
     if (!rdev->pm.dpm.ps)
         return -ENOMEM;
     power_state_offset = (u8 *)state_array->states;
@@ -6941,7 +6942,9 @@ int si_dpm_init(struct radeon_device *rdev)
         return ret;
 
     rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
-        kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
+        kcalloc(4,
+            sizeof(struct radeon_clock_voltage_dependency_entry),
+            GFP_KERNEL);
     if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
         r600_free_extended_power_table(rdev);
         return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index fd4804829e46..1e4975f3374c 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1482,8 +1482,9 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
         (mode_info->atom_context->bios + data_offset +
          le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
 
-    rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
-                  state_array->ucNumEntries, GFP_KERNEL);
+    rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+                  sizeof(struct radeon_ps),
+                  GFP_KERNEL);
     if (!rdev->pm.dpm.ps)
         return -ENOMEM;
     power_state_offset = (u8 *)state_array->states;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 2ef7c4e5e495..5d317f763eea 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1757,8 +1757,9 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
         (mode_info->atom_context->bios + data_offset +
          le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
 
-    rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
-                  state_array->ucNumEntries, GFP_KERNEL);
+    rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
+                  sizeof(struct radeon_ps),
+                  GFP_KERNEL);
     if (!rdev->pm.dpm.ps)
         return -ENOMEM;
     power_state_offset = (u8 *)state_array->states;
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index 2a5b8466d806..35dc74883f83 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -298,8 +298,9 @@ static int savage_dma_init(drm_savage_private_t * dev_priv)
     dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
         (SAVAGE_DMA_PAGE_SIZE * 4);
-    dev_priv->dma_pages = kmalloc(sizeof(drm_savage_dma_page_t) *
-                      dev_priv->nr_dma_pages, GFP_KERNEL);
+    dev_priv->dma_pages = kmalloc_array(dev_priv->nr_dma_pages,
+                        sizeof(drm_savage_dma_page_t),
+                        GFP_KERNEL);
     if (dev_priv->dma_pages == NULL)
         return -ENOMEM;
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index df1578d6f42e..44d480768dfe 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -349,8 +349,13 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
     struct dma_fence * fence = entity->dependency;
     struct drm_sched_fence *s_fence;
 
-    if (fence->context == entity->fence_context) {
-        /* We can ignore fences from ourself */
+    if (fence->context == entity->fence_context ||
+        fence->context == entity->fence_context + 1) {
+        /*
+         * Fence is a scheduled/finished fence from a job
+         * which belongs to the same entity, we can ignore
+         * fences from ourself
+         */
         dma_fence_put(entity->dependency);
         return false;
     }
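The scheduler hunk above works because each drm_sched entity reserves two consecutive dma-fence contexts: a job's "scheduled" fence signals on entity->fence_context and its "finished" fence on entity->fence_context + 1, so both must be recognized as self-dependencies. A simplified sketch of that pairing (condensed from the scheduler's fence setup; variable names abbreviated, not literal code from this series):

    /* at entity init: reserve two consecutive fence contexts */
    entity->fence_context = dma_fence_context_alloc(2);

    /* when a job is created, its two fences use the paired contexts */
    dma_fence_init(&s_fence->scheduled, &drm_sched_fence_ops_scheduled,
                   &s_fence->lock, entity->fence_context, seq);
    dma_fence_init(&s_fence->finished, &drm_sched_fence_ops_finished,
                   &s_fence->lock, entity->fence_context + 1, seq);

Without the added check, a finished fence from the entity's own earlier job would register a dependency callback on the entity itself and could stall it.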
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 7cc935d7b7aa..933af1c25387 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -389,7 +389,7 @@ static int __igt_reserve(unsigned int count, u64 size)
     if (!order)
         goto err;
 
-    nodes = vzalloc(sizeof(*nodes) * count);
+    nodes = vzalloc(array_size(count, sizeof(*nodes)));
     if (!nodes)
         goto err_order;
@@ -579,7 +579,7 @@ static int __igt_insert(unsigned int count, u64 size, bool replace)
     DRM_MM_BUG_ON(!size);
 
     ret = -ENOMEM;
-    nodes = vmalloc(count * sizeof(*nodes));
+    nodes = vmalloc(array_size(count, sizeof(*nodes)));
     if (!nodes)
         goto err;
@@ -889,7 +889,7 @@ static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
      */
 
     ret = -ENOMEM;
-    nodes = vzalloc(count * sizeof(*nodes));
+    nodes = vzalloc(array_size(count, sizeof(*nodes)));
     if (!nodes)
         goto err;
@@ -1046,7 +1046,7 @@ static int igt_align(void *ignored)
      * meets our requirements.
      */
 
-    nodes = vzalloc(max_count * sizeof(*nodes));
+    nodes = vzalloc(array_size(max_count, sizeof(*nodes)));
     if (!nodes)
         goto err;
@@ -1416,7 +1416,7 @@ static int igt_evict(void *ignored)
      */
 
     ret = -ENOMEM;
-    nodes = vzalloc(size * sizeof(*nodes));
+    nodes = vzalloc(array_size(size, sizeof(*nodes)));
     if (!nodes)
         goto err;
@@ -1526,7 +1526,7 @@ static int igt_evict_range(void *ignored)
      */
 
     ret = -ENOMEM;
-    nodes = vzalloc(size * sizeof(*nodes));
+    nodes = vzalloc(array_size(size, sizeof(*nodes)));
     if (!nodes)
         goto err;
@@ -1627,11 +1627,11 @@ static int igt_topdown(void *ignored)
      */
 
     ret = -ENOMEM;
-    nodes = vzalloc(count * sizeof(*nodes));
+    nodes = vzalloc(array_size(count, sizeof(*nodes)));
     if (!nodes)
         goto err;
 
-    bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
+    bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long),
              GFP_KERNEL);
     if (!bitmap)
         goto err_nodes;
@@ -1741,11 +1741,11 @@ static int igt_bottomup(void *ignored)
      */
 
     ret = -ENOMEM;
-    nodes = vzalloc(count * sizeof(*nodes));
+    nodes = vzalloc(array_size(count, sizeof(*nodes)));
     if (!nodes)
         goto err;
 
-    bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
+    bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long),
              GFP_KERNEL);
     if (!bitmap)
         goto err_nodes;
@@ -2098,7 +2098,7 @@ static int igt_color_evict(void *ignored)
      */
 
     ret = -ENOMEM;
-    nodes = vzalloc(total_size * sizeof(*nodes));
+    nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
     if (!nodes)
         goto err;
@@ -2199,7 +2199,7 @@ static int igt_color_evict_range(void *ignored)
      */
 
     ret = -ENOMEM;
-    nodes = vzalloc(total_size * sizeof(*nodes));
+    nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
     if (!nodes)
         goto err;
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index c987c826daa3..0426d66660d1 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -2,7 +2,6 @@ config DRM_SHMOBILE
     tristate "DRM Support for SH Mobile"
     depends on DRM && ARM
     depends on ARCH_SHMOBILE || COMPILE_TEST
-    depends on FB_SH_MOBILE_MERAM || !FB_SH_MOBILE_MERAM
     select BACKLIGHT_CLASS_DEVICE
     select BACKLIGHT_LCD_SUPPORT
     select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index e7738939a86d..40df8887fc17 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -21,8 +21,6 @@
 #include
 #include
 
-#include